mlrun 1.8.0rc45__py3-none-any.whl → 1.8.0rc46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of mlrun might be problematic; see the registry's advisory page for more details.

@@ -289,6 +289,11 @@ class ModelMonitoringMode(StrEnum):
289
289
  disabled = "disabled"
290
290
 
291
291
 
292
+ class ScheduleChiefFields(StrEnum):
293
+ LAST_REQUEST = "last_request"
294
+ LAST_ANALYZED = "last_analyzed"
295
+
296
+
292
297
  class EndpointType(IntEnum):
293
298
  NODE_EP = 1 # end point that is not a child of a router
294
299
  ROUTER = 2 # endpoint that is router
@@ -27,8 +27,12 @@ class BaseDataInfer:
27
27
  get_stats = None
28
28
 
29
29
 
30
+ def is_spark_dataframe(df) -> bool:
31
+ return "rdd" in dir(df)
32
+
33
+
30
34
  def get_infer_interface(df) -> BaseDataInfer:
31
- if hasattr(df, "rdd"):
35
+ if is_spark_dataframe(df):
32
36
  from .spark import SparkDataInfer
33
37
 
34
38
  return SparkDataInfer
@@ -40,7 +40,7 @@ from mlrun.utils.helpers import to_parquet
40
40
  from mlrun.utils.v3io_clients import get_frames_client
41
41
 
42
42
  from .. import errors
43
- from ..data_types import ValueType
43
+ from ..data_types import ValueType, is_spark_dataframe
44
44
  from ..platforms.iguazio import parse_path, split_path
45
45
  from .datastore_profile import datastore_profile_read
46
46
  from .spark_utils import spark_session_update_hadoop_options
@@ -86,8 +86,10 @@ def generate_target_run_id():
86
86
 
87
87
 
88
88
  def write_spark_dataframe_with_options(spark_options, df, mode, write_format=None):
89
+ # TODO: Replace with just df.sparkSession when Spark 3.2 support is dropped
90
+ spark_session = getattr(df, "sparkSession") or df.sql_ctx.sparkSession
89
91
  non_hadoop_spark_options = spark_session_update_hadoop_options(
90
- df.sql_ctx.sparkSession, spark_options
92
+ spark_session, spark_options
91
93
  )
92
94
  if write_format:
93
95
  df.write.format(write_format).mode(mode).save(**non_hadoop_spark_options)
@@ -510,7 +512,7 @@ class BaseStoreTarget(DataTargetBase):
510
512
  chunk_id=0,
511
513
  **kwargs,
512
514
  ) -> Optional[int]:
513
- if hasattr(df, "rdd"):
515
+ if is_spark_dataframe(df):
514
516
  options = self.get_spark_options(key_column, timestamp_key)
515
517
  options.update(kwargs)
516
518
  df = self.prepare_spark_df(df, key_column, timestamp_key, options)
@@ -1376,7 +1378,7 @@ class NoSqlBaseTarget(BaseStoreTarget):
1376
1378
  def write_dataframe(
1377
1379
  self, df, key_column=None, timestamp_key=None, chunk_id=0, **kwargs
1378
1380
  ):
1379
- if hasattr(df, "rdd"):
1381
+ if is_spark_dataframe(df):
1380
1382
  options = self.get_spark_options(key_column, timestamp_key)
1381
1383
  options.update(kwargs)
1382
1384
  df = self.prepare_spark_df(df)
@@ -2108,7 +2110,7 @@ class SQLTarget(BaseStoreTarget):
2108
2110
 
2109
2111
  self._create_sql_table()
2110
2112
 
2111
- if hasattr(df, "rdd"):
2113
+ if is_spark_dataframe(df):
2112
2114
  raise ValueError("Spark is not supported")
2113
2115
  else:
2114
2116
  (
@@ -50,8 +50,8 @@ DatasetType = typing.Union[
50
50
 
51
51
  def get_or_create_model_endpoint(
52
52
  project: str,
53
+ model_endpoint_name: str,
53
54
  model_path: str = "",
54
- model_endpoint_name: str = "",
55
55
  endpoint_id: str = "",
56
56
  function_name: str = "",
57
57
  function_tag: str = "latest",
@@ -59,6 +59,7 @@ def get_or_create_model_endpoint(
59
59
  sample_set_statistics: typing.Optional[dict[str, typing.Any]] = None,
60
60
  monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.enabled,
61
61
  db_session=None,
62
+ feature_analysis: bool = False,
62
63
  ) -> ModelEndpoint:
63
64
  """
64
65
  Get a single model endpoint object. If not exist, generate a new model endpoint with the provided parameters. Note
@@ -66,9 +67,9 @@ def get_or_create_model_endpoint(
66
67
  features, set `monitoring_mode=enabled`.
67
68
 
68
69
  :param project: Project name.
69
- :param model_path: The model store path (applicable only to new endpoint_id).
70
70
  :param model_endpoint_name: If a new model endpoint is created, the model endpoint name will be presented
71
71
  under this endpoint (applicable only to new endpoint_id).
72
+ :param model_path: The model store path (applicable only to new endpoint_id).
72
73
  :param endpoint_id: Model endpoint unique ID. If not exist in DB, will generate a new record based
73
74
  on the provided `endpoint_id`.
74
75
  :param function_name: If a new model endpoint is created, use this function name.
@@ -80,6 +81,7 @@ def get_or_create_model_endpoint(
80
81
  :param monitoring_mode: If enabled, apply model monitoring features on the provided endpoint id
81
82
  (applicable only to new endpoint_id).
82
83
  :param db_session: A runtime session that manages the current dialog with the database.
84
+ :param feature_analysis: If True, the model endpoint will be retrieved with the feature analysis mode.
83
85
 
84
86
  :return: A ModelEndpoint object
85
87
  """
@@ -99,6 +101,7 @@ def get_or_create_model_endpoint(
99
101
  endpoint_id=endpoint_id,
100
102
  function_name=function_name,
101
103
  function_tag=function_tag or "latest",
104
+ feature_analysis=feature_analysis,
102
105
  )
103
106
  # If other fields provided, validate that they are correspond to the existing model endpoint data
104
107
  _model_endpoint_validations(
@@ -76,7 +76,6 @@ class MonitoringApplicationContext:
76
76
  :param sample_df: (pd.DataFrame) The new sample DataFrame.
77
77
  :param start_infer_time: (pd.Timestamp) Start time of the monitoring schedule.
78
78
  :param end_infer_time: (pd.Timestamp) End time of the monitoring schedule.
79
- :param latest_request: (pd.Timestamp) Timestamp of the latest request on this endpoint_id.
80
79
  :param endpoint_id: (str) ID of the monitored model endpoint
81
80
  :param feature_set: (FeatureSet) the model endpoint feature set
82
81
  :param endpoint_name: (str) Name of the monitored model endpoint
@@ -208,6 +207,20 @@ class MonitoringApplicationContext:
208
207
  @property
209
208
  def sample_df(self) -> pd.DataFrame:
210
209
  if self._sample_df is None:
210
+ if (
211
+ self.endpoint_name is None
212
+ or self.endpoint_id is None
213
+ or pd.isnull(self.start_infer_time)
214
+ or pd.isnull(self.end_infer_time)
215
+ ):
216
+ raise mlrun.errors.MLRunValueError(
217
+ "You have tried to access `monitoring_context.sample_df`, but have not provided it directly "
218
+ "through `sample_data`, nor have you provided the model endpoint's name, ID, and the start and "
219
+ f"end times: `endpoint_name`={self.endpoint_name}, `endpoint_uid`={self.endpoint_id}, "
220
+ f"`start`={self.start_infer_time}, and `end`={self.end_infer_time}. "
221
+ "You can either provide the sample dataframe directly, the model endpoint's details and times, "
222
+ "or adapt the application's logic to not access the sample dataframe."
223
+ )
211
224
  feature_set = self.feature_set
212
225
  features = [f"{feature_set.metadata.name}.*"]
213
226
  vector = fstore.FeatureVector(
@@ -28,6 +28,7 @@ import mlrun
28
28
  import mlrun.common.schemas.model_monitoring.constants as mm_constants
29
29
  import mlrun.feature_store as fstore
30
30
  import mlrun.model_monitoring
31
+ import mlrun.model_monitoring.db._schedules as schedules
31
32
  import mlrun.model_monitoring.helpers
32
33
  from mlrun.common.schemas import EndpointType
33
34
  from mlrun.common.schemas.model_monitoring.constants import (
@@ -36,7 +37,6 @@ from mlrun.common.schemas.model_monitoring.constants import (
36
37
  ControllerEventKind,
37
38
  )
38
39
  from mlrun.errors import err_to_str
39
- from mlrun.model_monitoring.db._schedules import ModelMonitoringSchedulesFile
40
40
  from mlrun.model_monitoring.helpers import batch_dict2timedelta
41
41
  from mlrun.utils import datetime_now, logger
42
42
 
@@ -53,7 +53,7 @@ class _BatchWindow:
53
53
  def __init__(
54
54
  self,
55
55
  *,
56
- schedules_file: ModelMonitoringSchedulesFile,
56
+ schedules_file: schedules.ModelMonitoringSchedulesFileEndpoint,
57
57
  application: str,
58
58
  timedelta_seconds: int,
59
59
  last_updated: int,
@@ -153,7 +153,7 @@ class _BatchWindowGenerator(AbstractContextManager):
153
153
  self._project = project
154
154
  self._endpoint_id = endpoint_id
155
155
  self._timedelta = window_length
156
- self._schedules_file = ModelMonitoringSchedulesFile(
156
+ self._schedules_file = schedules.ModelMonitoringSchedulesFileEndpoint(
157
157
  project=project, endpoint_id=endpoint_id
158
158
  )
159
159
 
@@ -273,6 +273,7 @@ class MonitoringApplicationController:
273
273
  endpoint: mlrun.common.schemas.ModelEndpoint,
274
274
  application_names: set,
275
275
  base_period_minutes: int,
276
+ schedules_file: schedules.ModelMonitoringSchedulesFileChief,
276
277
  ) -> bool:
277
278
  """
278
279
  checks if there is a need to monitor the given endpoint, we should monitor endpoint if it stands in the
@@ -281,11 +282,23 @@ class MonitoringApplicationController:
281
282
  2. first request exists
282
283
  3. last request exists
283
284
  4. endpoint_type is not ROUTER
284
- if the four above conditions apply we require one of the three conditions to monitor:
285
+ if the four above conditions apply we require one of the two condition monitor:
285
286
  1. never monitored the one of the endpoint applications meaning min_last_analyzed is None
286
- 2. last request has a higher timestamp than the min_last_analyzed timestamp
287
- 3. We didn't analyze one of the application for over than _MAX_OPEN_WINDOWS_ALLOWED windows
287
+ 2. min_last_analyzed stands in the condition for sending NOP event and this the first time regular event
288
+ is sent with the combination of current last_request & current last_analyzed per endpoint.
288
289
  """
290
+ last_timestamp_sent = schedules_file.get_endpoint_last_request(
291
+ endpoint.metadata.uid
292
+ )
293
+ last_analyzed_sent = schedules_file.get_endpoint_last_analyzed(
294
+ endpoint.metadata.uid
295
+ )
296
+ logger.debug(
297
+ "Chief should monitor endpoint check",
298
+ last_timestamp_sent=last_timestamp_sent,
299
+ last_analyzed_sent=last_analyzed_sent,
300
+ uid=endpoint.metadata.uid,
301
+ )
289
302
  if (
290
303
  # Is the model endpoint monitored?
291
304
  endpoint.status.monitoring_mode == mm_constants.ModelMonitoringMode.enabled
@@ -300,26 +313,43 @@ class MonitoringApplicationController:
300
313
  project=endpoint.metadata.project,
301
314
  endpoint_id=endpoint.metadata.uid,
302
315
  ) as batch_window_generator:
303
- base_period_seconds = base_period_minutes * _SECONDS_IN_MINUTE
304
- if application_names != batch_window_generator.get_application_list():
316
+ current_time = mlrun.utils.datetime_now()
317
+ current_min_last_analyzed = (
318
+ batch_window_generator.get_min_last_analyzed()
319
+ )
320
+ if (
321
+ # Different application names, or last analyzed never updated while there are application to monitor
322
+ application_names
323
+ and (
324
+ application_names
325
+ != batch_window_generator.get_application_list()
326
+ or not current_min_last_analyzed
327
+ )
328
+ ):
305
329
  return True
306
330
  elif (
307
- not batch_window_generator.get_min_last_analyzed()
308
- or batch_window_generator.get_min_last_analyzed()
309
- <= int(endpoint.status.last_request.timestamp())
310
- or mlrun.utils.datetime_now().timestamp()
311
- - batch_window_generator.get_min_last_analyzed()
312
- >= self._MAX_OPEN_WINDOWS_ALLOWED * base_period_seconds
331
+ # Does nop event will be sent to close the relevant window
332
+ self._should_send_nop_event(
333
+ base_period_minutes, current_min_last_analyzed, current_time
334
+ )
335
+ and (
336
+ int(endpoint.status.last_request.timestamp())
337
+ != last_timestamp_sent
338
+ or current_min_last_analyzed != last_analyzed_sent
339
+ )
313
340
  ):
341
+ # Write to schedule chief file the last_request, min_last_analyzed we pushed event to stream
342
+ schedules_file.update_endpoint_timestamps(
343
+ endpoint_uid=endpoint.metadata.uid,
344
+ last_request=int(endpoint.status.last_request.timestamp()),
345
+ last_analyzed=current_min_last_analyzed,
346
+ )
314
347
  return True
315
348
  else:
316
349
  logger.info(
317
350
  "All the possible intervals were already analyzed, didn't push regular event",
318
351
  endpoint_id=endpoint.metadata.uid,
319
- last_analyzed=datetime.datetime.fromtimestamp(
320
- batch_window_generator.get_min_last_analyzed(),
321
- tz=datetime.timezone.utc,
322
- ),
352
+ last_analyzed=current_min_last_analyzed,
323
353
  last_request=endpoint.status.last_request,
324
354
  )
325
355
  else:
@@ -334,6 +364,21 @@ class MonitoringApplicationController:
334
364
  )
335
365
  return False
336
366
 
367
+ @staticmethod
368
+ def _should_send_nop_event(
369
+ base_period_minutes: int,
370
+ min_last_analyzed: int,
371
+ current_time: datetime.datetime,
372
+ ):
373
+ if min_last_analyzed:
374
+ return (
375
+ current_time.timestamp() - min_last_analyzed
376
+ >= datetime.timedelta(minutes=base_period_minutes).total_seconds()
377
+ + mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs
378
+ )
379
+ else:
380
+ return True
381
+
337
382
  def run(self, event: nuclio_sdk.Event) -> None:
338
383
  """
339
384
  Main method for controller chief, runs all the relevant monitoring applications for a single endpoint.
@@ -441,9 +486,11 @@ class MonitoringApplicationController:
441
486
  ]
442
487
  current_time = mlrun.utils.datetime_now()
443
488
  if (
444
- current_time.timestamp()
445
- - batch_window_generator.get_min_last_analyzed()
446
- >= datetime.timedelta(minutes=base_period).total_seconds()
489
+ self._should_send_nop_event(
490
+ base_period,
491
+ batch_window_generator.get_min_last_analyzed(),
492
+ current_time,
493
+ )
447
494
  and event[ControllerEvent.KIND] != ControllerEventKind.NOP_EVENT
448
495
  ):
449
496
  event = {
@@ -581,29 +628,33 @@ class MonitoringApplicationController:
581
628
  with concurrent.futures.ThreadPoolExecutor(
582
629
  max_workers=min(len(endpoints), 10)
583
630
  ) as pool:
584
- futures = {
585
- pool.submit(
586
- self.endpoint_to_regular_event,
587
- endpoint,
588
- policy,
589
- set(applications_names),
590
- self.v3io_access_key,
591
- ): endpoint
592
- for endpoint in endpoints
593
- }
594
- for future in concurrent.futures.as_completed(futures):
595
- if future.exception():
596
- exception = future.exception()
597
- error = (
598
- f"Failed to push event. Endpoint name: {futures[future].metadata.name}, "
599
- f"endpoint uid: {futures[future].metadata.uid}, traceback:\n"
600
- )
601
- error += "".join(
602
- traceback.format_exception(
603
- None, exception, exception.__traceback__
631
+ with schedules.ModelMonitoringSchedulesFileChief(
632
+ self.project
633
+ ) as schedule_file:
634
+ futures = {
635
+ pool.submit(
636
+ self.endpoint_to_regular_event,
637
+ endpoint,
638
+ policy,
639
+ set(applications_names),
640
+ self.v3io_access_key,
641
+ schedule_file,
642
+ ): endpoint
643
+ for endpoint in endpoints
644
+ }
645
+ for future in concurrent.futures.as_completed(futures):
646
+ if future.exception():
647
+ exception = future.exception()
648
+ error = (
649
+ f"Failed to push event. Endpoint name: {futures[future].metadata.name}, "
650
+ f"endpoint uid: {futures[future].metadata.uid}, traceback:\n"
604
651
  )
605
- )
606
- logger.error(error)
652
+ error += "".join(
653
+ traceback.format_exception(
654
+ None, exception, exception.__traceback__
655
+ )
656
+ )
657
+ logger.error(error)
607
658
  logger.info("Finishing monitoring controller chief")
608
659
 
609
660
  def endpoint_to_regular_event(
@@ -612,14 +663,16 @@ class MonitoringApplicationController:
612
663
  policy: dict,
613
664
  applications_names: set,
614
665
  v3io_access_key: str,
666
+ schedule_file: schedules.ModelMonitoringSchedulesFileChief,
615
667
  ) -> None:
616
668
  if self._should_monitor_endpoint(
617
669
  endpoint,
618
670
  set(applications_names),
619
671
  policy.get(ControllerEventEndpointPolicy.BASE_PERIOD, 10),
672
+ schedule_file,
620
673
  ):
621
- logger.info(
622
- "Regular event is being pushed to controller stream for model endpoint",
674
+ logger.debug(
675
+ "Endpoint data is being prepared for regular event",
623
676
  endpoint_id=endpoint.metadata.uid,
624
677
  endpoint_name=endpoint.metadata.name,
625
678
  timestamp=endpoint.status.last_request.isoformat(
@@ -13,52 +13,38 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import json
16
+ from abc import ABC, abstractmethod
16
17
  from contextlib import AbstractContextManager
17
18
  from types import TracebackType
18
19
  from typing import Final, Optional
19
20
 
20
21
  import botocore.exceptions
21
22
 
22
- import mlrun.common.schemas
23
+ import mlrun.common.schemas as schemas
23
24
  import mlrun.errors
24
25
  import mlrun.model_monitoring.helpers
25
26
  from mlrun.utils import logger
26
27
 
27
28
 
28
- class ModelMonitoringSchedulesFile(AbstractContextManager):
29
+ class ModelMonitoringSchedulesFileBase(AbstractContextManager, ABC):
29
30
  DEFAULT_SCHEDULES: Final = {}
30
31
  INITIAL_CONTENT = json.dumps(DEFAULT_SCHEDULES)
31
32
  ENCODING = "utf-8"
32
33
 
33
- def __init__(self, project: str, endpoint_id: str) -> None:
34
- """
35
- Initialize applications monitoring schedules file object.
36
- The JSON file stores a dictionary of registered application name as key and Unix timestamp as value.
37
- When working with the schedules data, use this class as a context manager to read and write the data.
38
-
39
- :param project: The project name.
40
- :param endpoint_id: The endpoint ID.
41
- """
42
- # `self._item` is the persistent version of the monitoring schedules.
43
- self._item = mlrun.model_monitoring.helpers.get_monitoring_schedules_data(
44
- project=project, endpoint_id=endpoint_id
45
- )
46
- self._path = self._item.url
47
- self._fs = self._item.store.filesystem
48
- # `self._schedules` is an in-memory copy of the DB for all the applications for
49
- # the same model endpoint.
50
- self._schedules: dict[str, int] = self.DEFAULT_SCHEDULES.copy()
51
- # Does `self._schedules` hold the content of `self._item`?
52
- self._open_schedules = False
53
-
54
- @classmethod
55
- def from_model_endpoint(
56
- cls, model_endpoint: mlrun.common.schemas.ModelEndpoint
57
- ) -> "ModelMonitoringSchedulesFile":
58
- return cls(
59
- project=model_endpoint.metadata.project,
60
- endpoint_id=model_endpoint.metadata.uid,
61
- )
34
+ def __init__(self):
35
+ self._item = self.get_data_item_object()
36
+ if self._item:
37
+ self._path = self._item.url
38
+ self._fs = self._item.store.filesystem
39
+ # `self._schedules` is an in-memory copy of the DB for all the applications for
40
+ # the same model endpoint.
41
+ self._schedules = self.DEFAULT_SCHEDULES.copy()
42
+ # Does `self._schedules` hold the content of `self._item`?
43
+ self._open_schedules = False
44
+
45
+ @abstractmethod
46
+ def get_data_item_object(self) -> mlrun.DataItem:
47
+ pass
62
48
 
63
49
  def create(self) -> None:
64
50
  """Create a schedules file with initial content - an empty dictionary"""
@@ -114,7 +100,7 @@ class ModelMonitoringSchedulesFile(AbstractContextManager):
114
100
  self._schedules = self.DEFAULT_SCHEDULES
115
101
  self._open_schedules = False
116
102
 
117
- def __enter__(self) -> "ModelMonitoringSchedulesFile":
103
+ def __enter__(self) -> "ModelMonitoringSchedulesFileBase":
118
104
  self._open()
119
105
  return super().__enter__()
120
106
 
@@ -132,6 +118,36 @@ class ModelMonitoringSchedulesFile(AbstractContextManager):
132
118
  "Open the schedules file as a context manager first"
133
119
  )
134
120
 
121
+
122
+ class ModelMonitoringSchedulesFileEndpoint(ModelMonitoringSchedulesFileBase):
123
+ def __init__(self, project: str, endpoint_id: str) -> None:
124
+ """
125
+ Initialize applications monitoring schedules file object.
126
+ The JSON file stores a dictionary of registered application name as key and Unix timestamp as value.
127
+ When working with the schedules data, use this class as a context manager to read and write the data.
128
+
129
+ :param project: The project name.
130
+ :param endpoint_id: The endpoint ID.
131
+ """
132
+ # `self._item` is the persistent version of the monitoring schedules.
133
+ self._project = project
134
+ self._endpoint_id = endpoint_id
135
+ super().__init__()
136
+
137
+ def get_data_item_object(self) -> mlrun.DataItem:
138
+ return mlrun.model_monitoring.helpers.get_monitoring_schedules_endpoint_data(
139
+ project=self._project, endpoint_id=self._endpoint_id
140
+ )
141
+
142
+ @classmethod
143
+ def from_model_endpoint(
144
+ cls, model_endpoint: schemas.ModelEndpoint
145
+ ) -> "ModelMonitoringSchedulesFileEndpoint":
146
+ return cls(
147
+ project=model_endpoint.metadata.project,
148
+ endpoint_id=model_endpoint.metadata.uid,
149
+ )
150
+
135
151
  def get_application_time(self, application: str) -> Optional[int]:
136
152
  self._check_open_schedules()
137
153
  return self._schedules.get(application)
@@ -149,6 +165,68 @@ class ModelMonitoringSchedulesFile(AbstractContextManager):
149
165
  return min(self._schedules.values(), default=None)
150
166
 
151
167
 
168
+ class ModelMonitoringSchedulesFileChief(ModelMonitoringSchedulesFileBase):
169
+ def __init__(self, project: str) -> None:
170
+ """
171
+ Initialize applications monitoring schedules chief file object.
172
+ The JSON file stores a dictionary of registered model endpoints uid as key and point to a dictionary of
173
+ "last_request" and "last_analyzed" mapped to two Unix timestamps as values.
174
+ When working with the schedules data, use this class as a context manager to read and write the data.
175
+
176
+ :param project: The project name.
177
+ """
178
+ # `self._item` is the persistent version of the monitoring schedules.
179
+ self._project = project
180
+ super().__init__()
181
+
182
+ def get_data_item_object(self) -> mlrun.DataItem:
183
+ return mlrun.model_monitoring.helpers.get_monitoring_schedules_chief_data(
184
+ project=self._project
185
+ )
186
+
187
+ def get_endpoint_last_request(self, endpoint_uid: str) -> Optional[int]:
188
+ self._check_open_schedules()
189
+ if endpoint_uid in self._schedules:
190
+ return self._schedules[endpoint_uid].get(
191
+ schemas.model_monitoring.constants.ScheduleChiefFields.LAST_REQUEST
192
+ )
193
+ else:
194
+ return None
195
+
196
+ def update_endpoint_timestamps(
197
+ self, endpoint_uid: str, last_request: int, last_analyzed: int
198
+ ) -> None:
199
+ self._check_open_schedules()
200
+ self._schedules[endpoint_uid] = {
201
+ schemas.model_monitoring.constants.ScheduleChiefFields.LAST_REQUEST: last_request,
202
+ schemas.model_monitoring.constants.ScheduleChiefFields.LAST_ANALYZED: last_analyzed,
203
+ }
204
+
205
+ def get_endpoint_last_analyzed(self, endpoint_uid: str) -> Optional[int]:
206
+ self._check_open_schedules()
207
+ if endpoint_uid in self._schedules:
208
+ return self._schedules[endpoint_uid].get(
209
+ schemas.model_monitoring.constants.ScheduleChiefFields.LAST_ANALYZED
210
+ )
211
+ else:
212
+ return None
213
+
214
+ def get_endpoint_list(self) -> set[str]:
215
+ self._check_open_schedules()
216
+ return set(self._schedules.keys())
217
+
218
+ def get_or_create(self) -> None:
219
+ try:
220
+ self._open()
221
+ except (
222
+ mlrun.errors.MLRunNotFoundError,
223
+ # Different errors are raised for S3 or local storage, see ML-8042
224
+ botocore.exceptions.ClientError,
225
+ FileNotFoundError,
226
+ ):
227
+ self.create()
228
+
229
+
152
230
  def delete_model_monitoring_schedules_folder(project: str) -> None:
153
231
  """Delete the model monitoring schedules folder of the project"""
154
232
  folder = mlrun.model_monitoring.helpers._get_monitoring_schedules_folder_path(
@@ -472,6 +472,7 @@ def update_model_endpoint_last_request(
472
472
  "Bumping model endpoint last request time (EP without serving)",
473
473
  project=project,
474
474
  endpoint_id=model_endpoint.metadata.uid,
475
+ function_name=model_endpoint.spec.function_name,
475
476
  last_request=model_endpoint.status.last_request,
476
477
  current_request=current_request.isoformat(),
477
478
  bumped_last_request=bumped_last_request,
@@ -586,16 +587,43 @@ def _get_monitoring_schedules_folder_path(project: str) -> str:
586
587
  )
587
588
 
588
589
 
589
- def _get_monitoring_schedules_file_path(*, project: str, endpoint_id: str) -> str:
590
+ def _get_monitoring_schedules_file_endpoint_path(
591
+ *, project: str, endpoint_id: str
592
+ ) -> str:
590
593
  return os.path.join(
591
594
  _get_monitoring_schedules_folder_path(project), f"{endpoint_id}.json"
592
595
  )
593
596
 
594
597
 
595
- def get_monitoring_schedules_data(*, project: str, endpoint_id: str) -> "DataItem":
598
+ def get_monitoring_schedules_endpoint_data(
599
+ *, project: str, endpoint_id: str
600
+ ) -> "DataItem":
601
+ """
602
+ Get the model monitoring schedules' data item of the project's model endpoint.
603
+ """
604
+ return mlrun.datastore.store_manager.object(
605
+ _get_monitoring_schedules_file_endpoint_path(
606
+ project=project, endpoint_id=endpoint_id
607
+ )
608
+ )
609
+
610
+
611
+ def get_monitoring_schedules_chief_data(
612
+ *,
613
+ project: str,
614
+ ) -> "DataItem":
596
615
  """
597
616
  Get the model monitoring schedules' data item of the project's model endpoint.
598
617
  """
599
618
  return mlrun.datastore.store_manager.object(
600
- _get_monitoring_schedules_file_path(project=project, endpoint_id=endpoint_id)
619
+ _get_monitoring_schedules_file_chief_path(project=project)
620
+ )
621
+
622
+
623
+ def _get_monitoring_schedules_file_chief_path(
624
+ *,
625
+ project: str,
626
+ ) -> str:
627
+ return os.path.join(
628
+ _get_monitoring_schedules_folder_path(project), f"{project}.json"
601
629
  )
mlrun/projects/project.py CHANGED
@@ -5266,7 +5266,7 @@ class MlrunProject(ModelObj):
5266
5266
  )
5267
5267
 
5268
5268
  # if engine is remote then skip the local file validation
5269
- if engine and not engine.startswith("remote"):
5269
+ if engine and engine.startswith("remote"):
5270
5270
  return
5271
5271
 
5272
5272
  code_path = self.spec.get_code_path()
@@ -36,6 +36,7 @@ class FunctionReference(ModelObj):
36
36
  spec=None,
37
37
  kind=None,
38
38
  name=None,
39
+ track_models=None,
39
40
  ):
40
41
  self.url = url
41
42
  self.kind = kind
@@ -46,6 +47,7 @@ class FunctionReference(ModelObj):
46
47
  spec = spec.to_dict()
47
48
  self.spec = spec
48
49
  self.code = code
50
+ self.track_models = track_models
49
51
 
50
52
  self._function = None
51
53
  self._address = None
@@ -130,6 +132,7 @@ class FunctionReference(ModelObj):
130
132
  if self.requirements:
131
133
  func.with_requirements(self.requirements)
132
134
  self._function = func
135
+ func.spec.track_models = self.track_models
133
136
  return func
134
137
 
135
138
  @property
@@ -337,6 +337,17 @@ class ServingRuntime(RemoteRuntime):
337
337
  """
338
338
  # Applying model monitoring configurations
339
339
  self.spec.track_models = enable_tracking
340
+ if self._spec and self._spec.function_refs:
341
+ logger.debug(
342
+ "Set tracking for children references", enable_tracking=enable_tracking
343
+ )
344
+ for name in self._spec.function_refs.keys():
345
+ self._spec.function_refs[name].track_models = enable_tracking
346
+ # Check if function_refs _function is filled if so update track_models field:
347
+ if self._spec.function_refs[name]._function:
348
+ self._spec.function_refs[
349
+ name
350
+ ]._function.spec.track_models = enable_tracking
340
351
 
341
352
  if not 0 < sampling_percentage <= 100:
342
353
  raise mlrun.errors.MLRunInvalidArgumentError(
@@ -506,7 +517,11 @@ class ServingRuntime(RemoteRuntime):
506
517
  :return function object
507
518
  """
508
519
  function_reference = FunctionReference(
509
- url, image, requirements=requirements, kind=kind or "serving"
520
+ url,
521
+ image,
522
+ requirements=requirements,
523
+ kind=kind or "serving",
524
+ track_models=self.spec.track_models,
510
525
  )
511
526
  self._spec.function_refs.update(function_reference, name)
512
527
  func = function_reference.to_function(self.kind)
@@ -145,8 +145,14 @@ class V2ModelServer(StepToDict):
145
145
  feature.name for feature in self.model_spec.outputs
146
146
  ]
147
147
 
148
+ if (
149
+ kwargs.get("endpoint_type", mlrun.common.schemas.EndpointType.LEAF_EP)
150
+ == mlrun.common.schemas.EndpointType.NODE_EP
151
+ ):
152
+ self._initialize_model_logger()
153
+
148
154
  def _lazy_init(self, event):
149
- if event and isinstance(event, dict):
155
+ if event and isinstance(event, dict) and not self.initialized:
150
156
  background_task_state = event.get("background_task_state", None)
151
157
  if (
152
158
  background_task_state
@@ -461,6 +467,50 @@ class V2ModelServer(StepToDict):
461
467
  request["inputs"] = new_inputs
462
468
  return request
463
469
 
470
+ def _initialize_model_logger(self):
471
+ server: mlrun.serving.GraphServer = getattr(
472
+ self.context, "_server", None
473
+ ) or getattr(self.context, "server", None)
474
+ if not self.context.is_mock or self.context.monitoring_mock:
475
+ if server.model_endpoint_creation_task_name:
476
+ background_task = mlrun.get_run_db().get_project_background_task(
477
+ server.project, server.model_endpoint_creation_task_name
478
+ )
479
+ logger.debug(
480
+ "Checking model endpoint creation task status",
481
+ task_name=server.model_endpoint_creation_task_name,
482
+ )
483
+ if (
484
+ background_task.status.state
485
+ in mlrun.common.schemas.BackgroundTaskState.terminal_states()
486
+ ):
487
+ logger.debug(
488
+ f"Model endpoint creation task completed with state {background_task.status.state}"
489
+ )
490
+ if (
491
+ background_task.status.state
492
+ == mlrun.common.schemas.BackgroundTaskState.succeeded
493
+ ):
494
+ self._model_logger = (
495
+ _ModelLogPusher(self, self.context)
496
+ if self.context
497
+ and self.context.stream.enabled
498
+ and self.model_endpoint_uid
499
+ else None
500
+ )
501
+ self.initialized = True
502
+
503
+ else: # in progress
504
+ logger.debug(
505
+ f"Model endpoint creation task is still in progress with the current state: "
506
+ f"{background_task.status.state}.",
507
+ name=self.name,
508
+ )
509
+ else:
510
+ logger.debug(
511
+ "Model endpoint creation task name not provided",
512
+ )
513
+
464
514
 
465
515
  class _ModelLogPusher:
466
516
  def __init__(self, model: V2ModelServer, context, output_stream=None):
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "b434e35c26bb66407a60bedddb7a9af71141902b",
3
- "version": "1.8.0-rc45"
2
+ "git_commit": "78aff2128b074d4d18751c0cbd2078e14929fa17",
3
+ "version": "1.8.0-rc46"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mlrun
3
- Version: 1.8.0rc45
3
+ Version: 1.8.0rc46
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -73,10 +73,10 @@ mlrun/common/schemas/serving.py,sha256=81ZxlDHP1fm9VPmXZGkjZj2n6cVRmqEN478hsmvv5
73
73
  mlrun/common/schemas/tag.py,sha256=HRZi5QZ4vVGaCr2AMk9eJgcNiAIXmH4YDc8a4fvF770,893
74
74
  mlrun/common/schemas/workflow.py,sha256=6u9niXfXpV-_c2rZL97gFIdAnOfM5WK-OCbrM5Kk34s,2108
75
75
  mlrun/common/schemas/model_monitoring/__init__.py,sha256=SxHG-GIdcTEuFxpKzkUdT9zKaU5Xqz9qF1uCwXvZ2z8,1709
76
- mlrun/common/schemas/model_monitoring/constants.py,sha256=BWGNxOUoXMcR2epGck_UId1C33bbmxhLwgQYktIrPxE,12462
76
+ mlrun/common/schemas/model_monitoring/constants.py,sha256=wbNe_n5wX98gD1XQ6jmt97Jh59S9GsE54UBPZl9Pg20,12570
77
77
  mlrun/common/schemas/model_monitoring/grafana.py,sha256=THQlLfPBevBksta8p5OaIsBaJtsNSXexLvHrDxOaVns,2095
78
78
  mlrun/common/schemas/model_monitoring/model_endpoints.py,sha256=O0i-pvzcXJVgf9E_tNcudDTa1xLaJchzPGfZZ8MNdD4,11482
79
- mlrun/data_types/__init__.py,sha256=unRo9GGwCmj0hBKBRsXJ2P4BzpQaddlQTvIrVQaKluI,984
79
+ mlrun/data_types/__init__.py,sha256=wdxGS1PTnaKXiNZ7PYGxxo86OifHH7NYoArIjDJksLA,1054
80
80
  mlrun/data_types/data_types.py,sha256=0_oKLC6-sXL2_nnaDMP_HSXB3fD1nJAG4J2Jq6sGNNw,4998
81
81
  mlrun/data_types/infer.py,sha256=Ogp3rsENVkjU0GDaGa9J1vjGrvMxgzwbSEuG51nt61E,6477
82
82
  mlrun/data_types/spark.py,sha256=4fPpqjFCYeFgK_yHhUNM4rT-1Gw9YiXazyjTK7TtbTI,9626
@@ -100,7 +100,7 @@ mlrun/datastore/spark_udf.py,sha256=NnnB3DZxZb-rqpRy7b-NC7QWXuuqFn3XkBDc86tU4mQ,
100
100
  mlrun/datastore/spark_utils.py,sha256=_AsVoU5Ix_-W7Gyq8io8V-2GTk0m8THJNDP3WGGaWJY,2865
101
101
  mlrun/datastore/store_resources.py,sha256=PFOMrZ6KH6hBOb0PiO-cHx_kv0UpHu5P2t8_mrR-lS4,6842
102
102
  mlrun/datastore/storeytargets.py,sha256=g5zAdizdFkcESoVGxbKWC11ZiXFgM77UL4642G32JaU,6459
103
- mlrun/datastore/targets.py,sha256=eBp2ECV1Wk6D1X7kYhaneiVfsGW_TyKkaKdBXYgvWVM,81018
103
+ mlrun/datastore/targets.py,sha256=k6IU7XPOYm9jJi5foINfO4NH3NvuXcwYB_B7Rt7V-cg,81195
104
104
  mlrun/datastore/utils.py,sha256=CbKbDI6CdFRCqyAXe-jykVvN_GH6R0JkxIQFAogR2GA,10604
105
105
  mlrun/datastore/v3io.py,sha256=QSYBORRLcJTeM9mt0EaWzyLcdmzrPkqrF7k5uLTam5U,8209
106
106
  mlrun/datastore/vectorstore.py,sha256=k-yom5gfw20hnVG0Rg7aBEehuXwvAloZwn0cx0VGals,11708
@@ -218,23 +218,23 @@ mlrun/launcher/factory.py,sha256=RW7mfzEFi8fR0M-4W1JQg1iq3_muUU6OTqT_3l4Ubrk,233
218
218
  mlrun/launcher/local.py,sha256=775HY-8S9LFUX5ubGXrLO0N1lVh8bn-DHFmNYuNqQPA,11451
219
219
  mlrun/launcher/remote.py,sha256=rLJW4UAnUT5iUb4BsGBOAV3K4R29a0X4lFtRkVKlyYU,7709
220
220
  mlrun/model_monitoring/__init__.py,sha256=ELy7njEtZnz09Dc6PGZSFFEGtnwI15bJNWM3Pj4_YIs,753
221
- mlrun/model_monitoring/api.py,sha256=nkNlBq_X12tGgs4rbVutzq-ce9P49zAyg_hvffwmz7I,27544
222
- mlrun/model_monitoring/controller.py,sha256=kRKoI47YMAhL8YdqD5D7n8fX4vqcX4uWDl6gi5WVnqA,31698
221
+ mlrun/model_monitoring/api.py,sha256=Dk5uEAk8HTU00vUwyDPUFqSOgkT9z4gxwaDXbtj-4-U,27737
222
+ mlrun/model_monitoring/controller.py,sha256=mbKonkixNgeXT1BRNKQi6i-bk0pquwWgNclQHAjRvcA,33862
223
223
  mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
224
- mlrun/model_monitoring/helpers.py,sha256=Q4vcc7x41lCJdFQIE8UFPY0WIQ8a-4tSGhziMA4ib4w,22003
224
+ mlrun/model_monitoring/helpers.py,sha256=Cc1TGsqoiuey_XDkwOeaOVV2OnXCryZnBEGGlI8Z524,22620
225
225
  mlrun/model_monitoring/stream_processing.py,sha256=4M0H4txMlsC2Q5iKTPp992KWoNPAJjPHj9rqWhXbl8w,33321
226
226
  mlrun/model_monitoring/tracking_policy.py,sha256=PBIGrUYWrwcE5gwXupBIVzOb0QRRwPJsgQm_yLGQxB4,5595
227
227
  mlrun/model_monitoring/writer.py,sha256=ibbhvfSHb8Reqlb7RGFEAUNM4iTyK1gk8-2m46mP6VM,8428
228
228
  mlrun/model_monitoring/applications/__init__.py,sha256=xDBxkBjl-whHSG_4t1mLkxiypLH-fzn8TmAW9Mjo2uI,759
229
229
  mlrun/model_monitoring/applications/_application_steps.py,sha256=PxULZznKW66Oq-fKaraOAbsTuGnV0zgXh6_91wX3KUo,8367
230
230
  mlrun/model_monitoring/applications/base.py,sha256=7XL12idItWkoE3CJ_48F6cwVx5pJH3bgfG92hb8LcN8,24872
231
- mlrun/model_monitoring/applications/context.py,sha256=Wou9lviSETjEqyMoIAi0Ko58luRkx0uy3ZDUVyRheNA,16144
231
+ mlrun/model_monitoring/applications/context.py,sha256=DKUDOfN4iY5wpOMjfsarx4pVN9A1sORyu7y2EEKEvMs,16964
232
232
  mlrun/model_monitoring/applications/histogram_data_drift.py,sha256=09t0tfC35W0SeJA3fzN29pJiB6G-V_8GlcvULVq6H9Q,15179
233
233
  mlrun/model_monitoring/applications/results.py,sha256=_qmj6TWT0SR2bi7gUyRKBU418eGgGoLW2_hTJ7S-ock,5782
234
234
  mlrun/model_monitoring/applications/evidently/__init__.py,sha256=-DqdPnBSrjZhFvKOu_Ie3MiFvlur9sPTZpZ1u0_1AE8,690
235
235
  mlrun/model_monitoring/applications/evidently/base.py,sha256=C8402vQJH7jmY-i49DnYjy6p6dETWex4Tdi8ylFLecA,5097
236
236
  mlrun/model_monitoring/db/__init__.py,sha256=r47xPGZpIfMuv8J3PQCZTSqVPMhUta4sSJCZFKcS7FM,644
237
- mlrun/model_monitoring/db/_schedules.py,sha256=AKyCJBAt0opNE3K3pg2TjCoD_afk1LKw5TY88rLQ2VA,6097
237
+ mlrun/model_monitoring/db/_schedules.py,sha256=RWn4wtKsIXg668gMLpxO9I8GlkxvPSaA5y7w-wFDcgE,9048
238
238
  mlrun/model_monitoring/db/_stats.py,sha256=VVMWLMqG3Us3ozBkLaokJF22Ewv8WKmVE1-OvS_g9vA,6943
239
239
  mlrun/model_monitoring/db/tsdb/__init__.py,sha256=4S86V_Ot_skE16SLkw0WwsaAUB0ECH6SoJdp-TIu6s8,4645
240
240
  mlrun/model_monitoring/db/tsdb/base.py,sha256=55lZfKmAWPW_Zi8DJhGib6euYhRhNxEpj528_rfh9Ww,26894
@@ -270,12 +270,12 @@ mlrun/platforms/iguazio.py,sha256=6VBTq8eQ3mzT96tzjYhAtcMQ2VjF4x8LpIPW5DAcX2Q,13
270
270
  mlrun/projects/__init__.py,sha256=0Krf0WIKfnZa71WthYOg0SoaTodGg3sV_hK3f_OlTPI,1220
271
271
  mlrun/projects/operations.py,sha256=TzPbTYBgmYrjxTKP_wOtBJYFFFwDCQtaVvF1Snr0TfM,20029
272
272
  mlrun/projects/pipelines.py,sha256=wud7ezeEmhIJvfYE_wzQbA4ygEfGXHtbOtoOpan6poY,48556
273
- mlrun/projects/project.py,sha256=H-mjTHqV_T9PhfCfGQRNm1R-7cM-7boS1Jd1TN9SyYo,235337
273
+ mlrun/projects/project.py,sha256=Jmr7l9VwFGBItS50DElEN6PuDE6OPPvXn1j16kb1z80,235333
274
274
  mlrun/runtimes/__init__.py,sha256=J9Sy2HiyMlztNv6VUurMzF5H2XzttNil8nRsWDsqLyg,8923
275
275
  mlrun/runtimes/base.py,sha256=EL14Kmc1vWEjnBPJwLj5hHC6CtRAQHJLmohCD3sFEHo,37855
276
276
  mlrun/runtimes/daskjob.py,sha256=JwuGvOiPsxEDHHMMUS4Oie4hLlYYIZwihAl6DjroTY0,19521
277
277
  mlrun/runtimes/funcdoc.py,sha256=zRFHrJsV8rhDLJwoUhcfZ7Cs0j-tQ76DxwUqdXV_Wyc,9810
278
- mlrun/runtimes/function_reference.py,sha256=CLvRY-wXX9qhI9YEzSl0VWt8piH_-5FQYQ8ObUYLLDc,4911
278
+ mlrun/runtimes/function_reference.py,sha256=fnMKUEieKgy4JyVLhFpDtr6JvKgOaQP8F_K2H3-Pk9U,5030
279
279
  mlrun/runtimes/generators.py,sha256=X8NDlCEPveDDPOHtOGcSpbl3pAVM3DP7fuPj5xVhxEY,7290
280
280
  mlrun/runtimes/kubejob.py,sha256=K-nR3J0-S3Em6Ez-JD0BxHczobQhC4m0829HLdSwX8g,8797
281
281
  mlrun/runtimes/local.py,sha256=yedo3R1c46cB1mX7aOz8zORXswQPvX86U-_fYxXoqTY,22717
@@ -294,7 +294,7 @@ mlrun/runtimes/nuclio/__init__.py,sha256=gx1kizzKv8pGT5TNloN1js1hdbxqDw3rM90sLVY
294
294
  mlrun/runtimes/nuclio/api_gateway.py,sha256=vH9ClKVP4Mb24rvA67xPuAvAhX-gAv6vVtjVxyplhdc,26969
295
295
  mlrun/runtimes/nuclio/function.py,sha256=j_gKYhaGfJjr_mVBdUcnSgXcXOHJrKHtUMpmOu8TII8,52979
296
296
  mlrun/runtimes/nuclio/nuclio.py,sha256=sLK8KdGO1LbftlL3HqPZlFOFTAAuxJACZCVl1c0Ha6E,2942
297
- mlrun/runtimes/nuclio/serving.py,sha256=1QPza0oG63bt3Bpib2VGhDcW3PNEjjsBUzIYBhiYR0s,32666
297
+ mlrun/runtimes/nuclio/serving.py,sha256=qetAyl-nfn8SWp7KyNgRtMNUVcX_q75SY9dLZP0uH6o,33365
298
298
  mlrun/runtimes/nuclio/application/__init__.py,sha256=rRs5vasy_G9IyoTpYIjYDafGoL6ifFBKgBtsXn31Atw,614
299
299
  mlrun/runtimes/nuclio/application/application.py,sha256=VPX-ruYQJ7-7yd5c2sWdF4U5JCGSS3kYjUfOgev6l_Y,29186
300
300
  mlrun/runtimes/nuclio/application/reverse_proxy.go,sha256=lEHH74vr2PridIHp1Jkc_NjkrWb5b6zawRrNxHQhwGU,2913
@@ -309,7 +309,7 @@ mlrun/serving/serving_wrapper.py,sha256=R670-S6PX_d5ER6jiHtRvacuPyFzQH0mEf2K0sBI
309
309
  mlrun/serving/states.py,sha256=Kst2N7R5SaTKYMYB8re9wTlhQwEDgkG61-4JtROKlNI,72803
310
310
  mlrun/serving/utils.py,sha256=k2EIYDWHUGkE-IBI6T0UNT32fw-KySsccIJM_LObI00,4171
311
311
  mlrun/serving/v1_serving.py,sha256=c6J_MtpE-Tqu00-6r4eJOCO6rUasHDal9W2eBIcrl50,11853
312
- mlrun/serving/v2_serving.py,sha256=nMxaFcc7vzjIYDqZMRGkMcvm2VxwnTCKocoYLByQdbw,23121
312
+ mlrun/serving/v2_serving.py,sha256=b3C5Utv2_AOPrH_hPi3NarjNbAK3kRoeIfqMU4qNuUo,25362
313
313
  mlrun/track/__init__.py,sha256=yVXbT52fXvGKRlc_ByHqIVt7-9L3DRE634RSeQwgXtU,665
314
314
  mlrun/track/tracker.py,sha256=CyTU6Qd3_5GGEJ_hpocOj71wvV65EuFYUjaYEUKAL6Q,3575
315
315
  mlrun/track/tracker_manager.py,sha256=IYBl99I62IC6VCCmG1yt6JoHNOQXa53C4DURJ2sWgio,5726
@@ -340,11 +340,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
340
340
  mlrun/utils/notifications/notification/slack.py,sha256=eQvmctTh6wIG5xVOesLLV9S1-UUCu5UEQ9JIJOor3ts,7183
341
341
  mlrun/utils/notifications/notification/webhook.py,sha256=NeyIMSBojjjTJaUHmPbxMByp34GxYkl1-16NqzU27fU,4943
342
342
  mlrun/utils/version/__init__.py,sha256=7kkrB7hEZ3cLXoWj1kPoDwo4MaswsI2JVOBpbKgPAgc,614
343
- mlrun/utils/version/version.json,sha256=M386FKHwnC4eHpp3xG7fFtQEQllorzsiu9pDs5U8888,89
343
+ mlrun/utils/version/version.json,sha256=BdVuIuX64LHL8ZydQn4fu4jqZorhEvAHUQZ0YBVTWr8,89
344
344
  mlrun/utils/version/version.py,sha256=eEW0tqIAkU9Xifxv8Z9_qsYnNhn3YH7NRAfM-pPLt1g,1878
345
- mlrun-1.8.0rc45.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
346
- mlrun-1.8.0rc45.dist-info/METADATA,sha256=Hrx8b3n6ywe0Ttl7L2PHZSvuIXzm9ELWceKlutJ-7jk,26008
347
- mlrun-1.8.0rc45.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
348
- mlrun-1.8.0rc45.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
349
- mlrun-1.8.0rc45.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
350
- mlrun-1.8.0rc45.dist-info/RECORD,,
345
+ mlrun-1.8.0rc46.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
346
+ mlrun-1.8.0rc46.dist-info/METADATA,sha256=-rHQLgCZW8i6RUoHhIzNHEJKGFNvyoI_kIgS59Ajzdk,26008
347
+ mlrun-1.8.0rc46.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
348
+ mlrun-1.8.0rc46.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
349
+ mlrun-1.8.0rc46.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
350
+ mlrun-1.8.0rc46.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (77.0.3)
2
+ Generator: setuptools (78.0.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5