mlrun 1.8.0rc41__py3-none-any.whl → 1.8.0rc42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; consult the package registry's advisory page for more details.

@@ -161,6 +161,7 @@ class ApplicationEvent:
161
161
  END_INFER_TIME = "end_infer_time"
162
162
  ENDPOINT_ID = "endpoint_id"
163
163
  ENDPOINT_NAME = "endpoint_name"
164
+ ENDPOINT_UPDATED = "endpoint_updated"
164
165
 
165
166
 
166
167
  class WriterEvent(MonitoringStrEnum):
@@ -190,7 +191,13 @@ class ControllerEvent(MonitoringStrEnum):
190
191
  ENDPOINT_TYPE = "endpoint_type"
191
192
  ENDPOINT_POLICY = "endpoint_policy"
192
193
  # Note: currently under endpoint policy we will have a dictionary including the keys: "application_names"
193
- # and "base_period"
194
+ # "base_period", and "endpoint_updated", which stands for when the MEP was updated
195
+
196
+
197
+ class ControllerEventEndpointPolicy(MonitoringStrEnum):
198
+ BASE_PERIOD = "base_period"
199
+ MONITORING_APPLICATIONS = "monitoring_applications"
200
+ ENDPOINT_UPDATED = "endpoint_updated"
194
201
 
195
202
 
196
203
  class ControllerEventKind(MonitoringStrEnum):
mlrun/datastore/base.py CHANGED
@@ -14,6 +14,7 @@
14
14
  import tempfile
15
15
  import urllib.parse
16
16
  from base64 import b64encode
17
+ from copy import copy
17
18
  from os import path, remove
18
19
  from typing import Optional, Union
19
20
  from urllib.parse import urlparse
@@ -703,7 +704,11 @@ class HttpStore(DataStore):
703
704
  raise ValueError("unimplemented")
704
705
 
705
706
  def get(self, key, size=None, offset=0):
706
- data = self._http_get(self.url + self._join(key), self._headers, self.auth)
707
+ headers = self._headers
708
+ if urlparse(self.url).hostname == "api.github.com":
709
+ headers = copy(self._headers)
710
+ headers["Accept"] = headers.get("Accept", "application/vnd.github.raw")
711
+ data = self._http_get(self.url + self._join(key), headers, self.auth)
707
712
  if offset:
708
713
  data = data[offset:]
709
714
  if size:
@@ -714,7 +719,7 @@ class HttpStore(DataStore):
714
719
  token = self._get_secret_or_env("HTTPS_AUTH_TOKEN")
715
720
  if token:
716
721
  self._https_auth_token = token
717
- self._headers.setdefault("Authorization", f"token {token}")
722
+ self._headers.setdefault("Authorization", f"Bearer {token}")
718
723
 
719
724
  def _validate_https_token(self):
720
725
  if self._https_auth_token and self._schema in ["http"]:
@@ -477,7 +477,7 @@ class DatastoreProfileHdfs(DatastoreProfile):
477
477
  return f"webhdfs://{self.host}:{self.http_port}{subpath}"
478
478
 
479
479
 
480
- class TDEngineDatastoreProfile(DatastoreProfile):
480
+ class DatastoreProfileTDEngine(DatastoreProfile):
481
481
  """
482
482
  A profile that holds the required parameters for a TDEngine database, with the websocket scheme.
483
483
  https://docs.tdengine.com/developer-guide/connecting-to-tdengine/#websocket-connection
@@ -496,7 +496,7 @@ class TDEngineDatastoreProfile(DatastoreProfile):
496
496
  return f"{self.type}://{self.user}:{self.password}@{self.host}:{self.port}"
497
497
 
498
498
  @classmethod
499
- def from_dsn(cls, dsn: str, profile_name: str) -> "TDEngineDatastoreProfile":
499
+ def from_dsn(cls, dsn: str, profile_name: str) -> "DatastoreProfileTDEngine":
500
500
  """
501
501
  Construct a TDEngine profile from DSN (connection string) and a name for the profile.
502
502
 
@@ -525,7 +525,7 @@ _DATASTORE_TYPE_TO_PROFILE_CLASS: dict[str, type[DatastoreProfile]] = {
525
525
  "gcs": DatastoreProfileGCS,
526
526
  "az": DatastoreProfileAzureBlob,
527
527
  "hdfs": DatastoreProfileHdfs,
528
- "taosws": TDEngineDatastoreProfile,
528
+ "taosws": DatastoreProfileTDEngine,
529
529
  "config": ConfigProfile,
530
530
  }
531
531
 
mlrun/datastore/s3.py CHANGED
@@ -165,6 +165,7 @@ class S3Store(DataStore):
165
165
  key=access_key_id,
166
166
  secret=secret,
167
167
  token=token,
168
+ use_listings_cache=False,
168
169
  )
169
170
 
170
171
  if endpoint_url:
@@ -23,7 +23,7 @@ from mlrun.datastore.base import DataStore
23
23
  from mlrun.datastore.datastore_profile import (
24
24
  DatastoreProfileKafkaSource,
25
25
  DatastoreProfileKafkaTarget,
26
- TDEngineDatastoreProfile,
26
+ DatastoreProfileTDEngine,
27
27
  datastore_profile_read,
28
28
  )
29
29
 
@@ -53,10 +53,10 @@ class TDEngineStoreyTarget(storey.TDEngineTarget):
53
53
  def __init__(self, *args, url: str, **kwargs):
54
54
  if url.startswith("ds://"):
55
55
  datastore_profile = datastore_profile_read(url)
56
- if not isinstance(datastore_profile, TDEngineDatastoreProfile):
56
+ if not isinstance(datastore_profile, DatastoreProfileTDEngine):
57
57
  raise ValueError(
58
58
  f"Unexpected datastore profile type:{datastore_profile.type}."
59
- "Only TDEngineDatastoreProfile is supported"
59
+ "Only DatastoreProfileTDEngine is supported"
60
60
  )
61
61
  url = datastore_profile.dsn()
62
62
  super().__init__(*args, url=url, **kwargs)
@@ -1708,6 +1708,11 @@ class KafkaTarget(BaseStoreTarget):
1708
1708
  if not path:
1709
1709
  raise mlrun.errors.MLRunInvalidArgumentError("KafkaTarget requires a path")
1710
1710
 
1711
+ # Filter attributes to keep only Kafka-related parameters
1712
+ # This removes any non-Kafka parameters inherited from BaseStoreTarget
1713
+ attributes = mlrun.datastore.utils.KafkaParameters().valid_entries_only(
1714
+ self.attributes
1715
+ )
1711
1716
  graph.add_step(
1712
1717
  name=self.name or "KafkaTarget",
1713
1718
  after=after,
@@ -1715,7 +1720,7 @@ class KafkaTarget(BaseStoreTarget):
1715
1720
  class_name="mlrun.datastore.storeytargets.KafkaStoreyTarget",
1716
1721
  columns=column_list,
1717
1722
  path=path,
1718
- attributes=self.attributes,
1723
+ attributes=attributes,
1719
1724
  )
1720
1725
 
1721
1726
  def purge(self):
mlrun/datastore/utils.py CHANGED
@@ -225,9 +225,11 @@ def validate_additional_filters(additional_filters):
225
225
 
226
226
 
227
227
  class KafkaParameters:
228
- def __init__(self, kwargs: dict):
228
+ def __init__(self, kwargs: typing.Optional[dict] = None):
229
229
  import kafka
230
230
 
231
+ if kwargs is None:
232
+ kwargs = {}
231
233
  self._kafka = kafka
232
234
  self._kwargs = kwargs
233
235
  self._client_configs = {
@@ -245,17 +247,18 @@ class KafkaParameters:
245
247
  "sasl": "",
246
248
  "worker_allocation_mode": "",
247
249
  }
248
- self._validate_keys()
249
-
250
- def _validate_keys(self) -> None:
251
- reference_dicts = (
250
+ self._reference_dicts = (
252
251
  self._custom_attributes,
253
252
  self._kafka.KafkaAdminClient.DEFAULT_CONFIG,
254
253
  self._kafka.KafkaProducer.DEFAULT_CONFIG,
255
254
  self._kafka.KafkaConsumer.DEFAULT_CONFIG,
256
255
  )
256
+
257
+ self._validate_keys()
258
+
259
+ def _validate_keys(self) -> None:
257
260
  for key in self._kwargs:
258
- if all(key not in d for d in reference_dicts):
261
+ if all(key not in d for d in self._reference_dicts):
259
262
  raise ValueError(
260
263
  f"Key '{key}' not found in any of the Kafka reference dictionaries"
261
264
  )
@@ -295,3 +298,10 @@ class KafkaParameters:
295
298
  res["password"] = pwd
296
299
  res["mechanism"] = self._kwargs.get("sasl_mechanism", "PLAIN")
297
300
  return res
301
+
302
+ def valid_entries_only(self, input_dict: dict) -> dict:
303
+ valid_keys = set()
304
+ for ref_dict in self._reference_dicts:
305
+ valid_keys.update(ref_dict.keys())
306
+ # Return a new dictionary with only valid keys
307
+ return {k: v for k, v in input_dict.items() if k in valid_keys}
@@ -12,8 +12,11 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
+ import collections
15
16
  import json
16
17
  import traceback
18
+ from collections import OrderedDict
19
+ from datetime import datetime
17
20
  from typing import Any, Optional, Union
18
21
 
19
22
  import mlrun.common.schemas
@@ -105,6 +108,8 @@ class _PushToMonitoringWriter(StepToDict):
105
108
 
106
109
 
107
110
  class _PrepareMonitoringEvent(StepToDict):
111
+ MAX_MODEL_ENDPOINTS: int = 1500
112
+
108
113
  def __init__(self, context: GraphContext, application_name: str) -> None:
109
114
  """
110
115
  Class for preparing the application event for the application step.
@@ -114,7 +119,10 @@ class _PrepareMonitoringEvent(StepToDict):
114
119
  self.graph_context = context
115
120
  _ = self.graph_context.project_obj # Ensure project exists
116
121
  self.application_name = application_name
117
- self.model_endpoints: dict[str, mlrun.common.schemas.ModelEndpoint] = {}
122
+ self.model_endpoints: OrderedDict[str, mlrun.common.schemas.ModelEndpoint] = (
123
+ collections.OrderedDict()
124
+ )
125
+ self.feature_sets: dict[str, mlrun.common.schemas.FeatureSet] = {}
118
126
 
119
127
  def do(self, event: dict[str, Any]) -> MonitoringApplicationContext:
120
128
  """
@@ -123,16 +131,48 @@ class _PrepareMonitoringEvent(StepToDict):
123
131
  :param event: Application event.
124
132
  :return: Application context.
125
133
  """
134
+ endpoint_id = event.get(mm_constants.ApplicationEvent.ENDPOINT_ID)
135
+ endpoint_updated = datetime.fromisoformat(
136
+ event.get(mm_constants.ApplicationEvent.ENDPOINT_UPDATED)
137
+ )
138
+ if (
139
+ endpoint_id in self.model_endpoints
140
+ and endpoint_updated != self.model_endpoints[endpoint_id].metadata.updated
141
+ ):
142
+ logger.debug(
143
+ "Updated endpoint removing endpoint from cash",
144
+ new_updated=endpoint_updated.isoformat(),
145
+ old_updated=self.model_endpoints[
146
+ endpoint_id
147
+ ].metadata.updated.isoformat(),
148
+ )
149
+ self.model_endpoints.pop(endpoint_id)
150
+
126
151
  application_context = MonitoringApplicationContext._from_graph_ctx(
127
152
  application_name=self.application_name,
128
153
  event=event,
129
154
  model_endpoint_dict=self.model_endpoints,
130
155
  graph_context=self.graph_context,
156
+ feature_sets_dict=self.feature_sets,
131
157
  )
132
158
 
133
159
  self.model_endpoints.setdefault(
134
160
  application_context.endpoint_id, application_context.model_endpoint
135
161
  )
162
+ self.feature_sets.setdefault(
163
+ application_context.endpoint_id, application_context.feature_set
164
+ )
165
+ # every used endpoint moves to the first position, allowing us to pop the least recently used:
166
+ self.model_endpoints.move_to_end(application_context.endpoint_id, last=False)
167
+ if len(self.model_endpoints) > self.MAX_MODEL_ENDPOINTS:
168
+ removed_endpoint_id, _ = self.model_endpoints.popitem(
169
+ last=True
170
+ ) # Removing the LRU endpoint
171
+ self.feature_sets.pop(removed_endpoint_id, None)
172
+ logger.debug(
173
+ "Exceeded maximum number of model endpoints removing the LRU from cash",
174
+ endpoint_id=removed_endpoint_id,
175
+ )
136
176
 
137
177
  return application_context
138
178
 
@@ -28,7 +28,7 @@ import mlrun.serving
28
28
  import mlrun.utils
29
29
  from mlrun.artifacts import Artifact, DatasetArtifact, ModelArtifact, get_model
30
30
  from mlrun.common.model_monitoring.helpers import FeatureStats
31
- from mlrun.common.schemas import ModelEndpoint
31
+ from mlrun.common.schemas import FeatureSet, ModelEndpoint
32
32
  from mlrun.model_monitoring.helpers import (
33
33
  calculate_inputs_statistics,
34
34
  )
@@ -59,6 +59,7 @@ class MonitoringApplicationContext:
59
59
  model_endpoint_dict: Optional[dict[str, ModelEndpoint]] = None,
60
60
  sample_df: Optional[pd.DataFrame] = None,
61
61
  feature_stats: Optional[FeatureStats] = None,
62
+ feature_sets_dict: Optional[dict[str, FeatureSet]] = None,
62
63
  ) -> None:
63
64
  """
64
65
  The :code:`MonitoringApplicationContext` object holds all the relevant information for the
@@ -77,6 +78,7 @@ class MonitoringApplicationContext:
77
78
  :param end_infer_time: (pd.Timestamp) End time of the monitoring schedule.
78
79
  :param latest_request: (pd.Timestamp) Timestamp of the latest request on this endpoint_id.
79
80
  :param endpoint_id: (str) ID of the monitored model endpoint
81
+ :param feature_set: (FeatureSet) the model endpoint feature set
80
82
  :param endpoint_name: (str) Name of the monitored model endpoint
81
83
  :param output_stream_uri: (str) URI of the output stream for results
82
84
  :param model_endpoint: (ModelEndpoint) The model endpoint object.
@@ -123,6 +125,9 @@ class MonitoringApplicationContext:
123
125
  self._model_endpoint: Optional[ModelEndpoint] = (
124
126
  model_endpoint_dict.get(self.endpoint_id) if model_endpoint_dict else None
125
127
  )
128
+ self._feature_set: Optional[FeatureSet] = (
129
+ feature_sets_dict.get(self.endpoint_id) if feature_sets_dict else None
130
+ )
126
131
 
127
132
  @classmethod
128
133
  def _from_ml_ctx(
@@ -165,6 +170,7 @@ class MonitoringApplicationContext:
165
170
  model_endpoint_dict: Optional[dict[str, ModelEndpoint]] = None,
166
171
  sample_df: Optional[pd.DataFrame] = None,
167
172
  feature_stats: Optional[FeatureStats] = None,
173
+ feature_sets_dict: Optional[dict[str, FeatureSet]] = None,
168
174
  ) -> "MonitoringApplicationContext":
169
175
  nuclio_logger = graph_context.logger
170
176
  artifacts_logger = graph_context.project_obj
@@ -183,6 +189,7 @@ class MonitoringApplicationContext:
183
189
  artifacts_logger=artifacts_logger,
184
190
  sample_df=sample_df,
185
191
  feature_stats=feature_stats,
192
+ feature_sets_dict=feature_sets_dict,
186
193
  )
187
194
 
188
195
  def _get_default_labels(self) -> dict[str, str]:
@@ -201,9 +208,7 @@ class MonitoringApplicationContext:
201
208
  @property
202
209
  def sample_df(self) -> pd.DataFrame:
203
210
  if self._sample_df is None:
204
- feature_set = fstore.get_feature_set(
205
- self.model_endpoint.spec.monitoring_feature_set_uri
206
- )
211
+ feature_set = self.feature_set
207
212
  features = [f"{feature_set.metadata.name}.*"]
208
213
  vector = fstore.FeatureVector(
209
214
  name=f"{self.endpoint_id}_vector",
@@ -224,6 +229,14 @@ class MonitoringApplicationContext:
224
229
  @property
225
230
  def model_endpoint(self) -> ModelEndpoint:
226
231
  if not self._model_endpoint:
232
+ if self.endpoint_name is None or self.endpoint_id is None:
233
+ raise mlrun.errors.MLRunValueError(
234
+ "You have NOT provided the model endpoint's name and ID: "
235
+ f"`endpoint_name`={self.endpoint_name} and `endpoint_id`={self.endpoint_id}, "
236
+ "but you have tried to access `monitoring_context.model_endpoint` "
237
+ "directly or indirectly in your application. You can either provide them, "
238
+ "or adapt the application's logic to not access the model endpoint."
239
+ )
227
240
  self._model_endpoint = mlrun.db.get_run_db().get_model_endpoint(
228
241
  name=self.endpoint_name,
229
242
  project=self.project_name,
@@ -232,6 +245,14 @@ class MonitoringApplicationContext:
232
245
  )
233
246
  return self._model_endpoint
234
247
 
248
+ @property
249
+ def feature_set(self) -> FeatureSet:
250
+ if not self._feature_set and self.model_endpoint:
251
+ self._feature_set = fstore.get_feature_set(
252
+ self.model_endpoint.spec.monitoring_feature_set_uri
253
+ )
254
+ return self._feature_set
255
+
235
256
  @property
236
257
  def feature_stats(self) -> FeatureStats:
237
258
  if not self._feature_stats:
@@ -32,6 +32,7 @@ import mlrun.model_monitoring.helpers
32
32
  from mlrun.common.schemas import EndpointType
33
33
  from mlrun.common.schemas.model_monitoring.constants import (
34
34
  ControllerEvent,
35
+ ControllerEventEndpointPolicy,
35
36
  ControllerEventKind,
36
37
  )
37
38
  from mlrun.errors import err_to_str
@@ -40,6 +41,7 @@ from mlrun.model_monitoring.helpers import batch_dict2timedelta
40
41
  from mlrun.utils import datetime_now, logger
41
42
 
42
43
  _SECONDS_IN_DAY = int(datetime.timedelta(days=1).total_seconds())
44
+ _SECONDS_IN_MINUTE = 60
43
45
 
44
46
 
45
47
  class _Interval(NamedTuple):
@@ -241,6 +243,8 @@ class MonitoringApplicationController:
241
243
  Note that the MonitoringApplicationController object requires access keys along with valid project configurations.
242
244
  """
243
245
 
246
+ _MAX_OPEN_WINDOWS_ALLOWED = 5
247
+
244
248
  def __init__(self) -> None:
245
249
  """Initialize Monitoring Application Controller"""
246
250
  self.project = cast(str, mlrun.mlconf.default_project)
@@ -264,10 +268,24 @@ class MonitoringApplicationController:
264
268
  access_key = mlrun.mlconf.get_v3io_access_key()
265
269
  return access_key
266
270
 
267
- @staticmethod
268
271
  def _should_monitor_endpoint(
269
- endpoint: mlrun.common.schemas.ModelEndpoint, application_names: set
272
+ self,
273
+ endpoint: mlrun.common.schemas.ModelEndpoint,
274
+ application_names: set,
275
+ base_period_minutes: int,
270
276
  ) -> bool:
277
+ """
278
+ Checks whether the given endpoint needs to be monitored. We should monitor the endpoint if it satisfies the
279
+ following conditions:
280
+ 1. monitoring_mode is enabled
281
+ 2. first request exists
282
+ 3. last request exists
283
+ 4. endpoint_type is not ROUTER
284
+ If the four conditions above apply, we additionally require one of the following three conditions to monitor:
285
+ 1. one of the endpoint's applications was never monitored, meaning min_last_analyzed is None
286
+ 2. last request has a higher timestamp than the min_last_analyzed timestamp
287
+ 3. we didn't analyze one of the applications for more than _MAX_OPEN_WINDOWS_ALLOWED windows
288
+ """
271
289
  if (
272
290
  # Is the model endpoint monitored?
273
291
  endpoint.status.monitoring_mode == mm_constants.ModelMonitoringMode.enabled
@@ -282,12 +300,16 @@ class MonitoringApplicationController:
282
300
  project=endpoint.metadata.project,
283
301
  endpoint_id=endpoint.metadata.uid,
284
302
  ) as batch_window_generator:
303
+ base_period_seconds = base_period_minutes * _SECONDS_IN_MINUTE
285
304
  if application_names != batch_window_generator.get_application_list():
286
305
  return True
287
306
  elif (
288
307
  not batch_window_generator.get_min_last_analyzed()
289
308
  or batch_window_generator.get_min_last_analyzed()
290
309
  <= int(endpoint.status.last_request.timestamp())
310
+ or mlrun.utils.datetime_now().timestamp()
311
+ - batch_window_generator.get_min_last_analyzed()
312
+ >= self._MAX_OPEN_WINDOWS_ALLOWED * base_period_seconds
291
313
  ):
292
314
  return True
293
315
  else:
@@ -351,7 +373,7 @@ class MonitoringApplicationController:
351
373
  endpoint_id = event[ControllerEvent.ENDPOINT_ID]
352
374
  endpoint_name = event[ControllerEvent.ENDPOINT_NAME]
353
375
  applications_names = event[ControllerEvent.ENDPOINT_POLICY][
354
- "monitoring_applications"
376
+ ControllerEventEndpointPolicy.MONITORING_APPLICATIONS
355
377
  ]
356
378
 
357
379
  not_batch_endpoint = (
@@ -410,8 +432,13 @@ class MonitoringApplicationController:
410
432
  project=project_name,
411
433
  applications_names=[application],
412
434
  model_monitoring_access_key=self.model_monitoring_access_key,
435
+ endpoint_updated=event[ControllerEvent.ENDPOINT_POLICY][
436
+ ControllerEventEndpointPolicy.ENDPOINT_UPDATED
437
+ ],
413
438
  )
414
- base_period = event[ControllerEvent.ENDPOINT_POLICY]["base_period"]
439
+ base_period = event[ControllerEvent.ENDPOINT_POLICY][
440
+ ControllerEventEndpointPolicy.BASE_PERIOD
441
+ ]
415
442
  current_time = mlrun.utils.datetime_now()
416
443
  if (
417
444
  current_time.timestamp()
@@ -463,6 +490,7 @@ class MonitoringApplicationController:
463
490
  project: str,
464
491
  applications_names: list[str],
465
492
  model_monitoring_access_key: str,
493
+ endpoint_updated: str,
466
494
  ):
467
495
  """
468
496
  Pushes data to multiple stream applications.
@@ -473,7 +501,7 @@ class MonitoringApplicationController:
473
501
  :param project: mlrun Project name.
474
502
  :param applications_names: List of application names to which data will be pushed.
475
503
  :param model_monitoring_access_key: Access key to apply the model monitoring process.
476
-
504
+ :param endpoint_updated: ISO-format string of the timestamp at which the model endpoint was updated
477
505
  """
478
506
  data = {
479
507
  mm_constants.ApplicationEvent.START_INFER_TIME: start_infer_time.isoformat(
@@ -484,6 +512,7 @@ class MonitoringApplicationController:
484
512
  ),
485
513
  mm_constants.ApplicationEvent.ENDPOINT_ID: endpoint_id,
486
514
  mm_constants.ApplicationEvent.ENDPOINT_NAME: endpoint_name,
515
+ mm_constants.ApplicationEvent.ENDPOINT_UPDATED: endpoint_updated,
487
516
  }
488
517
  for app_name in applications_names:
489
518
  data.update({mm_constants.ApplicationEvent.APPLICATION_NAME: app_name})
@@ -536,8 +565,8 @@ class MonitoringApplicationController:
536
565
  logger.info("No monitoring functions found", project=self.project)
537
566
  return
538
567
  policy = {
539
- "monitoring_applications": applications_names,
540
- "base_period": int(
568
+ ControllerEventEndpointPolicy.MONITORING_APPLICATIONS: applications_names,
569
+ ControllerEventEndpointPolicy.BASE_PERIOD: int(
541
570
  batch_dict2timedelta(
542
571
  json.loads(
543
572
  cast(
@@ -546,7 +575,7 @@ class MonitoringApplicationController:
546
575
  )
547
576
  )
548
577
  ).total_seconds()
549
- // 60
578
+ // _SECONDS_IN_MINUTE
550
579
  ),
551
580
  }
552
581
  with concurrent.futures.ThreadPoolExecutor(
@@ -585,7 +614,9 @@ class MonitoringApplicationController:
585
614
  v3io_access_key: str,
586
615
  ) -> None:
587
616
  if MonitoringApplicationController._should_monitor_endpoint(
588
- endpoint, set(applications_names)
617
+ endpoint,
618
+ set(applications_names),
619
+ policy.get(ControllerEventEndpointPolicy.BASE_PERIOD, 10),
589
620
  ):
590
621
  logger.info(
591
622
  "Regular event is being pushed to controller stream for model endpoint",
@@ -601,6 +632,9 @@ class MonitoringApplicationController:
601
632
  feature_set_uri=endpoint.spec.monitoring_feature_set_uri,
602
633
  endpoint_policy=json.dumps(policy),
603
634
  )
635
+ policy[ControllerEventEndpointPolicy.ENDPOINT_UPDATED] = (
636
+ endpoint.metadata.updated.isoformat()
637
+ )
604
638
  MonitoringApplicationController.push_to_controller_stream(
605
639
  kind=mm_constants.ControllerEventKind.REGULAR_EVENT,
606
640
  project=endpoint.metadata.project,
@@ -92,7 +92,7 @@ def get_tsdb_connector(
92
92
  if isinstance(profile, mlrun.datastore.datastore_profile.DatastoreProfileV3io):
93
93
  tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.V3IO_TSDB
94
94
  elif isinstance(
95
- profile, mlrun.datastore.datastore_profile.TDEngineDatastoreProfile
95
+ profile, mlrun.datastore.datastore_profile.DatastoreProfileTDEngine
96
96
  ):
97
97
  tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.TDEngine
98
98
  else:
@@ -11,7 +11,6 @@
11
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
- import asyncio
15
14
  import math
16
15
  from datetime import datetime, timedelta, timezone
17
16
  from io import StringIO
@@ -150,24 +149,32 @@ class V3IOTSDBConnector(TSDBConnector):
150
149
 
151
150
  def create_tables(self) -> None:
152
151
  """
153
- Create the tables using the TSDB connector. The tables are being created in the V3IO TSDB and include:
152
+ Create the tables using the TSDB connector. These are the tables that are stored in the V3IO TSDB:
154
153
  - app_results: a detailed result that includes status, kind, extra data, etc.
155
154
  - metrics: a basic key value that represents a single numeric metric.
156
- Note that the predictions table is automatically created by the model monitoring stream pod.
155
+ - events: A statistics table that includes pre-aggregated metrics (such as average latency over the
156
+ last 5 minutes) and data samples
157
+ - predictions: a detailed prediction that includes latency, request timestamp, etc. This table also
158
+ includes pre-aggregated operations such as count and average on 1 minute granularity.
159
+ - errors: a detailed error that includes error desc, error type, etc.
160
+
157
161
  """
158
- application_tables = [
159
- mm_schemas.V3IOTSDBTables.APP_RESULTS,
160
- mm_schemas.V3IOTSDBTables.METRICS,
161
- ]
162
- for table_name in application_tables:
162
+
163
+ default_configurations = {
164
+ "backend": _TSDB_BE,
165
+ "if_exists": v3io_frames.IGNORE,
166
+ "rate": _TSDB_RATE,
167
+ }
168
+
169
+ for table_name in self.tables:
170
+ default_configurations["table"] = self.tables[table_name]
171
+ if table_name == mm_schemas.V3IOTSDBTables.PREDICTIONS:
172
+ default_configurations["aggregates"] = "count,avg"
173
+ default_configurations["aggregation_granularity"] = "1m"
174
+ elif table_name == mm_schemas.V3IOTSDBTables.EVENTS:
175
+ default_configurations["rate"] = "10/m"
163
176
  logger.info("Creating table in V3IO TSDB", table_name=table_name)
164
- table = self.tables[table_name]
165
- self.frames_client.create(
166
- backend=_TSDB_BE,
167
- table=table,
168
- if_exists=v3io_frames.IGNORE,
169
- rate=_TSDB_RATE,
170
- )
177
+ self.frames_client.create(**default_configurations)
171
178
 
172
179
  def apply_monitoring_stream_steps(
173
180
  self,
@@ -228,7 +235,6 @@ class V3IOTSDBConnector(TSDBConnector):
228
235
  name="tsdb_predictions",
229
236
  after="FilterNOP",
230
237
  path=f"{self.container}/{self.tables[mm_schemas.V3IOTSDBTables.PREDICTIONS]}",
231
- rate="1/s",
232
238
  time_col=mm_schemas.EventFieldType.TIMESTAMP,
233
239
  container=self.container,
234
240
  v3io_frames=self.v3io_framesd,
@@ -241,8 +247,6 @@ class V3IOTSDBConnector(TSDBConnector):
241
247
  index_cols=[
242
248
  mm_schemas.EventFieldType.ENDPOINT_ID,
243
249
  ],
244
- aggr="count,avg",
245
- aggr_granularity="1m",
246
250
  max_events=tsdb_batching_max_events,
247
251
  flush_after_seconds=tsdb_batching_timeout_secs,
248
252
  key=mm_schemas.EventFieldType.ENDPOINT_ID,
@@ -281,7 +285,6 @@ class V3IOTSDBConnector(TSDBConnector):
281
285
  name=name,
282
286
  after=after,
283
287
  path=f"{self.container}/{self.tables[mm_schemas.V3IOTSDBTables.EVENTS]}",
284
- rate="10/m",
285
288
  time_col=mm_schemas.EventFieldType.TIMESTAMP,
286
289
  container=self.container,
287
290
  v3io_frames=self.v3io_framesd,
@@ -345,7 +348,6 @@ class V3IOTSDBConnector(TSDBConnector):
345
348
  name="tsdb_error",
346
349
  after="error_extractor",
347
350
  path=f"{self.container}/{self.tables[mm_schemas.FileTargetKind.ERRORS]}",
348
- rate="1/s",
349
351
  time_col=mm_schemas.EventFieldType.TIMESTAMP,
350
352
  container=self.container,
351
353
  v3io_frames=self.v3io_framesd,
@@ -772,6 +774,9 @@ class V3IOTSDBConnector(TSDBConnector):
772
774
  end: Union[datetime, str],
773
775
  aggregation_window: Optional[str] = None,
774
776
  agg_funcs: Optional[list[str]] = None,
777
+ limit: Optional[
778
+ int
779
+ ] = None, # no effect, just for compatibility with the abstract method
775
780
  ) -> Union[
776
781
  mm_schemas.ModelEndpointMonitoringMetricNoData,
777
782
  mm_schemas.ModelEndpointMonitoringMetricValues,
@@ -825,6 +830,7 @@ class V3IOTSDBConnector(TSDBConnector):
825
830
  ) -> Union[pd.DataFrame, list[v3io_frames.client.RawFrame]]:
826
831
  filter_query = self._get_endpoint_filter(endpoint_id=endpoint_ids)
827
832
  start, end = self._get_start_end(start, end)
833
+
828
834
  res = self._get_records(
829
835
  table=mm_schemas.V3IOTSDBTables.PREDICTIONS,
830
836
  start=start,
@@ -1018,7 +1024,7 @@ class V3IOTSDBConnector(TSDBConnector):
1018
1024
  :param model_endpoint_objects: A list of `ModelEndpoint` objects that will
1019
1025
  be filled with the relevant basic metrics.
1020
1026
  :param project: The name of the project.
1021
- :param run_in_threadpool: A function that runs another function in a thread pool.
1027
+ :param run_in_threadpool: Has no effect.
1022
1028
 
1023
1029
  :return: A list of `ModelEndpointMonitoringMetric` objects.
1024
1030
  """
@@ -1030,35 +1036,10 @@ class V3IOTSDBConnector(TSDBConnector):
1030
1036
  uids.append(uid)
1031
1037
  model_endpoint_objects_by_uid[uid] = model_endpoint_object
1032
1038
 
1033
- coroutines = [
1034
- run_in_threadpool(
1035
- self.get_error_count,
1036
- endpoint_ids=uids,
1037
- get_raw=True,
1038
- ),
1039
- run_in_threadpool(
1040
- self.get_last_request,
1041
- endpoint_ids=uids,
1042
- get_raw=True,
1043
- ),
1044
- run_in_threadpool(
1045
- self.get_avg_latency,
1046
- endpoint_ids=uids,
1047
- get_raw=True,
1048
- ),
1049
- run_in_threadpool(
1050
- self.get_drift_status,
1051
- endpoint_ids=uids,
1052
- get_raw=True,
1053
- ),
1054
- ]
1055
-
1056
- (
1057
- error_count_res,
1058
- last_request_res,
1059
- avg_latency_res,
1060
- drift_status_res,
1061
- ) = await asyncio.gather(*coroutines)
1039
+ error_count_res = self.get_error_count(endpoint_ids=uids, get_raw=True)
1040
+ last_request_res = self.get_last_request(endpoint_ids=uids, get_raw=True)
1041
+ avg_latency_res = self.get_avg_latency(endpoint_ids=uids, get_raw=True)
1042
+ drift_status_res = self.get_drift_status(endpoint_ids=uids, get_raw=True)
1062
1043
 
1063
1044
  def add_metric(
1064
1045
  metric: str,
mlrun/projects/project.py CHANGED
@@ -3484,7 +3484,7 @@ class MlrunProject(ModelObj):
3484
3484
  "Remote repo is not defined, use .create_remote() + push()"
3485
3485
  )
3486
3486
 
3487
- if engine not in ["remote"] and not schedule:
3487
+ if (engine is None or not engine.startswith("remote")) and not schedule:
3488
3488
  # For remote/scheduled runs there is no need to sync functions as they can be loaded dynamically during run
3489
3489
  self.sync_functions(always=sync, silent=True)
3490
3490
  if not self.spec._function_objects:
@@ -3692,13 +3692,13 @@ class MlrunProject(ModelObj):
3692
3692
  import mlrun
3693
3693
  from mlrun.datastore.datastore_profile import (
3694
3694
  DatastoreProfileKafkaSource,
3695
- TDEngineDatastoreProfile,
3695
+ DatastoreProfileTDEngine,
3696
3696
  )
3697
3697
 
3698
3698
  project = mlrun.get_or_create_project("mm-infra-setup")
3699
3699
 
3700
3700
  # Create and register TSDB profile
3701
- tsdb_profile = TDEngineDatastoreProfile(
3701
+ tsdb_profile = DatastoreProfileTDEngine(
3702
3702
  name="my-tdengine",
3703
3703
  host="<tdengine-server-ip-address>",
3704
3704
  port=6041,
@@ -3750,7 +3750,7 @@ class MlrunProject(ModelObj):
3750
3750
  monitoring. The supported profiles are:
3751
3751
 
3752
3752
  * :py:class:`~mlrun.datastore.datastore_profile.DatastoreProfileV3io`
3753
- * :py:class:`~mlrun.datastore.datastore_profile.TDEngineDatastoreProfile`
3753
+ * :py:class:`~mlrun.datastore.datastore_profile.DatastoreProfileTDEngine`
3754
3754
 
3755
3755
  You need to register one of them, and pass the profile's name.
3756
3756
  :param stream_profile_name: The datastore profile name of the stream to be used in model monitoring.
mlrun/utils/helpers.py CHANGED
@@ -1371,6 +1371,18 @@ def has_timezone(timestamp):
1371
1371
  return False
1372
1372
 
1373
1373
 
1374
+ def format_datetime(dt: datetime) -> str:
1375
+ # If the datetime is naive
1376
+ if dt.tzinfo is None:
1377
+ dt = dt.replace(tzinfo=timezone.utc)
1378
+
1379
+ # TODO: Once Python 3.12 is the minimal version, use %:z to format the timezone offset with a colon
1380
+ formatted_time = dt.strftime("%Y-%m-%d %H:%M:%S.%f%z")
1381
+
1382
+ # For versions earlier than Python 3.12, we manually insert the colon in the timezone offset
1383
+ return formatted_time[:-2] + ":" + formatted_time[-2:]
1384
+
1385
+
1374
1386
  def as_list(element: Any) -> list[Any]:
1375
1387
  return element if isinstance(element, list) else [element]
1376
1388
 
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "18a4adfd7aa39a10127ad6a505bce173beab12a4",
3
- "version": "1.8.0-rc41"
2
+ "git_commit": "3a6636b3c157e7b97066aafb130368828f360190",
3
+ "version": "1.8.0-rc42"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: mlrun
3
- Version: 1.8.0rc41
3
+ Version: 1.8.0rc42
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -73,7 +73,7 @@ mlrun/common/schemas/serving.py,sha256=81ZxlDHP1fm9VPmXZGkjZj2n6cVRmqEN478hsmvv5
73
73
  mlrun/common/schemas/tag.py,sha256=HRZi5QZ4vVGaCr2AMk9eJgcNiAIXmH4YDc8a4fvF770,893
74
74
  mlrun/common/schemas/workflow.py,sha256=6u9niXfXpV-_c2rZL97gFIdAnOfM5WK-OCbrM5Kk34s,2108
75
75
  mlrun/common/schemas/model_monitoring/__init__.py,sha256=SxHG-GIdcTEuFxpKzkUdT9zKaU5Xqz9qF1uCwXvZ2z8,1709
76
- mlrun/common/schemas/model_monitoring/constants.py,sha256=yXSftuCZVSdaVkKFL3PTpSkjiFs1H2QCRN0NWCk8JLs,12143
76
+ mlrun/common/schemas/model_monitoring/constants.py,sha256=YhbybKq2y8wKG9XZGz9sO1SrhDRIF9bF2m5-mbzc6AI,12428
77
77
  mlrun/common/schemas/model_monitoring/grafana.py,sha256=THQlLfPBevBksta8p5OaIsBaJtsNSXexLvHrDxOaVns,2095
78
78
  mlrun/common/schemas/model_monitoring/model_endpoints.py,sha256=O0i-pvzcXJVgf9E_tNcudDTa1xLaJchzPGfZZ8MNdD4,11482
79
79
  mlrun/data_types/__init__.py,sha256=unRo9GGwCmj0hBKBRsXJ2P4BzpQaddlQTvIrVQaKluI,984
@@ -84,24 +84,24 @@ mlrun/data_types/to_pandas.py,sha256=KOy0FLXPJirsgH6szcC5BI6t70yVDCjuo6LmuYHNTuI
84
84
  mlrun/datastore/__init__.py,sha256=81ulmQnRk1ENvwYOdetxqsLnr2gYVtW-KsvF-tY1Jxk,5783
85
85
  mlrun/datastore/alibaba_oss.py,sha256=k-OHVe08HjMewlkpsT657CbOiVFAfSq9_EqhCE-k86s,4940
86
86
  mlrun/datastore/azure_blob.py,sha256=SzAcHYSXkm8Zpopz2Ea-rWVClH0URocUazcNK04S9W0,12776
87
- mlrun/datastore/base.py,sha256=9G1euAK6Bn3nRzjEQkUl8iEa7dukGdAcS1o6pb8OHUg,25996
87
+ mlrun/datastore/base.py,sha256=9R3lwB_L4hv5WW2q24WS62_KTh-wO4UG6pwzISZU6bM,26231
88
88
  mlrun/datastore/datastore.py,sha256=frUYYP4i8ZmnY8GNXSgN_3x_exRgRPfxrCtAGEUifEU,9478
89
- mlrun/datastore/datastore_profile.py,sha256=Ybh_75jnkwuyAXOIFc_Wm-3XxOpEovlQNZ2R0OfniBg,23860
89
+ mlrun/datastore/datastore_profile.py,sha256=RRpb5TfTDBOnZQGSr6Zlmi1QSPHRDssBlWGLIpNBHM0,23860
90
90
  mlrun/datastore/dbfs_store.py,sha256=QkDRzwFnvm7CgEg4NuGxes6tBgKDyhX0CiBUvK8c9pk,6568
91
91
  mlrun/datastore/filestore.py,sha256=OcykjzhbUAZ6_Cb9bGAXRL2ngsOpxXSb4rR0lyogZtM,3773
92
92
  mlrun/datastore/google_cloud_storage.py,sha256=MnToY6irdhBZ8Wcapqnr1Yq2724LAh2uPO7MAtdWfUY,8716
93
93
  mlrun/datastore/hdfs.py,sha256=NhxvPojQQDEm0xzB6RcvnD4uLZOxfHHKYWV4gwzG7D4,1928
94
94
  mlrun/datastore/inmem.py,sha256=IsM83nn-3CqmGdLzim7i9ZmJwG6ZGhBZGN6_hszWZnE,2951
95
95
  mlrun/datastore/redis.py,sha256=QeNMkSz3zQXiXZhFUZcEtViqqbUysGJditbqe5M-J48,5682
96
- mlrun/datastore/s3.py,sha256=GjJnQLrigCqU9_ukRWv1pKhxfUtrMGFBUp6fmpPXUCY,9224
96
+ mlrun/datastore/s3.py,sha256=lD4Fs69rwMeISovZzOxRdz_z9FuffysTdjJA9ybdnLA,9262
97
97
  mlrun/datastore/snowflake_utils.py,sha256=Wohvnlmq8j1d98RCaknll-iWdZZpSlCrKhUOEy0_-CA,1483
98
98
  mlrun/datastore/sources.py,sha256=KQp1nNN7TcaewFm3It03H1R28uzlWGZDDHJyqiT--vw,49062
99
99
  mlrun/datastore/spark_udf.py,sha256=NnnB3DZxZb-rqpRy7b-NC7QWXuuqFn3XkBDc86tU4mQ,1498
100
100
  mlrun/datastore/spark_utils.py,sha256=_AsVoU5Ix_-W7Gyq8io8V-2GTk0m8THJNDP3WGGaWJY,2865
101
101
  mlrun/datastore/store_resources.py,sha256=PFOMrZ6KH6hBOb0PiO-cHx_kv0UpHu5P2t8_mrR-lS4,6842
102
- mlrun/datastore/storeytargets.py,sha256=PnlEMc4iD_0zhZZYZtEISPoGIgbsEtZNUvZ7a7ALlXo,6459
103
- mlrun/datastore/targets.py,sha256=QiEK-mHmUt2qnS2yaBSSKgk8CKqsGU-JoQ9kHoW1bvE,80759
104
- mlrun/datastore/utils.py,sha256=L51jAKsIqnl5_Q_x4sI37TbGK2JCqWE9NiS5nWF3bts,10207
102
+ mlrun/datastore/storeytargets.py,sha256=g5zAdizdFkcESoVGxbKWC11ZiXFgM77UL4642G32JaU,6459
103
+ mlrun/datastore/targets.py,sha256=eBp2ECV1Wk6D1X7kYhaneiVfsGW_TyKkaKdBXYgvWVM,81018
104
+ mlrun/datastore/utils.py,sha256=CbKbDI6CdFRCqyAXe-jykVvN_GH6R0JkxIQFAogR2GA,10604
105
105
  mlrun/datastore/v3io.py,sha256=QSYBORRLcJTeM9mt0EaWzyLcdmzrPkqrF7k5uLTam5U,8209
106
106
  mlrun/datastore/vectorstore.py,sha256=k-yom5gfw20hnVG0Rg7aBEehuXwvAloZwn0cx0VGals,11708
107
107
  mlrun/datastore/wasbfs/__init__.py,sha256=s5Ul-0kAhYqFjKDR2X0O2vDGDbLQQduElb32Ev56Te4,1343
@@ -218,16 +218,16 @@ mlrun/launcher/local.py,sha256=775HY-8S9LFUX5ubGXrLO0N1lVh8bn-DHFmNYuNqQPA,11451
218
218
  mlrun/launcher/remote.py,sha256=rLJW4UAnUT5iUb4BsGBOAV3K4R29a0X4lFtRkVKlyYU,7709
219
219
  mlrun/model_monitoring/__init__.py,sha256=ELy7njEtZnz09Dc6PGZSFFEGtnwI15bJNWM3Pj4_YIs,753
220
220
  mlrun/model_monitoring/api.py,sha256=nkNlBq_X12tGgs4rbVutzq-ce9P49zAyg_hvffwmz7I,27544
221
- mlrun/model_monitoring/controller.py,sha256=Kml08GtbNhln_r9urJxxJxTtefcgXwdRQLu4v5-L3Bg,29814
221
+ mlrun/model_monitoring/controller.py,sha256=ulrIjjx5Gf8Uwtk9hmxSnkP5fIXkQgFG7PsVkncaUpE,31756
222
222
  mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
223
223
  mlrun/model_monitoring/helpers.py,sha256=Q4vcc7x41lCJdFQIE8UFPY0WIQ8a-4tSGhziMA4ib4w,22003
224
224
  mlrun/model_monitoring/stream_processing.py,sha256=4M0H4txMlsC2Q5iKTPp992KWoNPAJjPHj9rqWhXbl8w,33321
225
225
  mlrun/model_monitoring/tracking_policy.py,sha256=PBIGrUYWrwcE5gwXupBIVzOb0QRRwPJsgQm_yLGQxB4,5595
226
226
  mlrun/model_monitoring/writer.py,sha256=vbL7bqTyNu8q4bNcebX72sUMybVDAoTWg-CXq4fov3Y,8429
227
227
  mlrun/model_monitoring/applications/__init__.py,sha256=xDBxkBjl-whHSG_4t1mLkxiypLH-fzn8TmAW9Mjo2uI,759
228
- mlrun/model_monitoring/applications/_application_steps.py,sha256=97taCEkfGx-QO-gD9uKnRF1PDIxQhY7sjPg85GxgIpA,6628
228
+ mlrun/model_monitoring/applications/_application_steps.py,sha256=PxULZznKW66Oq-fKaraOAbsTuGnV0zgXh6_91wX3KUo,8367
229
229
  mlrun/model_monitoring/applications/base.py,sha256=7XL12idItWkoE3CJ_48F6cwVx5pJH3bgfG92hb8LcN8,24872
230
- mlrun/model_monitoring/applications/context.py,sha256=xqbKS61iXE6jBekyW8zjo_E3lxe2D8VepuXG_BA5y2k,14931
230
+ mlrun/model_monitoring/applications/context.py,sha256=Wou9lviSETjEqyMoIAi0Ko58luRkx0uy3ZDUVyRheNA,16144
231
231
  mlrun/model_monitoring/applications/histogram_data_drift.py,sha256=RnrSRkNM5WYOjhiEIdkmYmUDGNnuRL8xtV-CpJ83r1U,15233
232
232
  mlrun/model_monitoring/applications/results.py,sha256=_qmj6TWT0SR2bi7gUyRKBU418eGgGoLW2_hTJ7S-ock,5782
233
233
  mlrun/model_monitoring/applications/evidently/__init__.py,sha256=-DqdPnBSrjZhFvKOu_Ie3MiFvlur9sPTZpZ1u0_1AE8,690
@@ -235,7 +235,7 @@ mlrun/model_monitoring/applications/evidently/base.py,sha256=C8402vQJH7jmY-i49Dn
235
235
  mlrun/model_monitoring/db/__init__.py,sha256=r47xPGZpIfMuv8J3PQCZTSqVPMhUta4sSJCZFKcS7FM,644
236
236
  mlrun/model_monitoring/db/_schedules.py,sha256=AKyCJBAt0opNE3K3pg2TjCoD_afk1LKw5TY88rLQ2VA,6097
237
237
  mlrun/model_monitoring/db/_stats.py,sha256=VVMWLMqG3Us3ozBkLaokJF22Ewv8WKmVE1-OvS_g9vA,6943
238
- mlrun/model_monitoring/db/tsdb/__init__.py,sha256=GRaQ9b4zNnbJIugRUwR2t5B1nNTtvPPf9RdNLTHRLKA,4645
238
+ mlrun/model_monitoring/db/tsdb/__init__.py,sha256=4S86V_Ot_skE16SLkw0WwsaAUB0ECH6SoJdp-TIu6s8,4645
239
239
  mlrun/model_monitoring/db/tsdb/base.py,sha256=ayWMWmpm35mDVTEP9AvU-P5_5rBAkBO1hULajkgt0Vw,26995
240
240
  mlrun/model_monitoring/db/tsdb/helpers.py,sha256=0oUXc4aUkYtP2SGP6jTb3uPPKImIUsVsrb9otX9a7O4,1189
241
241
  mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4sRatmSVA4CQiJs7J5dcVBkQ,620
@@ -244,7 +244,7 @@ mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Uadj0UvAmln
244
244
  mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=wjoxlNUFoHgMiG7yAMNsk1_1scca9EmMlM2Jp4Qv00c,37796
245
245
  mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
246
246
  mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=_-zo9relCDtjGgievxAcAP9gVN9nDWs8BzGtFwTjb9M,6284
247
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=8gzw69Y9mKFOAW1-_o1gr0SXgBbY9QhOhWdMUR2MdqM,42633
247
+ mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=oIGkdjp9PH-giOLzeRwzM3KIHCkSWOAgZN92DiWoWBQ,42635
248
248
  mlrun/model_monitoring/metrics/__init__.py,sha256=6CsTXAxeLbbf8yfCADTaxmiavqwrLEdYFJ-qc5kgDAY,569
249
249
  mlrun/model_monitoring/metrics/histogram_distance.py,sha256=E9_WIl2vd6qNvoHVHoFcnuQk3ekbFWOdi8aU7sHrfk4,4724
250
250
  mlrun/package/__init__.py,sha256=v7VDyK9kDOOuDvFo4oiGV2fx-vM1KL7fdN9pGLakhUQ,7008
@@ -269,7 +269,7 @@ mlrun/platforms/iguazio.py,sha256=6VBTq8eQ3mzT96tzjYhAtcMQ2VjF4x8LpIPW5DAcX2Q,13
269
269
  mlrun/projects/__init__.py,sha256=0Krf0WIKfnZa71WthYOg0SoaTodGg3sV_hK3f_OlTPI,1220
270
270
  mlrun/projects/operations.py,sha256=TzPbTYBgmYrjxTKP_wOtBJYFFFwDCQtaVvF1Snr0TfM,20029
271
271
  mlrun/projects/pipelines.py,sha256=wud7ezeEmhIJvfYE_wzQbA4ygEfGXHtbOtoOpan6poY,48556
272
- mlrun/projects/project.py,sha256=md2ieQ0gmsVaRj0urWax0aFld88cIF-9r0JIMgmXiC8,235111
272
+ mlrun/projects/project.py,sha256=Xf1dHTJ5zkxxh9e8ZyuMRz0WtfQp06Ympc1r3Edv-tk,235138
273
273
  mlrun/runtimes/__init__.py,sha256=J9Sy2HiyMlztNv6VUurMzF5H2XzttNil8nRsWDsqLyg,8923
274
274
  mlrun/runtimes/base.py,sha256=EL14Kmc1vWEjnBPJwLj5hHC6CtRAQHJLmohCD3sFEHo,37855
275
275
  mlrun/runtimes/daskjob.py,sha256=JwuGvOiPsxEDHHMMUS4Oie4hLlYYIZwihAl6DjroTY0,19521
@@ -320,7 +320,7 @@ mlrun/utils/azure_vault.py,sha256=IEFizrDGDbAaoWwDr1WoA88S_EZ0T--vjYtY-i0cvYQ,34
320
320
  mlrun/utils/clones.py,sha256=yXOeuLtgIiKZdmjeKK0Z_vIrH19ds5JuoJaCeDjhwOo,7516
321
321
  mlrun/utils/condition_evaluator.py,sha256=-nGfRmZzivn01rHTroiGY4rqEv8T1irMyhzxEei-sKc,1897
322
322
  mlrun/utils/db.py,sha256=blQgkWMfFH9lcN4sgJQcPQgEETz2Dl_zwbVA0SslpFg,2186
323
- mlrun/utils/helpers.py,sha256=ws-4ekIh2PvwO6n3-3_jm9b9RDAfSGqxC3IIiqqhnlk,73926
323
+ mlrun/utils/helpers.py,sha256=OCX51Gn7rrKPpvob_lUnyfs1lFhVHtlzSYKHXAqJJ0Q,74393
324
324
  mlrun/utils/http.py,sha256=t6FrXQstZm9xVVjxqIGiLzrwZNCR4CSienSOuVgNIcI,8706
325
325
  mlrun/utils/logger.py,sha256=RG0m1rx6gfkJ-2C1r_p41MMpPiaDYqaYM2lYHDlNZEU,14767
326
326
  mlrun/utils/regex.py,sha256=jbR7IiOp6OO0mg9Fl_cVZCpWb9fL9nTPONCUxCDNWXg,5201
@@ -339,11 +339,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
339
339
  mlrun/utils/notifications/notification/slack.py,sha256=eQvmctTh6wIG5xVOesLLV9S1-UUCu5UEQ9JIJOor3ts,7183
340
340
  mlrun/utils/notifications/notification/webhook.py,sha256=NeyIMSBojjjTJaUHmPbxMByp34GxYkl1-16NqzU27fU,4943
341
341
  mlrun/utils/version/__init__.py,sha256=7kkrB7hEZ3cLXoWj1kPoDwo4MaswsI2JVOBpbKgPAgc,614
342
- mlrun/utils/version/version.json,sha256=ixZhSkY3aQXk61oqGZn9xgYm6Aj1zyZNZQOWMEPTPtI,89
342
+ mlrun/utils/version/version.json,sha256=65OWSZpvM9Y3MGeASH_-mtvz_E1p9viDfjuw4z9-CS4,89
343
343
  mlrun/utils/version/version.py,sha256=eEW0tqIAkU9Xifxv8Z9_qsYnNhn3YH7NRAfM-pPLt1g,1878
344
- mlrun-1.8.0rc41.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
345
- mlrun-1.8.0rc41.dist-info/METADATA,sha256=UVLiV1nBd9z2-TfLSnNVUgKVxFlFyrcA1rZiwyTNi8M,25986
346
- mlrun-1.8.0rc41.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
347
- mlrun-1.8.0rc41.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
348
- mlrun-1.8.0rc41.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
349
- mlrun-1.8.0rc41.dist-info/RECORD,,
344
+ mlrun-1.8.0rc42.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
345
+ mlrun-1.8.0rc42.dist-info/METADATA,sha256=J_7vhfXaVJiBb-XcTJ8hrSXsqqjizBM8HNBC5vNct2w,25986
346
+ mlrun-1.8.0rc42.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
347
+ mlrun-1.8.0rc42.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
348
+ mlrun-1.8.0rc42.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
349
+ mlrun-1.8.0rc42.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.8.2)
2
+ Generator: setuptools (76.0.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5