mlrun 1.8.0rc46__py3-none-any.whl → 1.9.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun has been flagged as potentially problematic.

mlrun/alerts/alert.py CHANGED
@@ -112,7 +112,7 @@ class AlertConfig(ModelObj):
  complex trigger which is based on a prometheus alert
  :param criteria: When the alert will be triggered based on the specified number of events within the
  defined time period.
- :param reset_policy: When to clear the alert. May be "manual" for manual reset of the alert, or
+ :param reset_policy: When to clear the alert. Either "manual" for manual reset of the alert, or
  "auto" if the criteria contains a time period
  :param notifications: List of notifications to invoke once the alert is triggered
  :param entities: Entities that the event relates to. The entity object will contain fields that
mlrun/config.py CHANGED
@@ -64,7 +64,7 @@ default_config = {
  # url to nuclio dashboard api (can be with user & token, e.g. https://username:password@dashboard-url.com)
  "nuclio_dashboard_url": "",
  "nuclio_version": "",
- "default_nuclio_runtime": "python:3.9",
+ "default_nuclio_runtime": "python:3.11",
  "nest_asyncio_enabled": "", # enable import of nest_asyncio for corner cases with old jupyter, set "1"
  "ui_url": "", # remote/external mlrun UI url (for hyperlinks) (This is deprecated in favor of the ui block)
  "remote_host": "",
@@ -631,6 +631,8 @@ default_config = {
  "parquet_batching_max_events": 10_000,
  "parquet_batching_timeout_secs": timedelta(minutes=1).total_seconds(),
  "tdengine": {
+ "run_directly": True,
+ # timeout and retry are ignored when run_directly is set to True
  "timeout": 10,
  "retries": 1,
  },
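The new run_directly flag sits under the model-endpoint-monitoring block of the default config; as the TDEngineConnector hunk further down shows, it is read from mlrun.mlconf when the connector is created. A minimal sketch of inspecting these values from a client, assuming a standard mlrun install (the environment-variable override mentioned in the comment follows mlrun's usual MLRUN_ prefix and double-underscore nesting convention, so treat it as an assumption):

    import mlrun

    # Read the TDEngine settings shown in the hunk above (attribute path mirrors
    # the default_config structure).
    tdengine_cfg = mlrun.mlconf.model_endpoint_monitoring.tdengine
    print(tdengine_cfg.run_directly)                 # True by default in 1.9.0rc1
    print(tdengine_cfg.timeout, tdengine_cfg.retries)  # ignored when run_directly is True

    # Config values can typically be overridden via environment variables before import,
    # e.g. MLRUN_MODEL_ENDPOINT_MONITORING__TDENGINE__RUN_DIRECTLY=false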
@@ -87,7 +87,7 @@ def generate_target_run_id():
 
  def write_spark_dataframe_with_options(spark_options, df, mode, write_format=None):
  # TODO: Replace with just df.sparkSession when Spark 3.2 support is dropped
- spark_session = getattr(df, "sparkSession") or df.sql_ctx.sparkSession
+ spark_session = getattr(df, "sparkSession", None) or df.sql_ctx.sparkSession
  non_hadoop_spark_options = spark_session_update_hadoop_options(
  spark_session, spark_options
  )
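The fix matters because the two-argument form of getattr raises AttributeError when the attribute is missing, so the `or` fallback was never reached on older Spark DataFrames. A minimal, self-contained illustration in plain Python (no Spark required; the classes are hypothetical stand-ins):

    class OldStyleDF:
        """Stand-in for a DataFrame without a sparkSession attribute."""

    class LegacyContext:
        sparkSession = "session-from-sql-ctx"

    df = OldStyleDF()
    df.sql_ctx = LegacyContext()

    # Two-argument getattr raises if the attribute is absent:
    try:
        session = getattr(df, "sparkSession") or df.sql_ctx.sparkSession
    except AttributeError:
        session = None
    print(session)  # None - the fallback branch was never evaluated

    # Three-argument getattr returns the default instead, letting `or` kick in:
    session = getattr(df, "sparkSession", None) or df.sql_ctx.sparkSession
    print(session)  # "session-from-sql-ctx"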
@@ -160,7 +160,8 @@ def record_results(
  :param context: MLRun context. Note that the context is required generating the model endpoint.
  :param infer_results_df: DataFrame that will be stored under the model endpoint parquet target. Will be
  used for doing the drift analysis. Please make sure that the dataframe includes
- both feature names and label columns.
+ both feature names and label columns. If you are recording results for existing
+ model endpoint, the endpoint should be a batch endpoint.
  :param sample_set_statistics: Dictionary of sample set statistics that will be used as a reference data for
  the current model endpoint.
  :param monitoring_mode: If enabled, apply model monitoring features on the provided endpoint id. Enabled
@@ -221,23 +222,32 @@ def record_results(
  )
  logger.debug("Model endpoint", endpoint=model_endpoint)
 
- timestamp = datetime_now()
  if infer_results_df is not None:
- # Write the monitoring parquet to the relevant model endpoint context
- write_monitoring_df(
- feature_set_uri=model_endpoint.spec.monitoring_feature_set_uri,
- infer_datetime=timestamp,
- endpoint_id=model_endpoint.metadata.uid,
- infer_results_df=infer_results_df,
- )
+ if (
+ model_endpoint.metadata.endpoint_type
+ != mlrun.common.schemas.model_monitoring.EndpointType.BATCH_EP
+ ):
+ logger.warning(
+ "Inference results can be recorded only for batch endpoints. "
+ "Therefore the current results won't be monitored."
+ )
+ else:
+ timestamp = datetime_now()
+ # Write the monitoring parquet to the relevant model endpoint context
+ write_monitoring_df(
+ feature_set_uri=model_endpoint.spec.monitoring_feature_set_uri,
+ infer_datetime=timestamp,
+ endpoint_id=model_endpoint.metadata.uid,
+ infer_results_df=infer_results_df,
+ )
 
- # Update the last request time
- update_model_endpoint_last_request(
- project=project,
- model_endpoint=model_endpoint,
- current_request=timestamp,
- db=db,
- )
+ # Update the last request time
+ update_model_endpoint_last_request(
+ project=project,
+ model_endpoint=model_endpoint,
+ current_request=timestamp,
+ db=db,
+ )
 
  return model_endpoint
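With this change, record_results writes the monitoring parquet (and bumps the last-request time) only when the target model endpoint is a batch endpoint; for any other endpoint type it logs a warning and skips monitoring. A condensed sketch of that guard, reusing the enum referenced in the hunk above (the helper name is hypothetical):

    import mlrun.common.schemas.model_monitoring as mm_schemas

    def should_record(model_endpoint) -> bool:
        # Only batch endpoints get their inference results written to the
        # monitoring parquet target; anything else is skipped with a warning.
        return (
            model_endpoint.metadata.endpoint_type
            == mm_schemas.EndpointType.BATCH_EP
        )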
@@ -12,12 +12,15 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
+ import json
+ import posixpath
  import uuid
  import warnings
  from abc import ABC
 
  import pandas as pd
  import semver
+ from evidently.ui.storage.local.base import METADATA_PATH, FSLocation
 
  import mlrun.model_monitoring.applications.base as mm_base
  import mlrun.model_monitoring.applications.context as mm_context
@@ -81,12 +84,47 @@ class EvidentlyModelMonitoringApplicationBase(
  # TODO : more then one project (mep -> project)
  if not _HAS_EVIDENTLY:
  raise ModuleNotFoundError("Evidently is not installed - the app cannot run")
+ self._log_location(evidently_workspace_path)
  self.evidently_workspace = Workspace.create(evidently_workspace_path)
  self.evidently_project_id = evidently_project_id
  self.evidently_project = self.evidently_workspace.get_project(
  evidently_project_id
  )
 
+ @staticmethod
+ def _log_location(evidently_workspace_path):
+ # TODO remove function + usage after solving issue ML-9530
+ location = FSLocation(base_path=evidently_workspace_path)
+ location.invalidate_cache("")
+ paths = [p for p in location.listdir("") if location.isdir(p)]
+
+ for path in paths:
+ metadata_path = posixpath.join(path, METADATA_PATH)
+ full_path = posixpath.join(location.path, metadata_path)
+ print(f"evidently json issue, working on path: {full_path}")
+ try:
+ with location.open(metadata_path) as f:
+ content = json.load(f)
+ print(
+ f"evidently json issue, successful load path: {full_path}, content: {content}"
+ )
+ except FileNotFoundError:
+ print(f"evidently json issue, path not found: {full_path}")
+ continue
+ except json.decoder.JSONDecodeError as json_error:
+ print(
+ f"evidently json issue, path got json error, path:{full_path}, error: {json_error}"
+ )
+ print("evidently json issue, file content:")
+ with location.open(metadata_path) as f:
+ print(f.read())
+ continue
+ except Exception as error:
+ print(
+ f"evidently json issue, path got general error, path:{full_path}, error: {error}"
+ )
+ continue
+
  @staticmethod
  def log_evidently_object(
  monitoring_context: mm_context.MonitoringApplicationContext,
@@ -12,15 +12,17 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
+ import collections
  import concurrent.futures
  import datetime
  import json
  import os
  import traceback
+ from collections import OrderedDict
  from collections.abc import Iterator
  from contextlib import AbstractContextManager
  from types import TracebackType
- from typing import Any, NamedTuple, Optional, cast
+ from typing import Any, NamedTuple, Optional, Union, cast
 
  import nuclio_sdk
 
@@ -30,6 +32,7 @@ import mlrun.feature_store as fstore
  import mlrun.model_monitoring
  import mlrun.model_monitoring.db._schedules as schedules
  import mlrun.model_monitoring.helpers
+ import mlrun.platforms.iguazio
  from mlrun.common.schemas import EndpointType
  from mlrun.common.schemas.model_monitoring.constants import (
  ControllerEvent,
@@ -243,7 +246,7 @@ class MonitoringApplicationController:
  Note that the MonitoringApplicationController object requires access keys along with valid project configurations.
  """
 
- _MAX_OPEN_WINDOWS_ALLOWED = 5
+ _MAX_FEATURE_SET_PER_WORKER = 1000
 
  def __init__(self) -> None:
  """Initialize Monitoring Application Controller"""
@@ -259,6 +262,61 @@ class MonitoringApplicationController:
  mlrun.mlconf.artifact_path
  )
  self.storage_options = store.get_storage_options()
+ self._controller_stream: Optional[
+ Union[
+ mlrun.platforms.iguazio.OutputStream,
+ mlrun.platforms.iguazio.KafkaOutputStream,
+ ]
+ ] = None
+ self._model_monitoring_stream: Optional[
+ Union[
+ mlrun.platforms.iguazio.OutputStream,
+ mlrun.platforms.iguazio.KafkaOutputStream,
+ ]
+ ] = None
+ self.applications_streams: dict[
+ str,
+ Union[
+ mlrun.platforms.iguazio.OutputStream,
+ mlrun.platforms.iguazio.KafkaOutputStream,
+ ],
+ ] = {}
+ self.feature_sets: OrderedDict[str, mlrun.feature_store.FeatureSet] = (
+ collections.OrderedDict()
+ )
+ self.tsdb_connector = mlrun.model_monitoring.get_tsdb_connector(
+ project=self.project
+ )
+
+ @property
+ def controller_stream(
+ self,
+ ) -> Union[
+ mlrun.platforms.iguazio.OutputStream,
+ mlrun.platforms.iguazio.KafkaOutputStream,
+ ]:
+ if self._controller_stream is None:
+ self._controller_stream = mlrun.model_monitoring.helpers.get_output_stream(
+ project=self.project,
+ function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
+ v3io_access_key=self.v3io_access_key,
+ )
+ return self._controller_stream
+
+ @property
+ def model_monitoring_stream(
+ self,
+ ) -> Union[
+ mlrun.platforms.iguazio.OutputStream,
+ mlrun.platforms.iguazio.KafkaOutputStream,
+ ]:
+ if self._model_monitoring_stream is None:
+ self._model_monitoring_stream = mlrun.model_monitoring.helpers.get_output_stream(
+ project=self.project,
+ function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
+ v3io_access_key=self.v3io_access_key,
+ )
+ return self._model_monitoring_stream
 
  @staticmethod
  def _get_model_monitoring_access_key() -> Optional[str]:
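The two new properties follow a common lazy-initialization pattern: the output stream is created on first access and reused afterwards instead of being reopened on every push. A generic, self-contained sketch of the same pattern (hypothetical names, no mlrun dependencies):

    from typing import Optional

    class ExpensiveClient:
        def __init__(self) -> None:
            print("connecting...")  # imagine a costly connection setup here

    class Controller:
        def __init__(self) -> None:
            # Not created yet - deferred until someone actually needs it.
            self._client: Optional[ExpensiveClient] = None

        @property
        def client(self) -> ExpensiveClient:
            # Create once on first access, then return the cached instance.
            if self._client is None:
                self._client = ExpensiveClient()
            return self._client

    c = Controller()
    c.client  # prints "connecting..." once
    c.client  # reuses the cached client, no reconnect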
@@ -422,9 +480,9 @@ class MonitoringApplicationController:
  ]
 
  not_batch_endpoint = (
- event[ControllerEvent.ENDPOINT_POLICY] != EndpointType.BATCH_EP
+ event[ControllerEvent.ENDPOINT_TYPE] != EndpointType.BATCH_EP
  )
- m_fs = fstore.get_feature_set(event[ControllerEvent.FEATURE_SET_URI])
+
  logger.info(
  "Starting analyzing for", timestamp=event[ControllerEvent.TIMESTAMP]
  )
@@ -449,13 +507,39 @@ class MonitoringApplicationController:
  first_request=first_request,
  last_request=last_stream_timestamp,
  ):
- df = m_fs.to_dataframe(
- start_time=start_infer_time,
- end_time=end_infer_time,
- time_column=mm_constants.EventFieldType.TIMESTAMP,
- storage_options=self.storage_options,
- )
- if len(df) == 0:
+ data_in_window = False
+ if not_batch_endpoint:
+ # Serving endpoint - get the relevant window data from the TSDB
+ prediction_metric = self.tsdb_connector.read_predictions(
+ start=start_infer_time,
+ end=end_infer_time,
+ endpoint_id=endpoint_id,
+ )
+ if prediction_metric.data:
+ data_in_window = True
+ else:
+ if endpoint_id not in self.feature_sets:
+ self.feature_sets[endpoint_id] = fstore.get_feature_set(
+ event[ControllerEvent.FEATURE_SET_URI]
+ )
+ self.feature_sets.move_to_end(endpoint_id, last=False)
+ if (
+ len(self.feature_sets)
+ > self._MAX_FEATURE_SET_PER_WORKER
+ ):
+ self.feature_sets.popitem(last=True)
+ m_fs = self.feature_sets.get(endpoint_id)
+
+ # Batch endpoint - get the relevant window data from the parquet target
+ df = m_fs.to_dataframe(
+ start_time=start_infer_time,
+ end_time=end_infer_time,
+ time_column=mm_constants.EventFieldType.TIMESTAMP,
+ storage_options=self.storage_options,
+ )
+ if len(df) > 0:
+ data_in_window = True
+ if not data_in_window:
  logger.info(
  "No data found for the given interval",
  start=start_infer_time,
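The feature_sets dict, together with _MAX_FEATURE_SET_PER_WORKER, acts as a small bounded cache: fresh entries are moved to the front and the oldest one is evicted once the limit is exceeded. A stripped-down sketch of the same OrderedDict eviction pattern in plain Python (hypothetical loader, a limit of 3 instead of 1000, and refreshing on every access):

    from collections import OrderedDict

    MAX_ENTRIES = 3
    cache: OrderedDict[str, str] = OrderedDict()

    def load_feature_set(endpoint_id: str) -> str:
        # Stand-in for the expensive fstore.get_feature_set() call.
        return f"feature-set-for-{endpoint_id}"

    def get_cached(endpoint_id: str) -> str:
        if endpoint_id not in cache:
            cache[endpoint_id] = load_feature_set(endpoint_id)
        # Keep the most recently used entry at the front...
        cache.move_to_end(endpoint_id, last=False)
        # ...and drop the least recently used one when the cache is full.
        if len(cache) > MAX_ENTRIES:
            cache.popitem(last=True)
        return cache[endpoint_id]

    for ep in ["a", "b", "c", "d", "a"]:
        get_cached(ep)
    print(list(cache))  # ['a', 'd', 'c'] - 'b' was evicted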
@@ -528,8 +612,8 @@ class MonitoringApplicationController:
  endpoint_id=event[ControllerEvent.ENDPOINT_ID],
  )
 
- @staticmethod
  def _push_to_applications(
+ self,
  start_infer_time: datetime.datetime,
  end_infer_time: datetime.datetime,
  endpoint_id: str,
@@ -563,12 +647,15 @@ class MonitoringApplicationController:
  }
  for app_name in applications_names:
  data.update({mm_constants.ApplicationEvent.APPLICATION_NAME: app_name})
-
- app_stream = mlrun.model_monitoring.helpers.get_output_stream(
- project=project,
- function_name=app_name,
- v3io_access_key=model_monitoring_access_key,
- )
+ if app_name not in self.applications_streams:
+ self.applications_streams[app_name] = (
+ mlrun.model_monitoring.helpers.get_output_stream(
+ project=project,
+ function_name=app_name,
+ v3io_access_key=model_monitoring_access_key,
+ )
+ )
+ app_stream = self.applications_streams.get(app_name)
 
  logger.info(
  "Pushing data to application stream",
@@ -581,7 +668,6 @@ class MonitoringApplicationController:
  def push_regular_event_to_controller_stream(self) -> None:
  """
  pushes a regular event to the controller stream.
- :param event: the nuclio trigger event
  """
  logger.info("Starting monitoring controller chief")
  applications_names = []
@@ -637,7 +723,6 @@ class MonitoringApplicationController:
  endpoint,
  policy,
  set(applications_names),
- self.v3io_access_key,
  schedule_file,
  ): endpoint
  for endpoint in endpoints
@@ -662,7 +747,6 @@ class MonitoringApplicationController:
  endpoint: mlrun.common.schemas.ModelEndpoint,
  policy: dict,
  applications_names: set,
- v3io_access_key: str,
  schedule_file: schedules.ModelMonitoringSchedulesFileChief,
  ) -> None:
  if self._should_monitor_endpoint(
@@ -688,12 +772,11 @@ class MonitoringApplicationController:
  policy[ControllerEventEndpointPolicy.ENDPOINT_UPDATED] = (
  endpoint.metadata.updated.isoformat()
  )
- MonitoringApplicationController.push_to_controller_stream(
+ self.push_to_controller_stream(
  kind=mm_constants.ControllerEventKind.REGULAR_EVENT,
  project=endpoint.metadata.project,
  endpoint_id=endpoint.metadata.uid,
  endpoint_name=endpoint.metadata.name,
- stream_access_key=v3io_access_key,
  timestamp=endpoint.status.last_request.isoformat(
  sep=" ", timespec="microseconds"
  ),
@@ -705,13 +788,12 @@ class MonitoringApplicationController:
  endpoint_policy=policy,
  )
 
- @staticmethod
  def push_to_controller_stream(
+ self,
  kind: str,
  project: str,
  endpoint_id: str,
  endpoint_name: str,
- stream_access_key: str,
  timestamp: str,
  first_request: str,
  endpoint_type: int,
@@ -729,7 +811,6 @@ class MonitoringApplicationController:
  :param endpoint_name: the endpoint name string
  :param endpoint_type: Enum of the endpoint type
  :param feature_set_uri: the feature set uri string
- :param stream_access_key: access key to apply the model monitoring process.
  """
  event = {
  ControllerEvent.KIND.value: kind,
@@ -742,18 +823,13 @@ class MonitoringApplicationController:
  ControllerEvent.FEATURE_SET_URI.value: feature_set_uri,
  ControllerEvent.ENDPOINT_POLICY.value: endpoint_policy,
  }
- controller_stream = mlrun.model_monitoring.helpers.get_output_stream(
- project=project,
- function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
- v3io_access_key=stream_access_key,
- )
  logger.info(
  "Pushing data to controller stream",
  event=event,
  endpoint_id=endpoint_id,
- controller_stream_type=str(type(controller_stream)),
+ controller_stream_type=str(type(self.controller_stream)),
  )
- controller_stream.push([event], partition_key=endpoint_id)
+ self.controller_stream.push([event], partition_key=endpoint_id)
 
  def _push_to_main_stream(self, event: dict, endpoint_id: str) -> None:
  """
@@ -761,18 +837,13 @@ class MonitoringApplicationController:
  :param event: event dictionary to push to stream
  :param endpoint_id: endpoint id string
  """
- mm_stream = mlrun.model_monitoring.helpers.get_output_stream(
- project=event.get(ControllerEvent.PROJECT),
- function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
- v3io_access_key=self.v3io_access_key,
- )
  logger.info(
  "Pushing data to main stream, NOP event is been generated",
  event=json.dumps(event),
  endpoint_id=endpoint_id,
- mm_stream_type=str(type(mm_stream)),
+ mm_stream_type=str(type(self.model_monitoring_stream)),
  )
- mm_stream.push([event], partition_key=endpoint_id)
+ self.model_monitoring_stream.push([event], partition_key=endpoint_id)
 
 
  def handler(context: nuclio_sdk.Context, event: nuclio_sdk.Event) -> None:
@@ -55,6 +55,9 @@ class TDEngineConnector(TSDBConnector):
 
  self._init_super_tables()
 
+ self._run_directly = (
+ mlrun.mlconf.model_endpoint_monitoring.tdengine.run_directly
+ )
  self._timeout = mlrun.mlconf.model_endpoint_monitoring.tdengine.timeout
  self._retries = mlrun.mlconf.model_endpoint_monitoring.tdengine.retries
 
@@ -74,7 +77,9 @@ class TDEngineConnector(TSDBConnector):
  def _create_connection(self) -> TDEngineConnection:
  """Establish a connection to the TSDB server."""
  logger.debug("Creating a new connection to TDEngine", project=self.project)
- conn = TDEngineConnection(self._tdengine_connection_profile.dsn())
+ conn = TDEngineConnection(
+ self._tdengine_connection_profile.dsn(), run_directly=self._run_directly
+ )
  conn.prefix_statements = [f"USE {self.database}"]
 
  return conn
@@ -1090,9 +1090,9 @@ class V3IOTSDBConnector(TSDBConnector):
  Fetch basic metrics from V3IO TSDB and add them to MEP objects.
 
  :param model_endpoint_objects: A list of `ModelEndpoint` objects that will
- be filled with the relevant basic metrics.
+ be filled with the relevant basic metrics.
  :param project: The name of the project.
- :param run_in_threadpool: Has no effect.
+ :param run_in_threadpool: A function that runs another function in a thread pool.
 
  :return: A list of `ModelEndpointMonitoringMetric` objects.
  """
@@ -1104,9 +1104,15 @@ class V3IOTSDBConnector(TSDBConnector):
  uids.append(uid)
  model_endpoint_objects_by_uid[uid] = model_endpoint_object
 
- error_count_res = self.get_error_count(endpoint_ids=uids, get_raw=True)
- avg_latency_res = self.get_avg_latency(endpoint_ids=uids, get_raw=True)
- drift_status_res = self.get_drift_status(endpoint_ids=uids, get_raw=True)
+ error_count_res = await run_in_threadpool(
+ self.get_error_count, endpoint_ids=uids, get_raw=True
+ )
+ avg_latency_res = await run_in_threadpool(
+ self.get_avg_latency, endpoint_ids=uids, get_raw=True
+ )
+ drift_status_res = await run_in_threadpool(
+ self.get_drift_status, endpoint_ids=uids, get_raw=True
+ )
 
  def add_metric(
  metric: str,
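Wrapping the blocking TSDB queries in the injected run_in_threadpool helper keeps them from stalling the caller's event loop: the callable is passed by reference and awaited. A self-contained illustration of the same idea using only the standard library, where asyncio.to_thread plays the role of run_in_threadpool (names are hypothetical):

    import asyncio
    import time

    def get_error_count(endpoint_ids, get_raw=True):
        # Stand-in for a blocking TSDB query.
        time.sleep(0.1)
        return {uid: 0 for uid in endpoint_ids}

    async def enrich_endpoints(uids):
        # Offload the blocking call to a worker thread so the event loop stays free.
        error_count_res = await asyncio.to_thread(
            get_error_count, endpoint_ids=uids, get_raw=True
        )
        return error_count_res

    print(asyncio.run(enrich_endpoints(["ep-1", "ep-2"])))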
@@ -432,58 +432,23 @@ def update_model_endpoint_last_request(
  :param current_request: current request time
  :param db: DB interface.
  """
- is_batch_endpoint = (
- model_endpoint.metadata.endpoint_type == mm_constants.EndpointType.BATCH_EP
- )
- if not is_batch_endpoint:
- logger.info(
- "Update model endpoint last request time (EP with serving)",
- project=project,
- endpoint_id=model_endpoint.metadata.uid,
- name=model_endpoint.metadata.name,
- function_name=model_endpoint.spec.function_name,
- last_request=model_endpoint.status.last_request,
- current_request=current_request,
- )
- db.patch_model_endpoint(
- project=project,
- endpoint_id=model_endpoint.metadata.uid,
- name=model_endpoint.metadata.name,
- attributes={mm_constants.EventFieldType.LAST_REQUEST: current_request},
- )
- else: # model endpoint without any serving function - close the window "manually"
- try:
- time_window = _get_monitoring_time_window_from_controller_run(project, db)
- except mlrun.errors.MLRunNotFoundError:
- logger.warn(
- "Not bumping model endpoint last request time - the monitoring controller isn't deployed yet.\n"
- "Call `project.enable_model_monitoring()` first."
- )
- return
 
- bumped_last_request = (
- current_request
- + time_window
- + datetime.timedelta(
- seconds=mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs
- )
- )
- logger.info(
- "Bumping model endpoint last request time (EP without serving)",
- project=project,
- endpoint_id=model_endpoint.metadata.uid,
- function_name=model_endpoint.spec.function_name,
- last_request=model_endpoint.status.last_request,
- current_request=current_request.isoformat(),
- bumped_last_request=bumped_last_request,
- )
- db.patch_model_endpoint(
- project=project,
- endpoint_id=model_endpoint.metadata.uid,
- name=model_endpoint.metadata.name,
- function_name=model_endpoint.spec.function_name,
- attributes={mm_constants.EventFieldType.LAST_REQUEST: bumped_last_request},
- )
+ logger.info(
+ "Update model endpoint last request time (EP with serving)",
+ project=project,
+ endpoint_id=model_endpoint.metadata.uid,
+ name=model_endpoint.metadata.name,
+ function_name=model_endpoint.spec.function_name,
+ last_request=model_endpoint.status.last_request,
+ current_request=current_request,
+ )
+ db.patch_model_endpoint(
+ project=project,
+ endpoint_id=model_endpoint.metadata.uid,
+ name=model_endpoint.metadata.name,
+ function_name=model_endpoint.spec.function_name,
+ attributes={mm_constants.EventFieldType.LAST_REQUEST: current_request},
+ )
 
 
  def calculate_inputs_statistics(
mlrun/projects/project.py CHANGED
@@ -2144,29 +2144,34 @@ class MlrunProject(ModelObj):
  reset_policy: mlrun.common.schemas.alert.ResetPolicy = mlrun.common.schemas.alert.ResetPolicy.AUTO,
  ) -> list[mlrun.alerts.alert.AlertConfig]:
  """
- :param name: The name of the AlertConfig template. It will be combined with mep_id, app-name
- and result name to generate a unique name.
- :param summary: Summary of the alert, will be sent in the generated notifications
- :param endpoints: The endpoints from which metrics will be retrieved to configure the alerts.
- This `ModelEndpointList` object obtained via the `list_model_endpoints`
- method or created manually using `ModelEndpoint` objects.
- :param events: AlertTrigger event types (EventKind).
- :param notifications: List of notifications to invoke once the alert is triggered
- :param result_names: Optional. Filters the result names used to create the alert configuration,
- constructed from the app and result_name regex.
-
- For example:
- [`app1.result-*`, `*.result1`]
- will match "mep_uid1.app1.result.result-1" and "mep_uid1.app2.result.result1".
- A specific result_name (not a wildcard) will always create a new alert
- config, regardless of whether the result name exists.
- :param severity: Severity of the alert.
- :param criteria: When the alert will be triggered based on the
- specified number of events within the defined time period.
- :param reset_policy: When to clear the alert. May be "manual" for manual reset of the alert,
- or "auto" if the criteria contains a time period.
- :returns: List of AlertConfig according to endpoints results,
- filtered by result_names.
+ Generate alert configurations based on specified model endpoints and result names, which can be defined
+ explicitly or using regex patterns.
+
+ :param name: The name of the AlertConfig template. It will be combined with
+ mep id, app name and result name to generate a unique name.
+ :param summary: Summary of the alert, will be sent in the generated notifications
+ :param endpoints: The endpoints from which metrics will be retrieved to configure
+ the alerts.
+ The ModelEndpointList object is obtained via the `list_model_endpoints`
+ method or created manually using `ModelEndpoint` objects.
+ :param events: AlertTrigger event types (EventKind).
+ :param notifications: List of notifications to invoke once the alert is triggered
+ :param result_names: Optional. Filters the result names used to create the alert
+ configuration, constructed from the app and result_name regex.
+
+ For example:
+ [`app1.result-*`, `*.result1`]
+ will match "mep_uid1.app1.result.result-1" and
+ "mep_uid1.app2.result.result1".
+ A specific result_name (not a wildcard) will always create a new alert
+ config, regardless of whether the result name exists.
+ :param severity: Severity of the alert.
+ :param criteria: The threshold for triggering the alert based on the
+ specified number of events within the defined time period.
+ :param reset_policy: When to clear the alert. Either "manual" for manual reset of the alert,
+ or "auto" if the criteria contains a time period.
+ :returns: List of AlertConfig according to endpoints results,
+ filtered by result_names.
  """
  db = mlrun.db.get_run_db(secrets=self._secrets)
  matching_results = []
@@ -13,6 +13,7 @@
  # limitations under the License.
 
  import asyncio
+ import copy
  import json
  import typing
  import warnings
@@ -50,6 +51,19 @@ from mlrun.runtimes.utils import get_item_name, log_std
  from mlrun.utils import get_in, logger, update_in
  from mlrun_pipelines.common.ops import deploy_op
 
+ SENSITIVE_PATHS_IN_TRIGGER_CONFIG = {
+ "password",
+ "secret",
+ "attributes/password",
+ "attributes/accesskeyid",
+ "attributes/secretaccesskey",
+ "attributes/cacert",
+ "attributes/accesskey",
+ "attributes/accesscertificate",
+ "attributes/sasl/password",
+ "attributes/sasl/oauth/clientsecret",
+ }
+
 
  def validate_nuclio_version_compatibility(*min_versions):
  """
@@ -274,6 +288,37 @@ class RemoteRuntime(KubeResource):
  if self.metadata.tag:
  mlrun.utils.validate_tag_name(self.metadata.tag, "function.metadata.tag")
 
+ def mask_sensitive_data_in_config(self):
+ if not self.spec.config:
+ return {}
+
+ raw_config = copy.deepcopy(self.spec.config)
+
+ for key, value in self.spec.config.items():
+ if key.startswith("spec.triggers"):
+ trigger_name = key.split(".")[-1]
+
+ for path in SENSITIVE_PATHS_IN_TRIGGER_CONFIG:
+ # Handle nested keys
+ nested_keys = path.split("/")
+ target = value
+ for sub_key in nested_keys[:-1]:
+ target = target.get(sub_key, {})
+
+ last_key = nested_keys[-1]
+ if last_key in target:
+ sensitive_field = target[last_key]
+ if sensitive_field.startswith(
+ mlrun.model.Credentials.secret_reference_prefix
+ ):
+ # already masked
+ continue
+ target[last_key] = (
+ f"{mlrun.model.Credentials.secret_reference_prefix}/spec/triggers/{trigger_name}/{path}"
+ )
+
+ return raw_config
+
  def set_config(self, key, value):
  self.spec.config[key] = value
  return self
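The masking helper walks slash-separated paths into each trigger's config dict and replaces any secret it finds with a reference string. A compact, standalone sketch of the same traversal, using a generic dict and a hypothetical "ref://" prefix in place of mlrun's Credentials class (this version masks a copy and returns it; it is a sketch of the pattern, not the library code):

    import copy

    SENSITIVE_PATHS = {"password", "attributes/sasl/password"}
    MASK_PREFIX = "ref://"  # hypothetical stand-in for the secret reference prefix

    def mask_trigger_config(trigger_name: str, config: dict) -> dict:
        masked = copy.deepcopy(config)
        for path in SENSITIVE_PATHS:
            keys = path.split("/")
            target = masked
            # Descend to the parent dict of the sensitive key, if it exists.
            for sub_key in keys[:-1]:
                target = target.get(sub_key, {})
            last_key = keys[-1]
            if isinstance(target, dict) and last_key in target:
                # Replace the secret value with a reference to where it is stored.
                target[last_key] = f"{MASK_PREFIX}spec/triggers/{trigger_name}/{path}"
        return masked

    cfg = {"password": "hunter2", "attributes": {"sasl": {"password": "s3cr3t"}}}
    print(mask_trigger_config("my-kafka-trigger", cfg))
    # Both passwords are replaced by "ref://spec/triggers/my-kafka-trigger/..." references.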
@@ -1230,6 +1275,9 @@ class RemoteRuntime(KubeResource):
  if remote_env.get("name") in credentials_env_var_names:
  new_env.append(remote_env)
 
+ # update nuclio-specific credentials
+ self.mask_sensitive_data_in_config()
+
  self.spec.env = new_env
 
  def _set_as_mock(self, enable):
mlrun/serving/states.py CHANGED
@@ -363,15 +363,22 @@ class BaseStep(ModelObj):
  event: {"x": 5} , result_path="y" means the output of the step will be written
  to event["y"] resulting in {"x": 5, "y": <result>}
  :param model_endpoint_creation_strategy: Strategy for creating or updating the model endpoint:
- * **overwrite**:
- 1. If model endpoints with the same name exist, delete the `latest` one.
- 2. Create a new model endpoint entry and set it as `latest`.
- * **inplace** (default):
- 1. If model endpoints with the same name exist, update the `latest` entry.
- 2. Otherwise, create a new entry.
- * **archive**:
- 1. If model endpoints with the same name exist, preserve them.
- 2. Create a new model endpoint with the same name and set it to `latest`.
+
+ * **overwrite**:
+
+ 1. If model endpoints with the same name exist, delete the `latest` one.
+ 2. Create a new model endpoint entry and set it as `latest`.
+
+ * **inplace** (default):
+
+ 1. If model endpoints with the same name exist, update the `latest` entry.
+ 2. Otherwise, create a new entry.
+
+ * **archive**:
+
+ 1. If model endpoints with the same name exist, preserve them.
+ 2. Create a new model endpoint with the same name and set it to `latest`.
+
  :param class_args: class init arguments
  """
  if hasattr(self, "steps"):
@@ -810,15 +817,22 @@ class RouterStep(TaskStep):
  :param handler: class handler to invoke on run/event
  :param function: function this step should run in
  :param creation_strategy: Strategy for creating or updating the model endpoint:
- * **overwrite**:
- 1. If model endpoints with the same name exist, delete the `latest` one.
- 2. Create a new model endpoint entry and set it as `latest`.
- * **inplace** (default):
- 1. If model endpoints with the same name exist, update the `latest` entry.
- 2. Otherwise, create a new entry.
- * **archive**:
- 1. If model endpoints with the same name exist, preserve them.
- 2. Create a new model endpoint with the same name and set it to `latest`.
+
+ * **overwrite**:
+
+ 1. If model endpoints with the same name exist, delete the `latest` one.
+ 2. Create a new model endpoint entry and set it as `latest`.
+
+ * **inplace** (default):
+
+ 1. If model endpoints with the same name exist, update the `latest` entry.
+ 2. Otherwise, create a new entry.
+
+ * **archive**:
+
+ 1. If model endpoints with the same name exist, preserve them.
+ 2. Create a new model endpoint with the same name and set it to `latest`.
+
  """
 
  if len(self.routes.keys()) >= MAX_MODELS_PER_ROUTER and key not in self.routes:
@@ -1207,15 +1221,22 @@ class FlowStep(BaseStep):
  event: {"x": 5} , result_path="y" means the output of the step will be written
  to event["y"] resulting in {"x": 5, "y": <result>}
  :param model_endpoint_creation_strategy: Strategy for creating or updating the model endpoint:
- * **overwrite**:
- 1. If model endpoints with the same name exist, delete the `latest` one.
- 2. Create a new model endpoint entry and set it as `latest`.
- * **inplace** (default):
- 1. If model endpoints with the same name exist, update the `latest` entry.
- 2. Otherwise, create a new entry.
- * **archive**:
- 1. If model endpoints with the same name exist, preserve them.
- 2. Create a new model endpoint with the same name and set it to `latest`.
+
+ * **overwrite**:
+
+ 1. If model endpoints with the same name exist, delete the `latest` one.
+ 2. Create a new model endpoint entry and set it as `latest`.
+
+ * **inplace** (default):
+
+ 1. If model endpoints with the same name exist, update the `latest` entry.
+ 2. Otherwise, create a new entry.
+
+ * **archive**:
+
+ 1. If model endpoints with the same name exist, preserve them.
+ 2. Create a new model endpoint with the same name and set it to `latest`.
+
  :param class_args: class init arguments
  """
 
mlrun/utils/helpers.py CHANGED
@@ -882,9 +882,12 @@ def enrich_image_url(
  image_url = image_url.strip()
  mlrun_version = config.images_tag or client_version or server_version
  tag = mlrun_version
- tag += resolve_image_tag_suffix(
- mlrun_version=mlrun_version, python_version=client_python_version
- )
+
+ # TODO: Remove condition when mlrun/mlrun-kfp image is also supported
+ if "mlrun-kfp" not in image_url:
+ tag += resolve_image_tag_suffix(
+ mlrun_version=mlrun_version, python_version=client_python_version
+ )
 
  # it's an mlrun image if the repository is mlrun
  is_mlrun_image = image_url.startswith("mlrun/") or "/mlrun/" in image_url
@@ -916,7 +919,7 @@ def resolve_image_tag_suffix(
  mlrun_version: Optional[str] = None, python_version: Optional[str] = None
  ) -> str:
  """
- resolves what suffix should be appended to the image tag
+ Resolves what suffix to be appended to the image tag
  :param mlrun_version: the mlrun version
  :param python_version: the requested python version
  :return: the suffix to append to the image tag
@@ -928,19 +931,19 @@ def resolve_image_tag_suffix(
  # mlrun version is higher than 1.3.0, but we can check the python version and if python version was passed it
  # means it 1.3.0-rc or higher, so we can add the suffix of the python version.
  if mlrun_version.startswith("0.0.0-") or "unstable" in mlrun_version:
- if python_version.startswith("3.7"):
- return "-py37"
+ if python_version.startswith("3.9"):
+ return "-py39"
  return ""
 
- # For mlrun 1.3.x and 1.4.x, we support mlrun runtimes images with both python 3.7 and 3.9 images.
- # While the python 3.9 images will continue to have no suffix, the python 3.7 images will have a '-py37' suffix.
- # Python 3.8 images will not be supported for mlrun 1.3.0, meaning that if the user has client with python 3.8
- # and mlrun 1.3.x then the image will be pulled without a suffix (which is the python 3.9 image).
+ # For mlrun 1.9.x and 1.10.x, we support mlrun runtimes images with both python 3.9 and 3.11 images.
+ # While the python 3.11 images will continue to have no suffix, the python 3.9 images will have a '-py39' suffix.
+ # Python 3.10 images are not supported in mlrun 1.9.0, meaning that if the user has client with python 3.10
+ # and mlrun 1.9.x then the image will be pulled without a suffix (which is the python 3.11 image).
  # using semver (x.y.z-X) to include rc versions as well
- if semver.VersionInfo.parse("1.5.0-X") > semver.VersionInfo.parse(
+ if semver.VersionInfo.parse("1.11.0-X") > semver.VersionInfo.parse(
  mlrun_version
- ) >= semver.VersionInfo.parse("1.3.0-X") and python_version.startswith("3.7"):
- return "-py37"
+ ) >= semver.VersionInfo.parse("1.9.0-X") and python_version.startswith("3.9"):
+ return "-py39"
  return ""
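The version gate relies on the third-party semver package: "1.11.0-X" and "1.9.0-X" are pre-release bounds, so release candidates such as 1.9.0-rc1 fall inside the range and get the -py39 suffix when the client runs Python 3.9. A small standalone check along the same lines (the function name here is illustrative, not the library API):

    import semver

    def needs_py39_suffix(mlrun_version: str, python_version: str) -> bool:
        # Pre-release bounds ("-X") keep rc versions inside the range, mirroring
        # the comparison used in resolve_image_tag_suffix above.
        in_range = (
            semver.VersionInfo.parse("1.11.0-X")
            > semver.VersionInfo.parse(mlrun_version)
            >= semver.VersionInfo.parse("1.9.0-X")
        )
        return in_range and python_version.startswith("3.9")

    print(needs_py39_suffix("1.9.0-rc1", "3.9.18"))   # True -> "-py39" suffix
    print(needs_py39_suffix("1.9.0-rc1", "3.11.9"))   # False -> default image tag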
@@ -1371,13 +1374,16 @@ def has_timezone(timestamp):
  return False
 
 
- def format_datetime(dt: datetime) -> str:
+ def format_datetime(dt: datetime, fmt: Optional[str] = None) -> str:
+ if dt is None:
+ return ""
+
  # If the datetime is naive
  if dt.tzinfo is None:
  dt = dt.replace(tzinfo=timezone.utc)
 
  # TODO: Once Python 3.12 is the minimal version, use %:z to format the timezone offset with a colon
- formatted_time = dt.strftime("%Y-%m-%d %H:%M:%S.%f%z")
+ formatted_time = dt.strftime(fmt or "%Y-%m-%d %H:%M:%S.%f%z")
 
  # For versions earlier than Python 3.12, we manually insert the colon in the timezone offset
  return formatted_time[:-2] + ":" + formatted_time[-2:]
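The colon-insertion trick exists because %z produces "+0000"-style offsets before Python 3.12, while the desired output is "+00:00". A standalone illustration of the default formatting path, treating naive datetimes as UTC just like the helper above (it assumes the format string ends with %z):

    from datetime import datetime, timezone

    def format_datetime(dt: datetime, fmt: str = "%Y-%m-%d %H:%M:%S.%f%z") -> str:
        if dt.tzinfo is None:
            # Treat naive datetimes as UTC before formatting.
            dt = dt.replace(tzinfo=timezone.utc)
        formatted = dt.strftime(fmt)
        # %z yields "+0000" on Python < 3.12; splice in the colon manually.
        return formatted[:-2] + ":" + formatted[-2:]

    print(format_datetime(datetime(2025, 1, 2, 3, 4, 5, 123456)))
    # 2025-01-02 03:04:05.123456+00:00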
@@ -1,4 +1,4 @@
  {
- "git_commit": "78aff2128b074d4d18751c0cbd2078e14929fa17",
- "version": "1.8.0-rc46"
+ "git_commit": "b08bde55bcb7fde423002ec145ace1e99e677955",
+ "version": "1.9.0-rc1"
  }
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mlrun
- Version: 1.8.0rc46
+ Version: 1.9.0rc1
  Summary: Tracking and config of machine learning runs
  Home-page: https://github.com/mlrun/mlrun
  Author: Yaron Haviv
@@ -53,6 +53,7 @@ Requires-Dist: jinja2>=3.1.3,~=3.1
  Requires-Dist: orjson<4,>=3.9.15
  Requires-Dist: mlrun-pipelines-kfp-common~=0.3.12
  Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.3.9; python_version < "3.11"
+ Requires-Dist: mlrun-pipelines-kfp-v2~=0.3.8; python_version >= "3.11"
  Requires-Dist: docstring_parser~=0.16
  Requires-Dist: aiosmtplib~=3.0
  Provides-Extra: s3
@@ -99,7 +100,7 @@ Requires-Dist: ossfs==2023.12.0; extra == "alibaba-oss"
  Requires-Dist: oss2==2.18.1; extra == "alibaba-oss"
  Provides-Extra: tdengine
  Requires-Dist: taos-ws-py==0.3.2; extra == "tdengine"
- Requires-Dist: taoswswrap~=0.3.2; extra == "tdengine"
+ Requires-Dist: taoswswrap~=0.3.4; extra == "tdengine"
  Provides-Extra: snowflake
  Requires-Dist: snowflake-connector-python~=3.7; extra == "snowflake"
  Provides-Extra: kfp18
@@ -152,7 +153,7 @@ Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "all"
  Requires-Dist: snowflake-connector-python~=3.7; extra == "all"
  Requires-Dist: sqlalchemy~=1.4; extra == "all"
  Requires-Dist: taos-ws-py==0.3.2; extra == "all"
- Requires-Dist: taoswswrap~=0.3.2; extra == "all"
+ Requires-Dist: taoswswrap~=0.3.4; extra == "all"
  Provides-Extra: complete
  Requires-Dist: adlfs==2023.9.0; extra == "complete"
  Requires-Dist: aiobotocore<2.16,>=2.5.0; extra == "complete"
@@ -184,7 +185,7 @@ Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "complete"
  Requires-Dist: snowflake-connector-python~=3.7; extra == "complete"
  Requires-Dist: sqlalchemy~=1.4; extra == "complete"
  Requires-Dist: taos-ws-py==0.3.2; extra == "complete"
- Requires-Dist: taoswswrap~=0.3.2; extra == "complete"
+ Requires-Dist: taoswswrap~=0.3.4; extra == "complete"
  Provides-Extra: complete-api
  Requires-Dist: adlfs==2023.9.0; extra == "complete-api"
  Requires-Dist: aiobotocore<2.16,>=2.5.0; extra == "complete-api"
@@ -229,7 +230,7 @@ Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "complete-api"
  Requires-Dist: snowflake-connector-python~=3.7; extra == "complete-api"
  Requires-Dist: sqlalchemy~=1.4; extra == "complete-api"
  Requires-Dist: taos-ws-py==0.3.2; extra == "complete-api"
- Requires-Dist: taoswswrap~=0.3.2; extra == "complete-api"
+ Requires-Dist: taoswswrap~=0.3.4; extra == "complete-api"
  Requires-Dist: timelength~=1.1; extra == "complete-api"
  Requires-Dist: uvicorn~=0.32.1; extra == "complete-api"
  Dynamic: author
@@ -1,6 +1,6 @@
  mlrun/__init__.py,sha256=Cqm9U9eCEdLpMejhU2BEhubu0mHL71igJJIwYa738EA,7450
  mlrun/__main__.py,sha256=0NDzPf9VFRO8KFfGgb8mkGUPIDS285aASV8Hbxs-ND0,45920
- mlrun/config.py,sha256=JEE29XflgA8NsdAOeSmIFBKhS_r-FKHUFwMjcSOYq30,71820
+ mlrun/config.py,sha256=vjlKlex9TV1Gfgix4iOM_nzX5wN-YuV6xXPmh0JDWy4,71932
  mlrun/errors.py,sha256=LkcbXTLANGdsgo2CRX2pdbyNmt--lMsjGv0XZMgP-Nc,8222
  mlrun/execution.py,sha256=FUktsD3puSFjc3LZJU35b-OmFBrBPBNntViCLQVuwnk,50008
  mlrun/features.py,sha256=ReBaNGsBYXqcbgI012n-SO_j6oHIbk_Vpv0CGPXbUmo,15842
@@ -11,7 +11,7 @@ mlrun/render.py,sha256=940H9fBBFeghH4dlifbURvtjlvw4GlWdAXezN6ky4rI,13275
  mlrun/run.py,sha256=n9n5IWBEaOrMIeSakp01DyL09_6FvLy3LCqWpBtvc08,45140
  mlrun/secrets.py,sha256=dZPdkc_zzfscVQepOHUwmzFqnBavDCBXV9DQoH_eIYM,7800
  mlrun/alerts/__init__.py,sha256=0gtG1BG0DXxFrXegIkjbM1XEN4sP9ODo0ucXrNld1hU,601
- mlrun/alerts/alert.py,sha256=9kGTtV385Ax-aTm-450HzPwEek9e0c3O3Qln-jXjhFg,15948
+ mlrun/alerts/alert.py,sha256=QQFZGydQbx9RvAaSiaH-ALQZVcDKQX5lgizqj_rXW2k,15948
  mlrun/api/schemas/__init__.py,sha256=tVAnpexDkfI0JWMJNlPSnVOzoV4xqIjWGSln9UkPS4I,13921
  mlrun/artifacts/__init__.py,sha256=ofC2extBCOC1wg1YtdTzWzH3eeG_f-sFBUkHjYtZJpk,1175
  mlrun/artifacts/base.py,sha256=SFHe44o9RV9C3-WODOD53WdBjWk0Ya8lnap9LmERwrQ,29959
@@ -100,7 +100,7 @@ mlrun/datastore/spark_udf.py,sha256=NnnB3DZxZb-rqpRy7b-NC7QWXuuqFn3XkBDc86tU4mQ,
  mlrun/datastore/spark_utils.py,sha256=_AsVoU5Ix_-W7Gyq8io8V-2GTk0m8THJNDP3WGGaWJY,2865
  mlrun/datastore/store_resources.py,sha256=PFOMrZ6KH6hBOb0PiO-cHx_kv0UpHu5P2t8_mrR-lS4,6842
  mlrun/datastore/storeytargets.py,sha256=g5zAdizdFkcESoVGxbKWC11ZiXFgM77UL4642G32JaU,6459
- mlrun/datastore/targets.py,sha256=k6IU7XPOYm9jJi5foINfO4NH3NvuXcwYB_B7Rt7V-cg,81195
+ mlrun/datastore/targets.py,sha256=7qLf26BDH3qYTHOR7TSP0tUMPBhYOkaaOwffUBxgqY0,81201
  mlrun/datastore/utils.py,sha256=CbKbDI6CdFRCqyAXe-jykVvN_GH6R0JkxIQFAogR2GA,10604
  mlrun/datastore/v3io.py,sha256=QSYBORRLcJTeM9mt0EaWzyLcdmzrPkqrF7k5uLTam5U,8209
  mlrun/datastore/vectorstore.py,sha256=k-yom5gfw20hnVG0Rg7aBEehuXwvAloZwn0cx0VGals,11708
@@ -218,10 +218,10 @@ mlrun/launcher/factory.py,sha256=RW7mfzEFi8fR0M-4W1JQg1iq3_muUU6OTqT_3l4Ubrk,233
  mlrun/launcher/local.py,sha256=775HY-8S9LFUX5ubGXrLO0N1lVh8bn-DHFmNYuNqQPA,11451
  mlrun/launcher/remote.py,sha256=rLJW4UAnUT5iUb4BsGBOAV3K4R29a0X4lFtRkVKlyYU,7709
  mlrun/model_monitoring/__init__.py,sha256=ELy7njEtZnz09Dc6PGZSFFEGtnwI15bJNWM3Pj4_YIs,753
- mlrun/model_monitoring/api.py,sha256=Dk5uEAk8HTU00vUwyDPUFqSOgkT9z4gxwaDXbtj-4-U,27737
- mlrun/model_monitoring/controller.py,sha256=mbKonkixNgeXT1BRNKQi6i-bk0pquwWgNclQHAjRvcA,33862
+ mlrun/model_monitoring/api.py,sha256=LU58dzE4QZiMH23lgiqfI__3m2E3eEZP-DQe2ioUSwM,28317
+ mlrun/model_monitoring/controller.py,sha256=fpCfM2wrd9sk-GzmTsqqf1jL1DNgvQhseRRqI-MvMFU,36803
  mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
- mlrun/model_monitoring/helpers.py,sha256=Cc1TGsqoiuey_XDkwOeaOVV2OnXCryZnBEGGlI8Z524,22620
+ mlrun/model_monitoring/helpers.py,sha256=8QsoYRPOVSnR3Lcv99m4XYrp_cR6hSqBUflYSOkJmFQ,21019
  mlrun/model_monitoring/stream_processing.py,sha256=4M0H4txMlsC2Q5iKTPp992KWoNPAJjPHj9rqWhXbl8w,33321
  mlrun/model_monitoring/tracking_policy.py,sha256=PBIGrUYWrwcE5gwXupBIVzOb0QRRwPJsgQm_yLGQxB4,5595
  mlrun/model_monitoring/writer.py,sha256=ibbhvfSHb8Reqlb7RGFEAUNM4iTyK1gk8-2m46mP6VM,8428
@@ -232,7 +232,7 @@ mlrun/model_monitoring/applications/context.py,sha256=DKUDOfN4iY5wpOMjfsarx4pVN9
  mlrun/model_monitoring/applications/histogram_data_drift.py,sha256=09t0tfC35W0SeJA3fzN29pJiB6G-V_8GlcvULVq6H9Q,15179
  mlrun/model_monitoring/applications/results.py,sha256=_qmj6TWT0SR2bi7gUyRKBU418eGgGoLW2_hTJ7S-ock,5782
  mlrun/model_monitoring/applications/evidently/__init__.py,sha256=-DqdPnBSrjZhFvKOu_Ie3MiFvlur9sPTZpZ1u0_1AE8,690
- mlrun/model_monitoring/applications/evidently/base.py,sha256=C8402vQJH7jmY-i49DnYjy6p6dETWex4Tdi8ylFLecA,5097
+ mlrun/model_monitoring/applications/evidently/base.py,sha256=_n_2CCQL-fC6hGUZSCLZxZuvXqMqjDHSFX0Giok8HZw,6793
  mlrun/model_monitoring/db/__init__.py,sha256=r47xPGZpIfMuv8J3PQCZTSqVPMhUta4sSJCZFKcS7FM,644
  mlrun/model_monitoring/db/_schedules.py,sha256=RWn4wtKsIXg668gMLpxO9I8GlkxvPSaA5y7w-wFDcgE,9048
  mlrun/model_monitoring/db/_stats.py,sha256=VVMWLMqG3Us3ozBkLaokJF22Ewv8WKmVE1-OvS_g9vA,6943
@@ -242,10 +242,10 @@ mlrun/model_monitoring/db/tsdb/helpers.py,sha256=0oUXc4aUkYtP2SGP6jTb3uPPKImIUsV
  mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4sRatmSVA4CQiJs7J5dcVBkQ,620
  mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=EslhaR65jfeNdD5Ibk-3Hb4e5r5qYPfHb9rTChX3sG0,12689
  mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Uadj0UvAmln2MxDWod-kAzau1uNlqZh981rPhbUH_5M,2857
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=gDK6nNbAwTprs2UAI1r7r6loZB40I_8iQ2JvedvAs78,37765
+ mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=5jgimfu2-omy8Cnnby7GpgB_MWEp9mmLX0zpbGC2JZ8,37934
  mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
  mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=_-zo9relCDtjGgievxAcAP9gVN9nDWs8BzGtFwTjb9M,6284
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=GLxg8N1eK7agKgPanOoA1JNbXEH9_kesNRzAPYOgtAQ,46033
+ mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=IzdThNwWMBWo0D0VzXV-WVvGg-z7Y9e8ke8_LYJTeVA,46214
  mlrun/model_monitoring/metrics/__init__.py,sha256=6CsTXAxeLbbf8yfCADTaxmiavqwrLEdYFJ-qc5kgDAY,569
  mlrun/model_monitoring/metrics/histogram_distance.py,sha256=E9_WIl2vd6qNvoHVHoFcnuQk3ekbFWOdi8aU7sHrfk4,4724
  mlrun/package/__init__.py,sha256=v7VDyK9kDOOuDvFo4oiGV2fx-vM1KL7fdN9pGLakhUQ,7008
@@ -270,7 +270,7 @@ mlrun/platforms/iguazio.py,sha256=6VBTq8eQ3mzT96tzjYhAtcMQ2VjF4x8LpIPW5DAcX2Q,13
  mlrun/projects/__init__.py,sha256=0Krf0WIKfnZa71WthYOg0SoaTodGg3sV_hK3f_OlTPI,1220
  mlrun/projects/operations.py,sha256=TzPbTYBgmYrjxTKP_wOtBJYFFFwDCQtaVvF1Snr0TfM,20029
  mlrun/projects/pipelines.py,sha256=wud7ezeEmhIJvfYE_wzQbA4ygEfGXHtbOtoOpan6poY,48556
- mlrun/projects/project.py,sha256=Jmr7l9VwFGBItS50DElEN6PuDE6OPPvXn1j16kb1z80,235333
+ mlrun/projects/project.py,sha256=WsNZUz_k52llBI5rLBwJeGuIzSlAapVXBQfCL7NVI8E,235765
  mlrun/runtimes/__init__.py,sha256=J9Sy2HiyMlztNv6VUurMzF5H2XzttNil8nRsWDsqLyg,8923
  mlrun/runtimes/base.py,sha256=EL14Kmc1vWEjnBPJwLj5hHC6CtRAQHJLmohCD3sFEHo,37855
  mlrun/runtimes/daskjob.py,sha256=JwuGvOiPsxEDHHMMUS4Oie4hLlYYIZwihAl6DjroTY0,19521
@@ -292,7 +292,7 @@ mlrun/runtimes/mpijob/abstract.py,sha256=JGMjcJ4dvpJbctF6psU9UvYyNCutMxTMgBQeTlz
  mlrun/runtimes/mpijob/v1.py,sha256=1XQZC7AIMGX_AQCbApcwpH8I7y39-v0v2O35MvxjXoo,3213
  mlrun/runtimes/nuclio/__init__.py,sha256=gx1kizzKv8pGT5TNloN1js1hdbxqDw3rM90sLVYVffY,794
  mlrun/runtimes/nuclio/api_gateway.py,sha256=vH9ClKVP4Mb24rvA67xPuAvAhX-gAv6vVtjVxyplhdc,26969
- mlrun/runtimes/nuclio/function.py,sha256=j_gKYhaGfJjr_mVBdUcnSgXcXOHJrKHtUMpmOu8TII8,52979
+ mlrun/runtimes/nuclio/function.py,sha256=1EFdGFqlyEfPUVK4Rhh8zWUrff7MNKaHrg7V-bejewg,54618
  mlrun/runtimes/nuclio/nuclio.py,sha256=sLK8KdGO1LbftlL3HqPZlFOFTAAuxJACZCVl1c0Ha6E,2942
  mlrun/runtimes/nuclio/serving.py,sha256=qetAyl-nfn8SWp7KyNgRtMNUVcX_q75SY9dLZP0uH6o,33365
  mlrun/runtimes/nuclio/application/__init__.py,sha256=rRs5vasy_G9IyoTpYIjYDafGoL6ifFBKgBtsXn31Atw,614
@@ -306,7 +306,7 @@ mlrun/serving/remote.py,sha256=gxJkj_J3j-sZcVUbUzbAmJafP6t6y4NVFsu0kWmYngA,18818
  mlrun/serving/routers.py,sha256=SY6AsaiSnh8ssXq8hQE2z9MYapOxFOFJBx9QomiZMO8,53915
  mlrun/serving/server.py,sha256=KiNhW0nTV5STZPzR6kEAUFVzCCAX8qv0g9AoCopARrM,23429
  mlrun/serving/serving_wrapper.py,sha256=R670-S6PX_d5ER6jiHtRvacuPyFzQH0mEf2K0sBIIOM,836
- mlrun/serving/states.py,sha256=Kst2N7R5SaTKYMYB8re9wTlhQwEDgkG61-4JtROKlNI,72803
+ mlrun/serving/states.py,sha256=UWiE85MB_SK3rgzWgNqQU2MKeyN2yF2BCvMcMAqLMTs,73247
  mlrun/serving/utils.py,sha256=k2EIYDWHUGkE-IBI6T0UNT32fw-KySsccIJM_LObI00,4171
  mlrun/serving/v1_serving.py,sha256=c6J_MtpE-Tqu00-6r4eJOCO6rUasHDal9W2eBIcrl50,11853
  mlrun/serving/v2_serving.py,sha256=b3C5Utv2_AOPrH_hPi3NarjNbAK3kRoeIfqMU4qNuUo,25362
@@ -321,7 +321,7 @@ mlrun/utils/azure_vault.py,sha256=IEFizrDGDbAaoWwDr1WoA88S_EZ0T--vjYtY-i0cvYQ,34
  mlrun/utils/clones.py,sha256=yXOeuLtgIiKZdmjeKK0Z_vIrH19ds5JuoJaCeDjhwOo,7516
  mlrun/utils/condition_evaluator.py,sha256=-nGfRmZzivn01rHTroiGY4rqEv8T1irMyhzxEei-sKc,1897
  mlrun/utils/db.py,sha256=blQgkWMfFH9lcN4sgJQcPQgEETz2Dl_zwbVA0SslpFg,2186
- mlrun/utils/helpers.py,sha256=OCX51Gn7rrKPpvob_lUnyfs1lFhVHtlzSYKHXAqJJ0Q,74393
+ mlrun/utils/helpers.py,sha256=FflaMvt_8zYDsW5zHG8s1WWda3CbERTCMJUGyziWIjg,74587
  mlrun/utils/http.py,sha256=t6FrXQstZm9xVVjxqIGiLzrwZNCR4CSienSOuVgNIcI,8706
  mlrun/utils/logger.py,sha256=RG0m1rx6gfkJ-2C1r_p41MMpPiaDYqaYM2lYHDlNZEU,14767
  mlrun/utils/regex.py,sha256=jbR7IiOp6OO0mg9Fl_cVZCpWb9fL9nTPONCUxCDNWXg,5201
@@ -340,11 +340,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
  mlrun/utils/notifications/notification/slack.py,sha256=eQvmctTh6wIG5xVOesLLV9S1-UUCu5UEQ9JIJOor3ts,7183
  mlrun/utils/notifications/notification/webhook.py,sha256=NeyIMSBojjjTJaUHmPbxMByp34GxYkl1-16NqzU27fU,4943
  mlrun/utils/version/__init__.py,sha256=7kkrB7hEZ3cLXoWj1kPoDwo4MaswsI2JVOBpbKgPAgc,614
- mlrun/utils/version/version.json,sha256=BdVuIuX64LHL8ZydQn4fu4jqZorhEvAHUQZ0YBVTWr8,89
+ mlrun/utils/version/version.json,sha256=WlaePndVtYxwhDSf-ETGiiy3I87M637FDZBAAl1SjvM,88
  mlrun/utils/version/version.py,sha256=eEW0tqIAkU9Xifxv8Z9_qsYnNhn3YH7NRAfM-pPLt1g,1878
- mlrun-1.8.0rc46.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- mlrun-1.8.0rc46.dist-info/METADATA,sha256=-rHQLgCZW8i6RUoHhIzNHEJKGFNvyoI_kIgS59Ajzdk,26008
- mlrun-1.8.0rc46.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
- mlrun-1.8.0rc46.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
- mlrun-1.8.0rc46.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
- mlrun-1.8.0rc46.dist-info/RECORD,,
+ mlrun-1.9.0rc1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ mlrun-1.9.0rc1.dist-info/METADATA,sha256=sRPSncr_i_JhtIKIVROkuR5UMqd6VMEcav64CqOf1_0,26078
+ mlrun-1.9.0rc1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ mlrun-1.9.0rc1.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
+ mlrun-1.9.0rc1.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
+ mlrun-1.9.0rc1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.0.2)
+ Generator: setuptools (78.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any