mlrun 1.9.0rc1__py3-none-any.whl → 1.9.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mlrun/config.py CHANGED
@@ -486,6 +486,10 @@ default_config = {
  "iguazio_client_job_cache_ttl": "20 minutes",
  "nuclio_project_deletion_verification_timeout": "300 seconds",
  "nuclio_project_deletion_verification_interval": "5 seconds",
+ "summaries": {
+ # Number of days back to include when calculating the project pipeline summary.
+ "list_pipelines_time_period_in_days": 7,
+ },
  },
  # The API needs to know what is its k8s svc url so it could enrich it in the jobs it creates
  "api_url": "",
mlrun/datastore/sources.py CHANGED
@@ -971,13 +971,8 @@ class OnlineSource(BaseSourceDriver):
  def set_explicit_ack_mode(function: Function, **extra_arguments) -> dict[str, Any]:
  extra_arguments = extra_arguments or {}
  engine = "sync"
- if (
- function.spec
- and hasattr(function.spec, "graph")
- and function.spec.graph
- and function.spec.graph.engine
- ):
- engine = function.spec.graph.engine
+ if function.spec and hasattr(function.spec, "graph"):
+ engine = getattr(function.spec.graph, "engine", None) or engine
  if mlrun.mlconf.is_explicit_ack_enabled() and engine == "async":
  extra_arguments["explicit_ack_mode"] = extra_arguments.get(
      "explicit_ack_mode", "explicitOnly"
mlrun/datastore/storeytargets.py CHANGED
@@ -109,17 +109,20 @@ class StreamStoreyTarget(storey.StreamTarget):
  raise mlrun.errors.MLRunInvalidArgumentError("StreamTarget requires a path")

  _, storage_options = get_url_and_storage_options(uri)
- endpoint, path = parse_path(uri)
+ _, path = parse_path(uri)

  access_key = storage_options.get("v3io_access_key")
- storage = V3ioDriver(
- webapi=endpoint or mlrun.mlconf.v3io_api, access_key=access_key
- )
+
+ if alt_key_name := kwargs.pop("alternative_v3io_access_key", None):
+ if alt_key := mlrun.get_secret_or_env(alt_key_name):
+ access_key = alt_key
+
+ storage = V3ioDriver(access_key=access_key)

  if storage_options:
  kwargs["storage"] = storage
  if args:
- args[0] = endpoint
+ args[0] = path
  if "stream_path" in kwargs:
  kwargs["stream_path"] = path

@@ -128,6 +131,7 @@ class StreamStoreyTarget(storey.StreamTarget):

  class KafkaStoreyTarget(storey.KafkaTarget):
  def __init__(self, *args, **kwargs):
+ kwargs.pop("alternative_v3io_access_key", None)
  path = kwargs.pop("path")
  attributes = kwargs.pop("attributes", {})
  if path and path.startswith("ds://"):
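The new `alternative_v3io_access_key` keyword names a secret or environment variable; when it resolves to a value, that value replaces the access key taken from the storage options, and `KafkaStoreyTarget` simply discards the keyword. A sketch of the resolution order (variable name illustrative):

    import mlrun

    access_key = "key-from-storage-options"
    kwargs = {"alternative_v3io_access_key": "V3IO_ACCESS_KEY"}
    if alt_key_name := kwargs.pop("alternative_v3io_access_key", None):
        # get_secret_or_env looks the name up in available secrets, then the
        # environment; if neither is set, the original key is kept.
        if alt_key := mlrun.get_secret_or_env(alt_key_name):
            access_key = alt_key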
mlrun/model_monitoring/applications/base.py CHANGED
@@ -38,21 +38,30 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):

  For example, :code:`MyApp` below is a simplistic custom application::

+ from mlrun.common.schemas.model_monitoring.constants import (
+ ResultKindApp,
+ ResultStatusApp,
+ )
+ from mlrun.model_monitoring.applications import (
+ ModelMonitoringApplicationBase,
+ ModelMonitoringApplicationResult,
+ MonitoringApplicationContext,
+ )
+
+
  class MyApp(ModelMonitoringApplicationBase):
  def do_tracking(
- self,
- monitoring_context: mm_context.MonitoringApplicationContext,
+ self, monitoring_context: MonitoringApplicationContext
  ) -> ModelMonitoringApplicationResult:
- monitoring_context.log_artifact(
- TableArtifact(
- "sample_df_stats", df=self.dict_to_histogram(sample_df_stats)
- )
+ monitoring_context.logger.info(
+ "Running application",
+ application_name=monitoring_context.application_name,
  )
  return ModelMonitoringApplicationResult(
  name="data_drift_test",
  value=0.5,
- kind=mm_constant.ResultKindApp.data_drift,
- status=mm_constant.ResultStatusApp.detected,
+ kind=ResultKindApp.data_drift,
+ status=ResultStatusApp.detected,
  )
  """

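A hedged sketch of registering the revised `MyApp` docstring example as a monitoring application (file name, project name, and function name are illustrative):

    import mlrun

    project = mlrun.get_or_create_project("my-project")
    # Register MyApp (saved to my_app.py) as a monitoring application
    project.set_model_monitoring_function(
        func="my_app.py",
        application_class="MyApp",
        name="my-app",
    )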
mlrun/model_monitoring/applications/context.py CHANGED
@@ -23,12 +23,13 @@ import mlrun.common.constants as mlrun_constants
  import mlrun.common.schemas.model_monitoring.constants as mm_constants
  import mlrun.errors
  import mlrun.feature_store as fstore
+ import mlrun.feature_store.feature_set as fs
  import mlrun.features
  import mlrun.serving
  import mlrun.utils
  from mlrun.artifacts import Artifact, DatasetArtifact, ModelArtifact, get_model
  from mlrun.common.model_monitoring.helpers import FeatureStats
- from mlrun.common.schemas import FeatureSet, ModelEndpoint
+ from mlrun.common.schemas import ModelEndpoint
  from mlrun.model_monitoring.helpers import (
  calculate_inputs_statistics,
  )
@@ -41,7 +42,6 @@ class _ArtifactsLogger(Protocol):

  def log_artifact(self, *args, **kwargs) -> Artifact: ...
  def log_dataset(self, *args, **kwargs) -> DatasetArtifact: ...
- def log_model(self, *args, **kwargs) -> ModelArtifact: ...


  class MonitoringApplicationContext:
@@ -59,7 +59,7 @@ class MonitoringApplicationContext:
  model_endpoint_dict: Optional[dict[str, ModelEndpoint]] = None,
  sample_df: Optional[pd.DataFrame] = None,
  feature_stats: Optional[FeatureStats] = None,
- feature_sets_dict: Optional[dict[str, FeatureSet]] = None,
+ feature_sets_dict: Optional[dict[str, fs.FeatureSet]] = None,
  ) -> None:
  """
  The :code:`MonitoringApplicationContext` object holds all the relevant information for the
@@ -124,9 +124,13 @@ class MonitoringApplicationContext:
  self._model_endpoint: Optional[ModelEndpoint] = (
  model_endpoint_dict.get(self.endpoint_id) if model_endpoint_dict else None
  )
- self._feature_set: Optional[FeatureSet] = (
+ self._feature_set: Optional[fs.FeatureSet] = (
  feature_sets_dict.get(self.endpoint_id) if feature_sets_dict else None
  )
+ store, _, _ = mlrun.store_manager.get_or_create_store(
+ mlrun.mlconf.artifact_path
+ )
+ self.storage_options = store.get_storage_options()

  @classmethod
  def _from_ml_ctx(
@@ -169,7 +173,7 @@ class MonitoringApplicationContext:
  model_endpoint_dict: Optional[dict[str, ModelEndpoint]] = None,
  sample_df: Optional[pd.DataFrame] = None,
  feature_stats: Optional[FeatureStats] = None,
- feature_sets_dict: Optional[dict[str, FeatureSet]] = None,
+ feature_sets_dict: Optional[dict[str, fs.FeatureSet]] = None,
  ) -> "MonitoringApplicationContext":
  nuclio_logger = graph_context.logger
  artifacts_logger = graph_context.project_obj
@@ -192,13 +196,14 @@ class MonitoringApplicationContext:
  )

  def _get_default_labels(self) -> dict[str, str]:
- return {
+ labels = {
  mlrun_constants.MLRunInternalLabels.runner_pod: socket.gethostname(),
  mlrun_constants.MLRunInternalLabels.producer_type: "model-monitoring-app",
  mlrun_constants.MLRunInternalLabels.app_name: self.application_name,
  mlrun_constants.MLRunInternalLabels.endpoint_id: self.endpoint_id,
  mlrun_constants.MLRunInternalLabels.endpoint_name: self.endpoint_name,
  }
+ return {key: value for key, value in labels.items() if value is not None}

  def _add_default_labels(self, labels: Optional[dict[str, str]]) -> dict[str, str]:
  """Add the default labels to logged artifacts labels"""
@@ -221,22 +226,13 @@ MonitoringApplicationContext:
  "You can either provide the sample dataframe directly, the model endpoint's details and times, "
  "or adapt the application's logic to not access the sample dataframe."
  )
- feature_set = self.feature_set
- features = [f"{feature_set.metadata.name}.*"]
- vector = fstore.FeatureVector(
- name=f"{self.endpoint_id}_vector",
- features=features,
- with_indexes=True,
- )
- vector.metadata.tag = self.application_name
- vector.feature_set_objects = {feature_set.metadata.name: feature_set}
-
- offline_response = vector.get_offline_features(
+ df = self.feature_set.to_dataframe(
  start_time=self.start_infer_time,
  end_time=self.end_infer_time,
- timestamp_for_filtering=mm_constants.FeatureSetFeatures.time_stamp(),
+ time_column=mm_constants.EventFieldType.TIMESTAMP,
+ storage_options=self.storage_options,
  )
- self._sample_df = offline_response.to_dataframe().reset_index(drop=True)
+ self._sample_df = df.reset_index(drop=True)
  return self._sample_df

  @property
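The rewritten `sample_df` property drops the ad-hoc `FeatureVector` and reads the monitoring feature set's offline data directly. A sketch of the equivalent call shape, assuming `mm_constants.EventFieldType.TIMESTAMP` is the event-timestamp column name:

    def read_window(feature_set, start, end, storage_options):
        # Read the endpoint's monitoring feature set for the inference window,
        # filtering on the event timestamp column (per the hunk above)
        df = feature_set.to_dataframe(
            start_time=start,
            end_time=end,
            time_column="timestamp",  # mm_constants.EventFieldType.TIMESTAMP
            storage_options=storage_options,
        )
        return df.reset_index(drop=True)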
@@ -259,7 +255,7 @@ class MonitoringApplicationContext:
  return self._model_endpoint

  @property
- def feature_set(self) -> FeatureSet:
+ def feature_set(self) -> fs.FeatureSet:
  if not self._feature_set and self.model_endpoint:
  self._feature_set = fstore.get_feature_set(
  self.model_endpoint.spec.monitoring_feature_set_uri
@@ -329,13 +325,31 @@ class MonitoringApplicationContext:
  upload: Optional[bool] = None,
  labels: Optional[dict[str, str]] = None,
  target_path: Optional[str] = None,
+ unique_per_endpoint: bool = True,
  **kwargs,
  ) -> Artifact:
  """
  Log an artifact.
- See :func:`~mlrun.projects.MlrunProject.log_artifact` for the documentation.
+
+ .. caution::
+
+ Logging artifacts in every model monitoring window may cause scale issues.
+ This method should be called on special occasions only.
+
+ See :func:`~mlrun.projects.MlrunProject.log_artifact` for the full documentation, except for one
+ new argument:
+
+ :param unique_per_endpoint: by default ``True``, we will log different artifact for each model endpoint,
+ set to ``False`` without changing item key will cause artifact override.
  """
  labels = self._add_default_labels(labels)
+ # By default, we want to log different artifact for each model endpoint
+ endpoint_id = labels.get(mlrun_constants.MLRunInternalLabels.endpoint_id, "")
+ if unique_per_endpoint and isinstance(item, str):
+ item = f"{item}-{endpoint_id}" if endpoint_id else item
+ elif unique_per_endpoint: # isinstance(item, Artifact) is True
+ item.key = f"{item.key}-{endpoint_id}" if endpoint_id else item.key
+
  return self._artifacts_logger.log_artifact(
  item,
  body=body,
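The practical effect of `unique_per_endpoint` is a deterministic key suffix, so two endpoints logging the same artifact name no longer overwrite each other. An illustration with hypothetical values:

    item, endpoint_id = "drift_report", "ep-123"
    unique_per_endpoint = True
    if unique_per_endpoint:
        # Same derivation as the hunk above: suffix only when an endpoint id
        # label is present
        item = f"{item}-{endpoint_id}" if endpoint_id else item
    assert item == "drift_report-ep-123"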
@@ -364,13 +378,29 @@ MonitoringApplicationContext:
  target_path="",
  extra_data=None,
  label_column: Optional[str] = None,
+ unique_per_endpoint: bool = True,
  **kwargs,
  ) -> DatasetArtifact:
  """
  Log a dataset artifact.
- See :func:`~mlrun.projects.MlrunProject.log_dataset` for the documentation.
+
+ .. caution::
+
+ Logging datasets in every model monitoring window may cause scale issues.
+ This method should be called on special occasions only.
+
+ See :func:`~mlrun.projects.MlrunProject.log_dataset` for the full documentation, except for one
+ new argument:
+
+ :param unique_per_endpoint: by default ``True``, we will log different artifact for each model endpoint,
+ set to ``False`` without changing item key will cause artifact override.
  """
  labels = self._add_default_labels(labels)
+ # By default, we want to log different artifact for each model endpoint
+ endpoint_id = labels.get(mlrun_constants.MLRunInternalLabels.endpoint_id, "")
+ if unique_per_endpoint and isinstance(key, str):
+ key = f"{key}-{endpoint_id}" if endpoint_id else key
+
  return self._artifacts_logger.log_dataset(
  key,
  df,
@@ -387,54 +417,3 @@ MonitoringApplicationContext:
  label_column=label_column,
  **kwargs,
  )
-
- def log_model(
- self,
- key,
- body=None,
- framework="",
- tag="",
- model_dir=None,
- model_file=None,
- algorithm=None,
- metrics=None,
- parameters=None,
- artifact_path=None,
- upload=None,
- labels=None,
- inputs: Optional[list[mlrun.features.Feature]] = None,
- outputs: Optional[list[mlrun.features.Feature]] = None,
- feature_vector: Optional[str] = None,
- feature_weights: Optional[list] = None,
- training_set=None,
- label_column=None,
- extra_data=None,
- **kwargs,
- ) -> ModelArtifact:
- """
- Log a model artifact.
- See :func:`~mlrun.projects.MlrunProject.log_model` for the documentation.
- """
- labels = self._add_default_labels(labels)
- return self._artifacts_logger.log_model(
- key,
- body=body,
- framework=framework,
- tag=tag,
- model_dir=model_dir,
- model_file=model_file,
- algorithm=algorithm,
- metrics=metrics,
- parameters=parameters,
- artifact_path=artifact_path,
- upload=upload,
- labels=labels,
- inputs=inputs,
- outputs=outputs,
- feature_vector=feature_vector,
- feature_weights=feature_weights,
- training_set=training_set,
- label_column=label_column,
- extra_data=extra_data,
- **kwargs,
- )
mlrun/model_monitoring/applications/evidently/base.py CHANGED
@@ -130,17 +130,28 @@ class EvidentlyModelMonitoringApplicationBase(
  monitoring_context: mm_context.MonitoringApplicationContext,
  evidently_object: "Display",
  artifact_name: str,
+ unique_per_endpoint: bool = True,
  ) -> None:
  """
- Logs an Evidently report or suite as an artifact.
+ Logs an Evidently report or suite as an artifact.
+
+ .. caution::
+
+ Logging Evidently objects in every model monitoring window may cause scale issues.
+ This method should be called on special occasions only.

  :param monitoring_context: (MonitoringApplicationContext) The monitoring context to process.
  :param evidently_object: (Display) The Evidently display to log, e.g. a report or a test suite object.
  :param artifact_name: (str) The name for the logged artifact.
+ :param unique_per_endpoint: by default ``True``, we will log different artifact for each model endpoint,
+ set to ``False`` without changing item key will cause artifact override.
  """
  evidently_object_html = evidently_object.get_html()
  monitoring_context.log_artifact(
- artifact_name, body=evidently_object_html.encode("utf-8"), format="html"
+ artifact_name,
+ body=evidently_object_html.encode("utf-8"),
+ format="html",
+ unique_per_endpoint=unique_per_endpoint,
  )

  def log_project_dashboard(
@@ -149,14 +160,22 @@ class EvidentlyModelMonitoringApplicationBase(
  timestamp_start: pd.Timestamp,
  timestamp_end: pd.Timestamp,
  artifact_name: str = "dashboard",
+ unique_per_endpoint: bool = True,
  ) -> None:
  """
  Logs an Evidently project dashboard.

+ .. caution::
+
+ Logging Evidently dashboards in every model monitoring window may cause scale issues.
+ This method should be called on special occasions only.
+
  :param monitoring_context: (MonitoringApplicationContext) The monitoring context to process.
  :param timestamp_start: (pd.Timestamp) The start timestamp for the dashboard data.
  :param timestamp_end: (pd.Timestamp) The end timestamp for the dashboard data.
  :param artifact_name: (str) The name for the logged artifact.
+ :param unique_per_endpoint: by default ``True``, we will log different artifact for each model endpoint,
+ set to ``False`` without changing item key will cause artifact override.
  """

  dashboard_info = self.evidently_project.build_dashboard_info(
@@ -170,5 +189,8 @@ class EvidentlyModelMonitoringApplicationBase(

  dashboard_html = file_html_template(params=template_params)
  monitoring_context.log_artifact(
- artifact_name, body=dashboard_html.encode("utf-8"), format="html"
+ artifact_name,
+ body=dashboard_html.encode("utf-8"),
+ format="html",
+ unique_per_endpoint=unique_per_endpoint,
  )
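Both helpers now forward `unique_per_endpoint` to `log_artifact`. A usage sketch, assuming the first hunk above is the body of the class's `log_evidently_object` method (the method name itself is not shown in this hunk):

    # Inside an Evidently-based application's do_tracking():
    self.log_evidently_object(
        monitoring_context,
        evidently_object=report,    # an Evidently report or test-suite object
        artifact_name="evidently_report",
        unique_per_endpoint=False,  # opt out: one shared artifact, overridden each run
    )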
mlrun/model_monitoring/applications/histogram_data_drift.py CHANGED
@@ -102,10 +102,10 @@ class HistogramDataDriftApplication(ModelMonitoringApplicationBase):
  Each metric is calculated over all the features individually and the mean is taken as the metric value.
  The average of Hellinger and total variance distance is taken as the result.

- The application can log two artifacts:
+ The application can log two artifacts (disabled by default due to performance issues):

- * JSON with the general drift value per feature, produced by default.
- * Plotly table with the various metrics and histograms per feature (disabled by default due to performance issues).
+ * JSON with the general drift value per feature.
+ * Plotly table with the various metrics and histograms per feature.

  This application is deployed by default when calling
  :py:func:`~mlrun.projects.MlrunProject.enable_model_monitoring`.
@@ -134,12 +134,14 @@ class HistogramDataDriftApplication(ModelMonitoringApplicationBase):
  def __init__(
  self,
  value_classifier: Optional[ValueClassifier] = None,
- produce_json_artifact: bool = True,
+ produce_json_artifact: bool = False,
  produce_plotly_artifact: bool = False,
  ) -> None:
  """
- :param value_classifier: Classifier object that adheres to the :py:class:`~ValueClassifier` protocol.
- If not provided, the default :py:class:`~DataDriftClassifier` is used.
+ :param value_classifier: Classifier object that adheres to the :py:class:`~ValueClassifier` protocol.
+ If not provided, the default :py:class:`~DataDriftClassifier` is used.
+ :param produce_json_artifact: Whether to produce the JSON artifact or not, ``False`` by default.
+ :param produce_plotly_artifact: Whether to produce the Plotly artifact or not, ``False`` by default.
  """
  self._value_classifier = value_classifier or DataDriftClassifier()
  assert self._REQUIRED_METRICS <= set(
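With both artifacts now disabled by default, deployments that still want them must opt in at construction time:

    from mlrun.model_monitoring.applications.histogram_data_drift import (
        HistogramDataDriftApplication,
    )

    # Restore the pre-rc2 default of producing the per-feature drift JSON
    app = HistogramDataDriftApplication(produce_json_artifact=True)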
mlrun/model_monitoring/controller.py CHANGED
@@ -311,10 +311,12 @@ class MonitoringApplicationController:
  mlrun.platforms.iguazio.KafkaOutputStream,
  ]:
  if self._model_monitoring_stream is None:
- self._model_monitoring_stream = mlrun.model_monitoring.helpers.get_output_stream(
- project=self.project,
- function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
- v3io_access_key=self.v3io_access_key,
+ self._model_monitoring_stream = (
+ mlrun.model_monitoring.helpers.get_output_stream(
+ project=self.project,
+ function_name=mm_constants.MonitoringFunctionNames.STREAM,
+ v3io_access_key=self.model_monitoring_access_key,
+ )
  )
  return self._model_monitoring_stream

mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py CHANGED
@@ -11,10 +11,11 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
+
  import asyncio
  from datetime import datetime, timedelta
  from threading import Lock
- from typing import Callable, Literal, Optional, Union
+ from typing import Callable, Final, Literal, Optional, Union

  import pandas as pd
  import taosws
@@ -24,6 +25,7 @@ from taoswswrap.tdengine_connection import (
  )

  import mlrun.common.schemas.model_monitoring as mm_schemas
+ import mlrun.common.types
  import mlrun.model_monitoring.db.tsdb.tdengine.schemas as tdengine_schemas
  import mlrun.model_monitoring.db.tsdb.tdengine.stream_graph_steps
  from mlrun.datastore.datastore_profile import DatastoreProfile
@@ -35,6 +37,19 @@ _connection = None
  _connection_lock = Lock()


+ class TDEngineTimestampPrecision(mlrun.common.types.StrEnum):
+ """
+ The timestamp precision for the TDEngine database.
+ For more information, see:
+ https://docs.tdengine.com/tdengine-reference/sql-manual/data-types/#timestamp
+ https://docs.tdengine.com/tdengine-reference/sql-manual/manage-databases/#create-database
+ """
+
+ MILLISECOND = "ms" # TDEngine's default
+ MICROSECOND = "us" # MLRun's default
+ NANOSECOND = "ns"
+
+
  class TDEngineConnector(TSDBConnector):
  """
  Handles the TSDB operations when the TSDB connector is of type TDEngine.
@@ -47,12 +62,17 @@ class TDEngineConnector(TSDBConnector):
  self,
  project: str,
  profile: DatastoreProfile,
+ timestamp_precision: TDEngineTimestampPrecision = TDEngineTimestampPrecision.MICROSECOND,
  **kwargs,
  ):
  super().__init__(project=project)

  self._tdengine_connection_profile = profile

+ self._timestamp_precision: Final = ( # cannot be changed after initialization
+ timestamp_precision
+ )
+
  self._init_super_tables()

  self._run_directly = (
@@ -105,7 +125,7 @@ class TDEngineConnector(TSDBConnector):
  """Create the database if it does not exist."""
  self.connection.prefix_statements = []
  self.connection.run(
- statements=f"CREATE DATABASE IF NOT EXISTS {self.database}",
+ statements=f"CREATE DATABASE IF NOT EXISTS {self.database} PRECISION '{self._timestamp_precision}'",
  timeout=self._timeout,
  retries=self._retries,
  )
@@ -180,6 +200,7 @@ class TDEngineConnector(TSDBConnector):
  columns=columns,
  subtable=table_name,
  values=event,
+ timestamp_precision=self._timestamp_precision,
  )

  self.connection.run(
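The precision value now reaches both the database DDL and the insert-statement builder. The DDL produced for MLRun's default precision (database name hypothetical):

    database = "mlrun_model_monitoring"  # hypothetical database name
    precision = "us"  # TDEngineTimestampPrecision.MICROSECOND, MLRun's default
    statement = f"CREATE DATABASE IF NOT EXISTS {database} PRECISION '{precision}'"
    # -> CREATE DATABASE IF NOT EXISTS mlrun_model_monitoring PRECISION 'us'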
mlrun/model_monitoring/stream_processing.py CHANGED
@@ -15,8 +15,6 @@
  import datetime
  import typing

- import storey
-
  import mlrun
  import mlrun.common.model_monitoring.helpers
  import mlrun.feature_store as fstore
@@ -144,7 +142,7 @@ class EventStreamProcessor:

  graph = typing.cast(
  mlrun.serving.states.RootFlowStep,
- fn.set_topology(mlrun.serving.states.StepKinds.flow),
+ fn.set_topology(mlrun.serving.states.StepKinds.flow, engine="async"),
  )

  # split the graph between event with error vs valid event
@@ -264,6 +262,9 @@ class EventStreamProcessor:
  path=stream_uri,
  sharding_func=ControllerEvent.ENDPOINT_ID,
  after="ForwardNOP",
+ # Force using the pipeline key instead of the one in the profile in case of v3io profile.
+ # In case of Kafka, this parameter will be ignored.
+ alternative_v3io_access_key="V3IO_ACCESS_KEY",
  )

  apply_push_controller_stream(controller_stream_uri)
@@ -342,7 +343,8 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
  logger.debug(
  "Skipped nop event inside of ProcessEndpointEvent", event=event
  )
- return storey.Event(body=[event])
+ full_event.body = [event]
+ return full_event
  # Getting model version and function uri from event
  # and use them for retrieving the endpoint_id
  function_uri = full_event.body.get(EventFieldType.FUNCTION_URI)
@@ -475,8 +477,9 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):

  # Create a storey event object with list of events, based on endpoint_id which will be used
  # in the upcoming steps
- storey_event = storey.Event(body=events, key=endpoint_id)
- return storey_event
+ full_event.key = endpoint_id
+ full_event.body = events
+ return full_event

  def resume_state(self, endpoint_id, endpoint_name):
  # Make sure process is resumable, if process fails for any reason, be able to pick things up close to where we
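Both `ProcessEndpointEvent` hunks replace freshly constructed `storey.Event` objects with mutation of the incoming full event, which lets the async graph keep the original event's metadata. A stand-alone sketch of the pattern (`_Event` is a stand-in, not a storey type):

    class _Event:
        """Stand-in for the storey full event (attributes assumed)."""
        key = None
        body = None

    def process(full_event, events, endpoint_id):
        # Mutating the incoming event keeps its metadata (id, time, ack
        # handle) intact; a fresh Event object would drop it
        full_event.key = endpoint_id
        full_event.body = events
        return full_event

    e = process(_Event(), ["evt-1", "evt-2"], "ep-123")
    assert e.key == "ep-123" and e.body == ["evt-1", "evt-2"]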
mlrun/runtimes/nuclio/function.py CHANGED
@@ -577,13 +577,9 @@ class RemoteRuntime(KubeResource):
  access_key = self._resolve_v3io_access_key()
  engine = "sync"
  explicit_ack_mode = kwargs.pop("explicit_ack_mode", None)
- if (
- self.spec
- and hasattr(self.spec, "graph")
- and self.spec.graph
- and self.spec.graph.engine
- ):
- engine = self.spec.graph.engine
+ if self.spec and hasattr(self.spec, "graph"):
+ engine = getattr(self.spec.graph, "engine", None) or engine
+
  if mlrun.mlconf.is_explicit_ack_enabled() and engine == "async":
  explicit_ack_mode = explicit_ack_mode or "explicitOnly"

mlrun/runtimes/nuclio/serving.py CHANGED
@@ -271,7 +271,8 @@ class ServingRuntime(RemoteRuntime):
  can specify special router class and router arguments

  flow - workflow (DAG) with a chain of states
- flow support "sync" and "async" engines, branches are not allowed in sync mode
+ flow supports both "sync" and "async" engines, with "async" being the default.
+ Branches are not allowed in sync mode.
  when using async mode calling state.respond() will mark the state as the
  one which generates the (REST) call response

@@ -300,7 +301,7 @@ class ServingRuntime(RemoteRuntime):
  step = RouterStep(class_name=class_name, class_args=class_args)
  self.spec.graph = step
  elif topology == StepKinds.flow:
- self.spec.graph = RootFlowStep(engine=engine)
+ self.spec.graph = RootFlowStep(engine=engine or "async")
  else:
  raise mlrun.errors.MLRunInvalidArgumentError(
  f"unsupported topology {topology}, use 'router' or 'flow'"
mlrun/serving/states.py CHANGED
@@ -1833,7 +1833,7 @@ def params_to_step(
  class_args = class_args or {}

  if isinstance(class_name, QueueStep):
- if not name or class_name.name:
+ if not (name or class_name.name):
  raise MLRunInvalidArgumentError("queue name must be specified")

  step = class_name
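The old condition `not name or class_name.name` raised whenever the QueueStep already carried a name, which is the opposite of the intent; the parenthesized form raises only when both names are missing. A truth-table check:

    def must_raise(name, step_name):
        return not (name or step_name)

    assert must_raise(None, None) is True    # no name anywhere -> error
    assert must_raise("q1", None) is False
    assert must_raise(None, "q2") is False   # the old form wrongly raised here
    assert must_raise("q1", "q2") is False   # ...and here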
@@ -1854,7 +1854,11 @@
  elif class_name and hasattr(class_name, "to_dict"):
  struct = class_name.to_dict()
  kind = struct.get("kind", StepKinds.task)
- name = name or struct.get("name", struct.get("class_name"))
+ name = (
+ name
+ or struct.get("name", struct.get("class_name"))
+ or class_name.to_dict(["name"]).get("name")
+ )
  cls = classes_map.get(kind, RootFlowStep)
  step = cls.from_dict(struct)
  step.function = function
mlrun/utils/helpers.py CHANGED
@@ -41,6 +41,7 @@ import inflection
  import numpy as np
  import packaging.version
  import pandas
+ import pytz
  import semver
  import yaml
  from dateutil import parser
@@ -1131,21 +1132,83 @@ def get_workflow_url(
  return url


- def get_kfp_project_filter(project_name: str) -> str:
+ def get_kfp_list_runs_filter(
+ project_name: Optional[str] = None,
+ end_date: Optional[str] = None,
+ start_date: Optional[str] = None,
+ ) -> str:
  """
- Generates a filter string for KFP runs, using a substring predicate
- on the run's 'name' field. This is used as a heuristic to retrieve runs that are associated
- with a specific project. The 'op: 9' operator indicates that the filter checks if the
- project name appears as a substring in the run's name, ensuring that we can identify
- runs belonging to the desired project.
+ Generates a filter for listing Kubeflow Pipelines (KFP) runs.
+
+ :param project_name: The name of the project. If "*", it won't filter by project.
+ :param end_date: The latest creation date for filtering runs (ISO 8601 format).
+ :param start_date: The earliest creation date for filtering runs (ISO 8601 format).
+ :return: A JSON-formatted filter string for KFP.
  """
- is_substring_op = 9
- project_name_filter = {
- "predicates": [
- {"key": "name", "op": is_substring_op, "string_value": project_name}
- ]
- }
- return json.dumps(project_name_filter)
+
+ # KFP filter operation codes
+ kfp_less_than_or_equal_op = 7 # '<='
+ kfp_greater_than_or_equal_op = 5 # '>='
+ kfp_substring_op = 9 # Substring match
+
+ filters = {"predicates": []}
+
+ if end_date:
+ filters["predicates"].append(
+ {
+ "key": "created_at",
+ "op": kfp_less_than_or_equal_op,
+ "timestamp_value": end_date,
+ }
+ )
+
+ if project_name and project_name != "*":
+ filters["predicates"].append(
+ {
+ "key": "name",
+ "op": kfp_substring_op,
+ "string_value": project_name,
+ }
+ )
+ if start_date:
+ filters["predicates"].append(
+ {
+ "key": "created_at",
+ "op": kfp_greater_than_or_equal_op,
+ "timestamp_value": start_date,
+ }
+ )
+ return json.dumps(filters)
+
+
+ def validate_and_convert_date(date_input: str) -> str:
+ """
+ Converts any recognizable date string into a standardized RFC 3339 format.
+ :param date_input: A date string in a recognizable format.
+ """
+ try:
+ dt_object = parser.parse(date_input)
+ if dt_object.tzinfo is not None:
+ # Convert to UTC if it's in a different timezone
+ dt_object = dt_object.astimezone(pytz.utc)
+ else:
+ # If no timezone info is present, assume it's in local time
+ local_tz = pytz.timezone("UTC")
+ dt_object = local_tz.localize(dt_object)
+
+ # Convert the datetime object to an RFC 3339-compliant string.
+ # RFC 3339 requires timestamps to be in ISO 8601 format with a 'Z' suffix for UTC time.
+ # The isoformat() method adds a "+00:00" suffix for UTC by default,
+ # so we replace it with "Z" to ensure compliance.
+ formatted_date = dt_object.isoformat().replace("+00:00", "Z")
+ formatted_date = formatted_date.rstrip("Z") + "Z"
+
+ return formatted_date
+ except (ValueError, OverflowError) as e:
+ raise ValueError(
+ f"Invalid date format: {date_input}."
+ f" Date format must adhere to the RFC 3339 standard (e.g., 'YYYY-MM-DDTHH:MM:SSZ' for UTC)."
+ ) from e


  def are_strings_in_exception_chain_messages(
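Taken together, the two helpers let callers bound a KFP run listing by project and creation time. A usage sketch (dates illustrative):

    from mlrun.utils.helpers import (
        get_kfp_list_runs_filter,
        validate_and_convert_date,
    )

    start = validate_and_convert_date("2025-01-01")  # -> '2025-01-01T00:00:00Z'
    end = validate_and_convert_date("2025-01-08T12:00:00+02:00")  # -> '2025-01-08T10:00:00Z'
    kfp_filter = get_kfp_list_runs_filter(
        project_name="my-project", start_date=start, end_date=end
    )
    # A JSON string with three predicates: created_at <= end,
    # name contains "my-project", created_at >= start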
@@ -2156,7 +2219,10 @@ class Workflow:
  def _get_workflow_manifest(
  workflow_id: str,
  ) -> typing.Optional[mlrun_pipelines.models.PipelineManifest]:
- kfp_client = mlrun_pipelines.utils.get_client(mlrun.mlconf.kfp_url)
+ kfp_client = mlrun_pipelines.utils.get_client(
+ url=mlrun.mlconf.kfp_url,
+ namespace=mlrun.mlconf.namespace,
+ )

  # arbitrary timeout of 5 seconds, the workflow should be done by now
  kfp_run = kfp_client.wait_for_run_completion(workflow_id, 5)
mlrun/utils/version/version.json CHANGED
@@ -1,4 +1,4 @@
  {
- "git_commit": "b08bde55bcb7fde423002ec145ace1e99e677955",
- "version": "1.9.0-rc1"
+ "git_commit": "73f7c48f424b72bc46d43b4a27f7b470e5290d5c",
+ "version": "1.9.0-rc2"
  }
mlrun-1.9.0rc1.dist-info/METADATA → mlrun-1.9.0rc2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mlrun
- Version: 1.9.0rc1
+ Version: 1.9.0rc2
  Summary: Tracking and config of machine learning runs
  Home-page: https://github.com/mlrun/mlrun
  Author: Yaron Haviv
@@ -44,16 +44,15 @@ Requires-Dist: semver~=3.0
  Requires-Dist: dependency-injector~=4.41
  Requires-Dist: fsspec<2024.7,>=2023.9.2
  Requires-Dist: v3iofs~=0.1.17
- Requires-Dist: storey~=1.8.9
+ Requires-Dist: storey~=1.8.11
  Requires-Dist: inflection~=0.5.0
  Requires-Dist: python-dotenv~=1.0
  Requires-Dist: setuptools>=75.2
  Requires-Dist: deprecated~=1.2
  Requires-Dist: jinja2>=3.1.3,~=3.1
  Requires-Dist: orjson<4,>=3.9.15
- Requires-Dist: mlrun-pipelines-kfp-common~=0.3.12
- Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.3.9; python_version < "3.11"
- Requires-Dist: mlrun-pipelines-kfp-v2~=0.3.8; python_version >= "3.11"
+ Requires-Dist: mlrun-pipelines-kfp-common~=0.4.1
+ Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.4.0
  Requires-Dist: docstring_parser~=0.16
  Requires-Dist: aiosmtplib~=3.0
  Provides-Extra: s3
@@ -100,11 +99,11 @@ Requires-Dist: ossfs==2023.12.0; extra == "alibaba-oss"
  Requires-Dist: oss2==2.18.1; extra == "alibaba-oss"
  Provides-Extra: tdengine
  Requires-Dist: taos-ws-py==0.3.2; extra == "tdengine"
- Requires-Dist: taoswswrap~=0.3.4; extra == "tdengine"
+ Requires-Dist: taoswswrap~=0.3.5; extra == "tdengine"
  Provides-Extra: snowflake
  Requires-Dist: snowflake-connector-python~=3.7; extra == "snowflake"
  Provides-Extra: kfp18
- Requires-Dist: mlrun_pipelines_kfp_v1_8[kfp]>=0.3.2; python_version < "3.11" and extra == "kfp18"
+ Requires-Dist: mlrun_pipelines_kfp_v1_8[kfp]>=0.4.0; python_version < "3.11" and extra == "kfp18"
  Provides-Extra: api
  Requires-Dist: uvicorn~=0.32.1; extra == "api"
  Requires-Dist: dask-kubernetes~=0.11.0; extra == "api"
@@ -120,7 +119,7 @@ Requires-Dist: timelength~=1.1; extra == "api"
  Requires-Dist: memray~=1.12; sys_platform != "win32" and extra == "api"
  Requires-Dist: aiosmtplib~=3.0; extra == "api"
  Requires-Dist: pydantic<2,>=1; extra == "api"
- Requires-Dist: mlrun-pipelines-kfp-v1-8[kfp]~=0.3.9; python_version < "3.11" and extra == "api"
+ Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.4.0; extra == "api"
  Requires-Dist: grpcio~=1.70.0; extra == "api"
  Provides-Extra: all
  Requires-Dist: adlfs==2023.9.0; extra == "all"
@@ -153,7 +152,7 @@ Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "all"
  Requires-Dist: snowflake-connector-python~=3.7; extra == "all"
  Requires-Dist: sqlalchemy~=1.4; extra == "all"
  Requires-Dist: taos-ws-py==0.3.2; extra == "all"
- Requires-Dist: taoswswrap~=0.3.4; extra == "all"
+ Requires-Dist: taoswswrap~=0.3.5; extra == "all"
  Provides-Extra: complete
  Requires-Dist: adlfs==2023.9.0; extra == "complete"
  Requires-Dist: aiobotocore<2.16,>=2.5.0; extra == "complete"
@@ -185,7 +184,7 @@ Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "complete"
  Requires-Dist: snowflake-connector-python~=3.7; extra == "complete"
  Requires-Dist: sqlalchemy~=1.4; extra == "complete"
  Requires-Dist: taos-ws-py==0.3.2; extra == "complete"
- Requires-Dist: taoswswrap~=0.3.4; extra == "complete"
+ Requires-Dist: taoswswrap~=0.3.5; extra == "complete"
  Provides-Extra: complete-api
  Requires-Dist: adlfs==2023.9.0; extra == "complete-api"
  Requires-Dist: aiobotocore<2.16,>=2.5.0; extra == "complete-api"
@@ -216,7 +215,7 @@ Requires-Dist: igz-mgmt~=0.4.1; extra == "complete-api"
  Requires-Dist: kafka-python~=2.0; extra == "complete-api"
  Requires-Dist: memray~=1.12; sys_platform != "win32" and extra == "complete-api"
  Requires-Dist: mlflow~=2.16; extra == "complete-api"
- Requires-Dist: mlrun-pipelines-kfp-v1-8[kfp]~=0.3.9; python_version < "3.11" and extra == "complete-api"
+ Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.4.0; extra == "complete-api"
  Requires-Dist: msrest~=0.6.21; extra == "complete-api"
  Requires-Dist: objgraph~=3.6; extra == "complete-api"
  Requires-Dist: oss2==2.18.1; extra == "complete-api"
@@ -230,7 +229,7 @@ Requires-Dist: s3fs<2024.7,>=2023.9.2; extra == "complete-api"
  Requires-Dist: snowflake-connector-python~=3.7; extra == "complete-api"
  Requires-Dist: sqlalchemy~=1.4; extra == "complete-api"
  Requires-Dist: taos-ws-py==0.3.2; extra == "complete-api"
- Requires-Dist: taoswswrap~=0.3.4; extra == "complete-api"
+ Requires-Dist: taoswswrap~=0.3.5; extra == "complete-api"
  Requires-Dist: timelength~=1.1; extra == "complete-api"
  Requires-Dist: uvicorn~=0.32.1; extra == "complete-api"
  Dynamic: author
mlrun-1.9.0rc1.dist-info/RECORD → mlrun-1.9.0rc2.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
  mlrun/__init__.py,sha256=Cqm9U9eCEdLpMejhU2BEhubu0mHL71igJJIwYa738EA,7450
  mlrun/__main__.py,sha256=0NDzPf9VFRO8KFfGgb8mkGUPIDS285aASV8Hbxs-ND0,45920
- mlrun/config.py,sha256=vjlKlex9TV1Gfgix4iOM_nzX5wN-YuV6xXPmh0JDWy4,71932
+ mlrun/config.py,sha256=JptQ5cano4qGdSt8sFiEMQpT96Wr9Rv4A9q_cyMJgOo,72127
  mlrun/errors.py,sha256=LkcbXTLANGdsgo2CRX2pdbyNmt--lMsjGv0XZMgP-Nc,8222
  mlrun/execution.py,sha256=FUktsD3puSFjc3LZJU35b-OmFBrBPBNntViCLQVuwnk,50008
  mlrun/features.py,sha256=ReBaNGsBYXqcbgI012n-SO_j6oHIbk_Vpv0CGPXbUmo,15842
@@ -95,11 +95,11 @@ mlrun/datastore/inmem.py,sha256=IsM83nn-3CqmGdLzim7i9ZmJwG6ZGhBZGN6_hszWZnE,2951
  mlrun/datastore/redis.py,sha256=QeNMkSz3zQXiXZhFUZcEtViqqbUysGJditbqe5M-J48,5682
  mlrun/datastore/s3.py,sha256=lD4Fs69rwMeISovZzOxRdz_z9FuffysTdjJA9ybdnLA,9262
  mlrun/datastore/snowflake_utils.py,sha256=Wohvnlmq8j1d98RCaknll-iWdZZpSlCrKhUOEy0_-CA,1483
- mlrun/datastore/sources.py,sha256=KQp1nNN7TcaewFm3It03H1R28uzlWGZDDHJyqiT--vw,49062
+ mlrun/datastore/sources.py,sha256=juPTIDpxHxbRBoTMPEG1V-6bgR3E3ufCir-Dnq_SFyg,48975
  mlrun/datastore/spark_udf.py,sha256=NnnB3DZxZb-rqpRy7b-NC7QWXuuqFn3XkBDc86tU4mQ,1498
  mlrun/datastore/spark_utils.py,sha256=_AsVoU5Ix_-W7Gyq8io8V-2GTk0m8THJNDP3WGGaWJY,2865
  mlrun/datastore/store_resources.py,sha256=PFOMrZ6KH6hBOb0PiO-cHx_kv0UpHu5P2t8_mrR-lS4,6842
- mlrun/datastore/storeytargets.py,sha256=g5zAdizdFkcESoVGxbKWC11ZiXFgM77UL4642G32JaU,6459
+ mlrun/datastore/storeytargets.py,sha256=dSy9wr4IyxrIE1GHBxzVEeEY1sdU66s4w-oUuaIfa2U,6620
  mlrun/datastore/targets.py,sha256=7qLf26BDH3qYTHOR7TSP0tUMPBhYOkaaOwffUBxgqY0,81201
  mlrun/datastore/utils.py,sha256=CbKbDI6CdFRCqyAXe-jykVvN_GH6R0JkxIQFAogR2GA,10604
  mlrun/datastore/v3io.py,sha256=QSYBORRLcJTeM9mt0EaWzyLcdmzrPkqrF7k5uLTam5U,8209
@@ -219,20 +219,20 @@ mlrun/launcher/local.py,sha256=775HY-8S9LFUX5ubGXrLO0N1lVh8bn-DHFmNYuNqQPA,11451
  mlrun/launcher/remote.py,sha256=rLJW4UAnUT5iUb4BsGBOAV3K4R29a0X4lFtRkVKlyYU,7709
  mlrun/model_monitoring/__init__.py,sha256=ELy7njEtZnz09Dc6PGZSFFEGtnwI15bJNWM3Pj4_YIs,753
  mlrun/model_monitoring/api.py,sha256=LU58dzE4QZiMH23lgiqfI__3m2E3eEZP-DQe2ioUSwM,28317
- mlrun/model_monitoring/controller.py,sha256=fpCfM2wrd9sk-GzmTsqqf1jL1DNgvQhseRRqI-MvMFU,36803
+ mlrun/model_monitoring/controller.py,sha256=m4Zx_NQ0C-A7WtjBoXnqBmS11RRtLvBaFgbFbIgrdVc,36847
  mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
  mlrun/model_monitoring/helpers.py,sha256=8QsoYRPOVSnR3Lcv99m4XYrp_cR6hSqBUflYSOkJmFQ,21019
- mlrun/model_monitoring/stream_processing.py,sha256=4M0H4txMlsC2Q5iKTPp992KWoNPAJjPHj9rqWhXbl8w,33321
+ mlrun/model_monitoring/stream_processing.py,sha256=CEVcIFJ0jYvkIMu8hKsIH0HEkrn5l_NoHsxNLk72D5E,33583
  mlrun/model_monitoring/tracking_policy.py,sha256=PBIGrUYWrwcE5gwXupBIVzOb0QRRwPJsgQm_yLGQxB4,5595
  mlrun/model_monitoring/writer.py,sha256=ibbhvfSHb8Reqlb7RGFEAUNM4iTyK1gk8-2m46mP6VM,8428
  mlrun/model_monitoring/applications/__init__.py,sha256=xDBxkBjl-whHSG_4t1mLkxiypLH-fzn8TmAW9Mjo2uI,759
  mlrun/model_monitoring/applications/_application_steps.py,sha256=PxULZznKW66Oq-fKaraOAbsTuGnV0zgXh6_91wX3KUo,8367
- mlrun/model_monitoring/applications/base.py,sha256=7XL12idItWkoE3CJ_48F6cwVx5pJH3bgfG92hb8LcN8,24872
- mlrun/model_monitoring/applications/context.py,sha256=DKUDOfN4iY5wpOMjfsarx4pVN9A1sORyu7y2EEKEvMs,16964
- mlrun/model_monitoring/applications/histogram_data_drift.py,sha256=09t0tfC35W0SeJA3fzN29pJiB6G-V_8GlcvULVq6H9Q,15179
+ mlrun/model_monitoring/applications/base.py,sha256=f73LycKUG85invl6l7V4MRiRd1bx8jmepayrpwpr3c0,25131
+ mlrun/model_monitoring/applications/context.py,sha256=VfyPCIdO4z73uqFcJs87jzSI4PatX5N5Xicg8Ye1Bag,16968
+ mlrun/model_monitoring/applications/histogram_data_drift.py,sha256=2qgfFmrpHf-x0_EaHD-0T28piwSQzw-HH71aV1GwbZs,15389
  mlrun/model_monitoring/applications/results.py,sha256=_qmj6TWT0SR2bi7gUyRKBU418eGgGoLW2_hTJ7S-ock,5782
  mlrun/model_monitoring/applications/evidently/__init__.py,sha256=-DqdPnBSrjZhFvKOu_Ie3MiFvlur9sPTZpZ1u0_1AE8,690
- mlrun/model_monitoring/applications/evidently/base.py,sha256=_n_2CCQL-fC6hGUZSCLZxZuvXqMqjDHSFX0Giok8HZw,6793
+ mlrun/model_monitoring/applications/evidently/base.py,sha256=3loXT5gOn5v7j2CiXDQnA0cQXroItBSso6aU0t_pzBs,7851
  mlrun/model_monitoring/db/__init__.py,sha256=r47xPGZpIfMuv8J3PQCZTSqVPMhUta4sSJCZFKcS7FM,644
  mlrun/model_monitoring/db/_schedules.py,sha256=RWn4wtKsIXg668gMLpxO9I8GlkxvPSaA5y7w-wFDcgE,9048
  mlrun/model_monitoring/db/_stats.py,sha256=VVMWLMqG3Us3ozBkLaokJF22Ewv8WKmVE1-OvS_g9vA,6943
@@ -242,7 +242,7 @@ mlrun/model_monitoring/db/tsdb/helpers.py,sha256=0oUXc4aUkYtP2SGP6jTb3uPPKImIUsV
  mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4sRatmSVA4CQiJs7J5dcVBkQ,620
  mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=EslhaR65jfeNdD5Ibk-3Hb4e5r5qYPfHb9rTChX3sG0,12689
  mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Uadj0UvAmln2MxDWod-kAzau1uNlqZh981rPhbUH_5M,2857
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=5jgimfu2-omy8Cnnby7GpgB_MWEp9mmLX0zpbGC2JZ8,37934
+ mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=rpE-RB5sqe5GAkd9MZB4JFzSP1skxs0loC5hhAkNgKk,38747
  mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
  mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=_-zo9relCDtjGgievxAcAP9gVN9nDWs8BzGtFwTjb9M,6284
  mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=IzdThNwWMBWo0D0VzXV-WVvGg-z7Y9e8ke8_LYJTeVA,46214
@@ -292,9 +292,9 @@ mlrun/runtimes/mpijob/abstract.py,sha256=JGMjcJ4dvpJbctF6psU9UvYyNCutMxTMgBQeTlz
  mlrun/runtimes/mpijob/v1.py,sha256=1XQZC7AIMGX_AQCbApcwpH8I7y39-v0v2O35MvxjXoo,3213
  mlrun/runtimes/nuclio/__init__.py,sha256=gx1kizzKv8pGT5TNloN1js1hdbxqDw3rM90sLVYVffY,794
  mlrun/runtimes/nuclio/api_gateway.py,sha256=vH9ClKVP4Mb24rvA67xPuAvAhX-gAv6vVtjVxyplhdc,26969
- mlrun/runtimes/nuclio/function.py,sha256=1EFdGFqlyEfPUVK4Rhh8zWUrff7MNKaHrg7V-bejewg,54618
+ mlrun/runtimes/nuclio/function.py,sha256=8wzAFYCpSs0KoGPSN6DC19smSfYh8dVqDUhpicr8sJ0,54540
  mlrun/runtimes/nuclio/nuclio.py,sha256=sLK8KdGO1LbftlL3HqPZlFOFTAAuxJACZCVl1c0Ha6E,2942
- mlrun/runtimes/nuclio/serving.py,sha256=qetAyl-nfn8SWp7KyNgRtMNUVcX_q75SY9dLZP0uH6o,33365
+ mlrun/runtimes/nuclio/serving.py,sha256=d0nzPALUYXO4fKFFhxW3hY-_NU-ZhBLWXa2vWIetBRI,33434
  mlrun/runtimes/nuclio/application/__init__.py,sha256=rRs5vasy_G9IyoTpYIjYDafGoL6ifFBKgBtsXn31Atw,614
  mlrun/runtimes/nuclio/application/application.py,sha256=VPX-ruYQJ7-7yd5c2sWdF4U5JCGSS3kYjUfOgev6l_Y,29186
  mlrun/runtimes/nuclio/application/reverse_proxy.go,sha256=lEHH74vr2PridIHp1Jkc_NjkrWb5b6zawRrNxHQhwGU,2913
@@ -306,7 +306,7 @@ mlrun/serving/remote.py,sha256=gxJkj_J3j-sZcVUbUzbAmJafP6t6y4NVFsu0kWmYngA,18818
  mlrun/serving/routers.py,sha256=SY6AsaiSnh8ssXq8hQE2z9MYapOxFOFJBx9QomiZMO8,53915
  mlrun/serving/server.py,sha256=KiNhW0nTV5STZPzR6kEAUFVzCCAX8qv0g9AoCopARrM,23429
  mlrun/serving/serving_wrapper.py,sha256=R670-S6PX_d5ER6jiHtRvacuPyFzQH0mEf2K0sBIIOM,836
- mlrun/serving/states.py,sha256=UWiE85MB_SK3rgzWgNqQU2MKeyN2yF2BCvMcMAqLMTs,73247
+ mlrun/serving/states.py,sha256=Hh3FBoQbHoO4KiofHfSwi_aUx7mQ26iXpKijcGiDJ6c,73341
  mlrun/serving/utils.py,sha256=k2EIYDWHUGkE-IBI6T0UNT32fw-KySsccIJM_LObI00,4171
  mlrun/serving/v1_serving.py,sha256=c6J_MtpE-Tqu00-6r4eJOCO6rUasHDal9W2eBIcrl50,11853
  mlrun/serving/v2_serving.py,sha256=b3C5Utv2_AOPrH_hPi3NarjNbAK3kRoeIfqMU4qNuUo,25362
@@ -321,7 +321,7 @@ mlrun/utils/azure_vault.py,sha256=IEFizrDGDbAaoWwDr1WoA88S_EZ0T--vjYtY-i0cvYQ,34
  mlrun/utils/clones.py,sha256=yXOeuLtgIiKZdmjeKK0Z_vIrH19ds5JuoJaCeDjhwOo,7516
  mlrun/utils/condition_evaluator.py,sha256=-nGfRmZzivn01rHTroiGY4rqEv8T1irMyhzxEei-sKc,1897
  mlrun/utils/db.py,sha256=blQgkWMfFH9lcN4sgJQcPQgEETz2Dl_zwbVA0SslpFg,2186
- mlrun/utils/helpers.py,sha256=FflaMvt_8zYDsW5zHG8s1WWda3CbERTCMJUGyziWIjg,74587
+ mlrun/utils/helpers.py,sha256=KCvosDXfFNpxnoRAj33BbmMsQqK4wz1Wfj0s9MwOvSY,76782
  mlrun/utils/http.py,sha256=t6FrXQstZm9xVVjxqIGiLzrwZNCR4CSienSOuVgNIcI,8706
  mlrun/utils/logger.py,sha256=RG0m1rx6gfkJ-2C1r_p41MMpPiaDYqaYM2lYHDlNZEU,14767
  mlrun/utils/regex.py,sha256=jbR7IiOp6OO0mg9Fl_cVZCpWb9fL9nTPONCUxCDNWXg,5201
@@ -340,11 +340,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
  mlrun/utils/notifications/notification/slack.py,sha256=eQvmctTh6wIG5xVOesLLV9S1-UUCu5UEQ9JIJOor3ts,7183
  mlrun/utils/notifications/notification/webhook.py,sha256=NeyIMSBojjjTJaUHmPbxMByp34GxYkl1-16NqzU27fU,4943
  mlrun/utils/version/__init__.py,sha256=7kkrB7hEZ3cLXoWj1kPoDwo4MaswsI2JVOBpbKgPAgc,614
- mlrun/utils/version/version.json,sha256=WlaePndVtYxwhDSf-ETGiiy3I87M637FDZBAAl1SjvM,88
+ mlrun/utils/version/version.json,sha256=tuXJq2nSzgMkR_1BOzctty9vrj-bvCoa4ec-YhoGWco,88
  mlrun/utils/version/version.py,sha256=eEW0tqIAkU9Xifxv8Z9_qsYnNhn3YH7NRAfM-pPLt1g,1878
- mlrun-1.9.0rc1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- mlrun-1.9.0rc1.dist-info/METADATA,sha256=sRPSncr_i_JhtIKIVROkuR5UMqd6VMEcav64CqOf1_0,26078
- mlrun-1.9.0rc1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- mlrun-1.9.0rc1.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
- mlrun-1.9.0rc1.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
- mlrun-1.9.0rc1.dist-info/RECORD,,
+ mlrun-1.9.0rc2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ mlrun-1.9.0rc2.dist-info/METADATA,sha256=pkNMIypdTNvy1-_p1nJ_KsfFG2DbtD5mRNA0BpuMM-U,25916
+ mlrun-1.9.0rc2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ mlrun-1.9.0rc2.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
+ mlrun-1.9.0rc2.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
+ mlrun-1.9.0rc2.dist-info/RECORD,,