mlrun 1.10.0rc30__py3-none-any.whl → 1.10.0rc31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.
@@ -41,7 +41,7 @@ class LLMPromptArtifactSpec(ArtifactSpec):
  prompt_template: Optional[list[dict]] = None,
  prompt_path: Optional[str] = None,
  prompt_legend: Optional[dict] = None,
- model_configuration: Optional[dict] = None,
+ invocation_config: Optional[dict] = None,
  description: Optional[str] = None,
  target_path: Optional[str] = None,
  **kwargs,
@@ -68,13 +68,11 @@ class LLMPromptArtifactSpec(ArtifactSpec):

  self.prompt_template = prompt_template
  self.prompt_legend = prompt_legend
- if model_configuration is not None and not isinstance(
- model_configuration, dict
- ):
+ if invocation_config is not None and not isinstance(invocation_config, dict):
  raise mlrun.errors.MLRunInvalidArgumentError(
  "LLMPromptArtifact model_configuration must be a dictionary or None"
  )
- self.model_configuration = model_configuration or {}
+ self.model_configuration = invocation_config or {}
  self.description = description
  self._model_artifact = (
  model_artifact
@@ -177,7 +175,7 @@ class LLMPromptArtifact(Artifact):
  prompt_template: Optional[list[dict]] = None,
  prompt_path: Optional[str] = None,
  prompt_legend: Optional[dict] = None,
- model_configuration: Optional[dict] = None,
+ invocation_config: Optional[dict] = None,
  description: Optional[str] = None,
  target_path=None,
  **kwargs,
@@ -187,7 +185,7 @@ class LLMPromptArtifact(Artifact):
  prompt_path=prompt_path,
  prompt_legend=prompt_legend,
  model_artifact=model_artifact,
- model_configuration=model_configuration,
+ invocation_config=invocation_config,
  target_path=target_path,
  description=description,
  )
mlrun/common/schemas/model_monitoring/constants.py CHANGED
@@ -196,6 +196,10 @@ class WriterEventKind(MonitoringStrEnum):
  RESULT = "result"
  STATS = "stats"

+ @classmethod
+ def user_app_outputs(cls):
+ return [cls.METRIC, cls.RESULT]
+

  class ControllerEvent(MonitoringStrEnum):
  KIND = "kind"
@@ -304,6 +308,7 @@ class FileTargetKind:
  MONITORING_APPLICATION = "monitoring_application"
  ERRORS = "errors"
  STATS = "stats"
+ PARQUET_STATS = "parquet_stats"
  LAST_REQUEST = "last_request"

mlrun/config.py CHANGED
@@ -649,6 +649,13 @@ default_config = {
  "max_replicas": 1,
  },
  },
+ "writer_graph": {
+ "max_events": 1000,
+ "flush_after_seconds": 30,
+ "writer_version": "v1", # v1 is the sync version while v2 is async
+ "parquet_batching_max_events": 10,
+ "parquet_batching_timeout_secs": 30,
+ },
  # Store prefixes are used to handle model monitoring storing policies based on project and kind, such as events,
  # stream, and endpoints.
  "store_prefixes": {
mlrun/execution.py CHANGED
@@ -917,7 +917,7 @@ class MLClientCtx:
  prompt_path: Optional[str] = None,
  prompt_legend: Optional[dict] = None,
  model_artifact: Union[ModelArtifact, str] = None,
- model_configuration: Optional[dict] = None,
+ invocation_config: Optional[dict] = None,
  description: Optional[str] = None,
  target_path: Optional[str] = None,
  artifact_path: Optional[str] = None,
@@ -997,7 +997,7 @@ class MLClientCtx:
  with the place-holder name. "description" will point to explanation of what that placeholder represents.
  Useful for documenting and clarifying dynamic parts of the prompt.
  :param model_artifact: Reference to the parent model (either `ModelArtifact` or model URI string).
- :param model_configuration: Dictionary of generation parameters (e.g., temperature, max_tokens).
+ :param invocation_config: Dictionary of generation parameters (e.g., temperature, max_tokens).
  :param description: Optional description of the prompt.
  :param target_path: Absolute target path (instead of using artifact_path + local_path)
  :param artifact_path: Target artifact path (when not using the default)
@@ -1023,7 +1023,7 @@ class MLClientCtx:
  prompt_path=prompt_path,
  prompt_legend=prompt_legend,
  model_artifact=model_artifact,
- model_configuration=model_configuration,
+ invocation_config=invocation_config,
  target_path=target_path,
  description=description,
  **kwargs,
mlrun/model_monitoring/db/_stats.py CHANGED
@@ -13,6 +13,7 @@
  # limitations under the License.
  import abc
  import json
+ import typing
  from abc import abstractmethod
  from datetime import datetime, timezone
  from typing import cast
@@ -73,7 +74,7 @@ class ModelMonitoringStatsFile(abc.ABC):
  path=self._item.url,
  )

- def read(self) -> tuple[dict, datetime]:
+ def read(self) -> tuple[dict, typing.Optional[datetime]]:
  """
  Read the stats data and timestamp saved in file
  :return: tuple[dict, str] dictionary with stats data and timestamp saved in file
@@ -99,13 +100,13 @@ class ModelMonitoringStatsFile(abc.ABC):
  ):
  raise

- logger.exception(
+ logger.warning(
  "The Stats file was not found. It should have been created "
  "as a part of the model endpoint's creation",
  path=self._path,
  error=err,
  )
- raise
+ return {}, None

  def write(self, stats: dict, timestamp: datetime) -> None:
  """
mlrun/model_monitoring/db/tsdb/base.py CHANGED
@@ -60,6 +60,16 @@ class TSDBConnector(ABC):
  """
  pass

+ def apply_writer_steps(self, graph, after, **kwargs) -> None:
+ """
+ Apply TSDB steps on the provided writer graph. Throughout these steps, the graph stores metrics / results.
+ This data is being used by mlrun UI and the monitoring dashboards in grafana.
+ There are 2 different key metric dictionaries that are being generated throughout these steps:
+ - metrics (user-defined metrics) - model monitoring application metrics
+ - results (user-defined results) - model monitoring application results
+ """
+ pass
+
  @abstractmethod
  def handle_model_error(self, graph, **kwargs) -> None:
  """
@@ -783,3 +793,6 @@ class TSDBConnector(ABC):
  )
  )
  return mm_schemas.ModelEndpointDriftValues(values=values)
+
+ def add_pre_writer_steps(self, graph, after):
+ return None
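apply_writer_steps and add_pre_writer_steps are new writer-graph hooks with no-op defaults, so connectors opt in by overriding them. A hedged sketch of a hypothetical connector doing so; the class, module path, and target step are illustrative and not part of mlrun, while the outlet names match the writer's KindChoice routing:

    from mlrun.model_monitoring.db import TSDBConnector

    class MyTSDBConnector(TSDBConnector):
        # (the other abstract methods of TSDBConnector are omitted in this sketch)

        def add_pre_writer_steps(self, graph, after):
            # optional preprocessing before the kind-routing step; return the added step
            return graph.add_step("my_pkg.PrepareEvent", name="PrepareEvent", after=after)

        def apply_writer_steps(self, graph, after, **kwargs) -> None:
            # one target per user-app output kind, named to match the KindChoice outlets
            graph.add_step("my_pkg.MyTSDBTarget", name="tsdb_metrics", after=after)
            graph.add_step("my_pkg.MyTSDBTarget", name="tsdb_app_results", after=after)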
mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py CHANGED
@@ -22,6 +22,7 @@ import taosws
  import mlrun.common.schemas.model_monitoring as mm_schemas
  import mlrun.common.types
  import mlrun.model_monitoring.db.tsdb.tdengine.schemas as tdengine_schemas
+ from mlrun.config import config
  from mlrun.datastore.datastore_profile import DatastoreProfile
  from mlrun.model_monitoring.db import TSDBConnector
  from mlrun.model_monitoring.db.tsdb.tdengine.tdengine_connection import (
@@ -277,6 +278,65 @@ class TDEngineConnector(TSDBConnector):
  after="ProcessBeforeTDEngine",
  )

+ def add_pre_writer_steps(self, graph, after):
+ return graph.add_step(
+ "mlrun.model_monitoring.db.tsdb.tdengine.writer_graph_steps.ProcessBeforeTDEngine",
+ name="ProcessBeforeTDEngine",
+ after=after,
+ )
+
+ def apply_writer_steps(self, graph, after, **kwargs) -> None:
+ graph.add_step(
+ "mlrun.datastore.storeytargets.TDEngineStoreyTarget",
+ name="tsdb_metrics",
+ after=after,
+ url=f"ds://{self._tdengine_connection_profile.name}",
+ supertable=self.tables[mm_schemas.TDEngineSuperTables.METRICS].super_table,
+ table_col=mm_schemas.EventFieldType.TABLE_COLUMN,
+ time_col=mm_schemas.WriterEvent.END_INFER_TIME,
+ database=self.database,
+ graph_shape="cylinder",
+ columns=[
+ mm_schemas.WriterEvent.START_INFER_TIME,
+ mm_schemas.MetricData.METRIC_VALUE,
+ ],
+ tag_cols=[
+ mm_schemas.WriterEvent.ENDPOINT_ID,
+ mm_schemas.WriterEvent.APPLICATION_NAME,
+ mm_schemas.MetricData.METRIC_NAME,
+ ],
+ max_events=config.model_endpoint_monitoring.writer_graph.max_events,
+ flush_after_seconds=config.model_endpoint_monitoring.writer_graph.flush_after_seconds,
+ )
+
+ graph.add_step(
+ "mlrun.datastore.storeytargets.TDEngineStoreyTarget",
+ name="tsdb_app_results",
+ after=after,
+ url=f"ds://{self._tdengine_connection_profile.name}",
+ supertable=self.tables[
+ mm_schemas.TDEngineSuperTables.APP_RESULTS
+ ].super_table,
+ table_col=mm_schemas.EventFieldType.TABLE_COLUMN,
+ time_col=mm_schemas.WriterEvent.END_INFER_TIME,
+ database=self.database,
+ graph_shape="cylinder",
+ columns=[
+ mm_schemas.WriterEvent.START_INFER_TIME,
+ mm_schemas.ResultData.RESULT_VALUE,
+ mm_schemas.ResultData.RESULT_STATUS,
+ mm_schemas.ResultData.RESULT_EXTRA_DATA,
+ ],
+ tag_cols=[
+ mm_schemas.WriterEvent.ENDPOINT_ID,
+ mm_schemas.WriterEvent.APPLICATION_NAME,
+ mm_schemas.ResultData.RESULT_NAME,
+ mm_schemas.ResultData.RESULT_KIND,
+ ],
+ max_events=config.model_endpoint_monitoring.writer_graph.max_events,
+ flush_after_seconds=config.model_endpoint_monitoring.writer_graph.flush_after_seconds,
+ )
+
  def handle_model_error(
  self,
  graph,
mlrun/model_monitoring/db/tsdb/tdengine/writer_graph_steps.py ADDED
@@ -0,0 +1,51 @@
+ # Copyright 2025 Iguazio
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from datetime import datetime
+
+ import mlrun.common.schemas.model_monitoring as mm_schemas
+ import mlrun.feature_store.steps
+ from mlrun.utils import logger
+
+
+ class ProcessBeforeTDEngine(mlrun.feature_store.steps.MapClass):
+ def __init__(self, **kwargs):
+ """
+ Process the data before writing to TDEngine. This step create the table name.
+
+ :returns: Event as a dictionary which will be written into the TDEngine Metrics/Results tables.
+ """
+ super().__init__(**kwargs)
+
+ def do(self, event):
+ logger.info("Process event before writing to TDEngine", event=event)
+ kind = event.get("kind")
+ table_name = (
+ f"{event[mm_schemas.WriterEvent.ENDPOINT_ID]}_"
+ f"{event[mm_schemas.WriterEvent.APPLICATION_NAME]}"
+ )
+ if kind == mm_schemas.WriterEventKind.RESULT:
+ # Write a new result
+ event[mm_schemas.EventFieldType.TABLE_COLUMN] = (
+ f"{table_name}_{event[mm_schemas.ResultData.RESULT_NAME]}"
+ ).replace("-", "_")
+ elif kind == mm_schemas.WriterEventKind.METRIC:
+ # Write a new metric
+ event[mm_schemas.EventFieldType.TABLE_COLUMN] = (
+ f"{table_name}_{event[mm_schemas.MetricData.METRIC_NAME]}"
+ ).replace("-", "_")
+ event[mm_schemas.WriterEvent.START_INFER_TIME] = datetime.fromisoformat(
+ event[mm_schemas.WriterEvent.START_INFER_TIME]
+ )
+ return event
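ProcessBeforeTDEngine builds the TDEngine sub-table name from the endpoint id, the application name, and the metric or result name, replaces dashes with underscores, and parses the start-infer time. A hedged sketch of what it produces for a sample result event (field values are invented; the keys reuse the same schema constants as the step):

    import mlrun.common.schemas.model_monitoring as mm_schemas
    from mlrun.model_monitoring.db.tsdb.tdengine.writer_graph_steps import ProcessBeforeTDEngine

    event = {
        mm_schemas.WriterEvent.ENDPOINT_ID: "ep-1234",
        mm_schemas.WriterEvent.APPLICATION_NAME: "drift-app",
        mm_schemas.ResultData.RESULT_NAME: "data-drift",
        mm_schemas.WriterEvent.START_INFER_TIME: "2025-01-01T00:00:00+00:00",
        "kind": mm_schemas.WriterEventKind.RESULT,
    }
    out = ProcessBeforeTDEngine().do(event)
    # out[mm_schemas.EventFieldType.TABLE_COLUMN] == "ep_1234_drift_app_data_drift"
    # out[mm_schemas.WriterEvent.START_INFER_TIME] is now a datetime object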
mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py CHANGED
@@ -25,6 +25,7 @@ import mlrun.common.schemas.model_monitoring as mm_schemas
  import mlrun.feature_store.steps
  import mlrun.utils.v3io_clients
  from mlrun.common.schemas import EventFieldType
+ from mlrun.config import config
  from mlrun.model_monitoring.db import TSDBConnector
  from mlrun.model_monitoring.helpers import get_invocations_fqn, get_start_end
  from mlrun.utils import logger
@@ -369,6 +370,49 @@ class V3IOTSDBConnector(TSDBConnector):
  apply_storey_filter()
  apply_tsdb_target(name="tsdb3", after="FilterNotNone")

+ def apply_writer_steps(self, graph, after, **kwargs) -> None:
+ graph.add_step(
+ "storey.TSDBTarget",
+ name="tsdb_metrics",
+ after=after,
+ path=f"{self.container}/{self.tables[mm_schemas.V3IOTSDBTables.METRICS]}",
+ time_col=mm_schemas.WriterEvent.END_INFER_TIME,
+ container=self.container,
+ v3io_frames=self.v3io_framesd,
+ infer_columns_from_data=True,
+ graph_shape="cylinder",
+ index_cols=[
+ mm_schemas.WriterEvent.APPLICATION_NAME,
+ mm_schemas.WriterEvent.ENDPOINT_NAME,
+ mm_schemas.WriterEvent.ENDPOINT_ID,
+ mm_schemas.MetricData.METRIC_NAME,
+ ],
+ max_events=config.model_endpoint_monitoring.writer_graph.max_events,
+ flush_after_seconds=config.model_endpoint_monitoring.writer_graph.flush_after_seconds,
+ key=mm_schemas.EventFieldType.ENDPOINT_ID,
+ )
+
+ graph.add_step(
+ "storey.TSDBTarget",
+ name="tsdb_app_results",
+ after=after,
+ path=f"{self.container}/{self.tables[mm_schemas.V3IOTSDBTables.APP_RESULTS]}",
+ time_col=mm_schemas.WriterEvent.END_INFER_TIME,
+ container=self.container,
+ v3io_frames=self.v3io_framesd,
+ infer_columns_from_data=True,
+ graph_shape="cylinder",
+ index_cols=[
+ mm_schemas.WriterEvent.APPLICATION_NAME,
+ mm_schemas.WriterEvent.ENDPOINT_NAME,
+ mm_schemas.WriterEvent.ENDPOINT_ID,
+ mm_schemas.ResultData.RESULT_NAME,
+ ],
+ max_events=config.model_endpoint_monitoring.writer_graph.max_events,
+ flush_after_seconds=config.model_endpoint_monitoring.writer_graph.flush_after_seconds,
+ key=mm_schemas.EventFieldType.ENDPOINT_ID,
+ )
+
  def handle_model_error(
  self,
  graph,
mlrun/model_monitoring/writer.py CHANGED
@@ -13,9 +13,12 @@
  # limitations under the License.

  import json
+ import typing
  from datetime import datetime, timezone
  from typing import Any, Callable, NewType, Optional

+ import storey
+
  import mlrun.common.model_monitoring
  import mlrun.common.schemas
  import mlrun.common.schemas.alert as alert_objects
@@ -31,6 +34,8 @@ from mlrun.common.schemas.model_monitoring.constants import (
  WriterEvent,
  WriterEventKind,
  )
+ from mlrun.config import config
+ from mlrun.model_monitoring.db import TSDBConnector
  from mlrun.model_monitoring.db._stats import (
  ModelMonitoringCurrentStatsFile,
  ModelMonitoringDriftMeasuresFile,
@@ -73,7 +78,6 @@ class ModelMonitoringWriter(StepToDict):
  self._tsdb_connector = mlrun.model_monitoring.get_tsdb_connector(
  project=self.project, secret_provider=secret_provider
  )
- self._endpoints_records = {}

  def _generate_event_on_drift(
  self,
@@ -226,3 +230,218 @@ class ModelMonitoringWriter(StepToDict):
  )

  logger.info("Model monitoring writer finished handling event")
+
+
+ class WriterGraphFactory:
+ def __init__(
+ self,
+ parquet_path: str,
+ ):
+ self.parquet_path = parquet_path
+ self.parquet_batching_max_events = (
+ config.model_endpoint_monitoring.writer_graph.max_events
+ )
+ self.parquet_batching_timeout_secs = (
+ config.model_endpoint_monitoring.writer_graph.parquet_batching_timeout_secs
+ )
+
+ def apply_writer_graph(
+ self,
+ fn: mlrun.runtimes.ServingRuntime,
+ tsdb_connector: TSDBConnector,
+ ):
+ graph = typing.cast(
+ mlrun.serving.states.RootFlowStep,
+ fn.set_topology(mlrun.serving.states.StepKinds.flow, engine="async"),
+ )
+
+ graph.to("ReconstructWriterEvent", "event_reconstructor")
+ step = tsdb_connector.add_pre_writer_steps(
+ graph=graph, after="event_reconstructor"
+ )
+ before_choice = step.name if step else "event_reconstructor"
+ graph.add_step("KindChoice", "kind_choice_step", after=before_choice)
+ tsdb_connector.apply_writer_steps(
+ graph=graph,
+ after="kind_choice_step",
+ )
+ graph.add_step(
+ "AlertGenerator",
+ "alert_generator",
+ after="kind_choice_step",
+ project=fn.metadata.project,
+ )
+ graph.add_step(
+ "storey.Filter",
+ name="filter_none",
+ _fn="(event is not None)",
+ after="alert_generator",
+ )
+ graph.add_step(
+ "mlrun.serving.remote.MLRunAPIRemoteStep",
+ name="alert_generator_api_call",
+ after="filter_none",
+ method="POST",
+ path=f"projects/{fn.metadata.project}/events/{{kind}}",
+ fill_placeholders=True,
+ )
+
+ graph.add_step(
+ "mlrun.datastore.storeytargets.ParquetStoreyTarget",
+ alternative_v3io_access_key=mlrun.common.schemas.model_monitoring.ProjectSecretKeys.ACCESS_KEY,
+ name="stats_writer",
+ after="kind_choice_step",
+ graph_shape="cylinder",
+ path=self.parquet_path
+ if self.parquet_path.endswith("/")
+ else self.parquet_path + "/",
+ max_events=self.parquet_batching_max_events,
+ flush_after_seconds=self.parquet_batching_timeout_secs,
+ columns=[
+ StatsData.TIMESTAMP,
+ StatsData.STATS,
+ WriterEvent.ENDPOINT_ID,
+ StatsData.STATS_NAME,
+ ],
+ partition_cols=[WriterEvent.ENDPOINT_ID, StatsData.STATS_NAME],
+ single_file=True,
+ )
+
+
+ class ReconstructWriterEvent(storey.MapClass):
+ def __init__(self):
+ super().__init__()
+
+ def do(self, event: dict) -> dict[str, Any]:
+ logger.info("Reconstructing the event", event=event)
+ kind = event.pop(WriterEvent.EVENT_KIND, WriterEventKind.RESULT)
+ result_event = _AppResultEvent(json.loads(event.pop(WriterEvent.DATA, "{}")))
+ result_event.update(_AppResultEvent(event))
+
+ expected_keys = list(
+ set(WriterEvent.list()).difference(
+ [WriterEvent.EVENT_KIND, WriterEvent.DATA]
+ )
+ )
+ if kind == WriterEventKind.METRIC:
+ expected_keys.extend(MetricData.list())
+ elif kind == WriterEventKind.RESULT:
+ expected_keys.extend(ResultData.list())
+ elif kind == WriterEventKind.STATS:
+ expected_keys.extend(StatsData.list())
+ else:
+ raise _WriterEventValueError(
+ f"Unknown event kind: {kind}, expected one of: {WriterEventKind.list()}"
+ )
+ missing_keys = [key for key in expected_keys if key not in result_event]
+ if missing_keys:
+ raise _WriterEventValueError(
+ f"The received event misses some keys compared to the expected "
+ f"monitoring application event schema: {missing_keys} for event kind {kind}"
+ )
+ result_event["kind"] = kind
+ if kind in WriterEventKind.user_app_outputs():
+ result_event[WriterEvent.END_INFER_TIME] = datetime.fromisoformat(
+ event[WriterEvent.END_INFER_TIME]
+ )
+ if kind == WriterEventKind.STATS:
+ result_event[StatsData.STATS] = json.dumps(result_event[StatsData.STATS])
+ return result_event
+
+
+ class KindChoice(storey.Choice):
+ def select_outlets(self, event):
+ kind = event.get("kind")
+ logger.info("Selecting the outlet for the event", kind=kind)
+ if kind == WriterEventKind.METRIC:
+ outlets = ["tsdb_metrics"]
+ elif kind == WriterEventKind.RESULT:
+ outlets = ["tsdb_app_results", "alert_generator"]
+ elif kind == WriterEventKind.STATS:
+ outlets = ["stats_writer"]
+ else:
+ raise _WriterEventValueError(
+ f"Unknown event kind: {kind}, expected one of: {WriterEventKind.list()}"
+ )
+ return outlets
+
+
+ class AlertGenerator(storey.MapClass):
+ def __init__(self, project: str, **kwargs):
+ self.project = project
+ super().__init__(**kwargs)
+
+ def do(self, event: dict) -> Optional[dict[str, Any]]:
+ kind = event.pop(WriterEvent.EVENT_KIND, WriterEventKind.RESULT)
+ if (
+ mlrun.mlconf.alerts.mode == mlrun.common.schemas.alert.AlertsModes.enabled
+ and kind == WriterEventKind.RESULT
+ and (
+ event[ResultData.RESULT_STATUS] == ResultStatusApp.detected.value
+ or event[ResultData.RESULT_STATUS]
+ == ResultStatusApp.potential_detection.value
+ )
+ ):
+ event_value = {
+ "app_name": event[WriterEvent.APPLICATION_NAME],
+ "model": event[WriterEvent.ENDPOINT_NAME],
+ "model_endpoint_id": event[WriterEvent.ENDPOINT_ID],
+ "result_name": event[ResultData.RESULT_NAME],
+ "result_value": event[ResultData.RESULT_VALUE],
+ }
+ data = self._generate_event_data(
+ entity_id=get_result_instance_fqn(
+ event[WriterEvent.ENDPOINT_ID],
+ event[WriterEvent.APPLICATION_NAME],
+ event[ResultData.RESULT_NAME],
+ ),
+ result_status=event[ResultData.RESULT_STATUS],
+ event_value=event_value,
+ project_name=self.project,
+ result_kind=event[ResultData.RESULT_KIND],
+ )
+ event = data.dict()
+ logger.info("Generated alert event", event=event)
+ return event
+ return None
+
+ @staticmethod
+ def _generate_alert_event_kind(
+ result_kind: int, result_status: int
+ ) -> alert_objects.EventKind:
+ """Generate the required Event Kind format for the alerting system"""
+ event_kind = ResultKindApp(value=result_kind).name
+
+ if result_status == ResultStatusApp.detected.value:
+ event_kind = f"{event_kind}_detected"
+ else:
+ event_kind = f"{event_kind}_suspected"
+ return alert_objects.EventKind(
+ value=mlrun.utils.helpers.normalize_name(event_kind)
+ )
+
+ def _generate_event_data(
+ self,
+ entity_id: str,
+ result_status: int,
+ event_value: dict,
+ project_name: str,
+ result_kind: int,
+ ) -> mlrun.common.schemas.Event:
+ entity = mlrun.common.schemas.alert.EventEntities(
+ kind=alert_objects.EventEntityKind.MODEL_ENDPOINT_RESULT,
+ project=project_name,
+ ids=[entity_id],
+ )
+
+ event_kind = self._generate_alert_event_kind(
+ result_status=result_status, result_kind=result_kind
+ )
+
+ event_data = mlrun.common.schemas.Event(
+ kind=alert_objects.EventKind(value=event_kind),
+ entity=entity,
+ value_dict=event_value,
+ )
+
+ return event_data
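WriterGraphFactory wires the new asynchronous writer graph onto a serving function: reconstruct the incoming event, optionally run connector-specific preprocessing, route by kind, write metrics and results to the TSDB, turn detected results into alert API calls, and batch stats to parquet. A hedged sketch of applying it; the function name, image, project name, and parquet path are placeholders, and get_tsdb_connector may also need a secret_provider in practice:

    import mlrun
    import mlrun.model_monitoring
    from mlrun.model_monitoring.writer import WriterGraphFactory

    # a serving function to host the writer graph (name/image/project are placeholders)
    fn = mlrun.new_function(
        "model-monitoring-writer", project="my-project", kind="serving", image="mlrun/mlrun"
    )
    tsdb_connector = mlrun.model_monitoring.get_tsdb_connector(project="my-project")

    factory = WriterGraphFactory(parquet_path="v3io:///projects/my-project/monitoring-stats")
    factory.apply_writer_graph(fn=fn, tsdb_connector=tsdb_connector)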
mlrun/projects/project.py CHANGED
@@ -1888,7 +1888,7 @@ class MlrunProject(ModelObj):
  prompt_path: Optional[str] = None,
  prompt_legend: Optional[dict] = None,
  model_artifact: Union[ModelArtifact, str] = None,
- model_configuration: Optional[dict] = None,
+ invocation_config: Optional[dict] = None,
  description: Optional[str] = None,
  target_path: Optional[str] = None,
  artifact_path: Optional[str] = None,
@@ -1971,7 +1971,7 @@ class MlrunProject(ModelObj):
  with the place-holder name. "description" will point to explanation of what that placeholder represents.
  Useful for documenting and clarifying dynamic parts of the prompt.
  :param model_artifact: Reference to the parent model (either `ModelArtifact` or model URI string).
- :param model_configuration: Configuration dictionary for model generation parameters
+ :param invocation_config: Configuration dictionary for model generation parameters
  (e.g., temperature, max tokens).
  :param description: Optional description of the prompt.
  :param target_path: Absolute target path (instead of using artifact_path + local_path)
@@ -1998,7 +1998,7 @@ class MlrunProject(ModelObj):
  prompt_path=prompt_path,
  prompt_legend=prompt_legend,
  model_artifact=model_artifact,
- model_configuration=model_configuration,
+ invocation_config=invocation_config,
  target_path=target_path,
  description=description,
  **kwargs,
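Across MLClientCtx, MlrunProject, and the artifact classes, the model_configuration keyword is renamed to invocation_config. A hedged example of the new call shape; the helper name log_llm_prompt_artifact and the other argument values are assumptions (they are not visible in these hunks), while the keyword rename itself comes from the diff:

    project.log_llm_prompt_artifact(                   # hypothetical helper name
        "my-prompt",
        prompt_template=[{"role": "user", "content": "Summarize: {text}"}],
        prompt_legend={"text": {"description": "the document to summarize"}},
        model_artifact="store://models/my-project/llm-model#0:latest",
        invocation_config={"temperature": 0.2, "max_tokens": 256},  # was: model_configuration=...
    )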
mlrun/serving/remote.py CHANGED
@@ -23,10 +23,14 @@ import storey
  from storey.flow import _ConcurrentJobExecution

  import mlrun
+ import mlrun.common.schemas
  import mlrun.config
+ import mlrun.platforms
+ import mlrun.utils.async_http
  from mlrun.errors import err_to_str
- from mlrun.utils import logger
+ from mlrun.utils import dict_to_json, logger

+ from ..config import config
  from .utils import (
  _extract_input_data,
  _update_result_body,
@@ -73,7 +77,9 @@ class RemoteStep(storey.SendToHttp):

  :param url: http(s) url or function [project/]name to call
  :param subpath: path (which follows the url), use `$path` to use the event.path
- :param method: HTTP method (GET, POST, ..), default to POST
+ :param method: The HTTP method to use for the request (e.g., "GET", "POST", "PUT", "DELETE").
+ If not provided, the step will try to use `event.method` at runtime, and if that
+ is also missing, it defaults to `"POST"`.
  :param headers: dictionary with http header values
  :param url_expression: an expression for getting the url from the event, e.g. "event['url']"
  :param body_expression: an expression for getting the request body from the event, e.g. "event['data']"
@@ -150,8 +156,8 @@ class RemoteStep(storey.SendToHttp):
  async def _process_event(self, event):
  # async implementation (with storey)
  body = self._get_event_or_body(event)
- method, url, headers, body = self._generate_request(event, body)
- kwargs = {}
+ method, url, headers, body, kwargs = self._generate_request(event, body)
+ kwargs = kwargs or {}
  if self.timeout:
  kwargs["timeout"] = aiohttp.ClientTimeout(total=self.timeout)
  try:
@@ -191,7 +197,7 @@ class RemoteStep(storey.SendToHttp):
  )

  body = _extract_input_data(self._input_path, event.body)
- method, url, headers, body = self._generate_request(event, body)
+ method, url, headers, body, kwargs = self._generate_request(event, body)
  try:
  resp = self._session.request(
  method,
@@ -200,6 +206,7 @@ class RemoteStep(storey.SendToHttp):
  headers=headers,
  data=body,
  timeout=self.timeout,
+ **kwargs,
  )
  except requests.exceptions.ReadTimeout as err:
  raise requests.exceptions.ReadTimeout(
@@ -240,7 +247,7 @@ class RemoteStep(storey.SendToHttp):
  body = json.dumps(body)
  headers["Content-Type"] = "application/json"

- return method, url, headers, body
+ return method, url, headers, body, {}

  def _get_data(self, data, headers):
  if (
@@ -454,3 +461,69 @@ class BatchHttpRequests(_ConcurrentJobExecution):
  ) and isinstance(data, (str, bytes)):
  data = json.loads(data)
  return data
+
+
+ class MLRunAPIRemoteStep(RemoteStep):
+ def __init__(
+ self, method: str, path: str, fill_placeholders: Optional[bool] = None, **kwargs
+ ):
+ """
+ Graph step implementation for calling MLRun API endpoints
+
+ :param method: The HTTP method to use for the request (e.g., "GET", "POST", "PUT", "DELETE").
+ If not provided, the step will try to use `event.method` at runtime, and if that
+ is also missing, it defaults to `"POST"`.
+ :param path: API path (e.g. /api/projects)
+ :param fill_placeholders: if True, fill placeholders in the path using event fields (default to False)
+ :param kwargs: other arguments passed to RemoteStep
+ """
+ super().__init__(url="", method=method, **kwargs)
+ self.rundb = None
+ self.path = path
+ self.fill_placeholders = fill_placeholders
+
+ def _generate_request(self, event, body):
+ method = self.method or event.method or "POST"
+ kw = {
+ key: value
+ for key, value in (
+ ("params", body.get("params")),
+ ("json", body.get("json")),
+ )
+ if value is not None
+ }
+
+ headers = self.headers or {}
+ headers.update(body.get("headers", {}))
+
+ if self.rundb.user:
+ kw["auth"] = (self.rundb.user, self.rundb.password)
+ elif self.rundb.token_provider:
+ token = self.rundb.token_provider.get_token()
+ if token:
+ # Iguazio auth doesn't support passing token through bearer, so use cookie instead
+ if self.rundb.token_provider.is_iguazio_session():
+ session_cookie = f'session=j:{{"sid": "{token}"}}'
+ headers["cookie"] = session_cookie
+ else:
+ if "Authorization" not in kw.setdefault("headers", {}):
+ headers.update({"Authorization": "Bearer " + token})
+
+ if mlrun.common.schemas.HeaderNames.client_version not in headers:
+ headers.update(
+ {
+ mlrun.common.schemas.HeaderNames.client_version: self.rundb.client_version,
+ mlrun.common.schemas.HeaderNames.python_version: self.rundb.python_version,
+ "User-Agent": f"{requests.utils.default_user_agent()} mlrun/{config.version}",
+ }
+ )
+
+ url = self.url.format(**body) if self.fill_placeholders else self.url
+ headers["Content-Type"] = "application/json"
+ return method, url, headers, dict_to_json(body), kw
+
+ def post_init(self, mode="sync", **kwargs):
+ super().post_init(mode=mode, **kwargs)
+ self.fill_placeholders = self.fill_placeholders or False
+ self.rundb = mlrun.get_run_db()
+ self.url = self.rundb.get_base_api_url(self.path)
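MLRunAPIRemoteStep resolves the MLRun API base URL at post_init time and builds each request from the event body: optional "params", "json", and "headers" keys become request arguments, the whole body is serialized as the JSON payload, and with fill_placeholders=True the body fields fill placeholders in the configured path (as the writer graph does with projects/{project}/events/{kind}). A hedged sketch of an event body such a step would accept; the values are invented for illustration:

    # Illustrative event body for a step configured with
    # path="projects/my-project/events/{kind}" and fill_placeholders=True:
    event_body = {
        "kind": "data_drift_detected",  # fills the {kind} placeholder in the path
        "entity": {"project": "my-project", "ids": ["ep-1234.drift-app.data-drift"]},
        "value_dict": {"result_value": 0.9},
        # optional request overrides picked up by _generate_request:
        # "params": {...}, "json": {...}, "headers": {...},
    }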
mlrun/serving/states.py CHANGED
@@ -1338,7 +1338,7 @@ class LLModel(Model):
  self,
  body: Any,
  messages: Optional[list[dict]] = None,
- model_configuration: Optional[dict] = None,
+ invocation_config: Optional[dict] = None,
  **kwargs,
  ) -> Any:
  llm_prompt_artifact = kwargs.get("llm_prompt_artifact")
@@ -1349,12 +1349,12 @@ class LLModel(Model):
  "Invoking model provider",
  model_name=self.name,
  messages=messages,
- model_configuration=model_configuration,
+ model_configuration=invocation_config,
  )
  response_with_stats = self.model_provider.invoke(
  messages=messages,
  invoke_response_format=InvokeResponseFormat.USAGE,
- **(model_configuration or {}),
+ **(invocation_config or {}),
  )
  set_data_by_path(
  path=self._result_path, data=body, value=response_with_stats
@@ -1428,7 +1428,7 @@ class LLModel(Model):
  return self.predict(
  body,
  messages=messages,
- model_configuration=model_configuration,
+ invocation_config=model_configuration,
  llm_prompt_artifact=llm_prompt_artifact,
  )

mlrun/utils/version/version.json CHANGED
@@ -1,4 +1,4 @@
  {
- "git_commit": "24d624f98d65cb9ea9e542f897d15392ee00e687",
- "version": "1.10.0-rc30"
+ "git_commit": "7714a5aa9f89102e497a5746d9711ea97a7d20c8",
+ "version": "1.10.0-rc31"
  }
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mlrun
- Version: 1.10.0rc30
+ Version: 1.10.0rc31
  Summary: Tracking and config of machine learning runs
  Home-page: https://github.com/mlrun/mlrun
  Author: Yaron Haviv
@@ -44,15 +44,15 @@ Requires-Dist: semver~=3.0
  Requires-Dist: dependency-injector~=4.41
  Requires-Dist: fsspec<=2025.7.0,>=2025.5.1
  Requires-Dist: v3iofs~=0.1.17
- Requires-Dist: storey~=1.10.13
+ Requires-Dist: storey~=1.10.14
  Requires-Dist: inflection~=0.5.0
  Requires-Dist: python-dotenv~=1.0
  Requires-Dist: setuptools>=75.2
  Requires-Dist: deprecated~=1.2
  Requires-Dist: jinja2>=3.1.6,~=3.1
  Requires-Dist: orjson<4,>=3.9.15
- Requires-Dist: mlrun-pipelines-kfp-common~=0.5.8
- Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.7
+ Requires-Dist: mlrun-pipelines-kfp-common~=0.5.9
+ Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.8
  Requires-Dist: docstring_parser~=0.16
  Requires-Dist: aiosmtplib~=3.0
  Requires-Dist: deepdiff<9.0.0,>=8.6.1
@@ -123,7 +123,7 @@ Requires-Dist: timelength~=1.1; extra == "api"
  Requires-Dist: memray~=1.12; sys_platform != "win32" and extra == "api"
  Requires-Dist: aiosmtplib~=3.0; extra == "api"
  Requires-Dist: pydantic<2,>=1; extra == "api"
- Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.7; extra == "api"
+ Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.8; extra == "api"
  Provides-Extra: all
  Requires-Dist: adlfs==2024.12.0; extra == "all"
  Requires-Dist: aiobotocore<2.16,>=2.5.0; extra == "all"
@@ -215,7 +215,7 @@ Requires-Dist: igz-mgmt~=0.4.1; extra == "complete-api"
  Requires-Dist: kafka-python~=2.1.0; extra == "complete-api"
  Requires-Dist: memray~=1.12; sys_platform != "win32" and extra == "complete-api"
  Requires-Dist: mlflow~=2.22; extra == "complete-api"
- Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.7; extra == "complete-api"
+ Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.5.8; extra == "complete-api"
  Requires-Dist: msrest~=0.6.21; extra == "complete-api"
  Requires-Dist: objgraph~=3.6; extra == "complete-api"
  Requires-Dist: oss2==2.18.4; extra == "complete-api"
@@ -1,8 +1,8 @@
  mlrun/__init__.py,sha256=acM2jRv7RCvBROwucuC01Rf_HdvV3xUPtJlQtX_01MY,8076
  mlrun/__main__.py,sha256=wQNaxW7QsqFBtWffnPkw-497fnpsrQzUnscBQQAP_UM,48364
- mlrun/config.py,sha256=F1PDI88t2cFujGnDr4YslEBzG6SckBKjUSdkxFX3zUE,73149
+ mlrun/config.py,sha256=edvnwbZ2xlHwuRxy32SqzJyJE517zsWoduGYLO0zgGs,73433
  mlrun/errors.py,sha256=bAk0t_qmCxQSPNK0TugOAfA5R6f0G6OYvEvXUWSJ_5U,9062
- mlrun/execution.py,sha256=wkmT1k0QROgGJFMBIsYUsJaqEF2bkqaYVzp_ZQb527Q,58814
+ mlrun/execution.py,sha256=0NuuvXR2o3iAw1HiB3DZxOAy9xzSCw45AwASwTUK3I0,58806
  mlrun/features.py,sha256=jMEXo6NB36A6iaxNEJWzdtYwUmglYD90OIKTIEeWhE8,15841
  mlrun/k8s_utils.py,sha256=zIacVyvsXrXVO-DdxAoGQOGEDWOGJEFJzYPhPVnn3z8,24548
  mlrun/lists.py,sha256=OlaV2QIFUzmenad9kxNJ3k4whlDyxI3zFbGwr6vpC5Y,8561
@@ -17,7 +17,7 @@ mlrun/artifacts/base.py,sha256=6x_2KPMNOciiNNUsiKgJ-b6ejxAHm_Ro22xODLoTc44,28559
  mlrun/artifacts/dataset.py,sha256=bhb5Kfbs8P28yjnpN76th5lLEUl5nAqD4VqVzHEVPrM,16421
  mlrun/artifacts/document.py,sha256=p5HsWdmIIJ0NahS7y3EEQN2tfHtUrUmUG-8BEEyi_Jc,17373
  mlrun/artifacts/helpers.py,sha256=ejTEC9vkI2w5FHn5Gopw3VEIxuni0bazWUnR6BBWZfU,1662
- mlrun/artifacts/llm_prompt.py,sha256=QyWfQIOxizuekHWgdfl_Zexmpu63kCg_To4UwYy8KkI,9882
+ mlrun/artifacts/llm_prompt.py,sha256=zGTOeXOwjXkJaHZY99lU_TUSDOfyLfLCqCTN1vgJpQc,9846
  mlrun/artifacts/manager.py,sha256=_cDNCS7wwmFIsucJ2uOgHxZQECmIGb8Wye64b6oLgKU,16642
  mlrun/artifacts/model.py,sha256=9yU9NZlxxY_ifSyXOgMnPi_RMDmawY9A-rLi-_VJs4c,25662
  mlrun/artifacts/plots.py,sha256=wmaxVXiAPSCyn3M7pIlcBu9pP3O8lrq0Ewx6iHRDF9s,4238
@@ -74,7 +74,7 @@ mlrun/common/schemas/serving.py,sha256=4ek9JZDagkdeXyfkX6P6xp4deUNSf_kqXUaXcKSuv
  mlrun/common/schemas/tag.py,sha256=1wqEiAujsElojWb3qmuyfcaLFjXSNAAQdafkDx7fkn0,891
  mlrun/common/schemas/workflow.py,sha256=Y-FHJnxs5c86yetuOAPdEJPkne__tLPCxjSXSb4lrjo,2541
  mlrun/common/schemas/model_monitoring/__init__.py,sha256=FqFiFIDcylquQdY0XTBamB5kMzMrMFEpVYM_ecsVfLg,1925
- mlrun/common/schemas/model_monitoring/constants.py,sha256=6a5SPuKiducBqbITHLDucRbek30HakqZ7tJ-JaW6sKQ,13828
+ mlrun/common/schemas/model_monitoring/constants.py,sha256=uQ3ataL-tAcwGY1GQLEvu05gGgMb2kBr6YRzjJS6yYs,13953
  mlrun/common/schemas/model_monitoring/functions.py,sha256=Ej8ChjmMZq1HP32THNABoktQHN1mdlkSqKbofxu10i4,2536
  mlrun/common/schemas/model_monitoring/grafana.py,sha256=THQlLfPBevBksta8p5OaIsBaJtsNSXexLvHrDxOaVns,2095
  mlrun/common/schemas/model_monitoring/model_endpoints.py,sha256=aevkfKWRbRj2cxabeUrVka49lJ2SRDA7I8rD-Fihr2Q,13648
@@ -233,7 +233,7 @@ mlrun/model_monitoring/controller.py,sha256=2XOkOZRB03K9ph6TH-ICspHga-GQOURL0C8-
  mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
  mlrun/model_monitoring/helpers.py,sha256=50oFqgIc5xFHYPIVgq3M-Gbr7epqAI5NgHmvOeMy52U,24667
  mlrun/model_monitoring/stream_processing.py,sha256=bryYO3D0cC10MAQ-liHxUZ79MrL-VFXCb7KNyj6bl-8,34655
- mlrun/model_monitoring/writer.py,sha256=rGRFzSOkqZWvD3Y6sVk2H1Gepfnkzkp9ce00PsApTLo,8288
+ mlrun/model_monitoring/writer.py,sha256=l2D_5Ms5Wq5jfyQRVJbGBBRTMLjMmIAxwPeHWmrc9Kg,16382
  mlrun/model_monitoring/applications/__init__.py,sha256=BwlmRELlFJf2b2YMyv5kUSHNe8--OyqWhDgRlT8a_8g,779
  mlrun/model_monitoring/applications/_application_steps.py,sha256=t9LDIqQUGE10cyjyhlg0QqN1yVx0apD1HpERYLJfm8U,7409
  mlrun/model_monitoring/applications/base.py,sha256=X-9zjdnW7i-zfhEdsT76JaxlSBk9J1HSchx-FcJ-Eqo,47911
@@ -244,18 +244,19 @@ mlrun/model_monitoring/applications/evidently/__init__.py,sha256=-DqdPnBSrjZhFvK
  mlrun/model_monitoring/applications/evidently/base.py,sha256=shH9YwuFrGNWy1IDAbv622l-GE4o1z_u1bqhqTyTHDA,5661
  mlrun/model_monitoring/db/__init__.py,sha256=r47xPGZpIfMuv8J3PQCZTSqVPMhUta4sSJCZFKcS7FM,644
  mlrun/model_monitoring/db/_schedules.py,sha256=CJm4ulHFeE2Jxl4TcDMkvDAFfkb4D9Kd7UEzSAe2PNM,11902
- mlrun/model_monitoring/db/_stats.py,sha256=VVMWLMqG3Us3ozBkLaokJF22Ewv8WKmVE1-OvS_g9vA,6943
+ mlrun/model_monitoring/db/_stats.py,sha256=aZZqaOV9eRSp9aDrlxmFOiGtYGHejLTGgp3Ff0NGs1Y,6982
  mlrun/model_monitoring/db/tsdb/__init__.py,sha256=4S86V_Ot_skE16SLkw0WwsaAUB0ECH6SoJdp-TIu6s8,4645
- mlrun/model_monitoring/db/tsdb/base.py,sha256=34X3LGNo8lrZLbHCVTY1Cp3Y52xnLZ2PukfGzCYI850,33345
+ mlrun/model_monitoring/db/tsdb/base.py,sha256=5BHSGoG7I9Tjf0jtpoK4sbg4_-9iAqDlrSWO5JCxdWE,33984
  mlrun/model_monitoring/db/tsdb/helpers.py,sha256=0oUXc4aUkYtP2SGP6jTb3uPPKImIUsVsrb9otX9a7O4,1189
  mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4sRatmSVA4CQiJs7J5dcVBkQ,620
  mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=TuWuaCZw8sV1gSwN2BPmW8Gzwe3dsRN__KkJB9lum00,13116
  mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Uadj0UvAmln2MxDWod-kAzau1uNlqZh981rPhbUH_5M,2857
  mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py,sha256=dtkaHaWKWERPXylEWMECeetwrz3rWl0P43AADcTjlls,9330
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=Vj8eWZ6jxXs9nTlo5Du1jJjYutwSNp4ZtztvKsnrr4M,51333
+ mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=0ltrjvxsPEKeIgK7Lio9T2YXofIPdB_mGMOtdjI6_KY,53947
+ mlrun/model_monitoring/db/tsdb/tdengine/writer_graph_steps.py,sha256=zMof6hUr0dsyor73pnOWkJP62INAvslHU0nUklbT-3w,2053
  mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
  mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=sNQFj6qyJx5eSBKRC3gyTc1cfh1l2IkRpPtuZwtzCW0,6844
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=3GNMudOpedhu_OId4Gp-r9nj1gtqh_353yn2gWta-BY,61459
+ mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py,sha256=1QPeICQJDWTdr4qMVh09s9urzzW3bdNF41ieknXDNX4,63407
  mlrun/model_monitoring/metrics/__init__.py,sha256=6CsTXAxeLbbf8yfCADTaxmiavqwrLEdYFJ-qc5kgDAY,569
  mlrun/model_monitoring/metrics/histogram_distance.py,sha256=E9_WIl2vd6qNvoHVHoFcnuQk3ekbFWOdi8aU7sHrfk4,4724
  mlrun/package/__init__.py,sha256=v7VDyK9kDOOuDvFo4oiGV2fx-vM1KL7fdN9pGLakhUQ,7008
@@ -280,7 +281,7 @@ mlrun/platforms/iguazio.py,sha256=32_o95Ntx9z3ciowt2NcnX7tAiLBwX3VB0mbTQ-KrIQ,13
  mlrun/projects/__init__.py,sha256=hdCOA6_fp8X4qGGGT7Bj7sPbkM1PayWuaVZL0DkpuZw,1240
  mlrun/projects/operations.py,sha256=dax9HGvs3S7FzZ2Hok1ixFoToIZI2mkUo0EhNUtsHGk,21020
  mlrun/projects/pipelines.py,sha256=ZOfuIEHOXfuc4qAkuWvbWhCjP6kqpLkv-yBBaY9RXhg,52219
- mlrun/projects/project.py,sha256=Q70Fpxwghuxggdbcizj9pIQ0XsJYMGbmca6arcyrBog,256830
+ mlrun/projects/project.py,sha256=E4kX49_D2ZqytqBJMsVH6HBan9D1Z1_UDQPTJgRlJIM,256822
  mlrun/runtimes/__init__.py,sha256=8cqrYKy1a0_87XG7V_p96untQ4t8RocadM4LVEEN1JM,9029
  mlrun/runtimes/base.py,sha256=pagMAvF0nEElptqLnBiGx9fpFenEq052B80GaLzR8Y8,38895
  mlrun/runtimes/daskjob.py,sha256=IN6gKKrmCIjWooj5FgFm-pAb2i7ra1ERRzClfu_rYGI,20102
@@ -312,11 +313,11 @@ mlrun/runtimes/sparkjob/__init__.py,sha256=GPP_ekItxiU9Ydn3mJa4Obph02Bg6DO-JYs79
  mlrun/runtimes/sparkjob/spark3job.py,sha256=3dW7RG2T58F2dsUw0TsRvE3SIFcekx3CerLdcaG1f50,41458
  mlrun/serving/__init__.py,sha256=nriJAcVn5aatwU03T7SsE6ngJEGTxr3wIGt4WuvCCzY,1392
  mlrun/serving/merger.py,sha256=pfOQoozUyObCTpqXAMk94PmhZefn4bBrKufO3MKnkAc,6193
- mlrun/serving/remote.py,sha256=Igha2FipK3-6rV_PZ1K464kTbiTu8rhc6SMm-HiEJ6o,18817
+ mlrun/serving/remote.py,sha256=p29CBtKwbW_l8BzmNg3Uy__0eMf7_OubTMzga_S3EOA,22089
  mlrun/serving/routers.py,sha256=pu5jlSLI4Ml68YP_FMFDhhwPfLcT6lRu5yL5QDgXPHQ,52889
  mlrun/serving/server.py,sha256=WvAQtkNhAcd2vGuMR04OdxfynMNWvtz6LpKEYPhK3z0,40959
  mlrun/serving/serving_wrapper.py,sha256=UL9hhWCfMPcTJO_XrkvNaFvck1U1E7oS8trTZyak0cA,835
- mlrun/serving/states.py,sha256=urq7v4lWwaFP_ZheEqEO1IiX9gkVW3GkuiEbbrBoz90,139012
+ mlrun/serving/states.py,sha256=eT3dzYiEzVfDSLae_14m-c5vxHlEF9op4kxQtbZCASA,139004
  mlrun/serving/system_steps.py,sha256=ZvGkUqiiYOrUlsDnsvzf9u9554mzyFwlKVrybqB7xao,20200
  mlrun/serving/utils.py,sha256=Zbfqm8TKNcTE8zRBezVBzpvR2WKeKeIRN7otNIaiYEc,4170
  mlrun/serving/v1_serving.py,sha256=c6J_MtpE-Tqu00-6r4eJOCO6rUasHDal9W2eBIcrl50,11853
@@ -350,11 +351,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
  mlrun/utils/notifications/notification/slack.py,sha256=wSu_7W0EnGLBNwIgWCYEeTP8j9SPAMPDBnfUcPnVZYA,7299
  mlrun/utils/notifications/notification/webhook.py,sha256=FM5-LQAKAVJKp37MRzR3SsejalcnpM6r_9Oe7znxZEA,5313
  mlrun/utils/version/__init__.py,sha256=YnzE6tlf24uOQ8y7Z7l96QLAI6-QEii7-77g8ynmzy0,613
- mlrun/utils/version/version.json,sha256=9ugXxKtCZ32ybo9VZMeFs-bm7BEmLGoMqOqcIjZ2E5c,90
+ mlrun/utils/version/version.json,sha256=GMYi0YSvQVE5LLpEES_qHCE6wE-GSUypn8YD_oQiU_k,90
  mlrun/utils/version/version.py,sha256=M2hVhRrgkN3SxacZHs3ZqaOsqAA7B6a22ne324IQ1HE,1877
- mlrun-1.10.0rc30.dist-info/licenses/LICENSE,sha256=zTiv1CxWNkOk1q8eJS1G_8oD4gWpWLwWxj_Agcsi8Os,11337
- mlrun-1.10.0rc30.dist-info/METADATA,sha256=fH0E43MvoVz4SzYojTYFCapk6p0s2iLEaWGX1xZL3Ak,26104
- mlrun-1.10.0rc30.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- mlrun-1.10.0rc30.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
- mlrun-1.10.0rc30.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
- mlrun-1.10.0rc30.dist-info/RECORD,,
+ mlrun-1.10.0rc31.dist-info/licenses/LICENSE,sha256=zTiv1CxWNkOk1q8eJS1G_8oD4gWpWLwWxj_Agcsi8Os,11337
+ mlrun-1.10.0rc31.dist-info/METADATA,sha256=gzxw9bbyGyHnvZlLHeIKmogYbBbL1yVnq6i8nSRadn4,26104
+ mlrun-1.10.0rc31.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ mlrun-1.10.0rc31.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
+ mlrun-1.10.0rc31.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
+ mlrun-1.10.0rc31.dist-info/RECORD,,