mlrun 1.10.0rc18__py3-none-any.whl → 1.11.0rc16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; see the package registry's advisory page for more details.

Files changed (167)
  1. mlrun/__init__.py +24 -3
  2. mlrun/__main__.py +0 -4
  3. mlrun/artifacts/dataset.py +2 -2
  4. mlrun/artifacts/document.py +6 -1
  5. mlrun/artifacts/llm_prompt.py +21 -15
  6. mlrun/artifacts/model.py +3 -3
  7. mlrun/artifacts/plots.py +1 -1
  8. mlrun/{model_monitoring/db/tsdb/tdengine → auth}/__init__.py +2 -3
  9. mlrun/auth/nuclio.py +89 -0
  10. mlrun/auth/providers.py +429 -0
  11. mlrun/auth/utils.py +415 -0
  12. mlrun/common/constants.py +14 -0
  13. mlrun/common/model_monitoring/helpers.py +123 -0
  14. mlrun/common/runtimes/constants.py +28 -0
  15. mlrun/common/schemas/__init__.py +14 -3
  16. mlrun/common/schemas/alert.py +2 -2
  17. mlrun/common/schemas/api_gateway.py +3 -0
  18. mlrun/common/schemas/auth.py +12 -10
  19. mlrun/common/schemas/client_spec.py +4 -0
  20. mlrun/common/schemas/constants.py +25 -0
  21. mlrun/common/schemas/frontend_spec.py +1 -8
  22. mlrun/common/schemas/function.py +34 -0
  23. mlrun/common/schemas/hub.py +33 -20
  24. mlrun/common/schemas/model_monitoring/__init__.py +2 -1
  25. mlrun/common/schemas/model_monitoring/constants.py +12 -15
  26. mlrun/common/schemas/model_monitoring/functions.py +13 -4
  27. mlrun/common/schemas/model_monitoring/model_endpoints.py +11 -0
  28. mlrun/common/schemas/pipeline.py +1 -1
  29. mlrun/common/schemas/secret.py +17 -2
  30. mlrun/common/secrets.py +95 -1
  31. mlrun/common/types.py +10 -10
  32. mlrun/config.py +69 -19
  33. mlrun/data_types/infer.py +2 -2
  34. mlrun/datastore/__init__.py +12 -5
  35. mlrun/datastore/azure_blob.py +162 -47
  36. mlrun/datastore/base.py +274 -10
  37. mlrun/datastore/datastore.py +7 -2
  38. mlrun/datastore/datastore_profile.py +84 -22
  39. mlrun/datastore/model_provider/huggingface_provider.py +225 -41
  40. mlrun/datastore/model_provider/mock_model_provider.py +87 -0
  41. mlrun/datastore/model_provider/model_provider.py +206 -74
  42. mlrun/datastore/model_provider/openai_provider.py +226 -66
  43. mlrun/datastore/s3.py +39 -18
  44. mlrun/datastore/sources.py +1 -1
  45. mlrun/datastore/store_resources.py +4 -4
  46. mlrun/datastore/storeytargets.py +17 -12
  47. mlrun/datastore/targets.py +1 -1
  48. mlrun/datastore/utils.py +25 -6
  49. mlrun/datastore/v3io.py +1 -1
  50. mlrun/db/base.py +63 -32
  51. mlrun/db/httpdb.py +373 -153
  52. mlrun/db/nopdb.py +54 -21
  53. mlrun/errors.py +4 -2
  54. mlrun/execution.py +66 -25
  55. mlrun/feature_store/api.py +1 -1
  56. mlrun/feature_store/common.py +1 -1
  57. mlrun/feature_store/feature_vector_utils.py +1 -1
  58. mlrun/feature_store/steps.py +8 -6
  59. mlrun/frameworks/_common/utils.py +3 -3
  60. mlrun/frameworks/_dl_common/loggers/logger.py +1 -1
  61. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -1
  62. mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +1 -1
  63. mlrun/frameworks/_ml_common/utils.py +2 -1
  64. mlrun/frameworks/auto_mlrun/auto_mlrun.py +4 -3
  65. mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +2 -1
  66. mlrun/frameworks/onnx/dataset.py +2 -1
  67. mlrun/frameworks/onnx/mlrun_interface.py +2 -1
  68. mlrun/frameworks/pytorch/callbacks/logging_callback.py +5 -4
  69. mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +2 -1
  70. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +2 -1
  71. mlrun/frameworks/pytorch/utils.py +2 -1
  72. mlrun/frameworks/sklearn/metric.py +2 -1
  73. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +5 -4
  74. mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +2 -1
  75. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +2 -1
  76. mlrun/hub/__init__.py +52 -0
  77. mlrun/hub/base.py +142 -0
  78. mlrun/hub/module.py +172 -0
  79. mlrun/hub/step.py +113 -0
  80. mlrun/k8s_utils.py +105 -16
  81. mlrun/launcher/base.py +15 -7
  82. mlrun/launcher/local.py +4 -1
  83. mlrun/model.py +14 -4
  84. mlrun/model_monitoring/__init__.py +0 -1
  85. mlrun/model_monitoring/api.py +65 -28
  86. mlrun/model_monitoring/applications/__init__.py +1 -1
  87. mlrun/model_monitoring/applications/base.py +299 -128
  88. mlrun/model_monitoring/applications/context.py +2 -4
  89. mlrun/model_monitoring/controller.py +132 -58
  90. mlrun/model_monitoring/db/_schedules.py +38 -29
  91. mlrun/model_monitoring/db/_stats.py +6 -16
  92. mlrun/model_monitoring/db/tsdb/__init__.py +9 -7
  93. mlrun/model_monitoring/db/tsdb/base.py +29 -9
  94. mlrun/model_monitoring/db/tsdb/preaggregate.py +234 -0
  95. mlrun/model_monitoring/db/tsdb/stream_graph_steps.py +63 -0
  96. mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_metrics_queries.py +414 -0
  97. mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_predictions_queries.py +376 -0
  98. mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_results_queries.py +590 -0
  99. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_connection.py +434 -0
  100. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_connector.py +541 -0
  101. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_operations.py +808 -0
  102. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_schema.py +502 -0
  103. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream.py +163 -0
  104. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream_graph_steps.py +60 -0
  105. mlrun/model_monitoring/db/tsdb/timescaledb/utils/timescaledb_dataframe_processor.py +141 -0
  106. mlrun/model_monitoring/db/tsdb/timescaledb/utils/timescaledb_query_builder.py +585 -0
  107. mlrun/model_monitoring/db/tsdb/timescaledb/writer_graph_steps.py +73 -0
  108. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +20 -9
  109. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +235 -51
  110. mlrun/model_monitoring/features_drift_table.py +2 -1
  111. mlrun/model_monitoring/helpers.py +30 -6
  112. mlrun/model_monitoring/stream_processing.py +34 -28
  113. mlrun/model_monitoring/writer.py +224 -4
  114. mlrun/package/__init__.py +2 -1
  115. mlrun/platforms/__init__.py +0 -43
  116. mlrun/platforms/iguazio.py +8 -4
  117. mlrun/projects/operations.py +17 -11
  118. mlrun/projects/pipelines.py +2 -2
  119. mlrun/projects/project.py +187 -123
  120. mlrun/run.py +95 -21
  121. mlrun/runtimes/__init__.py +2 -186
  122. mlrun/runtimes/base.py +103 -25
  123. mlrun/runtimes/constants.py +225 -0
  124. mlrun/runtimes/daskjob.py +5 -2
  125. mlrun/runtimes/databricks_job/databricks_runtime.py +2 -1
  126. mlrun/runtimes/local.py +5 -2
  127. mlrun/runtimes/mounts.py +20 -2
  128. mlrun/runtimes/nuclio/__init__.py +12 -7
  129. mlrun/runtimes/nuclio/api_gateway.py +36 -6
  130. mlrun/runtimes/nuclio/application/application.py +339 -40
  131. mlrun/runtimes/nuclio/function.py +222 -72
  132. mlrun/runtimes/nuclio/serving.py +132 -42
  133. mlrun/runtimes/pod.py +213 -21
  134. mlrun/runtimes/utils.py +49 -9
  135. mlrun/secrets.py +99 -14
  136. mlrun/serving/__init__.py +2 -0
  137. mlrun/serving/remote.py +84 -11
  138. mlrun/serving/routers.py +26 -44
  139. mlrun/serving/server.py +138 -51
  140. mlrun/serving/serving_wrapper.py +6 -2
  141. mlrun/serving/states.py +997 -283
  142. mlrun/serving/steps.py +62 -0
  143. mlrun/serving/system_steps.py +149 -95
  144. mlrun/serving/v2_serving.py +9 -10
  145. mlrun/track/trackers/mlflow_tracker.py +29 -31
  146. mlrun/utils/helpers.py +292 -94
  147. mlrun/utils/http.py +9 -2
  148. mlrun/utils/notifications/notification/base.py +18 -0
  149. mlrun/utils/notifications/notification/git.py +3 -5
  150. mlrun/utils/notifications/notification/mail.py +39 -16
  151. mlrun/utils/notifications/notification/slack.py +2 -4
  152. mlrun/utils/notifications/notification/webhook.py +2 -5
  153. mlrun/utils/notifications/notification_pusher.py +3 -3
  154. mlrun/utils/version/version.json +2 -2
  155. mlrun/utils/version/version.py +3 -4
  156. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/METADATA +63 -74
  157. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/RECORD +161 -143
  158. mlrun/api/schemas/__init__.py +0 -259
  159. mlrun/db/auth_utils.py +0 -152
  160. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +0 -344
  161. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +0 -75
  162. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py +0 -281
  163. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +0 -1266
  164. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/WHEEL +0 -0
  165. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/entry_points.txt +0 -0
  166. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/licenses/LICENSE +0 -0
  167. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/top_level.txt +0 -0
@@ -13,8 +13,12 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import json
16
- from datetime import datetime, timezone
17
- from typing import Any, Callable, NewType, Optional
16
+ import typing
17
+ from collections.abc import Callable
18
+ from datetime import UTC, datetime
19
+ from typing import Any, NewType, Optional
20
+
21
+ import storey
18
22
 
19
23
  import mlrun.common.model_monitoring
20
24
  import mlrun.common.schemas
@@ -31,6 +35,8 @@ from mlrun.common.schemas.model_monitoring.constants import (
31
35
  WriterEvent,
32
36
  WriterEventKind,
33
37
  )
38
+ from mlrun.config import config
39
+ from mlrun.model_monitoring.db import TSDBConnector
34
40
  from mlrun.model_monitoring.db._stats import (
35
41
  ModelMonitoringCurrentStatsFile,
36
42
  ModelMonitoringDriftMeasuresFile,
@@ -73,7 +79,6 @@ class ModelMonitoringWriter(StepToDict):
73
79
  self._tsdb_connector = mlrun.model_monitoring.get_tsdb_connector(
74
80
  project=self.project, secret_provider=secret_provider
75
81
  )
76
- self._endpoints_records = {}
77
82
 
78
83
  def _generate_event_on_drift(
79
84
  self,
@@ -167,7 +172,7 @@ class ModelMonitoringWriter(StepToDict):
167
172
  )
168
173
  stat_kind = event.get(StatsData.STATS_NAME)
169
174
  data, timestamp_str = event.get(StatsData.STATS), event.get(StatsData.TIMESTAMP)
170
- timestamp = datetime.fromisoformat(timestamp_str).astimezone(tz=timezone.utc)
175
+ timestamp = datetime.fromisoformat(timestamp_str).astimezone(tz=UTC)
171
176
  if stat_kind == StatsKind.CURRENT_STATS.value:
172
177
  ModelMonitoringCurrentStatsFile(self.project, endpoint_id).write(
173
178
  data, timestamp
@@ -226,3 +231,218 @@ class ModelMonitoringWriter(StepToDict):
226
231
  )
227
232
 
228
233
  logger.info("Model monitoring writer finished handling event")
234
+
235
+
236
+ class WriterGraphFactory:
237
+ def __init__(
238
+ self,
239
+ parquet_path: str,
240
+ ):
241
+ self.parquet_path = parquet_path
242
+ self.parquet_batching_max_events = (
243
+ config.model_endpoint_monitoring.writer_graph.max_events
244
+ )
245
+ self.parquet_batching_timeout_secs = (
246
+ config.model_endpoint_monitoring.writer_graph.parquet_batching_timeout_secs
247
+ )
248
+
249
+ def apply_writer_graph(
250
+ self,
251
+ fn: mlrun.runtimes.ServingRuntime,
252
+ tsdb_connector: TSDBConnector,
253
+ ):
254
+ graph = typing.cast(
255
+ mlrun.serving.states.RootFlowStep,
256
+ fn.set_topology(mlrun.serving.states.StepKinds.flow, engine="async"),
257
+ )
258
+
259
+ graph.to("ReconstructWriterEvent", "event_reconstructor")
260
+ step = tsdb_connector.add_pre_writer_steps(
261
+ graph=graph, after="event_reconstructor"
262
+ )
263
+ before_choice = step.name if step else "event_reconstructor"
264
+ graph.add_step("KindChoice", "kind_choice_step", after=before_choice)
265
+ tsdb_connector.apply_writer_steps(
266
+ graph=graph,
267
+ after="kind_choice_step",
268
+ )
269
+ graph.add_step(
270
+ "AlertGenerator",
271
+ "alert_generator",
272
+ after="kind_choice_step",
273
+ project=fn.metadata.project,
274
+ )
275
+ graph.add_step(
276
+ "storey.Filter",
277
+ name="filter_none",
278
+ _fn="(event is not None)",
279
+ after="alert_generator",
280
+ )
281
+ graph.add_step(
282
+ "mlrun.serving.remote.MLRunAPIRemoteStep",
283
+ name="alert_generator_api_call",
284
+ after="filter_none",
285
+ method="POST",
286
+ path=f"projects/{fn.metadata.project}/events/{{kind}}",
287
+ fill_placeholders=True,
288
+ )
289
+
290
+ graph.add_step(
291
+ "mlrun.datastore.storeytargets.ParquetStoreyTarget",
292
+ alternative_v3io_access_key=mlrun.common.schemas.model_monitoring.ProjectSecretKeys.ACCESS_KEY,
293
+ name="stats_writer",
294
+ after="kind_choice_step",
295
+ graph_shape="cylinder",
296
+ path=self.parquet_path
297
+ if self.parquet_path.endswith("/")
298
+ else self.parquet_path + "/",
299
+ max_events=self.parquet_batching_max_events,
300
+ flush_after_seconds=self.parquet_batching_timeout_secs,
301
+ columns=[
302
+ StatsData.TIMESTAMP,
303
+ StatsData.STATS,
304
+ WriterEvent.ENDPOINT_ID,
305
+ StatsData.STATS_NAME,
306
+ ],
307
+ partition_cols=[WriterEvent.ENDPOINT_ID, StatsData.STATS_NAME],
308
+ single_file=True,
309
+ )
310
+
311
+
312
+ class ReconstructWriterEvent(storey.MapClass):
313
+ def __init__(self):
314
+ super().__init__()
315
+
316
+ def do(self, event: dict) -> dict[str, Any]:
317
+ logger.info("Reconstructing the event", event=event)
318
+ kind = event.pop(WriterEvent.EVENT_KIND, WriterEventKind.RESULT)
319
+ result_event = _AppResultEvent(json.loads(event.pop(WriterEvent.DATA, "{}")))
320
+ result_event.update(_AppResultEvent(event))
321
+
322
+ expected_keys = list(
323
+ set(WriterEvent.list()).difference(
324
+ [WriterEvent.EVENT_KIND, WriterEvent.DATA]
325
+ )
326
+ )
327
+ if kind == WriterEventKind.METRIC:
328
+ expected_keys.extend(MetricData.list())
329
+ elif kind == WriterEventKind.RESULT:
330
+ expected_keys.extend(ResultData.list())
331
+ elif kind == WriterEventKind.STATS:
332
+ expected_keys.extend(StatsData.list())
333
+ else:
334
+ raise _WriterEventValueError(
335
+ f"Unknown event kind: {kind}, expected one of: {WriterEventKind.list()}"
336
+ )
337
+ missing_keys = [key for key in expected_keys if key not in result_event]
338
+ if missing_keys:
339
+ raise _WriterEventValueError(
340
+ f"The received event misses some keys compared to the expected "
341
+ f"monitoring application event schema: {missing_keys} for event kind {kind}"
342
+ )
343
+ result_event["kind"] = kind
344
+ if kind in WriterEventKind.user_app_outputs():
345
+ result_event[WriterEvent.END_INFER_TIME] = datetime.fromisoformat(
346
+ event[WriterEvent.END_INFER_TIME]
347
+ )
348
+ if kind == WriterEventKind.STATS:
349
+ result_event[StatsData.STATS] = json.dumps(result_event[StatsData.STATS])
350
+ return result_event
351
+
352
+
353
+ class KindChoice(storey.Choice):
354
+ def select_outlets(self, event):
355
+ kind = event.get("kind")
356
+ logger.info("Selecting the outlet for the event", kind=kind)
357
+ if kind == WriterEventKind.METRIC:
358
+ outlets = ["tsdb_metrics"]
359
+ elif kind == WriterEventKind.RESULT:
360
+ outlets = ["tsdb_app_results", "alert_generator"]
361
+ elif kind == WriterEventKind.STATS:
362
+ outlets = ["stats_writer"]
363
+ else:
364
+ raise _WriterEventValueError(
365
+ f"Unknown event kind: {kind}, expected one of: {WriterEventKind.list()}"
366
+ )
367
+ return outlets
368
+
369
+
370
+ class AlertGenerator(storey.MapClass):
371
+ def __init__(self, project: str, **kwargs):
372
+ self.project = project
373
+ super().__init__(**kwargs)
374
+
375
+ def do(self, event: dict) -> Optional[dict[str, Any]]:
376
+ kind = event.pop(WriterEvent.EVENT_KIND, WriterEventKind.RESULT)
377
+ if (
378
+ mlrun.mlconf.alerts.mode == mlrun.common.schemas.alert.AlertsModes.enabled
379
+ and kind == WriterEventKind.RESULT
380
+ and (
381
+ event[ResultData.RESULT_STATUS] == ResultStatusApp.detected.value
382
+ or event[ResultData.RESULT_STATUS]
383
+ == ResultStatusApp.potential_detection.value
384
+ )
385
+ ):
386
+ event_value = {
387
+ "app_name": event[WriterEvent.APPLICATION_NAME],
388
+ "model": event[WriterEvent.ENDPOINT_NAME],
389
+ "model_endpoint_id": event[WriterEvent.ENDPOINT_ID],
390
+ "result_name": event[ResultData.RESULT_NAME],
391
+ "result_value": event[ResultData.RESULT_VALUE],
392
+ }
393
+ data = self._generate_event_data(
394
+ entity_id=get_result_instance_fqn(
395
+ event[WriterEvent.ENDPOINT_ID],
396
+ event[WriterEvent.APPLICATION_NAME],
397
+ event[ResultData.RESULT_NAME],
398
+ ),
399
+ result_status=event[ResultData.RESULT_STATUS],
400
+ event_value=event_value,
401
+ project_name=self.project,
402
+ result_kind=event[ResultData.RESULT_KIND],
403
+ )
404
+ event = data.dict()
405
+ logger.info("Generated alert event", event=event)
406
+ return event
407
+ return None
408
+
409
+ @staticmethod
410
+ def _generate_alert_event_kind(
411
+ result_kind: int, result_status: int
412
+ ) -> alert_objects.EventKind:
413
+ """Generate the required Event Kind format for the alerting system"""
414
+ event_kind = ResultKindApp(value=result_kind).name
415
+
416
+ if result_status == ResultStatusApp.detected.value:
417
+ event_kind = f"{event_kind}_detected"
418
+ else:
419
+ event_kind = f"{event_kind}_suspected"
420
+ return alert_objects.EventKind(
421
+ value=mlrun.utils.helpers.normalize_name(event_kind)
422
+ )
423
+
424
+ def _generate_event_data(
425
+ self,
426
+ entity_id: str,
427
+ result_status: int,
428
+ event_value: dict,
429
+ project_name: str,
430
+ result_kind: int,
431
+ ) -> mlrun.common.schemas.Event:
432
+ entity = mlrun.common.schemas.alert.EventEntities(
433
+ kind=alert_objects.EventEntityKind.MODEL_ENDPOINT_RESULT,
434
+ project=project_name,
435
+ ids=[entity_id],
436
+ )
437
+
438
+ event_kind = self._generate_alert_event_kind(
439
+ result_status=result_status, result_kind=result_kind
440
+ )
441
+
442
+ event_data = mlrun.common.schemas.Event(
443
+ kind=alert_objects.EventKind(value=event_kind),
444
+ entity=entity,
445
+ value_dict=event_value,
446
+ )
447
+
448
+ return event_data
mlrun/package/__init__.py CHANGED
@@ -15,7 +15,8 @@
15
15
  import functools
16
16
  import inspect
17
17
  from collections import OrderedDict
18
- from typing import Callable, Optional, Union
18
+ from collections.abc import Callable
19
+ from typing import Optional, Union
19
20
 
20
21
  from ..config import config
21
22
  from .context_handler import ContextHandler
@@ -25,49 +25,6 @@ from .iguazio import (
25
25
  )
26
26
 
27
27
 
28
- class _DeprecationHelper:
29
- """A helper class to deprecate old schemas"""
30
-
31
- def __init__(self, new_target: str, version="1.8.0"):
32
- self._new_target = new_target
33
- self._version = version
34
-
35
- def __call__(self, *args, **kwargs):
36
- self._warn()
37
- return self._lazy_load()(*args, **kwargs)
38
-
39
- def __getattr__(self, attr):
40
- self._warn()
41
- return getattr(self._lazy_load(), attr)
42
-
43
- def _lazy_load(self, *args, **kwargs):
44
- import mlrun.runtimes.mounts as mlrun_mounts
45
-
46
- return getattr(mlrun_mounts, self._new_target)
47
-
48
- def _warn(self):
49
- warnings.warn(
50
- f"mlrun.platforms.{self._new_target} is deprecated since version {self._version}, "
51
- f"and will be removed in 1.10. Use mlrun.runtimes.mounts.{self._new_target} instead.",
52
- FutureWarning,
53
- )
54
-
55
-
56
- # TODO: Remove in 1.10
57
- # For backwards compatibility
58
- VolumeMount = _DeprecationHelper("VolumeMount")
59
- auto_mount = _DeprecationHelper("auto_mount")
60
- mount_configmap = _DeprecationHelper("mount_configmap")
61
- mount_hostpath = _DeprecationHelper("mount_hostpath")
62
- mount_pvc = _DeprecationHelper("mount_pvc")
63
- mount_s3 = _DeprecationHelper("mount_s3")
64
- mount_secret = _DeprecationHelper("mount_secret")
65
- mount_v3io = _DeprecationHelper("mount_v3io")
66
- set_env_variables = _DeprecationHelper("set_env_variables")
67
- v3io_cred = _DeprecationHelper("v3io_cred")
68
- # eof 'For backwards compatibility'
69
-
70
-
71
28
  def watch_stream(
72
29
  url,
73
30
  shard_ids: Optional[list] = None,
@@ -96,7 +96,11 @@ class OutputStream:
96
96
  if access_key:
97
97
  v3io_client_kwargs["access_key"] = access_key
98
98
 
99
- self._v3io_client = v3io.dataplane.Client(**v3io_client_kwargs)
99
+ if not mock:
100
+ self._v3io_client = v3io.dataplane.Client(**v3io_client_kwargs)
101
+ else:
102
+ self._v3io_client = None
103
+
100
104
  self._container, self._stream_path = split_path(stream_path)
101
105
  self._shards = shards
102
106
  self._retention_in_hours = retention_in_hours
@@ -105,7 +109,7 @@ class OutputStream:
105
109
  self._mock = mock
106
110
  self._mock_queue = []
107
111
 
108
- def create_stream(self):
112
+ def create_stream(self) -> None:
109
113
  # this import creates an import loop via the utils module, so putting it in execution path
110
114
  from mlrun.utils.helpers import logger
111
115
 
@@ -136,7 +140,7 @@ class OutputStream:
136
140
  self._lazy_init()
137
141
 
138
142
  def dump_record(rec):
139
- if not isinstance(rec, (str, bytes)):
143
+ if not isinstance(rec, str | bytes):
140
144
  return dict_to_json(rec)
141
145
  return str(rec)
142
146
 
@@ -210,7 +214,7 @@ class KafkaOutputStream:
210
214
  self._initialized = False
211
215
 
212
216
  def _lazy_init(self):
213
- if self._initialized:
217
+ if self._initialized or self._mock:
214
218
  return
215
219
 
216
220
  import kafka
@@ -85,17 +85,17 @@ def run_function(
85
85
  ) -> Union[mlrun.model.RunObject, mlrun_pipelines.models.PipelineNodeWrapper]:
86
86
  """Run a local or remote task as part of a local/kubeflow pipeline
87
87
 
88
- run_function() allow you to execute a function locally, on a remote cluster, or as part of an automated workflow
89
- function can be specified as an object or by name (str), when the function is specified by name it is looked up
90
- in the current project eliminating the need to redefine/edit functions.
88
+ run_function() allows you to execute a function locally, on a remote cluster, or as part of an automated workflow.
89
+ The function can be specified as an object or by name (str). When the function is specified by name it is looked up
90
+ in the current project, eliminating the need to redefine/edit functions.
91
91
 
92
- when functions run as part of a workflow/pipeline (project.run()) some attributes can be set at the run level,
92
+ When functions run as part of a workflow/pipeline (project.run()) some attributes can be set at the run level,
93
93
  e.g. local=True will run all the functions locally, setting artifact_path will direct all outputs to the same path.
94
- project runs provide additional notifications/reporting and exception handling.
95
- inside a Kubeflow pipeline (KFP) run_function() generates KFP node (see PipelineNodeWrapper) which forms a DAG
96
- some behavior may differ between regular runs and deferred KFP runs.
94
+ Project runs provide additional notifications/reporting and exception handling.
95
+ Inside a Kubeflow pipeline (KFP) run_function() generates KFP node (see PipelineNodeWrapper) which forms a DAG.
96
+ Some behavior may differ between regular runs and deferred KFP runs.
97
97
 
98
- example (use with function object)::
98
+ Example (use with function object)::
99
99
 
100
100
  LABELS = "is_error"
101
101
  MODEL_CLASS = "sklearn.ensemble.RandomForestClassifier"
@@ -107,7 +107,7 @@ def run_function(
107
107
  inputs={"dataset": DATA_PATH},
108
108
  )
109
109
 
110
- example (use with project)::
110
+ Example (use with project)::
111
111
 
112
112
  # create a project with two functions (local and from hub)
113
113
  project = mlrun.new_project(project_name, "./proj)
@@ -119,7 +119,7 @@ def run_function(
119
119
  run2 = run_function("train", params={"label_columns": LABELS, "model_class": MODEL_CLASS},
120
120
  inputs={"dataset": run1.outputs["data"]})
121
121
 
122
- example (use in pipeline)::
122
+ Example (use in pipeline)::
123
123
 
124
124
  @dsl.pipeline(name="test pipeline", description="test")
125
125
  def my_pipe(url=""):
@@ -177,7 +177,12 @@ def run_function(
177
177
  This ensures latest code changes are executed. This argument must be used in
178
178
  conjunction with the local=True argument.
179
179
  :param output_path: path to store artifacts, when running in a workflow this will be set automatically
180
- :param retry: Retry configuration for the run, can be a dict or an instance of mlrun.model.Retry.
180
+ :param retry: Retry configuration for the run, can be a dict or an instance of
181
+ :py:class:`~mlrun.model.Retry`.
182
+ The `count` field in the `Retry` object specifies the number of retry attempts.
183
+ If `count=0`, the run will not be retried.
184
+ The `backoff` field specifies the retry backoff strategy between retry attempts.
185
+ If not provided, the default backoff delay is 30 seconds.
181
186
  :return: MLRun RunObject or PipelineNodeWrapper
182
187
  """
183
188
  if artifact_path:
@@ -400,6 +405,7 @@ def deploy_function(
400
405
  :param project_object: override the project object to use, will default to the project set in the runtime context.
401
406
  """
402
407
  engine, function = _get_engine_and_function(function, project_object)
408
+ # TODO in ML-11599 need to handle redeployment with different auth token name
403
409
  if function.kind not in mlrun.runtimes.RuntimeKinds.nuclio_runtimes():
404
410
  raise mlrun.errors.MLRunInvalidArgumentError(
405
411
  "deploy is used with real-time functions, for other kinds use build_function()"
@@ -228,11 +228,11 @@ class _PipelineContext:
228
228
  force_run_local = mlrun.mlconf.force_run_local
229
229
  if force_run_local is None or force_run_local == "auto":
230
230
  force_run_local = not mlrun.mlconf.is_api_running_on_k8s()
231
+
232
+ if self.workflow:
231
233
  if not mlrun.mlconf.kfp_url:
232
234
  logger.debug("Kubeflow pipeline URL is not set, running locally")
233
235
  force_run_local = True
234
-
235
- if self.workflow:
236
236
  force_run_local = force_run_local or self.workflow.run_local
237
237
 
238
238
  return force_run_local