mlrun 1.6.0rc14__py3-none-any.whl → 1.6.0rc16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlrun might be problematic. Click here for more details.
- mlrun/artifacts/base.py +1 -5
- mlrun/artifacts/dataset.py +0 -4
- mlrun/artifacts/model.py +0 -5
- mlrun/common/schemas/auth.py +3 -4
- mlrun/config.py +1 -0
- mlrun/data_types/to_pandas.py +0 -1
- mlrun/datastore/base.py +0 -1
- mlrun/datastore/dbfs_store.py +0 -1
- mlrun/datastore/sources.py +1 -1
- mlrun/datastore/targets.py +1 -1
- mlrun/datastore/v3io.py +1 -1
- mlrun/datastore/wasbfs/fs.py +0 -1
- mlrun/errors.py +0 -1
- mlrun/feature_store/api.py +0 -27
- mlrun/feature_store/retrieval/base.py +2 -3
- mlrun/feature_store/retrieval/job.py +0 -1
- mlrun/feature_store/retrieval/spark_merger.py +0 -2
- mlrun/feature_store/steps.py +0 -3
- mlrun/frameworks/_common/model_handler.py +2 -4
- mlrun/frameworks/_dl_common/loggers/logger.py +1 -3
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +1 -3
- mlrun/frameworks/_ml_common/loggers/logger.py +1 -3
- mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +1 -1
- mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +1 -1
- mlrun/frameworks/_ml_common/plans/dataset_plan.py +1 -3
- mlrun/frameworks/lgbm/__init__.py +2 -2
- mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +3 -3
- mlrun/frameworks/pytorch/mlrun_interface.py +1 -1
- mlrun/frameworks/tf_keras/__init__.py +4 -4
- mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +3 -3
- mlrun/frameworks/tf_keras/mlrun_interface.py +6 -1
- mlrun/frameworks/xgboost/__init__.py +1 -1
- mlrun/k8s_utils.py +6 -6
- mlrun/kfpops.py +0 -3
- mlrun/launcher/base.py +0 -1
- mlrun/launcher/local.py +0 -3
- mlrun/model.py +4 -3
- mlrun/model_monitoring/batch.py +3 -1
- mlrun/package/packagers/numpy_packagers.py +1 -1
- mlrun/package/utils/log_hint_utils.py +1 -1
- mlrun/package/utils/type_hint_utils.py +3 -1
- mlrun/platforms/iguazio.py +2 -4
- mlrun/projects/project.py +2 -3
- mlrun/runtimes/constants.py +7 -0
- mlrun/runtimes/daskjob.py +0 -2
- mlrun/runtimes/function.py +0 -3
- mlrun/runtimes/local.py +1 -1
- mlrun/runtimes/mpijob/abstract.py +0 -1
- mlrun/runtimes/pod.py +5 -11
- mlrun/runtimes/sparkjob/spark3job.py +0 -1
- mlrun/secrets.py +0 -1
- mlrun/serving/states.py +0 -2
- mlrun/serving/utils.py +0 -1
- mlrun/serving/v1_serving.py +0 -1
- mlrun/track/tracker.py +1 -1
- mlrun/track/tracker_manager.py +3 -1
- mlrun/utils/azure_vault.py +0 -1
- mlrun/utils/condition_evaluator.py +0 -2
- mlrun/utils/helpers.py +0 -1
- mlrun/utils/logger.py +0 -1
- mlrun/utils/notifications/notification_pusher.py +0 -3
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.6.0rc14.dist-info → mlrun-1.6.0rc16.dist-info}/METADATA +3 -3
- {mlrun-1.6.0rc14.dist-info → mlrun-1.6.0rc16.dist-info}/RECORD +68 -68
- {mlrun-1.6.0rc14.dist-info → mlrun-1.6.0rc16.dist-info}/LICENSE +0 -0
- {mlrun-1.6.0rc14.dist-info → mlrun-1.6.0rc16.dist-info}/WHEEL +0 -0
- {mlrun-1.6.0rc14.dist-info → mlrun-1.6.0rc16.dist-info}/entry_points.txt +0 -0
- {mlrun-1.6.0rc14.dist-info → mlrun-1.6.0rc16.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/batch.py
CHANGED
|
@@ -526,12 +526,14 @@ class BatchProcessor:
|
|
|
526
526
|
)
|
|
527
527
|
|
|
528
528
|
# Get drift thresholds from the model monitoring configuration
|
|
529
|
+
# fmt: off
|
|
529
530
|
self.default_possible_drift_threshold = (
|
|
530
531
|
mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.possible_drift
|
|
531
532
|
)
|
|
532
533
|
self.default_drift_detected_threshold = (
|
|
533
534
|
mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.drift_detected
|
|
534
535
|
)
|
|
536
|
+
# fmt: on
|
|
535
537
|
|
|
536
538
|
# Get a runtime database
|
|
537
539
|
|
|
@@ -618,7 +620,7 @@ class BatchProcessor:
|
|
|
618
620
|
|
|
619
621
|
if not mlrun.mlconf.is_ce_mode():
|
|
620
622
|
# Create v3io stream based on the input stream
|
|
621
|
-
response = self.v3io.
|
|
623
|
+
response = self.v3io.stream.create(
|
|
622
624
|
container=self.stream_container,
|
|
623
625
|
path=self.stream_path,
|
|
624
626
|
shard_count=1,
|
|
@@ -514,7 +514,7 @@ class _NumPyNDArrayCollectionPackager(DefaultPackager):
|
|
|
514
514
|
|
|
515
515
|
@staticmethod
|
|
516
516
|
def _is_any_object_dtype(
|
|
517
|
-
array_collection: Union[np.ndarray, NumPyArrayCollectionType]
|
|
517
|
+
array_collection: Union[np.ndarray, NumPyArrayCollectionType],
|
|
518
518
|
):
|
|
519
519
|
"""
|
|
520
520
|
Check if any of the arrays in a collection is of type `object`.
|
|
@@ -35,7 +35,7 @@ class LogHintUtils:
|
|
|
35
35
|
|
|
36
36
|
@staticmethod
|
|
37
37
|
def parse_log_hint(
|
|
38
|
-
log_hint: typing.Union[typing.Dict[str, str], str, None]
|
|
38
|
+
log_hint: typing.Union[typing.Dict[str, str], str, None],
|
|
39
39
|
) -> typing.Union[typing.Dict[str, str], None]:
|
|
40
40
|
"""
|
|
41
41
|
Parse a given log hint from string to a logging configuration dictionary. The string will be read as the
|
|
@@ -249,7 +249,9 @@ class TypeHintUtils:
|
|
|
249
249
|
if type_hint.__forward_module__:
|
|
250
250
|
arg = f"{type_hint.__forward_module__}.{arg}"
|
|
251
251
|
return [TypeHintUtils.parse_type_hint(type_hint=arg)]
|
|
252
|
-
except
|
|
252
|
+
except (
|
|
253
|
+
MLRunInvalidArgumentError
|
|
254
|
+
): # May be raised from `TypeHintUtils.parse_type_hint`
|
|
253
255
|
logger.warn(
|
|
254
256
|
f"Could not reduce the type hint '{type_hint}' as it is a forward reference to a class without "
|
|
255
257
|
f"it's full module path. To enable importing forward references, please provide the full module "
|
mlrun/platforms/iguazio.py
CHANGED
|
@@ -305,7 +305,6 @@ class OutputStream:
|
|
|
305
305
|
self._mock_queue = []
|
|
306
306
|
|
|
307
307
|
if create and not mock:
|
|
308
|
-
|
|
309
308
|
# this import creates an import loop via the utils module, so putting it in execution path
|
|
310
309
|
from mlrun.utils.helpers import logger
|
|
311
310
|
|
|
@@ -318,7 +317,7 @@ class OutputStream:
|
|
|
318
317
|
retention_in_hours=retention_in_hours,
|
|
319
318
|
)
|
|
320
319
|
|
|
321
|
-
response = self._v3io_client.
|
|
320
|
+
response = self._v3io_client.stream.create(
|
|
322
321
|
container=self._container,
|
|
323
322
|
path=self._stream_path,
|
|
324
323
|
shard_count=shards or 1,
|
|
@@ -343,7 +342,7 @@ class OutputStream:
|
|
|
343
342
|
# for mock testing
|
|
344
343
|
self._mock_queue.extend(records)
|
|
345
344
|
else:
|
|
346
|
-
self._v3io_client.put_records(
|
|
345
|
+
self._v3io_client.stream.put_records(
|
|
347
346
|
container=self._container, path=self._stream_path, records=records
|
|
348
347
|
)
|
|
349
348
|
|
|
@@ -368,7 +367,6 @@ class HTTPOutputStream:
|
|
|
368
367
|
data = [data]
|
|
369
368
|
|
|
370
369
|
for record in data:
|
|
371
|
-
|
|
372
370
|
# Convert the new record to the required format
|
|
373
371
|
serialized_record = dump_record(record)
|
|
374
372
|
response = requests.post(self._stream_path, data=serialized_record)
|
mlrun/projects/project.py
CHANGED
|
@@ -793,9 +793,8 @@ class ProjectSpec(ModelObj):
|
|
|
793
793
|
for name, function in self._function_definitions.items():
|
|
794
794
|
if hasattr(function, "to_dict"):
|
|
795
795
|
spec = function.to_dict(strip=True)
|
|
796
|
-
if (
|
|
797
|
-
|
|
798
|
-
and function.spec.build.source.startswith(self._source_repo())
|
|
796
|
+
if function.spec.build.source and function.spec.build.source.startswith(
|
|
797
|
+
self._source_repo()
|
|
799
798
|
):
|
|
800
799
|
update_in(spec, "spec.build.source", "./")
|
|
801
800
|
functions.append({"name": name, "spec": spec})
|
mlrun/runtimes/constants.py
CHANGED
|
@@ -157,6 +157,13 @@ class RunStates(object):
|
|
|
157
157
|
RunStates.aborted,
|
|
158
158
|
]
|
|
159
159
|
|
|
160
|
+
@staticmethod
|
|
161
|
+
def error_states():
|
|
162
|
+
return [
|
|
163
|
+
RunStates.error,
|
|
164
|
+
RunStates.aborted,
|
|
165
|
+
]
|
|
166
|
+
|
|
160
167
|
@staticmethod
|
|
161
168
|
def non_terminal_states():
|
|
162
169
|
return list(set(RunStates.all()) - set(RunStates.terminal_states()))
|
mlrun/runtimes/daskjob.py
CHANGED
|
@@ -94,7 +94,6 @@ class DaskSpec(KubeResourceSpec):
|
|
|
94
94
|
clone_target_dir=None,
|
|
95
95
|
state_thresholds=None,
|
|
96
96
|
):
|
|
97
|
-
|
|
98
97
|
super().__init__(
|
|
99
98
|
command=command,
|
|
100
99
|
args=args,
|
|
@@ -526,7 +525,6 @@ class DaskCluster(KubejobRuntime):
|
|
|
526
525
|
)
|
|
527
526
|
|
|
528
527
|
def _run(self, runobj: RunObject, execution):
|
|
529
|
-
|
|
530
528
|
handler = runobj.spec.handler
|
|
531
529
|
self._force_handler(handler)
|
|
532
530
|
|
mlrun/runtimes/function.py
CHANGED
|
@@ -63,7 +63,6 @@ def validate_nuclio_version_compatibility(*min_versions):
|
|
|
63
63
|
try:
|
|
64
64
|
parsed_current_version = semver.VersionInfo.parse(mlconf.nuclio_version)
|
|
65
65
|
except ValueError:
|
|
66
|
-
|
|
67
66
|
# only log when version is set but invalid
|
|
68
67
|
if mlconf.nuclio_version:
|
|
69
68
|
logger.warning(
|
|
@@ -166,7 +165,6 @@ class NuclioSpec(KubeResourceSpec):
|
|
|
166
165
|
state_thresholds=None,
|
|
167
166
|
disable_default_http_trigger=None,
|
|
168
167
|
):
|
|
169
|
-
|
|
170
168
|
super().__init__(
|
|
171
169
|
command=command,
|
|
172
170
|
args=args,
|
|
@@ -1105,7 +1103,6 @@ class RemoteRuntime(KubeResource):
|
|
|
1105
1103
|
return results
|
|
1106
1104
|
|
|
1107
1105
|
def _resolve_invocation_url(self, path, force_external_address):
|
|
1108
|
-
|
|
1109
1106
|
if not path.startswith("/") and path != "":
|
|
1110
1107
|
path = f"/{path}"
|
|
1111
1108
|
|
mlrun/runtimes/local.py
CHANGED
|
@@ -489,7 +489,7 @@ def exec_from_params(handler, runobj: RunObject, context: MLClientCtx, cwd=None)
|
|
|
489
489
|
context.set_state("completed", commit=False)
|
|
490
490
|
except Exception as exc:
|
|
491
491
|
err = err_to_str(exc)
|
|
492
|
-
logger.error(f"
|
|
492
|
+
logger.error(f"Execution error, {traceback.format_exc()}")
|
|
493
493
|
context.set_state(error=err, commit=False)
|
|
494
494
|
logger.set_logger_level(old_level)
|
|
495
495
|
|
|
@@ -111,7 +111,6 @@ class AbstractMPIJobRuntime(KubejobRuntime, abc.ABC):
|
|
|
111
111
|
|
|
112
112
|
@staticmethod
|
|
113
113
|
def _get_run_completion_updates(run: dict) -> dict:
|
|
114
|
-
|
|
115
114
|
# TODO: add a 'workers' section in run objects state, each worker will update its state while
|
|
116
115
|
# the run state will be resolved by the server.
|
|
117
116
|
# update the run object state if empty so that it won't default to 'created' state
|
mlrun/runtimes/pod.py
CHANGED
|
@@ -491,9 +491,7 @@ class KubeResourceSpec(FunctionSpec):
|
|
|
491
491
|
self._initialize_node_affinity(affinity_field_name)
|
|
492
492
|
|
|
493
493
|
self_affinity = getattr(self, affinity_field_name)
|
|
494
|
-
self_affinity.node_affinity.required_during_scheduling_ignored_during_execution =
|
|
495
|
-
node_selector
|
|
496
|
-
)
|
|
494
|
+
self_affinity.node_affinity.required_during_scheduling_ignored_during_execution = node_selector
|
|
497
495
|
|
|
498
496
|
def enrich_function_preemption_spec(
|
|
499
497
|
self,
|
|
@@ -593,7 +591,6 @@ class KubeResourceSpec(FunctionSpec):
|
|
|
593
591
|
)
|
|
594
592
|
# purge any affinity / anti-affinity preemption related configuration and enrich with preemptible tolerations
|
|
595
593
|
elif self_preemption_mode == PreemptionModes.allow.value:
|
|
596
|
-
|
|
597
594
|
# remove preemptible anti-affinity
|
|
598
595
|
self._prune_affinity_node_selector_requirement(
|
|
599
596
|
generate_preemptible_node_selector_requirements(
|
|
@@ -655,17 +652,13 @@ class KubeResourceSpec(FunctionSpec):
|
|
|
655
652
|
self._initialize_node_affinity(affinity_field_name)
|
|
656
653
|
|
|
657
654
|
self_affinity = getattr(self, affinity_field_name)
|
|
658
|
-
if
|
|
659
|
-
not self_affinity.node_affinity.required_during_scheduling_ignored_during_execution
|
|
660
|
-
):
|
|
655
|
+
if not self_affinity.node_affinity.required_during_scheduling_ignored_during_execution:
|
|
661
656
|
self_affinity.node_affinity.required_during_scheduling_ignored_during_execution = k8s_client.V1NodeSelector(
|
|
662
657
|
node_selector_terms=node_selector_terms
|
|
663
658
|
)
|
|
664
659
|
return
|
|
665
660
|
|
|
666
|
-
node_selector =
|
|
667
|
-
self_affinity.node_affinity.required_during_scheduling_ignored_during_execution
|
|
668
|
-
)
|
|
661
|
+
node_selector = self_affinity.node_affinity.required_during_scheduling_ignored_during_execution
|
|
669
662
|
new_node_selector_terms = []
|
|
670
663
|
|
|
671
664
|
for node_selector_term_to_add in node_selector_terms:
|
|
@@ -741,9 +734,11 @@ class KubeResourceSpec(FunctionSpec):
|
|
|
741
734
|
self._initialize_affinity(affinity_field_name)
|
|
742
735
|
self._initialize_node_affinity(affinity_field_name)
|
|
743
736
|
|
|
737
|
+
# fmt: off
|
|
744
738
|
self_affinity.node_affinity.required_during_scheduling_ignored_during_execution = (
|
|
745
739
|
new_required_during_scheduling_ignored_during_execution
|
|
746
740
|
)
|
|
741
|
+
# fmt: on
|
|
747
742
|
|
|
748
743
|
@staticmethod
|
|
749
744
|
def _prune_node_selector_requirements_from_node_selector_terms(
|
|
@@ -894,7 +889,6 @@ class AutoMountType(str, Enum):
|
|
|
894
889
|
return mlrun.platforms.other.mount_pvc if pvc_configured else None
|
|
895
890
|
|
|
896
891
|
def get_modifier(self):
|
|
897
|
-
|
|
898
892
|
return {
|
|
899
893
|
AutoMountType.none: None,
|
|
900
894
|
AutoMountType.v3io_credentials: mlrun.v3io_cred,
|
mlrun/secrets.py
CHANGED
mlrun/serving/states.py
CHANGED
|
@@ -469,7 +469,6 @@ class TaskStep(BaseStep):
|
|
|
469
469
|
class_name = class_name.__name__
|
|
470
470
|
elif not class_object:
|
|
471
471
|
if class_name == "$remote":
|
|
472
|
-
|
|
473
472
|
from mlrun.serving.remote import RemoteStep
|
|
474
473
|
|
|
475
474
|
class_object = RemoteStep
|
|
@@ -1130,7 +1129,6 @@ class FlowStep(BaseStep):
|
|
|
1130
1129
|
return event
|
|
1131
1130
|
|
|
1132
1131
|
def run(self, event, *args, **kwargs):
|
|
1133
|
-
|
|
1134
1132
|
if self._controller:
|
|
1135
1133
|
# async flow (using storey)
|
|
1136
1134
|
event._awaitable_result = None
|
mlrun/serving/utils.py
CHANGED
mlrun/serving/v1_serving.py
CHANGED
mlrun/track/tracker.py
CHANGED
mlrun/track/tracker_manager.py
CHANGED
|
@@ -45,7 +45,6 @@ class TrackerManager(metaclass=Singleton):
|
|
|
45
45
|
|
|
46
46
|
# Check general config for tracking usage, if false we return an empty manager
|
|
47
47
|
if mlconf.external_platform_tracking.enabled:
|
|
48
|
-
|
|
49
48
|
# Check if the available trackers were collected:
|
|
50
49
|
if _AVAILABLE_TRACKERS is None:
|
|
51
50
|
self._collect_available_trackers()
|
|
@@ -85,6 +84,9 @@ class TrackerManager(metaclass=Singleton):
|
|
|
85
84
|
|
|
86
85
|
:return: The context updated with the trackers products.
|
|
87
86
|
"""
|
|
87
|
+
if not self._trackers:
|
|
88
|
+
return context
|
|
89
|
+
|
|
88
90
|
# Check if the context received is a dict to initialize it as an `MLClientCtx` object:
|
|
89
91
|
is_context_dict = isinstance(context, dict)
|
|
90
92
|
if is_context_dict:
|
mlrun/utils/azure_vault.py
CHANGED
|
@@ -21,7 +21,6 @@ from mlrun.utils import logger
|
|
|
21
21
|
def evaluate_condition_in_separate_process(
|
|
22
22
|
condition: str, context: typing.Dict[str, typing.Any], timeout: int = 5
|
|
23
23
|
):
|
|
24
|
-
|
|
25
24
|
if not condition:
|
|
26
25
|
return True
|
|
27
26
|
|
|
@@ -52,7 +51,6 @@ def _evaluate_condition_wrapper(
|
|
|
52
51
|
|
|
53
52
|
|
|
54
53
|
def _evaluate_condition(condition: str, context: typing.Dict[str, typing.Any]):
|
|
55
|
-
|
|
56
54
|
import jinja2.sandbox
|
|
57
55
|
|
|
58
56
|
jinja_env = jinja2.sandbox.SandboxedEnvironment()
|
mlrun/utils/helpers.py
CHANGED
|
@@ -952,7 +952,6 @@ def fill_object_hash(object_dict, uid_property_name, tag=""):
|
|
|
952
952
|
|
|
953
953
|
|
|
954
954
|
def fill_artifact_object_hash(object_dict, iteration=None, producer_id=None):
|
|
955
|
-
|
|
956
955
|
# remove artifact related fields before calculating hash
|
|
957
956
|
object_dict.setdefault("metadata", {})
|
|
958
957
|
labels = object_dict["metadata"].pop("labels", None)
|
mlrun/utils/logger.py
CHANGED
|
@@ -36,7 +36,6 @@ class _NotificationPusherBase(object):
|
|
|
36
36
|
def _push(
|
|
37
37
|
self, sync_push_callback: typing.Callable, async_push_callback: typing.Callable
|
|
38
38
|
):
|
|
39
|
-
|
|
40
39
|
if mlrun.utils.helpers.is_running_in_jupyter_notebook():
|
|
41
40
|
# Running in Jupyter notebook.
|
|
42
41
|
# In this case, we need to create a new thread, run a separate event loop in
|
|
@@ -88,7 +87,6 @@ class _NotificationPusherBase(object):
|
|
|
88
87
|
|
|
89
88
|
|
|
90
89
|
class NotificationPusher(_NotificationPusherBase):
|
|
91
|
-
|
|
92
90
|
messages = {
|
|
93
91
|
"completed": "{resource} completed",
|
|
94
92
|
"error": "{resource} failed",
|
|
@@ -384,7 +382,6 @@ class NotificationPusher(_NotificationPusherBase):
|
|
|
384
382
|
# but also for human readability reasons.
|
|
385
383
|
notification.reason = notification.reason[:255]
|
|
386
384
|
else:
|
|
387
|
-
|
|
388
385
|
# empty out the reason if the notification is in a non-error state
|
|
389
386
|
# in case a retry would kick in (when such mechanism would be implemented)
|
|
390
387
|
notification.reason = None
|
mlrun/utils/version/version.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: mlrun
|
|
3
|
-
Version: 1.6.0rc14
|
|
3
|
+
Version: 1.6.0rc16
|
|
4
4
|
Summary: Tracking and config of machine learning runs
|
|
5
5
|
Home-page: https://github.com/mlrun/mlrun
|
|
6
6
|
Author: Yaron Haviv
|
|
@@ -192,7 +192,7 @@ Requires-Dist: sqlalchemy ~=1.4 ; extra == 'sqlalchemy'
|
|
|
192
192
|
[](https://opensource.org/licenses/Apache-2.0)
|
|
193
193
|
[](https://pypi.python.org/pypi/mlrun/)
|
|
194
194
|
[](https://mlrun.readthedocs.io/en/latest/?badge=latest)
|
|
195
|
-
[](https://github.com/astral-sh/ruff)
|
|
196
196
|

|
|
197
197
|

|
|
198
198
|
[](https://mlopslive.slack.com)
|
|
@@ -226,7 +226,7 @@ See: **Docs:** [Projects and Automation](https://docs.mlrun.org/en/latest/projec
|
|
|
226
226
|
|
|
227
227
|
### Ingest and process data
|
|
228
228
|
|
|
229
|
-
MLRun provides abstract interfaces to various offline and online [**data sources**](
|
|
229
|
+
MLRun provides abstract interfaces to various offline and online [**data sources**](https://docs.mlrun.org/en/latest/store/datastore.html), supports batch or realtime data processing at scale, data lineage and versioning, structured and unstructured data, and more.
|
|
230
230
|
In addition, the MLRun [**Feature Store**](https://docs.mlrun.org/en/latest/feature-store/feature-store.html) automates the collection, transformation, storage, catalog, serving, and monitoring of data features across the ML lifecycle and enables feature reuse and sharing.
|
|
231
231
|
|
|
232
232
|
See: **Docs:** [Ingest and process data](https://docs.mlrun.org/en/latest/data-prep/index.html), [Feature Store](https://docs.mlrun.org/en/latest/feature-store/feature-store.html), [Data & Artifacts](https://docs.mlrun.org/en/latest/concepts/data.html); **Tutorials:** [Quick start](https://docs.mlrun.org/en/latest/tutorials/01-mlrun-basics.html), [Feature Store](https://docs.mlrun.org/en/latest/feature-store/basic-demo.html).
|