mlrun-1.6.4rc7-py3-none-any.whl → mlrun-1.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlrun/__init__.py +11 -1
- mlrun/__main__.py +40 -122
- mlrun/alerts/__init__.py +15 -0
- mlrun/alerts/alert.py +248 -0
- mlrun/api/schemas/__init__.py +5 -4
- mlrun/artifacts/__init__.py +8 -3
- mlrun/artifacts/base.py +47 -257
- mlrun/artifacts/dataset.py +11 -192
- mlrun/artifacts/manager.py +79 -47
- mlrun/artifacts/model.py +31 -159
- mlrun/artifacts/plots.py +23 -380
- mlrun/common/constants.py +74 -1
- mlrun/common/db/sql_session.py +5 -5
- mlrun/common/formatters/__init__.py +21 -0
- mlrun/common/formatters/artifact.py +45 -0
- mlrun/common/formatters/base.py +113 -0
- mlrun/common/formatters/feature_set.py +33 -0
- mlrun/common/formatters/function.py +46 -0
- mlrun/common/formatters/pipeline.py +53 -0
- mlrun/common/formatters/project.py +51 -0
- mlrun/common/formatters/run.py +29 -0
- mlrun/common/helpers.py +12 -3
- mlrun/common/model_monitoring/helpers.py +9 -5
- mlrun/{runtimes → common/runtimes}/constants.py +37 -9
- mlrun/common/schemas/__init__.py +31 -5
- mlrun/common/schemas/alert.py +202 -0
- mlrun/common/schemas/api_gateway.py +196 -0
- mlrun/common/schemas/artifact.py +25 -4
- mlrun/common/schemas/auth.py +16 -5
- mlrun/common/schemas/background_task.py +1 -1
- mlrun/common/schemas/client_spec.py +4 -2
- mlrun/common/schemas/common.py +7 -4
- mlrun/common/schemas/constants.py +3 -0
- mlrun/common/schemas/feature_store.py +74 -44
- mlrun/common/schemas/frontend_spec.py +15 -7
- mlrun/common/schemas/function.py +12 -1
- mlrun/common/schemas/hub.py +11 -18
- mlrun/common/schemas/memory_reports.py +2 -2
- mlrun/common/schemas/model_monitoring/__init__.py +20 -4
- mlrun/common/schemas/model_monitoring/constants.py +123 -42
- mlrun/common/schemas/model_monitoring/grafana.py +13 -9
- mlrun/common/schemas/model_monitoring/model_endpoints.py +101 -54
- mlrun/common/schemas/notification.py +71 -14
- mlrun/common/schemas/object.py +2 -2
- mlrun/{model_monitoring/controller_handler.py → common/schemas/pagination.py} +9 -12
- mlrun/common/schemas/pipeline.py +8 -1
- mlrun/common/schemas/project.py +69 -18
- mlrun/common/schemas/runs.py +7 -1
- mlrun/common/schemas/runtime_resource.py +8 -12
- mlrun/common/schemas/schedule.py +4 -4
- mlrun/common/schemas/tag.py +1 -2
- mlrun/common/schemas/workflow.py +12 -4
- mlrun/common/types.py +14 -1
- mlrun/config.py +154 -69
- mlrun/data_types/data_types.py +6 -1
- mlrun/data_types/spark.py +2 -2
- mlrun/data_types/to_pandas.py +67 -37
- mlrun/datastore/__init__.py +6 -8
- mlrun/datastore/alibaba_oss.py +131 -0
- mlrun/datastore/azure_blob.py +143 -42
- mlrun/datastore/base.py +102 -58
- mlrun/datastore/datastore.py +34 -13
- mlrun/datastore/datastore_profile.py +146 -20
- mlrun/datastore/dbfs_store.py +3 -7
- mlrun/datastore/filestore.py +1 -4
- mlrun/datastore/google_cloud_storage.py +97 -33
- mlrun/datastore/hdfs.py +56 -0
- mlrun/datastore/inmem.py +6 -3
- mlrun/datastore/redis.py +7 -2
- mlrun/datastore/s3.py +34 -12
- mlrun/datastore/snowflake_utils.py +45 -0
- mlrun/datastore/sources.py +303 -111
- mlrun/datastore/spark_utils.py +31 -2
- mlrun/datastore/store_resources.py +9 -7
- mlrun/datastore/storeytargets.py +151 -0
- mlrun/datastore/targets.py +453 -176
- mlrun/datastore/utils.py +72 -58
- mlrun/datastore/v3io.py +6 -1
- mlrun/db/base.py +274 -41
- mlrun/db/factory.py +1 -1
- mlrun/db/httpdb.py +893 -225
- mlrun/db/nopdb.py +291 -33
- mlrun/errors.py +36 -6
- mlrun/execution.py +115 -42
- mlrun/feature_store/__init__.py +0 -2
- mlrun/feature_store/api.py +65 -73
- mlrun/feature_store/common.py +7 -12
- mlrun/feature_store/feature_set.py +76 -55
- mlrun/feature_store/feature_vector.py +39 -31
- mlrun/feature_store/ingestion.py +7 -6
- mlrun/feature_store/retrieval/base.py +16 -11
- mlrun/feature_store/retrieval/dask_merger.py +2 -0
- mlrun/feature_store/retrieval/job.py +13 -4
- mlrun/feature_store/retrieval/local_merger.py +2 -0
- mlrun/feature_store/retrieval/spark_merger.py +24 -32
- mlrun/feature_store/steps.py +45 -34
- mlrun/features.py +11 -21
- mlrun/frameworks/_common/artifacts_library.py +9 -9
- mlrun/frameworks/_common/mlrun_interface.py +5 -5
- mlrun/frameworks/_common/model_handler.py +48 -48
- mlrun/frameworks/_common/plan.py +5 -6
- mlrun/frameworks/_common/producer.py +3 -4
- mlrun/frameworks/_common/utils.py +5 -5
- mlrun/frameworks/_dl_common/loggers/logger.py +6 -7
- mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +9 -9
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +23 -47
- mlrun/frameworks/_ml_common/artifacts_library.py +1 -2
- mlrun/frameworks/_ml_common/loggers/logger.py +3 -4
- mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +4 -5
- mlrun/frameworks/_ml_common/model_handler.py +24 -24
- mlrun/frameworks/_ml_common/pkl_model_server.py +2 -2
- mlrun/frameworks/_ml_common/plan.py +2 -2
- mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/feature_importance_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
- mlrun/frameworks/_ml_common/utils.py +4 -4
- mlrun/frameworks/auto_mlrun/auto_mlrun.py +9 -9
- mlrun/frameworks/huggingface/model_server.py +4 -4
- mlrun/frameworks/lgbm/__init__.py +33 -33
- mlrun/frameworks/lgbm/callbacks/callback.py +2 -4
- mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -5
- mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -5
- mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +1 -3
- mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +6 -6
- mlrun/frameworks/lgbm/model_handler.py +10 -10
- mlrun/frameworks/lgbm/model_server.py +6 -6
- mlrun/frameworks/lgbm/utils.py +5 -5
- mlrun/frameworks/onnx/dataset.py +8 -8
- mlrun/frameworks/onnx/mlrun_interface.py +3 -3
- mlrun/frameworks/onnx/model_handler.py +6 -6
- mlrun/frameworks/onnx/model_server.py +7 -7
- mlrun/frameworks/parallel_coordinates.py +6 -6
- mlrun/frameworks/pytorch/__init__.py +18 -18
- mlrun/frameworks/pytorch/callbacks/callback.py +4 -5
- mlrun/frameworks/pytorch/callbacks/logging_callback.py +17 -17
- mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +23 -29
- mlrun/frameworks/pytorch/callbacks_handler.py +38 -38
- mlrun/frameworks/pytorch/mlrun_interface.py +20 -20
- mlrun/frameworks/pytorch/model_handler.py +17 -17
- mlrun/frameworks/pytorch/model_server.py +7 -7
- mlrun/frameworks/sklearn/__init__.py +13 -13
- mlrun/frameworks/sklearn/estimator.py +4 -4
- mlrun/frameworks/sklearn/metrics_library.py +14 -14
- mlrun/frameworks/sklearn/mlrun_interface.py +16 -9
- mlrun/frameworks/sklearn/model_handler.py +2 -2
- mlrun/frameworks/tf_keras/__init__.py +10 -7
- mlrun/frameworks/tf_keras/callbacks/logging_callback.py +15 -15
- mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +19 -23
- mlrun/frameworks/tf_keras/mlrun_interface.py +9 -11
- mlrun/frameworks/tf_keras/model_handler.py +14 -14
- mlrun/frameworks/tf_keras/model_server.py +6 -6
- mlrun/frameworks/xgboost/__init__.py +13 -13
- mlrun/frameworks/xgboost/model_handler.py +6 -6
- mlrun/k8s_utils.py +61 -17
- mlrun/launcher/__init__.py +1 -1
- mlrun/launcher/base.py +16 -15
- mlrun/launcher/client.py +13 -11
- mlrun/launcher/factory.py +1 -1
- mlrun/launcher/local.py +23 -13
- mlrun/launcher/remote.py +17 -10
- mlrun/lists.py +7 -6
- mlrun/model.py +478 -103
- mlrun/model_monitoring/__init__.py +1 -1
- mlrun/model_monitoring/api.py +163 -371
- mlrun/{runtimes/mpijob/v1alpha1.py → model_monitoring/applications/__init__.py} +9 -15
- mlrun/model_monitoring/applications/_application_steps.py +188 -0
- mlrun/model_monitoring/applications/base.py +108 -0
- mlrun/model_monitoring/applications/context.py +341 -0
- mlrun/model_monitoring/{evidently_application.py → applications/evidently_base.py} +27 -22
- mlrun/model_monitoring/applications/histogram_data_drift.py +354 -0
- mlrun/model_monitoring/applications/results.py +99 -0
- mlrun/model_monitoring/controller.py +131 -278
- mlrun/model_monitoring/db/__init__.py +18 -0
- mlrun/model_monitoring/db/stores/__init__.py +136 -0
- mlrun/model_monitoring/db/stores/base/__init__.py +15 -0
- mlrun/model_monitoring/db/stores/base/store.py +213 -0
- mlrun/model_monitoring/db/stores/sqldb/__init__.py +13 -0
- mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +71 -0
- mlrun/model_monitoring/db/stores/sqldb/models/base.py +190 -0
- mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +103 -0
- mlrun/model_monitoring/{stores/models/mysql.py → db/stores/sqldb/models/sqlite.py} +19 -13
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +659 -0
- mlrun/model_monitoring/db/stores/v3io_kv/__init__.py +13 -0
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +726 -0
- mlrun/model_monitoring/db/tsdb/__init__.py +105 -0
- mlrun/model_monitoring/db/tsdb/base.py +448 -0
- mlrun/model_monitoring/db/tsdb/helpers.py +30 -0
- mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +279 -0
- mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +42 -0
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +507 -0
- mlrun/model_monitoring/db/tsdb/v3io/__init__.py +15 -0
- mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +158 -0
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +849 -0
- mlrun/model_monitoring/features_drift_table.py +134 -106
- mlrun/model_monitoring/helpers.py +199 -55
- mlrun/model_monitoring/metrics/__init__.py +13 -0
- mlrun/model_monitoring/metrics/histogram_distance.py +127 -0
- mlrun/model_monitoring/model_endpoint.py +3 -2
- mlrun/model_monitoring/stream_processing.py +131 -398
- mlrun/model_monitoring/tracking_policy.py +9 -2
- mlrun/model_monitoring/writer.py +161 -125
- mlrun/package/__init__.py +6 -6
- mlrun/package/context_handler.py +5 -5
- mlrun/package/packager.py +7 -7
- mlrun/package/packagers/default_packager.py +8 -8
- mlrun/package/packagers/numpy_packagers.py +15 -15
- mlrun/package/packagers/pandas_packagers.py +5 -5
- mlrun/package/packagers/python_standard_library_packagers.py +10 -10
- mlrun/package/packagers_manager.py +19 -23
- mlrun/package/utils/_formatter.py +6 -6
- mlrun/package/utils/_pickler.py +2 -2
- mlrun/package/utils/_supported_format.py +4 -4
- mlrun/package/utils/log_hint_utils.py +2 -2
- mlrun/package/utils/type_hint_utils.py +4 -9
- mlrun/platforms/__init__.py +11 -10
- mlrun/platforms/iguazio.py +24 -203
- mlrun/projects/operations.py +52 -25
- mlrun/projects/pipelines.py +191 -197
- mlrun/projects/project.py +1227 -400
- mlrun/render.py +16 -19
- mlrun/run.py +209 -184
- mlrun/runtimes/__init__.py +83 -15
- mlrun/runtimes/base.py +51 -35
- mlrun/runtimes/daskjob.py +17 -10
- mlrun/runtimes/databricks_job/databricks_cancel_task.py +1 -1
- mlrun/runtimes/databricks_job/databricks_runtime.py +8 -7
- mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
- mlrun/runtimes/funcdoc.py +1 -29
- mlrun/runtimes/function_reference.py +1 -1
- mlrun/runtimes/kubejob.py +34 -128
- mlrun/runtimes/local.py +40 -11
- mlrun/runtimes/mpijob/__init__.py +0 -20
- mlrun/runtimes/mpijob/abstract.py +9 -10
- mlrun/runtimes/mpijob/v1.py +1 -1
- mlrun/{model_monitoring/stores/models/sqlite.py → runtimes/nuclio/__init__.py} +7 -9
- mlrun/runtimes/nuclio/api_gateway.py +769 -0
- mlrun/runtimes/nuclio/application/__init__.py +15 -0
- mlrun/runtimes/nuclio/application/application.py +758 -0
- mlrun/runtimes/nuclio/application/reverse_proxy.go +95 -0
- mlrun/runtimes/{function.py → nuclio/function.py} +200 -83
- mlrun/runtimes/{nuclio.py → nuclio/nuclio.py} +6 -6
- mlrun/runtimes/{serving.py → nuclio/serving.py} +65 -68
- mlrun/runtimes/pod.py +281 -101
- mlrun/runtimes/remotesparkjob.py +12 -9
- mlrun/runtimes/sparkjob/spark3job.py +67 -51
- mlrun/runtimes/utils.py +41 -75
- mlrun/secrets.py +9 -5
- mlrun/serving/__init__.py +8 -1
- mlrun/serving/remote.py +2 -7
- mlrun/serving/routers.py +85 -69
- mlrun/serving/server.py +69 -44
- mlrun/serving/states.py +209 -36
- mlrun/serving/utils.py +22 -14
- mlrun/serving/v1_serving.py +6 -7
- mlrun/serving/v2_serving.py +129 -54
- mlrun/track/tracker.py +2 -1
- mlrun/track/tracker_manager.py +3 -3
- mlrun/track/trackers/mlflow_tracker.py +6 -2
- mlrun/utils/async_http.py +6 -8
- mlrun/utils/azure_vault.py +1 -1
- mlrun/utils/clones.py +1 -2
- mlrun/utils/condition_evaluator.py +3 -3
- mlrun/utils/db.py +21 -3
- mlrun/utils/helpers.py +405 -225
- mlrun/utils/http.py +3 -6
- mlrun/utils/logger.py +112 -16
- mlrun/utils/notifications/notification/__init__.py +17 -13
- mlrun/utils/notifications/notification/base.py +50 -2
- mlrun/utils/notifications/notification/console.py +2 -0
- mlrun/utils/notifications/notification/git.py +24 -1
- mlrun/utils/notifications/notification/ipython.py +3 -1
- mlrun/utils/notifications/notification/slack.py +96 -21
- mlrun/utils/notifications/notification/webhook.py +59 -2
- mlrun/utils/notifications/notification_pusher.py +149 -30
- mlrun/utils/regex.py +9 -0
- mlrun/utils/retryer.py +208 -0
- mlrun/utils/singleton.py +1 -1
- mlrun/utils/v3io_clients.py +4 -6
- mlrun/utils/version/version.json +2 -2
- mlrun/utils/version/version.py +2 -6
- mlrun-1.7.0.dist-info/METADATA +378 -0
- mlrun-1.7.0.dist-info/RECORD +351 -0
- {mlrun-1.6.4rc7.dist-info → mlrun-1.7.0.dist-info}/WHEEL +1 -1
- mlrun/feature_store/retrieval/conversion.py +0 -273
- mlrun/kfpops.py +0 -868
- mlrun/model_monitoring/application.py +0 -310
- mlrun/model_monitoring/batch.py +0 -1095
- mlrun/model_monitoring/prometheus.py +0 -219
- mlrun/model_monitoring/stores/__init__.py +0 -111
- mlrun/model_monitoring/stores/kv_model_endpoint_store.py +0 -576
- mlrun/model_monitoring/stores/model_endpoint_store.py +0 -147
- mlrun/model_monitoring/stores/models/__init__.py +0 -27
- mlrun/model_monitoring/stores/models/base.py +0 -84
- mlrun/model_monitoring/stores/sql_model_endpoint_store.py +0 -384
- mlrun/platforms/other.py +0 -306
- mlrun-1.6.4rc7.dist-info/METADATA +0 -272
- mlrun-1.6.4rc7.dist-info/RECORD +0 -314
- {mlrun-1.6.4rc7.dist-info → mlrun-1.7.0.dist-info}/LICENSE +0 -0
- {mlrun-1.6.4rc7.dist-info → mlrun-1.7.0.dist-info}/entry_points.txt +0 -0
- {mlrun-1.6.4rc7.dist-info → mlrun-1.7.0.dist-info}/top_level.txt +0 -0
mlrun/run.py
CHANGED
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import importlib.util as imputil
 import json
 import os
@@ -20,20 +21,24 @@ import tempfile
 import time
 import typing
 import uuid
+import warnings
 from base64 import b64decode
 from copy import deepcopy
 from os import environ, makedirs, path
 from pathlib import Path
-from typing import
+from typing import Optional, Union

 import nuclio
 import yaml
-from
+from mlrun_pipelines.common.models import RunStatuses
+from mlrun_pipelines.common.ops import format_summary_from_kfp_run, show_kfp_run
+from mlrun_pipelines.utils import get_client

+import mlrun.common.constants as mlrun_constants
+import mlrun.common.formatters
 import mlrun.common.schemas
 import mlrun.errors
 import mlrun.utils.helpers
-from mlrun.kfpops import format_summary_from_kfp_run, show_kfp_run

 from .common.helpers import parse_versioned_object_uri
 from .config import config as mlconf
@@ -47,7 +52,6 @@ from .runtimes import (
     KubejobRuntime,
     LocalRuntime,
     MpiRuntimeV1,
-    MpiRuntimeV1Alpha1,
     RemoteRuntime,
     RemoteSparkRuntime,
     RuntimeKinds,
@@ -57,53 +61,19 @@ from .runtimes import (
 )
 from .runtimes.databricks_job.databricks_runtime import DatabricksRuntime
 from .runtimes.funcdoc import update_function_entry_points
-from .runtimes.
+from .runtimes.nuclio.application import ApplicationRuntime
 from .runtimes.utils import add_code_metadata, global_context
 from .utils import (
+    RunKeys,
+    create_ipython_display,
     extend_hub_uri_if_needed,
     get_in,
     logger,
     retry_until_successful,
-    run_keys,
     update_in,
 )


-class RunStatuses(object):
-    succeeded = "Succeeded"
-    failed = "Failed"
-    skipped = "Skipped"
-    error = "Error"
-    running = "Running"
-
-    @staticmethod
-    def all():
-        return [
-            RunStatuses.succeeded,
-            RunStatuses.failed,
-            RunStatuses.skipped,
-            RunStatuses.error,
-            RunStatuses.running,
-        ]
-
-    @staticmethod
-    def stable_statuses():
-        return [
-            RunStatuses.succeeded,
-            RunStatuses.failed,
-            RunStatuses.skipped,
-            RunStatuses.error,
-        ]
-
-    @staticmethod
-    def transient_statuses():
-        return [
-            status
-            for status in RunStatuses.all()
-            if status not in RunStatuses.stable_statuses()
-        ]
-
-
 def function_to_module(code="", workdir=None, secrets=None, silent=False):
     """Load code, notebook or mlrun function as .py module
     this function can import a local/remote py file or notebook
@@ -114,16 +84,18 @@ def function_to_module(code="", workdir=None, secrets=None, silent=False):

     example::

-        mod = mlrun.function_to_module(
-        task = mlrun.new_task(inputs={
-        context = mlrun.get_or_create_ctx(
-        mod.my_job(context, p1=1, p2=
+        mod = mlrun.function_to_module("./examples/training.py")
+        task = mlrun.new_task(inputs={"infile.txt": "../examples/infile.txt"})
+        context = mlrun.get_or_create_ctx("myfunc", spec=task)
+        mod.my_job(context, p1=1, p2="x")
         print(context.to_yaml())

-        fn = mlrun.import_function(
+        fn = mlrun.import_function("hub://open-archive")
         mod = mlrun.function_to_module(fn)
-        data = mlrun.run.get_dataitem(
-
+        data = mlrun.run.get_dataitem(
+            "https://fpsignals-public.s3.amazonaws.com/catsndogs.tar.gz"
+        )
+        context = mlrun.get_or_create_ctx("myfunc")
         mod.open_archive(context, archive_url=data)
         print(context.to_yaml())

@@ -226,18 +198,19 @@ def load_func_code(command="", workdir=None, secrets=None, name="name"):
 def get_or_create_ctx(
     name: str,
     event=None,
-    spec=None,
+    spec: Optional[dict] = None,
     with_env: bool = True,
     rundb: str = "",
     project: str = "",
-    upload_artifacts=False,
-    labels: dict = None,
-):
-    """
+    upload_artifacts: bool = False,
+    labels: Optional[dict] = None,
+) -> MLClientCtx:
+    """
+    Called from within the user program to obtain a run context.

-
+    The run context is an interface for receiving parameters, data and logging
     run results, the run context is read from the event, spec, or environment
-    (in that order), user can also work without a context (local defaults mode)
+    (in that order), user can also work without a context (local defaults mode).

     all results are automatically stored in the "rundb" or artifact store,
     the path to the rundb can be specified in the call or obtained from env.
@@ -247,40 +220,56 @@ def get_or_create_ctx(
     :param spec: dictionary holding run spec
     :param with_env: look for context in environment vars, default True
     :param rundb: path/url to the metadata and artifact database
-    :param project: project to initiate the context in (by default mlrun.
+    :param project: project to initiate the context in (by default `mlrun.mlconf.default_project`)
     :param upload_artifacts: when using local context (not as part of a job/run), upload artifacts to the
                              system default artifact path location
-    :param labels:
+    :param labels: (deprecated - use spec instead) dict of the context labels.
     :return: execution context

     Examples::

         # load MLRUN runtime context (will be set by the runtime framework e.g. KubeFlow)
-        context = get_or_create_ctx(
+        context = get_or_create_ctx("train")

         # get parameters from the runtime context (or use defaults)
-        p1 = context.get_param(
-        p2 = context.get_param(
+        p1 = context.get_param("p1", 1)
+        p2 = context.get_param("p2", "a-string")

         # access input metadata, values, files, and secrets (passwords)
-        print(f
-        print(f
+        print(f"Run: {context.name} (uid={context.uid})")
+        print(f"Params: p1={p1}, p2={p2}")
         print(f'accesskey = {context.get_secret("ACCESS_KEY")}')
-        input_str = context.get_input(
-        print(f
+        input_str = context.get_input("infile.txt").get()
+        print(f"file: {input_str}")

         # RUN some useful code e.g. ML training, data prep, etc.

         # log scalar result values (job result metrics)
-        context.log_result(
-        context.log_result(
-        context.set_label(
+        context.log_result("accuracy", p1 * 2)
+        context.log_result("loss", p1 * 3)
+        context.set_label("framework", "sklearn")

         # log various types of artifacts (file, web page, table), will be versioned and visible in the UI
-        context.log_artifact(
-
+        context.log_artifact(
+            "model.txt", body=b"abc is 123", labels={"framework": "xgboost"}
+        )
+        context.log_artifact("results.html", body=b"<b> Some HTML <b>", viewer="web-app")

     """
+    if labels:
+        warnings.warn(
+            "The `labels` argument is deprecated and will be removed in 1.9.0. "
+            "Please use `spec` instead, e.g.:\n"
+            "spec={'metadata': {'labels': {'key': 'value'}}}",
+            FutureWarning,
+        )
+        if spec is None:
+            spec = {}
+        if "metadata" not in spec:
+            spec["metadata"] = {}
+        if "labels" not in spec["metadata"]:
+            spec["metadata"]["labels"] = {}
+        spec["metadata"]["labels"].update(labels)

     if global_context.get() and not spec and not event:
         return global_context.get()
@@ -308,7 +297,7 @@ def get_or_create_ctx(
     artifact_path = mlrun.utils.helpers.template_artifact_path(
         mlconf.artifact_path, project or mlconf.default_project
     )
-    update_in(newspec, ["spec",
+    update_in(newspec, ["spec", RunKeys.output_path], artifact_path)

     newspec.setdefault("metadata", {})
     update_in(newspec, "metadata.name", name, replace=False)
@@ -323,12 +312,17 @@ def get_or_create_ctx(
         newspec["metadata"].get("project") or project or mlconf.default_project
     )

+    newspec["metadata"].setdefault("labels", {})
+
+    # This function can also be called as a local run if it is not called within a function.
+    # It will create a local run, and the run kind must be local by default.
+    newspec["metadata"]["labels"].setdefault(
+        mlrun_constants.MLRunInternalLabels.kind, RuntimeKinds.local
+    )
+
     ctx = MLClientCtx.from_dict(
         newspec, rundb=out, autocommit=autocommit, tmp=tmp, host=socket.gethostname()
     )
-    labels = labels or {}
-    for key, val in labels.items():
-        ctx.set_label(key=key, value=val)
     global_context.set(ctx)
     return ctx

@@ -348,7 +342,9 @@ def import_function(url="", secrets=None, db="", project=None, new_name=None):

         function = mlrun.import_function("hub://auto-trainer")
         function = mlrun.import_function("./func.yaml")
-        function = mlrun.import_function(
+        function = mlrun.import_function(
+            "https://raw.githubusercontent.com/org/repo/func.yaml"
+        )

     :param url: path/url to Function Hub, db or function YAML file
     :param secrets: optional, credentials dict for DB or URL (s3, v3io, ...)
@@ -389,6 +385,8 @@ def import_function_to_dict(url, secrets=None):
     code = get_in(runtime, "spec.build.functionSourceCode")
     update_in(runtime, "metadata.build.code_origin", url)
     cmd = code_file = get_in(runtime, "spec.command", "")
+    # use kind = "job" by default if not specified
+    runtime.setdefault("kind", "job")
     if " " in cmd:
         code_file = cmd[: cmd.find(" ")]
     if runtime["kind"] in ["", "local"]:
@@ -425,19 +423,19 @@ def import_function_to_dict(url, secrets=None):


 def new_function(
-    name: str = "",
-    project: str = "",
-    tag: str = "",
-    kind: str = "",
-    command: str = "",
-    image: str = "",
-    args: list = None,
-    runtime=None,
-    mode=None,
-    handler: str = None,
-    source: str = None,
-    requirements: Union[str,
-    kfp=None,
+    name: Optional[str] = "",
+    project: Optional[str] = "",
+    tag: Optional[str] = "",
+    kind: Optional[str] = "",
+    command: Optional[str] = "",
+    image: Optional[str] = "",
+    args: Optional[list] = None,
+    runtime: Optional[Union[mlrun.runtimes.BaseRuntime, dict]] = None,
+    mode: Optional[str] = None,
+    handler: Optional[str] = None,
+    source: Optional[str] = None,
+    requirements: Union[str, list[str]] = None,
+    kfp: Optional[bool] = None,
     requirements_file: str = "",
 ):
     """Create a new ML function from base properties
@@ -445,12 +443,18 @@ def new_function(
     Example::

         # define a container based function (the `training.py` must exist in the container workdir)
-        f = new_function(
+        f = new_function(
+            command="training.py -x {x}", image="myrepo/image:latest", kind="job"
+        )
         f.run(params={"x": 5})

         # define a container based function which reads its source from a git archive
-        f = new_function(
-
+        f = new_function(
+            command="training.py -x {x}",
+            image="myrepo/image:latest",
+            kind="job",
+            source="git://github.com/mlrun/something.git",
+        )
         f.run(params={"x": 5})

         # define a local handler function (execute a local function handler)
@@ -535,9 +539,9 @@ def new_function(
     if source:
         runner.spec.build.source = source
     if handler:
-        if kind
+        if kind in RuntimeKinds.handlerless_runtimes():
             raise MLRunInvalidArgumentError(
-                "
+                f"Handler is not supported for {kind} runtime"
             )
         elif kind in RuntimeKinds.nuclio_runtimes():
             runner.spec.function_handler = handler
@@ -575,24 +579,23 @@ def _process_runtime(command, runtime, kind):


 def code_to_function(
-    name: str = "",
-    project: str = "",
-    tag: str = "",
-    filename: str = "",
-    handler: str = "",
-    kind: str = "",
-    image: str = None,
-    code_output: str = "",
+    name: Optional[str] = "",
+    project: Optional[str] = "",
+    tag: Optional[str] = "",
+    filename: Optional[str] = "",
+    handler: Optional[str] = "",
+    kind: Optional[str] = "",
+    image: Optional[str] = None,
+    code_output: Optional[str] = "",
     embed_code: bool = True,
-    description: str = "",
-    requirements: Union[str,
-    categories:
-    labels:
-    with_doc: bool = True,
-    ignored_tags=None,
-    requirements_file: str = "",
+    description: Optional[str] = "",
+    requirements: Optional[Union[str, list[str]]] = None,
+    categories: Optional[list[str]] = None,
+    labels: Optional[dict[str, str]] = None,
+    with_doc: Optional[bool] = True,
+    ignored_tags: Optional[str] = None,
+    requirements_file: Optional[str] = "",
 ) -> Union[
-    MpiRuntimeV1Alpha1,
     MpiRuntimeV1,
     RemoteRuntime,
     ServingRuntime,
@@ -602,6 +605,7 @@ def code_to_function(
     Spark3Runtime,
     RemoteSparkRuntime,
     DatabricksRuntime,
+    ApplicationRuntime,
 ]:
     """Convenience function to insert code and configure an mlrun runtime.

@@ -627,6 +631,8 @@ def code_to_function(
     - mpijob: run distributed Horovod jobs over the MPI job operator
     - spark: run distributed Spark job using Spark Kubernetes Operator
     - remote-spark: run distributed Spark job on remote Spark service
+    - databricks: run code on Databricks cluster (python scripts, Spark etc.)
+    - application: run a long living application (e.g. a web server, UI, etc.)

     Learn more about [Kinds of function (runtimes)](../concepts/functions-overview.html).

@@ -644,11 +650,10 @@ def code_to_function(
     :param embed_code: indicates whether or not to inject the code directly into the function runtime spec,
                        defaults to True
     :param description: short function description, defaults to ''
-    :param requirements: list of python packages or pip requirements file path, defaults to None
     :param requirements: a list of python packages
     :param requirements_file: path to a python requirements file
     :param categories: list of categories for mlrun Function Hub, defaults to None
-    :param labels:
+    :param labels: name/value pairs dict to tag the function with useful metadata, defaults to None
     :param with_doc: indicates whether to document the function parameters, defaults to True
     :param ignored_tags: notebook cells to ignore when converting notebooks to py code (separated by ';')

@@ -660,11 +665,15 @@ def code_to_function(
         import mlrun

         # create job function object from notebook code and add doc/metadata
-        fn = mlrun.code_to_function(
-
-
-
-
+        fn = mlrun.code_to_function(
+            "file_utils",
+            kind="job",
+            handler="open_archive",
+            image="mlrun/mlrun",
+            description="this function opens a zip archive into a local/mounted folder",
+            categories=["fileutils"],
+            labels={"author": "me"},
+        )

     example::

@@ -675,11 +684,15 @@ def code_to_function(
         Path("mover.py").touch()

         # create nuclio function object from python module call mover.py
-        fn = mlrun.code_to_function(
-
-
-
-
+        fn = mlrun.code_to_function(
+            "nuclio-mover",
+            kind="nuclio",
+            filename="mover.py",
+            image="python:3.9",
+            description="this function moves files from one system to another",
+            requirements=["pandas"],
+            labels={"author": "me"},
+        )

     """
     filebase, _ = path.splitext(path.basename(filename))
@@ -718,35 +731,33 @@ def code_to_function(
         fn.metadata.categories = categories
         fn.metadata.labels = labels or fn.metadata.labels

-    def resolve_nuclio_subkind(kind):
-        is_nuclio = kind.startswith("nuclio")
-        subkind = kind[kind.find(":") + 1 :] if is_nuclio and ":" in kind else None
-        if kind == RuntimeKinds.serving:
-            is_nuclio = True
-            subkind = serving_subkind
-        return is_nuclio, subkind
-
     if (
         not embed_code
         and not code_output
         and (not filename or filename.endswith(".ipynb"))
     ):
         raise ValueError(
-            "
+            "A valid code file must be specified "
             "when not using the embed_code option"
         )

     if kind == RuntimeKinds.databricks and not embed_code:
-        raise ValueError("
+        raise ValueError("Databricks tasks only support embed_code=True")

-
+    if kind == RuntimeKinds.application:
+        raise MLRunInvalidArgumentError(
+            "Embedding a code file is not supported for application runtime. "
+            "Code files should be specified via project/function source."
+        )
+
+    is_nuclio, sub_kind = RuntimeKinds.resolve_nuclio_sub_kind(kind)
     code_origin = add_name(add_code_metadata(filename), name)

     name, spec, code = nuclio.build_file(
         filename,
         name=name,
         handler=handler or "handler",
-        kind=
+        kind=sub_kind,
         ignored_tags=ignored_tags,
     )
     spec["spec"]["env"].append(
@@ -759,14 +770,14 @@ def code_to_function(
     if not kind and spec_kind not in ["", "Function"]:
         kind = spec_kind.lower()

-    # if its a nuclio
-    is_nuclio,
+    # if its a nuclio sub kind, redo nb parsing
+    is_nuclio, sub_kind = RuntimeKinds.resolve_nuclio_sub_kind(kind)
     if is_nuclio:
         name, spec, code = nuclio.build_file(
             filename,
             name=name,
             handler=handler or "handler",
-            kind=
+            kind=sub_kind,
             ignored_tags=ignored_tags,
         )

@@ -780,33 +791,33 @@ def code_to_function(
         raise ValueError("code_output option is only used with notebooks")

     if is_nuclio:
-
-
-
-
-
-        # default_handler is only used in :mlrun
-
-
+        mlrun.utils.helpers.validate_single_def_handler(
+            function_kind=sub_kind, code=code
+        )
+
+        runtime = RuntimeKinds.resolve_nuclio_runtime(kind, sub_kind)
+        # default_handler is only used in :mlrun sub kind, determine the handler to invoke in function.run()
+        runtime.spec.default_handler = handler if sub_kind == "mlrun" else ""
+        runtime.spec.function_handler = (
             handler if handler and ":" in handler else get_in(spec, "spec.handler")
         )
         if not embed_code:
-
+            runtime.spec.source = filename
         nuclio_runtime = get_in(spec, "spec.runtime")
         if nuclio_runtime and not nuclio_runtime.startswith("py"):
-
+            runtime.spec.nuclio_runtime = nuclio_runtime
         if not name:
-            raise ValueError("
-
-
-
-        update_common(
-        return
+            raise ValueError("Missing required parameter: name")
+        runtime.metadata.name = name
+        runtime.spec.build.code_origin = code_origin
+        runtime.spec.build.origin_filename = filename or (name + ".ipynb")
+        update_common(runtime, spec)
+        return runtime

     if kind is None or kind in ["", "Function"]:
         raise ValueError("please specify the function kind")
     elif kind in RuntimeKinds.all():
-
+        runtime = get_runtime_class(kind)()
     else:
         raise ValueError(f"unsupported runtime ({kind})")

@@ -815,10 +826,10 @@ def code_to_function(
     if not name:
         raise ValueError("name must be specified")
     h = get_in(spec, "spec.handler", "").split(":")
-
-
-
-    build =
+    runtime.handler = h[0] if len(h) <= 1 else h[1]
+    runtime.metadata = get_in(spec, "spec.metadata")
+    runtime.metadata.name = name
+    build = runtime.spec.build
     build.code_origin = code_origin
     build.origin_filename = filename or (name + ".ipynb")
     build.extra = get_in(spec, "spec.build.extra")
@@ -826,18 +837,18 @@ def code_to_function(
     build.builder_env = get_in(spec, "spec.build.builder_env")
     if not embed_code:
         if code_output:
-
+            runtime.spec.command = code_output
         else:
-
+            runtime.spec.command = filename

     build.image = get_in(spec, "spec.build.image")
-    update_common(
-
+    update_common(runtime, spec)
+    runtime.prepare_image_for_deploy()

     if with_doc:
-        update_function_entry_points(
-
-    return
+        update_function_entry_points(runtime, code)
+    runtime.spec.default_handler = handler
+    return runtime


 def _run_pipeline(
@@ -898,7 +909,7 @@ def _run_pipeline(
 def wait_for_pipeline_completion(
     run_id,
     timeout=60 * 60,
-    expected_statuses:
+    expected_statuses: list[str] = None,
     namespace=None,
     remote=True,
     project: str = None,
@@ -931,10 +942,12 @@ def wait_for_pipeline_completion(
     if remote:
         mldb = mlrun.db.get_run_db()

+        dag_display_id = create_ipython_display()
+
         def _wait_for_pipeline_completion():
             pipeline = mldb.get_pipeline(run_id, namespace=namespace, project=project)
             pipeline_status = pipeline["run"]["status"]
-            show_kfp_run(pipeline,
+            show_kfp_run(pipeline, dag_display_id=dag_display_id, with_html=False)
             if pipeline_status not in RunStatuses.stable_statuses():
                 logger.debug(
                     "Waiting for pipeline completion",
@@ -959,7 +972,7 @@ def wait_for_pipeline_completion(
             _wait_for_pipeline_completion,
         )
     else:
-        client =
+        client = get_client(namespace=namespace)
        resp = client.wait_for_run_completion(run_id, timeout)
         if resp:
             resp = resp.to_dict()
@@ -967,7 +980,7 @@ def wait_for_pipeline_completion(
             show_kfp_run(resp)

     status = resp["run"]["status"] if resp else "unknown"
-    message = resp["run"].get("message", "")
+    message = resp["run"].get("message", "") if resp else ""
     if expected_statuses:
         if status not in expected_statuses:
             raise RuntimeError(
@@ -989,8 +1002,8 @@ def get_pipeline(
     run_id,
     namespace=None,
     format_: Union[
-        str, mlrun.common.
-    ] = mlrun.common.
+        str, mlrun.common.formatters.PipelineFormat
+    ] = mlrun.common.formatters.PipelineFormat.summary,
     project: str = None,
     remote: bool = True,
 ):
@@ -1004,7 +1017,7 @@ def get_pipeline(
     :param project: the project of the pipeline run
     :param remote: read kfp data from mlrun service (default=True)

-    :return: kfp run
+    :return: kfp run
     """
     namespace = namespace or mlconf.namespace
     if remote:
@@ -1020,15 +1033,15 @@ def get_pipeline(
         )

     else:
-        client =
+        client = get_client(namespace=namespace)
         resp = client.get_run(run_id)
         if resp:
             resp = resp.to_dict()
             if (
                 not format_
-                or format_ == mlrun.common.
+                or format_ == mlrun.common.formatters.PipelineFormat.summary.value
             ):
-                resp =
+                resp = mlrun.common.formatters.PipelineFormat.format_obj(resp, format_)

     show_kfp_run(resp)
     return resp
@@ -1042,8 +1055,8 @@ def list_pipelines(
     filter_="",
     namespace=None,
     project="*",
-    format_: mlrun.common.
-) ->
+    format_: mlrun.common.formatters.PipelineFormat = mlrun.common.formatters.PipelineFormat.metadata_only,
+) -> tuple[int, Optional[int], list[dict]]:
     """List pipelines

     :param full: Deprecated, use `format_` instead. if True will set `format_` to full, otherwise `format_` will
@@ -1062,7 +1075,7 @@ def list_pipelines(
     :param format_: Control what will be returned (full/metadata_only/name_only)
     """
     if full:
-        format_ = mlrun.common.
+        format_ = mlrun.common.formatters.PipelineFormat.full
     run_db = mlrun.db.get_run_db()
     pipelines = run_db.list_pipelines(
         project, namespace, sort_by, page_token, filter_, format_, page_size
@@ -1098,13 +1111,25 @@ def wait_for_runs_completion(
     example::

         # run two training functions in parallel and wait for the results
-        inputs = {
-        run1 = train.run(
-
-
-
-
-
+        inputs = {"dataset": cleaned_data}
+        run1 = train.run(
+            name="train_lr",
+            inputs=inputs,
+            watch=False,
+            params={
+                "model_pkg_class": "sklearn.linear_model.LogisticRegression",
+                "label_column": "label",
+            },
+        )
+        run2 = train.run(
+            name="train_lr",
+            inputs=inputs,
+            watch=False,
+            params={
+                "model_pkg_class": "sklearn.ensemble.RandomForestClassifier",
+                "label_column": "label",
+            },
+        )
         completed = wait_for_runs_completion([run1, run2])

     :param runs: list of run objects (the returned values of function.run())
@@ -1119,7 +1144,7 @@ def wait_for_runs_completion(
     running = []
     for run in runs:
         state = run.state()
-        if state in mlrun.runtimes.constants.RunStates.terminal_states():
+        if state in mlrun.common.runtimes.constants.RunStates.terminal_states():
             completed.append(run)
         else:
             running.append(run)
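Two changes in this diff ripple into user code: `RunStatuses` is no longer defined in `mlrun/run.py` (the module now imports it from `mlrun_pipelines.common.models`), and the `labels` argument of `get_or_create_ctx` now emits a FutureWarning asking callers to pass labels through `spec`. A minimal adaptation sketch, inferred only from the import paths and the deprecation message shown above (not an official migration guide)::

    import mlrun
    # RunStatuses moved out of mlrun/run.py; this diff imports it from mlrun_pipelines
    from mlrun_pipelines.common.models import RunStatuses

    # pass labels through the run spec instead of the deprecated `labels` argument,
    # mirroring the example given in the FutureWarning message
    context = mlrun.get_or_create_ctx(
        "train",
        spec={"metadata": {"labels": {"framework": "sklearn"}}},
    )
    context.log_result("accuracy", 0.98)

    # stable_statuses() is still available on the relocated class
    # (wait_for_pipeline_completion in this file keeps using it)
    print(RunStatuses.stable_statuses())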