mlrun 1.7.0rc7__py3-none-any.whl → 1.7.0rc11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mlrun might be problematic.
- mlrun/__init__.py +1 -0
- mlrun/__main__.py +2 -0
- mlrun/artifacts/model.py +29 -25
- mlrun/common/schemas/__init__.py +4 -0
- mlrun/common/schemas/alert.py +122 -0
- mlrun/common/schemas/api_gateway.py +8 -1
- mlrun/common/schemas/auth.py +4 -0
- mlrun/common/schemas/client_spec.py +1 -0
- mlrun/common/schemas/hub.py +7 -9
- mlrun/common/schemas/model_monitoring/constants.py +4 -2
- mlrun/{datastore/helpers.py → common/schemas/pagination.py} +11 -3
- mlrun/common/schemas/project.py +15 -10
- mlrun/config.py +35 -13
- mlrun/datastore/__init__.py +3 -7
- mlrun/datastore/base.py +6 -5
- mlrun/datastore/datastore_profile.py +19 -1
- mlrun/datastore/snowflake_utils.py +43 -0
- mlrun/datastore/sources.py +18 -30
- mlrun/datastore/targets.py +140 -12
- mlrun/datastore/utils.py +10 -5
- mlrun/datastore/v3io.py +27 -50
- mlrun/db/base.py +88 -2
- mlrun/db/httpdb.py +314 -41
- mlrun/db/nopdb.py +142 -0
- mlrun/execution.py +21 -14
- mlrun/feature_store/api.py +9 -5
- mlrun/feature_store/feature_set.py +39 -23
- mlrun/feature_store/feature_vector.py +2 -1
- mlrun/feature_store/retrieval/spark_merger.py +27 -23
- mlrun/feature_store/steps.py +30 -19
- mlrun/features.py +4 -13
- mlrun/frameworks/auto_mlrun/auto_mlrun.py +2 -2
- mlrun/frameworks/lgbm/__init__.py +1 -1
- mlrun/frameworks/lgbm/callbacks/callback.py +2 -4
- mlrun/frameworks/lgbm/model_handler.py +1 -1
- mlrun/frameworks/pytorch/__init__.py +2 -2
- mlrun/frameworks/sklearn/__init__.py +1 -1
- mlrun/frameworks/tf_keras/__init__.py +1 -1
- mlrun/frameworks/tf_keras/callbacks/logging_callback.py +1 -1
- mlrun/frameworks/tf_keras/mlrun_interface.py +2 -2
- mlrun/frameworks/xgboost/__init__.py +1 -1
- mlrun/kfpops.py +2 -5
- mlrun/launcher/base.py +1 -1
- mlrun/launcher/client.py +2 -2
- mlrun/model.py +2 -2
- mlrun/model_monitoring/application.py +11 -2
- mlrun/model_monitoring/applications/histogram_data_drift.py +3 -3
- mlrun/model_monitoring/controller.py +2 -3
- mlrun/model_monitoring/helpers.py +3 -1
- mlrun/model_monitoring/stream_processing.py +0 -1
- mlrun/model_monitoring/writer.py +32 -0
- mlrun/package/packagers_manager.py +1 -0
- mlrun/platforms/__init__.py +1 -1
- mlrun/platforms/other.py +1 -1
- mlrun/projects/operations.py +11 -4
- mlrun/projects/pipelines.py +1 -1
- mlrun/projects/project.py +180 -73
- mlrun/run.py +77 -41
- mlrun/runtimes/__init__.py +16 -0
- mlrun/runtimes/base.py +4 -1
- mlrun/runtimes/kubejob.py +26 -121
- mlrun/runtimes/mpijob/abstract.py +8 -8
- mlrun/runtimes/nuclio/api_gateway.py +58 -8
- mlrun/runtimes/nuclio/application/application.py +79 -1
- mlrun/runtimes/nuclio/application/reverse_proxy.go +9 -1
- mlrun/runtimes/nuclio/function.py +20 -13
- mlrun/runtimes/nuclio/serving.py +11 -10
- mlrun/runtimes/pod.py +148 -3
- mlrun/runtimes/utils.py +0 -28
- mlrun/secrets.py +6 -2
- mlrun/serving/remote.py +2 -3
- mlrun/serving/routers.py +7 -4
- mlrun/serving/server.py +1 -1
- mlrun/serving/states.py +14 -38
- mlrun/serving/v2_serving.py +8 -7
- mlrun/utils/helpers.py +1 -1
- mlrun/utils/http.py +1 -1
- mlrun/utils/notifications/notification/base.py +12 -0
- mlrun/utils/notifications/notification/console.py +2 -0
- mlrun/utils/notifications/notification/git.py +3 -1
- mlrun/utils/notifications/notification/ipython.py +2 -0
- mlrun/utils/notifications/notification/slack.py +41 -13
- mlrun/utils/notifications/notification/webhook.py +11 -1
- mlrun/utils/retryer.py +3 -2
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc11.dist-info}/METADATA +15 -15
- {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc11.dist-info}/RECORD +91 -89
- {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc11.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc11.dist-info}/WHEEL +0 -0
- {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc11.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc7.dist-info → mlrun-1.7.0rc11.dist-info}/top_level.txt +0 -0
@@ -223,7 +223,42 @@ class ApplicationRuntime(RemoteRuntime):
         auth_info: AuthInfo = None,
         builder_env: dict = None,
         force_build: bool = False,
+        with_mlrun=None,
+        skip_deployed=False,
+        is_kfp=False,
+        mlrun_version_specifier=None,
+        show_on_failure: bool = False,
     ):
+        """
+        Deploy function, builds the application image if required (self.requires_build()) or force_build is True,
+        Once the image is built, the function is deployed.
+        :param project: Project name
+        :param tag: Function tag
+        :param verbose: Set True for verbose logging
+        :param auth_info: Service AuthInfo (deprecated and ignored)
+        :param builder_env: Env vars dict for source archive config/credentials
+            e.g. builder_env={"GIT_TOKEN": token}
+        :param force_build: Set True for force building the application image
+        :param with_mlrun: Add the current mlrun package to the container build
+        :param skip_deployed: Skip the build if we already have an image for the function
+        :param is_kfp: Deploy as part of a kfp pipeline
+        :param mlrun_version_specifier: Which mlrun package version to include (if not current)
+        :param show_on_failure: Show logs only in case of build failure
+        :return: True if the function is ready (deployed)
+        """
+        if self.requires_build() or force_build:
+            self._fill_credentials()
+            self._build_application_image(
+                builder_env=builder_env,
+                force_build=force_build,
+                watch=True,
+                with_mlrun=with_mlrun,
+                skip_deployed=skip_deployed,
+                is_kfp=is_kfp,
+                mlrun_version_specifier=mlrun_version_specifier,
+                show_on_failure=show_on_failure,
+            )
+
         self._ensure_reverse_proxy_configurations()
         self._configure_application_sidecar()
         super().deploy(
@@ -232,7 +267,50 @@ class ApplicationRuntime(RemoteRuntime):
             verbose,
             auth_info,
             builder_env,
-
+        )
+
+    def with_source_archive(
+        self, source, workdir=None, pull_at_runtime=True, target_dir=None
+    ):
+        """load the code from git/tar/zip archive at runtime or build
+
+        :param source: valid absolute path or URL to git, zip, or tar file, e.g.
+            git://github.com/mlrun/something.git
+            http://some/url/file.zip
+            note path source must exist on the image or exist locally when run is local
+            (it is recommended to use 'workdir' when source is a filepath instead)
+        :param workdir: working dir relative to the archive root (e.g. './subdir') or absolute to the image root
+        :param pull_at_runtime: load the archive into the container at job runtime vs on build/deploy
+        :param target_dir: target dir on runtime pod or repo clone / archive extraction
+        """
+        self._configure_mlrun_build_with_source(
+            source=source,
+            workdir=workdir,
+            pull_at_runtime=pull_at_runtime,
+            target_dir=target_dir,
+        )
+
+    def _build_application_image(
+        self,
+        builder_env: dict = None,
+        force_build: bool = False,
+        watch=True,
+        with_mlrun=None,
+        skip_deployed=False,
+        is_kfp=False,
+        mlrun_version_specifier=None,
+        show_on_failure: bool = False,
+    ):
+        with_mlrun = self._resolve_build_with_mlrun(with_mlrun)
+        return self._build_image(
+            builder_env=builder_env,
+            force_build=force_build,
+            mlrun_version_specifier=mlrun_version_specifier,
+            show_on_failure=show_on_failure,
+            skip_deployed=skip_deployed,
+            watch=watch,
+            is_kfp=is_kfp,
+            with_mlrun=with_mlrun,
         )
 
     def _ensure_reverse_proxy_configurations(self):
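
Taken together, these additions let an application runtime pull its code from an archive and build a dedicated image before the Nuclio deploy. A minimal usage sketch, assuming a hypothetical function name and Git URL; only the with_source_archive and deploy signatures shown above are relied on:

    import mlrun

    # hypothetical name, image, and repository URL - for illustration only
    app = mlrun.new_function("my-app", kind="application", image="python:3.9")

    # load the application code from a Git archive at build time
    app.with_source_archive(
        "git://github.com/org/repo#main",
        workdir="app",
        pull_at_runtime=False,
    )

    # builds the image if requires_build() (or force_build=True), then deploys
    app.deploy(with_mlrun=False, skip_deployed=True)
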
mlrun/runtimes/nuclio/application/reverse_proxy.go CHANGED

@@ -39,11 +39,19 @@ func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) {
 	for k, v := range event.GetHeaders() {
 		httpRequest.Header[k] = []string{v.(string)}
 	}
+
+	// populate query params
+	query := httpRequest.URL.Query()
+	for k, v := range event.GetFields() {
+		query.Set(k, v.(string))
+	}
+	httpRequest.URL.RawQuery = query.Encode()
+
 	recorder := httptest.NewRecorder()
 	reverseProxy.ServeHTTP(recorder, httpRequest)
 
 	// send request to sidecar
-	context.Logger.
+	context.Logger.DebugWith("Forwarding request to sidecar", "sidecarUrl", sidecarUrl, "query", httpRequest.URL.Query())
 	response := recorder.Result()
 
 	headers := make(map[string]interface{})
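
The Go handler change above copies the Nuclio event fields into the proxied request's query string, so query parameters now reach the sidecar application. A hedged client-side sketch; the invocation URL is illustrative:

    import requests

    # query parameters are now forwarded by the reverse proxy to the sidecar app
    resp = requests.get(
        "https://my-app.example.com/items",   # hypothetical application function URL
        params={"limit": "10", "offset": "20"},
    )
    print(resp.status_code, resp.url)
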
mlrun/runtimes/nuclio/function.py CHANGED

@@ -345,17 +345,21 @@ class RemoteRuntime(KubeResource):
 
         git::
 
-            fn.with_source_archive(
-
-
+            fn.with_source_archive(
+                "git://github.com/org/repo#my-branch",
+                handler="main:handler",
+                workdir="path/inside/repo",
+            )
 
         s3::
 
             fn.spec.nuclio_runtime = "golang"
-            fn.with_source_archive(
+            fn.with_source_archive(
+                "s3://my-bucket/path/in/bucket/my-functions-archive",
                 handler="my_func:Handler",
                 workdir="path/inside/functions/archive",
-                runtime="golang"
+                runtime="golang",
+            )
         """
         self.spec.build.source = source
         # update handler in function_handler
@@ -543,11 +547,16 @@ class RemoteRuntime(KubeResource):
         :param project: project name
         :param tag: function tag
         :param verbose: set True for verbose logging
-        :param auth_info: service AuthInfo
+        :param auth_info: service AuthInfo (deprecated and ignored)
        :param builder_env: env vars dict for source archive config/credentials e.g. builder_env={"GIT_TOKEN": token}
         :param force_build: set True for force building the image
         """
-
+        if auth_info:
+            # TODO: remove in 1.9.0
+            warnings.warn(
+                "'auth_info' is deprecated for nuclio runtimes in 1.7.0 and will be removed in 1.9.0",
+                FutureWarning,
+            )
 
         old_http_session = getattr(self, "_http_session", None)
         if old_http_session:
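
The deploy path now only warns when auth_info is passed; the argument itself is ignored. A self-contained sketch of the same pattern (this stub is not mlrun's class, it just mirrors the warning added above):

    import warnings

    def deploy(auth_info=None, builder_env=None):
        # mirrors the deprecation check added above
        if auth_info:
            warnings.warn(
                "'auth_info' is deprecated for nuclio runtimes in 1.7.0 and will be removed in 1.9.0",
                FutureWarning,
            )
        # ... the real deployment logic lives in RemoteRuntime.deploy ...

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        deploy(auth_info="anything")
    assert any(issubclass(w.category, FutureWarning) for w in caught)
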
@@ -570,9 +579,7 @@ class RemoteRuntime(KubeResource):
         self._fill_credentials()
         db = self._get_db()
         logger.info("Starting remote function deploy")
-        data = db.
-            self, False, builder_env=builder_env, force_build=force_build
-        )
+        data = db.deploy_nuclio_function(func=self, builder_env=builder_env)
         self.status = data["data"].get("status")
         self._update_credentials_from_remote_build(data["data"])
 
@@ -613,7 +620,7 @@ class RemoteRuntime(KubeResource):
             int(mlrun.mlconf.httpdb.logs.nuclio.pull_deploy_status_default_interval)
         )
         try:
-            text, last_log_timestamp = db.
+            text, last_log_timestamp = db.get_nuclio_deploy_status(
                 self, last_log_timestamp=last_log_timestamp, verbose=verbose
             )
         except mlrun.db.RunDBError:
@@ -995,10 +1002,10 @@ class RemoteRuntime(KubeResource):
         ]
 
         if command:
-            sidecar["command"] = command
+            sidecar["command"] = mlrun.utils.helpers.as_list(command)
 
         if args:
-            sidecar["args"] = args
+            sidecar["args"] = mlrun.utils.helpers.as_list(args)
 
     def _set_sidecar(self, name: str) -> dict:
         self.spec.config.setdefault("spec.sidecars", [])
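
Sidecar command and args are now normalized to lists, so callers may pass either a single string or a list. A standalone sketch of the assumed behavior of mlrun.utils.helpers.as_list (wrap a scalar, pass lists through unchanged):

    def as_list(value):
        # assumed behavior: scalars are wrapped, lists are returned unchanged
        return value if isinstance(value, list) else [value]

    sidecar = {}
    sidecar["command"] = as_list("python")            # -> ["python"]
    sidecar["args"] = as_list(["-m", "http.server"])  # -> ["-m", "http.server"]
    print(sidecar)
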
mlrun/runtimes/nuclio/serving.py CHANGED

@@ -295,9 +295,7 @@ class ServingRuntime(RemoteRuntime):
                     "provided class is not a router step, must provide a router class in router topology"
                 )
             else:
-                step = RouterStep(
-                    class_name=class_name, class_args=class_args, engine=engine
-                )
+                step = RouterStep(class_name=class_name, class_args=class_args)
             self.spec.graph = step
         elif topology == StepKinds.flow:
             self.spec.graph = RootFlowStep(engine=engine)
@@ -367,8 +365,8 @@ class ServingRuntime(RemoteRuntime):
 
         Example, create a function (from the notebook), add a model class, and deploy::
 
-            fn = code_to_function(kind=
-            fn.add_model(
+            fn = code_to_function(kind="serving")
+            fn.add_model("boost", model_path, model_class="MyClass", my_arg=5)
             fn.deploy()
 
         only works with router topology, for nested topologies (model under router under flow)
@@ -450,7 +448,7 @@ class ServingRuntime(RemoteRuntime):
 
         example::
 
-            fn.add_child_function(
+            fn.add_child_function("enrich", "./enrich.ipynb", "mlrun/mlrun")
 
         :param name: child function name
         :param url: function/code url, support .py, .ipynb, .yaml extensions
@@ -491,9 +489,9 @@ class ServingRuntime(RemoteRuntime):
 
             if (
                 stream.path.startswith("kafka://")
-                or "
+                or "kafka_brokers" in stream.options
             ):
-                brokers = stream.options.get("
+                brokers = stream.options.get("kafka_brokers")
                 if brokers:
                     brokers = brokers.split(",")
                 topic, brokers = parse_kafka_url(stream.path, brokers)
@@ -731,8 +729,11 @@ class ServingRuntime(RemoteRuntime):
         example::
 
             serving_fn = mlrun.new_function("serving", image="mlrun/mlrun", kind="serving")
-            serving_fn.add_model(
-
+            serving_fn.add_model(
+                "my-classifier",
+                model_path=model_path,
+                class_name="mlrun.frameworks.sklearn.SklearnModelServer",
+            )
             serving_fn.plot(rankdir="LR")
 
         :param filename: target filepath for the image (None for the notebook)
mlrun/runtimes/pod.py CHANGED

@@ -15,6 +15,7 @@ import copy
 import inspect
 import os
 import re
+import time
 import typing
 from enum import Enum
 
@@ -1277,9 +1278,9 @@ class KubeResource(BaseRuntime):
             from kubernetes import client as k8s_client
 
             security_context = k8s_client.V1SecurityContext(
-
-
-
+                run_as_user=1000,
+                run_as_group=3000,
+            )
             function.with_security_context(security_context)
 
         More info:
@@ -1338,6 +1339,150 @@ class KubeResource(BaseRuntime):
 
         self.spec.validate_service_account(allowed_service_accounts)
 
+    def _configure_mlrun_build_with_source(
+        self, source, workdir=None, handler=None, pull_at_runtime=True, target_dir=None
+    ):
+        mlrun.utils.helpers.validate_builder_source(source, pull_at_runtime, workdir)
+
+        self.spec.build.source = source
+        if handler:
+            self.spec.default_handler = handler
+        if workdir:
+            self.spec.workdir = workdir
+        if target_dir:
+            self.spec.build.source_code_target_dir = target_dir
+
+        self.spec.build.load_source_on_run = pull_at_runtime
+        if (
+            self.spec.build.base_image
+            and not self.spec.build.commands
+            and pull_at_runtime
+            and not self.spec.image
+        ):
+            # if we load source from repo and don't need a full build use the base_image as the image
+            self.spec.image = self.spec.build.base_image
+        elif not pull_at_runtime:
+            # clear the image so build will not be skipped
+            self.spec.build.base_image = self.spec.build.base_image or self.spec.image
+            self.spec.image = ""
+
+    def _resolve_build_with_mlrun(self, with_mlrun: typing.Optional[bool] = None):
+        build = self.spec.build
+        if with_mlrun is None:
+            if build.with_mlrun is not None:
+                with_mlrun = build.with_mlrun
+            else:
+                with_mlrun = build.base_image and not (
+                    build.base_image.startswith("mlrun/")
+                    or "/mlrun/" in build.base_image
+                )
+        if (
+            not build.source
+            and not build.commands
+            and not build.requirements
+            and not build.extra
+            and with_mlrun
+        ):
+            logger.info(
+                "Running build to add mlrun package, set "
+                "with_mlrun=False to skip if its already in the image"
+            )
+        return with_mlrun
+
+    def _build_image(
+        self,
+        builder_env,
+        force_build,
+        mlrun_version_specifier,
+        show_on_failure,
+        skip_deployed,
+        watch,
+        is_kfp,
+        with_mlrun,
+    ):
+        # When we're in pipelines context we must watch otherwise the pipelines pod will exit before the operation
+        # is actually done. (when a pipelines pod exits, the pipeline step marked as done)
+        if is_kfp:
+            watch = True
+
+        db = self._get_db()
+        data = db.remote_builder(
+            self,
+            with_mlrun,
+            mlrun_version_specifier,
+            skip_deployed,
+            builder_env=builder_env,
+            force_build=force_build,
+        )
+        self.status = data["data"].get("status", None)
+        self.spec.image = mlrun.utils.get_in(
+            data, "data.spec.image"
+        ) or mlrun.utils.get_in(data, "data.spec.build.image")
+        self.spec.build.base_image = self.spec.build.base_image or mlrun.utils.get_in(
+            data, "data.spec.build.base_image"
+        )
+        # Get the source target dir in case it was enriched due to loading source
+        self.spec.build.source_code_target_dir = mlrun.utils.get_in(
+            data, "data.spec.build.source_code_target_dir"
+        ) or mlrun.utils.get_in(data, "data.spec.clone_target_dir")
+        ready = data.get("ready", False)
+        if not ready:
+            logger.info(
+                f"Started building image: {data.get('data', {}).get('spec', {}).get('build', {}).get('image')}"
+            )
+        if watch and not ready:
+            state = self._build_watch(
+                watch=watch,
+                show_on_failure=show_on_failure,
+            )
+            ready = state == "ready"
+            self.status.state = state
+
+        if watch and not ready:
+            raise mlrun.errors.MLRunRuntimeError("Deploy failed")
+        return ready
+
+    def _build_watch(
+        self,
+        watch: bool = True,
+        logs: bool = True,
+        show_on_failure: bool = False,
+    ):
+        db = self._get_db()
+        offset = 0
+        try:
+            text, _ = db.get_builder_status(self, 0, logs=logs)
+        except mlrun.db.RunDBError:
+            raise ValueError("function or build process not found")
+
+        def print_log(text):
+            if text and (
+                not show_on_failure
+                or self.status.state == mlrun.common.schemas.FunctionState.error
+            ):
+                print(text, end="")
+
+        print_log(text)
+        offset += len(text)
+        if watch:
+            while self.status.state in [
+                mlrun.common.schemas.FunctionState.pending,
+                mlrun.common.schemas.FunctionState.running,
+            ]:
+                time.sleep(2)
+                if show_on_failure:
+                    text = ""
+                    db.get_builder_status(self, 0, logs=False)
+                    if self.status.state == mlrun.common.schemas.FunctionState.error:
+                        # re-read the full log on failure
+                        text, _ = db.get_builder_status(self, offset, logs=logs)
+                else:
+                    text, _ = db.get_builder_status(self, offset, logs=logs)
+                print_log(text)
+                offset += len(text)
+
+        return self.status.state
+
 
 def _resolve_if_type_sanitized(attribute_name, attribute):
     attribute_config = sanitized_attributes[attribute_name]
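
The new _resolve_build_with_mlrun helper decides whether the build should inject the mlrun package: an explicit argument wins, then the build spec's with_mlrun, and otherwise the base image name is checked. A standalone mirror of that rule (not mlrun's code, just the decision logic extracted for clarity):

    from typing import Optional

    def resolve_build_with_mlrun(
        with_mlrun: Optional[bool],
        build_with_mlrun: Optional[bool],
        base_image: Optional[str],
    ) -> bool:
        # explicit argument > build spec setting > "is the base image already an mlrun image?"
        if with_mlrun is not None:
            return with_mlrun
        if build_with_mlrun is not None:
            return build_with_mlrun
        return bool(base_image) and not (
            base_image.startswith("mlrun/") or "/mlrun/" in base_image
        )

    print(resolve_build_with_mlrun(None, None, "python:3.9"))         # True  - add mlrun
    print(resolve_build_with_mlrun(None, None, "mlrun/mlrun:1.7.0"))  # False - already an mlrun image
    print(resolve_build_with_mlrun(False, None, "python:3.9"))        # False - explicit override
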
mlrun/runtimes/utils.py CHANGED

@@ -417,34 +417,6 @@ def get_func_selector(project, name=None, tag=None):
     return s
 
 
-class k8s_resource:
-    kind = ""
-    per_run = False
-    per_function = False
-    k8client = None
-
-    def deploy_function(self, function):
-        pass
-
-    def release_function(self, function):
-        pass
-
-    def submit_run(self, function, runobj):
-        pass
-
-    def get_object(self, name, namespace=None):
-        return None
-
-    def get_status(self, name, namespace=None):
-        return None
-
-    def del_object(self, name, namespace=None):
-        pass
-
-    def get_pods(self, name, namespace=None, master=False):
-        return {}
-
-
 def enrich_function_from_dict(function, function_dict):
     override_function = mlrun.new_function(runtime=function_dict, kind=function.kind)
     for attribute in [
mlrun/secrets.py CHANGED

@@ -163,15 +163,19 @@ def get_secret_or_env(
 
     Example::
 
-        secrets = {
+        secrets = {"KEY1": "VALUE1"}
         secret = get_secret_or_env("KEY1", secret_provider=secrets)
 
+
         # Using a function to retrieve a secret
         def my_secret_provider(key):
             # some internal logic to retrieve secret
             return value
 
-
+
+        secret = get_secret_or_env(
+            "KEY1", secret_provider=my_secret_provider, default="TOO-MANY-SECRETS"
+        )
 
     :param key: Secret key to look for
     :param secret_provider: Dictionary, callable or `SecretsStore` to extract the secret value from. If using a
mlrun/serving/remote.py CHANGED

@@ -172,8 +172,7 @@ class RemoteStep(storey.SendToHttp):
         if not self._session:
             self._session = mlrun.utils.HTTPSessionWithRetry(
                 self.retries,
-                self.backoff_factor
-                or mlrun.config.config.http_retry_defaults.backoff_factor,
+                self.backoff_factor or mlrun.mlconf.http_retry_defaults.backoff_factor,
                 retry_on_exception=False,
                 retry_on_status=self.retries > 0,
                 retry_on_post=True,
@@ -185,7 +184,7 @@ class RemoteStep(storey.SendToHttp):
         resp = self._session.request(
             method,
             url,
-            verify=mlrun.
+            verify=mlrun.mlconf.httpdb.http.verify,
             headers=headers,
             data=body,
             timeout=self.timeout,
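
Both fixes read settings through mlrun.mlconf (the global config alias) instead of the older mlrun.config.config spelling. A short sketch of the two config knobs the step now uses; disabling TLS verification is shown for illustration only:

    import mlrun

    # retry backoff and TLS verification used by RemoteStep's HTTP session
    print(mlrun.mlconf.http_retry_defaults.backoff_factor)
    print(mlrun.mlconf.httpdb.http.verify)

    # e.g. against a self-signed test endpoint (illustrative, not recommended in production)
    mlrun.mlconf.httpdb.http.verify = False
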
mlrun/serving/routers.py CHANGED

@@ -28,6 +28,7 @@ import numpy as np
 import mlrun
 import mlrun.common.model_monitoring
 import mlrun.common.schemas.model_monitoring
+from mlrun.errors import err_to_str
 from mlrun.utils import logger, now_date
 
 from ..common.helpers import parse_versioned_object_uri
@@ -271,7 +272,9 @@ class ParallelRun(BaseModelRouter):
         fn = mlrun.new_function("parallel", kind="serving")
         graph = fn.set_topology(
             "router",
-            mlrun.serving.routers.ParallelRun(
+            mlrun.serving.routers.ParallelRun(
+                extend_event=True, executor_type=executor
+            ),
         )
         graph.add_route("child1", class_name="Cls1")
         graph.add_route("child2", class_name="Cls2", my_arg={"c": 7})
@@ -1013,7 +1016,7 @@ def _init_endpoint_record(
             graph_server.function_uri
         )
     except Exception as e:
-        logger.error("Failed to parse function URI", exc=e)
+        logger.error("Failed to parse function URI", exc=err_to_str(e))
         return None
 
     # Generating version model value based on the model name and model version
@@ -1089,12 +1092,12 @@ def _init_endpoint_record(
         except Exception as exc:
             logger.warning(
                 "Failed creating model endpoint record",
-                exc=exc,
+                exc=err_to_str(exc),
                 traceback=traceback.format_exc(),
             )
 
     except Exception as e:
-        logger.error("Failed to retrieve model endpoint object", exc=e)
+        logger.error("Failed to retrieve model endpoint object", exc=err_to_str(e))
 
     return endpoint_uid
 
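
err_to_str converts an exception into a readable string before it is handed to the structured logger. A minimal sketch of the pattern these hunks switch to, using only the imports already shown in the diff:

    from mlrun.errors import err_to_str
    from mlrun.utils import logger

    try:
        raise ValueError("model endpoint lookup failed")  # stand-in for the real failure
    except Exception as exc:
        # log a plain string instead of the exception object
        logger.error("Failed to retrieve model endpoint object", exc=err_to_str(exc))
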
mlrun/serving/server.py CHANGED

@@ -53,7 +53,7 @@ class _StreamContext:
         Initialize _StreamContext object.
         :param enabled: A boolean indication for applying the stream context
         :param parameters: Dictionary of optional parameters, such as `log_stream` and `stream_args`. Note that these
-            parameters might be relevant to the output source such as `
+            parameters might be relevant to the output source such as `kafka_brokers` if
             the output source is from type Kafka.
         :param function_uri: Full value of the function uri, usually it's <project-name>/<function-name>
         """
mlrun/serving/states.py CHANGED

@@ -17,6 +17,7 @@ __all__ = ["TaskStep", "RouterStep", "RootFlowStep", "ErrorStep"]
 import os
 import pathlib
 import traceback
+import warnings
 from copy import copy, deepcopy
 from inspect import getfullargspec, signature
 from typing import Union
@@ -590,7 +591,7 @@ class RouterStep(TaskStep):
 
     kind = "router"
     default_shape = "doubleoctagon"
-    _dict_fields = _task_step_fields + ["routes"
+    _dict_fields = _task_step_fields + ["routes"]
     _default_class = "mlrun.serving.ModelRouter"
 
     def __init__(
@@ -603,7 +604,6 @@ class RouterStep(TaskStep):
         function: str = None,
         input_path: str = None,
         result_path: str = None,
-        engine: str = None,
     ):
         super().__init__(
             class_name,
@@ -616,8 +616,6 @@ class RouterStep(TaskStep):
         )
         self._routes: ObjectDict = None
         self.routes = routes
-        self.engine = engine
-        self._controller = None
 
     def get_children(self):
         """get child steps (routes)"""
@@ -687,33 +685,6 @@ class RouterStep(TaskStep):
         self._set_error_handler()
         self._post_init(mode)
 
-        if self.engine == "async":
-            self._build_async_flow()
-            self._run_async_flow()
-
-    def _build_async_flow(self):
-        """initialize and build the async/storey DAG"""
-
-        self.respond()
-        source, self._wait_for_result = _init_async_objects(self.context, [self])
-        source.to(self.async_object)
-
-        self._async_flow = source
-
-    def _run_async_flow(self):
-        self._controller = self._async_flow.run()
-
-    def run(self, event, *args, **kwargs):
-        if self._controller:
-            # async flow (using storey)
-            event._awaitable_result = None
-            resp = self._controller.emit(
-                event, return_awaitable_result=self._wait_for_result
-            )
-            return resp.await_result()
-
-        return super().run(event, *args, **kwargs)
-
     def __getitem__(self, name):
         return self._routes[name]
 
@@ -1524,21 +1495,26 @@ def _init_async_objects(context, steps):
             endpoint = None
             options = {}
             options.update(step.options)
-
-
-
-
-
-
+
+            kafka_brokers = options.pop("kafka_brokers", None)
+            if not kafka_brokers and "kafka_bootstrap_servers" in options:
+                kafka_brokers = options.pop("kafka_bootstrap_servers")
+                warnings.warn(
+                    "The 'kafka_bootstrap_servers' parameter is deprecated and will be removed in "
+                    "1.9.0. Please pass the 'kafka_brokers' parameter instead.",
+                    FutureWarning,
                 )
 
+            if stream_path.startswith("kafka://") or kafka_brokers:
+                topic, brokers = parse_kafka_url(stream_path, kafka_brokers)
+
                 kafka_producer_options = options.pop(
                     "kafka_producer_options", None
                 )
 
                 step._async_object = storey.KafkaTarget(
                     topic=topic,
-
+                    brokers=brokers,
                     producer_options=kafka_producer_options,
                     context=context,
                     **options,