mlrun 1.7.0rc35__py3-none-any.whl → 1.7.0rc37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mlrun might be problematic.
- mlrun/alerts/alert.py +63 -0
- mlrun/common/schemas/alert.py +2 -2
- mlrun/common/schemas/api_gateway.py +1 -1
- mlrun/common/schemas/notification.py +23 -4
- mlrun/config.py +1 -0
- mlrun/datastore/s3.py +8 -1
- mlrun/datastore/spark_utils.py +30 -0
- mlrun/feature_store/api.py +19 -1
- mlrun/feature_store/steps.py +8 -0
- mlrun/model_monitoring/api.py +24 -7
- mlrun/model_monitoring/applications/_application_steps.py +12 -3
- mlrun/model_monitoring/applications/base.py +8 -0
- mlrun/model_monitoring/applications/evidently_base.py +23 -22
- mlrun/model_monitoring/controller.py +5 -1
- mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +14 -1
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +1 -1
- mlrun/model_monitoring/db/tsdb/base.py +20 -11
- mlrun/model_monitoring/helpers.py +1 -2
- mlrun/model_monitoring/stream_processing.py +20 -0
- mlrun/model_monitoring/writer.py +4 -1
- mlrun/projects/operations.py +4 -0
- mlrun/projects/project.py +4 -0
- mlrun/runtimes/base.py +3 -0
- mlrun/runtimes/nuclio/api_gateway.py +1 -1
- mlrun/runtimes/nuclio/application/application.py +53 -12
- mlrun/runtimes/nuclio/function.py +5 -1
- mlrun/runtimes/sparkjob/spark3job.py +4 -7
- mlrun/runtimes/utils.py +18 -0
- mlrun/serving/routers.py +1 -4
- mlrun/serving/server.py +4 -7
- mlrun/serving/states.py +8 -3
- mlrun/serving/v2_serving.py +9 -9
- mlrun/utils/db.py +15 -0
- mlrun/utils/http.py +1 -1
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/METADATA +6 -6
- {mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/RECORD +41 -41
- {mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/WHEEL +0 -0
- {mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/writer.py
CHANGED

@@ -130,7 +130,6 @@ class ModelMonitoringWriter(StepToDict):
         project_name: str,
         result_kind: int,
     ) -> None:
-        logger.info("Sending an event")
         entity = mlrun.common.schemas.alert.EventEntities(
             kind=alert_objects.EventEntityKind.MODEL_ENDPOINT_RESULT,
             project=project_name,
@@ -146,7 +145,9 @@ class ModelMonitoringWriter(StepToDict):
             entity=entity,
             value_dict=event_value,
         )
+        logger.info("Sending a drift event")
         mlrun.get_run_db().generate_event(event_kind, event_data)
+        logger.info("Drift event sent successfully")

     @staticmethod
     def _generate_alert_event_kind(
@@ -261,3 +262,5 @@ class ModelMonitoringWriter(StepToDict):
             endpoint_id=endpoint_id,
             attributes=json.loads(event[ResultData.RESULT_EXTRA_DATA]),
         )
+
+        logger.info("Model monitoring writer finished handling event")
mlrun/projects/operations.py
CHANGED

@@ -187,6 +187,10 @@ def run_function(
     task.spec.verbose = task.spec.verbose or verbose

     if engine == "kfp":
+        if schedule:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "Scheduling job is not supported when running a workflow with kfp engine."
+            )
         return function.as_step(
             name=name, runspec=task, workdir=workdir, outputs=outputs, labels=labels
         )
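
The new guard turns a previously ignored schedule into an explicit error. A minimal sketch of how it surfaces, assuming the call happens inside a project workflow that runs with the kfp engine ("trainer" and the cron expression are illustrative):

    import mlrun
    import mlrun.errors

    # assumed to run inside a workflow compiled/executed with the kfp engine
    try:
        mlrun.run_function("trainer", schedule="0 * * * *")  # "trainer" is an illustrative function name
    except mlrun.errors.MLRunInvalidArgumentError as exc:
        print(exc)  # Scheduling job is not supported when running a workflow with kfp engine.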
mlrun/projects/project.py
CHANGED

@@ -2967,6 +2967,7 @@ class MlrunProject(ModelObj):
         source: str = None,
         cleanup_ttl: int = None,
         notifications: list[mlrun.model.Notification] = None,
+        send_start_notification: bool = True,
     ) -> _PipelineRunStatus:
         """Run a workflow using kubeflow pipelines

@@ -3003,6 +3004,8 @@ class MlrunProject(ModelObj):
             workflow and all its resources are deleted)
         :param notifications:
             List of notifications to send for workflow completion
+        :param send_start_notification:
+            Send a notification when the workflow starts

         :returns: ~py:class:`~mlrun.projects.pipelines._PipelineRunStatus` instance
         """
@@ -3080,6 +3083,7 @@ class MlrunProject(ModelObj):
             namespace=namespace,
             source=source,
             notifications=notifications,
+            send_start_notification=send_start_notification,
         )
         # run is None when scheduling
         if run and run.state == mlrun_pipelines.common.models.RunStatuses.failed:
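
A short sketch of the new flag from the caller side; the project and workflow names are illustrative:

    import mlrun

    project = mlrun.get_or_create_project("demo", context="./")
    run_status = project.run(
        name="main",                    # workflow name registered on the project
        engine="kfp",
        send_start_notification=False,  # suppress the "workflow started" notification, keep completion ones
    )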
mlrun/runtimes/nuclio/api_gateway.py
CHANGED

@@ -657,7 +657,7 @@ class APIGateway(ModelObj):
         host = self.spec.host
         if not self.spec.host.startswith("http"):
             host = f"https://{self.spec.host}"
-        return urljoin(host, self.spec.path)
+        return urljoin(host, self.spec.path).rstrip("/")

     @staticmethod
     def _generate_basic_auth(username: str, password: str):
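
A small illustration of what the added .rstrip("/") changes, using only the standard library (the host and path values are illustrative):

    from urllib.parse import urljoin

    host = "https://my-gateway.default-tenant.app.example.com"  # illustrative gateway host
    path = "/"

    print(urljoin(host, path))              # https://my-gateway.default-tenant.app.example.com/
    print(urljoin(host, path).rstrip("/"))  # https://my-gateway.default-tenant.app.example.com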
mlrun/runtimes/nuclio/application/application.py
CHANGED

@@ -18,6 +18,7 @@ import nuclio

 import mlrun.common.schemas as schemas
 import mlrun.errors
+import mlrun.run
 from mlrun.common.runtimes.constants import NuclioIngressAddTemplatedIngressModes
 from mlrun.runtimes import RemoteRuntime
 from mlrun.runtimes.nuclio import min_nuclio_versions
@@ -174,6 +175,7 @@ class ApplicationStatus(NuclioStatus):

 class ApplicationRuntime(RemoteRuntime):
     kind = "application"
+    reverse_proxy_image = None

     @min_nuclio_versions("1.13.1")
     def __init__(self, spec=None, metadata=None):
@@ -306,10 +308,11 @@ class ApplicationRuntime(RemoteRuntime):
             show_on_failure=show_on_failure,
         )

-        self
+        # This is a class method that accepts a function instance, so we pass self as the function instance
+        self._ensure_reverse_proxy_configurations(self)
         self._configure_application_sidecar()

-        #
+        # We only allow accessing the application via the API Gateway
         name_tag = tag or self.metadata.tag
         self.status.api_gateway_name = (
             f"{self.metadata.name}-{name_tag}" if name_tag else self.metadata.name
@@ -391,8 +394,8 @@ class ApplicationRuntime(RemoteRuntime):
             "main:Handler",
         )

-    @
-    def get_filename_and_handler(
+    @staticmethod
+    def get_filename_and_handler() -> (str, str):
         reverse_proxy_file_path = pathlib.Path(__file__).parent / "reverse_proxy.go"
         return str(reverse_proxy_file_path), "Handler"

@@ -488,6 +491,39 @@ class ApplicationRuntime(RemoteRuntime):
             **http_client_kwargs,
         )

+    @classmethod
+    def deploy_reverse_proxy_image(cls):
+        """
+        Build the reverse proxy image and save it.
+        The reverse proxy image is used to route requests to the application sidecar.
+        This is useful when you want to decrease build time by building the application image only once.
+
+        :param use_cache: Use the cache when building the image
+        """
+        # create a function that includes only the reverse proxy, without the application
+
+        reverse_proxy_func = mlrun.run.new_function(
+            name="reverse-proxy-temp", kind="remote"
+        )
+        # default max replicas is 4, we only need one replica for the reverse proxy
+        reverse_proxy_func.spec.max_replicas = 1
+
+        # the reverse proxy image should not be based on another image
+        reverse_proxy_func.set_config("spec.build.baseImage", None)
+        reverse_proxy_func.spec.image = ""
+        reverse_proxy_func.spec.build.base_image = ""
+
+        cls._ensure_reverse_proxy_configurations(reverse_proxy_func)
+        reverse_proxy_func.deploy()
+
+        # save the created container image
+        cls.reverse_proxy_image = reverse_proxy_func.status.container_image
+
+        # delete the function to avoid cluttering the project
+        mlrun.get_run_db().delete_function(
+            reverse_proxy_func.metadata.name, reverse_proxy_func.metadata.project
+        )
+
     def _run(self, runobj: "mlrun.RunObject", execution):
         raise mlrun.runtimes.RunError(
             "Application runtime .run() is not yet supported. Use .invoke() instead."
@@ -527,21 +563,22 @@ class ApplicationRuntime(RemoteRuntime):
             with_mlrun=with_mlrun,
         )

-
-
+    @staticmethod
+    def _ensure_reverse_proxy_configurations(function: RemoteRuntime):
+        if function.spec.build.functionSourceCode or function.status.container_image:
             return

         filename, handler = ApplicationRuntime.get_filename_and_handler()
         name, spec, code = nuclio.build_file(
             filename,
-            name=
+            name=function.metadata.name,
             handler=handler,
         )
-
-
+        function.spec.function_handler = mlrun.utils.get_in(spec, "spec.handler")
+        function.spec.build.functionSourceCode = mlrun.utils.get_in(
             spec, "spec.build.functionSourceCode"
         )
-
+        function.spec.nuclio_runtime = mlrun.utils.get_in(spec, "spec.runtime")

     def _configure_application_sidecar(self):
         # Save the application image in the status to allow overriding it with the reverse proxy entry point
@@ -552,8 +589,12 @@ class ApplicationRuntime(RemoteRuntime):
         self.status.application_image = self.spec.image
         self.spec.image = ""

-        if
-
+        # reuse the reverse proxy image if it was built before
+        if (
+            reverse_proxy_image := self.status.container_image
+            or self.reverse_proxy_image
+        ):
+            self.from_image(reverse_proxy_image)

         self.status.sidecar_name = f"{self.metadata.name}-sidecar"
         self.with_sidecar(
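
A sketch of how the new class-level cache might be used to avoid rebuilding the reverse proxy for every application function; the project/function names and image are illustrative, and the exact deploy flow may differ between versions:

    import mlrun

    project = mlrun.get_or_create_project("demo", context="./")

    # an application function; the image is an illustrative prebuilt application image
    app = project.set_function(
        name="my-app", kind="application", image="registry.example.com/my-app:latest"
    )

    # build the reverse proxy image once and cache it on the ApplicationRuntime class
    type(app).deploy_reverse_proxy_image()

    # subsequent deployments can reuse reverse_proxy_image instead of rebuilding the Go reverse proxy
    app.deploy()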
mlrun/runtimes/nuclio/function.py
CHANGED

@@ -689,7 +689,7 @@ class RemoteRuntime(KubeResource):
             "State thresholds do not apply for nuclio as it has its own function pods healthiness monitoring"
         )

-    @min_nuclio_versions("1.
+    @min_nuclio_versions("1.13.1")
     def disable_default_http_trigger(
         self,
     ):
@@ -707,6 +707,10 @@ class RemoteRuntime(KubeResource):
         """
         self.spec.disable_default_http_trigger = False

+    def skip_image_enrichment(self):
+        # make sure the API does not enrich the base image if the function is not a python function
+        return self.spec.nuclio_runtime and "python" not in self.spec.nuclio_runtime
+
     def _get_state(
         self,
         dashboard="",
mlrun/runtimes/sparkjob/spark3job.py
CHANGED

@@ -505,13 +505,10 @@ class Spark3Runtime(KubejobRuntime):
             raise NotImplementedError(
                 "Setting node name is not supported for spark runtime"
             )
-
-
-
-
-                "Setting affinity is not supported for spark runtime"
-            )
-        super().with_node_selection(node_name, node_selector, affinity, tolerations)
+        self.with_driver_node_selection(node_name, node_selector, affinity, tolerations)
+        self.with_executor_node_selection(
+            node_name, node_selector, affinity, tolerations
+        )

     def with_driver_node_selection(
         self,
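
With this rewrite, with_node_selection on the Spark runtime delegates to both the driver and the executor selection methods instead of calling the generic base behavior. A small sketch; the function name and selector are illustrative:

    import mlrun

    spark_fn = mlrun.new_function("spark-train", kind="spark")  # illustrative Spark function

    # applies the selector to both driver and executor pods
    spark_fn.with_node_selection(node_selector={"pool": "spark"})

    # equivalent to calling the two scoped variants directly:
    # spark_fn.with_driver_node_selection(node_selector={"pool": "spark"})
    # spark_fn.with_executor_node_selection(node_selector={"pool": "spark"})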
mlrun/runtimes/utils.py
CHANGED

@@ -445,3 +445,21 @@ def enrich_run_labels(
         if label.value not in labels and enrichment:
             labels[label.value] = enrichment
     return labels
+
+
+def resolve_node_selectors(
+    project_node_selector: dict, instance_node_selector: dict
+) -> dict:
+    config_node_selector = mlrun.mlconf.get_default_function_node_selector()
+    if project_node_selector or config_node_selector:
+        mlrun.utils.logger.debug(
+            "Enriching node selector from project and mlrun config",
+            project_node_selector=project_node_selector,
+            config_node_selector=config_node_selector,
+        )
+        return mlrun.utils.helpers.merge_dicts_with_precedence(
+            config_node_selector,
+            project_node_selector,
+            instance_node_selector,
+        )
+    return instance_node_selector
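
A quick sketch of the precedence this helper appears to implement, assuming merge_dicts_with_precedence lets later arguments win (instance-level keys override project-level keys, which override the mlrun config default); the selector values are illustrative:

    config_selector = {"zone": "us-east-1a", "tier": "cpu"}  # mlrun config default
    project_selector = {"tier": "gpu"}                       # project default
    instance_selector = {"zone": "us-east-1b"}               # set on the function itself

    # later dicts take precedence, equivalent to a left-to-right merge
    effective = {**config_selector, **project_selector, **instance_selector}
    print(effective)  # {'zone': 'us-east-1b', 'tier': 'gpu'}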
mlrun/serving/routers.py
CHANGED

@@ -32,7 +32,6 @@ from mlrun.errors import err_to_str
 from mlrun.utils import logger, now_date

 from ..common.helpers import parse_versioned_object_uri
-from ..config import config
 from .server import GraphServer
 from .utils import RouterToDict, _extract_input_data, _update_result_body
 from .v2_serving import _ModelLogPusher
@@ -1057,9 +1056,7 @@ def _init_endpoint_record(
             function_uri=graph_server.function_uri,
             model=versioned_model_name,
             model_class=voting_ensemble.__class__.__name__,
-            stream_path=
-                project=project, kind="stream"
-            ),
+            stream_path=voting_ensemble.context.stream.stream_uri,
             active=True,
             monitoring_mode=mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled,
         ),
mlrun/serving/server.py
CHANGED

@@ -38,10 +38,7 @@ from ..errors import MLRunInvalidArgumentError
 from ..model import ModelObj
 from ..utils import get_caller_globals
 from .states import RootFlowStep, RouterStep, get_function, graph_root_setter
-from .utils import (
-    event_id_key,
-    event_path_key,
-)
+from .utils import event_id_key, event_path_key


 class _StreamContext:
@@ -71,15 +68,15 @@ class _StreamContext:
                 function_uri, config.default_project
            )

-            stream_uri = mlrun.model_monitoring.get_stream_path(project=project)
+            self.stream_uri = mlrun.model_monitoring.get_stream_path(project=project)

             if log_stream:
                 # Update the stream path to the log stream value
-                stream_uri = log_stream.format(project=project)
+                self.stream_uri = log_stream.format(project=project)

             stream_args = parameters.get("stream_args", {})

-            self.output_stream = get_stream_pusher(stream_uri, **stream_args)
+            self.output_stream = get_stream_pusher(self.stream_uri, **stream_args)


 class GraphServer(ModelObj):
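
The resolved stream path is now kept on the stream context as self.stream_uri rather than a local variable, which is what lets the endpoint-record code in routers.py and v2_serving.py read it directly. A tiny sketch of that read side, assuming the graph server context (with its stream attribute) is what mlrun passes into a serving step:

    def describe_monitoring_stream(context) -> str:
        # `context` is assumed to be the serving graph context;
        # _StreamContext now exposes the resolved path as `stream_uri`
        return f"monitoring events are pushed to: {context.stream.stream_uri}"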
mlrun/serving/states.py
CHANGED

@@ -872,7 +872,8 @@ class QueueStep(BaseStep):
             return event

         if self._stream:
-
+            full_event = self.options.get("full_event")
+            if full_event or full_event is None and self.next:
                 data = storey.utils.wrap_event_for_serialization(event, data)
             self._stream.push(data)
             event.terminated = True
@@ -1390,7 +1391,7 @@ class FlowStep(BaseStep):
         return step

     def supports_termination(self):
-        return self.engine
+        return self.engine != "sync"


 class RootFlowStep(FlowStep):
@@ -1630,7 +1631,11 @@ def _init_async_objects(context, steps):
         if step.path and not skip_stream:
             stream_path = step.path
             endpoint = None
-
+            # in case of a queue, we default to a full_event=True
+            full_event = step.options.get("full_event")
+            options = {
+                "full_event": full_event or full_event is None and step.next
+            }
             options.update(step.options)

             kafka_brokers = get_kafka_brokers_from_dict(options, pop=True)
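
Both hunks apply the same default rule for full_event; a small pure-Python sketch of that rule (no mlrun objects involved):

    def resolve_full_event(full_event, has_next_step: bool) -> bool:
        # explicit True/False wins; when unset (None), wrap the full event only if
        # the queue has downstream steps that need the event metadata back
        return bool(full_event or (full_event is None and has_next_step))

    assert resolve_full_event(True, has_next_step=False) is True
    assert resolve_full_event(False, has_next_step=True) is False
    assert resolve_full_event(None, has_next_step=True) is True
    assert resolve_full_event(None, has_next_step=False) is False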
mlrun/serving/v2_serving.py
CHANGED

@@ -15,12 +15,11 @@
 import threading
 import time
 import traceback
-from typing import Union
+from typing import Optional, Union

+import mlrun.artifacts
 import mlrun.common.model_monitoring
 import mlrun.common.schemas.model_monitoring
-from mlrun.artifacts import ModelArtifact  # noqa: F401
-from mlrun.config import config
 from mlrun.errors import err_to_str
 from mlrun.utils import logger, now_date
@@ -102,7 +101,7 @@ class V2ModelServer(StepToDict):
         self.error = ""
         self.protocol = protocol or "v2"
         self.model_path = model_path
-        self.model_spec: mlrun.artifacts.ModelArtifact = None
+        self.model_spec: Optional[mlrun.artifacts.ModelArtifact] = None
         self._input_path = input_path
         self._result_path = result_path
         self._kwargs = kwargs  # for to_dict()
@@ -335,6 +334,7 @@ class V2ModelServer(StepToDict):
         else:
             track_request = {"id": event_id, "inputs": inputs or []}
             track_response = {"outputs": outputs or []}
+            # TODO : check dict/list
         self._model_logger.push(start, track_request, track_response, op)
         event.body = _update_result_body(self._result_path, original_body, response)
         return event
@@ -376,8 +376,10 @@ class V2ModelServer(StepToDict):
         """postprocess, before returning response"""
         return request

-    def predict(self, request: dict) ->
-        """model prediction operation
+    def predict(self, request: dict) -> list:
+        """model prediction operation
+        :return: list with the model prediction results (can be multi-port) or list of lists for multiple predictions
+        """
         raise NotImplementedError()

     def explain(self, request: dict) -> dict:
@@ -567,9 +569,7 @@ def _init_endpoint_record(
             model=versioned_model_name,
             model_class=model.__class__.__name__,
             model_uri=model.model_path,
-            stream_path=
-                project=project, kind="stream"
-            ),
+            stream_path=model.context.stream.stream_uri,
             active=True,
             monitoring_mode=mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled,
         ),
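
A minimal custom server sketch that follows the clarified predict contract (return a list of results, or a list of lists for multiple predictions); the "model" here is a stand-in, not a real artifact load:

    import mlrun.serving

    class MyModelServer(mlrun.serving.V2ModelServer):
        def load(self):
            # a real server would fetch the artifact, e.g. via self.get_model();
            # here a callable stands in for the loaded model
            self.model = lambda rows: [sum(row) for row in rows]

        def predict(self, request: dict) -> list:
            rows = request["inputs"]
            return self.model(rows)  # one prediction per input row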
mlrun/utils/db.py
CHANGED

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import abc
 import pickle
 from datetime import datetime

@@ -37,6 +38,13 @@ class BaseModel:

         return dict(map(get_key_value, columns))

+    @abc.abstractmethod
+    def get_identifier_string(self):
+        """
+        This method must be implemented by any subclass.
+        """
+        pass
+

 class HasStruct(BaseModel):
     @property
@@ -54,3 +62,10 @@ class HasStruct(BaseModel):
         exclude = exclude or []
         exclude.append("body")
         return super().to_dict(exclude, strip=strip)
+
+    @abc.abstractmethod
+    def get_identifier_string(self):
+        """
+        This method must be implemented by any subclass.
+        """
+        pass
mlrun/utils/http.py
CHANGED

@@ -95,7 +95,7 @@ class HTTPSessionWithRetry(requests.Session):
             total=self.max_retries,
             backoff_factor=self.retry_backoff_factor,
             status_forcelist=config.http_retry_defaults.status_codes,
-
+            allowed_methods=self._retry_methods,
             # we want to retry but not to raise since we do want that last response (to parse details on the
             # error from response body) we'll handle raising ourselves
             raise_on_status=False,
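
The added keyword is the urllib3 1.26+ spelling of the retry-method filter (the older method_whitelist name is deprecated and removed in urllib3 2.x). A standalone sketch with illustrative values:

    from urllib3.util.retry import Retry

    retry = Retry(
        total=3,
        backoff_factor=0.5,
        status_forcelist=[500, 502, 503, 504],
        allowed_methods=["GET", "PUT", "POST"],  # illustrative set of retryable methods
        raise_on_status=False,                   # let the caller inspect the final response
    )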
{mlrun-1.7.0rc35.dist-info → mlrun-1.7.0rc37.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mlrun
-Version: 1.7.0rc35
+Version: 1.7.0rc37
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -28,7 +28,7 @@ Requires-Dist: aiohttp-retry ~=2.8
 Requires-Dist: click ~=8.1
 Requires-Dist: nest-asyncio ~=1.0
 Requires-Dist: ipython ~=8.10
-Requires-Dist: nuclio-jupyter ~=0.10.
+Requires-Dist: nuclio-jupyter ~=0.10.4
 Requires-Dist: numpy <1.27.0,>=1.16.5
 Requires-Dist: pandas <2.2,>=1.2
 Requires-Dist: pyarrow <15,>=10.0
@@ -50,8 +50,8 @@ Requires-Dist: setuptools ~=71.0
 Requires-Dist: deprecated ~=1.2
 Requires-Dist: jinja2 >=3.1.3,~=3.1
 Requires-Dist: orjson <4,>=3.9.15
-Requires-Dist: mlrun-pipelines-kfp-common ~=0.1.
-Requires-Dist: mlrun-pipelines-kfp-v1-8 ~=0.1.
+Requires-Dist: mlrun-pipelines-kfp-common ~=0.1.6
+Requires-Dist: mlrun-pipelines-kfp-v1-8 ~=0.1.6
 Provides-Extra: alibaba-oss
 Requires-Dist: ossfs ==2023.12.0 ; extra == 'alibaba-oss'
 Requires-Dist: oss2 ==2.18.1 ; extra == 'alibaba-oss'
@@ -97,7 +97,7 @@ Requires-Dist: sqlalchemy ~=1.4 ; extra == 'api'
 Requires-Dist: pymysql ~=1.0 ; extra == 'api'
 Requires-Dist: alembic ~=1.9 ; extra == 'api'
 Requires-Dist: timelength ~=1.1 ; extra == 'api'
-Requires-Dist: memray ~=1.12 ; extra == 'api'
+Requires-Dist: memray ~=1.12 ; (sys_platform != "win32") and extra == 'api'
 Provides-Extra: azure-blob-storage
 Requires-Dist: msrest ~=0.6.21 ; extra == 'azure-blob-storage'
 Requires-Dist: azure-core ~=1.24 ; extra == 'azure-blob-storage'
@@ -156,7 +156,6 @@ Requires-Dist: graphviz ~=0.20.0 ; extra == 'complete-api'
 Requires-Dist: humanfriendly ~=10.0 ; extra == 'complete-api'
 Requires-Dist: igz-mgmt ~=0.2.0 ; extra == 'complete-api'
 Requires-Dist: kafka-python ~=2.0 ; extra == 'complete-api'
-Requires-Dist: memray ~=1.12 ; extra == 'complete-api'
 Requires-Dist: mlflow ~=2.8 ; extra == 'complete-api'
 Requires-Dist: msrest ~=0.6.21 ; extra == 'complete-api'
 Requires-Dist: objgraph ~=3.6 ; extra == 'complete-api'
@@ -172,6 +171,7 @@ Requires-Dist: sqlalchemy ~=1.4 ; extra == 'complete-api'
 Requires-Dist: taos-ws-py ~=0.3.2 ; extra == 'complete-api'
 Requires-Dist: timelength ~=1.1 ; extra == 'complete-api'
 Requires-Dist: uvicorn ~=0.27.1 ; extra == 'complete-api'
+Requires-Dist: memray ~=1.12 ; (sys_platform != "win32") and extra == 'complete-api'
 Provides-Extra: dask
 Requires-Dist: dask ~=2023.9.0 ; extra == 'dask'
 Requires-Dist: distributed ~=2023.9.0 ; extra == 'dask'
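
The reworked memray requirements use a PEP 508 environment marker so the dependency is skipped on Windows. A small check with the packaging library (assuming packaging is available, as it usually is alongside pip):

    from packaging.markers import Marker

    marker = Marker('sys_platform != "win32"')
    print(marker.evaluate())  # True on Linux/macOS, False on Windows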