mlrun 1.7.0rc5__py3-none-any.whl → 1.7.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (75)
  1. mlrun/artifacts/base.py +2 -1
  2. mlrun/artifacts/plots.py +9 -5
  3. mlrun/common/constants.py +6 -0
  4. mlrun/common/schemas/__init__.py +2 -0
  5. mlrun/common/schemas/model_monitoring/__init__.py +4 -0
  6. mlrun/common/schemas/model_monitoring/constants.py +35 -18
  7. mlrun/common/schemas/project.py +1 -0
  8. mlrun/common/types.py +7 -1
  9. mlrun/config.py +19 -6
  10. mlrun/data_types/data_types.py +4 -0
  11. mlrun/datastore/alibaba_oss.py +130 -0
  12. mlrun/datastore/azure_blob.py +4 -5
  13. mlrun/datastore/base.py +22 -16
  14. mlrun/datastore/datastore.py +4 -0
  15. mlrun/datastore/google_cloud_storage.py +1 -1
  16. mlrun/datastore/sources.py +7 -7
  17. mlrun/db/base.py +14 -6
  18. mlrun/db/factory.py +1 -1
  19. mlrun/db/httpdb.py +61 -56
  20. mlrun/db/nopdb.py +3 -0
  21. mlrun/launcher/__init__.py +1 -1
  22. mlrun/launcher/base.py +1 -1
  23. mlrun/launcher/client.py +1 -1
  24. mlrun/launcher/factory.py +1 -1
  25. mlrun/launcher/local.py +1 -1
  26. mlrun/launcher/remote.py +1 -1
  27. mlrun/model.py +1 -0
  28. mlrun/model_monitoring/__init__.py +1 -1
  29. mlrun/model_monitoring/api.py +104 -301
  30. mlrun/model_monitoring/application.py +21 -21
  31. mlrun/model_monitoring/applications/histogram_data_drift.py +130 -40
  32. mlrun/model_monitoring/controller.py +26 -33
  33. mlrun/model_monitoring/db/__init__.py +16 -0
  34. mlrun/model_monitoring/{stores → db/stores}/__init__.py +43 -34
  35. mlrun/model_monitoring/db/stores/base/__init__.py +15 -0
  36. mlrun/model_monitoring/{stores/model_endpoint_store.py → db/stores/base/store.py} +47 -6
  37. mlrun/model_monitoring/db/stores/sqldb/__init__.py +13 -0
  38. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +49 -0
  39. mlrun/model_monitoring/{stores → db/stores/sqldb}/models/base.py +76 -3
  40. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +68 -0
  41. mlrun/model_monitoring/{stores → db/stores/sqldb}/models/sqlite.py +13 -1
  42. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +662 -0
  43. mlrun/model_monitoring/db/stores/v3io_kv/__init__.py +13 -0
  44. mlrun/model_monitoring/{stores/kv_model_endpoint_store.py → db/stores/v3io_kv/kv_store.py} +134 -3
  45. mlrun/model_monitoring/features_drift_table.py +34 -22
  46. mlrun/model_monitoring/helpers.py +45 -6
  47. mlrun/model_monitoring/stream_processing.py +43 -9
  48. mlrun/model_monitoring/tracking_policy.py +7 -1
  49. mlrun/model_monitoring/writer.py +4 -36
  50. mlrun/projects/pipelines.py +13 -1
  51. mlrun/projects/project.py +279 -117
  52. mlrun/run.py +72 -74
  53. mlrun/runtimes/__init__.py +35 -0
  54. mlrun/runtimes/base.py +7 -1
  55. mlrun/runtimes/nuclio/api_gateway.py +188 -61
  56. mlrun/runtimes/nuclio/application/__init__.py +15 -0
  57. mlrun/runtimes/nuclio/application/application.py +283 -0
  58. mlrun/runtimes/nuclio/application/reverse_proxy.go +87 -0
  59. mlrun/runtimes/nuclio/function.py +53 -1
  60. mlrun/runtimes/nuclio/serving.py +28 -32
  61. mlrun/runtimes/pod.py +27 -1
  62. mlrun/serving/server.py +4 -6
  63. mlrun/serving/states.py +41 -33
  64. mlrun/utils/helpers.py +34 -0
  65. mlrun/utils/version/version.json +2 -2
  66. {mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/METADATA +14 -5
  67. {mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/RECORD +71 -64
  68. mlrun/model_monitoring/batch.py +0 -974
  69. mlrun/model_monitoring/stores/models/__init__.py +0 -27
  70. mlrun/model_monitoring/stores/models/mysql.py +0 -34
  71. mlrun/model_monitoring/stores/sql_model_endpoint_store.py +0 -382
  72. {mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/LICENSE +0 -0
  73. {mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/WHEEL +0 -0
  74. {mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/entry_points.txt +0 -0
  75. {mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/top_level.txt +0 -0
mlrun/runtimes/nuclio/application/reverse_proxy.go ADDED
@@ -0,0 +1,87 @@
+ // Copyright 2024 Iguazio
+ //
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+ //
+ //     http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ package main
+
+ import (
+     "bytes"
+     "fmt"
+     "net/http"
+     "net/http/httptest"
+     "net/http/httputil"
+     "net/url"
+     "os"
+     "strings"
+
+     nuclio "github.com/nuclio/nuclio-sdk-go"
+ )
+
+ func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) {
+     reverseProxy := context.UserData.(map[string]interface{})["reverseProxy"].(*httputil.ReverseProxy)
+     sidecarUrl := context.UserData.(map[string]interface{})["server"].(string)
+
+     // populate reverse proxy http request
+     httpRequest, err := http.NewRequest(event.GetMethod(), event.GetPath(), bytes.NewReader(event.GetBody()))
+     if err != nil {
+         context.Logger.ErrorWith("Failed to create a reverse proxy request")
+         return nil, err
+     }
+     for k, v := range event.GetHeaders() {
+         httpRequest.Header[k] = []string{v.(string)}
+     }
+     recorder := httptest.NewRecorder()
+     reverseProxy.ServeHTTP(recorder, httpRequest)
+
+     // send request to sidecar
+     context.Logger.InfoWith("Forwarding request to sidecar", "sidecarUrl", sidecarUrl)
+     response := recorder.Result()
+
+     headers := make(map[string]interface{})
+     for key, value := range response.Header {
+         headers[key] = value[0]
+     }
+
+     // let the processor calculate the content length
+     delete(headers, "Content-Length")
+     return nuclio.Response{
+         StatusCode:  response.StatusCode,
+         Body:        recorder.Body.Bytes(),
+         ContentType: response.Header.Get("Content-Type"),
+         Headers:     headers,
+     }, nil
+ }
+
+ func InitContext(context *nuclio.Context) error {
+     sidecarHost := os.Getenv("SIDECAR_HOST")
+     sidecarPort := os.Getenv("SIDECAR_PORT")
+     if sidecarHost == "" {
+         sidecarHost = "http://localhost"
+     } else if !strings.Contains(sidecarHost, "://") {
+         sidecarHost = fmt.Sprintf("http://%s", sidecarHost)
+     }
+
+     // url for request forwarding
+     sidecarUrl := fmt.Sprintf("%s:%s", sidecarHost, sidecarPort)
+     parsedURL, err := url.Parse(sidecarUrl)
+     if err != nil {
+         context.Logger.ErrorWith("Failed to parse sidecar url", "sidecarUrl", sidecarUrl)
+         return err
+     }
+     reverseProxy := httputil.NewSingleHostReverseProxy(parsedURL)
+
+     context.UserData = map[string]interface{}{
+         "server":       sidecarUrl,
+         "reverseProxy": reverseProxy,
+     }
+     return nil
+ }
mlrun/runtimes/nuclio/function.py CHANGED
@@ -291,6 +291,9 @@ class RemoteRuntime(KubeResource):
      def status(self, status):
          self._status = self._verify_dict(status, "status", NuclioStatus)

+     def pre_deploy_validation(self):
+         pass
+
      def set_config(self, key, value):
          self.spec.config[key] = value
          return self
@@ -603,7 +606,6 @@ class RemoteRuntime(KubeResource):
          return self.spec.command

      def _wait_for_function_deployment(self, db, verbose=False):
-         text = ""
          state = ""
          last_log_timestamp = 1
          while state not in ["ready", "error", "unhealthy"]:
@@ -773,6 +775,9 @@ class RemoteRuntime(KubeResource):
          ] = self.metadata.credentials.access_key
          return runtime_env

+     def _get_serving_spec(self):
+         return None
+
      def _get_nuclio_config_spec_env(self):
          env_dict = {}
          external_source_env_dict = {}
@@ -958,6 +963,53 @@ class RemoteRuntime(KubeResource):
          data = json.loads(data)
          return data

+     def with_sidecar(
+         self,
+         name: str = None,
+         image: str = None,
+         ports: typing.Optional[typing.Union[int, list[int]]] = None,
+         command: typing.Optional[str] = None,
+         args: typing.Optional[list[str]] = None,
+     ):
+         """
+         Add a sidecar container to the function pod
+
+         :param name:    Sidecar container name.
+         :param image:   Sidecar container image.
+         :param ports:   Sidecar container ports to expose. Can be a single port or a list of ports.
+         :param command: Sidecar container command instead of the image entrypoint.
+         :param args:    Sidecar container command args (requires command to be set).
+         """
+         name = name or f"{self.metadata.name}-sidecar"
+         sidecar = self._set_sidecar(name)
+         if image:
+             sidecar["image"] = image
+
+         ports = mlrun.utils.helpers.as_list(ports)
+         sidecar["ports"] = [
+             {
+                 "name": "http",
+                 "containerPort": port,
+                 "protocol": "TCP",
+             }
+             for port in ports
+         ]
+
+         if command:
+             sidecar["command"] = command
+
+         if args:
+             sidecar["args"] = args
+
+     def _set_sidecar(self, name: str) -> dict:
+         self.spec.config.setdefault("spec.sidecars", [])
+         sidecars = self.spec.config["spec.sidecars"]
+         for sidecar in sidecars:
+             if sidecar["name"] == name:
+                 return sidecar
+
+         sidecars.append({"name": name})
+         return sidecars[-1]
+
      def _trigger_of_kind_exists(self, kind: str) -> bool:
          if not self.spec.config:
              return False
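
For orientation, a minimal sketch (not part of the diff) of how the new with_sidecar helper might be called on a Nuclio function; the function name, sidecar image, port, and command below are hypothetical:

    import mlrun

    # hypothetical Nuclio function, for illustration only
    fn = mlrun.new_function("my-app", kind="nuclio", image="mlrun/mlrun")

    # run a second container next to the function; when name is omitted it
    # defaults to "<function-name>-sidecar" (see with_sidecar above)
    fn.with_sidecar(
        image="myrepo/web-app:latest",  # hypothetical sidecar image
        ports=8050,                     # a single port or a list of ports
        command="python",
        args=["app.py"],
    )

The sidecar ends up under the function's spec.config["spec.sidecars"], which is what the new application runtime and the reverse proxy handler above build on.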
mlrun/runtimes/nuclio/serving.py CHANGED
@@ -14,8 +14,9 @@

  import json
  import os
+ import warnings
  from copy import deepcopy
- from typing import Union
+ from typing import TYPE_CHECKING, Optional, Union

  import nuclio
  from nuclio import KafkaTrigger
@@ -24,7 +25,6 @@ import mlrun
  import mlrun.common.schemas
  from mlrun.datastore import parse_kafka_url
  from mlrun.model import ObjectList
- from mlrun.model_monitoring.tracking_policy import TrackingPolicy
  from mlrun.runtimes.function_reference import FunctionReference
  from mlrun.secrets import SecretsStore
  from mlrun.serving.server import GraphServer, create_graph_server
@@ -43,6 +43,10 @@ from .function import NuclioSpec, RemoteRuntime

  serving_subkind = "serving_v2"

+ if TYPE_CHECKING:
+     # remove this block in 1.9.0
+     from mlrun.model_monitoring import TrackingPolicy
+

  def new_v2_model_server(
      name,
@@ -291,7 +295,9 @@ class ServingRuntime(RemoteRuntime):
                  "provided class is not a router step, must provide a router class in router topology"
              )
          else:
-             step = RouterStep(class_name=class_name, class_args=class_args)
+             step = RouterStep(
+                 class_name=class_name, class_args=class_args, engine=engine
+             )
          self.spec.graph = step
      elif topology == StepKinds.flow:
          self.spec.graph = RootFlowStep(engine=engine)
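
The engine argument passed to set_topology is now forwarded to the RouterStep (and, per the states.py changes further down, stored on the step), so a router topology can apparently run on the async storey engine as well. A minimal sketch, assuming the existing set_topology signature; the function name is hypothetical:

    import mlrun

    # hypothetical serving function, for illustration only
    serving_fn = mlrun.new_function("serving", kind="serving", image="mlrun/mlrun")

    # the engine value is now kept on the RouterStep itself instead of being
    # dropped for router topologies
    serving_fn.set_topology("router", engine="async")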
@@ -303,12 +309,12 @@ class ServingRuntime(RemoteRuntime):

      def set_tracking(
          self,
-         stream_path: str = None,
-         batch: int = None,
-         sample: int = None,
-         stream_args: dict = None,
-         tracking_policy: Union[TrackingPolicy, dict] = None,
-     ):
+         stream_path: Optional[str] = None,
+         batch: Optional[int] = None,
+         sample: Optional[int] = None,
+         stream_args: Optional[dict] = None,
+         tracking_policy: Optional[Union["TrackingPolicy", dict]] = None,
+     ) -> None:
          """apply on your serving function to monitor a deployed model, including real-time dashboards to detect drift
          and analyze performance.

@@ -317,31 +323,17 @@ class ServingRuntime(RemoteRuntime):
          :param batch: Micro batch size (send micro batches of N records at a time).
          :param sample: Sample size (send only one of N records).
          :param stream_args: Stream initialization parameters, e.g. shards, retention_in_hours, ..
-         :param tracking_policy: Tracking policy object or a dictionary that will be converted into a tracking policy
-                                 object. By using TrackingPolicy, the user can apply his model monitoring requirements,
-                                 such as setting the scheduling policy of the model monitoring batch job or changing
-                                 the image of the model monitoring stream.

          example::

              # initialize a new serving function
              serving_fn = mlrun.import_function("hub://v2-model-server", new_name="serving")
-             # apply model monitoring and set monitoring batch job to run every 3 hours
-             tracking_policy = {'default_batch_intervals':"0 */3 * * *"}
-             serving_fn.set_tracking(tracking_policy=tracking_policy)
+             # apply model monitoring
+             serving_fn.set_tracking()

          """
-
          # Applying model monitoring configurations
          self.spec.track_models = True
-         self.spec.tracking_policy = None
-         if tracking_policy:
-             if isinstance(tracking_policy, dict):
-                 # Convert tracking policy dictionary into `model_monitoring.TrackingPolicy` object
-                 self.spec.tracking_policy = TrackingPolicy.from_dict(tracking_policy)
-             else:
-                 # Tracking_policy is already a `model_monitoring.TrackingPolicy` object
-                 self.spec.tracking_policy = tracking_policy

          if stream_path:
              self.spec.parameters["log_stream"] = stream_path
@@ -351,6 +343,14 @@ class ServingRuntime(RemoteRuntime):
              self.spec.parameters["log_stream_sample"] = sample
          if stream_args:
              self.spec.parameters["stream_args"] = stream_args
+         if tracking_policy is not None:
+             warnings.warn(
+                 "The `tracking_policy` argument is deprecated from version 1.7.0 "
+                 "and has no effect. It will be removed in 1.9.0.\n"
+                 "To set the desired model monitoring time window and schedule, use "
+                 "the `base_period` argument in `project.enable_model_monitoring()`.",
+                 FutureWarning,
+             )

      def add_model(
          self,
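
Since tracking_policy is now a no-op, the schedule that used to be expressed via default_batch_intervals moves to the project level, as the warning text says. A hedged sketch of the replacement flow; the project name is hypothetical and base_period is assumed to be the interval, in minutes, between monitoring controller runs:

    import mlrun

    project = mlrun.get_or_create_project("my-proj", context="./")

    # project-level replacement for the old per-function tracking policy
    project.enable_model_monitoring(base_period=10)

    # assuming a serving function named "serving" was already set on the project
    serving_fn = project.get_function("serving")
    serving_fn.set_tracking()  # no tracking_policy argument anymore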
@@ -644,8 +644,7 @@ class ServingRuntime(RemoteRuntime):
              force_build=force_build,
          )

-     def _get_runtime_env(self):
-         env = super()._get_runtime_env()
+     def _get_serving_spec(self):
          function_name_uri_map = {f.name: f.uri(self) for f in self.spec.function_refs}

          serving_spec = {
@@ -658,9 +657,7 @@ class ServingRuntime(RemoteRuntime):
              "graph_initializer": self.spec.graph_initializer,
              "error_stream": self.spec.error_stream,
              "track_models": self.spec.track_models,
-             "tracking_policy": self.spec.tracking_policy.to_dict()
-             if self.spec.tracking_policy
-             else None,
+             "tracking_policy": None,
              "default_content_type": self.spec.default_content_type,
          }

@@ -668,8 +665,7 @@ class ServingRuntime(RemoteRuntime):
              self._secrets = SecretsStore.from_list(self.spec.secret_sources)
              serving_spec["secret_sources"] = self._secrets.to_serial()

-         env["SERVING_SPEC_ENV"] = json.dumps(serving_spec)
-         return env
+         return json.dumps(serving_spec)

      def to_mock_server(
          self,
mlrun/runtimes/pod.py CHANGED
@@ -985,7 +985,7 @@ class KubeResource(BaseRuntime):
      _is_nested = True

      def __init__(self, spec=None, metadata=None):
-         super().__init__(metadata, spec)
+         super().__init__(metadata=metadata, spec=spec)
          self.verbose = False

      @property
@@ -1057,6 +1057,32 @@ class KubeResource(BaseRuntime):
              return True
          return False

+     def enrich_runtime_spec(
+         self,
+         project_node_selector: dict[str, str],
+     ):
+         """
+         Enriches the runtime spec with the project-level node selector.
+
+         This method merges the project-level node selector with the existing function node_selector.
+         The merge logic used here combines the two dictionaries, giving precedence to
+         the keys in the runtime node_selector. If there are conflicting keys between the
+         two dictionaries, the values from self.spec.node_selector will overwrite the
+         values from project_node_selector.
+
+         Example:
+             Suppose self.spec.node_selector = {"type": "gpu", "zone": "us-east-1"}
+             and project_node_selector = {"type": "cpu", "environment": "production"}.
+             After the merge, the resulting node_selector will be:
+             {"type": "gpu", "zone": "us-east-1", "environment": "production"}
+
+         Note:
+             - The merge uses the ** operator, also known as the "unpacking" operator in Python,
+               combining key-value pairs from each dictionary. Later dictionaries take precedence
+               when there are conflicting keys.
+         """
+         self.spec.node_selector = {**project_node_selector, **self.spec.node_selector}
+
      def _set_env(self, name, value=None, value_from=None):
          new_var = k8s_client.V1EnvVar(name=name, value=value, value_from=value_from)
          i = 0
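
The docstring example above can be exercised directly; a small sketch, assuming a job runtime (any KubeResource subclass) and hypothetical selector values:

    import mlrun

    fn = mlrun.new_function("trainer", kind="job", image="mlrun/mlrun")
    fn.spec.node_selector = {"type": "gpu", "zone": "us-east-1"}

    # function keys win on conflict, so "type" stays "gpu"
    fn.enrich_runtime_spec({"type": "cpu", "environment": "production"})
    print(fn.spec.node_selector)
    # {'type': 'gpu', 'zone': 'us-east-1', 'environment': 'production'}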
mlrun/serving/server.py CHANGED
@@ -23,6 +23,7 @@ import uuid
  from typing import Optional, Union

  import mlrun
+ import mlrun.common.constants
  import mlrun.common.helpers
  import mlrun.model_monitoring
  from mlrun.config import config
@@ -311,11 +312,8 @@ class GraphServer(ModelObj):
  def v2_serving_init(context, namespace=None):
      """hook for nuclio init_context()"""

-     data = os.environ.get("SERVING_SPEC_ENV", "")
-     if not data:
-         raise MLRunInvalidArgumentError("failed to find spec env var")
-     spec = json.loads(data)
      context.logger.info("Initializing server from spec")
+     spec = mlrun.utils.get_serving_spec()
      server = GraphServer.from_dict(spec)
      if config.log_level.lower() == "debug":
          server.verbose = True
@@ -355,7 +353,7 @@ def v2_serving_init(context, namespace=None):

      async def termination_callback():
          context.logger.info("Termination callback called")
-         await server.wait_for_completion()
+         server.wait_for_completion()
          context.logger.info("Termination of async flow is completed")

      context.platform.set_termination_callback(termination_callback)
@@ -367,7 +365,7 @@ def v2_serving_init(context, namespace=None):

      async def drain_callback():
          context.logger.info("Drain callback called")
-         await server.wait_for_completion()
+         server.wait_for_completion()
          context.logger.info(
              "Termination of async flow is completed. Rerunning async flow."
          )
mlrun/serving/states.py CHANGED
@@ -14,7 +14,6 @@

  __all__ = ["TaskStep", "RouterStep", "RootFlowStep", "ErrorStep"]

- import asyncio
  import os
  import pathlib
  import traceback
@@ -591,7 +590,7 @@ class RouterStep(TaskStep):

      kind = "router"
      default_shape = "doubleoctagon"
-     _dict_fields = _task_step_fields + ["routes"]
+     _dict_fields = _task_step_fields + ["routes", "engine"]
      _default_class = "mlrun.serving.ModelRouter"

      def __init__(
@@ -604,6 +603,7 @@ class RouterStep(TaskStep):
          function: str = None,
          input_path: str = None,
          result_path: str = None,
+         engine: str = None,
      ):
          super().__init__(
              class_name,
@@ -616,6 +616,8 @@ class RouterStep(TaskStep):
          )
          self._routes: ObjectDict = None
          self.routes = routes
+         self.engine = engine
+         self._controller = None

      def get_children(self):
          """get child steps (routes)"""
@@ -685,6 +687,33 @@ class RouterStep(TaskStep):
          self._set_error_handler()
          self._post_init(mode)

+         if self.engine == "async":
+             self._build_async_flow()
+             self._run_async_flow()
+
+     def _build_async_flow(self):
+         """initialize and build the async/storey DAG"""
+
+         self.respond()
+         source, self._wait_for_result = _init_async_objects(self.context, [self])
+         source.to(self.async_object)
+
+         self._async_flow = source
+
+     def _run_async_flow(self):
+         self._controller = self._async_flow.run()
+
+     def run(self, event, *args, **kwargs):
+         if self._controller:
+             # async flow (using storey)
+             event._awaitable_result = None
+             resp = self._controller.emit(
+                 event, return_awaitable_result=self._wait_for_result
+             )
+             return resp.await_result()
+
+         return super().run(event, *args, **kwargs)
+
      def __getitem__(self, name):
          return self._routes[name]
@@ -1161,19 +1190,11 @@ class FlowStep(BaseStep):
          if self._controller:
              # async flow (using storey)
              event._awaitable_result = None
-             if self.context.is_mock:
-                 resp = self._controller.emit(
-                     event, return_awaitable_result=self._wait_for_result
-                 )
-                 if self._wait_for_result and resp:
-                     return resp.await_result()
-             else:
-                 resp_awaitable = self._controller.emit(
-                     event, await_result=self._wait_for_result
-                 )
-                 if self._wait_for_result:
-                     return resp_awaitable
-                 return self._await_and_return_id(resp_awaitable, event)
+             resp = self._controller.emit(
+                 event, return_awaitable_result=self._wait_for_result
+             )
+             if self._wait_for_result and resp:
+                 return resp.await_result()
          event = copy(event)
          event.body = {"id": event.id}
          return event
@@ -1213,18 +1234,9 @@ class FlowStep(BaseStep):
          """wait for completion of run in async flows"""

          if self._controller:
-             if asyncio.iscoroutinefunction(self._controller.await_termination):
-
-                 async def terminate_and_await_termination():
-                     if hasattr(self._controller, "terminate"):
-                         await self._controller.terminate()
-                     return await self._controller.await_termination()
-
-                 return terminate_and_await_termination()
-             else:
-                 if hasattr(self._controller, "terminate"):
-                     self._controller.terminate()
-                 return self._controller.await_termination()
+             if hasattr(self._controller, "terminate"):
+                 self._controller.terminate()
+             return self._controller.await_termination()

      def plot(self, filename=None, format=None, source=None, targets=None, **kw):
          """plot/save graph using graphviz
@@ -1568,12 +1580,8 @@ def _init_async_objects(context, steps):
      source_args = context.get_param("source_args", {})
      explicit_ack = is_explicit_ack_supported(context) and mlrun.mlconf.is_explicit_ack()

-     if context.is_mock:
-         source_class = storey.SyncEmitSource
-     else:
-         source_class = storey.AsyncEmitSource
-
-     default_source = source_class(
+     # TODO: Change to AsyncEmitSource once we can drop support for nuclio<1.12.10
+     default_source = storey.SyncEmitSource(
          context=context,
          explicit_ack=explicit_ack,
          **source_args,
mlrun/utils/helpers.py CHANGED
@@ -1405,6 +1405,18 @@ def as_number(field_name, field_value):


  def filter_warnings(action, category):
+     """
+     Decorator to filter warnings
+
+     Example::
+
+         @filter_warnings("ignore", FutureWarning)
+         def my_function():
+             pass
+
+     :param action: one of "error", "ignore", "always", "default", "module", or "once"
+     :param category: a class that the warning must be a subclass of
+     """
+
      def decorator(function):
          def wrapper(*args, **kwargs):
              # context manager that copies and, upon exit, restores the warnings filter and the showwarning() function.
@@ -1562,3 +1574,25 @@ def is_safe_path(base, filepath, is_symlink=False):
          os.path.abspath(filepath) if not is_symlink else os.path.realpath(filepath)
      )
      return base == os.path.commonpath((base, resolved_filepath))
+
+
+ def get_serving_spec():
+     data = None
+
+     # we will have the serving spec in either mounted config map
+     # or env depending on the size of the spec and configuration
+
+     try:
+         with open(mlrun.common.constants.MLRUN_SERVING_SPEC_PATH) as f:
+             data = f.read()
+     except FileNotFoundError:
+         pass
+
+     if data is None:
+         data = os.environ.get("SERVING_SPEC_ENV", "")
+         if not data:
+             raise mlrun.errors.MLRunInvalidArgumentError(
+                 "Failed to find serving spec in env var or config file"
+             )
+     spec = json.loads(data)
+     return spec
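
A minimal sketch of the lookup order implemented above: the mounted config-map file at MLRUN_SERVING_SPEC_PATH wins, and SERVING_SPEC_ENV is only consulted when the file is absent. The spec content here is a toy example, and the sketch assumes no config map is mounted locally:

    import json
    import os

    import mlrun

    # toy spec, for illustration only; real specs are produced by
    # ServingRuntime._get_serving_spec() at deploy time
    os.environ["SERVING_SPEC_ENV"] = json.dumps({"graph": None, "parameters": {}})

    # falls back to the env var because the config-map file does not exist
    spec = mlrun.utils.get_serving_spec()
    print(spec["parameters"])  # {}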
mlrun/utils/version/version.json CHANGED
@@ -1,4 +1,4 @@
  {
-     "git_commit": "d3324e482f4a4182ee0c8eda4af0d312718b599d",
-     "version": "1.7.0-rc5"
+     "git_commit": "06b1879c4a1857b20f07e805c46f51aa4ac74cef",
+     "version": "1.7.0-rc7"
  }
{mlrun-1.7.0rc5.dist-info → mlrun-1.7.0rc7.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mlrun
- Version: 1.7.0rc5
+ Version: 1.7.0rc7
  Summary: Tracking and config of machine learning runs
  Home-page: https://github.com/mlrun/mlrun
  Author: Yaron Haviv
@@ -36,7 +36,7 @@ Requires-Dist: pyarrow <15,>=10.0
  Requires-Dist: pyyaml ~=5.1
  Requires-Dist: requests ~=2.31
  Requires-Dist: tabulate ~=0.8.6
- Requires-Dist: v3io ~=0.6.2
+ Requires-Dist: v3io ~=0.6.4
  Requires-Dist: pydantic >=1.10.8,~=1.10
  Requires-Dist: mergedeep ~=1.3
  Requires-Dist: v3io-frames ~=0.10.12
@@ -44,13 +44,16 @@ Requires-Dist: semver ~=3.0
  Requires-Dist: dependency-injector ~=4.41
  Requires-Dist: fsspec ==2023.9.2
  Requires-Dist: v3iofs ~=0.1.17
- Requires-Dist: storey ~=1.7.5
+ Requires-Dist: storey ~=1.7.6
  Requires-Dist: inflection ~=0.5.0
  Requires-Dist: python-dotenv ~=0.17.0
  Requires-Dist: setuptools ~=69.1
  Requires-Dist: deprecated ~=1.2
  Requires-Dist: jinja2 >=3.1.3,~=3.1
  Requires-Dist: orjson ~=3.9
+ Provides-Extra: alibaba-oss
+ Requires-Dist: ossfs ==2023.12.0 ; extra == 'alibaba-oss'
+ Requires-Dist: oss2 ==2.18.1 ; extra == 'alibaba-oss'
  Provides-Extra: all
  Requires-Dist: adlfs ==2023.9.0 ; extra == 'all'
  Requires-Dist: aiobotocore <2.8,>=2.5.0 ; extra == 'all'
@@ -71,6 +74,8 @@ Requires-Dist: graphviz ~=0.20.0 ; extra == 'all'
  Requires-Dist: kafka-python ~=2.0 ; extra == 'all'
  Requires-Dist: mlflow ~=2.8 ; extra == 'all'
  Requires-Dist: msrest ~=0.6.21 ; extra == 'all'
+ Requires-Dist: oss2 ==2.18.1 ; extra == 'all'
+ Requires-Dist: ossfs ==2023.12.0 ; extra == 'all'
  Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'all'
  Requires-Dist: pyopenssl >=23 ; extra == 'all'
  Requires-Dist: redis ~=4.3 ; extra == 'all'
@@ -81,7 +86,7 @@ Requires-Dist: uvicorn ~=0.27.1 ; extra == 'api'
  Requires-Dist: dask-kubernetes ~=0.11.0 ; extra == 'api'
  Requires-Dist: apscheduler <4,>=3.10.3 ; extra == 'api'
  Requires-Dist: objgraph ~=3.6 ; extra == 'api'
- Requires-Dist: igz-mgmt ~=0.1.0 ; extra == 'api'
+ Requires-Dist: igz-mgmt ~=0.1.1 ; extra == 'api'
  Requires-Dist: humanfriendly ~=10.0 ; extra == 'api'
  Requires-Dist: fastapi ~=0.110.0 ; extra == 'api'
  Requires-Dist: sqlalchemy ~=1.4 ; extra == 'api'
@@ -116,6 +121,8 @@ Requires-Dist: graphviz ~=0.20.0 ; extra == 'complete'
  Requires-Dist: kafka-python ~=2.0 ; extra == 'complete'
  Requires-Dist: mlflow ~=2.8 ; extra == 'complete'
  Requires-Dist: msrest ~=0.6.21 ; extra == 'complete'
+ Requires-Dist: oss2 ==2.18.1 ; extra == 'complete'
+ Requires-Dist: ossfs ==2023.12.0 ; extra == 'complete'
  Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'complete'
  Requires-Dist: pyopenssl >=23 ; extra == 'complete'
  Requires-Dist: redis ~=4.3 ; extra == 'complete'
@@ -140,11 +147,13 @@ Requires-Dist: gcsfs ==2023.9.2 ; extra == 'complete-api'
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas] ==3.14.1 ; extra == 'complete-api'
  Requires-Dist: graphviz ~=0.20.0 ; extra == 'complete-api'
  Requires-Dist: humanfriendly ~=10.0 ; extra == 'complete-api'
- Requires-Dist: igz-mgmt ~=0.1.0 ; extra == 'complete-api'
+ Requires-Dist: igz-mgmt ~=0.1.1 ; extra == 'complete-api'
  Requires-Dist: kafka-python ~=2.0 ; extra == 'complete-api'
  Requires-Dist: mlflow ~=2.8 ; extra == 'complete-api'
  Requires-Dist: msrest ~=0.6.21 ; extra == 'complete-api'
  Requires-Dist: objgraph ~=3.6 ; extra == 'complete-api'
+ Requires-Dist: oss2 ==2.18.1 ; extra == 'complete-api'
+ Requires-Dist: ossfs ==2023.12.0 ; extra == 'complete-api'
  Requires-Dist: plotly <5.12.0,~=5.4 ; extra == 'complete-api'
  Requires-Dist: pymysql ~=1.0 ; extra == 'complete-api'
  Requires-Dist: pyopenssl >=23 ; extra == 'complete-api'