mlrun 1.8.0rc44__py3-none-any.whl → 1.8.0rc46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

@@ -13,52 +13,38 @@
 # limitations under the License.
 
 import json
+from abc import ABC, abstractmethod
 from contextlib import AbstractContextManager
 from types import TracebackType
 from typing import Final, Optional
 
 import botocore.exceptions
 
-import mlrun.common.schemas
+import mlrun.common.schemas as schemas
 import mlrun.errors
 import mlrun.model_monitoring.helpers
 from mlrun.utils import logger
 
 
-class ModelMonitoringSchedulesFile(AbstractContextManager):
+class ModelMonitoringSchedulesFileBase(AbstractContextManager, ABC):
     DEFAULT_SCHEDULES: Final = {}
     INITIAL_CONTENT = json.dumps(DEFAULT_SCHEDULES)
     ENCODING = "utf-8"
 
-    def __init__(self, project: str, endpoint_id: str) -> None:
-        """
-        Initialize applications monitoring schedules file object.
-        The JSON file stores a dictionary of registered application name as key and Unix timestamp as value.
-        When working with the schedules data, use this class as a context manager to read and write the data.
-
-        :param project: The project name.
-        :param endpoint_id: The endpoint ID.
-        """
-        # `self._item` is the persistent version of the monitoring schedules.
-        self._item = mlrun.model_monitoring.helpers.get_monitoring_schedules_data(
-            project=project, endpoint_id=endpoint_id
-        )
-        self._path = self._item.url
-        self._fs = self._item.store.filesystem
-        # `self._schedules` is an in-memory copy of the DB for all the applications for
-        # the same model endpoint.
-        self._schedules: dict[str, int] = self.DEFAULT_SCHEDULES.copy()
-        # Does `self._schedules` hold the content of `self._item`?
-        self._open_schedules = False
-
-    @classmethod
-    def from_model_endpoint(
-        cls, model_endpoint: mlrun.common.schemas.ModelEndpoint
-    ) -> "ModelMonitoringSchedulesFile":
-        return cls(
-            project=model_endpoint.metadata.project,
-            endpoint_id=model_endpoint.metadata.uid,
-        )
+    def __init__(self):
+        self._item = self.get_data_item_object()
+        if self._item:
+            self._path = self._item.url
+            self._fs = self._item.store.filesystem
+        # `self._schedules` is an in-memory copy of the DB for all the applications for
+        # the same model endpoint.
+        self._schedules = self.DEFAULT_SCHEDULES.copy()
+        # Does `self._schedules` hold the content of `self._item`?
+        self._open_schedules = False
+
+    @abstractmethod
+    def get_data_item_object(self) -> mlrun.DataItem:
+        pass
 
     def create(self) -> None:
         """Create a schedules file with initial content - an empty dictionary"""
@@ -114,7 +100,7 @@ class ModelMonitoringSchedulesFile(AbstractContextManager):
         self._schedules = self.DEFAULT_SCHEDULES
         self._open_schedules = False
 
-    def __enter__(self) -> "ModelMonitoringSchedulesFile":
+    def __enter__(self) -> "ModelMonitoringSchedulesFileBase":
         self._open()
         return super().__enter__()
 
@@ -132,6 +118,36 @@ class ModelMonitoringSchedulesFile(AbstractContextManager):
             "Open the schedules file as a context manager first"
         )
 
+
+class ModelMonitoringSchedulesFileEndpoint(ModelMonitoringSchedulesFileBase):
+    def __init__(self, project: str, endpoint_id: str) -> None:
+        """
+        Initialize applications monitoring schedules file object.
+        The JSON file stores a dictionary of registered application name as key and Unix timestamp as value.
+        When working with the schedules data, use this class as a context manager to read and write the data.
+
+        :param project: The project name.
+        :param endpoint_id: The endpoint ID.
+        """
+        # `self._item` is the persistent version of the monitoring schedules.
+        self._project = project
+        self._endpoint_id = endpoint_id
+        super().__init__()
+
+    def get_data_item_object(self) -> mlrun.DataItem:
+        return mlrun.model_monitoring.helpers.get_monitoring_schedules_endpoint_data(
+            project=self._project, endpoint_id=self._endpoint_id
+        )
+
+    @classmethod
+    def from_model_endpoint(
+        cls, model_endpoint: schemas.ModelEndpoint
+    ) -> "ModelMonitoringSchedulesFileEndpoint":
+        return cls(
+            project=model_endpoint.metadata.project,
+            endpoint_id=model_endpoint.metadata.uid,
+        )
+
     def get_application_time(self, application: str) -> Optional[int]:
         self._check_open_schedules()
         return self._schedules.get(application)
@@ -149,6 +165,68 @@ class ModelMonitoringSchedulesFile(AbstractContextManager):
         return min(self._schedules.values(), default=None)
 
 
+class ModelMonitoringSchedulesFileChief(ModelMonitoringSchedulesFileBase):
+    def __init__(self, project: str) -> None:
+        """
+        Initialize applications monitoring schedules chief file object.
+        The JSON file stores a dictionary of registered model endpoints uid as key and point to a dictionary of
+        "last_request" and "last_analyzed" mapped to two Unix timestamps as values.
+        When working with the schedules data, use this class as a context manager to read and write the data.
+
+        :param project: The project name.
+        """
+        # `self._item` is the persistent version of the monitoring schedules.
+        self._project = project
+        super().__init__()
+
+    def get_data_item_object(self) -> mlrun.DataItem:
+        return mlrun.model_monitoring.helpers.get_monitoring_schedules_chief_data(
+            project=self._project
+        )
+
+    def get_endpoint_last_request(self, endpoint_uid: str) -> Optional[int]:
+        self._check_open_schedules()
+        if endpoint_uid in self._schedules:
+            return self._schedules[endpoint_uid].get(
+                schemas.model_monitoring.constants.ScheduleChiefFields.LAST_REQUEST
+            )
+        else:
+            return None
+
+    def update_endpoint_timestamps(
+        self, endpoint_uid: str, last_request: int, last_analyzed: int
+    ) -> None:
+        self._check_open_schedules()
+        self._schedules[endpoint_uid] = {
+            schemas.model_monitoring.constants.ScheduleChiefFields.LAST_REQUEST: last_request,
+            schemas.model_monitoring.constants.ScheduleChiefFields.LAST_ANALYZED: last_analyzed,
+        }
+
+    def get_endpoint_last_analyzed(self, endpoint_uid: str) -> Optional[int]:
+        self._check_open_schedules()
+        if endpoint_uid in self._schedules:
+            return self._schedules[endpoint_uid].get(
+                schemas.model_monitoring.constants.ScheduleChiefFields.LAST_ANALYZED
+            )
+        else:
+            return None
+
+    def get_endpoint_list(self) -> set[str]:
+        self._check_open_schedules()
+        return set(self._schedules.keys())
+
+    def get_or_create(self) -> None:
+        try:
+            self._open()
+        except (
+            mlrun.errors.MLRunNotFoundError,
+            # Different errors are raised for S3 or local storage, see ML-8042
+            botocore.exceptions.ClientError,
+            FileNotFoundError,
+        ):
+            self.create()
+
+
 def delete_model_monitoring_schedules_folder(project: str) -> None:
     """Delete the model monitoring schedules folder of the project"""
     folder = mlrun.model_monitoring.helpers._get_monitoring_schedules_folder_path(
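For orientation, a minimal usage sketch of the split introduced above: the endpoint-scoped file keeps per-application timestamps, while the new chief file keeps per-endpoint last_request/last_analyzed pairs. The import path, project name, endpoint id, and timestamps below are illustrative assumptions, not taken from the diff.

    # Hypothetical usage sketch; the module path of the schedules classes is an
    # assumption, as are the project/endpoint names and the timestamps.
    from mlrun.model_monitoring.db._schedules import (  # path assumed
        ModelMonitoringSchedulesFileChief,
        ModelMonitoringSchedulesFileEndpoint,
    )

    # Per-endpoint file: {application name -> Unix timestamp}
    with ModelMonitoringSchedulesFileEndpoint(
        project="my-proj", endpoint_id="abc123"
    ) as ep_file:
        last_run = ep_file.get_application_time("histogram-data-drift")

    # Chief file: {endpoint uid -> {"last_request": ts, "last_analyzed": ts}}
    chief = ModelMonitoringSchedulesFileChief(project="my-proj")
    chief.get_or_create()  # creates the file on first use (see get_or_create above)
    with chief:
        chief.update_endpoint_timestamps(
            "abc123", last_request=1_700_000_000, last_analyzed=1_700_000_600
        )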
@@ -33,7 +33,12 @@ _TSDB_BE = "tsdb"
 _TSDB_RATE = "1/s"
 _CONTAINER = "users"
 
-V3IO_MEPS_LIMIT = 200
+V3IO_FRAMESD_MEPS_LIMIT = (
+    200  # Maximum number of model endpoints per single request when using V3IO Frames
+)
+V3IO_CLIENT_MEPS_LIMIT = (
+    150  # Maximum number of model endpoints per single request when using V3IO Client
+)
 
 
 def _is_no_schema_error(exc: v3io_frames.Error) -> bool:
@@ -475,8 +480,8 @@ class V3IOTSDBConnector(TSDBConnector):
         tables = mm_schemas.V3IOTSDBTables.list()
 
         # Split the endpoint ids into chunks to avoid exceeding the v3io-engine filter-expression limit
-        for i in range(0, len(endpoint_ids), V3IO_MEPS_LIMIT):
-            endpoint_id_chunk = endpoint_ids[i : i + V3IO_MEPS_LIMIT]
+        for i in range(0, len(endpoint_ids), V3IO_FRAMESD_MEPS_LIMIT):
+            endpoint_id_chunk = endpoint_ids[i : i + V3IO_FRAMESD_MEPS_LIMIT]
             filter_query = f"endpoint_id IN({str(endpoint_id_chunk)[1:-1]}) "
             for table in tables:
                 try:
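The chunked loop above is a standard slicing pattern; here is a self-contained sketch of the same logic, with only the 200 limit taken from the diff:

    V3IO_FRAMESD_MEPS_LIMIT = 200  # framesd filter-expression limit, per the diff

    def iter_chunks(endpoint_ids: list[str], limit: int = V3IO_FRAMESD_MEPS_LIMIT):
        """Yield slices of at most `limit` endpoint ids, mirroring the loop above."""
        for i in range(0, len(endpoint_ids), limit):
            yield endpoint_ids[i : i + limit]

    # e.g. 450 ids are processed as chunks of 200, 200 and 50
    sizes = [len(chunk) for chunk in iter_chunks([f"ep{i}" for i in range(450)])]
    assert sizes == [200, 200, 50]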
@@ -684,11 +689,11 @@ class V3IOTSDBConnector(TSDBConnector):
         if isinstance(endpoint_id, str):
             return f"endpoint_id=='{endpoint_id}'"
         elif isinstance(endpoint_id, list):
-            if len(endpoint_id) > V3IO_MEPS_LIMIT:
+            if len(endpoint_id) > V3IO_FRAMESD_MEPS_LIMIT:
                 logger.info(
                     "The number of endpoint ids exceeds the v3io-engine filter-expression limit, "
                     "retrieving all the model endpoints from the db.",
-                    limit=V3IO_MEPS_LIMIT,
+                    limit=V3IO_FRAMESD_MEPS_LIMIT,
                     amount=len(endpoint_id),
                 )
                 return None
@@ -880,23 +885,41 @@ class V3IOTSDBConnector(TSDBConnector):
         start: Optional[datetime] = None,
         end: Optional[datetime] = None,
     ) -> dict[str, float]:
-        if isinstance(endpoint_ids, str):
-            filter_expression = f"__name=='{endpoint_ids}'"
-        else:
-            filter_expression = " OR ".join(
-                [f"__name=='{endpoint_id}'" for endpoint_id in endpoint_ids]
-            )
-
         # Get the last request timestamp for each endpoint from the KV table.
         # The result of the query is a list of dictionaries,
         # each dictionary contains the endpoint id and the last request timestamp.
+        last_request_timestamps = {}
+        if isinstance(endpoint_ids, str):
+            endpoint_ids = [endpoint_ids]
 
         try:
-            res = self.v3io_client.kv.new_cursor(
-                container=self.container,
-                table_path=self.last_request_table,
-                filter_expression=filter_expression,
-            ).all()
+            if len(endpoint_ids) > V3IO_CLIENT_MEPS_LIMIT:
+                logger.warning(
+                    "The number of endpoint ids exceeds the v3io-engine filter-expression limit, "
+                    "retrieving last request for all the model endpoints from the KV table.",
+                    limit=V3IO_CLIENT_MEPS_LIMIT,
+                    amount=len(endpoint_ids),
+                )
+
+                res = self.v3io_client.kv.new_cursor(
+                    container=self.container,
+                    table_path=self.last_request_table,
+                ).all()
+                last_request_timestamps.update(
+                    {d["__name"]: d["last_request_timestamp"] for d in res}
+                )
+            else:
+                filter_expression = " OR ".join(
+                    [f"__name=='{endpoint_id}'" for endpoint_id in endpoint_ids]
+                )
+                res = self.v3io_client.kv.new_cursor(
+                    container=self.container,
+                    table_path=self.last_request_table,
+                    filter_expression=filter_expression,
+                ).all()
+                last_request_timestamps.update(
+                    {d["__name"]: d["last_request_timestamp"] for d in res}
+                )
         except Exception as e:
             logger.warning(
                 "Failed to get last request timestamp from V3IO KV table.",
@@ -904,9 +927,8 @@ class V3IOTSDBConnector(TSDBConnector):
                 project=self.project,
                 table=self.last_request_table,
             )
-            return {}
 
-        return {d["__name"]: d["last_request_timestamp"] for d in res}
+        return last_request_timestamps
 
     def get_drift_status(
         self,
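In the small-batch branch above, the KV filter expression is built by OR-joining the endpoint names. A standalone sketch of the string it produces (ids illustrative):

    endpoint_ids = ["ep1", "ep2", "ep3"]  # illustrative ids
    filter_expression = " OR ".join(f"__name=='{eid}'" for eid in endpoint_ids)
    assert filter_expression == "__name=='ep1' OR __name=='ep2' OR __name=='ep3'"
    # With more than V3IO_CLIENT_MEPS_LIMIT (150) ids, the new code skips the
    # filter entirely and scans the whole last-request table instead.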
@@ -1131,4 +1153,8 @@ class V3IOTSDBConnector(TSDBConnector):
             endpoint_ids=list(model_endpoint_objects_by_uid.keys())
         )
         for uid, mep in model_endpoint_objects_by_uid.items():
-            mep.status.last_request = last_request_dictionary.get(uid)
+            # Set the last request timestamp to the MEP object. If not found, keep the existing value from the
+            # DB (relevant for batch EP).
+            mep.status.last_request = last_request_dictionary.get(
+                uid, mep.status.last_request
+            )
@@ -472,6 +472,7 @@ def update_model_endpoint_last_request(
         "Bumping model endpoint last request time (EP without serving)",
         project=project,
         endpoint_id=model_endpoint.metadata.uid,
+        function_name=model_endpoint.spec.function_name,
         last_request=model_endpoint.status.last_request,
         current_request=current_request.isoformat(),
         bumped_last_request=bumped_last_request,
@@ -586,16 +587,43 @@ def _get_monitoring_schedules_folder_path(project: str) -> str:
     )
 
 
-def _get_monitoring_schedules_file_path(*, project: str, endpoint_id: str) -> str:
+def _get_monitoring_schedules_file_endpoint_path(
+    *, project: str, endpoint_id: str
+) -> str:
     return os.path.join(
         _get_monitoring_schedules_folder_path(project), f"{endpoint_id}.json"
     )
 
 
-def get_monitoring_schedules_data(*, project: str, endpoint_id: str) -> "DataItem":
+def get_monitoring_schedules_endpoint_data(
+    *, project: str, endpoint_id: str
+) -> "DataItem":
+    """
+    Get the model monitoring schedules' data item of the project's model endpoint.
+    """
+    return mlrun.datastore.store_manager.object(
+        _get_monitoring_schedules_file_endpoint_path(
+            project=project, endpoint_id=endpoint_id
+        )
+    )
+
+
+def get_monitoring_schedules_chief_data(
+    *,
+    project: str,
+) -> "DataItem":
     """
     Get the model monitoring schedules' data item of the project's model endpoint.
     """
     return mlrun.datastore.store_manager.object(
-        _get_monitoring_schedules_file_path(project=project, endpoint_id=endpoint_id)
+        _get_monitoring_schedules_file_chief_path(project=project)
+    )
+
+
+def _get_monitoring_schedules_file_chief_path(
+    *,
+    project: str,
+) -> str:
+    return os.path.join(
+        _get_monitoring_schedules_folder_path(project), f"{project}.json"
     )
@@ -180,7 +180,7 @@ class ModelMonitoringWriter(StepToDict):
             data, timestamp
         )
         logger.info(
-            "Updating the model endpoint statistics",
+            "Updated the model endpoint statistics",
             endpoint_id=endpoint_id,
             stats_kind=stat_kind,
         )
mlrun/projects/project.py
@@ -2451,7 +2451,22 @@ class MlrunProject(ModelObj):
         :param image: The image of the model monitoring controller, writer, monitoring
                       stream & histogram data drift functions, which are real time nuclio
                       functions. By default, the image is mlrun/mlrun.
-        :param deploy_histogram_data_drift_app: If true, deploy the default histogram-based data drift application.
+        :param deploy_histogram_data_drift_app: If true, deploy the default histogram-based data drift application:
+            :py:class:`~mlrun.model_monitoring.applications.histogram_data_drift.HistogramDataDriftApplication`.
+            If false, and you want to deploy the histogram data drift application
+            afterwards, you may use the
+            :py:func:`~set_model_monitoring_function` method::
+
+                import mlrun.model_monitoring.applications.histogram_data_drift as histogram_data_drift
+
+                hist_app = project.set_model_monitoring_function(
+                    name=histogram_data_drift.HistogramDataDriftApplicationConstants.NAME,  # keep the default name
+                    func=histogram_data_drift.__file__,
+                    application_class=histogram_data_drift.HistogramDataDriftApplication.__name__,
+                )
+
+                project.deploy_function(hist_app)
+
         :param wait_for_deployment: If true, return only after the deployment is done on the backend.
                                     Otherwise, deploy the model monitoring infrastructure on the
                                     background, including the histogram data drift app if selected.
@@ -2488,30 +2503,6 @@ class MlrunProject(ModelObj):
         )
         self._wait_for_functions_deployment(deployment_functions)
 
-    def deploy_histogram_data_drift_app(
-        self,
-        *,
-        image: str = "mlrun/mlrun",
-        db: Optional[mlrun.db.RunDBInterface] = None,
-        wait_for_deployment: bool = False,
-    ) -> None:
-        """
-        Deploy the histogram data drift application.
-
-        :param image: The image on which the application will run.
-        :param db: An optional DB object.
-        :param wait_for_deployment: If true, return only after the deployment is done on the backend.
-                                    Otherwise, deploy the application on the background.
-        """
-        if db is None:
-            db = mlrun.db.get_run_db(secrets=self._secrets)
-        db.deploy_histogram_data_drift_app(project=self.name, image=image)
-
-        if wait_for_deployment:
-            self._wait_for_functions_deployment(
-                [mm_constants.HistogramDataDriftApplicationConstants.NAME]
-            )
-
     def update_model_monitoring_controller(
         self,
         base_period: int = 10,
@@ -5034,14 +5025,20 @@ class MlrunProject(ModelObj):
         db = mlrun.db.get_run_db(secrets=self._secrets)
         return db.get_alert_config(alert_name, self.metadata.name)
 
-    def list_alerts_configs(self) -> list[AlertConfig]:
+    def list_alerts_configs(
+        self, limit: Optional[int] = None, offset: Optional[int] = None
+    ) -> list[AlertConfig]:
         """
         Retrieve list of alerts of a project.
 
+        :param limit: The maximum number of alerts to return.
+                      Defaults to `mlconf.alerts.default_list_alert_configs_limit` if not provided.
+        :param offset: The number of alerts to skip before starting to collect alerts.
+
         :return: All the alerts objects of the project.
         """
         db = mlrun.db.get_run_db(secrets=self._secrets)
-        return db.list_alerts_configs(self.metadata.name)
+        return db.list_alerts_configs(self.metadata.name, limit=limit, offset=offset)
 
     def delete_alert_config(
         self, alert_data: AlertConfig = None, alert_name: Optional[str] = None
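A hedged usage sketch of the new limit/offset parameters, paging through a project's alert configs. The project handle and page size are illustrative, and the `name` attribute on AlertConfig is an assumption:

    import mlrun

    project = mlrun.get_or_create_project("my-proj")  # illustrative project
    page_size, offset = 100, 0
    while True:
        page = project.list_alerts_configs(limit=page_size, offset=offset)
        if not page:
            break
        for alert in page:
            print(alert.name)  # `name` attribute assumed for illustration
        offset += page_size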
@@ -5269,7 +5266,7 @@ class MlrunProject(ModelObj):
         )
 
         # if engine is remote then skip the local file validation
-        if engine and not engine.startswith("remote"):
+        if engine and engine.startswith("remote"):
             return
 
         code_path = self.spec.get_code_path()
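The hunk above inverts a guard that previously skipped validation for every non-remote engine. The corrected predicate, extracted for clarity (engine names other than the "remote" prefix are illustrative):

    from typing import Optional

    def should_skip_local_validation(engine: Optional[str]) -> bool:
        # Skip the local file validation only when the engine is remote.
        return bool(engine and engine.startswith("remote"))

    assert should_skip_local_validation("remote")
    assert not should_skip_local_validation("kfp")  # illustrative local engine
    assert not should_skip_local_validation(None)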
@@ -36,6 +36,7 @@ class FunctionReference(ModelObj):
         spec=None,
         kind=None,
         name=None,
+        track_models=None,
     ):
         self.url = url
         self.kind = kind
@@ -46,6 +47,7 @@ class FunctionReference(ModelObj):
             spec = spec.to_dict()
         self.spec = spec
         self.code = code
+        self.track_models = track_models
 
         self._function = None
         self._address = None
@@ -130,6 +132,7 @@ class FunctionReference(ModelObj):
         if self.requirements:
             func.with_requirements(self.requirements)
         self._function = func
+        func.spec.track_models = self.track_models
        return func
 
     @property
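The three FunctionReference hunks above thread a track_models flag from the reference into the function it builds. A simplified, self-contained model of that flow; these stand-in classes are not the real mlrun types:

    class _Spec:
        def __init__(self):
            self.track_models = None

    class _Func:
        def __init__(self):
            self.spec = _Spec()

    class _Ref:
        """Stand-in for FunctionReference: stores the flag, copies it on build."""

        def __init__(self, track_models=None):
            self.track_models = track_models
            self._function = None

        def to_function(self):
            func = _Func()
            self._function = func
            func.spec.track_models = self.track_models  # mirrors the hunk above
            return func

    ref = _Ref(track_models=True)
    assert ref.to_function().spec.track_models is True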
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//	http://www.apache.org/licenses/LICENSE-2.0
+//	http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,82 +14,84 @@
 package main
 
 import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"net/http/httputil"
-	"net/url"
-	"os"
-	"strings"
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"strings"
 
-	nuclio "github.com/nuclio/nuclio-sdk-go"
+	nuclio "github.com/nuclio/nuclio-sdk-go"
 )
 
 func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) {
-	reverseProxy := context.UserData.(map[string]interface{})["reverseProxy"].(*httputil.ReverseProxy)
-	sidecarUrl := context.UserData.(map[string]interface{})["server"].(string)
+	reverseProxy := context.UserData.(map[string]interface{})["reverseProxy"].(*httputil.ReverseProxy)
+	sidecarUrl := context.UserData.(map[string]interface{})["server"].(string)
 
-	// populate reverse proxy http request
-	httpRequest, err := http.NewRequest(event.GetMethod(), event.GetPath(), bytes.NewReader(event.GetBody()))
-	if err != nil {
-		context.Logger.ErrorWith("Failed to create a reverse proxy request")
-		return nil, err
-	}
-	for k, v := range event.GetHeaders() {
-		httpRequest.Header[k] = []string{v.(string)}
-	}
+	// populate reverse proxy http request
+	httpRequest, err := http.NewRequest(event.GetMethod(), event.GetPath(), bytes.NewReader(event.GetBody()))
+	if err != nil {
+		context.Logger.ErrorWith("Failed to create a reverse proxy request")
+		return nil, err
+	}
+	for k, v := range event.GetHeaders() {
+		httpRequest.Header[k] = []string{v.(string)}
+	}
 
-	// populate query params
-	query := httpRequest.URL.Query()
-	for k, v := range event.GetFields() {
-		query.Set(k, v.(string))
-	}
-	httpRequest.URL.RawQuery = query.Encode()
+	// populate query params
+	query := httpRequest.URL.Query()
+	for k, v := range event.GetFields() {
+		query.Set(k, v.(string))
+	}
+	httpRequest.URL.RawQuery = query.Encode()
 
-	recorder := httptest.NewRecorder()
-	reverseProxy.ServeHTTP(recorder, httpRequest)
+	recorder := httptest.NewRecorder()
+	reverseProxy.ServeHTTP(recorder, httpRequest)
 
-	// send request to sidecar
-	context.Logger.DebugWith("Forwarding request to sidecar", "sidecarUrl", sidecarUrl, "query", httpRequest.URL.Query())
-	response := recorder.Result()
+	// send request to sidecar
+	context.Logger.DebugWith("Forwarding request to sidecar",
+		"sidecarUrl", sidecarUrl,
+		"method", event.GetMethod())
+	response := recorder.Result()
 
-	headers := make(map[string]interface{})
-	for key, value := range response.Header {
-		headers[key] = value[0]
-	}
+	headers := make(map[string]interface{})
+	for key, value := range response.Header {
+		headers[key] = value[0]
+	}
 
-	// let the processor calculate the content length
-	delete(headers, "Content-Length")
-	return nuclio.Response{
-		StatusCode:  response.StatusCode,
-		Body:        recorder.Body.Bytes(),
-		ContentType: response.Header.Get("Content-Type"),
-		Headers:     headers,
-	}, nil
+	// let the processor calculate the content length
+	delete(headers, "Content-Length")
+	return nuclio.Response{
+		StatusCode:  response.StatusCode,
+		Body:        recorder.Body.Bytes(),
+		ContentType: response.Header.Get("Content-Type"),
+		Headers:     headers,
+	}, nil
 }
 
 func InitContext(context *nuclio.Context) error {
-	sidecarHost := os.Getenv("SIDECAR_HOST")
-	sidecarPort := os.Getenv("SIDECAR_PORT")
-	if sidecarHost == "" {
-		sidecarHost = "http://localhost"
-	} else if !strings.Contains(sidecarHost, "://") {
-		sidecarHost = fmt.Sprintf("http://%s", sidecarHost)
-	}
+	sidecarHost := os.Getenv("SIDECAR_HOST")
+	sidecarPort := os.Getenv("SIDECAR_PORT")
+	if sidecarHost == "" {
+		sidecarHost = "http://localhost"
+	} else if !strings.Contains(sidecarHost, "://") {
+		sidecarHost = fmt.Sprintf("http://%s", sidecarHost)
+	}
 
-	// url for request forwarding
-	sidecarUrl := fmt.Sprintf("%s:%s", sidecarHost, sidecarPort)
-	parsedURL, err := url.Parse(sidecarUrl)
-	if err != nil {
-		context.Logger.ErrorWith("Failed to parse sidecar url", "sidecarUrl", sidecarUrl)
-		return err
-	}
-	reverseProxy := httputil.NewSingleHostReverseProxy(parsedURL)
+	// url for request forwarding
+	sidecarUrl := fmt.Sprintf("%s:%s", sidecarHost, sidecarPort)
+	parsedURL, err := url.Parse(sidecarUrl)
+	if err != nil {
+		context.Logger.ErrorWith("Failed to parse sidecar url", "sidecarUrl", sidecarUrl)
+		return err
+	}
+	reverseProxy := httputil.NewSingleHostReverseProxy(parsedURL)
 
-	context.UserData = map[string]interface{}{
-		"server":       sidecarUrl,
-		"reverseProxy": reverseProxy,
-	}
-	return nil
+	context.UserData = map[string]interface{}{
+		"server":       sidecarUrl,
+		"reverseProxy": reverseProxy,
+	}
+	return nil
 }
@@ -337,6 +337,17 @@ class ServingRuntime(RemoteRuntime):
         """
         # Applying model monitoring configurations
         self.spec.track_models = enable_tracking
+        if self._spec and self._spec.function_refs:
+            logger.debug(
+                "Set tracking for children references", enable_tracking=enable_tracking
+            )
+            for name in self._spec.function_refs.keys():
+                self._spec.function_refs[name].track_models = enable_tracking
+                # Check if function_refs _function is filled if so update track_models field:
+                if self._spec.function_refs[name]._function:
+                    self._spec.function_refs[
+                        name
+                    ]._function.spec.track_models = enable_tracking
 
         if not 0 < sampling_percentage <= 100:
             raise mlrun.errors.MLRunInvalidArgumentError(
@@ -506,7 +517,11 @@ class ServingRuntime(RemoteRuntime):
         :return function object
         """
         function_reference = FunctionReference(
-            url, image, requirements=requirements, kind=kind or "serving"
+            url,
+            image,
+            requirements=requirements,
+            kind=kind or "serving",
+            track_models=self.spec.track_models,
         )
         self._spec.function_refs.update(function_reference, name)
         func = function_reference.to_function(self.kind)
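End to end, enabling tracking on a parent serving function now reaches its children through two paths: set_tracking updates existing references (and any already-built child functions) in place, and newly added children receive the flag at FunctionReference construction, as in the hunk above. A hedged usage sketch; the function names and files are illustrative, and set_tracking accepts further arguments not shown here:

    import mlrun

    # Illustrative parent serving function with one child reference.
    parent = mlrun.new_function("parent", kind="serving")
    parent.add_child_function("child", url="child.py", image="mlrun/mlrun")

    # Per the diffs above, this now also sets track_models on every child
    # reference, and on the child's built function object if it exists.
    parent.set_tracking()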