mlrun 1.7.1rc4-py3-none-any.whl → 1.8.0rc8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic.

Files changed (257)
  1. mlrun/__init__.py +23 -21
  2. mlrun/__main__.py +3 -3
  3. mlrun/alerts/alert.py +148 -14
  4. mlrun/artifacts/__init__.py +1 -2
  5. mlrun/artifacts/base.py +46 -12
  6. mlrun/artifacts/dataset.py +16 -16
  7. mlrun/artifacts/document.py +334 -0
  8. mlrun/artifacts/manager.py +15 -13
  9. mlrun/artifacts/model.py +66 -53
  10. mlrun/common/constants.py +7 -0
  11. mlrun/common/formatters/__init__.py +1 -0
  12. mlrun/common/formatters/feature_set.py +1 -0
  13. mlrun/common/formatters/function.py +1 -0
  14. mlrun/{model_monitoring/db/stores/base/__init__.py → common/formatters/model_endpoint.py} +16 -1
  15. mlrun/common/formatters/pipeline.py +1 -2
  16. mlrun/common/formatters/project.py +9 -0
  17. mlrun/common/model_monitoring/__init__.py +0 -5
  18. mlrun/common/model_monitoring/helpers.py +1 -29
  19. mlrun/common/runtimes/constants.py +1 -2
  20. mlrun/common/schemas/__init__.py +6 -2
  21. mlrun/common/schemas/alert.py +111 -19
  22. mlrun/common/schemas/api_gateway.py +3 -3
  23. mlrun/common/schemas/artifact.py +11 -7
  24. mlrun/common/schemas/auth.py +6 -4
  25. mlrun/common/schemas/background_task.py +7 -7
  26. mlrun/common/schemas/client_spec.py +2 -3
  27. mlrun/common/schemas/clusterization_spec.py +2 -2
  28. mlrun/common/schemas/common.py +53 -3
  29. mlrun/common/schemas/constants.py +15 -0
  30. mlrun/common/schemas/datastore_profile.py +1 -1
  31. mlrun/common/schemas/feature_store.py +9 -9
  32. mlrun/common/schemas/frontend_spec.py +4 -4
  33. mlrun/common/schemas/function.py +10 -10
  34. mlrun/common/schemas/hub.py +1 -1
  35. mlrun/common/schemas/k8s.py +3 -3
  36. mlrun/common/schemas/memory_reports.py +3 -3
  37. mlrun/common/schemas/model_monitoring/__init__.py +2 -1
  38. mlrun/common/schemas/model_monitoring/constants.py +66 -14
  39. mlrun/common/schemas/model_monitoring/grafana.py +1 -1
  40. mlrun/common/schemas/model_monitoring/model_endpoints.py +91 -147
  41. mlrun/common/schemas/notification.py +24 -3
  42. mlrun/common/schemas/object.py +1 -1
  43. mlrun/common/schemas/pagination.py +4 -4
  44. mlrun/common/schemas/partition.py +137 -0
  45. mlrun/common/schemas/pipeline.py +2 -2
  46. mlrun/common/schemas/project.py +25 -17
  47. mlrun/common/schemas/runs.py +2 -2
  48. mlrun/common/schemas/runtime_resource.py +5 -5
  49. mlrun/common/schemas/schedule.py +1 -1
  50. mlrun/common/schemas/secret.py +1 -1
  51. mlrun/common/schemas/tag.py +3 -3
  52. mlrun/common/schemas/workflow.py +5 -5
  53. mlrun/config.py +67 -10
  54. mlrun/data_types/__init__.py +0 -2
  55. mlrun/data_types/infer.py +3 -1
  56. mlrun/data_types/spark.py +2 -1
  57. mlrun/datastore/__init__.py +0 -2
  58. mlrun/datastore/alibaba_oss.py +4 -1
  59. mlrun/datastore/azure_blob.py +4 -1
  60. mlrun/datastore/base.py +12 -4
  61. mlrun/datastore/datastore.py +9 -3
  62. mlrun/datastore/datastore_profile.py +79 -20
  63. mlrun/datastore/dbfs_store.py +4 -1
  64. mlrun/datastore/filestore.py +4 -1
  65. mlrun/datastore/google_cloud_storage.py +4 -1
  66. mlrun/datastore/hdfs.py +4 -1
  67. mlrun/datastore/inmem.py +4 -1
  68. mlrun/datastore/redis.py +4 -1
  69. mlrun/datastore/s3.py +4 -1
  70. mlrun/datastore/sources.py +52 -51
  71. mlrun/datastore/store_resources.py +0 -2
  72. mlrun/datastore/targets.py +21 -21
  73. mlrun/datastore/utils.py +2 -2
  74. mlrun/datastore/v3io.py +4 -1
  75. mlrun/datastore/vectorstore.py +194 -0
  76. mlrun/datastore/wasbfs/fs.py +13 -12
  77. mlrun/db/base.py +208 -82
  78. mlrun/db/factory.py +0 -3
  79. mlrun/db/httpdb.py +1237 -386
  80. mlrun/db/nopdb.py +201 -74
  81. mlrun/errors.py +2 -2
  82. mlrun/execution.py +136 -50
  83. mlrun/feature_store/__init__.py +0 -2
  84. mlrun/feature_store/api.py +41 -40
  85. mlrun/feature_store/common.py +9 -9
  86. mlrun/feature_store/feature_set.py +20 -18
  87. mlrun/feature_store/feature_vector.py +27 -24
  88. mlrun/feature_store/retrieval/base.py +14 -9
  89. mlrun/feature_store/retrieval/job.py +2 -1
  90. mlrun/feature_store/steps.py +2 -2
  91. mlrun/features.py +30 -13
  92. mlrun/frameworks/__init__.py +1 -2
  93. mlrun/frameworks/_common/__init__.py +1 -2
  94. mlrun/frameworks/_common/artifacts_library.py +2 -2
  95. mlrun/frameworks/_common/mlrun_interface.py +10 -6
  96. mlrun/frameworks/_common/model_handler.py +29 -27
  97. mlrun/frameworks/_common/producer.py +3 -1
  98. mlrun/frameworks/_dl_common/__init__.py +1 -2
  99. mlrun/frameworks/_dl_common/loggers/__init__.py +1 -2
  100. mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +4 -4
  101. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +3 -3
  102. mlrun/frameworks/_ml_common/__init__.py +1 -2
  103. mlrun/frameworks/_ml_common/loggers/__init__.py +1 -2
  104. mlrun/frameworks/_ml_common/model_handler.py +21 -21
  105. mlrun/frameworks/_ml_common/plans/__init__.py +1 -2
  106. mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +3 -1
  107. mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
  108. mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
  109. mlrun/frameworks/auto_mlrun/__init__.py +1 -2
  110. mlrun/frameworks/auto_mlrun/auto_mlrun.py +22 -15
  111. mlrun/frameworks/huggingface/__init__.py +1 -2
  112. mlrun/frameworks/huggingface/model_server.py +9 -9
  113. mlrun/frameworks/lgbm/__init__.py +47 -44
  114. mlrun/frameworks/lgbm/callbacks/__init__.py +1 -2
  115. mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -2
  116. mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -2
  117. mlrun/frameworks/lgbm/mlrun_interfaces/__init__.py +1 -2
  118. mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +5 -5
  119. mlrun/frameworks/lgbm/model_handler.py +15 -11
  120. mlrun/frameworks/lgbm/model_server.py +11 -7
  121. mlrun/frameworks/lgbm/utils.py +2 -2
  122. mlrun/frameworks/onnx/__init__.py +1 -2
  123. mlrun/frameworks/onnx/dataset.py +3 -3
  124. mlrun/frameworks/onnx/mlrun_interface.py +2 -2
  125. mlrun/frameworks/onnx/model_handler.py +7 -5
  126. mlrun/frameworks/onnx/model_server.py +8 -6
  127. mlrun/frameworks/parallel_coordinates.py +11 -11
  128. mlrun/frameworks/pytorch/__init__.py +22 -23
  129. mlrun/frameworks/pytorch/callbacks/__init__.py +1 -2
  130. mlrun/frameworks/pytorch/callbacks/callback.py +2 -1
  131. mlrun/frameworks/pytorch/callbacks/logging_callback.py +15 -8
  132. mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +19 -12
  133. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +22 -15
  134. mlrun/frameworks/pytorch/callbacks_handler.py +36 -30
  135. mlrun/frameworks/pytorch/mlrun_interface.py +17 -17
  136. mlrun/frameworks/pytorch/model_handler.py +21 -17
  137. mlrun/frameworks/pytorch/model_server.py +13 -9
  138. mlrun/frameworks/sklearn/__init__.py +19 -18
  139. mlrun/frameworks/sklearn/estimator.py +2 -2
  140. mlrun/frameworks/sklearn/metric.py +3 -3
  141. mlrun/frameworks/sklearn/metrics_library.py +8 -6
  142. mlrun/frameworks/sklearn/mlrun_interface.py +3 -2
  143. mlrun/frameworks/sklearn/model_handler.py +4 -3
  144. mlrun/frameworks/tf_keras/__init__.py +11 -12
  145. mlrun/frameworks/tf_keras/callbacks/__init__.py +1 -2
  146. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +17 -14
  147. mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +15 -12
  148. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +21 -18
  149. mlrun/frameworks/tf_keras/model_handler.py +17 -13
  150. mlrun/frameworks/tf_keras/model_server.py +12 -8
  151. mlrun/frameworks/xgboost/__init__.py +19 -18
  152. mlrun/frameworks/xgboost/model_handler.py +13 -9
  153. mlrun/launcher/base.py +3 -4
  154. mlrun/launcher/local.py +1 -1
  155. mlrun/launcher/remote.py +1 -1
  156. mlrun/lists.py +4 -3
  157. mlrun/model.py +117 -46
  158. mlrun/model_monitoring/__init__.py +4 -4
  159. mlrun/model_monitoring/api.py +61 -59
  160. mlrun/model_monitoring/applications/_application_steps.py +17 -17
  161. mlrun/model_monitoring/applications/base.py +165 -6
  162. mlrun/model_monitoring/applications/context.py +88 -37
  163. mlrun/model_monitoring/applications/evidently_base.py +1 -2
  164. mlrun/model_monitoring/applications/histogram_data_drift.py +43 -21
  165. mlrun/model_monitoring/applications/results.py +55 -3
  166. mlrun/model_monitoring/controller.py +207 -239
  167. mlrun/model_monitoring/db/__init__.py +0 -2
  168. mlrun/model_monitoring/db/_schedules.py +156 -0
  169. mlrun/model_monitoring/db/_stats.py +189 -0
  170. mlrun/model_monitoring/db/tsdb/base.py +78 -25
  171. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +90 -16
  172. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +33 -0
  173. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +279 -59
  174. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +1 -0
  175. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +78 -17
  176. mlrun/model_monitoring/helpers.py +152 -49
  177. mlrun/model_monitoring/stream_processing.py +99 -283
  178. mlrun/model_monitoring/tracking_policy.py +10 -3
  179. mlrun/model_monitoring/writer.py +48 -36
  180. mlrun/package/__init__.py +3 -6
  181. mlrun/package/context_handler.py +1 -1
  182. mlrun/package/packager.py +12 -9
  183. mlrun/package/packagers/__init__.py +0 -2
  184. mlrun/package/packagers/default_packager.py +14 -11
  185. mlrun/package/packagers/numpy_packagers.py +16 -7
  186. mlrun/package/packagers/pandas_packagers.py +18 -18
  187. mlrun/package/packagers/python_standard_library_packagers.py +25 -11
  188. mlrun/package/packagers_manager.py +31 -14
  189. mlrun/package/utils/__init__.py +0 -3
  190. mlrun/package/utils/_pickler.py +6 -6
  191. mlrun/platforms/__init__.py +47 -16
  192. mlrun/platforms/iguazio.py +4 -1
  193. mlrun/projects/operations.py +27 -27
  194. mlrun/projects/pipelines.py +75 -38
  195. mlrun/projects/project.py +865 -206
  196. mlrun/run.py +53 -10
  197. mlrun/runtimes/__init__.py +1 -3
  198. mlrun/runtimes/base.py +15 -11
  199. mlrun/runtimes/daskjob.py +9 -9
  200. mlrun/runtimes/generators.py +2 -1
  201. mlrun/runtimes/kubejob.py +4 -5
  202. mlrun/runtimes/mounts.py +572 -0
  203. mlrun/runtimes/mpijob/__init__.py +0 -2
  204. mlrun/runtimes/mpijob/abstract.py +7 -6
  205. mlrun/runtimes/nuclio/api_gateway.py +7 -7
  206. mlrun/runtimes/nuclio/application/application.py +11 -11
  207. mlrun/runtimes/nuclio/function.py +19 -17
  208. mlrun/runtimes/nuclio/serving.py +18 -11
  209. mlrun/runtimes/pod.py +154 -45
  210. mlrun/runtimes/remotesparkjob.py +3 -2
  211. mlrun/runtimes/sparkjob/__init__.py +0 -2
  212. mlrun/runtimes/sparkjob/spark3job.py +21 -11
  213. mlrun/runtimes/utils.py +6 -5
  214. mlrun/serving/merger.py +6 -4
  215. mlrun/serving/remote.py +18 -17
  216. mlrun/serving/routers.py +185 -172
  217. mlrun/serving/server.py +7 -1
  218. mlrun/serving/states.py +97 -78
  219. mlrun/serving/utils.py +13 -2
  220. mlrun/serving/v1_serving.py +3 -2
  221. mlrun/serving/v2_serving.py +74 -65
  222. mlrun/track/__init__.py +1 -1
  223. mlrun/track/tracker.py +2 -2
  224. mlrun/track/trackers/mlflow_tracker.py +6 -5
  225. mlrun/utils/async_http.py +1 -1
  226. mlrun/utils/clones.py +1 -1
  227. mlrun/utils/helpers.py +66 -18
  228. mlrun/utils/logger.py +106 -4
  229. mlrun/utils/notifications/notification/__init__.py +22 -19
  230. mlrun/utils/notifications/notification/base.py +33 -14
  231. mlrun/utils/notifications/notification/console.py +6 -6
  232. mlrun/utils/notifications/notification/git.py +11 -11
  233. mlrun/utils/notifications/notification/ipython.py +10 -9
  234. mlrun/utils/notifications/notification/mail.py +176 -0
  235. mlrun/utils/notifications/notification/slack.py +6 -6
  236. mlrun/utils/notifications/notification/webhook.py +6 -6
  237. mlrun/utils/notifications/notification_pusher.py +86 -44
  238. mlrun/utils/regex.py +3 -1
  239. mlrun/utils/version/version.json +2 -2
  240. {mlrun-1.7.1rc4.dist-info → mlrun-1.8.0rc8.dist-info}/METADATA +191 -186
  241. mlrun-1.8.0rc8.dist-info/RECORD +347 -0
  242. {mlrun-1.7.1rc4.dist-info → mlrun-1.8.0rc8.dist-info}/WHEEL +1 -1
  243. mlrun/model_monitoring/db/stores/__init__.py +0 -136
  244. mlrun/model_monitoring/db/stores/base/store.py +0 -213
  245. mlrun/model_monitoring/db/stores/sqldb/__init__.py +0 -13
  246. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +0 -71
  247. mlrun/model_monitoring/db/stores/sqldb/models/base.py +0 -190
  248. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +0 -103
  249. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +0 -40
  250. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +0 -659
  251. mlrun/model_monitoring/db/stores/v3io_kv/__init__.py +0 -13
  252. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +0 -726
  253. mlrun/model_monitoring/model_endpoint.py +0 -118
  254. mlrun-1.7.1rc4.dist-info/RECORD +0 -351
  255. {mlrun-1.7.1rc4.dist-info → mlrun-1.8.0rc8.dist-info}/LICENSE +0 -0
  256. {mlrun-1.7.1rc4.dist-info → mlrun-1.8.0rc8.dist-info}/entry_points.txt +0 -0
  257. {mlrun-1.7.1rc4.dist-info → mlrun-1.8.0rc8.dist-info}/top_level.txt +0 -0
mlrun/serving/routers.py CHANGED
@@ -28,10 +28,9 @@ import numpy as np
 import mlrun
 import mlrun.common.model_monitoring
 import mlrun.common.schemas.model_monitoring
-from mlrun.errors import err_to_str
 from mlrun.utils import logger, now_date
 
-from ..common.helpers import parse_versioned_object_uri
+from ..common.schemas.model_monitoring import ModelEndpointSchema
 from .server import GraphServer
 from .utils import RouterToDict, _extract_input_data, _update_result_body
 from .v2_serving import _ModelLogPusher
@@ -46,13 +45,13 @@ class BaseModelRouter(RouterToDict):
     def __init__(
         self,
         context=None,
-        name: str = None,
+        name: typing.Optional[str] = None,
         routes=None,
-        protocol: str = None,
-        url_prefix: str = None,
-        health_prefix: str = None,
-        input_path: str = None,
-        result_path: str = None,
+        protocol: typing.Optional[str] = None,
+        url_prefix: typing.Optional[str] = None,
+        health_prefix: typing.Optional[str] = None,
+        input_path: typing.Optional[str] = None,
+        result_path: typing.Optional[str] = None,
         **kwargs,
     ):
         """Model Serving Router, route between child models
@@ -249,11 +248,11 @@ class ParallelRun(BaseModelRouter):
     def __init__(
         self,
         context=None,
-        name: str = None,
+        name: typing.Optional[str] = None,
         routes=None,
-        protocol: str = None,
-        url_prefix: str = None,
-        health_prefix: str = None,
+        protocol: typing.Optional[str] = None,
+        url_prefix: typing.Optional[str] = None,
+        health_prefix: typing.Optional[str] = None,
         extend_event=None,
         executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
         **kwargs,
@@ -481,13 +480,13 @@ class VotingEnsemble(ParallelRun):
     def __init__(
         self,
         context=None,
-        name: str = None,
+        name: typing.Optional[str] = None,
         routes=None,
-        protocol: str = None,
-        url_prefix: str = None,
-        health_prefix: str = None,
-        vote_type: str = None,
-        weights: dict[str, float] = None,
+        protocol: typing.Optional[str] = None,
+        url_prefix: typing.Optional[str] = None,
+        health_prefix: typing.Optional[str] = None,
+        vote_type: typing.Optional[str] = None,
+        weights: typing.Optional[dict[str, float]] = None,
         executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
         format_response_with_col_name_flag: bool = False,
         prediction_col_name: str = "prediction",
@@ -814,7 +813,8 @@ class VotingEnsemble(ParallelRun):
         return self.logic(flattened_predictions, np.array(weights))
 
     def do_event(self, event, *args, **kwargs):
-        """Handles incoming requests.
+        """
+        Handles incoming requests.
 
         Parameters
         ----------
@@ -1016,130 +1016,139 @@ def _init_endpoint_record(
     """
 
     logger.info("Initializing endpoint records")
-
-    # Generate required values for the model endpoint record
-    try:
-        # Getting project name from the function uri
-        project, uri, tag, hash_key = parse_versioned_object_uri(
-            graph_server.function_uri
-        )
-    except Exception as e:
-        logger.error("Failed to parse function URI", exc=err_to_str(e))
-        return None
-
-    # Generating version model value based on the model name and model version
-    if voting_ensemble.version:
-        versioned_model_name = f"{voting_ensemble.name}:{voting_ensemble.version}"
-    else:
-        versioned_model_name = f"{voting_ensemble.name}:latest"
-
-    # Generating model endpoint ID based on function uri and model version
-    endpoint_uid = mlrun.common.model_monitoring.create_model_endpoint_uid(
-        function_uri=graph_server.function_uri, versioned_model=versioned_model_name
-    ).uid
-
     try:
-        model_ep = mlrun.get_run_db().get_model_endpoint(
-            project=project, endpoint_id=endpoint_uid
+        model_endpoint = mlrun.get_run_db().get_model_endpoint(
+            project=graph_server.project,
+            name=voting_ensemble.name,
+            function_name=graph_server.function_name,
         )
     except mlrun.errors.MLRunNotFoundError:
-        model_ep = None
+        model_endpoint = None
     except mlrun.errors.MLRunBadRequestError as err:
-        logger.debug(
-            f"Cant reach to model endpoints store, due to : {err}",
+        logger.info(
+            "Cannot get the model endpoints store", err=mlrun.errors.err_to_str(err)
         )
         return
 
-    if voting_ensemble.context.server.track_models and not model_ep:
-        logger.info("Creating a new model endpoint record", endpoint_id=endpoint_uid)
-        # Get the children model endpoints ids
-        children_uids = []
-        for _, c in voting_ensemble.routes.items():
-            if hasattr(c, "endpoint_uid"):
-                children_uids.append(c.endpoint_uid)
+    function = mlrun.get_run_db().get_function(
+        name=graph_server.function_name,
+        project=graph_server.project,
+        tag=graph_server.function_tag or "latest",
+    )
+    function_uid = function.get("metadata", {}).get("uid")
+    # Get the children model endpoints ids
+    children_uids = []
+    children_names = []
+    for _, c in voting_ensemble.routes.items():
+        if hasattr(c, "endpoint_uid"):
+            children_uids.append(c.endpoint_uid)
+            children_names.append(c.name)
+    if not model_endpoint and voting_ensemble.context.server.track_models:
+        logger.info(
+            "Creating a new model endpoint record",
+            name=voting_ensemble.name,
+            project=graph_server.project,
+            function_name=graph_server.function_name,
+            function_uid=function_uid,
+            model_class=voting_ensemble.__class__.__name__,
+        )
         model_endpoint = mlrun.common.schemas.ModelEndpoint(
             metadata=mlrun.common.schemas.ModelEndpointMetadata(
-                project=project, uid=endpoint_uid
+                project=graph_server.project,
+                name=voting_ensemble.name,
+                endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.ROUTER,
             ),
             spec=mlrun.common.schemas.ModelEndpointSpec(
-                function_uri=graph_server.function_uri,
-                model=versioned_model_name,
+                function_name=graph_server.function_name,
+                function_uid=function_uid,
+                function_tag=graph_server.function_tag or "latest",
                 model_class=voting_ensemble.__class__.__name__,
-                stream_path=voting_ensemble.context.stream.stream_uri,
-                active=True,
-                monitoring_mode=mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled,
+                children_uids=list(voting_ensemble.routes.keys()),
             ),
             status=mlrun.common.schemas.ModelEndpointStatus(
-                children=list(voting_ensemble.routes.keys()),
-                endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.ROUTER,
-                children_uids=children_uids,
+                monitoring_mode=mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
+                if voting_ensemble.context.server.track_models
+                else mlrun.common.schemas.model_monitoring.ModelMonitoringMode.disabled,
             ),
         )
-
         db = mlrun.get_run_db()
-
-        db.create_model_endpoint(
-            project=project,
-            endpoint_id=model_endpoint.metadata.uid,
-            model_endpoint=model_endpoint.dict(),
-        )
-
-        # Update model endpoint children type
-        for model_endpoint in children_uids:
-            current_endpoint = db.get_model_endpoint(
-                project=project, endpoint_id=model_endpoint
+        db.create_model_endpoint(model_endpoint=model_endpoint)
+
+    elif model_endpoint:
+        attributes = {}
+        if function_uid != model_endpoint.spec.function_uid:
+            attributes[ModelEndpointSchema.FUNCTION_UID] = function_uid
+        if children_uids != model_endpoint.spec.children_uids:
+            attributes[ModelEndpointSchema.CHILDREN_UIDS] = children_uids
+        if (
+            model_endpoint.status.monitoring_mode
+            == mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
+        ) != voting_ensemble.context.server.track_models:
+            attributes[ModelEndpointSchema.MONITORING_MODE] = (
+                mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
+                if voting_ensemble.context.server.track_models
+                else mlrun.common.schemas.model_monitoring.ModelMonitoringMode.disabled
             )
-            current_endpoint.status.endpoint_type = (
-                mlrun.common.schemas.model_monitoring.EndpointType.LEAF_EP
+        if attributes:
+            db = mlrun.get_run_db()
+            logger.info(
+                "Updating model endpoint attributes",
+                attributes=attributes,
+                project=model_endpoint.metadata.project,
+                name=model_endpoint.metadata.name,
+                function_name=model_endpoint.spec.function_name,
             )
-            db.create_model_endpoint(
-                project=project,
-                endpoint_id=model_endpoint,
-                model_endpoint=current_endpoint,
+            model_endpoint = db.patch_model_endpoint(
+                project=model_endpoint.metadata.project,
+                name=model_endpoint.metadata.name,
+                function_name=model_endpoint.spec.function_name,
+                endpoint_id=model_endpoint.metadata.uid,
+                attributes=attributes,
            )
-    elif (
-        model_ep
-        and (
-            model_ep.spec.monitoring_mode
-            == mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
-        )
-        != voting_ensemble.context.server.track_models
-    ):
-        monitoring_mode = (
-            mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
-            if voting_ensemble.context.server.track_models
-            else mlrun.common.schemas.model_monitoring.ModelMonitoringMode.disabled
-        )
-        db = mlrun.get_run_db()
-        db.patch_model_endpoint(
-            project=project,
-            endpoint_id=endpoint_uid,
-            attributes={"monitoring_mode": monitoring_mode},
-        )
-        logger.debug(
-            f"Updating model endpoint monitoring_mode to {monitoring_mode}",
-            endpoint_id=endpoint_uid,
+    else:
+        logger.info(
+            "Did not create a new model endpoint record, monitoring is disabled"
         )
+        return None
 
-    return endpoint_uid
+    # Update model endpoint children type
+    logger.info(
+        "Updating children model endpoint type",
+        children_uids=children_uids,
+        children_names=children_names,
+    )
+    for uid, name in zip(children_uids, children_names):
+        mlrun.get_run_db().patch_model_endpoint(
+            name=name,
+            project=graph_server.project,
+            function_name=graph_server.function_name,
+            endpoint_id=uid,
+            attributes={
+                ModelEndpointSchema.ENDPOINT_TYPE: mlrun.common.schemas.model_monitoring.EndpointType.LEAF_EP
+            },
+        )
+    return model_endpoint.metadata.uid
 
 
 class EnrichmentModelRouter(ModelRouter):
-    """model router with feature enrichment and imputing"""
+    """
+    Model router with feature enrichment and imputing
+    """
 
     def __init__(
         self,
         context=None,
-        name: str = None,
+        name: typing.Optional[str] = None,
         routes=None,
-        protocol: str = None,
-        url_prefix: str = None,
-        health_prefix: str = None,
+        protocol: typing.Optional[str] = None,
+        url_prefix: typing.Optional[str] = None,
+        health_prefix: typing.Optional[str] = None,
         feature_vector_uri: str = "",
-        impute_policy: dict = None,
+        impute_policy: typing.Optional[dict] = None,
         **kwargs,
     ):
-        """Model router with feature enrichment (from the feature store)
+        """
+        Model router with feature enrichment (from the feature store)
 
         The `EnrichmentModelRouter` class enrich the incoming event with real-time features
         read from a feature vector (in MLRun feature store) and forwards the enriched event to the child models
@@ -1147,27 +1156,25 @@ class EnrichmentModelRouter(ModelRouter):
         The feature vector is specified using the `feature_vector_uri`, in addition an imputing policy
         can be specified to substitute None/NaN values with pre defines constant or stats.
 
-        :param feature_vector_uri : feature vector uri in the form: [project/]name[:tag]
-        :param impute_policy : value imputing (substitute NaN/Inf values with statistical or constant value),
-            you can set the `impute_policy` parameter with the imputing policy, and specify which
-            constant or statistical value will be used instead of NaN/Inf value, this can be defined
-            per column or for all the columns ("*"). the replaced value can be fixed number for
-            constants or $mean, $max, $min, $std, $count for statistical values.
-            “*” is used to specify the default for all features, example:
-            impute_policy={"*": "$mean", "age": 33}
+        :param feature_vector_uri: feature vector uri in the form: [project/]name[:tag]
+        :param impute_policy: value imputing (substitute NaN/Inf values with statistical or constant value),
+            you can set the `impute_policy` parameter with the imputing policy, and specify which constant or
+            statistical value will be used instead of NaN/Inf value, this can be defined per column or
+            for all the columns ("*"). The replaced value can be fixed number for constants or $mean, $max, $min, $std,
+            $count for statistical values.
+            “*” is used to specify the default for all features, example: impute_policy={"*": "$mean", "age": 33}
         :param context: for internal use (passed in init)
         :param name: step name
         :param routes: for internal use (routes passed in init)
         :param protocol: serving API protocol (default "v2")
         :param url_prefix: url prefix for the router (default /v2/models)
         :param health_prefix: health api url prefix (default /v2/health)
-        :param input_path: when specified selects the key/path in the event to use as body
-            this require that the event body will behave like a dict, example:
-            event: {"data": {"a": 5, "b": 7}}, input_path="data.b" means request body will be 7
-        :param result_path: selects the key/path in the event to write the results to
-            this require that the event body will behave like a dict, example:
-            event: {"x": 5} , result_path="resp" means the returned response will be written
-            to event["y"] resulting in {"x": 5, "resp": <result>}
+        :param input_path: when specified selects the key/path in the event to use as body this require that the
+            event body will behave like a dict, example: event: {"data": {"a": 5, "b": 7}}, input_path="data.b"
+            means request body will be 7.
+        :param result_path: selects the key/path in the event to write the results to this require that the event body
+            will behave like a dict, example: event: {"x": 5} , result_path="resp" means the returned response will be
+            written to event["y"] resulting in {"x": 5, "resp": <result>}
         :param kwargs: extra arguments
         """
         super().__init__(
@@ -1206,33 +1213,37 @@ class EnrichmentModelRouter(ModelRouter):
 
 
 class EnrichmentVotingEnsemble(VotingEnsemble):
-    """Voting Ensemble with feature enrichment (from the feature store)"""
+    """
+    Voting Ensemble with feature enrichment (from the feature store)
+    """
 
     def __init__(
         self,
         context=None,
-        name: str = None,
+        name: typing.Optional[str] = None,
         routes=None,
         protocol=None,
-        url_prefix: str = None,
-        health_prefix: str = None,
-        vote_type: str = None,
+        url_prefix: typing.Optional[str] = None,
+        health_prefix: typing.Optional[str] = None,
+        vote_type: typing.Optional[str] = None,
         executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
-        prediction_col_name: str = None,
+        prediction_col_name: typing.Optional[str] = None,
         feature_vector_uri: str = "",
-        impute_policy: dict = None,
+        impute_policy: typing.Optional[dict] = None,
         **kwargs,
     ):
-        """Voting Ensemble with feature enrichment (from the feature store)
+        """
+        Voting Ensemble with feature enrichment (from the feature store)
 
         The `EnrichmentVotingEnsemble` class enables to enrich the incoming event with real-time features
         read from a feature vector (in MLRun feature store) and apply prediction logic on top of
         the different added models.
 
         You can use it by calling:
-        - <prefix>/<model>[/versions/<ver>]/operation
+
+        - `<prefix>/<model>[/versions/<ver>]/operation`
             Sends the event to the specific <model>[/versions/<ver>]
-        - <prefix>/operation
+        - `<prefix>/operation`
             Sends the event to all models and applies `vote(self, event)`
 
         The `VotingEnsemble` applies the following logic:
@@ -1243,7 +1254,7 @@ class EnrichmentVotingEnsemble(VotingEnsemble):
         The feature vector is specified using the `feature_vector_uri`, in addition an imputing policy
         can be specified to substitute None/NaN values with pre defines constant or stats.
 
-        * When enabling model tracking via `set_tracking()` the ensemble logic
+        When enabling model tracking via `set_tracking()` the ensemble logic
         predictions will appear with model name as the given VotingEnsemble name
         or "VotingEnsemble" by default.
 
@@ -1251,17 +1262,20 @@ class EnrichmentVotingEnsemble(VotingEnsemble):
 
             # Define a serving function
             # Note: You can point the function to a file containing you own Router or Classifier Model class
-            #       this basic class supports sklearn based models (with `<model>.predict()` api)
-            fn = mlrun.code_to_function(name='ensemble',
-                                        kind='serving',
-                                        filename='model-server.py'
-                                        image='mlrun/mlrun')
+            # this basic class supports sklearn based models (with `<model>.predict()` api)
+            fn = mlrun.code_to_function(
+                name='ensemble',
+                kind='serving',
+                filename='model-server.py',
+                image='mlrun/mlrun')
+
 
             # Set the router class
             # You can set your own classes by simply changing the `class_name`
-            fn.set_topology(class_name='mlrun.serving.routers.EnrichmentVotingEnsemble',
-                            feature_vector_uri="transactions-fraud",
-                            impute_policy={"*": "$mean"})
+            fn.set_topology(
+                class_name='mlrun.serving.routers.EnrichmentVotingEnsemble',
+                feature_vector_uri="transactions-fraud",
+                impute_policy={"*": "$mean"})
 
             # Add models
             fn.add_model(<model_name>, <model_path>, <model_class_name>)
@@ -1283,35 +1297,32 @@ class EnrichmentVotingEnsemble(VotingEnsemble):
         :param context: for internal use (passed in init)
         :param name: step name
         :param routes: for internal use (routes passed in init)
-        :param protocol: serving API protocol (default "v2")
-        :param url_prefix: url prefix for the router (default /v2/models)
-        :param health_prefix: health api url prefix (default /v2/health)
-        :param feature_vector_uri : feature vector uri in the form: [project/]name[:tag]
-        :param impute_policy : value imputing (substitute NaN/Inf values with statistical or constant value),
-            you can set the `impute_policy` parameter with the imputing policy, and specify which
-            constant or statistical value will be used instead of NaN/Inf value, this can be defined
-            per column or for all the columns ("*").
-            the replaced value can be fixed number for constants or $mean, $max, $min, $std, $count
-            for statistical values. “*” is used to specify the default for all features, example:
-            impute_policy={"*": "$mean", "age": 33}
-        :param input_path: when specified selects the key/path in the event to use as body
-            this require that the event body will behave like a dict, example:
-            event: {"data": {"a": 5, "b": 7}}, input_path="data.b" means request body will be 7
-        :param result_path: selects the key/path in the event to write the results to
-            this require that the event body will behave like a dict, example:
-            event: {"x": 5} , result_path="resp" means the returned response will be written
-            to event["y"] resulting in {"x": 5, "resp": <result>}
-        :param vote_type: Voting type to be used (from `VotingTypes`).
-            by default will try to self-deduct upon the first event:
-            - float prediction type: regression
-            - int prediction type: classification
+        :param protocol: serving API protocol (default `v2`)
+        :param url_prefix: url prefix for the router (default `/v2/models`)
+        :param health_prefix: health api url prefix (default `/v2/health`)
+        :param feature_vector_uri: feature vector uri in the form `[project/]name[:tag]`
+        :param impute_policy: value imputing (substitute NaN/Inf values with statistical or constant value),
+            you can set the `impute_policy` parameter with the imputing policy, and specify which constant or
+            statistical value will be used instead of NaN/Inf value, this can be defined per column or for all
+            the columns ("*"). The replaced value can be fixed number for constants or $mean, $max, $min, $std, $count
+            for statistical values. “*” is used to specify the default for all features,
+            example: impute_policy={"*": "$mean", "age": 33}
+        :param input_path: when specified selects the key/path in the event to use as body this require that
+            the event body will behave like a dict, example: event: {"data": {"a": 5, "b": 7}}, input_path="data.b"
+            means request body will be 7.
+        :param result_path: selects the key/path in the event to write the results to this require that the event body
+            will behave like a dict, example: event: {"x": 5} , result_path="resp" means the returned response will be
+            written to event["y"] resulting in {"x": 5, "resp": <result>}.
+        :param vote_type: Voting type to be used (from `VotingTypes`). by default will try to self-deduct upon the
+            first event:
+            * float prediction type: regression
+            * int prediction type: classification
         :param executor_type: Parallelism mechanism, out of `ParallelRunnerModes`, by default `threads`
         :param prediction_col_name: The dict key for the predictions column in the model's responses output.
-            Example: If the model returns
-            {id: <id>, model_name: <name>, outputs: {..., prediction: [<predictions>], ...}}
-            the prediction_col_name should be `prediction`.
-            by default, `prediction`
-        :param kwargs: extra arguments
+            Example:
+            If the model returns `{id: <id>, model_name: <name>, outputs: {..., prediction: [<predictions>], ...}}`,
+            the prediction_col_name should be `prediction`. By default, `prediction`.
+        :param kwargs: extra arguments
         """
         super().__init__(
             context=context,
@@ -1342,7 +1353,9 @@ class EnrichmentVotingEnsemble(VotingEnsemble):
         )
 
     def preprocess(self, event):
-        """Turn an entity identifier (source) to a Feature Vector"""
+        """
+        Turn an entity identifier (source) to a Feature Vector
+        """
         if isinstance(event.body, (str, bytes)):
             event.body = json.loads(event.body)
         event.body["inputs"] = self._feature_service.get(
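
Note (not part of the diff): the routers.py hunks above replace the endpoint UID that 1.7.x derived from `graph_server.function_uri` with lookups keyed by project, endpoint name, and function name, relying on the `project` and `function_name` fields added to `GraphServer` in server.py below. The following minimal sketch condenses that lookup; the helper name is illustrative, and the `graph_server`/`voting_ensemble` objects are assumed to be shaped like the ones in the diff.

    import mlrun
    import mlrun.errors


    def resolve_router_endpoint(graph_server, voting_ensemble):
        # Illustrative helper: in 1.8.0 the router endpoint is addressed by
        # project + name + function_name (as in the diff above) rather than
        # by a UID computed from graph_server.function_uri (1.7.x behavior).
        db = mlrun.get_run_db()
        try:
            return db.get_model_endpoint(
                project=graph_server.project,
                name=voting_ensemble.name,
                function_name=graph_server.function_name,
            )
        except mlrun.errors.MLRunNotFoundError:
            # The router creates a new record only when model tracking is enabled.
            return None
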
mlrun/serving/server.py CHANGED
@@ -101,6 +101,9 @@ class GraphServer(ModelObj):
         tracking_policy=None,
         secret_sources=None,
         default_content_type=None,
+        function_name=None,
+        function_tag=None,
+        project=None,
     ):
         self._graph = None
         self.graph: Union[RouterStep, RootFlowStep] = graph
@@ -123,6 +126,9 @@ class GraphServer(ModelObj):
         self.resource_cache = None
         self.default_content_type = default_content_type
         self.http_trigger = True
+        self.function_name = function_name
+        self.function_tag = function_tag
+        self.project = project
 
     def set_current_function(self, function):
         """set which child function this server is currently running on"""
@@ -196,7 +202,7 @@ class GraphServer(ModelObj):
     def test(
         self,
         path: str = "/",
-        body: Union[str, bytes, dict] = None,
+        body: Optional[Union[str, bytes, dict]] = None,
         method: str = "",
         headers: Optional[str] = None,
         content_type: Optional[str] = None,
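
Note (not part of the diff): with `function_name`, `function_tag`, and `project` now carried on `GraphServer`, the router patches its children endpoints by name/project/function rather than recreating them. A hedged sketch of that call shape follows; the project, names, and child endpoint UID are placeholders, while the argument names and enum values mirror the `patch_model_endpoint` call in the routers.py diff.

    import mlrun
    import mlrun.common.schemas.model_monitoring as mm_schemas
    from mlrun.common.schemas.model_monitoring import ModelEndpointSchema

    db = mlrun.get_run_db()
    # Children of a router ensemble are re-tagged as LEAF_EP endpoints.
    db.patch_model_endpoint(
        name="child-model",                  # illustrative child endpoint name
        project="my-project",                # illustrative project
        function_name="ensemble",            # illustrative serving function name
        endpoint_id="<child-endpoint-uid>",  # placeholder UID
        attributes={ModelEndpointSchema.ENDPOINT_TYPE: mm_schemas.EndpointType.LEAF_EP},
    )
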