mlrun 1.7.0rc28__py3-none-any.whl → 1.7.0rc55__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic.

Files changed (135)
  1. mlrun/__main__.py +4 -2
  2. mlrun/alerts/alert.py +75 -8
  3. mlrun/artifacts/base.py +1 -0
  4. mlrun/artifacts/manager.py +9 -2
  5. mlrun/common/constants.py +4 -1
  6. mlrun/common/db/sql_session.py +3 -2
  7. mlrun/common/formatters/__init__.py +1 -0
  8. mlrun/common/formatters/artifact.py +1 -0
  9. mlrun/{model_monitoring/application.py → common/formatters/feature_set.py} +20 -6
  10. mlrun/common/formatters/run.py +3 -0
  11. mlrun/common/helpers.py +0 -1
  12. mlrun/common/schemas/__init__.py +3 -1
  13. mlrun/common/schemas/alert.py +15 -12
  14. mlrun/common/schemas/api_gateway.py +6 -6
  15. mlrun/common/schemas/auth.py +5 -0
  16. mlrun/common/schemas/client_spec.py +0 -1
  17. mlrun/common/schemas/common.py +7 -4
  18. mlrun/common/schemas/frontend_spec.py +7 -0
  19. mlrun/common/schemas/function.py +7 -0
  20. mlrun/common/schemas/model_monitoring/__init__.py +4 -3
  21. mlrun/common/schemas/model_monitoring/constants.py +41 -26
  22. mlrun/common/schemas/model_monitoring/model_endpoints.py +23 -47
  23. mlrun/common/schemas/notification.py +69 -12
  24. mlrun/common/schemas/project.py +45 -12
  25. mlrun/common/schemas/workflow.py +10 -2
  26. mlrun/common/types.py +1 -0
  27. mlrun/config.py +91 -35
  28. mlrun/data_types/data_types.py +6 -1
  29. mlrun/data_types/spark.py +2 -2
  30. mlrun/data_types/to_pandas.py +57 -25
  31. mlrun/datastore/__init__.py +1 -0
  32. mlrun/datastore/alibaba_oss.py +3 -2
  33. mlrun/datastore/azure_blob.py +125 -37
  34. mlrun/datastore/base.py +42 -21
  35. mlrun/datastore/datastore.py +4 -2
  36. mlrun/datastore/datastore_profile.py +1 -1
  37. mlrun/datastore/dbfs_store.py +3 -7
  38. mlrun/datastore/filestore.py +1 -3
  39. mlrun/datastore/google_cloud_storage.py +85 -29
  40. mlrun/datastore/inmem.py +4 -1
  41. mlrun/datastore/redis.py +1 -0
  42. mlrun/datastore/s3.py +25 -12
  43. mlrun/datastore/sources.py +76 -4
  44. mlrun/datastore/spark_utils.py +30 -0
  45. mlrun/datastore/storeytargets.py +151 -0
  46. mlrun/datastore/targets.py +102 -131
  47. mlrun/datastore/v3io.py +1 -0
  48. mlrun/db/base.py +15 -6
  49. mlrun/db/httpdb.py +57 -28
  50. mlrun/db/nopdb.py +29 -5
  51. mlrun/errors.py +20 -3
  52. mlrun/execution.py +46 -5
  53. mlrun/feature_store/api.py +25 -1
  54. mlrun/feature_store/common.py +6 -11
  55. mlrun/feature_store/feature_vector.py +3 -1
  56. mlrun/feature_store/retrieval/job.py +4 -1
  57. mlrun/feature_store/retrieval/spark_merger.py +10 -39
  58. mlrun/feature_store/steps.py +8 -0
  59. mlrun/frameworks/_common/plan.py +3 -3
  60. mlrun/frameworks/_ml_common/plan.py +1 -1
  61. mlrun/frameworks/parallel_coordinates.py +2 -3
  62. mlrun/frameworks/sklearn/mlrun_interface.py +13 -3
  63. mlrun/k8s_utils.py +48 -2
  64. mlrun/launcher/client.py +6 -6
  65. mlrun/launcher/local.py +2 -2
  66. mlrun/model.py +215 -34
  67. mlrun/model_monitoring/api.py +38 -24
  68. mlrun/model_monitoring/applications/__init__.py +1 -2
  69. mlrun/model_monitoring/applications/_application_steps.py +60 -29
  70. mlrun/model_monitoring/applications/base.py +2 -174
  71. mlrun/model_monitoring/applications/context.py +197 -70
  72. mlrun/model_monitoring/applications/evidently_base.py +11 -85
  73. mlrun/model_monitoring/applications/histogram_data_drift.py +21 -16
  74. mlrun/model_monitoring/applications/results.py +4 -4
  75. mlrun/model_monitoring/controller.py +110 -282
  76. mlrun/model_monitoring/db/stores/__init__.py +8 -3
  77. mlrun/model_monitoring/db/stores/base/store.py +3 -0
  78. mlrun/model_monitoring/db/stores/sqldb/models/base.py +9 -7
  79. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +18 -3
  80. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +43 -23
  81. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +48 -35
  82. mlrun/model_monitoring/db/tsdb/__init__.py +7 -2
  83. mlrun/model_monitoring/db/tsdb/base.py +147 -15
  84. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +94 -55
  85. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +0 -3
  86. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +144 -38
  87. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +44 -3
  88. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +246 -57
  89. mlrun/model_monitoring/helpers.py +70 -50
  90. mlrun/model_monitoring/stream_processing.py +96 -195
  91. mlrun/model_monitoring/writer.py +13 -5
  92. mlrun/package/packagers/default_packager.py +2 -2
  93. mlrun/projects/operations.py +16 -8
  94. mlrun/projects/pipelines.py +126 -115
  95. mlrun/projects/project.py +286 -129
  96. mlrun/render.py +3 -3
  97. mlrun/run.py +38 -19
  98. mlrun/runtimes/__init__.py +19 -8
  99. mlrun/runtimes/base.py +4 -1
  100. mlrun/runtimes/daskjob.py +1 -1
  101. mlrun/runtimes/funcdoc.py +1 -1
  102. mlrun/runtimes/kubejob.py +6 -6
  103. mlrun/runtimes/local.py +12 -5
  104. mlrun/runtimes/nuclio/api_gateway.py +68 -8
  105. mlrun/runtimes/nuclio/application/application.py +307 -70
  106. mlrun/runtimes/nuclio/function.py +63 -14
  107. mlrun/runtimes/nuclio/serving.py +10 -10
  108. mlrun/runtimes/pod.py +25 -19
  109. mlrun/runtimes/remotesparkjob.py +2 -5
  110. mlrun/runtimes/sparkjob/spark3job.py +16 -17
  111. mlrun/runtimes/utils.py +34 -0
  112. mlrun/serving/routers.py +2 -5
  113. mlrun/serving/server.py +37 -19
  114. mlrun/serving/states.py +30 -3
  115. mlrun/serving/v2_serving.py +44 -35
  116. mlrun/track/trackers/mlflow_tracker.py +5 -0
  117. mlrun/utils/async_http.py +1 -1
  118. mlrun/utils/db.py +18 -0
  119. mlrun/utils/helpers.py +150 -36
  120. mlrun/utils/http.py +1 -1
  121. mlrun/utils/notifications/notification/__init__.py +0 -1
  122. mlrun/utils/notifications/notification/webhook.py +8 -1
  123. mlrun/utils/notifications/notification_pusher.py +1 -1
  124. mlrun/utils/v3io_clients.py +2 -2
  125. mlrun/utils/version/version.json +2 -2
  126. {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/METADATA +153 -66
  127. {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/RECORD +131 -134
  128. {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/WHEEL +1 -1
  129. mlrun/feature_store/retrieval/conversion.py +0 -271
  130. mlrun/model_monitoring/controller_handler.py +0 -37
  131. mlrun/model_monitoring/evidently_application.py +0 -20
  132. mlrun/model_monitoring/prometheus.py +0 -216
  133. {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/LICENSE +0 -0
  134. {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/entry_points.txt +0 -0
  135. {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/top_level.txt +0 -0
mlrun/render.py CHANGED
@@ -22,7 +22,7 @@ import mlrun.utils
 
 from .config import config
 from .datastore import uri_to_ipython
-from .utils import dict_to_list, get_in, is_ipython
+from .utils import dict_to_list, get_in, is_jupyter
 
 JUPYTER_SERVER_ROOT = environ.get("HOME", "/User")
 supported_viewers = [
@@ -181,8 +181,8 @@ def run_to_html(results, display=True):
 
 
 def ipython_display(html, display=True, alt_text=None):
-    if display and html and is_ipython:
-        import IPython
+    if display and html and is_jupyter:
+        import IPython.display
 
         IPython.display.display(IPython.display.HTML(html))
     elif alt_text:
mlrun/run.py CHANGED
@@ -21,6 +21,7 @@ import tempfile
 import time
 import typing
 import uuid
+import warnings
 from base64 import b64decode
 from copy import deepcopy
 from os import environ, makedirs, path
@@ -64,6 +65,7 @@ from .runtimes.nuclio.application import ApplicationRuntime
 from .runtimes.utils import add_code_metadata, global_context
 from .utils import (
     RunKeys,
+    create_ipython_display,
     extend_hub_uri_if_needed,
     get_in,
     logger,
@@ -196,18 +198,19 @@ def load_func_code(command="", workdir=None, secrets=None, name="name"):
 def get_or_create_ctx(
     name: str,
     event=None,
-    spec=None,
+    spec: Optional[dict] = None,
     with_env: bool = True,
     rundb: str = "",
     project: str = "",
-    upload_artifacts=False,
-    labels: dict = None,
-):
-    """called from within the user program to obtain a run context
+    upload_artifacts: bool = False,
+    labels: Optional[dict] = None,
+) -> MLClientCtx:
+    """
+    Called from within the user program to obtain a run context.
 
-    the run context is an interface for receiving parameters, data and logging
+    The run context is an interface for receiving parameters, data and logging
     run results, the run context is read from the event, spec, or environment
-    (in that order), user can also work without a context (local defaults mode)
+    (in that order), user can also work without a context (local defaults mode).
 
     all results are automatically stored in the "rundb" or artifact store,
     the path to the rundb can be specified in the call or obtained from env.
@@ -217,10 +220,10 @@ def get_or_create_ctx(
     :param spec: dictionary holding run spec
     :param with_env: look for context in environment vars, default True
     :param rundb: path/url to the metadata and artifact database
-    :param project: project to initiate the context in (by default mlrun.mlctx.default_project)
+    :param project: project to initiate the context in (by default `mlrun.mlconf.default_project`)
    :param upload_artifacts: when using local context (not as part of a job/run), upload artifacts to the
                             system default artifact path location
-    :param labels: dict of the context labels
+    :param labels: (deprecated - use spec instead) dict of the context labels.
    :return: execution context
 
    Examples::
@@ -253,6 +256,20 @@
        context.log_artifact("results.html", body=b"<b> Some HTML <b>", viewer="web-app")
 
    """
+    if labels:
+        warnings.warn(
+            "The `labels` argument is deprecated and will be removed in 1.9.0. "
+            "Please use `spec` instead, e.g.:\n"
+            "spec={'metadata': {'labels': {'key': 'value'}}}",
+            FutureWarning,
+        )
+        if spec is None:
+            spec = {}
+        if "metadata" not in spec:
+            spec["metadata"] = {}
+        if "labels" not in spec["metadata"]:
+            spec["metadata"]["labels"] = {}
+        spec["metadata"]["labels"].update(labels)
 
    if global_context.get() and not spec and not event:
        return global_context.get()
@@ -306,9 +323,6 @@
    ctx = MLClientCtx.from_dict(
        newspec, rundb=out, autocommit=autocommit, tmp=tmp, host=socket.gethostname()
    )
-    labels = labels or {}
-    for key, val in labels.items():
-        ctx.set_label(key=key, value=val)
    global_context.set(ctx)
    return ctx
 
@@ -639,7 +653,7 @@ def code_to_function(
    :param requirements: a list of python packages
    :param requirements_file: path to a python requirements file
    :param categories: list of categories for mlrun Function Hub, defaults to None
-    :param labels: immutable name/value pairs to tag the function with useful metadata, defaults to None
+    :param labels: name/value pairs dict to tag the function with useful metadata, defaults to None
    :param with_doc: indicates whether to document the function parameters, defaults to True
    :param ignored_tags: notebook cells to ignore when converting notebooks to py code (separated by ';')
 
@@ -731,11 +745,10 @@
        raise ValueError("Databricks tasks only support embed_code=True")
 
    if kind == RuntimeKinds.application:
-        if handler:
-            raise MLRunInvalidArgumentError(
-                "Handler is not supported for application runtime"
-            )
-        filename, handler = ApplicationRuntime.get_filename_and_handler()
+        raise MLRunInvalidArgumentError(
+            "Embedding a code file is not supported for application runtime. "
+            "Code files should be specified via project/function source."
+        )
 
    is_nuclio, sub_kind = RuntimeKinds.resolve_nuclio_sub_kind(kind)
    code_origin = add_name(add_code_metadata(filename), name)
@@ -778,6 +791,10 @@
        raise ValueError("code_output option is only used with notebooks")
 
    if is_nuclio:
+        mlrun.utils.helpers.validate_single_def_handler(
+            function_kind=sub_kind, code=code
+        )
+
        runtime = RuntimeKinds.resolve_nuclio_runtime(kind, sub_kind)
        # default_handler is only used in :mlrun sub kind, determine the handler to invoke in function.run()
        runtime.spec.default_handler = handler if sub_kind == "mlrun" else ""
@@ -925,10 +942,12 @@ def wait_for_pipeline_completion(
    if remote:
        mldb = mlrun.db.get_run_db()
 
+        dag_display_id = create_ipython_display()
+
        def _wait_for_pipeline_completion():
            pipeline = mldb.get_pipeline(run_id, namespace=namespace, project=project)
            pipeline_status = pipeline["run"]["status"]
-            show_kfp_run(pipeline, clear_output=True)
+            show_kfp_run(pipeline, dag_display_id=dag_display_id, with_html=False)
            if pipeline_status not in RunStatuses.stable_statuses():
                logger.debug(
                    "Waiting for pipeline completion",
mlrun/runtimes/__init__.py CHANGED
@@ -30,6 +30,8 @@ __all__ = [
     "MpiRuntimeV1",
 ]
 
+import typing
+
 from mlrun.runtimes.utils import resolve_spark_operator_version
 
 from ..common.runtimes.constants import MPIJobCRDVersions
@@ -181,7 +183,7 @@ class RuntimeKinds:
     ]
 
     @staticmethod
-    def is_log_collectable_runtime(kind: str):
+    def is_log_collectable_runtime(kind: typing.Optional[str]):
        """
        whether log collector can collect logs for that runtime
        :param kind: kind name
@@ -192,13 +194,18 @@ class RuntimeKinds:
        if RuntimeKinds.is_local_runtime(kind):
            return False
 
-        if kind not in [
-            # dask implementation is different than other runtimes, because few runs can be run against the same runtime
-            # resource, so collecting logs on that runtime resource won't be correct, the way we collect logs for dask
-            # is by using `log_std` on client side after we execute the code against the cluster, as submitting the
-            # run with the dask client will return the run stdout. for more information head to `DaskCluster._run`
-            RuntimeKinds.dask
-        ]:
+        if (
+            kind
+            not in [
+                # dask implementation is different from other runtimes, because few runs can be run against the same
+                # runtime resource, so collecting logs on that runtime resource won't be correct, the way we collect
+                # logs for dask is by using `log_std` on client side after we execute the code against the cluster,
+                # as submitting the run with the dask client will return the run stdout.
+                # For more information head to `DaskCluster._run`.
+                RuntimeKinds.dask
+            ]
+            + RuntimeKinds.nuclio_runtimes()
+        ):
            return True
 
        return False
@@ -235,6 +242,10 @@ class RuntimeKinds:
        # both spark and remote spark uses different mechanism for assigning images
        return kind not in [RuntimeKinds.spark, RuntimeKinds.remotespark]
 
+    @staticmethod
+    def supports_from_notebook(kind):
+        return kind not in [RuntimeKinds.application]
+
    @staticmethod
    def resolve_nuclio_runtime(kind: str, sub_kind: str):
        kind = kind.split(":")[0]
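
A hedged sketch of how the widened check is expected to behave after this change; it assumes `RuntimeKinds.nuclio` is among the kinds returned by `RuntimeKinds.nuclio_runtimes()` and that `job` is neither a local nor a nuclio-based kind:

    from mlrun.runtimes import RuntimeKinds

    # dask logs are gathered client-side via `log_std`, and nuclio kinds are now excluded as well
    print(RuntimeKinds.is_log_collectable_runtime(RuntimeKinds.dask))    # expected: False
    print(RuntimeKinds.is_log_collectable_runtime(RuntimeKinds.nuclio))  # expected: False

    # a regular kubernetes job should still be log collectable
    print(RuntimeKinds.is_log_collectable_runtime(RuntimeKinds.job))     # expected: True
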
mlrun/runtimes/base.py CHANGED
@@ -674,7 +674,7 @@ class BaseRuntime(ModelObj):
         selector="",
         hyper_param_options: HyperParamOptions = None,
         inputs: dict = None,
-        outputs: dict = None,
+        outputs: list = None,
         workdir: str = "",
         artifact_path: str = "",
         image: str = "",
@@ -929,3 +929,6 @@ class BaseRuntime(ModelObj):
             if "default" in p:
                 line += f", default={p['default']}"
             print(" " + line)
+
+    def skip_image_enrichment(self):
+        return False
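
The run signature shown above now types `outputs` as a list of output keys rather than a dict. A brief hedged usage sketch, assuming a hypothetical `trainer.py` with a `train` handler:

    import mlrun

    project = mlrun.get_or_create_project("demo", context="./")
    fn = project.set_function("trainer.py", name="trainer", kind="job", image="mlrun/mlrun")

    # `outputs` takes a list of expected output keys, not a mapping
    run = fn.run(handler="train", params={"epochs": 3}, outputs=["model", "report"], local=True)
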
mlrun/runtimes/daskjob.py CHANGED
@@ -379,7 +379,7 @@ class DaskCluster(KubejobRuntime):
         :param show_on_failure: show logs only in case of build failure
         :param force_build: force building the image, even when no changes were made
 
-        :return True if the function is ready (deployed)
+        :return: True if the function is ready (deployed)
         """
         return super().deploy(
             watch,
mlrun/runtimes/funcdoc.py CHANGED
@@ -247,7 +247,7 @@ class ASTVisitor(ast.NodeVisitor):
         self.exprs.append(node)
         super().generic_visit(node)
 
-    def visit_FunctionDef(self, node):
+    def visit_FunctionDef(self, node):  # noqa: N802
         self.funcs.append(node)
         self.generic_visit(node)
 
mlrun/runtimes/kubejob.py CHANGED
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import typing
 import warnings
 
 from mlrun_pipelines.common.ops import build_op
@@ -143,11 +143,11 @@ class KubejobRuntime(KubeResource):
 
     def deploy(
         self,
-        watch=True,
-        with_mlrun=None,
-        skip_deployed=False,
-        is_kfp=False,
-        mlrun_version_specifier=None,
+        watch: bool = True,
+        with_mlrun: typing.Optional[bool] = None,
+        skip_deployed: bool = False,
+        is_kfp: bool = False,
+        mlrun_version_specifier: typing.Optional[bool] = None,
         builder_env: dict = None,
         show_on_failure: bool = False,
         force_build: bool = False,
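
For reference, a hedged usage sketch of the now-annotated `deploy()`; the function definition itself is illustrative:

    import mlrun

    fn = mlrun.code_to_function("trainer", filename="trainer.py", kind="job", image="mlrun/mlrun")

    # build the runtime image and wait for the build; reuse an already-built image when possible
    fn.deploy(watch=True, with_mlrun=False, skip_deployed=True)
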
mlrun/runtimes/local.py CHANGED
@@ -58,7 +58,9 @@ class ParallelRunner:
 
         return TrackerManager()
 
-    def _get_handler(self, handler, context):
+    def _get_handler(
+        self, handler: str, context: MLClientCtx, embed_in_sys: bool = True
+    ):
         return handler
 
     def _get_dask_client(self, options):
@@ -86,7 +88,7 @@ class ParallelRunner:
         handler = runobj.spec.handler
         self._force_handler(handler)
         set_paths(self.spec.pythonpath)
-        handler = self._get_handler(handler, execution)
+        handler = self._get_handler(handler, execution, embed_in_sys=False)
 
         client, function_name = self._get_dask_client(generator.options)
         parallel_runs = generator.options.parallel_runs or 4
@@ -143,7 +145,10 @@
         if function_name and generator.options.teardown_dask:
             logger.info("Tearing down the dask cluster..")
             mlrun.get_run_db().delete_runtime_resources(
-                kind="dask", object_id=function_name, force=True
+                project=self.metadata.project,
+                kind=mlrun.runtimes.RuntimeKinds.dask,
+                object_id=function_name,
+                force=True,
             )
 
         return results
@@ -224,12 +229,14 @@ class LocalRuntime(BaseRuntime, ParallelRunner):
     def is_deployed(self):
         return True
 
-    def _get_handler(self, handler, context):
+    def _get_handler(
+        self, handler: str, context: MLClientCtx, embed_in_sys: bool = True
+    ):
         command = self.spec.command
         if not command and self.spec.build.functionSourceCode:
             # if the code is embedded in the function object extract or find it
             command, _ = mlrun.run.load_func_code(self)
-        return load_module(command, handler, context)
+        return load_module(command, handler, context, embed_in_sys=embed_in_sys)
 
     def _pre_run(self, runobj: RunObject, execution: MLClientCtx):
         workdir = self.spec.workdir
mlrun/runtimes/nuclio/api_gateway.py CHANGED
@@ -22,6 +22,7 @@ from nuclio.auth import AuthKinds as NuclioAuthKinds
 
 import mlrun
 import mlrun.common.constants as mlrun_constants
+import mlrun.common.helpers
 import mlrun.common.schemas as schemas
 import mlrun.common.types
 from mlrun.model import ModelObj
@@ -202,8 +203,13 @@ class APIGatewaySpec(ModelObj):
         self.project = project
         self.ports = ports
 
+        self.enrich()
         self.validate(project=project, functions=functions, canary=canary, ports=ports)
 
+    def enrich(self):
+        if self.path and not self.path.startswith("/"):
+            self.path = f"/{self.path}"
+
     def validate(
         self,
         project: str,
@@ -326,6 +332,11 @@
         return function_names
 
 
+class APIGatewayStatus(ModelObj):
+    def __init__(self, state: Optional[schemas.APIGatewayState] = None):
+        self.state = state or schemas.APIGatewayState.none
+
+
 class APIGateway(ModelObj):
     _dict_fields = [
         "metadata",
@@ -338,16 +349,18 @@
         self,
         metadata: APIGatewayMetadata,
         spec: APIGatewaySpec,
+        status: Optional[APIGatewayStatus] = None,
     ):
         """
         Initialize the APIGateway instance.
 
         :param metadata: (APIGatewayMetadata) The metadata of the API gateway.
         :param spec: (APIGatewaySpec) The spec of the API gateway.
+        :param status: (APIGatewayStatus) The status of the API gateway.
         """
         self.metadata = metadata
         self.spec = spec
-        self.state = ""
+        self.status = status
 
     @property
     def metadata(self) -> APIGatewayMetadata:
@@ -365,12 +378,21 @@
     def spec(self, spec):
         self._spec = self._verify_dict(spec, "spec", APIGatewaySpec)
 
+    @property
+    def status(self) -> APIGatewayStatus:
+        return self._status
+
+    @status.setter
+    def status(self, status):
+        self._status = self._verify_dict(status, "status", APIGatewayStatus)
+
     def invoke(
         self,
         method="POST",
         headers: dict = None,
         credentials: Optional[tuple[str, str]] = None,
         path: Optional[str] = None,
+        body: Optional[Union[str, bytes, dict]] = None,
         **kwargs,
     ):
         """
@@ -381,6 +403,7 @@
         :param credentials: (Optional[tuple[str, str]], optional) The (username,password) for the invocation if required
             can also be set by the environment variable (_, V3IO_ACCESS_KEY) for access key authentication.
         :param path: (str, optional) The sub-path for the invocation.
+        :param body: (Optional[Union[str, bytes, dict]]) The body of the invocation.
         :param kwargs: (dict) Additional keyword arguments.
 
         :return: The response from the API gateway invocation.
@@ -394,7 +417,7 @@
         )
         if not self.is_ready():
             raise mlrun.errors.MLRunPreconditionFailedError(
-                f"API gateway is not ready. " f"Current state: {self.state}"
+                f"API gateway is not ready. " f"Current state: {self.status.state}"
             )
 
         auth = None
@@ -429,6 +452,13 @@
                 "API Gateway invocation requires authentication. Please set V3IO_ACCESS_KEY env var"
             )
         url = urljoin(self.invoke_url, path or "")
+
+        # Determine the correct keyword argument for the body
+        if isinstance(body, dict):
+            kwargs["json"] = body
+        elif isinstance(body, (str, bytes)):
+            kwargs["data"] = body
+
         return requests.request(
             method=method,
             url=url,
@@ -459,10 +489,10 @@
         )
 
     def is_ready(self):
-        if self.state is not schemas.api_gateway.APIGatewayState.ready:
+        if self.status.state is not schemas.api_gateway.APIGatewayState.ready:
             # try to sync the state
             self.sync()
-        return self.state == schemas.api_gateway.APIGatewayState.ready
+        return self.status.state == schemas.api_gateway.APIGatewayState.ready
 
     def sync(self):
         """
@@ -479,7 +509,7 @@
         self.spec.functions = synced_gateway.spec.functions
         self.spec.canary = synced_gateway.spec.canary
         self.spec.description = synced_gateway.spec.description
-        self.state = synced_gateway.state
+        self.status.state = synced_gateway.status.state
 
     def with_basic_auth(self, username: str, password: str):
         """
@@ -546,6 +576,29 @@
             project=self.spec.project, functions=self.spec.functions, ports=ports
         )
 
+    def with_force_ssl_redirect(self):
+        """
+        Set SSL redirect annotation for the API gateway.
+        """
+        self.metadata.annotations["nginx.ingress.kubernetes.io/force-ssl-redirect"] = (
+            "true"
+        )
+
+    def with_gateway_timeout(self, gateway_timeout: int):
+        """
+        Set gateway proxy connect/read/send timeout annotations
+        :param gateway_timeout: The timeout in seconds
+        """
+        mlrun.runtimes.utils.enrich_gateway_timeout_annotations(
+            self.metadata.annotations, gateway_timeout
+        )
+
+    def with_annotations(self, annotations: dict):
+        """set a key/value annotations in the metadata of the api gateway"""
+        for key, value in annotations.items():
+            self.metadata.annotations[key] = str(value)
+        return self
+
     @classmethod
     def from_scheme(cls, api_gateway: schemas.APIGateway):
         project = api_gateway.metadata.labels.get(
@@ -560,6 +613,8 @@
         new_api_gateway = cls(
             metadata=APIGatewayMetadata(
                 name=api_gateway.spec.name,
+                annotations=api_gateway.metadata.annotations,
+                labels=api_gateway.metadata.labels,
             ),
             spec=APIGatewaySpec(
                 project=project,
@@ -570,8 +625,8 @@
                 functions=functions,
                 canary=canary,
             ),
+            status=APIGatewayStatus(state=state),
         )
-        new_api_gateway.state = state
         return new_api_gateway
 
     def to_scheme(self) -> schemas.APIGateway:
@@ -600,7 +655,11 @@
             upstreams[i].port = port
 
         api_gateway = schemas.APIGateway(
-            metadata=schemas.APIGatewayMetadata(name=self.metadata.name, labels={}),
+            metadata=schemas.APIGatewayMetadata(
+                name=self.metadata.name,
+                labels=self.metadata.labels,
+                annotations=self.metadata.annotations,
+            ),
             spec=schemas.APIGatewaySpec(
                 name=self.metadata.name,
                 description=self.spec.description,
@@ -611,6 +670,7 @@
                 ),
                 upstreams=upstreams,
             ),
+            status=schemas.APIGatewayStatus(state=self.status.state),
         )
         api_gateway.spec.authentication = self.spec.authentication.to_scheme()
         return api_gateway
@@ -627,7 +687,7 @@
         host = self.spec.host
         if not self.spec.host.startswith("http"):
             host = f"https://{self.spec.host}"
-        return urljoin(host, self.spec.path)
+        return urljoin(host, self.spec.path).rstrip("/")
 
     @staticmethod
     def _generate_basic_auth(username: str, password: str):
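
A hedged sketch tying the API gateway additions together (the status object, the `body` argument, and the annotation helpers); the gateway, project, and function names are illustrative, and any `APIGatewaySpec` keyword arguments beyond those visible in this diff are assumptions:

    from mlrun.runtimes.nuclio.api_gateway import (
        APIGateway,
        APIGatewayMetadata,
        APIGatewaySpec,
    )

    # hypothetical project and function names, for illustration only
    gateway = APIGateway(
        metadata=APIGatewayMetadata(name="my-gateway"),
        spec=APIGatewaySpec(project="my-project", functions=["my-project/my-function"]),
    )

    # the new helpers only mutate metadata.annotations on the client-side object
    gateway.with_annotations({"team": "ml"})
    gateway.with_gateway_timeout(60)
    gateway.with_force_ssl_redirect()

    # dict bodies are sent as `json`, str/bytes bodies as raw `data` (see the `invoke` change above)
    if gateway.is_ready():
        response = gateway.invoke(method="POST", body={"inputs": [[1, 2, 3]]})
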