mlrun 1.10.0rc18__py3-none-any.whl → 1.11.0rc16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; consult the registry's advisory listing for more details.

Files changed (167):
  1. mlrun/__init__.py +24 -3
  2. mlrun/__main__.py +0 -4
  3. mlrun/artifacts/dataset.py +2 -2
  4. mlrun/artifacts/document.py +6 -1
  5. mlrun/artifacts/llm_prompt.py +21 -15
  6. mlrun/artifacts/model.py +3 -3
  7. mlrun/artifacts/plots.py +1 -1
  8. mlrun/{model_monitoring/db/tsdb/tdengine → auth}/__init__.py +2 -3
  9. mlrun/auth/nuclio.py +89 -0
  10. mlrun/auth/providers.py +429 -0
  11. mlrun/auth/utils.py +415 -0
  12. mlrun/common/constants.py +14 -0
  13. mlrun/common/model_monitoring/helpers.py +123 -0
  14. mlrun/common/runtimes/constants.py +28 -0
  15. mlrun/common/schemas/__init__.py +14 -3
  16. mlrun/common/schemas/alert.py +2 -2
  17. mlrun/common/schemas/api_gateway.py +3 -0
  18. mlrun/common/schemas/auth.py +12 -10
  19. mlrun/common/schemas/client_spec.py +4 -0
  20. mlrun/common/schemas/constants.py +25 -0
  21. mlrun/common/schemas/frontend_spec.py +1 -8
  22. mlrun/common/schemas/function.py +34 -0
  23. mlrun/common/schemas/hub.py +33 -20
  24. mlrun/common/schemas/model_monitoring/__init__.py +2 -1
  25. mlrun/common/schemas/model_monitoring/constants.py +12 -15
  26. mlrun/common/schemas/model_monitoring/functions.py +13 -4
  27. mlrun/common/schemas/model_monitoring/model_endpoints.py +11 -0
  28. mlrun/common/schemas/pipeline.py +1 -1
  29. mlrun/common/schemas/secret.py +17 -2
  30. mlrun/common/secrets.py +95 -1
  31. mlrun/common/types.py +10 -10
  32. mlrun/config.py +69 -19
  33. mlrun/data_types/infer.py +2 -2
  34. mlrun/datastore/__init__.py +12 -5
  35. mlrun/datastore/azure_blob.py +162 -47
  36. mlrun/datastore/base.py +274 -10
  37. mlrun/datastore/datastore.py +7 -2
  38. mlrun/datastore/datastore_profile.py +84 -22
  39. mlrun/datastore/model_provider/huggingface_provider.py +225 -41
  40. mlrun/datastore/model_provider/mock_model_provider.py +87 -0
  41. mlrun/datastore/model_provider/model_provider.py +206 -74
  42. mlrun/datastore/model_provider/openai_provider.py +226 -66
  43. mlrun/datastore/s3.py +39 -18
  44. mlrun/datastore/sources.py +1 -1
  45. mlrun/datastore/store_resources.py +4 -4
  46. mlrun/datastore/storeytargets.py +17 -12
  47. mlrun/datastore/targets.py +1 -1
  48. mlrun/datastore/utils.py +25 -6
  49. mlrun/datastore/v3io.py +1 -1
  50. mlrun/db/base.py +63 -32
  51. mlrun/db/httpdb.py +373 -153
  52. mlrun/db/nopdb.py +54 -21
  53. mlrun/errors.py +4 -2
  54. mlrun/execution.py +66 -25
  55. mlrun/feature_store/api.py +1 -1
  56. mlrun/feature_store/common.py +1 -1
  57. mlrun/feature_store/feature_vector_utils.py +1 -1
  58. mlrun/feature_store/steps.py +8 -6
  59. mlrun/frameworks/_common/utils.py +3 -3
  60. mlrun/frameworks/_dl_common/loggers/logger.py +1 -1
  61. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -1
  62. mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +1 -1
  63. mlrun/frameworks/_ml_common/utils.py +2 -1
  64. mlrun/frameworks/auto_mlrun/auto_mlrun.py +4 -3
  65. mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +2 -1
  66. mlrun/frameworks/onnx/dataset.py +2 -1
  67. mlrun/frameworks/onnx/mlrun_interface.py +2 -1
  68. mlrun/frameworks/pytorch/callbacks/logging_callback.py +5 -4
  69. mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +2 -1
  70. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +2 -1
  71. mlrun/frameworks/pytorch/utils.py +2 -1
  72. mlrun/frameworks/sklearn/metric.py +2 -1
  73. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +5 -4
  74. mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +2 -1
  75. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +2 -1
  76. mlrun/hub/__init__.py +52 -0
  77. mlrun/hub/base.py +142 -0
  78. mlrun/hub/module.py +172 -0
  79. mlrun/hub/step.py +113 -0
  80. mlrun/k8s_utils.py +105 -16
  81. mlrun/launcher/base.py +15 -7
  82. mlrun/launcher/local.py +4 -1
  83. mlrun/model.py +14 -4
  84. mlrun/model_monitoring/__init__.py +0 -1
  85. mlrun/model_monitoring/api.py +65 -28
  86. mlrun/model_monitoring/applications/__init__.py +1 -1
  87. mlrun/model_monitoring/applications/base.py +299 -128
  88. mlrun/model_monitoring/applications/context.py +2 -4
  89. mlrun/model_monitoring/controller.py +132 -58
  90. mlrun/model_monitoring/db/_schedules.py +38 -29
  91. mlrun/model_monitoring/db/_stats.py +6 -16
  92. mlrun/model_monitoring/db/tsdb/__init__.py +9 -7
  93. mlrun/model_monitoring/db/tsdb/base.py +29 -9
  94. mlrun/model_monitoring/db/tsdb/preaggregate.py +234 -0
  95. mlrun/model_monitoring/db/tsdb/stream_graph_steps.py +63 -0
  96. mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_metrics_queries.py +414 -0
  97. mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_predictions_queries.py +376 -0
  98. mlrun/model_monitoring/db/tsdb/timescaledb/queries/timescaledb_results_queries.py +590 -0
  99. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_connection.py +434 -0
  100. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_connector.py +541 -0
  101. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_operations.py +808 -0
  102. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_schema.py +502 -0
  103. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream.py +163 -0
  104. mlrun/model_monitoring/db/tsdb/timescaledb/timescaledb_stream_graph_steps.py +60 -0
  105. mlrun/model_monitoring/db/tsdb/timescaledb/utils/timescaledb_dataframe_processor.py +141 -0
  106. mlrun/model_monitoring/db/tsdb/timescaledb/utils/timescaledb_query_builder.py +585 -0
  107. mlrun/model_monitoring/db/tsdb/timescaledb/writer_graph_steps.py +73 -0
  108. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +20 -9
  109. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +235 -51
  110. mlrun/model_monitoring/features_drift_table.py +2 -1
  111. mlrun/model_monitoring/helpers.py +30 -6
  112. mlrun/model_monitoring/stream_processing.py +34 -28
  113. mlrun/model_monitoring/writer.py +224 -4
  114. mlrun/package/__init__.py +2 -1
  115. mlrun/platforms/__init__.py +0 -43
  116. mlrun/platforms/iguazio.py +8 -4
  117. mlrun/projects/operations.py +17 -11
  118. mlrun/projects/pipelines.py +2 -2
  119. mlrun/projects/project.py +187 -123
  120. mlrun/run.py +95 -21
  121. mlrun/runtimes/__init__.py +2 -186
  122. mlrun/runtimes/base.py +103 -25
  123. mlrun/runtimes/constants.py +225 -0
  124. mlrun/runtimes/daskjob.py +5 -2
  125. mlrun/runtimes/databricks_job/databricks_runtime.py +2 -1
  126. mlrun/runtimes/local.py +5 -2
  127. mlrun/runtimes/mounts.py +20 -2
  128. mlrun/runtimes/nuclio/__init__.py +12 -7
  129. mlrun/runtimes/nuclio/api_gateway.py +36 -6
  130. mlrun/runtimes/nuclio/application/application.py +339 -40
  131. mlrun/runtimes/nuclio/function.py +222 -72
  132. mlrun/runtimes/nuclio/serving.py +132 -42
  133. mlrun/runtimes/pod.py +213 -21
  134. mlrun/runtimes/utils.py +49 -9
  135. mlrun/secrets.py +99 -14
  136. mlrun/serving/__init__.py +2 -0
  137. mlrun/serving/remote.py +84 -11
  138. mlrun/serving/routers.py +26 -44
  139. mlrun/serving/server.py +138 -51
  140. mlrun/serving/serving_wrapper.py +6 -2
  141. mlrun/serving/states.py +997 -283
  142. mlrun/serving/steps.py +62 -0
  143. mlrun/serving/system_steps.py +149 -95
  144. mlrun/serving/v2_serving.py +9 -10
  145. mlrun/track/trackers/mlflow_tracker.py +29 -31
  146. mlrun/utils/helpers.py +292 -94
  147. mlrun/utils/http.py +9 -2
  148. mlrun/utils/notifications/notification/base.py +18 -0
  149. mlrun/utils/notifications/notification/git.py +3 -5
  150. mlrun/utils/notifications/notification/mail.py +39 -16
  151. mlrun/utils/notifications/notification/slack.py +2 -4
  152. mlrun/utils/notifications/notification/webhook.py +2 -5
  153. mlrun/utils/notifications/notification_pusher.py +3 -3
  154. mlrun/utils/version/version.json +2 -2
  155. mlrun/utils/version/version.py +3 -4
  156. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/METADATA +63 -74
  157. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/RECORD +161 -143
  158. mlrun/api/schemas/__init__.py +0 -259
  159. mlrun/db/auth_utils.py +0 -152
  160. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +0 -344
  161. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +0 -75
  162. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py +0 -281
  163. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +0 -1266
  164. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/WHEEL +0 -0
  165. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/entry_points.txt +0 -0
  166. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/licenses/LICENSE +0 -0
  167. {mlrun-1.10.0rc18.dist-info → mlrun-1.11.0rc16.dist-info}/top_level.txt +0 -0
mlrun/serving/server.py CHANGED
@@ -17,12 +17,14 @@ __all__ = ["GraphServer", "create_graph_server", "GraphContext", "MockEvent"]
17
17
  import asyncio
18
18
  import base64
19
19
  import copy
20
+ import importlib
20
21
  import json
21
22
  import os
22
23
  import socket
23
24
  import traceback
24
25
  import uuid
25
- from datetime import datetime, timezone
26
+ from collections import defaultdict
27
+ from datetime import UTC, datetime
26
28
  from typing import Any, Optional, Union
27
29
 
28
30
  import pandas as pd
@@ -31,9 +33,10 @@ from nuclio import Context as NuclioContext
31
33
  from nuclio.request import Logger as NuclioLogger
32
34
 
33
35
  import mlrun
34
- import mlrun.common.constants
35
36
  import mlrun.common.helpers
36
37
  import mlrun.common.schemas
38
+ import mlrun.common.schemas.model_monitoring.constants as mm_constants
39
+ import mlrun.datastore.datastore_profile as ds_profile
37
40
  import mlrun.model_monitoring
38
41
  import mlrun.utils
39
42
  from mlrun.config import config
@@ -48,7 +51,7 @@ from ..datastore.store_resources import ResourceCache
48
51
  from ..errors import MLRunInvalidArgumentError
49
52
  from ..execution import MLClientCtx
50
53
  from ..model import ModelObj
51
- from ..utils import get_caller_globals
54
+ from ..utils import get_caller_globals, get_relative_module_name_from_path
52
55
  from .states import (
53
56
  FlowStep,
54
57
  MonitoredStep,
@@ -80,7 +83,6 @@ class _StreamContext:
80
83
  self.hostname = socket.gethostname()
81
84
  self.function_uri = function_uri
82
85
  self.output_stream = None
83
- stream_uri = None
84
86
  log_stream = parameters.get(FileTargetKind.LOG_STREAM, "")
85
87
 
86
88
  if (enabled or log_stream) and function_uri:
@@ -91,20 +93,16 @@ class _StreamContext:
91
93
 
92
94
  stream_args = parameters.get("stream_args", {})
93
95
 
94
- if log_stream == DUMMY_STREAM:
95
- # Dummy stream used for testing, see tests/serving/test_serving.py
96
- stream_uri = DUMMY_STREAM
97
- elif not stream_args.get("mock"): # if not a mock: `context.is_mock = True`
98
- stream_uri = mlrun.model_monitoring.get_stream_path(project=project)
99
-
100
96
  if log_stream:
101
- # Update the stream path to the log stream value
102
- stream_uri = log_stream.format(project=project)
103
- self.output_stream = get_stream_pusher(stream_uri, **stream_args)
97
+ # Get the output stream from the log stream path
98
+ stream_path = log_stream.format(project=project)
99
+ self.output_stream = get_stream_pusher(stream_path, **stream_args)
104
100
  else:
105
101
  # Get the output stream from the profile
106
102
  self.output_stream = mlrun.model_monitoring.helpers.get_output_stream(
107
- project=project, mock=stream_args.get("mock", False)
103
+ project=project,
104
+ profile=parameters.get("stream_profile"),
105
+ mock=stream_args.get("mock", False),
108
106
  )
109
107
 
110
108
 
@@ -182,11 +180,12 @@ class GraphServer(ModelObj):
182
180
  self,
183
181
  context,
184
182
  namespace,
185
- resource_cache: ResourceCache = None,
183
+ resource_cache: Optional[ResourceCache] = None,
186
184
  logger=None,
187
185
  is_mock=False,
188
186
  monitoring_mock=False,
189
- ):
187
+ stream_profile: Optional[ds_profile.DatastoreProfile] = None,
188
+ ) -> None:
190
189
  """for internal use, initialize all steps (recursively)"""
191
190
 
192
191
  if self.secret_sources:
@@ -201,6 +200,20 @@ class GraphServer(ModelObj):
201
200
  context.monitoring_mock = monitoring_mock
202
201
  context.root = self.graph
203
202
 
203
+ if is_mock and monitoring_mock:
204
+ if stream_profile:
205
+ # Add the user-defined stream profile to the parameters
206
+ self.parameters["stream_profile"] = stream_profile
207
+ elif not (
208
+ self.parameters.get(FileTargetKind.LOG_STREAM)
209
+ or mlrun.get_secret_or_env(
210
+ mm_constants.ProjectSecretKeys.STREAM_PROFILE_NAME
211
+ )
212
+ ):
213
+ # Set a dummy log stream for mocking purposes if there is no direct
214
+ # user-defined stream profile and no information in the environment
215
+ self.parameters[FileTargetKind.LOG_STREAM] = DUMMY_STREAM
216
+
204
217
  context.stream = _StreamContext(
205
218
  self.track_models, self.parameters, self.function_uri
206
219
  )
@@ -290,7 +303,7 @@ class GraphServer(ModelObj):
290
303
  if event_path_key in event.headers:
291
304
  event.path = event.headers.get(event_path_key)
292
305
 
293
- if isinstance(event.body, (str, bytes)) and (
306
+ if isinstance(event.body, str | bytes) and (
294
307
  not event.content_type or event.content_type in ["json", "application/json"]
295
308
  ):
296
309
  # assume it is json and try to load
@@ -335,7 +348,7 @@ class GraphServer(ModelObj):
335
348
  ):
336
349
  return body
337
350
 
338
- if body and not isinstance(body, (str, bytes)):
351
+ if body and not isinstance(body, str | bytes):
339
352
  body = json.dumps(body)
340
353
  return context.Response(
341
354
  body=body, content_type="application/json", status_code=200
@@ -350,8 +363,6 @@ class GraphServer(ModelObj):
350
363
  def add_error_raiser_step(
351
364
  graph: RootFlowStep, monitored_steps: dict[str, MonitoredStep]
352
365
  ) -> RootFlowStep:
353
- monitored_steps_raisers = {}
354
- user_steps = list(graph.steps.values())
355
366
  for monitored_step in monitored_steps.values():
356
367
  error_step = graph.add_step(
357
368
  class_name="mlrun.serving.states.ModelRunnerErrorRaiser",
@@ -361,25 +372,12 @@ def add_error_raiser_step(
361
372
  raise_exception=monitored_step.raise_exception,
362
373
  models_names=list(monitored_step.class_args["models"].keys()),
363
374
  model_endpoint_creation_strategy=mlrun.common.schemas.ModelEndpointCreationStrategy.SKIP,
375
+ function=monitored_step.function,
364
376
  )
365
377
  if monitored_step.responder:
366
378
  monitored_step.responder = False
367
379
  error_step.respond()
368
- monitored_steps_raisers[monitored_step.name] = error_step.name
369
380
  error_step.on_error = monitored_step.on_error
370
- if monitored_steps_raisers:
371
- for step in user_steps:
372
- if step.after:
373
- if isinstance(step.after, list):
374
- for i in range(len(step.after)):
375
- if step.after[i] in monitored_steps_raisers:
376
- step.after[i] = monitored_steps_raisers[step.after[i]]
377
- else:
378
- if (
379
- isinstance(step.after, str)
380
- and step.after in monitored_steps_raisers
381
- ):
382
- step.after = monitored_steps_raisers[step.after]
383
381
  return graph
384
382
 
385
383
 
@@ -403,6 +401,7 @@ def add_monitoring_general_steps(
403
401
  "mlrun.serving.system_steps.BackgroundTaskStatus",
404
402
  "background_task_status_step",
405
403
  model_endpoint_creation_strategy=mlrun.common.schemas.ModelEndpointCreationStrategy.SKIP,
404
+ full_event=True,
406
405
  )
407
406
  monitor_flow_step = graph.add_step(
408
407
  "storey.Filter",
@@ -508,10 +507,6 @@ def add_system_steps_to_graph(
508
507
  monitor_flow_step.after = [
509
508
  step_name,
510
509
  ]
511
- context.logger.info_with(
512
- "Server graph after adding system steps",
513
- graph=str(graph.steps),
514
- )
515
510
  return graph
516
511
 
517
512
 
@@ -569,21 +564,46 @@ async def async_execute_graph(
569
564
  batch_size: Optional[int],
570
565
  read_as_lists: bool,
571
566
  nest_under_inputs: bool,
572
- ) -> list[Any]:
567
+ ) -> None:
568
+ # Validate that data parameter is a DataItem and not passed via params
569
+ if not isinstance(data, DataItem):
570
+ raise MLRunInvalidArgumentError(
571
+ f"Parameter 'data' has type hint 'DataItem' but got {type(data).__name__} instead. "
572
+ f"Data files and artifacts must be passed via the 'inputs' parameter, not 'params'. "
573
+ f"The 'params' parameter is for simple configuration values (strings, numbers, booleans), "
574
+ f"while 'inputs' is for data files that need to be loaded. "
575
+ f"Example: run_function(..., inputs={{'data': 'path/to/data.csv'}}, params={{other_config: value}})"
576
+ )
577
+ run_call_count = 0
573
578
  spec = mlrun.utils.get_serving_spec()
574
-
575
- namespace = {}
579
+ modname = None
576
580
  code = os.getenv("MLRUN_EXEC_CODE")
577
581
  if code:
578
582
  code = base64.b64decode(code).decode("utf-8")
579
- exec(code, namespace)
583
+ with open("user_code.py", "w") as fp:
584
+ fp.write(code)
585
+ modname = "user_code"
580
586
  else:
581
587
  # TODO: find another way to get the local file path, or ensure that MLRUN_EXEC_CODE
582
588
  # gets set in local flow and not just in the remote pod
583
- source_filename = spec.get("filename", None)
584
- if source_filename:
585
- with open(source_filename) as f:
586
- exec(f.read(), namespace)
589
+ source_file_path = spec.get("filename", None)
590
+ if source_file_path:
591
+ source_file_path_object, working_dir_path_object = (
592
+ mlrun.utils.helpers.get_source_and_working_dir_paths(source_file_path)
593
+ )
594
+ if not source_file_path_object.is_relative_to(working_dir_path_object):
595
+ raise mlrun.errors.MLRunRuntimeError(
596
+ f"Source file path '{source_file_path}' is not under the current working directory "
597
+ f"(which is required when running with local=True)"
598
+ )
599
+ modname = get_relative_module_name_from_path(
600
+ source_file_path_object, working_dir_path_object
601
+ )
602
+
603
+ namespace = {}
604
+ if modname:
605
+ mod = importlib.import_module(modname)
606
+ namespace = mod.__dict__
587
607
 
588
608
  server = GraphServer.from_dict(spec)
589
609
 
@@ -613,7 +633,7 @@ async def async_execute_graph(
613
633
 
614
634
  if df.empty:
615
635
  context.logger.warn("Job terminated due to empty inputs (0 rows)")
616
- return []
636
+ return
617
637
 
618
638
  track_models = spec.get("track_models")
619
639
 
@@ -640,7 +660,7 @@ async def async_execute_graph(
640
660
  start_time = end_time = df["timestamp"].iloc[0].isoformat()
641
661
  else:
642
662
  # end time will be set from clock time when the batch completes
643
- start_time = datetime.now(tz=timezone.utc).isoformat()
663
+ start_time = datetime.now(tz=UTC).isoformat()
644
664
 
645
665
  server.graph = add_system_steps_to_graph(
646
666
  server.project,
@@ -653,7 +673,6 @@ async def async_execute_graph(
653
673
 
654
674
  if config.log_level.lower() == "debug":
655
675
  server.verbose = True
656
- context.logger.info_with("Initializing states", namespace=namespace)
657
676
  kwargs = {}
658
677
  if hasattr(context, "is_mock"):
659
678
  kwargs["is_mock"] = context.is_mock
@@ -671,6 +690,7 @@ async def async_execute_graph(
671
690
  context.logger.info(server.to_yaml())
672
691
 
673
692
  async def run(body):
693
+ nonlocal run_call_count
674
694
  event = storey.Event(id=index, body=body)
675
695
  if timestamp_column:
676
696
  if batching:
@@ -685,6 +705,7 @@ async def async_execute_graph(
685
705
  f"Event body '{body}' did not contain timestamp column '{timestamp_column}'"
686
706
  )
687
707
  event._original_timestamp = body[timestamp_column]
708
+ run_call_count += 1
688
709
  return await server.run(event, context)
689
710
 
690
711
  if batching and not batch_size:
@@ -719,7 +740,7 @@ async def async_execute_graph(
719
740
  server = GraphServer.from_dict(spec)
720
741
  server.init_states(None, namespace)
721
742
 
722
- batch_completion_time = datetime.now(tz=timezone.utc).isoformat()
743
+ batch_completion_time = datetime.now(tz=UTC).isoformat()
723
744
 
724
745
  if not timestamp_column:
725
746
  end_time = batch_completion_time
@@ -742,7 +763,70 @@ async def async_execute_graph(
742
763
  model_endpoint_uids=model_endpoint_uids,
743
764
  )
744
765
 
745
- return responses
766
+ has_responder = False
767
+ for step in server.graph.steps.values():
768
+ if getattr(step, "responder", False):
769
+ has_responder = True
770
+ break
771
+
772
+ if has_responder:
773
+ # log the results as a dataset artifact
774
+ artifact_path = None
775
+ if (
776
+ "{{run.uid}}" not in context.artifact_path
777
+ ): # TODO: delete when IG-22841 is resolved
778
+ artifact_path = "+/{{run.uid}}" # will be concatenated to the context's path in extend_artifact_path
779
+ context.log_dataset(
780
+ "prediction", df=pd.DataFrame(responses), artifact_path=artifact_path
781
+ )
782
+
783
+ # if we got responses that appear to be in the right format, try to log per-model datasets too
784
+ if (
785
+ responses
786
+ and responses[0]
787
+ and isinstance(responses[0], dict)
788
+ and isinstance(next(iter(responses[0].values())), dict | list)
789
+ ):
790
+ try:
791
+ # turn this list of samples into a dict of lists, one per model endpoint
792
+ grouped = defaultdict(list)
793
+ for sample in responses:
794
+ for model_name, features in sample.items():
795
+ grouped[model_name].append(features)
796
+ # create a dataframe per model endpoint and log it
797
+ for model_name, features in grouped.items():
798
+ context.log_dataset(
799
+ f"prediction_{model_name}",
800
+ df=pd.DataFrame(features),
801
+ artifact_path=artifact_path,
802
+ )
803
+ except Exception as e:
804
+ context.logger.warning(
805
+ "Failed to log per-model prediction datasets",
806
+ error=err_to_str(e),
807
+ )
808
+
809
+ context.log_result("num_rows", run_call_count)
810
+
811
+
812
+ def _is_inside_asyncio_loop():
813
+ try:
814
+ asyncio.get_running_loop()
815
+ return True
816
+ except RuntimeError:
817
+ return False
818
+
819
+
820
+ # Workaround for running with local=True in Jupyter (ML-10620)
821
+ def _workaround_asyncio_nesting():
822
+ try:
823
+ import nest_asyncio
824
+ except ImportError:
825
+ raise mlrun.errors.MLRunRuntimeError(
826
+ "Cannot execute graph from within an already running asyncio loop. "
827
+ "Attempt to import nest_asyncio as a workaround failed as well."
828
+ )
829
+ nest_asyncio.apply()
746
830
 
747
831
 
748
832
  def execute_graph(
@@ -770,6 +854,9 @@ def execute_graph(
770
854
 
771
855
  :return: A list of responses.
772
856
  """
857
+ if _is_inside_asyncio_loop():
858
+ _workaround_asyncio_nesting()
859
+
773
860
  return asyncio.run(
774
861
  async_execute_graph(
775
862
  context,
@@ -11,6 +11,7 @@
11
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
+ import asyncio
14
15
 
15
16
  # serving runtime hooks, used in empty serving functions
16
17
  from mlrun.runtimes import nuclio_init_hook
@@ -20,5 +21,8 @@ def init_context(context):
20
21
  nuclio_init_hook(context, globals(), "serving_v2")
21
22
 
22
23
 
23
- def handler(context, event):
24
- return context.mlrun_handler(context, event)
24
+ async def handler(context, event):
25
+ result = context.mlrun_handler(context, event)
26
+ if asyncio.iscoroutine(result):
27
+ return await result
28
+ return result