zenml-nightly 0.83.1.dev20250624__py3-none-any.whl → 0.83.1.dev20250625__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. zenml/VERSION +1 -1
  2. zenml/cli/login.py +21 -3
  3. zenml/config/__init__.py +13 -2
  4. zenml/constants.py +0 -1
  5. zenml/exceptions.py +16 -0
  6. zenml/integrations/airflow/orchestrators/airflow_orchestrator.py +15 -6
  7. zenml/integrations/aws/orchestrators/sagemaker_orchestrator.py +54 -58
  8. zenml/integrations/azure/orchestrators/azureml_orchestrator.py +28 -19
  9. zenml/integrations/databricks/orchestrators/databricks_orchestrator.py +19 -63
  10. zenml/integrations/gcp/orchestrators/vertex_orchestrator.py +36 -61
  11. zenml/integrations/hyperai/orchestrators/hyperai_orchestrator.py +19 -22
  12. zenml/integrations/kubeflow/orchestrators/kubeflow_orchestrator.py +28 -31
  13. zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py +33 -20
  14. zenml/integrations/lightning/orchestrators/lightning_orchestrator.py +25 -100
  15. zenml/integrations/skypilot/orchestrators/skypilot_base_vm_orchestrator.py +19 -8
  16. zenml/integrations/skypilot/utils.py +17 -13
  17. zenml/integrations/tekton/orchestrators/tekton_orchestrator.py +28 -12
  18. zenml/models/v2/core/step_run.py +1 -0
  19. zenml/orchestrators/__init__.py +2 -0
  20. zenml/orchestrators/base_orchestrator.py +137 -66
  21. zenml/orchestrators/input_utils.py +5 -13
  22. zenml/orchestrators/local/local_orchestrator.py +19 -9
  23. zenml/orchestrators/local_docker/local_docker_orchestrator.py +15 -5
  24. zenml/orchestrators/publish_utils.py +24 -0
  25. zenml/orchestrators/step_run_utils.py +1 -2
  26. zenml/pipelines/run_utils.py +12 -7
  27. zenml/step_operators/step_operator_entrypoint_configuration.py +1 -1
  28. zenml/zen_stores/rest_zen_store.py +147 -128
  29. zenml/zen_stores/sql_zen_store.py +27 -17
  30. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/METADATA +1 -1
  31. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/RECORD +34 -34
  32. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/LICENSE +0 -0
  33. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/WHEEL +0 -0
  34. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/entry_points.txt +0 -0
zenml/VERSION CHANGED
@@ -1 +1 @@
1
- 0.83.1.dev20250624
1
+ 0.83.1.dev20250625
zenml/cli/login.py CHANGED
@@ -229,6 +229,7 @@ def connect_to_pro_server(
229
229
  api_key: Optional[str] = None,
230
230
  refresh: bool = False,
231
231
  pro_api_url: Optional[str] = None,
232
+ verify_ssl: Union[str, bool] = True,
232
233
  ) -> None:
233
234
  """Connect the client to a ZenML Pro server.
234
235
 
@@ -238,6 +239,8 @@ def connect_to_pro_server(
238
239
  api_key: The API key to use to authenticate with the ZenML Pro server.
239
240
  refresh: Whether to force a new login flow with the ZenML Pro server.
240
241
  pro_api_url: The URL for the ZenML Pro API.
242
+ verify_ssl: Whether to verify the server's TLS certificate. If a string
243
+ is passed, it is interpreted as the path to a CA bundle file.
241
244
 
242
245
  Raises:
243
246
  ValueError: If incorrect parameters are provided.
@@ -280,7 +283,12 @@ def connect_to_pro_server(
280
283
  # server to connect to.
281
284
  if api_key:
282
285
  if server_url:
283
- connect_to_server(server_url, api_key=api_key, pro_server=True)
286
+ connect_to_server(
287
+ server_url,
288
+ api_key=api_key,
289
+ pro_server=True,
290
+ verify_ssl=verify_ssl,
291
+ )
284
292
  return
285
293
  else:
286
294
  raise ValueError(
@@ -299,6 +307,7 @@ def connect_to_pro_server(
299
307
  try:
300
308
  token = web_login(
301
309
  pro_api_url=pro_api_url,
310
+ verify_ssl=verify_ssl,
302
311
  )
303
312
  except AuthorizationException as e:
304
313
  cli_utils.error(f"Authorization error: {e}")
@@ -418,7 +427,9 @@ def connect_to_pro_server(
418
427
  f"Connecting to ZenML Pro server: {server.name} [{str(server.id)}] "
419
428
  )
420
429
 
421
- connect_to_server(server.url, api_key=api_key, pro_server=True)
430
+ connect_to_server(
431
+ server.url, api_key=api_key, pro_server=True, verify_ssl=verify_ssl
432
+ )
422
433
 
423
434
  # Update the stored server info with more accurate data taken from the
424
435
  # ZenML Pro workspace object.
@@ -555,7 +566,7 @@ def _fail_if_authentication_environment_variables_set() -> None:
555
566
  not return until the server exits or is stopped with CTRL+C
556
567
 
557
568
  * `--docker`: start the local ZenML server as a Docker container instead
558
- of a local process
569
+ of a local background process.
559
570
 
560
571
  * `--port`: use a custom TCP port value for the local ZenML server
561
572
 
@@ -775,6 +786,9 @@ def login(
775
786
  pro_server=server,
776
787
  refresh=True,
777
788
  pro_api_url=pro_api_url,
789
+ verify_ssl=ssl_ca_cert
790
+ if ssl_ca_cert is not None
791
+ else not no_verify_ssl,
778
792
  )
779
793
  return
780
794
 
@@ -822,6 +836,7 @@ def login(
822
836
  # Prefer the pro API URL extracted from the server info if
823
837
  # available
824
838
  pro_api_url=server_pro_api_url or pro_api_url,
839
+ verify_ssl=verify_ssl,
825
840
  )
826
841
  else:
827
842
  connect_to_server(
@@ -837,6 +852,7 @@ def login(
837
852
  api_key=api_key_value,
838
853
  refresh=refresh,
839
854
  pro_api_url=pro_api_url,
855
+ verify_ssl=verify_ssl,
840
856
  )
841
857
 
842
858
  elif current_non_local_server and not refresh:
@@ -861,6 +877,7 @@ def login(
861
877
  # Prefer the pro API URL extracted from the server info if
862
878
  # available
863
879
  pro_api_url=server_pro_api_url or pro_api_url,
880
+ verify_ssl=verify_ssl,
864
881
  )
865
882
  else:
866
883
  cli_utils.declare(
@@ -890,6 +907,7 @@ def login(
890
907
  connect_to_pro_server(
891
908
  api_key=api_key_value,
892
909
  pro_api_url=pro_api_url,
910
+ verify_ssl=verify_ssl,
893
911
  )
894
912
 
895
913
 
zenml/config/__init__.py CHANGED
@@ -23,12 +23,23 @@ configuration. This ``GlobalConfiguration`` object handles the serialization and
23
23
  deserialization of the configuration options that are stored in the file in
24
24
  order to persist the configuration across sessions.
25
25
  """
26
- from zenml.config.docker_settings import DockerSettings
27
- from zenml.config.resource_settings import ResourceSettings
26
+ from zenml.config.docker_settings import (
27
+ DockerSettings,
28
+ PythonPackageInstaller,
29
+ PythonEnvironmentExportMethod,
30
+ )
31
+ from zenml.config.resource_settings import ResourceSettings, ByteUnit
28
32
  from zenml.config.retry_config import StepRetryConfig
33
+ from zenml.config.schedule import Schedule
34
+ from zenml.config.store_config import StoreConfiguration
29
35
 
30
36
  __all__ = [
31
37
  "DockerSettings",
38
+ "PythonPackageInstaller",
39
+ "PythonEnvironmentExportMethod",
32
40
  "ResourceSettings",
41
+ "ByteUnit",
33
42
  "StepRetryConfig",
43
+ "Schedule",
44
+ "StoreConfiguration",
34
45
  ]
zenml/constants.py CHANGED
@@ -173,7 +173,6 @@ ENV_ZENML_DISABLE_STEP_LOGS_STORAGE = "ZENML_DISABLE_STEP_LOGS_STORAGE"
173
173
  ENV_ZENML_DISABLE_STEP_NAMES_IN_LOGS = "ZENML_DISABLE_STEP_NAMES_IN_LOGS"
174
174
  ENV_ZENML_IGNORE_FAILURE_HOOK = "ZENML_IGNORE_FAILURE_HOOK"
175
175
  ENV_ZENML_CUSTOM_SOURCE_ROOT = "ZENML_CUSTOM_SOURCE_ROOT"
176
- ENV_ZENML_WHEEL_PACKAGE_NAME = "ZENML_WHEEL_PACKAGE_NAME"
177
176
  ENV_ZENML_PIPELINE_RUN_API_TOKEN_EXPIRATION = (
178
177
  "ZENML_PIPELINE_API_TOKEN_EXPIRATION"
179
178
  )
zenml/exceptions.py CHANGED
@@ -220,3 +220,19 @@ class CustomFlavorImportError(ImportError):
220
220
 
221
221
  class MaxConcurrentTasksError(ZenMLBaseException):
222
222
  """Raised when the maximum number of concurrent tasks is reached."""
223
+
224
+
225
+ class RunMonitoringError(ZenMLBaseException):
226
+ """Raised when an error occurs while monitoring a pipeline run."""
227
+
228
+ def __init__(
229
+ self,
230
+ original_exception: BaseException,
231
+ ) -> None:
232
+ """Initializes the error.
233
+
234
+ Args:
235
+ original_exception: The original exception that occurred while
236
+ monitoring the pipeline run.
237
+ """
238
+ self.original_exception = original_exception
@@ -38,7 +38,7 @@ from zenml.integrations.airflow.flavors.airflow_orchestrator_flavor import (
38
38
  )
39
39
  from zenml.io import fileio
40
40
  from zenml.logger import get_logger
41
- from zenml.orchestrators import ContainerizedOrchestrator
41
+ from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
42
42
  from zenml.orchestrators.utils import get_orchestrator_run_name
43
43
  from zenml.stack import StackValidator
44
44
  from zenml.utils import io_utils
@@ -191,21 +191,29 @@ class AirflowOrchestrator(ContainerizedOrchestrator):
191
191
  if self.config.local:
192
192
  stack.check_local_paths()
193
193
 
194
- def prepare_or_run_pipeline(
194
+ def submit_pipeline(
195
195
  self,
196
196
  deployment: "PipelineDeploymentResponse",
197
197
  stack: "Stack",
198
198
  environment: Dict[str, str],
199
199
  placeholder_run: Optional["PipelineRunResponse"] = None,
200
- ) -> Any:
201
- """Creates and writes an Airflow DAG zip file.
200
+ ) -> Optional[SubmissionResult]:
201
+ """Submits a pipeline to the orchestrator.
202
+
203
+ This method should only submit the pipeline and not wait for it to
204
+ complete. If the orchestrator is configured to wait for the pipeline run
205
+ to complete, a function that waits for the pipeline run to complete can
206
+ be passed as part of the submission result.
202
207
 
203
208
  Args:
204
- deployment: The pipeline deployment to prepare or run.
209
+ deployment: The pipeline deployment to submit.
205
210
  stack: The stack the pipeline will run on.
206
211
  environment: Environment variables to set in the orchestration
207
- environment.
212
+ environment. These don't need to be set if running locally.
208
213
  placeholder_run: An optional placeholder run for the deployment.
214
+
215
+ Returns:
216
+ Optional submission result.
209
217
  """
210
218
  pipeline_settings = cast(
211
219
  AirflowOrchestratorSettings, self.get_settings(deployment)
@@ -277,6 +285,7 @@ class AirflowOrchestrator(ContainerizedOrchestrator):
277
285
  dag_generator_values=dag_generator_values,
278
286
  output_dir=pipeline_settings.dag_output_dir or self.dags_directory,
279
287
  )
288
+ return None
280
289
 
281
290
  def _apply_resource_settings(
282
291
  self,
@@ -19,7 +19,6 @@ from typing import (
19
19
  TYPE_CHECKING,
20
20
  Any,
21
21
  Dict,
22
- Iterator,
23
22
  List,
24
23
  Optional,
25
24
  Tuple,
@@ -60,7 +59,6 @@ from zenml.constants import (
60
59
  )
61
60
  from zenml.enums import (
62
61
  ExecutionStatus,
63
- MetadataResourceTypes,
64
62
  StackComponentType,
65
63
  )
66
64
  from zenml.integrations.aws.flavors.sagemaker_orchestrator_flavor import (
@@ -73,7 +71,7 @@ from zenml.integrations.aws.orchestrators.sagemaker_orchestrator_entrypoint_conf
73
71
  )
74
72
  from zenml.logger import get_logger
75
73
  from zenml.metadata.metadata_types import MetadataType, Uri
76
- from zenml.orchestrators import ContainerizedOrchestrator
74
+ from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
77
75
  from zenml.orchestrators.utils import get_orchestrator_run_name
78
76
  from zenml.stack import StackValidator
79
77
  from zenml.utils.env_utils import split_environment_variables
@@ -273,20 +271,25 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
273
271
  boto_session=boto_session, default_bucket=self.config.bucket
274
272
  )
275
273
 
276
- def prepare_or_run_pipeline(
274
+ def submit_pipeline(
277
275
  self,
278
276
  deployment: "PipelineDeploymentResponse",
279
277
  stack: "Stack",
280
278
  environment: Dict[str, str],
281
279
  placeholder_run: Optional["PipelineRunResponse"] = None,
282
- ) -> Iterator[Dict[str, MetadataType]]:
283
- """Prepares or runs a pipeline on Sagemaker.
280
+ ) -> Optional[SubmissionResult]:
281
+ """Submits a pipeline to the orchestrator.
282
+
283
+ This method should only submit the pipeline and not wait for it to
284
+ complete. If the orchestrator is configured to wait for the pipeline run
285
+ to complete, a function that waits for the pipeline run to complete can
286
+ be passed as part of the submission result.
284
287
 
285
288
  Args:
286
- deployment: The deployment to prepare or run.
287
- stack: The stack to run on.
289
+ deployment: The pipeline deployment to submit.
290
+ stack: The stack the pipeline will run on.
288
291
  environment: Environment variables to set in the orchestration
289
- environment.
292
+ environment. These don't need to be set if running locally.
290
293
  placeholder_run: An optional placeholder run for the deployment.
291
294
 
292
295
  Raises:
@@ -296,8 +299,8 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
296
299
  AWS SageMaker NetworkConfig class.
297
300
  ValueError: If the schedule is not valid.
298
301
 
299
- Yields:
300
- A dictionary of metadata related to the pipeline run.
302
+ Returns:
303
+ Optional submission result.
301
304
  """
302
305
  # sagemaker requires pipelineName to use alphanum and hyphens only
303
306
  unsanitized_orchestrator_run_name = get_orchestrator_run_name(
@@ -705,26 +708,14 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
705
708
  )
706
709
  logger.info(f"The schedule ARN is: {triggers[0]}")
707
710
 
711
+ schedule_metadata = {}
708
712
  try:
709
- from zenml.models import RunMetadataResource
710
-
711
713
  schedule_metadata = self.generate_schedule_metadata(
712
714
  schedule_arn=triggers[0]
713
715
  )
714
-
715
- Client().create_run_metadata(
716
- metadata=schedule_metadata, # type: ignore[arg-type]
717
- resources=[
718
- RunMetadataResource(
719
- id=deployment.schedule.id,
720
- type=MetadataResourceTypes.SCHEDULE,
721
- )
722
- ],
723
- )
724
716
  except Exception as e:
725
717
  logger.debug(
726
- "There was an error attaching metadata to the "
727
- f"schedule: {e}"
718
+ "There was an error generating schedule metadata: %s", e
728
719
  )
729
720
 
730
721
  logger.info(
@@ -749,6 +740,7 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
749
740
  logger.info(
750
741
  f"`aws scheduler delete-schedule --name {schedule_name}`"
751
742
  )
743
+ return SubmissionResult(metadata=schedule_metadata)
752
744
  else:
753
745
  # Execute the pipeline immediately if no schedule is specified
754
746
  execution = pipeline.start()
@@ -757,33 +749,40 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
757
749
  "when using the Sagemaker Orchestrator."
758
750
  )
759
751
 
760
- # Yield metadata based on the generated execution object
761
- yield from self.compute_metadata(
752
+ run_metadata = self.compute_metadata(
762
753
  execution_arn=execution.arn, settings=settings
763
754
  )
764
755
 
765
- # mainly for testing purposes, we wait for the pipeline to finish
756
+ _wait_for_completion = None
766
757
  if settings.synchronous:
767
- logger.info(
768
- "Executing synchronously. Waiting for pipeline to "
769
- "finish... \n"
770
- "At this point you can `Ctrl-C` out without cancelling the "
771
- "execution."
772
- )
773
- try:
774
- execution.wait(
775
- delay=POLLING_DELAY, max_attempts=MAX_POLLING_ATTEMPTS
776
- )
777
- logger.info("Pipeline completed successfully.")
778
- except WaiterError:
779
- raise RuntimeError(
780
- "Timed out while waiting for pipeline execution to "
781
- "finish. For long-running pipelines we recommend "
782
- "configuring your orchestrator for asynchronous "
783
- "execution. The following command does this for you: \n"
784
- f"`zenml orchestrator update {self.name} "
785
- f"--synchronous=False`"
758
+
759
+ def _wait_for_completion() -> None:
760
+ logger.info(
761
+ "Executing synchronously. Waiting for pipeline to "
762
+ "finish... \n"
763
+ "At this point you can `Ctrl-C` out without cancelling the "
764
+ "execution."
786
765
  )
766
+ try:
767
+ execution.wait(
768
+ delay=POLLING_DELAY,
769
+ max_attempts=MAX_POLLING_ATTEMPTS,
770
+ )
771
+ logger.info("Pipeline completed successfully.")
772
+ except WaiterError:
773
+ raise RuntimeError(
774
+ "Timed out while waiting for pipeline execution to "
775
+ "finish. For long-running pipelines we recommend "
776
+ "configuring your orchestrator for asynchronous "
777
+ "execution. The following command does this for you: \n"
778
+ f"`zenml orchestrator update {self.name} "
779
+ f"--synchronous=False`"
780
+ )
781
+
782
+ return SubmissionResult(
783
+ wait_for_completion=_wait_for_completion,
784
+ metadata=run_metadata,
785
+ )
787
786
 
788
787
  def get_pipeline_run_metadata(
789
788
  self, run_id: UUID
@@ -798,20 +797,15 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
798
797
  """
799
798
  execution_arn = os.environ[ENV_ZENML_SAGEMAKER_RUN_ID]
800
799
 
801
- run_metadata: Dict[str, "MetadataType"] = {}
802
-
803
800
  settings = cast(
804
801
  SagemakerOrchestratorSettings,
805
802
  self.get_settings(Client().get_pipeline_run(run_id)),
806
803
  )
807
804
 
808
- for metadata in self.compute_metadata(
805
+ return self.compute_metadata(
809
806
  execution_arn=execution_arn,
810
807
  settings=settings,
811
- ):
812
- run_metadata.update(metadata)
813
-
814
- return run_metadata
808
+ )
815
809
 
816
810
  def fetch_status(self, run: "PipelineRunResponse") -> ExecutionStatus:
817
811
  """Refreshes the status of a specific pipeline run.
@@ -873,14 +867,14 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
873
867
  self,
874
868
  execution_arn: str,
875
869
  settings: SagemakerOrchestratorSettings,
876
- ) -> Iterator[Dict[str, MetadataType]]:
870
+ ) -> Dict[str, MetadataType]:
877
871
  """Generate run metadata based on the generated Sagemaker Execution.
878
872
 
879
873
  Args:
880
874
  execution_arn: The ARN of the pipeline execution.
881
875
  settings: The Sagemaker orchestrator settings.
882
876
 
883
- Yields:
877
+ Returns:
884
878
  A dictionary of metadata related to the pipeline run.
885
879
  """
886
880
  # Orchestrator Run ID
@@ -901,7 +895,7 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
901
895
  ):
902
896
  metadata[METADATA_ORCHESTRATOR_LOGS_URL] = Uri(logs_url)
903
897
 
904
- yield metadata
898
+ return metadata
905
899
 
906
900
  def _compute_orchestrator_url(
907
901
  self,
@@ -979,7 +973,9 @@ class SagemakerOrchestrator(ContainerizedOrchestrator):
979
973
  return None
980
974
 
981
975
  @staticmethod
982
- def generate_schedule_metadata(schedule_arn: str) -> Dict[str, str]:
976
+ def generate_schedule_metadata(
977
+ schedule_arn: str,
978
+ ) -> Dict[str, MetadataType]:
983
979
  """Attaches metadata to the ZenML Schedules.
984
980
 
985
981
  Args:
@@ -19,7 +19,6 @@ from typing import (
19
19
  TYPE_CHECKING,
20
20
  Any,
21
21
  Dict,
22
- Iterator,
23
22
  List,
24
23
  Optional,
25
24
  Tuple,
@@ -63,7 +62,7 @@ from zenml.integrations.azure.orchestrators.azureml_orchestrator_entrypoint_conf
63
62
  )
64
63
  from zenml.logger import get_logger
65
64
  from zenml.metadata.metadata_types import MetadataType, Uri
66
- from zenml.orchestrators import ContainerizedOrchestrator
65
+ from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
67
66
  from zenml.orchestrators.utils import get_orchestrator_run_name
68
67
  from zenml.stack import StackValidator
69
68
  from zenml.utils.string_utils import b64_encode
@@ -198,27 +197,32 @@ class AzureMLOrchestrator(ContainerizedOrchestrator):
198
197
  command=" ".join(command + arguments),
199
198
  )
200
199
 
201
- def prepare_or_run_pipeline(
200
+ def submit_pipeline(
202
201
  self,
203
202
  deployment: "PipelineDeploymentResponse",
204
203
  stack: "Stack",
205
204
  environment: Dict[str, str],
206
205
  placeholder_run: Optional["PipelineRunResponse"] = None,
207
- ) -> Iterator[Dict[str, MetadataType]]:
208
- """Prepares or runs a pipeline on AzureML.
206
+ ) -> Optional[SubmissionResult]:
207
+ """Submits a pipeline to the orchestrator.
208
+
209
+ This method should only submit the pipeline and not wait for it to
210
+ complete. If the orchestrator is configured to wait for the pipeline run
211
+ to complete, a function that waits for the pipeline run to complete can
212
+ be passed as part of the submission result.
209
213
 
210
214
  Args:
211
- deployment: The deployment to prepare or run.
212
- stack: The stack to run on.
215
+ deployment: The pipeline deployment to submit.
216
+ stack: The stack the pipeline will run on.
213
217
  environment: Environment variables to set in the orchestration
214
- environment.
218
+ environment. These don't need to be set if running locally.
215
219
  placeholder_run: An optional placeholder run for the deployment.
216
220
 
217
221
  Raises:
218
222
  RuntimeError: If the creation of the schedule fails.
219
223
 
220
- Yields:
221
- A dictionary of metadata related to the pipeline run.
224
+ Returns:
225
+ Optional submission result.
222
226
  """
223
227
  # Authentication
224
228
  if connector := self.get_connector():
@@ -384,14 +388,11 @@ class AzureMLOrchestrator(ContainerizedOrchestrator):
384
388
  "Failed to create schedule for the pipeline "
385
389
  f"'{run_name}': {str(e)}"
386
390
  )
387
-
391
+ return None
388
392
  else:
389
393
  job = ml_client.jobs.create_or_update(pipeline_job)
390
394
  logger.info(f"Pipeline {run_name} has been started.")
391
395
 
392
- # Yield metadata based on the generated job object
393
- yield from self.compute_metadata(job)
394
-
395
396
  assert job.services is not None
396
397
  assert job.name is not None
397
398
 
@@ -401,9 +402,17 @@ class AzureMLOrchestrator(ContainerizedOrchestrator):
401
402
  f"{job.services['Studio'].endpoint}"
402
403
  )
403
404
 
405
+ _wait_for_completion = None
404
406
  if settings.synchronous:
405
- logger.info("Waiting for pipeline to finish...")
406
- ml_client.jobs.stream(job.name)
407
+
408
+ def _wait_for_completion() -> None:
409
+ logger.info("Waiting for pipeline to finish...")
410
+ ml_client.jobs.stream(job.name)
411
+
412
+ return SubmissionResult(
413
+ metadata=self.compute_metadata(job),
414
+ wait_for_completion=_wait_for_completion,
415
+ )
407
416
 
408
417
  def get_pipeline_run_metadata(
409
418
  self, run_id: UUID
@@ -518,13 +527,13 @@ class AzureMLOrchestrator(ContainerizedOrchestrator):
518
527
  else:
519
528
  raise ValueError("Unknown status for the pipeline job.")
520
529
 
521
- def compute_metadata(self, job: Any) -> Iterator[Dict[str, MetadataType]]:
530
+ def compute_metadata(self, job: Any) -> Dict[str, MetadataType]:
522
531
  """Generate run metadata based on the generated AzureML PipelineJob.
523
532
 
524
533
  Args:
525
534
  job: The corresponding PipelineJob object.
526
535
 
527
- Yields:
536
+ Returns:
528
537
  A dictionary of metadata related to the pipeline run.
529
538
  """
530
539
  # Metadata
@@ -538,7 +547,7 @@ class AzureMLOrchestrator(ContainerizedOrchestrator):
538
547
  if orchestrator_url := self._compute_orchestrator_url(job):
539
548
  metadata[METADATA_ORCHESTRATOR_URL] = Uri(orchestrator_url)
540
549
 
541
- yield metadata
550
+ return metadata
542
551
 
543
552
  @staticmethod
544
553
  def _compute_orchestrator_url(job: Any) -> Optional[str]: