zenml-nightly 0.83.1.dev20250624__py3-none-any.whl → 0.83.1.dev20250626__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. zenml/VERSION +1 -1
  2. zenml/cli/base.py +3 -2
  3. zenml/cli/login.py +21 -3
  4. zenml/cli/service_connectors.py +5 -12
  5. zenml/cli/stack.py +1 -5
  6. zenml/cli/utils.py +8 -52
  7. zenml/client.py +32 -40
  8. zenml/config/__init__.py +13 -2
  9. zenml/constants.py +0 -1
  10. zenml/exceptions.py +16 -0
  11. zenml/integrations/airflow/orchestrators/airflow_orchestrator.py +15 -6
  12. zenml/integrations/aws/container_registries/aws_container_registry.py +3 -1
  13. zenml/integrations/aws/orchestrators/sagemaker_orchestrator.py +54 -58
  14. zenml/integrations/azure/orchestrators/azureml_orchestrator.py +28 -19
  15. zenml/integrations/databricks/orchestrators/databricks_orchestrator.py +19 -63
  16. zenml/integrations/databricks/orchestrators/databricks_orchestrator_entrypoint_config.py +8 -3
  17. zenml/integrations/gcp/orchestrators/vertex_orchestrator.py +36 -61
  18. zenml/integrations/hyperai/orchestrators/hyperai_orchestrator.py +19 -22
  19. zenml/integrations/integration.py +23 -58
  20. zenml/integrations/kubeflow/orchestrators/kubeflow_orchestrator.py +28 -31
  21. zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py +33 -20
  22. zenml/integrations/lightning/orchestrators/lightning_orchestrator.py +25 -100
  23. zenml/integrations/skypilot/orchestrators/skypilot_base_vm_orchestrator.py +19 -8
  24. zenml/integrations/skypilot/utils.py +17 -13
  25. zenml/integrations/tekton/orchestrators/tekton_orchestrator.py +28 -12
  26. zenml/models/__init__.py +2 -0
  27. zenml/models/v2/core/service_connector.py +178 -108
  28. zenml/models/v2/core/step_run.py +1 -0
  29. zenml/orchestrators/__init__.py +2 -0
  30. zenml/orchestrators/base_orchestrator.py +137 -66
  31. zenml/orchestrators/input_utils.py +5 -13
  32. zenml/orchestrators/local/local_orchestrator.py +19 -9
  33. zenml/orchestrators/local_docker/local_docker_orchestrator.py +15 -5
  34. zenml/orchestrators/publish_utils.py +24 -0
  35. zenml/orchestrators/step_run_utils.py +1 -2
  36. zenml/pipelines/run_utils.py +12 -7
  37. zenml/service_connectors/service_connector.py +11 -61
  38. zenml/service_connectors/service_connector_utils.py +4 -2
  39. zenml/step_operators/step_operator_entrypoint_configuration.py +1 -1
  40. zenml/utils/package_utils.py +111 -1
  41. zenml/zen_server/routers/service_connectors_endpoints.py +7 -22
  42. zenml/zen_stores/migrations/versions/5bb25e95849c_add_internal_secrets.py +62 -0
  43. zenml/zen_stores/rest_zen_store.py +204 -132
  44. zenml/zen_stores/schemas/secret_schemas.py +5 -0
  45. zenml/zen_stores/schemas/service_connector_schemas.py +16 -14
  46. zenml/zen_stores/secrets_stores/service_connector_secrets_store.py +4 -1
  47. zenml/zen_stores/sql_zen_store.py +241 -119
  48. zenml/zen_stores/zen_store_interface.py +9 -1
  49. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250626.dist-info}/METADATA +1 -1
  50. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250626.dist-info}/RECORD +53 -53
  51. zenml/utils/integration_utils.py +0 -34
  52. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250626.dist-info}/LICENSE +0 -0
  53. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250626.dist-info}/WHEEL +0 -0
  54. {zenml_nightly-0.83.1.dev20250624.dist-info → zenml_nightly-0.83.1.dev20250626.dist-info}/entry_points.txt +0 -0
zenml/integrations/gcp/orchestrators/vertex_orchestrator.py

@@ -37,7 +37,6 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Dict,
-    Iterator,
     List,
     Optional,
     Tuple,
@@ -89,7 +88,7 @@ from zenml.integrations.gcp.vertex_custom_job_parameters import (
 from zenml.io import fileio
 from zenml.logger import get_logger
 from zenml.metadata.metadata_types import MetadataType, Uri
-from zenml.orchestrators import ContainerizedOrchestrator
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.orchestrators.utils import get_orchestrator_run_name
 from zenml.stack.stack_validator import StackValidator
 from zenml.utils.io_utils import get_global_config_directory
@@ -402,57 +401,29 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):

         return custom_job_component

-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Iterator[Dict[str, MetadataType]]:
-        """Creates a KFP JSON pipeline.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.

-        # noqa: DAR402
-
-        This is an intermediary representation of the pipeline which is then
-        deployed to Vertex AI Pipelines service.
-
-        How it works:
-        -------------
-        Before this method is called the `prepare_pipeline_deployment()` method
-        builds a Docker image that contains the code for the pipeline, all steps
-        the context around these files.
-
-        Based on this Docker image a callable is created which builds
-        container_ops for each step (`_construct_kfp_pipeline`). The function
-        `kfp.components.load_component_from_text` is used to create the
-        `ContainerOp`, because using the `dsl.ContainerOp` class directly is
-        deprecated when using the Kubeflow SDK v2. The step entrypoint command
-        with the entrypoint arguments is the command that will be executed by
-        the container created using the previously created Docker image.
-
-        This callable is then compiled into a JSON file that is used as the
-        intermediary representation of the Kubeflow pipeline.
-
-        This file then is submitted to the Vertex AI Pipelines service for
-        execution.
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.

         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.

-        Raises:
-            ValueError: If the attribute `pipeline_root` is not set, and it
-                can be not generated using the path of the artifact store in the
-                stack because it is not a
-                `zenml.integrations.gcp.artifact_store.GCPArtifactStore`. Also gets
-                raised if attempting to schedule pipeline run without using the
-                `zenml.integrations.gcp.artifact_store.GCPArtifactStore`.
-
-        Yields:
-            A dictionary of metadata related to the pipeline run.
+        Returns:
+            Optional submission result.
         """
         orchestrator_run_name = get_orchestrator_run_name(
             pipeline_name=deployment.pipeline_configuration.name
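Throughout this release, `prepare_or_run_pipeline` implementations are replaced by `submit_pipeline`, which returns an `Optional[SubmissionResult]` instead of yielding metadata. `SubmissionResult` is newly exported from `zenml.orchestrators` (see the `zenml/orchestrators/__init__.py` and `zenml/orchestrators/base_orchestrator.py` entries in the file list). Its definition is not shown in this diff; based on the keyword arguments used at the call sites below, a minimal sketch might look like this:

    # Sketch only: field names are inferred from the call sites in this
    # diff; the real definition lives in
    # zenml/orchestrators/base_orchestrator.py and may differ.
    from typing import Callable, Dict, Optional

    from zenml.metadata.metadata_types import MetadataType


    class SubmissionResult:
        def __init__(
            self,
            metadata: Optional[Dict[str, MetadataType]] = None,
            wait_for_completion: Optional[Callable[[], None]] = None,
        ) -> None:
            self.metadata = metadata
            self.wait_for_completion = wait_for_completion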
@@ -651,16 +622,13 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
             VertexOrchestratorSettings, self.get_settings(deployment)
         )

-        # Using the Google Cloud AIPlatform client, upload and execute the
-        # pipeline on the Vertex AI Pipelines service.
-        if metadata := self._upload_and_run_pipeline(
+        return self._upload_and_run_pipeline(
             pipeline_name=deployment.pipeline_configuration.name,
             pipeline_file_path=pipeline_file_path,
             run_name=orchestrator_run_name,
             settings=settings,
             schedule=deployment.schedule,
-        ):
-            yield from metadata
+        )

     def _upload_and_run_pipeline(
         self,
@@ -669,7 +637,7 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
         run_name: str,
         settings: VertexOrchestratorSettings,
         schedule: Optional["ScheduleResponse"] = None,
-    ) -> Iterator[Dict[str, MetadataType]]:
+    ) -> Optional[SubmissionResult]:
         """Uploads and run the pipeline on the Vertex AI Pipelines service.

         Args:
@@ -684,8 +652,8 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
             RuntimeError: If the Vertex Orchestrator fails to provision or any
                 other Runtime errors.

-        Yields:
-            A dictionary of metadata related to the pipeline run.
+        Returns:
+            Optional submission result.
         """
         # We have to replace the hyphens in the run name with underscores
         # and lower case the string, because the Vertex AI Pipelines service
@@ -772,6 +740,7 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
                 service_account=self.config.workload_service_account,
                 network=self.config.network,
             )
+            return None

         else:
             logger.info(
@@ -793,17 +762,23 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
                     run._dashboard_uri(),
                 )

-                # Yield metadata based on the generated job object
-                yield from self.compute_metadata(run)
+                _wait_for_completion = None

                 if settings.synchronous:
-                    logger.info(
-                        "Waiting for the Vertex AI Pipelines job to finish..."
-                    )
-                    run.wait()
-                    logger.info(
-                        "Vertex AI Pipelines job completed successfully."
-                    )
+
+                    def _wait_for_completion() -> None:
+                        logger.info(
+                            "Waiting for the Vertex AI Pipelines job to finish..."
+                        )
+                        run.wait()
+                        logger.info(
+                            "Vertex AI Pipelines job completed successfully."
+                        )
+
+                return SubmissionResult(
+                    metadata=self.compute_metadata(run),
+                    wait_for_completion=_wait_for_completion,
+                )

         except google_exceptions.ClientError as e:
             logger.error("Failed to create the Vertex AI Pipelines job: %s", e)
@@ -993,13 +968,13 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):

     def compute_metadata(
         self, job: aiplatform.PipelineJob
-    ) -> Iterator[Dict[str, MetadataType]]:
+    ) -> Dict[str, MetadataType]:
         """Generate run metadata based on the corresponding Vertex PipelineJob.

         Args:
             job: The corresponding PipelineJob object.

-        Yields:
+        Returns:
             A dictionary of metadata related to the pipeline run.
         """
         metadata: Dict[str, MetadataType] = {}
@@ -1016,7 +991,7 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
         if logs_url := self._compute_orchestrator_logs_url(job):
             metadata[METADATA_ORCHESTRATOR_LOGS_URL] = Uri(logs_url)

-        yield metadata
+        return metadata

     @staticmethod
     def _compute_orchestrator_url(
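Converting `compute_metadata` from a generator to a plain function simplifies the call site shown earlier: instead of threading metadata through nested generators, the dictionary is returned directly and attached to the submission result. Condensed from the hunks above:

    # Before: metadata was yielded through the prepare_or_run_pipeline
    # generator.
    yield from self.compute_metadata(run)

    # After: the dictionary is returned and attached to the submission
    # result.
    return SubmissionResult(
        metadata=self.compute_metadata(run),
        wait_for_completion=_wait_for_completion,
    )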
zenml/integrations/hyperai/orchestrators/hyperai_orchestrator.py

@@ -30,9 +30,7 @@ from zenml.integrations.hyperai.flavors.hyperai_orchestrator_flavor import (
     HyperAIOrchestratorSettings,
 )
 from zenml.logger import get_logger
-from zenml.orchestrators import (
-    ContainerizedOrchestrator,
-)
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.stack import Stack, StackValidator

 if TYPE_CHECKING:
@@ -159,14 +157,19 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             f"Failed to write {description} to HyperAI instance. Does the user have permissions to write?"
         )

-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Any:
-        """Sequentially runs all pipeline steps in Docker containers.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.
+
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.

         Assumes that:
         - A HyperAI (hyperai.ai) instance is running on the configured IP address.
@@ -179,26 +182,25 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             orchestrator.

         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.

         Raises:
-            RuntimeError: If a step fails.
+            RuntimeError: If running the pipeline fails.
+
+        Returns:
+            Optional submission result.
         """
         from zenml.integrations.hyperai.service_connectors.hyperai_service_connector import (
             HyperAIServiceConnector,
         )

-        # Basic Docker Compose definition
         compose_definition: Dict[str, Any] = {"version": "3", "services": {}}
-
-        # Get deployment id
         deployment_id = deployment.id

-        # Set environment
         os.environ[ENV_ZENML_HYPERAI_RUN_ID] = str(deployment_id)
         environment[ENV_ZENML_HYPERAI_RUN_ID] = str(deployment_id)
@@ -208,12 +210,9 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             # Get image
             image = self.get_image(deployment=deployment, step_name=step_name)

-            # Get settings
             step_settings = cast(
                 HyperAIOrchestratorSettings, self.get_settings(step)
             )
-
-            # Define container name as combination between deployment id and step name
             container_name = f"{deployment_id}-{step_name}"

             # Make Compose service definition for step
@@ -246,10 +245,9 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
                 }
             }

-            # Depending on whether it is a scheduled or a realtime pipeline, add
-            # potential .env file to service definition for deployment ID override.
             if deployment.schedule:
-                # drop ZENML_HYPERAI_ORCHESTRATOR_RUN_ID from environment but only if it is set
+                # If running on a schedule, the run ID is set dynamically via
+                # the .env file.
                 if ENV_ZENML_HYPERAI_RUN_ID in environment:
                     del environment[ENV_ZENML_HYPERAI_RUN_ID]
                 compose_definition["services"][container_name]["env_file"] = [
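The effect of the scheduled branch is that the generated Compose service reads the run ID from an env file at trigger time instead of baking it into the static service environment. A sketch of the resulting service entry (the keys besides `env_file` and the file path are hypothetical placeholders; the hunk truncates before the actual value):

    # Hypothetical shape of the generated Compose service for a
    # scheduled run.
    compose_definition["services"][container_name] = {
        "image": image,
        "container_name": container_name,
        # ZENML_HYPERAI_RUN_ID is intentionally absent from the static
        # environment and is injected via the env file per scheduled run.
        "env_file": ["/home/zenml/run.env"],  # placeholder path
    }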
@@ -282,15 +280,12 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             }
         )

-        # Convert into yaml
-        logger.info("Finalizing Docker Compose definition.")
         compose_definition_yaml: str = yaml.dump(compose_definition)

         # Connect to configured HyperAI instance
         logger.info(
             "Connecting to HyperAI instance and placing Docker Compose file."
         )
-        paramiko_client: paramiko.SSHClient
         if connector := self.get_connector():
             paramiko_client = connector.connect()
             if paramiko_client is None:
@@ -510,3 +505,5 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             raise RuntimeError(
                 "A cron expression or start time is required for scheduled pipelines."
             )
+
+        return None
zenml/integrations/integration.py

@@ -13,16 +13,14 @@
 # permissions and limitations under the License.
 """Base and meta classes for ZenML integrations."""

-import re
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast

-import pkg_resources
-from pkg_resources import Requirement
+from packaging.requirements import Requirement

 from zenml.integrations.registry import integration_registry
 from zenml.logger import get_logger
 from zenml.stack.flavor import Flavor
-from zenml.utils.integration_utils import parse_requirement
+from zenml.utils.package_utils import get_dependencies, requirement_installed

 if TYPE_CHECKING:
     from zenml.plugins.base_plugin_flavor import BasePluginFlavor
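The switch from the deprecated `pkg_resources` API to `packaging.requirements.Requirement` also retires the custom `parse_requirement` helper (the manifest shows `zenml/utils/integration_utils.py` deleted): `packaging` parses names, extras, and version specifiers natively. For reference, using the standard `packaging` API (the requirement string is illustrative):

    from packaging.requirements import Requirement

    req = Requirement("zenml[server]>=0.83.0")
    req.name       # "zenml"
    req.extras     # {"server"}
    req.specifier  # SpecifierSet(">=0.83.0")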
@@ -69,65 +67,32 @@ class Integration(metaclass=IntegrationMeta):
         Returns:
             True if all required packages are installed, False otherwise.
         """
-        for r in cls.get_requirements():
-            try:
-                # First check if the base package is installed
-                dist = pkg_resources.get_distribution(r)
-
-                # Next, check if the dependencies (including extras) are
-                # installed
-                deps: List[Requirement] = []
-
-                _, extras = parse_requirement(r)
-                if extras:
-                    extra_list = extras[1:-1].split(",")
-                    for extra in extra_list:
-                        try:
-                            requirements = dist.requires(extras=[extra])  # type: ignore[arg-type]
-                        except pkg_resources.UnknownExtra as e:
-                            logger.debug(f"Unknown extra: {str(e)}")
-                            return False
-                        deps.extend(requirements)
-                else:
-                    deps = dist.requires()
-
-                for ri in deps:
-                    try:
-                        # Remove the "extra == ..." part from the requirement string
-                        cleaned_req = re.sub(
-                            r"; extra == \"\w+\"", "", str(ri)
-                        )
-                        pkg_resources.get_distribution(cleaned_req)
-                    except pkg_resources.DistributionNotFound as e:
-                        logger.debug(
-                            f"Unable to find required dependency "
-                            f"'{e.req}' for requirement '{r}' "
-                            f"necessary for integration '{cls.NAME}'."
-                        )
-                        return False
-                    except pkg_resources.VersionConflict as e:
-                        logger.debug(
-                            f"Package version '{e.dist}' does not match "
-                            f"version '{e.req}' required by '{r}' "
-                            f"necessary for integration '{cls.NAME}'."
-                        )
-                        return False
-
-            except pkg_resources.DistributionNotFound as e:
-                logger.debug(
-                    f"Unable to find required package '{e.req}' for "
-                    f"integration {cls.NAME}."
-                )
-                return False
-            except pkg_resources.VersionConflict as e:
+        for requirement in cls.get_requirements():
+            parsed_requirement = Requirement(requirement)
+
+            if not requirement_installed(parsed_requirement):
                 logger.debug(
-                    f"Package version '{e.dist}' does not match version "
-                    f"'{e.req}' necessary for integration {cls.NAME}."
+                    "Requirement '%s' for integration '%s' is not installed "
+                    "or installed with the wrong version.",
+                    requirement,
+                    cls.NAME,
                 )
                 return False

+            dependencies = get_dependencies(parsed_requirement)
+
+            for dependency in dependencies:
+                if not requirement_installed(dependency):
+                    logger.debug(
+                        "Requirement '%s' for integration '%s' is not "
+                        "installed or installed with the wrong version.",
+                        dependency,
+                        cls.NAME,
+                    )
+                    return False
+
         logger.debug(
-            f"Integration {cls.NAME} is installed correctly with "
+            f"Integration '{cls.NAME}' is installed correctly with "
             f"requirements {cls.get_requirements()}."
         )
         return True
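The two helpers now carrying this logic, `requirement_installed` and `get_dependencies`, live in `zenml.utils.package_utils` (+111 lines in this release, not shown here). Their implementations are not part of this hunk; a minimal sketch of what they could look like on top of `importlib.metadata` and `packaging`, ignoring non-extra environment markers for brevity:

    # Sketch only: the real helpers in zenml/utils/package_utils.py may
    # differ.
    from importlib.metadata import PackageNotFoundError, metadata, version
    from typing import List

    from packaging.requirements import Requirement
    from packaging.version import Version


    def requirement_installed(requirement: Requirement) -> bool:
        # A requirement counts as installed if the distribution exists
        # and its version satisfies the specifier.
        try:
            installed_version = Version(version(requirement.name))
        except PackageNotFoundError:
            return False
        return requirement.specifier.contains(
            installed_version, prereleases=True
        )


    def get_dependencies(requirement: Requirement) -> List[Requirement]:
        # Collect the direct dependencies of the installed distribution,
        # keeping unconditional ones plus those activated by the
        # requested extras. Other markers are ignored in this sketch.
        dependencies: List[Requirement] = []
        for raw in metadata(requirement.name).get_all("Requires-Dist") or []:
            dependency = Requirement(raw)
            if dependency.marker is None:
                dependencies.append(dependency)
            elif any(
                dependency.marker.evaluate({"extra": extra})
                for extra in requirement.extras
            ):
                dependencies.append(dependency)
        return dependencies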
zenml/integrations/kubeflow/orchestrators/kubeflow_orchestrator.py

@@ -69,7 +69,7 @@ from zenml.integrations.kubeflow.flavors.kubeflow_orchestrator_flavor import (
 from zenml.io import fileio
 from zenml.logger import get_logger
 from zenml.metadata.metadata_types import MetadataType, Uri
-from zenml.orchestrators import ContainerizedOrchestrator
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.orchestrators.utils import get_orchestrator_run_name
 from zenml.stack import StackValidator
 from zenml.utils import io_utils, settings_utils, yaml_utils
@@ -466,47 +466,33 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):

         return pipeline_task

-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Any:
-        """Creates a kfp yaml file.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.

-        This functions as an intermediary representation of the pipeline which
-        is then deployed to the kubeflow pipelines instance.
-
-        How it works:
-        -------------
-        Before this method is called the `prepare_pipeline_deployment()`
-        method builds a docker image that contains the code for the
-        pipeline, all steps the context around these files.
-
-        Based on this docker image a callable is created which builds
-        container_ops for each step (`_construct_kfp_pipeline`).
-        To do this the entrypoint of the docker image is configured to
-        run the correct step within the docker image. The dependencies
-        between these container_ops are then also configured onto each
-        container_op by pointing at the downstream steps.
-
-        This callable is then compiled into a kfp yaml file that is used as
-        the intermediary representation of the kubeflow pipeline.
-
-        This file, together with some metadata, runtime configurations is
-        then uploaded into the kubeflow pipelines cluster for execution.
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.

         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.

         Raises:
             RuntimeError: If trying to run a pipeline in a notebook
                 environment.
+
+        Returns:
+            Optional submission result.
         """
         # First check whether the code running in a notebook
         if Environment.in_notebook():
@@ -672,7 +658,7 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):

         # using the kfp client uploads the pipeline to kubeflow pipelines and
         # runs it there
-        self._upload_and_run_pipeline(
+        return self._upload_and_run_pipeline(
             deployment=deployment,
             pipeline_file_path=pipeline_file_path,
             run_name=orchestrator_run_name,
@@ -683,7 +669,7 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
         deployment: "PipelineDeploymentResponse",
         pipeline_file_path: str,
         run_name: str,
-    ) -> None:
+    ) -> Optional[SubmissionResult]:
         """Tries to upload and run a KFP pipeline.

         Args:
@@ -693,6 +679,9 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):

         Raises:
             RuntimeError: If Kubeflow API returns an error.
+
+        Returns:
+            Optional submission result.
         """
         pipeline_name = deployment.pipeline_configuration.name
         settings = cast(
@@ -788,8 +777,14 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
             )

             if settings.synchronous:
-                client.wait_for_run_completion(
-                    run_id=result.run_id, timeout=settings.timeout
+
+                def _wait_for_completion() -> None:
+                    client.wait_for_run_completion(
+                        run_id=result.run_id, timeout=settings.timeout
+                    )
+
+                return SubmissionResult(
+                    wait_for_completion=_wait_for_completion
                 )
         except urllib3.exceptions.HTTPError as error:
             if kubernetes_context:
@@ -811,6 +806,8 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
                 f"Failed to upload Kubeflow pipeline: {error}. {msg}",
             )

+        return None
+
     def get_orchestrator_run_id(self) -> str:
         """Returns the active orchestrator run id.
zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py

@@ -33,7 +33,6 @@
 import os
 from typing import (
     TYPE_CHECKING,
-    Any,
     Dict,
     List,
     Optional,
@@ -62,7 +61,7 @@ from zenml.integrations.kubernetes.orchestrators.manifest_utils import (
 )
 from zenml.integrations.kubernetes.pod_settings import KubernetesPodSettings
 from zenml.logger import get_logger
-from zenml.orchestrators import ContainerizedOrchestrator
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.orchestrators.utils import get_orchestrator_run_name
 from zenml.stack import StackValidator
@@ -388,24 +387,32 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
         """
         return f"zenml-token-{deployment_id}"

-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Any:
-        """Runs the pipeline in Kubernetes.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.
+
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.

         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.

         Raises:
-            RuntimeError: If the Kubernetes orchestrator is not configured.
+            RuntimeError: If a schedule without cron expression is given.
+
+        Returns:
+            Optional submission result.
         """
         for step_name, step in deployment.step_configurations.items():
             if self.requires_resources_in_orchestration_environment(step):
@@ -536,7 +543,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                 f"Scheduling Kubernetes run `{pod_name}` with CRON expression "
                 f'`"{cron_expression}"`.'
             )
-            return
+            return None
         else:
             # Create and run the orchestrator pod.
             pod_manifest = build_pod_manifest(
@@ -565,18 +572,23 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                 startup_timeout=settings.pod_startup_timeout,
             )

-            # Wait for the orchestrator pod to finish and stream logs.
             if settings.synchronous:
-                logger.info(
-                    "Waiting for Kubernetes orchestrator pod to finish..."
-                )
-                kube_utils.wait_pod(
-                    kube_client_fn=self.get_kube_client,
-                    pod_name=pod_name,
-                    namespace=self.config.kubernetes_namespace,
-                    exit_condition_lambda=kube_utils.pod_is_done,
-                    timeout_sec=settings.timeout,
-                    stream_logs=True,
+
+                def _wait_for_run_to_finish() -> None:
+                    logger.info(
+                        "Waiting for Kubernetes orchestrator pod to finish..."
+                    )
+                    kube_utils.wait_pod(
+                        kube_client_fn=self.get_kube_client,
+                        pod_name=pod_name,
+                        namespace=self.config.kubernetes_namespace,
+                        exit_condition_lambda=kube_utils.pod_is_done,
+                        timeout_sec=settings.timeout,
+                        stream_logs=True,
+                    )
+
+                return SubmissionResult(
+                    wait_for_completion=_wait_for_run_to_finish
                 )
             else:
                 logger.info(
@@ -585,6 +597,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                 f"Run the following command to inspect the logs: "
                 f"`kubectl logs {pod_name} -n {self.config.kubernetes_namespace}`."
             )
+            return None

     def _get_service_account_name(
         self, settings: KubernetesOrchestratorSettings