zenml-nightly 0.83.0.dev20250623__py3-none-any.whl → 0.83.1.dev20250625__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (158)
  1. zenml/VERSION +1 -1
  2. zenml/cli/login.py +21 -3
  3. zenml/config/__init__.py +13 -2
  4. zenml/constants.py +0 -1
  5. zenml/exceptions.py +16 -0
  6. zenml/integrations/airflow/orchestrators/airflow_orchestrator.py +15 -6
  7. zenml/integrations/aws/orchestrators/sagemaker_orchestrator.py +54 -58
  8. zenml/integrations/azure/orchestrators/azureml_orchestrator.py +28 -19
  9. zenml/integrations/databricks/orchestrators/databricks_orchestrator.py +19 -63
  10. zenml/integrations/gcp/orchestrators/vertex_orchestrator.py +36 -61
  11. zenml/integrations/hyperai/orchestrators/hyperai_orchestrator.py +19 -22
  12. zenml/integrations/kubeflow/orchestrators/kubeflow_orchestrator.py +28 -31
  13. zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py +33 -20
  14. zenml/integrations/lightning/orchestrators/lightning_orchestrator.py +25 -100
  15. zenml/integrations/skypilot/orchestrators/skypilot_base_vm_orchestrator.py +19 -8
  16. zenml/integrations/skypilot/utils.py +17 -13
  17. zenml/integrations/tekton/orchestrators/tekton_orchestrator.py +28 -12
  18. zenml/models/v2/core/step_run.py +1 -0
  19. zenml/orchestrators/__init__.py +2 -0
  20. zenml/orchestrators/base_orchestrator.py +137 -66
  21. zenml/orchestrators/input_utils.py +5 -13
  22. zenml/orchestrators/local/local_orchestrator.py +19 -9
  23. zenml/orchestrators/local_docker/local_docker_orchestrator.py +15 -5
  24. zenml/orchestrators/publish_utils.py +24 -0
  25. zenml/orchestrators/step_run_utils.py +1 -2
  26. zenml/pipelines/run_utils.py +12 -7
  27. zenml/step_operators/step_operator_entrypoint_configuration.py +1 -1
  28. zenml/zen_server/dashboard/assets/{404-DmJUgorp.js → 404-B5eko6XL.js} +1 -1
  29. zenml/zen_server/dashboard/assets/{@reactflow-8OCk19Fi.js → @reactflow-B_iCtR7X.js} +1 -1
  30. zenml/zen_server/dashboard/assets/{AlertDialogDropdownItem-CZW4QyWn.js → AlertDialogDropdownItem-DsOmO1FH.js} +1 -1
  31. zenml/zen_server/dashboard/assets/{ButtonGroup-DFWWFGUE.js → ButtonGroup-4sPZDv70.js} +1 -1
  32. zenml/zen_server/dashboard/assets/{CodeSnippet-D2HkkAGr.js → CodeSnippet-Ctplhzdc.js} +1 -1
  33. zenml/zen_server/dashboard/assets/{CollapsibleCard-CnS09ljw.js → CollapsibleCard-CBKenz9f.js} +1 -1
  34. zenml/zen_server/dashboard/assets/{ComponentBadge-CDgdd0Ks.js → ComponentBadge-Cnecw3qz.js} +1 -1
  35. zenml/zen_server/dashboard/assets/{ComponentIcon-CbbOc7lb.js → ComponentIcon-CMiVW-O6.js} +1 -1
  36. zenml/zen_server/dashboard/assets/{DeleteAlertDialog-VIOMDLmx.js → DeleteAlertDialog-DEI0YDzP.js} +1 -1
  37. zenml/zen_server/dashboard/assets/{DialogItem-ClFCqxEp.js → DialogItem-CRCDpYU6.js} +1 -1
  38. zenml/zen_server/dashboard/assets/{Error-CQzjbDcN.js → Error-BG6f_WRd.js} +1 -1
  39. zenml/zen_server/dashboard/assets/{ExecutionStatus-CWreILP0.js → ExecutionStatus-BuhNAE9w.js} +1 -1
  40. zenml/zen_server/dashboard/assets/{Helpbox-CiKxG5_X.js → Helpbox-DtUG2Bf_.js} +1 -1
  41. zenml/zen_server/dashboard/assets/{Infobox-CGxFvqzi.js → Infobox-CSBRrM6r.js} +1 -1
  42. zenml/zen_server/dashboard/assets/{LeftSideMenu-DCsKdIjC.js → LeftSideMenu-DPsCCK3z.js} +1 -1
  43. zenml/zen_server/dashboard/assets/{NestedCollapsible-3M4llYtH.js → NestedCollapsible-CMuDIJlp.js} +1 -1
  44. zenml/zen_server/dashboard/assets/{NumberBox-C0mQktmV.js → NumberBox-DtCv7jh3.js} +1 -1
  45. zenml/zen_server/dashboard/assets/Pagination-CWnEpSpN.js +1 -0
  46. zenml/zen_server/dashboard/assets/{Partials-DSjkttlz.js → Partials-CfHD6OH5.js} +1 -1
  47. zenml/zen_server/dashboard/assets/{ProCta-Dm5cWKpS.js → ProCta-CNyp04C8.js} +1 -1
  48. zenml/zen_server/dashboard/assets/{ProviderIcon-DPwMR6nF.js → ProviderIcon-DHejyg7C.js} +1 -1
  49. zenml/zen_server/dashboard/assets/{ProviderRadio-DEDNRgAb.js → ProviderRadio-tGtie8Gc.js} +1 -1
  50. zenml/zen_server/dashboard/assets/RunsBody-mYwMcWWj.js +1 -0
  51. zenml/zen_server/dashboard/assets/SearchField-BtUi6cYl.js +1 -0
  52. zenml/zen_server/dashboard/assets/{SecretTooltip-CZTRnaCV.js → SecretTooltip-B5u1UsQ9.js} +1 -1
  53. zenml/zen_server/dashboard/assets/{SetPassword-BjNGDC5e.js → SetPassword-BmbgL_ed.js} +1 -1
  54. zenml/zen_server/dashboard/assets/{SheetHeader-CASpN2Lz.js → SheetHeader-DkH7aG9K.js} +1 -1
  55. zenml/zen_server/dashboard/assets/StackComponentList-Bi8BKqCu.js +1 -0
  56. zenml/zen_server/dashboard/assets/StackList-Cvxapo0p.js +1 -0
  57. zenml/zen_server/dashboard/assets/{StackName-ojLC6xdl.js → StackName-CFSZL8ec.js} +1 -1
  58. zenml/zen_server/dashboard/assets/Tabs-D4dv48ry.js +1 -0
  59. zenml/zen_server/dashboard/assets/{Tick-BPrWnNlN.js → Tick-Qquvr4P3.js} +1 -1
  60. zenml/zen_server/dashboard/assets/{UpdatePasswordSchemas-CNfKDo2Q.js → UpdatePasswordSchemas-D_DCETSO.js} +1 -1
  61. zenml/zen_server/dashboard/assets/{UsageReason-Cb-mpV8M.js → UsageReason-DhiUV1bu.js} +1 -1
  62. zenml/zen_server/dashboard/assets/{Wizard-Dg8Pmn5A.js → Wizard-BHvY75u_.js} +1 -1
  63. zenml/zen_server/dashboard/assets/{WizardFooter-BcNDIvlQ.js → WizardFooter-FQm8y-jP.js} +1 -1
  64. zenml/zen_server/dashboard/assets/{all-pipeline-runs-query-DCdax7I5.js → all-pipeline-runs-query-DpKw9WL9.js} +1 -1
  65. zenml/zen_server/dashboard/assets/{bulk-delete-C_kpIB9A.js → bulk-delete-CzYA--cC.js} +1 -1
  66. zenml/zen_server/dashboard/assets/{configuration-form-B2hmKGnF.js → configuration-form-DSoMMiPE.js} +1 -1
  67. zenml/zen_server/dashboard/assets/{constants-1EZZxtay.js → constants-DTfsIqHy.js} +1 -1
  68. zenml/zen_server/dashboard/assets/{create-stack-TKmMtrkQ.js → create-stack-BpZrmKDu.js} +1 -1
  69. zenml/zen_server/dashboard/assets/{delete-run-CCR9md_s.js → delete-run-BkyDsKQc.js} +1 -1
  70. zenml/zen_server/dashboard/assets/elk-worker.min-BdOC9sib.js +6263 -0
  71. zenml/zen_server/dashboard/assets/expand-full-BPiXpch2.js +1 -0
  72. zenml/zen_server/dashboard/assets/{form-DFJkaFDX.js → form-BgtamtJm.js} +1 -1
  73. zenml/zen_server/dashboard/assets/{form-schemas-CrznJVzA.js → form-schemas-dyDkAxXP.js} +1 -1
  74. zenml/zen_server/dashboard/assets/{index-BjUu1mP4.js → index-BBt0LDtR.js} +1 -1
  75. zenml/zen_server/dashboard/assets/index-BfNISy0X.css +1 -0
  76. zenml/zen_server/dashboard/assets/{index-BFqbGSck.js → index-BgEfQ3_G.js} +4 -4
  77. zenml/zen_server/dashboard/assets/{index-U992soPJ.js → index-eoDB_1XX.js} +1 -1
  78. zenml/zen_server/dashboard/assets/{layout-Do9YI4QX.js → layout-o1x87a3q.js} +1 -1
  79. zenml/zen_server/dashboard/assets/{login-mutation-D3tFP6Wm.js → login-mutation-C1hvP_cX.js} +1 -1
  80. zenml/zen_server/dashboard/assets/page-4xUZpMN0.js +31 -0
  81. zenml/zen_server/dashboard/assets/page-4zc4xPv2.js +2 -0
  82. zenml/zen_server/dashboard/assets/page-B0104V6C.js +1 -0
  83. zenml/zen_server/dashboard/assets/page-BNJsjvof.js +1 -0
  84. zenml/zen_server/dashboard/assets/{page-sJjNT9xA.js → page-BPQ66vR-.js} +1 -1
  85. zenml/zen_server/dashboard/assets/{page-CAJ8B0vb.js → page-BQgSZ2nH.js} +1 -1
  86. zenml/zen_server/dashboard/assets/page-BXl2ZX6J.js +1 -0
  87. zenml/zen_server/dashboard/assets/{page-CtiuMP_r.js → page-BxeZrG_t.js} +1 -1
  88. zenml/zen_server/dashboard/assets/{page-Cal6XQ4U.js → page-C2A-2Cj_.js} +1 -1
  89. zenml/zen_server/dashboard/assets/page-C2i-C7jv.js +1 -0
  90. zenml/zen_server/dashboard/assets/{page-DJIGaUQ9.js → page-C3JfJxuR.js} +1 -1
  91. zenml/zen_server/dashboard/assets/{page-ChGcZI_6.js → page-CESEqC2L.js} +1 -1
  92. zenml/zen_server/dashboard/assets/{page-DCcuPZ8P.js → page-CF8cTZ7l.js} +1 -1
  93. zenml/zen_server/dashboard/assets/page-CKjsimVu.js +1 -0
  94. zenml/zen_server/dashboard/assets/{page-DNjKHjnH.js → page-COLzBwff.js} +1 -1
  95. zenml/zen_server/dashboard/assets/page-COifg5fa.js +1 -0
  96. zenml/zen_server/dashboard/assets/{page-CnbIYE80.js → page-CQeJuA8T.js} +1 -1
  97. zenml/zen_server/dashboard/assets/{page-DKK6ulgy.js → page-CcjWEjre.js} +1 -1
  98. zenml/zen_server/dashboard/assets/{page-9RjCitFH.js → page-CefGLeWy.js} +1 -1
  99. zenml/zen_server/dashboard/assets/{page-DUKbOhaD.js → page-CfxpV3j4.js} +1 -1
  100. zenml/zen_server/dashboard/assets/{page-D9Hfx6GV.js → page-ClcUzawe.js} +1 -1
  101. zenml/zen_server/dashboard/assets/page-Ct_LB3zo.js +1 -0
  102. zenml/zen_server/dashboard/assets/page-D-ZWUMYY.js +1 -0
  103. zenml/zen_server/dashboard/assets/{page-CUaMMoPG.js → page-D-tJ_Y0a.js} +1 -1
  104. zenml/zen_server/dashboard/assets/{page-CdZCmszX.js → page-DHrvih9u.js} +1 -1
  105. zenml/zen_server/dashboard/assets/{page-DUK0Nd_1.js → page-DMhaHZDw.js} +1 -1
  106. zenml/zen_server/dashboard/assets/{page-CAUYrfui.js → page-DOCOmmKn.js} +1 -1
  107. zenml/zen_server/dashboard/assets/{page-akLcPcKw.js → page-DcQmxKLp.js} +1 -1
  108. zenml/zen_server/dashboard/assets/page-Dh4GRWw5.js +1 -0
  109. zenml/zen_server/dashboard/assets/{page-DwVPpCFg.js → page-DiHZK-1w.js} +1 -1
  110. zenml/zen_server/dashboard/assets/page-Dn7ZNapg.js +1 -0
  111. zenml/zen_server/dashboard/assets/page-Dy4vSQY7.js +1 -0
  112. zenml/zen_server/dashboard/assets/{page-CN7lkvXr.js → page-QrvWQwZb.js} +1 -1
  113. zenml/zen_server/dashboard/assets/{page-BrT0_zSJ.js → page-RF3Fup0q.js} +1 -1
  114. zenml/zen_server/dashboard/assets/page-WuvCrN47.js +1 -0
  115. zenml/zen_server/dashboard/assets/{page-Be3R2uYn.js → page-_WnHBI1F.js} +1 -1
  116. zenml/zen_server/dashboard/assets/{page-C210HcBA.js → page-ghjVNgVE.js} +1 -1
  117. zenml/zen_server/dashboard/assets/{page-BCrKmYIZ.js → page-iDsDiDXw.js} +1 -1
  118. zenml/zen_server/dashboard/assets/{page-ClvmVesa.js → page-rVhXI5ZO.js} +1 -1
  119. zenml/zen_server/dashboard/assets/{page-DEohTSz6.js → page-uxjMX8Iq.js} +1 -1
  120. zenml/zen_server/dashboard/assets/{persist-Dec_w7aB.js → persist-BsdEtCkd.js} +1 -1
  121. zenml/zen_server/dashboard/assets/{persist-DWMWVP-y.js → persist-CFPbMcJX.js} +1 -1
  122. zenml/zen_server/dashboard/assets/{resource-tyes-list-o2LXiMay.js → resource-tyes-list-79FqS3LY.js} +1 -1
  123. zenml/zen_server/dashboard/assets/{resource-type-tooltip-DwHrJstL.js → resource-type-tooltip-BL9ZTRKi.js} +1 -1
  124. zenml/zen_server/dashboard/assets/{service-connectors-DSEMwJ5A.js → service-connectors-Q8h7-_rG.js} +1 -1
  125. zenml/zen_server/dashboard/assets/{service-jxtvgks0.js → service-k-9Vsb30.js} +1 -1
  126. zenml/zen_server/dashboard/assets/{sharedSchema-BXzg0EZz.js → sharedSchema-C_HkejsG.js} +1 -1
  127. zenml/zen_server/dashboard/assets/{stack-detail-query-Cm0fsgo-.js → stack-detail-query-CNmVZ0Bo.js} +1 -1
  128. zenml/zen_server/dashboard/assets/{update-current-user-mutation-D5MjcQ6F.js → update-current-user-mutation-Ca-Lmwuj.js} +1 -1
  129. zenml/zen_server/dashboard/assets/{update-server-settings-mutation-CmnxdxiK.js → update-server-settings-mutation-Bwe3gUt4.js} +1 -1
  130. zenml/zen_server/dashboard/index.html +4 -4
  131. zenml/zen_stores/migrations/versions/0.83.1_release.py +23 -0
  132. zenml/zen_stores/rest_zen_store.py +147 -128
  133. zenml/zen_stores/sql_zen_store.py +27 -17
  134. {zenml_nightly-0.83.0.dev20250623.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/METADATA +5 -9
  135. {zenml_nightly-0.83.0.dev20250623.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/RECORD +138 -134
  136. zenml/zen_server/dashboard/assets/RunsBody-BRBn1e2O.js +0 -1
  137. zenml/zen_server/dashboard/assets/SearchField-DY6-UbRT.js +0 -1
  138. zenml/zen_server/dashboard/assets/StackComponentList-Be1pQt9m.js +0 -1
  139. zenml/zen_server/dashboard/assets/StackList-BdiR5DvR.js +0 -1
  140. zenml/zen_server/dashboard/assets/Tabs-DNSKblCM.js +0 -1
  141. zenml/zen_server/dashboard/assets/index-DuhuqTCI.css +0 -1
  142. zenml/zen_server/dashboard/assets/page-B0PsXWiT.js +0 -1
  143. zenml/zen_server/dashboard/assets/page-BcRI3-aR.js +0 -29
  144. zenml/zen_server/dashboard/assets/page-BgknnddT.js +0 -1
  145. zenml/zen_server/dashboard/assets/page-Bs3W2FDi.js +0 -1
  146. zenml/zen_server/dashboard/assets/page-C6KaiZ_W.js +0 -1
  147. zenml/zen_server/dashboard/assets/page-CHxVhF3x.js +0 -1
  148. zenml/zen_server/dashboard/assets/page-CktmtZ8Z.js +0 -1
  149. zenml/zen_server/dashboard/assets/page-CoXzjeEY.js +0 -1
  150. zenml/zen_server/dashboard/assets/page-D9iuB88h.js +0 -1
  151. zenml/zen_server/dashboard/assets/page-DYOucPtA.js +0 -1
  152. zenml/zen_server/dashboard/assets/page-DpqRelAy.js +0 -1
  153. zenml/zen_server/dashboard/assets/page-XURWnYZP.js +0 -1
  154. zenml/zen_server/dashboard/assets/page-abw-2oeW.js +0 -1
  155. zenml/zen_server/dashboard/assets/page-n9ejQ2V3.js +0 -2
  156. {zenml_nightly-0.83.0.dev20250623.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/LICENSE +0 -0
  157. {zenml_nightly-0.83.0.dev20250623.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/WHEEL +0 -0
  158. {zenml_nightly-0.83.0.dev20250623.dist-info → zenml_nightly-0.83.1.dev20250625.dist-info}/entry_points.txt +0 -0
--- a/zenml/integrations/databricks/orchestrators/databricks_orchestrator.py
+++ b/zenml/integrations/databricks/orchestrators/databricks_orchestrator.py
@@ -15,7 +15,7 @@
 
 import itertools
 import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, cast
 from uuid import UUID
 
 from databricks.sdk import WorkspaceClient as DatabricksClient
@@ -48,10 +48,12 @@ from zenml.io import fileio
 from zenml.logger import get_logger
 from zenml.metadata.metadata_types import MetadataType, Uri
 from zenml.models.v2.core.schedule import ScheduleResponse
+from zenml.orchestrators import (
+    SubmissionResult,
+    WheeledOrchestrator,
+)
 from zenml.orchestrators.utils import get_orchestrator_run_name
-from zenml.orchestrators.wheeled_orchestrator import WheeledOrchestrator
 from zenml.stack import StackValidator
-from zenml.utils import io_utils
 from zenml.utils.package_utils import clean_requirements
 from zenml.utils.pipeline_docker_image_builder import (
     PipelineDockerImageBuilder,
@@ -67,20 +69,13 @@ logger = get_logger(__name__)
 ZENML_STEP_DEFAULT_ENTRYPOINT_COMMAND = "entrypoint.main"
 DATABRICKS_WHEELS_DIRECTORY_PREFIX = "dbfs:/FileStore/zenml"
 DATABRICKS_LOCAL_FILESYSTEM_PREFIX = "file:/"
-DATABRICKS_CLUSTER_DEFAULT_NAME = "zenml-databricks-cluster"
 DATABRICKS_SPARK_DEFAULT_VERSION = "15.3.x-scala2.12"
 DATABRICKS_JOB_ID_PARAMETER_REFERENCE = "{{job.id}}"
 DATABRICKS_ZENML_DEFAULT_CUSTOM_REPOSITORY_PATH = "."
 
 
 class DatabricksOrchestrator(WheeledOrchestrator):
-    """Base class for Orchestrator responsible for running pipelines remotely in a VM.
-
-    This orchestrator does not support running on a schedule.
-    """
-
-    # The default instance type to use if none is specified in settings
-    DEFAULT_INSTANCE_TYPE: Optional[str] = None
+    """Databricks orchestrator."""
 
     @property
     def validator(self) -> Optional[StackValidator]:
@@ -168,69 +163,39 @@ class DatabricksOrchestrator(WheeledOrchestrator):
             f"{ENV_ZENML_DATABRICKS_ORCHESTRATOR_RUN_ID}."
         )
 
-    @property
-    def root_directory(self) -> str:
-        """Path to the root directory for all files concerning this orchestrator.
-
-        Returns:
-            Path to the root directory.
-        """
-        return os.path.join(
-            io_utils.get_global_config_directory(),
-            "databricks",
-            str(self.id),
-        )
-
-    @property
-    def pipeline_directory(self) -> str:
-        """Returns path to a directory in which the kubeflow pipeline files are stored.
-
-        Returns:
-            Path to the pipeline directory.
-        """
-        return os.path.join(self.root_directory, "pipelines")
-
     def setup_credentials(self) -> None:
         """Set up credentials for the orchestrator."""
         connector = self.get_connector()
         assert connector is not None
         connector.configure_local_client()
 
-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Any:
-        """Creates a wheel and uploads the pipeline to Databricks.
-
-        This functions as an intermediary representation of the pipeline which
-        is then deployed to the kubeflow pipelines instance.
-
-        How it works:
-        -------------
-        Before this method is called the `prepare_pipeline_deployment()`
-        method builds a docker image that contains the code for the
-        pipeline, all steps the context around these files.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.
 
-        Based on this docker image a callable is created which builds
-        task for each step (`_construct_databricks_pipeline`).
-        To do this the entrypoint of the docker image is configured to
-        run the correct step within the docker image. The dependencies
-        between these task are then also configured onto each
-        task by pointing at the downstream steps.
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.
 
         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.
 
         Raises:
             ValueError: If the schedule is not set or if the cron expression
                 is not set.
+
+        Returns:
+            Optional submission result.
         """
         settings = cast(
             DatabricksOrchestratorSettings, self.get_settings(deployment)
@@ -339,11 +304,6 @@ class DatabricksOrchestrator(WheeledOrchestrator):
         orchestrator_run_name = get_orchestrator_run_name(
             pipeline_name=deployment.pipeline_configuration.name
         )
-        # Get a filepath to use to save the finished yaml to
-        fileio.makedirs(self.pipeline_directory)
-        pipeline_file_path = os.path.join(
-            self.pipeline_directory, f"{orchestrator_run_name}.yaml"
-        )
 
         # Copy the repository to a temporary directory and add a setup.py file
         repository_temp_dir = (
@@ -382,11 +342,6 @@ class DatabricksOrchestrator(WheeledOrchestrator):
 
         fileio.rmtree(repository_temp_dir)
 
-        logger.info(
-            "Writing Databricks workflow definition to `%s`.",
-            pipeline_file_path,
-        )
-
        # using the databricks client uploads the pipeline to databricks
         job_cluster_key = self.sanitize_name(f"{deployment_id}")
         self._upload_and_run_pipeline(
@@ -399,6 +354,7 @@ class DatabricksOrchestrator(WheeledOrchestrator):
             job_cluster_key=job_cluster_key,
             schedule=deployment.schedule,
         )
+        return None
 
     def _upload_and_run_pipeline(
         self,
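The Databricks hunks above are one instance of a change repeated across this release: `prepare_or_run_pipeline` becomes `submit_pipeline`, which submits the run and returns an optional `SubmissionResult` instead of blocking. Below is a minimal sketch of an orchestrator implementing the new contract, assuming only the `SubmissionResult` fields visible in this diff (`metadata`, `wait_for_completion`); `MyOrchestrator` and `_launch_job` are hypothetical names, not part of this release.

```python
from typing import TYPE_CHECKING, Dict, Optional

from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult

if TYPE_CHECKING:
    from zenml.models import PipelineDeploymentResponse, PipelineRunResponse
    from zenml.stack import Stack


class MyOrchestrator(ContainerizedOrchestrator):  # hypothetical example
    def submit_pipeline(
        self,
        deployment: "PipelineDeploymentResponse",
        stack: "Stack",
        environment: Dict[str, str],
        placeholder_run: Optional["PipelineRunResponse"] = None,
    ) -> Optional[SubmissionResult]:
        # Submit the run to the backend without blocking.
        job = self._launch_job(deployment, environment)  # hypothetical helper

        def _wait_for_completion() -> None:
            # Only invoked by the caller if it decides to block.
            job.wait()

        # Returning None instead would make the submission fire-and-forget.
        return SubmissionResult(wait_for_completion=_wait_for_completion)
```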
--- a/zenml/integrations/gcp/orchestrators/vertex_orchestrator.py
+++ b/zenml/integrations/gcp/orchestrators/vertex_orchestrator.py
@@ -37,7 +37,6 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Dict,
-    Iterator,
     List,
     Optional,
     Tuple,
@@ -89,7 +88,7 @@ from zenml.integrations.gcp.vertex_custom_job_parameters import (
 from zenml.io import fileio
 from zenml.logger import get_logger
 from zenml.metadata.metadata_types import MetadataType, Uri
-from zenml.orchestrators import ContainerizedOrchestrator
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.orchestrators.utils import get_orchestrator_run_name
 from zenml.stack.stack_validator import StackValidator
 from zenml.utils.io_utils import get_global_config_directory
@@ -402,57 +401,29 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
 
         return custom_job_component
 
-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Iterator[Dict[str, MetadataType]]:
-        """Creates a KFP JSON pipeline.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.
 
-        # noqa: DAR402
-
-        This is an intermediary representation of the pipeline which is then
-        deployed to Vertex AI Pipelines service.
-
-        How it works:
-        -------------
-        Before this method is called the `prepare_pipeline_deployment()` method
-        builds a Docker image that contains the code for the pipeline, all steps
-        the context around these files.
-
-        Based on this Docker image a callable is created which builds
-        container_ops for each step (`_construct_kfp_pipeline`). The function
-        `kfp.components.load_component_from_text` is used to create the
-        `ContainerOp`, because using the `dsl.ContainerOp` class directly is
-        deprecated when using the Kubeflow SDK v2. The step entrypoint command
-        with the entrypoint arguments is the command that will be executed by
-        the container created using the previously created Docker image.
-
-        This callable is then compiled into a JSON file that is used as the
-        intermediary representation of the Kubeflow pipeline.
-
-        This file then is submitted to the Vertex AI Pipelines service for
-        execution.
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.
 
         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.
 
-        Raises:
-            ValueError: If the attribute `pipeline_root` is not set, and it
-                can be not generated using the path of the artifact store in the
-                stack because it is not a
-                `zenml.integrations.gcp.artifact_store.GCPArtifactStore`. Also gets
-                raised if attempting to schedule pipeline run without using the
-                `zenml.integrations.gcp.artifact_store.GCPArtifactStore`.
-
-        Yields:
-            A dictionary of metadata related to the pipeline run.
+        Returns:
+            Optional submission result.
         """
         orchestrator_run_name = get_orchestrator_run_name(
             pipeline_name=deployment.pipeline_configuration.name
@@ -651,16 +622,13 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
             VertexOrchestratorSettings, self.get_settings(deployment)
         )
 
-        # Using the Google Cloud AIPlatform client, upload and execute the
-        # pipeline on the Vertex AI Pipelines service.
-        if metadata := self._upload_and_run_pipeline(
+        return self._upload_and_run_pipeline(
             pipeline_name=deployment.pipeline_configuration.name,
             pipeline_file_path=pipeline_file_path,
             run_name=orchestrator_run_name,
             settings=settings,
             schedule=deployment.schedule,
-        ):
-            yield from metadata
+        )
 
     def _upload_and_run_pipeline(
         self,
@@ -669,7 +637,7 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
         run_name: str,
         settings: VertexOrchestratorSettings,
         schedule: Optional["ScheduleResponse"] = None,
-    ) -> Iterator[Dict[str, MetadataType]]:
+    ) -> Optional[SubmissionResult]:
         """Uploads and run the pipeline on the Vertex AI Pipelines service.
 
         Args:
@@ -684,8 +652,8 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
             RuntimeError: If the Vertex Orchestrator fails to provision or any
                 other Runtime errors.
 
-        Yields:
-            A dictionary of metadata related to the pipeline run.
+        Returns:
+            Optional submission result.
         """
         # We have to replace the hyphens in the run name with underscores
         # and lower case the string, because the Vertex AI Pipelines service
@@ -772,6 +740,7 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
                 service_account=self.config.workload_service_account,
                 network=self.config.network,
             )
+            return None
 
         else:
             logger.info(
@@ -793,17 +762,23 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
                     run._dashboard_uri(),
                 )
 
-                # Yield metadata based on the generated job object
-                yield from self.compute_metadata(run)
+                _wait_for_completion = None
 
                 if settings.synchronous:
-                    logger.info(
-                        "Waiting for the Vertex AI Pipelines job to finish..."
-                    )
-                    run.wait()
-                    logger.info(
-                        "Vertex AI Pipelines job completed successfully."
-                    )
+
+                    def _wait_for_completion() -> None:
+                        logger.info(
+                            "Waiting for the Vertex AI Pipelines job to finish..."
+                        )
+                        run.wait()
+                        logger.info(
+                            "Vertex AI Pipelines job completed successfully."
+                        )
+
+                return SubmissionResult(
+                    metadata=self.compute_metadata(run),
+                    wait_for_completion=_wait_for_completion,
+                )
 
         except google_exceptions.ClientError as e:
             logger.error("Failed to create the Vertex AI Pipelines job: %s", e)
@@ -993,13 +968,13 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
 
     def compute_metadata(
         self, job: aiplatform.PipelineJob
-    ) -> Iterator[Dict[str, MetadataType]]:
+    ) -> Dict[str, MetadataType]:
         """Generate run metadata based on the corresponding Vertex PipelineJob.
 
         Args:
             job: The corresponding PipelineJob object.
 
-        Yields:
+        Returns:
             A dictionary of metadata related to the pipeline run.
         """
         metadata: Dict[str, MetadataType] = {}
@@ -1016,7 +991,7 @@ class VertexOrchestrator(ContainerizedOrchestrator, GoogleCredentialsMixin):
         if logs_url := self._compute_orchestrator_logs_url(job):
             metadata[METADATA_ORCHESTRATOR_LOGS_URL] = Uri(logs_url)
 
-        yield metadata
+        return metadata
 
     @staticmethod
     def _compute_orchestrator_url(
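The Vertex hunks show the other half of the refactor: `compute_metadata` now returns a plain dictionary, and the synchronous `run.wait()` moves into a `_wait_for_completion` closure handed back through `SubmissionResult`. The consuming side lives in `zenml/orchestrators/base_orchestrator.py` (+137 -66 in the file list) and is not shown in this excerpt, so the following is only a sketch of how a caller might use the result; `publish_run_metadata` is a hypothetical stand-in.

```python
# Hedged sketch of the caller side, using only the SubmissionResult
# fields exercised in this diff; the real logic in base_orchestrator.py
# may differ.
result = orchestrator.submit_pipeline(
    deployment=deployment,
    stack=stack,
    environment=environment,
    placeholder_run=placeholder_run,
)
if result is not None:
    if result.metadata:
        publish_run_metadata(result.metadata)  # hypothetical helper
    if result.wait_for_completion is not None:
        # Blocks only when the orchestrator opted into synchronous mode.
        result.wait_for_completion()
```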
--- a/zenml/integrations/hyperai/orchestrators/hyperai_orchestrator.py
+++ b/zenml/integrations/hyperai/orchestrators/hyperai_orchestrator.py
@@ -30,9 +30,7 @@ from zenml.integrations.hyperai.flavors.hyperai_orchestrator_flavor import (
     HyperAIOrchestratorSettings,
 )
 from zenml.logger import get_logger
-from zenml.orchestrators import (
-    ContainerizedOrchestrator,
-)
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.stack import Stack, StackValidator
 
 if TYPE_CHECKING:
@@ -159,14 +157,19 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             f"Failed to write {description} to HyperAI instance. Does the user have permissions to write?"
         )
 
-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Any:
-        """Sequentially runs all pipeline steps in Docker containers.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.
+
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.
 
         Assumes that:
         - A HyperAI (hyperai.ai) instance is running on the configured IP address.
@@ -179,26 +182,25 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             orchestrator.
 
         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.
 
         Raises:
-            RuntimeError: If a step fails.
+            RuntimeError: If running the pipeline fails.
+
+        Returns:
+            Optional submission result.
         """
         from zenml.integrations.hyperai.service_connectors.hyperai_service_connector import (
             HyperAIServiceConnector,
         )
 
-        # Basic Docker Compose definition
         compose_definition: Dict[str, Any] = {"version": "3", "services": {}}
-
-        # Get deployment id
         deployment_id = deployment.id
 
-        # Set environment
         os.environ[ENV_ZENML_HYPERAI_RUN_ID] = str(deployment_id)
         environment[ENV_ZENML_HYPERAI_RUN_ID] = str(deployment_id)
 
@@ -208,12 +210,9 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             # Get image
             image = self.get_image(deployment=deployment, step_name=step_name)
 
-            # Get settings
             step_settings = cast(
                 HyperAIOrchestratorSettings, self.get_settings(step)
             )
-
-            # Define container name as combination between deployment id and step name
             container_name = f"{deployment_id}-{step_name}"
 
             # Make Compose service definition for step
@@ -246,10 +245,9 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
                 }
             }
 
-            # Depending on whether it is a scheduled or a realtime pipeline, add
-            # potential .env file to service definition for deployment ID override.
             if deployment.schedule:
-                # drop ZENML_HYPERAI_ORCHESTRATOR_RUN_ID from environment but only if it is set
+                # If running on a schedule, the run ID is set dynamically via
+                # the .env file.
                 if ENV_ZENML_HYPERAI_RUN_ID in environment:
                     del environment[ENV_ZENML_HYPERAI_RUN_ID]
                 compose_definition["services"][container_name]["env_file"] = [
@@ -282,15 +280,12 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
                 }
             )
 
-        # Convert into yaml
-        logger.info("Finalizing Docker Compose definition.")
         compose_definition_yaml: str = yaml.dump(compose_definition)
 
         # Connect to configured HyperAI instance
         logger.info(
             "Connecting to HyperAI instance and placing Docker Compose file."
         )
-        paramiko_client: paramiko.SSHClient
         if connector := self.get_connector():
             paramiko_client = connector.connect()
             if paramiko_client is None:
@@ -510,3 +505,5 @@ class HyperAIOrchestrator(ContainerizedOrchestrator):
             raise RuntimeError(
                 "A cron expression or start time is required for scheduled pipelines."
             )
+
+        return None
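For HyperAI the change is largely the same signature swap plus comment cleanup: the orchestrator still builds one Docker Compose service per step, keyed by `{deployment_id}-{step_name}`, and on scheduled runs replaces the baked-in run-ID environment variable with an `env_file` entry. A rough sketch of the resulting structure, keeping only what the hunks above confirm; the image tag and `.env` path are placeholders and other per-step keys are omitted:

```python
# Approximate shape of the Compose definition assembled above.
deployment_id = "1a2b3c"  # placeholder
step_name = "trainer"  # placeholder
compose_definition = {
    "version": "3",
    "services": {
        f"{deployment_id}-{step_name}": {
            "image": "zenml/step-image:latest",  # placeholder tag
            # ...other per-step keys omitted...
            # Present only for scheduled deployments, where the run ID
            # is injected via the .env file instead of the environment:
            "env_file": [".env"],  # path is an assumption
        }
    },
}
```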
--- a/zenml/integrations/kubeflow/orchestrators/kubeflow_orchestrator.py
+++ b/zenml/integrations/kubeflow/orchestrators/kubeflow_orchestrator.py
@@ -69,7 +69,7 @@ from zenml.integrations.kubeflow.flavors.kubeflow_orchestrator_flavor import (
 from zenml.io import fileio
 from zenml.logger import get_logger
 from zenml.metadata.metadata_types import MetadataType, Uri
-from zenml.orchestrators import ContainerizedOrchestrator
+from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
 from zenml.orchestrators.utils import get_orchestrator_run_name
 from zenml.stack import StackValidator
 from zenml.utils import io_utils, settings_utils, yaml_utils
@@ -466,47 +466,33 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
 
         return pipeline_task
 
-    def prepare_or_run_pipeline(
+    def submit_pipeline(
         self,
         deployment: "PipelineDeploymentResponse",
         stack: "Stack",
         environment: Dict[str, str],
         placeholder_run: Optional["PipelineRunResponse"] = None,
-    ) -> Any:
-        """Creates a kfp yaml file.
+    ) -> Optional[SubmissionResult]:
+        """Submits a pipeline to the orchestrator.
 
-        This functions as an intermediary representation of the pipeline which
-        is then deployed to the kubeflow pipelines instance.
-
-        How it works:
-        -------------
-        Before this method is called the `prepare_pipeline_deployment()`
-        method builds a docker image that contains the code for the
-        pipeline, all steps the context around these files.
-
-        Based on this docker image a callable is created which builds
-        container_ops for each step (`_construct_kfp_pipeline`).
-        To do this the entrypoint of the docker image is configured to
-        run the correct step within the docker image. The dependencies
-        between these container_ops are then also configured onto each
-        container_op by pointing at the downstream steps.
-
-        This callable is then compiled into a kfp yaml file that is used as
-        the intermediary representation of the kubeflow pipeline.
-
-        This file, together with some metadata, runtime configurations is
-        then uploaded into the kubeflow pipelines cluster for execution.
+        This method should only submit the pipeline and not wait for it to
+        complete. If the orchestrator is configured to wait for the pipeline run
+        to complete, a function that waits for the pipeline run to complete can
+        be passed as part of the submission result.
 
         Args:
-            deployment: The pipeline deployment to prepare or run.
+            deployment: The pipeline deployment to submit.
             stack: The stack the pipeline will run on.
             environment: Environment variables to set in the orchestration
-                environment.
+                environment. These don't need to be set if running locally.
             placeholder_run: An optional placeholder run for the deployment.
 
         Raises:
             RuntimeError: If trying to run a pipeline in a notebook
                 environment.
+
+        Returns:
+            Optional submission result.
         """
         # First check whether the code running in a notebook
         if Environment.in_notebook():
@@ -672,7 +658,7 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
 
         # using the kfp client uploads the pipeline to kubeflow pipelines and
         # runs it there
-        self._upload_and_run_pipeline(
+        return self._upload_and_run_pipeline(
             deployment=deployment,
             pipeline_file_path=pipeline_file_path,
             run_name=orchestrator_run_name,
@@ -683,7 +669,7 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
         deployment: "PipelineDeploymentResponse",
         pipeline_file_path: str,
         run_name: str,
-    ) -> None:
+    ) -> Optional[SubmissionResult]:
         """Tries to upload and run a KFP pipeline.
 
         Args:
@@ -693,6 +679,9 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
 
         Raises:
             RuntimeError: If Kubeflow API returns an error.
+
+        Returns:
+            Optional submission result.
         """
         pipeline_name = deployment.pipeline_configuration.name
         settings = cast(
@@ -788,8 +777,14 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
             )
 
             if settings.synchronous:
-                client.wait_for_run_completion(
-                    run_id=result.run_id, timeout=settings.timeout
+
+                def _wait_for_completion() -> None:
+                    client.wait_for_run_completion(
+                        run_id=result.run_id, timeout=settings.timeout
+                    )
+
+                return SubmissionResult(
+                    wait_for_completion=_wait_for_completion
                 )
         except urllib3.exceptions.HTTPError as error:
             if kubernetes_context:
@@ -811,6 +806,8 @@ class KubeflowOrchestrator(ContainerizedOrchestrator):
                 f"Failed to upload Kubeflow pipeline: {error}. {msg}",
             )
 
+        return None
+
     def get_orchestrator_run_id(self) -> str:
         """Returns the active orchestrator run id.
 