zenml-nightly 0.83.1.dev20250627__py3-none-any.whl → 0.83.1.dev20250628__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
zenml/VERSION CHANGED
@@ -1 +1 @@
- 0.83.1.dev20250627
+ 0.83.1.dev20250628
zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py CHANGED
@@ -46,6 +46,9 @@ from kubernetes import client as k8s_client
  from kubernetes import config as k8s_config
 
  from zenml.config.base_settings import BaseSettings
+ from zenml.constants import (
+     METADATA_ORCHESTRATOR_RUN_ID,
+ )
  from zenml.enums import StackComponentType
  from zenml.integrations.kubernetes.flavors.kubernetes_orchestrator_flavor import (
      KubernetesOrchestratorConfig,
@@ -61,6 +64,7 @@ from zenml.integrations.kubernetes.orchestrators.manifest_utils import (
  )
  from zenml.integrations.kubernetes.pod_settings import KubernetesPodSettings
  from zenml.logger import get_logger
+ from zenml.metadata.metadata_types import MetadataType
  from zenml.orchestrators import ContainerizedOrchestrator, SubmissionResult
  from zenml.orchestrators.utils import get_orchestrator_run_name
  from zenml.stack import StackValidator
@@ -467,9 +471,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
          # This will internally also build the command/args for all step pods.
          command = KubernetesOrchestratorEntrypointConfiguration.get_entrypoint_command()
          args = KubernetesOrchestratorEntrypointConfiguration.get_entrypoint_arguments(
-             run_name=orchestrator_run_name,
              deployment_id=deployment.id,
-             kubernetes_namespace=self.config.kubernetes_namespace,
              run_id=placeholder_run.id if placeholder_run else None,
          )
 
@@ -508,6 +510,18 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
              }
          )
 
+         orchestrator_pod_labels = {
+             "pipeline": kube_utils.sanitize_label(pipeline_name),
+         }
+
+         if placeholder_run:
+             orchestrator_pod_labels["run_id"] = kube_utils.sanitize_label(
+                 str(placeholder_run.id)
+             )
+             orchestrator_pod_labels["run_name"] = kube_utils.sanitize_label(
+                 str(placeholder_run.name)
+             )
+
          # Schedule as CRON job if CRON schedule is given.
          if deployment.schedule:
              if not deployment.schedule.cron_expression:
@@ -519,9 +533,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
              cron_expression = deployment.schedule.cron_expression
              cron_job_manifest = build_cron_job_manifest(
                  cron_expression=cron_expression,
-                 run_name=orchestrator_run_name,
                  pod_name=pod_name,
-                 pipeline_name=pipeline_name,
                  image_name=image,
                  command=command,
                  args=args,
@@ -533,6 +545,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                  successful_jobs_history_limit=settings.successful_jobs_history_limit,
                  failed_jobs_history_limit=settings.failed_jobs_history_limit,
                  ttl_seconds_after_finished=settings.ttl_seconds_after_finished,
+                 labels=orchestrator_pod_labels,
              )
 
              self._k8s_batch_api.create_namespaced_cron_job(
@@ -547,9 +560,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
          else:
              # Create and run the orchestrator pod.
              pod_manifest = build_pod_manifest(
-                 run_name=orchestrator_run_name,
                  pod_name=pod_name,
-                 pipeline_name=pipeline_name,
                  image_name=image,
                  command=command,
                  args=args,
@@ -557,6 +568,7 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                  pod_settings=orchestrator_pod_settings,
                  service_account_name=service_account_name,
                  env=environment,
+                 labels=orchestrator_pod_labels,
                  mount_local_stores=self.config.is_local,
              )
 
@@ -572,6 +584,11 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                  startup_timeout=settings.pod_startup_timeout,
              )
 
+         metadata: Dict[str, MetadataType] = {
+             METADATA_ORCHESTRATOR_RUN_ID: pod_name,
+         }
+
+         # Wait for the orchestrator pod to finish and stream logs.
          if settings.synchronous:
 
              def _wait_for_run_to_finish() -> None:
@@ -588,7 +605,8 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
              )
 
              return SubmissionResult(
-                 wait_for_completion=_wait_for_run_to_finish
+                 metadata=metadata,
+                 wait_for_completion=_wait_for_run_to_finish,
              )
          else:
              logger.info(
@@ -597,7 +615,9 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
                  f"Run the following command to inspect the logs: "
                  f"`kubectl logs {pod_name} -n {self.config.kubernetes_namespace}`."
              )
-             return None
+             return SubmissionResult(
+                 metadata=metadata,
+             )
 
      def _get_service_account_name(
          self, settings: KubernetesOrchestratorSettings
@@ -642,3 +662,18 @@ class KubernetesOrchestrator(ContainerizedOrchestrator):
              "Unable to read run id from environment variable "
              f"{ENV_ZENML_KUBERNETES_RUN_ID}."
          )
+
+     def get_pipeline_run_metadata(
+         self, run_id: UUID
+     ) -> Dict[str, "MetadataType"]:
+         """Get general component-specific metadata for a pipeline run.
+
+         Args:
+             run_id: The ID of the pipeline run.
+
+         Returns:
+             A dictionary of metadata.
+         """
+         return {
+             METADATA_ORCHESTRATOR_RUN_ID: self.get_orchestrator_run_id(),
+         }
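The new `orchestrator_pod_labels` dictionary funnels the pipeline and run names through `kube_utils.sanitize_label` before attaching them to the pod. zenml ships its own implementation of that helper; the sketch below is only an illustration of the constraints a Kubernetes label value must satisfy (at most 63 characters, limited character set, alphanumeric start and end), using a hypothetical `sanitize_label_sketch` name.

```python
# Illustration only, not zenml's kube_utils.sanitize_label: Kubernetes label
# values must be <= 63 chars, use only [a-zA-Z0-9-_.], and must start and
# end with an alphanumeric character.
import re

def sanitize_label_sketch(value: str) -> str:
    value = re.sub(r"[^a-z0-9\-_.]", "-", value.lower())  # drop invalid chars
    return value[:63].strip("-_.")  # cap length and fix the ends

print(sanitize_label_sketch("My Pipeline: v2"))  # -> my-pipeline--v2
```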
zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint.py CHANGED
@@ -15,8 +15,7 @@
 
  import argparse
  import socket
- from typing import Any, Dict, cast
- from uuid import UUID
+ from typing import Callable, Dict, Optional, cast
 
  from kubernetes import client as k8s_client
 
@@ -41,10 +40,16 @@ from zenml.integrations.kubernetes.orchestrators.manifest_utils import (
  from zenml.logger import get_logger
  from zenml.orchestrators import publish_utils
  from zenml.orchestrators.dag_runner import NodeStatus, ThreadedDagRunner
+ from zenml.orchestrators.step_run_utils import (
+     StepRunRequestFactory,
+     fetch_step_runs_by_names,
+     publish_cached_step_run,
+ )
  from zenml.orchestrators.utils import (
      get_config_environment_vars,
      get_orchestrator_run_name,
  )
+ from zenml.pipelines.run_utils import create_placeholder_run
 
  logger = get_logger(__name__)
 
@@ -56,9 +61,7 @@ def parse_args() -> argparse.Namespace:
          Parsed args.
      """
      parser = argparse.ArgumentParser()
-     parser.add_argument("--run_name", type=str, required=True)
      parser.add_argument("--deployment_id", type=str, required=True)
-     parser.add_argument("--kubernetes_namespace", type=str, required=True)
      parser.add_argument("--run_id", type=str, required=False)
      return parser.parse_args()
 
@@ -68,7 +71,6 @@ def main() -> None:
      # Log to the container's stdout so it can be streamed by the client.
      logger.info("Kubernetes orchestrator pod started.")
 
-     # Parse / extract args.
      args = parse_args()
 
      orchestrator_pod_name = socket.gethostname()
@@ -77,6 +79,7 @@ def main() -> None:
      active_stack = client.active_stack
      orchestrator = active_stack.orchestrator
      assert isinstance(orchestrator, KubernetesOrchestrator)
+     namespace = orchestrator.config.kubernetes_namespace
 
      deployment = client.get_deployment(args.deployment_id)
      pipeline_settings = cast(
@@ -86,20 +89,6 @@ def main() -> None:
 
      step_command = StepEntrypointConfiguration.get_entrypoint_command()
 
-     if args.run_id and not pipeline_settings.prevent_orchestrator_pod_caching:
-         from zenml.orchestrators import cache_utils
-
-         run_required = (
-             cache_utils.create_cached_step_runs_and_prune_deployment(
-                 deployment=deployment,
-                 pipeline_run=client.get_pipeline_run(args.run_id),
-                 stack=active_stack,
-             )
-         )
-
-         if not run_required:
-             return
-
      mount_local_stores = active_stack.orchestrator.config.is_local
 
      # Get a Kubernetes client from the active Kubernetes orchestrator, but
@@ -115,7 +104,7 @@ def main() -> None:
          owner_references = kube_utils.get_pod_owner_references(
              core_api=core_api,
              pod_name=orchestrator_pod_name,
-             namespace=args.kubernetes_namespace,
+             namespace=namespace,
          )
      except Exception as e:
          logger.warning(f"Failed to get pod owner references: {str(e)}")
@@ -126,6 +115,62 @@ def main() -> None:
      for owner_reference in owner_references:
          owner_reference.controller = False
 
+     if args.run_id:
+         pipeline_run = client.get_pipeline_run(args.run_id)
+     else:
+         pipeline_run = create_placeholder_run(
+             deployment=deployment,
+             orchestrator_run_id=orchestrator_pod_name,
+         )
+
+     pre_step_run: Optional[Callable[[str], bool]] = None
+
+     if not pipeline_settings.prevent_orchestrator_pod_caching:
+         step_run_request_factory = StepRunRequestFactory(
+             deployment=deployment,
+             pipeline_run=pipeline_run,
+             stack=active_stack,
+         )
+         step_runs = {}
+
+         def pre_step_run(step_name: str) -> bool:
+             """Pre-step run.
+
+             Args:
+                 step_name: Name of the step.
+
+             Returns:
+                 Whether the step node needs to be run.
+             """
+             step_run_request = step_run_request_factory.create_request(
+                 step_name
+             )
+             try:
+                 step_run_request_factory.populate_request(step_run_request)
+             except Exception as e:
+                 logger.error(
+                     f"Failed to populate step run request for step {step_name}: {e}"
+                 )
+                 return True
+
+             if step_run_request.status == ExecutionStatus.CACHED:
+                 step_run = publish_cached_step_run(
+                     step_run_request, pipeline_run
+                 )
+                 step_runs[step_name] = step_run
+                 logger.info("Using cached version of step `%s`.", step_name)
+                 return False
+
+             return True
+
+     step_pod_labels = {
+         "run_id": kube_utils.sanitize_label(str(pipeline_run.id)),
+         "run_name": kube_utils.sanitize_label(str(pipeline_run.name)),
+         "pipeline": kube_utils.sanitize_label(
+             deployment.pipeline_configuration.name
+         ),
+     }
+
      def run_step_on_kubernetes(step_name: str) -> None:
          """Run a pipeline step in a separate Kubernetes pod.
 
@@ -146,7 +191,7 @@ def main() -> None:
          ):
              max_length = (
                  kube_utils.calculate_max_pod_name_length_for_namespace(
-                     namespace=args.kubernetes_namespace
+                     namespace=namespace
                  )
              )
              pod_name_prefix = get_orchestrator_run_name(
@@ -156,9 +201,7 @@ def main() -> None:
          else:
              pod_name = f"{orchestrator_pod_name}-{step_name}"
 
-         pod_name = kube_utils.sanitize_pod_name(
-             pod_name, namespace=args.kubernetes_namespace
-         )
+         pod_name = kube_utils.sanitize_pod_name(pod_name, namespace=namespace)
 
          image = KubernetesOrchestrator.get_image(
              deployment=deployment, step_name=step_name
@@ -195,8 +238,6 @@ def main() -> None:
          # Define Kubernetes pod manifest.
          pod_manifest = build_pod_manifest(
              pod_name=pod_name,
-             run_name=args.run_name,
-             pipeline_name=deployment.pipeline_configuration.name,
              image_name=image,
              command=step_command,
              args=step_args,
@@ -207,6 +248,7 @@ def main() -> None:
              or settings.service_account_name,
              mount_local_stores=mount_local_stores,
              owner_references=owner_references,
+             labels=step_pod_labels,
          )
 
          kube_utils.create_and_wait_for_pod_to_start(
@@ -214,7 +256,7 @@ def main() -> None:
              pod_display_name=f"pod for step `{step_name}`",
              pod_name=pod_name,
              pod_manifest=pod_manifest,
-             namespace=args.kubernetes_namespace,
+             namespace=namespace,
              startup_max_retries=settings.pod_failure_max_retries,
              startup_failure_delay=settings.pod_failure_retry_delay,
              startup_failure_backoff=settings.pod_failure_backoff,
@@ -229,7 +271,7 @@ def main() -> None:
                  incluster=True
              ),
              pod_name=pod_name,
-             namespace=args.kubernetes_namespace,
+             namespace=namespace,
              exit_condition_lambda=kube_utils.pod_is_done,
              stream_logs=True,
          )
@@ -249,30 +291,15 @@ def main() -> None:
          try:
              # Some steps may have failed because the pods could not be created.
              # We need to check for this and mark the step run as failed if so.
-
-             # Fetch the pipeline run using any means possible.
-             list_args: Dict[str, Any] = {}
-             if args.run_id:
-                 # For a run triggered outside of a schedule, we can use the
-                 # placeholder run ID to find the pipeline run.
-                 list_args = dict(id=UUID(args.run_id))
-             else:
-                 # For a run triggered by a schedule, we can only use the
-                 # orchestrator run ID to find the pipeline run.
-                 list_args = dict(orchestrator_run_id=orchestrator_pod_name)
-
-             pipeline_runs = client.list_pipeline_runs(
-                 hydrate=True,
-                 project=deployment.project_id,
-                 deployment_id=deployment.id,
-                 **list_args,
-             )
-             if not len(pipeline_runs):
-                 # No pipeline run found, so we can't mark any step runs as failed.
-                 return
-
-             pipeline_run = pipeline_runs[0]
              pipeline_failed = False
+             failed_step_names = [
+                 step_name
+                 for step_name, node_state in node_states.items()
+                 if node_state == NodeStatus.FAILED
+             ]
+             step_runs = fetch_step_runs_by_names(
+                 step_run_names=failed_step_names, pipeline_run=pipeline_run
+             )
 
              for step_name, node_state in node_states.items():
                  if node_state != NodeStatus.FAILED:
@@ -280,18 +307,14 @@ def main() -> None:
 
                  pipeline_failed = True
 
-                 # If steps failed for any reason, we need to mark the step run as
-                 # failed, if it exists and it wasn't already in a final state.
-
-                 step_run = pipeline_run.steps.get(step_name)
-
-                 # Try to update the step run status, if it exists and is in
-                 # a transient state.
-                 if step_run and step_run.status in {
-                     ExecutionStatus.INITIALIZING,
-                     ExecutionStatus.RUNNING,
-                 }:
-                     publish_utils.publish_failed_step_run(step_run.id)
+                 if step_run := step_runs.get(step_name, None):
+                     # Try to update the step run status, if it exists and is in
+                     # a transient state.
+                     if step_run and step_run.status in {
+                         ExecutionStatus.INITIALIZING,
+                         ExecutionStatus.RUNNING,
+                     }:
+                         publish_utils.publish_failed_step_run(step_run.id)
 
              # If any steps failed and the pipeline run is still in a transient
              # state, we need to mark it as failed.
@@ -319,6 +342,7 @@ def main() -> None:
      ThreadedDagRunner(
          dag=pipeline_dag,
          run_fn=run_step_on_kubernetes,
+         preparation_fn=pre_step_run,
          finalize_fn=finalize_run,
          parallel_node_startup_waiting_period=parallel_node_startup_waiting_period,
          max_parallelism=pipeline_settings.max_parallelism,
@@ -333,7 +357,7 @@ def main() -> None:
      try:
          kube_utils.delete_secret(
              core_api=core_api,
-             namespace=args.kubernetes_namespace,
+             namespace=namespace,
              secret_name=secret_name,
          )
      except k8s_client.rest.ApiException as e:
zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint_configuration.py CHANGED
@@ -18,9 +18,7 @@ from typing import TYPE_CHECKING, List, Optional, Set
  if TYPE_CHECKING:
      from uuid import UUID
 
- RUN_NAME_OPTION = "run_name"
  DEPLOYMENT_ID_OPTION = "deployment_id"
- NAMESPACE_OPTION = "kubernetes_namespace"
  RUN_ID_OPTION = "run_id"
 
 
@@ -35,9 +33,7 @@ class KubernetesOrchestratorEntrypointConfiguration:
              Entrypoint options.
          """
          options = {
-             RUN_NAME_OPTION,
              DEPLOYMENT_ID_OPTION,
-             NAMESPACE_OPTION,
          }
          return options
 
@@ -58,29 +54,21 @@
      @classmethod
      def get_entrypoint_arguments(
          cls,
-         run_name: str,
          deployment_id: "UUID",
-         kubernetes_namespace: str,
          run_id: Optional["UUID"] = None,
      ) -> List[str]:
          """Gets all arguments that the entrypoint command should be called with.
 
          Args:
-             run_name: Name of the ZenML run.
              deployment_id: ID of the deployment.
-             kubernetes_namespace: Name of the Kubernetes namespace.
              run_id: Optional ID of the pipeline run. Not set for scheduled runs.
 
          Returns:
              List of entrypoint arguments.
          """
          args = [
-             f"--{RUN_NAME_OPTION}",
-             run_name,
              f"--{DEPLOYMENT_ID_OPTION}",
              str(deployment_id),
-             f"--{NAMESPACE_OPTION}",
-             kubernetes_namespace,
          ]
 
          if run_id:
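With `run_name` and `kubernetes_namespace` gone, the orchestrator pod entrypoint is parameterized by the deployment ID alone, plus an optional run ID (the namespace is now read from the orchestrator config inside the pod). A quick sketch of the resulting argument list, using a made-up UUID:

```python
# Sketch of the trimmed-down entrypoint arguments (the UUID is made up).
from uuid import UUID

from zenml.integrations.kubernetes.orchestrators.kubernetes_orchestrator_entrypoint_configuration import (
    KubernetesOrchestratorEntrypointConfiguration,
)

args = KubernetesOrchestratorEntrypointConfiguration.get_entrypoint_arguments(
    deployment_id=UUID("12345678-1234-5678-1234-567812345678"),
)
print(args)  # ['--deployment_id', '12345678-1234-5678-1234-567812345678']
# For non-scheduled runs a `--run_id <uuid>` pair is appended; scheduled runs
# omit it, and the pod then creates its own placeholder run.
```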
zenml/integrations/kubernetes/orchestrators/manifest_utils.py CHANGED
@@ -26,7 +26,6 @@ from zenml.constants import ENV_ZENML_ENABLE_REPO_INIT_WARNINGS
  from zenml.integrations.airflow.orchestrators.dag_generator import (
      ENV_ZENML_LOCAL_STORES_PATH,
  )
- from zenml.integrations.kubernetes.orchestrators import kube_utils
  from zenml.integrations.kubernetes.pod_settings import KubernetesPodSettings
  from zenml.logger import get_logger
 
@@ -97,8 +96,6 @@ def add_local_stores_mount(
 
  def build_pod_manifest(
      pod_name: str,
-     run_name: str,
-     pipeline_name: str,
      image_name: str,
      command: List[str],
      args: List[str],
@@ -106,6 +103,7 @@ def build_pod_manifest(
      pod_settings: Optional[KubernetesPodSettings] = None,
      service_account_name: Optional[str] = None,
      env: Optional[Dict[str, str]] = None,
+     labels: Optional[Dict[str, str]] = None,
      mount_local_stores: bool = False,
      owner_references: Optional[List[k8s_client.V1OwnerReference]] = None,
  ) -> k8s_client.V1Pod:
@@ -113,8 +111,6 @@ def build_pod_manifest(
 
      Args:
          pod_name: Name of the pod.
-         run_name: Name of the ZenML run.
-         pipeline_name: Name of the ZenML pipeline.
          image_name: Name of the Docker image.
          command: Command to execute the entrypoint in the pod.
          args: Arguments provided to the entrypoint command.
@@ -124,6 +120,7 @@ def build_pod_manifest(
              Can be used to assign certain roles to a pod, e.g., to allow it to
              run Kubernetes commands from within the cluster.
          env: Environment variables to set.
+         labels: Labels to add to the pod.
          mount_local_stores: Whether to mount the local stores path inside the
              pod.
          owner_references: List of owner references for the pod.
@@ -162,7 +159,7 @@ def build_pod_manifest(
      if service_account_name is not None:
          pod_spec.service_account_name = service_account_name
 
-     labels = {}
+     labels = labels or {}
 
      if pod_settings:
          add_pod_settings(pod_spec, pod_settings)
@@ -171,14 +168,6 @@ def build_pod_manifest(
          if pod_settings.labels:
              labels.update(pod_settings.labels)
 
-     # Add run_name and pipeline_name to the labels
-     labels.update(
-         {
-             "run": kube_utils.sanitize_label(run_name),
-             "pipeline": kube_utils.sanitize_label(pipeline_name),
-         }
-     )
-
      pod_metadata = k8s_client.V1ObjectMeta(
          name=pod_name,
          labels=labels,
@@ -272,8 +261,6 @@ def add_pod_settings(
  def build_cron_job_manifest(
      cron_expression: str,
      pod_name: str,
-     run_name: str,
-     pipeline_name: str,
      image_name: str,
      command: List[str],
      args: List[str],
@@ -281,6 +268,7 @@ def build_cron_job_manifest(
      pod_settings: Optional[KubernetesPodSettings] = None,
      service_account_name: Optional[str] = None,
      env: Optional[Dict[str, str]] = None,
+     labels: Optional[Dict[str, str]] = None,
      mount_local_stores: bool = False,
      successful_jobs_history_limit: Optional[int] = None,
      failed_jobs_history_limit: Optional[int] = None,
@@ -291,8 +279,6 @@ def build_cron_job_manifest(
      Args:
          cron_expression: CRON job schedule expression, e.g. "* * * * *".
          pod_name: Name of the pod.
-         run_name: Name of the ZenML run.
-         pipeline_name: Name of the ZenML pipeline.
          image_name: Name of the Docker image.
          command: Command to execute the entrypoint in the pod.
          args: Arguments provided to the entrypoint command.
@@ -302,6 +288,7 @@ def build_cron_job_manifest(
              Can be used to assign certain roles to a pod, e.g., to allow it to
              run Kubernetes commands from within the cluster.
          env: Environment variables to set.
+         labels: Labels to add to the pod.
          mount_local_stores: Whether to mount the local stores path inside the
              pod.
          successful_jobs_history_limit: The number of successful jobs to retain.
@@ -314,8 +301,6 @@ def build_cron_job_manifest(
      """
      pod_manifest = build_pod_manifest(
          pod_name=pod_name,
-         run_name=run_name,
-         pipeline_name=pipeline_name,
          image_name=image_name,
          command=command,
          args=args,
@@ -323,6 +308,7 @@ def build_cron_job_manifest(
          pod_settings=pod_settings,
          service_account_name=service_account_name,
          env=env,
+         labels=labels,
          mount_local_stores=mount_local_stores,
      )
 
zenml/integrations/kubernetes/step_operators/kubernetes_step_operator.py CHANGED
@@ -205,9 +205,7 @@ class KubernetesStepOperator(BaseStepOperator):
 
          # Create and run the orchestrator pod.
          pod_manifest = build_pod_manifest(
-             run_name=info.run_name,
              pod_name=pod_name,
-             pipeline_name=info.pipeline.name,
              image_name=image_name,
              command=command,
              args=args,
@@ -216,6 +214,10 @@ class KubernetesStepOperator(BaseStepOperator):
              pod_settings=settings.pod_settings,
              env=environment,
              mount_local_stores=False,
+             labels={
+                 "run_id": kube_utils.sanitize_label(str(info.run_id)),
+                 "pipeline": kube_utils.sanitize_label(info.pipeline.name),
+             },
          )
 
          kube_utils.create_and_wait_for_pod_to_start(
zenml/integrations/vllm/services/vllm_deployment.py CHANGED
@@ -150,7 +150,7 @@ class VLLMDeploymentService(LocalDaemonService, BaseDeploymentService):
 
          try:
              parser: argparse.ArgumentParser = make_arg_parser(
-                 FlexibleArgumentParser()  # type: ignore[no-untyped-call]
+                 FlexibleArgumentParser()
              )
              # pass in empty list to get default args
              # otherwise it will try to get the args from sys.argv
zenml/models/v2/core/pipeline_run.py CHANGED
@@ -125,6 +125,15 @@ class PipelineRunRequest(ProjectScopedRequest):
          title="Logs of the pipeline run.",
      )
 
+     @property
+     def is_placeholder_request(self) -> bool:
+         """Whether the request is a placeholder request.
+
+         Returns:
+             Whether the request is a placeholder request.
+         """
+         return self.status == ExecutionStatus.INITIALIZING
+
      model_config = ConfigDict(protected_namespaces=())
 
 
zenml/orchestrators/dag_runner.py CHANGED
@@ -72,6 +72,7 @@ class ThreadedDagRunner:
          self,
          dag: Dict[str, List[str]],
          run_fn: Callable[[str], Any],
+         preparation_fn: Optional[Callable[[str], bool]] = None,
          finalize_fn: Optional[Callable[[Dict[str, NodeStatus]], None]] = None,
          parallel_node_startup_waiting_period: float = 0.0,
          max_parallelism: Optional[int] = None,
@@ -83,6 +84,9 @@
                  E.g.: [(1->2), (1->3), (2->4), (3->4)] should be represented as
                  `dag={2: [1], 3: [1], 4: [2, 3]}`
              run_fn: A function `run_fn(node)` that runs a single node
+             preparation_fn: A function that is called before the node is run.
+                 If provided, the function return value determines whether the
+                 node should be run or can be skipped.
              finalize_fn: A function `finalize_fn(node_states)` that is called
                  when all nodes have completed.
              parallel_node_startup_waiting_period: Delay in seconds to wait in
@@ -102,6 +106,7 @@
          self.dag = dag
          self.reversed_dag = reverse_dag(dag)
          self.run_fn = run_fn
+         self.preparation_fn = preparation_fn
          self.finalize_fn = finalize_fn
          self.nodes = dag.keys()
          self.node_states = {
@@ -156,7 +161,7 @@
                  break
 
              logger.debug(f"Waiting for {running_nodes} nodes to finish.")
-             time.sleep(10)
+             time.sleep(1)
 
      def _run_node(self, node: str) -> None:
          """Run a single node.
@@ -168,6 +173,12 @@
          """
          self._prepare_node_run(node)
 
+         if self.preparation_fn:
+             run_required = self.preparation_fn(node)
+             if not run_required:
+                 self._finish_node(node)
+                 return
+
          try:
              self.run_fn(node)
              self._finish_node(node)
@@ -203,8 +214,6 @@
              node: The node.
              failed: Whether the node failed.
          """
-         # Update node status to completed.
-         assert self.node_states[node] == NodeStatus.RUNNING
          with self._lock:
              if failed:
                  self.node_states[node] = NodeStatus.FAILED
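Grounded in the constructor signature shown above, here is a minimal usage sketch of the new hook: `preparation_fn` runs before each node, and returning `False` finishes the node without calling `run_fn`, which is exactly how the Kubernetes entrypoint skips cached steps. The toy DAG, the cached-node set, and the `.run()` call are assumptions for illustration.

```python
from zenml.orchestrators.dag_runner import ThreadedDagRunner

cached = {"load_data"}  # hypothetical: nodes whose results already exist

def prepare(node: str) -> bool:
    # Returning False marks the node as finished without invoking run_fn.
    return node not in cached

ThreadedDagRunner(
    dag={"load_data": [], "train": ["load_data"]},  # node -> upstream nodes
    run_fn=lambda node: print(f"running {node}"),
    preparation_fn=prepare,
).run()  # only "running train" is printed; load_data is skipped as cached
```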
zenml/orchestrators/input_utils.py CHANGED
@@ -13,14 +13,13 @@
  # permissions and limitations under the License.
  """Utilities for inputs."""
 
- import json
  from typing import TYPE_CHECKING, Dict, Optional
 
  from zenml.client import Client
  from zenml.config.step_configurations import Step
  from zenml.enums import StepRunInputArtifactType
  from zenml.exceptions import InputResolutionError
- from zenml.utils import pagination_utils, string_utils
+ from zenml.utils import string_utils
 
  if TYPE_CHECKING:
      from zenml.models import PipelineRunResponse, StepRunResponse
@@ -52,6 +51,7 @@ def resolve_step_inputs(
      """
      from zenml.models import ArtifactVersionResponse
      from zenml.models.v2.core.step_run import StepRunInputResponse
+     from zenml.orchestrators.step_run_utils import fetch_step_runs_by_names
 
      step_runs = step_runs or {}
 
@@ -62,40 +62,11 @@
      steps_to_fetch.difference_update(step_runs.keys())
 
      if steps_to_fetch:
-         # The list of steps might be too big to fit in the default max URL
-         # length of 8KB supported by most servers. So we need to split it into
-         # smaller chunks.
-         steps_list = list(steps_to_fetch)
-         chunks = []
-         current_chunk = []
-         current_length = 0
-         # stay under 6KB for good measure.
-         max_chunk_length = 6000
-
-         for step_name in steps_list:
-             current_chunk.append(step_name)
-             current_length += len(step_name) + 5  # 5 is for the JSON encoding
-
-             if current_length > max_chunk_length:
-                 chunks.append(current_chunk)
-                 current_chunk = []
-                 current_length = 0
-
-         if current_chunk:
-             chunks.append(current_chunk)
-
-         for chunk in chunks:
-             step_runs.update(
-                 {
-                     run_step.name: run_step
-                     for run_step in pagination_utils.depaginate(
-                         Client().list_run_steps,
-                         pipeline_run_id=pipeline_run.id,
-                         project=pipeline_run.project_id,
-                         name="oneof:" + json.dumps(chunk),
-                     )
-                 }
+         step_runs.update(
+             fetch_step_runs_by_names(
+                 step_run_names=list(steps_to_fetch), pipeline_run=pipeline_run
              )
+         )
 
      input_artifacts: Dict[str, StepRunInputResponse] = {}
      for name, input_ in step.spec.inputs.items():
zenml/orchestrators/step_run_utils.py CHANGED
@@ -13,6 +13,7 @@
  # permissions and limitations under the License.
  """Utilities for creating step runs."""
 
+ import json
  from typing import Dict, List, Optional, Set, Tuple, Union
 
  from zenml import Tag, add_tags
@@ -32,6 +33,7 @@ from zenml.models import (
  )
  from zenml.orchestrators import cache_utils, input_utils, utils
  from zenml.stack import Stack
+ from zenml.utils import pagination_utils
  from zenml.utils.time_utils import utc_now
 
  logger = get_logger(__name__)
@@ -151,6 +153,15 @@
              request.status = ExecutionStatus.CACHED
              request.end_time = request.start_time
 
+             # As a last resort, we try to reuse the docstring/source code
+             # from the cached step run. This is part of the cache key
+             # computation, so it must be identical to the one we would have
+             # computed ourselves.
+             if request.source_code is None:
+                 request.source_code = cached_step_run.source_code
+             if request.docstring is None:
+                 request.docstring = cached_step_run.docstring
+
      def _get_docstring_and_source_code(
          self, invocation_id: str
      ) -> Tuple[Optional[str], Optional[str]]:
@@ -333,27 +344,15 @@ def create_cached_step_runs(
              # -> We don't need to do anything here
              continue
 
-         step_run = Client().zen_store.create_run_step(step_run_request)
+         step_run = publish_cached_step_run(
+             step_run_request, pipeline_run=pipeline_run
+         )
 
          # Include the newly created step run in the step runs dictionary to
          # avoid fetching it again later when downstream steps need it for
          # input resolution.
          step_runs[invocation_id] = step_run
 
-         if (
-             model_version := step_run.model_version
-             or pipeline_run.model_version
-         ):
-             link_output_artifacts_to_model_version(
-                 artifacts=step_run.outputs,
-                 model_version=model_version,
-             )
-
-         cascade_tags_for_output_artifacts(
-             artifacts=step_run.outputs,
-             tags=pipeline_run.config.tags,
-         )
-
          logger.info("Using cached version of step `%s`.", invocation_id)
          cached_invocations.add(invocation_id)
 
@@ -426,3 +425,78 @@ def cascade_tags_for_output_artifacts(
          tags=[t.name for t in cascade_tags],
          artifact_version_id=output_artifact.id,
      )
+
+
+ def publish_cached_step_run(
+     request: "StepRunRequest", pipeline_run: "PipelineRunResponse"
+ ) -> "StepRunResponse":
+     """Create a cached step run and link to model version and tags.
+
+     Args:
+         request: The request for the step run.
+         pipeline_run: The pipeline run of the step.
+
+     Returns:
+         The created step run.
+     """
+     step_run = Client().zen_store.create_run_step(request)
+
+     if model_version := step_run.model_version or pipeline_run.model_version:
+         link_output_artifacts_to_model_version(
+             artifacts=step_run.outputs,
+             model_version=model_version,
+         )
+
+     cascade_tags_for_output_artifacts(
+         artifacts=step_run.outputs,
+         tags=pipeline_run.config.tags,
+     )
+
+     return step_run
+
+
+ def fetch_step_runs_by_names(
+     step_run_names: List[str], pipeline_run: "PipelineRunResponse"
+ ) -> Dict[str, "StepRunResponse"]:
+     """Fetch step runs by names.
+
+     Args:
+         step_run_names: The names of the step runs to fetch.
+         pipeline_run: The pipeline run of the step runs.
+
+     Returns:
+         A dictionary of step runs by name.
+     """
+     step_runs = {}
+
+     chunks = []
+     current_chunk = []
+     current_length = 0
+     # stay under 6KB for good measure.
+     max_chunk_length = 6000
+
+     for step_name in step_run_names:
+         current_chunk.append(step_name)
+         current_length += len(step_name) + 5  # 5 is for the JSON encoding
+
+         if current_length > max_chunk_length:
+             chunks.append(current_chunk)
+             current_chunk = []
+             current_length = 0
+
+     if current_chunk:
+         chunks.append(current_chunk)
+
+     for chunk in chunks:
+         step_runs.update(
+             {
+                 run_step.name: run_step
+                 for run_step in pagination_utils.depaginate(
+                     Client().list_run_steps,
+                     pipeline_run_id=pipeline_run.id,
+                     project=pipeline_run.project_id,
+                     name="oneof:" + json.dumps(chunk),
+                 )
+             }
+         )
+     return step_runs
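`fetch_step_runs_by_names` batches the names because they travel to the server inside a single `name=oneof:<json>` query parameter, and most servers reject URLs longer than about 8KB. A standalone sketch of the same batching arithmetic (illustrative only, not zenml code):

```python
import json

names = [f"step_{i}" for i in range(2000)]

chunks, chunk, length = [], [], 0
for name in names:
    chunk.append(name)
    length += len(name) + 5  # ~JSON overhead per name: quotes, comma, slack
    if length > 6000:  # stay well below the ~8KB URL limit
        chunks.append(chunk)
        chunk, length = [], 0
if chunk:
    chunks.append(chunk)

# Every batch stays small enough to embed in a query string.
assert all(len("oneof:" + json.dumps(c)) < 8000 for c in chunks)
print(f"{len(names)} names -> {len(chunks)} requests")
```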
zenml/pipelines/pipeline_definition.py CHANGED
@@ -863,8 +863,12 @@ To avoid this consider setting pipeline parameters only in one place (config or
              deployment = self._create_deployment(**self._run_args)
 
              self.log_pipeline_deployment_metadata(deployment)
-             run = create_placeholder_run(
-                 deployment=deployment, logs=logs_model
+             run = (
+                 create_placeholder_run(
+                     deployment=deployment, logs=logs_model
+                 )
+                 if not deployment.schedule
+                 else None
              )
 
              analytics_handler.metadata = (
zenml/pipelines/run_utils.py CHANGED
@@ -51,23 +51,19 @@ def get_default_run_name(pipeline_name: str) -> str:
 
  def create_placeholder_run(
      deployment: "PipelineDeploymentResponse",
+     orchestrator_run_id: Optional[str] = None,
      logs: Optional["LogsRequest"] = None,
- ) -> Optional["PipelineRunResponse"]:
+ ) -> "PipelineRunResponse":
      """Create a placeholder run for the deployment.
 
-     If the deployment contains a schedule, no placeholder run will be
-     created.
-
      Args:
          deployment: The deployment for which to create the placeholder run.
+         orchestrator_run_id: The orchestrator run ID for the run.
          logs: The logs for the run.
 
      Returns:
-         The placeholder run or `None` if no run was created.
+         The placeholder run.
      """
-     if deployment.schedule:
-         return None
-
      start_time = utc_now()
      run_request = PipelineRunRequest(
          name=string_utils.format_name_template(
@@ -83,7 +79,7 @@ def create_placeholder_run(
          # the start_time is only set once the first step starts
          # running.
          start_time=start_time,
-         orchestrator_run_id=None,
+         orchestrator_run_id=orchestrator_run_id,
          project=deployment.project_id,
          deployment=deployment.id,
          pipeline=deployment.pipeline.id if deployment.pipeline else None,
zenml/zen_server/template_execution/utils.py CHANGED
@@ -193,7 +193,6 @@ def run_template(
      zenml_version = build.zenml_version
 
      placeholder_run = create_placeholder_run(deployment=new_deployment)
-     assert placeholder_run
 
      report_usage(
          feature=RUN_TEMPLATE_TRIGGERS_FEATURE_NAME,
zenml/zen_stores/schemas/pipeline_run_schemas.py CHANGED
@@ -51,7 +51,9 @@ from zenml.zen_stores.schemas.pipeline_deployment_schemas import (
  from zenml.zen_stores.schemas.pipeline_schemas import PipelineSchema
  from zenml.zen_stores.schemas.project_schemas import ProjectSchema
  from zenml.zen_stores.schemas.schedule_schema import ScheduleSchema
- from zenml.zen_stores.schemas.schema_utils import build_foreign_key_field
+ from zenml.zen_stores.schemas.schema_utils import (
+     build_foreign_key_field,
+ )
  from zenml.zen_stores.schemas.stack_schemas import StackSchema
  from zenml.zen_stores.schemas.trigger_schemas import TriggerExecutionSchema
  from zenml.zen_stores.schemas.user_schemas import UserSchema
@@ -550,8 +552,8 @@ class PipelineRunSchema(NamedSchema, RunMetadataInterface, table=True):
 
          Raises:
              RuntimeError: If the DB entry does not represent a placeholder run.
-             ValueError: If the run request does not match the deployment or
-                 pipeline ID of the placeholder run.
+             ValueError: If the run request is not a valid request to replace the
+                 placeholder run.
 
          Returns:
              The updated `PipelineRunSchema`.
@@ -562,13 +564,33 @@ class PipelineRunSchema(NamedSchema, RunMetadataInterface, table=True):
                  "placeholder run."
              )
 
+         if request.is_placeholder_request:
+             raise ValueError(
+                 "Cannot replace a placeholder run with another placeholder run."
+             )
+
          if (
              self.deployment_id != request.deployment
              or self.pipeline_id != request.pipeline
+             or self.project_id != request.project
          ):
              raise ValueError(
-                 "Deployment or orchestrator run ID of placeholder run do not "
-                 "match the IDs of the run request."
+                 "Deployment, project or pipeline ID of placeholder run "
+                 "do not match the IDs of the run request."
+             )
+
+         if not request.orchestrator_run_id:
+             raise ValueError(
+                 "Orchestrator run ID is required to replace a placeholder run."
+             )
+
+         if (
+             self.orchestrator_run_id
+             and self.orchestrator_run_id != request.orchestrator_run_id
+         ):
+             raise ValueError(
+                 "Orchestrator run ID of placeholder run does not match the "
+                 "ID of the run request."
              )
 
          orchestrator_environment = json.dumps(request.orchestrator_environment)
@@ -587,7 +609,4 @@ class PipelineRunSchema(NamedSchema, RunMetadataInterface, table=True):
          Returns:
              Whether the pipeline run is a placeholder run.
          """
-         return (
-             self.orchestrator_run_id is None
-             and self.status == ExecutionStatus.INITIALIZING
-         )
+         return self.status == ExecutionStatus.INITIALIZING.value
zenml/zen_stores/sql_zen_store.py CHANGED
@@ -5844,18 +5844,33 @@ class SqlZenStore(BaseZenStore):
              # transaction to do so finishes. After the first transaction
              # finishes, the subsequent queries will not be able to find a
              # placeholder run anymore, as we already updated the
-             # orchestrator_run_id.
-             # Note: This only locks a single row if the where clause of
-             # the query is indexed (we have a unique index due to the
-             # unique constraint on those columns). Otherwise, this will lock
-             # multiple rows or even the complete table which we want to
-             # avoid.
+             # status.
+             # Note: Due to our unique index on deployment_id and
+             # orchestrator_run_id, this only locks a single row. If you're
+             # modifying this WHERE clause, make sure to test/adjust so this
+             # does not lock multiple rows or even the complete table.
              .with_for_update()
              .where(PipelineRunSchema.deployment_id == pipeline_run.deployment)
              .where(
-                 PipelineRunSchema.orchestrator_run_id.is_(None)  # type: ignore[union-attr]
+                 or_(
+                     PipelineRunSchema.orchestrator_run_id
+                     == pipeline_run.orchestrator_run_id,
+                     col(PipelineRunSchema.orchestrator_run_id).is_(None),
+                 )
              )
-             .where(PipelineRunSchema.project_id == pipeline_run.project)
+             .where(
+                 PipelineRunSchema.status == ExecutionStatus.INITIALIZING.value
+             )
+             # In very rare cases, there can be multiple placeholder runs for
+             # the same deployment. By ordering by the orchestrator_run_id, we
+             # make sure that we use the placeholder run with the matching
+             # orchestrator_run_id if it exists, before falling back to the
+             # placeholder run without any orchestrator_run_id provided.
+             # Note: This works because both SQLite and MySQL consider NULLs
+             # to be lower than any other value. If we add support for other
+             # databases (e.g. PostgreSQL, which considers NULLs to be greater
+             # than any other value), we need to potentially adjust this.
+             .order_by(desc(PipelineRunSchema.orchestrator_run_id))
          ).first()
 
          if not run_schema:
@@ -5903,6 +5918,9 @@ class SqlZenStore(BaseZenStore):
              .where(
                  PipelineRunSchema.orchestrator_run_id == orchestrator_run_id
              )
+             .where(
+                 PipelineRunSchema.status != ExecutionStatus.INITIALIZING.value
+             )
          ).first()
 
          if not run_schema:
@@ -5955,27 +5973,31 @@ class SqlZenStore(BaseZenStore):
          except KeyError:
              pass
 
-         try:
-             return (
-                 self._replace_placeholder_run(
-                     pipeline_run=pipeline_run,
-                     pre_replacement_hook=pre_creation_hook,
-                     session=session,
-                 ),
-                 True,
-             )
-         except KeyError:
-             # We were not able to find/replace a placeholder run. This could
-             # be due to one of the following three reasons:
-             # (1) There never was a placeholder run for the deployment. This
-             #     is the case if the user ran the pipeline on a schedule.
-             # (2) There was a placeholder run, but a previous pipeline run
-             #     already used it. This is the case if users rerun a
-             #     pipeline run e.g. from the orchestrator UI, as they will
-             #     use the same deployment_id with a new orchestrator_run_id.
-             # (3) A step of the same pipeline run already replaced the
-             #     placeholder run.
-             pass
+         if not pipeline_run.is_placeholder_request:
+             # Only run this if the request is not a placeholder run itself,
+             # as we don't want to replace a placeholder run with another
+             # placeholder run.
+             try:
+                 return (
+                     self._replace_placeholder_run(
+                         pipeline_run=pipeline_run,
+                         pre_replacement_hook=pre_creation_hook,
+                         session=session,
+                     ),
+                     True,
+                 )
+             except KeyError:
+                 # We were not able to find/replace a placeholder run. This could
+                 # be due to one of the following three reasons:
+                 # (1) There never was a placeholder run for the deployment. This
+                 #     is the case if the user ran the pipeline on a schedule.
+                 # (2) There was a placeholder run, but a previous pipeline run
+                 #     already used it. This is the case if users rerun a
+                 #     pipeline run e.g. from the orchestrator UI, as they will
+                 #     use the same deployment_id with a new orchestrator_run_id.
+                 # (3) A step of the same pipeline run already replaced the
+                 #     placeholder run.
+                 pass
 
          try:
              # We now try to create a new run. The following will happen in
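The NULL-ordering comment above is easy to verify: both SQLite and MySQL sort NULL below every non-NULL value, so a descending `ORDER BY orchestrator_run_id` returns the placeholder run with the matching ID before one with no ID at all. A quick check with Python's built-in sqlite3 module (illustrative only):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE run (orchestrator_run_id TEXT)")
conn.executemany("INSERT INTO run VALUES (?)", [(None,), ("pod-abc123",)])

rows = conn.execute(
    "SELECT orchestrator_run_id FROM run ORDER BY orchestrator_run_id DESC"
).fetchall()
print(rows)  # [('pod-abc123',), (None,)] -> the non-NULL candidate comes first
```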
zenml_nightly-0.83.1.dev20250628.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: zenml-nightly
- Version: 0.83.1.dev20250627
+ Version: 0.83.1.dev20250628
  Summary: ZenML: Write production-ready ML code.
  License: Apache-2.0
  Keywords: machine learning,production,pipeline,mlops,devops
zenml_nightly-0.83.1.dev20250628.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
  zenml/README.md,sha256=827dekbOWAs1BpW7VF1a4d7EbwPbjwccX-2zdXBENZo,1777
- zenml/VERSION,sha256=fwg9S_GFBEaPtDBc6LSBWzttPLbJ-KFy1HhTDpt6TXs,19
+ zenml/VERSION,sha256=x5dBMBD6SExu8DR6dvH97yI7GYYyLeKD8pBqRadIu00,19
  zenml/__init__.py,sha256=r7JUg2SVDf_dPhS7iU6vudKusEqK4ics7_jFMZhq0o4,2731
  zenml/actions/__init__.py,sha256=mrt6wPo73iKRxK754_NqsGyJ3buW7RnVeIGXr1xEw8Y,681
  zenml/actions/base_action.py,sha256=UcaHev6BTuLDwuswnyaPjdA8AgUqB5xPZ-lRtuvf2FU,25553
@@ -338,16 +338,16 @@ zenml/integrations/kubernetes/flavors/kubernetes_orchestrator_flavor.py,sha256=8
  zenml/integrations/kubernetes/flavors/kubernetes_step_operator_flavor.py,sha256=xFO7cSusji-mgbRrt4mU29gdyC9iEjEHKtomdFLp9mM,6265
  zenml/integrations/kubernetes/orchestrators/__init__.py,sha256=TJID3OTieZBox36WpQpzD0jdVRA_aZVcs_bNtfXS8ik,811
  zenml/integrations/kubernetes/orchestrators/kube_utils.py,sha256=N66GH5ac22Xm_A3nr162kbFBhMeypSFaQjOQRHlGXIQ,18942
- zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py,sha256=qmF3i9Zvo2mMtF9WW80SWIPnbths9L_HEUGYZ_OZSus,26575
- zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint.py,sha256=WAeZH5ro1k_edCoE4y2nhX-1lGS_0EEFwzSbp1BItRw,13010
- zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint_configuration.py,sha256=KjHfQK9VQEQkkkM2i9w51AzqolgIU01M5dgb2YGamvY,2754
- zenml/integrations/kubernetes/orchestrators/manifest_utils.py,sha256=QmK0HrAMalalcQMyG0rqBysoB3srQ4ywnKARdf-LtFc,14083
+ zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator.py,sha256=iuUllVHlXTvTftGk0Gs3EIITG0XXKwCT68UOfLHoJMs,27603
+ zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint.py,sha256=LTqbww_bQFnRS18VAPrxB356UoX8YJI6nlDR74tzqoo,13572
+ zenml/integrations/kubernetes/orchestrators/kubernetes_orchestrator_entrypoint_configuration.py,sha256=QOwQnWCfB-t_BQ2eOZN0SakurGUd0GTMCSdUlREhk6I,2324
+ zenml/integrations/kubernetes/orchestrators/manifest_utils.py,sha256=8_OU6cQxLRzkd1r40YNRoAnz5qHSFQOvVxHUhh88T04,13670
  zenml/integrations/kubernetes/pod_settings.py,sha256=QPV8Fq8SHZ7pwKavplcADJep-Y9olIbC3RlG10HI0pw,6683
  zenml/integrations/kubernetes/serialization_utils.py,sha256=cPSe4szdBLzDnUZT9nQc2CCA8h84aj5oTA8vsUE36ig,7000
  zenml/integrations/kubernetes/service_connectors/__init__.py,sha256=Uf6zlHIapYrRDl3xOPWQ2jA7jt85SXx1U7DmSxzxTvQ,818
  zenml/integrations/kubernetes/service_connectors/kubernetes_service_connector.py,sha256=Cv4tiVxoQOz9ex0lf3JdJrooEkgMwfDfwt5GOeNRpQU,19669
  zenml/integrations/kubernetes/step_operators/__init__.py,sha256=40utDPYAezxHsFgO0UUIT_6XpCDzDapje6OH951XsTs,806
- zenml/integrations/kubernetes/step_operators/kubernetes_step_operator.py,sha256=v_Itgr31-I9EeMngSHk4eSO48taurMEiQNE7SV2L0no,8748
+ zenml/integrations/kubernetes/step_operators/kubernetes_step_operator.py,sha256=lpkrA_barndyNLKqEUTaOIYJ6V8kHX9SfK5XFKK1md4,8848
  zenml/integrations/label_studio/__init__.py,sha256=sF2c9FxTDRlbcu95OxaUNKNtIhC1LgfmBRKY4jBME38,1475
  zenml/integrations/label_studio/annotators/__init__.py,sha256=YtOtSfS1_NBoLoXIygEerElBP1-B98UU0HOAEfzdRY0,821
  zenml/integrations/label_studio/annotators/label_studio_annotator.py,sha256=VkuW4zsZhHz8__P9WTTLRTF-FOmoYB-_cFqBdu-PUyA,30498
@@ -549,7 +549,7 @@ zenml/integrations/vllm/flavors/vllm_model_deployer_flavor.py,sha256=_3P0-qyjdsV
  zenml/integrations/vllm/model_deployers/__init__.py,sha256=Z38oWIfkArNsxCm3rQkTdYK4dbtx2BpTUw1gw_kl6Do,803
  zenml/integrations/vllm/model_deployers/vllm_model_deployer.py,sha256=OYPNSkB-I5r4eQ_7kr4F7GDwNj6efcsio8WRteQ5cYI,9665
  zenml/integrations/vllm/services/__init__.py,sha256=Id28GEfHECI0RnGAGGNioD9eZ6aJxdNebe112VgC59g,788
- zenml/integrations/vllm/services/vllm_deployment.py,sha256=lU1icw10uMJ8fehc94eqyAQ53WQDkzRwW0G3hiy0hNs,7140
+ zenml/integrations/vllm/services/vllm_deployment.py,sha256=wRFo8ebN6BEBLs1ZdEdNsFXSk38MJatKEyu_euJJbLc,7107
  zenml/integrations/wandb/__init__.py,sha256=5aTIc27MeYzODDGeJHZmtRhTSNkg7Vt2tHgrtGTCD_c,1705
  zenml/integrations/wandb/experiment_trackers/__init__.py,sha256=8nFyyvh-PTF5d9ZfjS7xFSWTWSpreRB1azePv-Ex2sc,771
  zenml/integrations/wandb/experiment_trackers/wandb_experiment_tracker.py,sha256=GV5zDPgj6Dh3ho2MMUC1Da1ezPrNtr4RE9tisWGde00,5749
@@ -649,7 +649,7 @@ zenml/models/v2/core/model_version_pipeline_run.py,sha256=JbPZZEQvOK9I32htkWdAON
  zenml/models/v2/core/pipeline.py,sha256=OXArih3YodMAZBS_GzuLL6VPpFv3c59EthwDfDZiNGk,12044
  zenml/models/v2/core/pipeline_build.py,sha256=z0LCc8aR8AdNFLLtzaAP0U0ffv7WpmSx9nNAyI__e14,17008
  zenml/models/v2/core/pipeline_deployment.py,sha256=nrJHrweNlwFsIdExINMF7jwhjOwBpOd8iwtEyAnvlz4,11971
- zenml/models/v2/core/pipeline_run.py,sha256=8IzcMYE3Lmx6PCuCwIsnhD-f_E8tvNgF7fnx3rlHuhM,32257
+ zenml/models/v2/core/pipeline_run.py,sha256=hWnM6Tf72uE9nxuSV6xacCMogOozuExvdtILgO6il98,32522
  zenml/models/v2/core/project.py,sha256=fNNO8Tg5OhSzmFf2t6g4SpUzGWC96oHhUccVyWytvIE,5627
  zenml/models/v2/core/run_metadata.py,sha256=hRGQa_sk99uDSab3EyyOQhefypVpiQDCH3oAtblREDk,2432
  zenml/models/v2/core/run_template.py,sha256=6jdH1son7kpvFv-vtaOL2nXMATtVpqk_7a2xOVv_7cc,14097
@@ -685,8 +685,8 @@ zenml/orchestrators/__init__.py,sha256=Nhmc6U-q6c8TEH1Jb5l8XaKnc4KmLNspDpvvV5Tcv
  zenml/orchestrators/base_orchestrator.py,sha256=1L2oJTNpVWUu5ndqyQPdcf2S5ebM7r2IHHrFatV6kPQ,14612
  zenml/orchestrators/cache_utils.py,sha256=QkmTs-ANfXve9_QzTqgGlyulZDEWOngoTcsiSjG5aA8,5906
  zenml/orchestrators/containerized_orchestrator.py,sha256=rdebgBW0Bk--JcHcT0NpLkAbyhY0VS5xO1uwWEgkLpA,3230
- zenml/orchestrators/dag_runner.py,sha256=4oAGc52nHv8HeI_Vj7GH1jzjJom1uwUJ_la9usQoOFY,9404
- zenml/orchestrators/input_utils.py,sha256=dSnL4KDLARY4Ldn3CI61sfCOcgsc2RfLue83WkgRrio,7950
+ zenml/orchestrators/dag_runner.py,sha256=Ol5P24i8Oa0mmZeNZnUdd_HrQDawx8kP1epWa-b6f3g,9801
+ zenml/orchestrators/input_utils.py,sha256=IwDYEwhFVcvZcdi9Myg__UB1bkZfpa8xZ0XRSwc4-go,6934
  zenml/orchestrators/local/__init__.py,sha256=qlU91hgqGKeycw-Qntrn-iMuoMTaNrq-RgfOFeqwlM4,662
  zenml/orchestrators/local/local_orchestrator.py,sha256=KCzc901_wrb1DPTDu_IY6HFxTghe2oiLYPAdxEpj7K4,6033
  zenml/orchestrators/local_docker/__init__.py,sha256=k8J68ydy6HmmvE9tWo32g761H8P_Dw4AxWNf4UMpsbs,669
@@ -694,7 +694,7 @@ zenml/orchestrators/local_docker/local_docker_orchestrator.py,sha256=RA88Yq8K9-z
  zenml/orchestrators/output_utils.py,sha256=01vqke1ZfmfuLpgxNerF-QL2wA0VPv1zUdvlMw0OwUY,3508
  zenml/orchestrators/publish_utils.py,sha256=CSQKhx2f9r2knldDCuPR0lmVyRwI-Ps6Xbihfhxv21U,5477
  zenml/orchestrators/step_launcher.py,sha256=6hrLj0Dr5-FcKTm3cvLVnr3PuUpaYbyEs1MRQdAVzys,18064
- zenml/orchestrators/step_run_utils.py,sha256=jaH-WdJ8Bt6v8m4dD4LgOR0TRa4y50pYFRum14X8B2w,14831
+ zenml/orchestrators/step_run_utils.py,sha256=SZjVSkmO9Vvjb2ZqQembOjg3jTU2wHXAykwlVxQNVBg,17051
  zenml/orchestrators/step_runner.py,sha256=EUgKG_g0fOQ6gnB1hPSSa6UXwUKVkguC-Yj-Q0yEQXg,26632
  zenml/orchestrators/topsort.py,sha256=D8evz3X47zwpXd90NMLsJD-_uCeXtV6ClzNfDUrq7cM,5784
  zenml/orchestrators/utils.py,sha256=6bqLc1fmdJTXg8JUwUKs8YNbmxTuMIfWmUbUpg-7hx0,12956
@@ -703,8 +703,8 @@ zenml/pipelines/__init__.py,sha256=hpIX7hN8jsQRHT5R-xSXZL88qrHwkmrvGLQeu1rWt4o,8
  zenml/pipelines/build_utils.py,sha256=DltGesybT8qYum4i23mvWZlVRgp7UxWdbHd1Y9ySv5c,27889
  zenml/pipelines/pipeline_context.py,sha256=4BixReLcPo33VtNBDrMwnJqjKTinHjmO5AOfmoeIOQM,3659
  zenml/pipelines/pipeline_decorator.py,sha256=LB21QYrbFeBdUGwKBUNbdpXAxO4OOtYl5Vs_mzJNXqU,4600
- zenml/pipelines/pipeline_definition.py,sha256=CVZDquIjq8WaiRwJuckTH_SxLnkR08g1FdZxCsJlcUU,59634
- zenml/pipelines/run_utils.py,sha256=-f4Q32YM0qM3A678qU2L9PcyqhEm6gz2bIg5BRjCmjg,12562
+ zenml/pipelines/pipeline_definition.py,sha256=cQUeUcv6l9quw8xkYFA4YFNZcG_AzLHJArUXnxkFXUI,59759
+ zenml/pipelines/run_utils.py,sha256=VAjfdu300AKfTuwXll3uoFrwN5dt_hesXxtylndUraQ,12515
  zenml/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  zenml/plugins/base_plugin_flavor.py,sha256=88IxFW91UB_rQ8xPlfRnIhIJh7A308NEq2epMMdlOng,2530
  zenml/plugins/plugin_flavor_registry.py,sha256=LsN2Q0K-7EQ9H4uvlEG62Y0C1_Ro1UwppX4cnGbEcOA,10862
@@ -1075,7 +1075,7 @@ zenml/zen_server/routers/webhook_endpoints.py,sha256=4Ca6k_qyE5lZpflEqV0P4mcuXnh
  zenml/zen_server/secure_headers.py,sha256=glh6QujnjyeoH1_FK-tAS-105G-qKS_34AqSzqJ6TRc,4182
  zenml/zen_server/template_execution/__init__.py,sha256=79knXLKfegsvVSVSWecpqrepq6iAavTUA4hKuiDk-WE,613
  zenml/zen_server/template_execution/runner_entrypoint_configuration.py,sha256=Y8aYJhqqs8Kv8I1q-dM1WemS5VBIfyoaaYH_YkzC7iY,1541
- zenml/zen_server/template_execution/utils.py,sha256=DIdi9ipUMT2XkqunBXk5eNUR_qXqO7Bs1OOdWlkoab0,19413
+ zenml/zen_server/template_execution/utils.py,sha256=xaxG2gAaWDSFHH-R0-P_YxwVPnrZzw8imL0VM0I_LTA,19386
  zenml/zen_server/template_execution/workload_manager_interface.py,sha256=CL9c7z8ajuZE01DaHmdCDCZmsroDcTarvN-nE8jv6qQ,2590
  zenml/zen_server/utils.py,sha256=BKwaSRRWpYUaItCH4xwXOfG3JmsuFuf6Oi1B6dms0ps,24657
  zenml/zen_server/zen_server_api.py,sha256=bXriGA06xKe_Pz-Adw-dzJX9yWpQbS5Hqxbmg_7FQSA,11603
@@ -1308,7 +1308,7 @@ zenml/zen_stores/schemas/logs_schemas.py,sha256=qv6fs3JiVgzlmTXJqb_gG5NsU5q_50e0
  zenml/zen_stores/schemas/model_schemas.py,sha256=cDhWggrn3rTjaFML3iQGuW_4CpUJUxAnHieiY-dq0y4,26006
  zenml/zen_stores/schemas/pipeline_build_schemas.py,sha256=8GMdJNNcoSnbYH9daVr8zrhwQ1n-HZrpgzHoBZ7DZyA,7320
  zenml/zen_stores/schemas/pipeline_deployment_schemas.py,sha256=wCZVo8khyMOPMcO9e1itAb_3ehWFObCpgl6Pp2Yz88k,14780
- zenml/zen_stores/schemas/pipeline_run_schemas.py,sha256=k7E9VVJIQ9uYO9tR2pyATuX_F8HrPvi0uvhZ-H91Sbg,21361
+ zenml/zen_stores/schemas/pipeline_run_schemas.py,sha256=nL4E5LMbB2jl_wOG6EbJp2XkaJNL_p4HkAVjoZNpmsU,21974
  zenml/zen_stores/schemas/pipeline_schemas.py,sha256=xgioTeBuFFFDOJi5eESx2j-8mW55B6hshosFylev5Mw,8213
  zenml/zen_stores/schemas/project_schemas.py,sha256=X2GClPNQz0COsEZX8xI-I8Sm68Hb6f20Obm24mQyLS0,6013
  zenml/zen_stores/schemas/run_metadata_schemas.py,sha256=G94rT4ldluMSnf9rm7R_9rw_GlgaAyq72ptkHl0gHeg,3605
@@ -1334,11 +1334,11 @@ zenml/zen_stores/secrets_stores/hashicorp_secrets_store.py,sha256=5err1a-TrV3SR5
  zenml/zen_stores/secrets_stores/secrets_store_interface.py,sha256=Q2Jbnt2Pp7NGlR-u1YBfRZV2g8su2Fd0ArBMdksAE-Q,2819
  zenml/zen_stores/secrets_stores/service_connector_secrets_store.py,sha256=DrXGMkBxQIy2n_kkux5Xh2OM3Ks3MOpqP1D4aY8bfyY,7047
  zenml/zen_stores/secrets_stores/sql_secrets_store.py,sha256=LPFW757WCJLP1S8vrvjsrl2Tf1yo281xUTjSBsos4qk,8788
- zenml/zen_stores/sql_zen_store.py,sha256=WSTNVWbAN9YM2e5ZmyDLvlV0__EgDlL3ZLqyYGe9Ha0,479786
+ zenml/zen_stores/sql_zen_store.py,sha256=vir5WfbwPyZz7bIuxRzT5WHG_qkL5ijhWpSw4nGBfIU,481114
  zenml/zen_stores/template_utils.py,sha256=GbJ7LgGVYHSCKPEA8RNTxPoVTWqpC77F_lGzjJ4O1Fw,9220
  zenml/zen_stores/zen_store_interface.py,sha256=weiSULdI9AsbCE10a5TcwtybX-BJs9hKhjPJnTapWv4,93023
- zenml_nightly-0.83.1.dev20250627.dist-info/LICENSE,sha256=wbnfEnXnafPbqwANHkV6LUsPKOtdpsd-SNw37rogLtc,11359
- zenml_nightly-0.83.1.dev20250627.dist-info/METADATA,sha256=Bj7MlfHDc0hUbq_NSa5yD-_54c8jcHgteVC-Hz5t6bk,24316
- zenml_nightly-0.83.1.dev20250627.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- zenml_nightly-0.83.1.dev20250627.dist-info/entry_points.txt,sha256=QK3ETQE0YswAM2mWypNMOv8TLtr7EjnqAFq1br_jEFE,43
- zenml_nightly-0.83.1.dev20250627.dist-info/RECORD,,
+ zenml_nightly-0.83.1.dev20250628.dist-info/LICENSE,sha256=wbnfEnXnafPbqwANHkV6LUsPKOtdpsd-SNw37rogLtc,11359
+ zenml_nightly-0.83.1.dev20250628.dist-info/METADATA,sha256=eKK8-lKVvhMh_Sr63zGnmGUHpnH70mxMMx8-AxQDMbc,24316
+ zenml_nightly-0.83.1.dev20250628.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ zenml_nightly-0.83.1.dev20250628.dist-info/entry_points.txt,sha256=QK3ETQE0YswAM2mWypNMOv8TLtr7EjnqAFq1br_jEFE,43
+ zenml_nightly-0.83.1.dev20250628.dist-info/RECORD,,