apache-airflow-providers-cncf-kubernetes 10.10.0rc1__py3-none-any.whl → 10.12.0__py3-none-any.whl

This diff compares the contents of publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
Files changed (28)
  1. airflow/providers/cncf/kubernetes/__init__.py +3 -3
  2. airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py +1 -1
  3. airflow/providers/cncf/kubernetes/callbacks.py +1 -1
  4. airflow/providers/cncf/kubernetes/decorators/kubernetes.py +8 -3
  5. airflow/providers/cncf/kubernetes/decorators/kubernetes_cmd.py +6 -3
  6. airflow/providers/cncf/kubernetes/exceptions.py +7 -3
  7. airflow/providers/cncf/kubernetes/executors/kubernetes_executor.py +1 -2
  8. airflow/providers/cncf/kubernetes/executors/kubernetes_executor_utils.py +1 -1
  9. airflow/providers/cncf/kubernetes/hooks/kubernetes.py +118 -18
  10. airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py +65 -20
  11. airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py +1 -1
  12. airflow/providers/cncf/kubernetes/operators/job.py +13 -7
  13. airflow/providers/cncf/kubernetes/operators/kueue.py +1 -1
  14. airflow/providers/cncf/kubernetes/operators/pod.py +86 -34
  15. airflow/providers/cncf/kubernetes/operators/resource.py +3 -9
  16. airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py +20 -9
  17. airflow/providers/cncf/kubernetes/resource_convert/env_variable.py +1 -1
  18. airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py +2 -3
  19. airflow/providers/cncf/kubernetes/template_rendering.py +1 -1
  20. airflow/providers/cncf/kubernetes/triggers/pod.py +23 -8
  21. airflow/providers/cncf/kubernetes/utils/pod_manager.py +98 -86
  22. airflow/providers/cncf/kubernetes/version_compat.py +5 -1
  23. {apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-10.12.0.dist-info}/METADATA +12 -10
  24. {apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-10.12.0.dist-info}/RECORD +28 -28
  25. {apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-10.12.0.dist-info}/WHEEL +0 -0
  26. {apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-10.12.0.dist-info}/entry_points.txt +0 -0
  27. {apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-10.12.0.dist-info}/licenses/LICENSE +0 -0
  28. {apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-10.12.0.dist-info}/licenses/NOTICE +0 -0
airflow/providers/cncf/kubernetes/operators/pod.py

@@ -41,11 +41,6 @@ from kubernetes.stream import stream
 from urllib3.exceptions import HTTPError
 
 from airflow.configuration import conf
-from airflow.exceptions import (
-    AirflowException,
-    AirflowSkipException,
-    TaskDeferred,
-)
 from airflow.providers.cncf.kubernetes import pod_generator
 from airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters import (
     convert_affinity,
@@ -65,6 +60,7 @@ from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import (
     POD_NAME_MAX_LENGTH,
     add_unique_suffix,
     create_unique_id,
+    generic_api_retry,
 )
 from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator
 from airflow.providers.cncf.kubernetes.triggers.pod import KubernetesPodTrigger
@@ -82,12 +78,15 @@ from airflow.providers.cncf.kubernetes.utils.pod_manager import (
     PodPhase,
 )
 from airflow.providers.cncf.kubernetes.version_compat import AIRFLOW_V_3_1_PLUS
-from airflow.providers.common.compat.sdk import XCOM_RETURN_KEY
+from airflow.providers.common.compat.sdk import XCOM_RETURN_KEY, AirflowSkipException, TaskDeferred
 
 if AIRFLOW_V_3_1_PLUS:
-    from airflow.sdk import BaseOperator
+    from airflow.sdk import BaseHook, BaseOperator
 else:
+    from airflow.hooks.base import BaseHook  # type: ignore[attr-defined, no-redef]
     from airflow.models import BaseOperator
+
+from airflow.providers.common.compat.sdk import AirflowException, AirflowNotFoundException
 from airflow.settings import pod_mutation_hook
 from airflow.utils import yaml
 from airflow.utils.helpers import prune_dict, validate_key
@@ -99,12 +98,7 @@ if TYPE_CHECKING:
 
     from airflow.providers.cncf.kubernetes.hooks.kubernetes import PodOperatorHookProtocol
     from airflow.providers.cncf.kubernetes.secret import Secret
-
-    try:
-        from airflow.sdk.definitions.context import Context
-    except ImportError:
-        # TODO: Remove once provider drops support for Airflow 2
-        from airflow.utils.context import Context
+    from airflow.sdk import Context
 
 alphanum_lower = string.ascii_lowercase + string.digits
 
@@ -126,6 +120,10 @@ class PodCredentialsExpiredFailure(AirflowException):
     """When pod fails to refresh credentials."""
 
 
+class FoundMoreThanOnePodFailure(AirflowException):
+    """When during reconnect more than one matching pod was found."""
+
+
 class KubernetesPodOperator(BaseOperator):
     """
     Execute a task in a Kubernetes Pod.
@@ -233,7 +231,8 @@ class KubernetesPodOperator(BaseOperator):
     :param log_pod_spec_on_failure: Log the pod's specification if a failure occurs
     :param on_finish_action: What to do when the pod reaches its final state, or the execution is interrupted.
         If "delete_pod", the pod will be deleted regardless its state; if "delete_succeeded_pod",
-        only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod.
+        only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod. "delete_active_pod" deletes
+        pods that are still active (Pending or Running).
     :param termination_message_policy: The termination message policy of the base container.
         Default value is "File"
     :param active_deadline_seconds: The active_deadline_seconds which translates to active_deadline_seconds
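
Note: the new "delete_active_pod" option rounds out the on_finish_action choices. A minimal, hypothetical usage sketch (task id, name, image, and command are illustrative, not taken from this diff):

    from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator

    # Clean up pods that are still Pending/Running when execution is
    # interrupted, while leaving pods that already reached a terminal phase.
    transient_job = KubernetesPodOperator(
        task_id="transient_job",               # illustrative
        name="transient-job",                  # illustrative
        image="busybox:1.36",                  # illustrative
        cmds=["sh", "-c", "sleep 300"],
        on_finish_action="delete_active_pod",  # new option in this release
    )
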
@@ -563,6 +562,7 @@ class KubernetesPodOperator(BaseOperator):
             callback.on_sync_client_creation(client=client, operator=self)
         return client
 
+    @generic_api_retry
     def find_pod(self, namespace: str, context: Context, *, exclude_checked: bool = True) -> k8s.V1Pod | None:
         """Return an already-running pod for this task instance if one exists."""
         label_selector = self._build_find_pod_label_selector(context, exclude_checked=exclude_checked)
@@ -579,7 +579,7 @@ class KubernetesPodOperator(BaseOperator):
             self.log_matching_pod(pod=pod, context=context)
         elif num_pods > 1:
             if self.reattach_on_restart:
-                raise AirflowException(f"More than one pod running with labels {label_selector}")
+                raise FoundMoreThanOnePodFailure(f"More than one pod running with labels {label_selector}")
             self.log.warning("Found more than one pod running with labels %s, resolving ...", label_selector)
             pod = self.process_duplicate_label_pods(pod_list)
             self.log_matching_pod(pod=pod, context=context)
@@ -628,14 +628,26 @@ class KubernetesPodOperator(BaseOperator):
         try:
 
             async def _await_pod_start():
-                events_task = self.pod_manager.watch_pod_events(pod, self.startup_check_interval_seconds)
-                pod_start_task = self.pod_manager.await_pod_start(
-                    pod=pod,
-                    schedule_timeout=self.schedule_timeout_seconds,
-                    startup_timeout=self.startup_timeout_seconds,
-                    check_interval=self.startup_check_interval_seconds,
+                # Start event stream in background
+                events_task = asyncio.create_task(
+                    self.pod_manager.watch_pod_events(pod, self.startup_check_interval_seconds)
                 )
-                await asyncio.gather(pod_start_task, events_task)
+
+                # Await pod start completion
+                try:
+                    await self.pod_manager.await_pod_start(
+                        pod=pod,
+                        schedule_timeout=self.schedule_timeout_seconds,
+                        startup_timeout=self.startup_timeout_seconds,
+                        check_interval=self.startup_check_interval_seconds,
+                    )
+                finally:
+                    # Stop watching events
+                    events_task.cancel()
+                    try:
+                        await events_task
+                    except asyncio.CancelledError:
+                        pass
 
             asyncio.run(_await_pod_start())
         except PodLaunchFailedException:
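
Note: the gather-based version awaited the event watcher as a peer, so a failure in await_pod_start could leave the watcher running or surface its cancellation. The new shape treats the watcher as an explicitly managed background task. A self-contained sketch of the same pattern (the coroutines are stand-ins, not the provider's API):

    import asyncio

    async def watch_events() -> None:
        # Stand-in for pod_manager.watch_pod_events: runs until cancelled.
        while True:
            await asyncio.sleep(1)

    async def wait_for_start() -> None:
        # Stand-in for pod_manager.await_pod_start: completes or raises on its own.
        await asyncio.sleep(3)

    async def main() -> None:
        events_task = asyncio.create_task(watch_events())
        try:
            await wait_for_start()
        finally:
            # Cancel the watcher whether wait_for_start returned or raised, then
            # await it so the CancelledError is consumed here rather than leaked.
            events_task.cancel()
            try:
                await events_task
            except asyncio.CancelledError:
                pass

    asyncio.run(main())
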
@@ -854,6 +866,21 @@ class KubernetesPodOperator(BaseOperator):
     def invoke_defer_method(self, last_log_time: DateTime | None = None) -> None:
         """Redefine triggers which are being used in child classes."""
         self.convert_config_file_to_dict()
+
+        connection_extras = None
+        if self.kubernetes_conn_id:
+            try:
+                conn = BaseHook.get_connection(self.kubernetes_conn_id)
+            except AirflowNotFoundException:
+                self.log.warning(
+                    "Could not resolve connection extras for deferral: connection `%s` not found. "
+                    "Triggerer will try to resolve it from its own environment.",
+                    self.kubernetes_conn_id,
+                )
+            else:
+                connection_extras = conn.extra_dejson
+                self.log.info("Successfully resolved connection extras for deferral.")
+
         trigger_start_time = datetime.datetime.now(tz=datetime.timezone.utc)
         self.defer(
             trigger=KubernetesPodTrigger(
@@ -861,6 +888,7 @@ class KubernetesPodOperator(BaseOperator):
                 pod_namespace=self.pod.metadata.namespace,  # type: ignore[union-attr]
                 trigger_start_time=trigger_start_time,
                 kubernetes_conn_id=self.kubernetes_conn_id,
+                connection_extras=connection_extras,
                 cluster_context=self.cluster_context,
                 config_dict=self._config_dict,
                 in_cluster=self.in_cluster,
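
Note: together with the trigger-side hunks below, the worker now resolves the connection while the metadata database or secrets backend is still reachable and ships its extras inside the serialized trigger payload, so the triggerer can fall back to them if it cannot resolve the connection itself. A hedged sketch of the worker-side half ("kubernetes_default" is an illustrative connection id; the import path varies by Airflow version, as the compat shims in this diff show):

    from airflow.hooks.base import BaseHook

    conn = BaseHook.get_connection("kubernetes_default")
    trigger_kwargs = {
        "kubernetes_conn_id": "kubernetes_default",
        "connection_extras": conn.extra_dejson,  # plain dict, JSON-serializable
    }
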
@@ -936,6 +964,9 @@ class KubernetesPodOperator(BaseOperator):
         finally:
             self._clean(event=event, context=context, result=xcom_sidecar_output)
 
+        if self.do_xcom_push and xcom_sidecar_output:
+            context["ti"].xcom_push(XCOM_RETURN_KEY, xcom_sidecar_output)
+
     def _clean(self, event: dict[str, Any], result: dict | None, context: Context) -> None:
         if self.pod is None:
             return
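
Note: because the sidecar result is pushed under XCOM_RETURN_KEY after the deferred path completes, a downstream task reads it like an ordinary return value ("pod_task" is an illustrative upstream task id):

    def consume(ti) -> None:
        # xcom_pull with no key defaults to XCOM_RETURN_KEY.
        result = ti.xcom_pull(task_ids="pod_task")
        print(result)
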
@@ -1024,7 +1055,11 @@ class KubernetesPodOperator(BaseOperator):
         pod_phase = remote_pod.status.phase if hasattr(remote_pod, "status") else None
 
         # if the pod fails or success, but we don't want to delete it
-        if pod_phase != PodPhase.SUCCEEDED or self.on_finish_action == OnFinishAction.KEEP_POD:
+        if (
+            pod_phase != PodPhase.SUCCEEDED
+            or self.on_finish_action == OnFinishAction.KEEP_POD
+            or self.on_finish_action == OnFinishAction.DELETE_ACTIVE_POD
+        ):
             self.patch_already_checked(remote_pod, reraise=False)
 
         failed = (pod_phase != PodPhase.SUCCEEDED and not istio_enabled) or (
@@ -1160,13 +1195,21 @@ class KubernetesPodOperator(BaseOperator):
     def process_pod_deletion(self, pod: k8s.V1Pod, *, reraise=True) -> bool:
         with _optionally_suppress(reraise=reraise):
             if pod is not None:
-                should_delete_pod = (self.on_finish_action == OnFinishAction.DELETE_POD) or (
-                    self.on_finish_action == OnFinishAction.DELETE_SUCCEEDED_POD
-                    and (
-                        pod.status.phase == PodPhase.SUCCEEDED
-                        or container_is_succeeded(pod, self.base_container_name)
+                should_delete_pod = (
+                    (self.on_finish_action == OnFinishAction.DELETE_POD)
+                    or (
+                        self.on_finish_action == OnFinishAction.DELETE_SUCCEEDED_POD
+                        and (
+                            pod.status.phase == PodPhase.SUCCEEDED
+                            or container_is_succeeded(pod, self.base_container_name)
+                        )
+                    )
+                    or (
+                        self.on_finish_action == OnFinishAction.DELETE_ACTIVE_POD
+                        and (pod.status.phase == PodPhase.RUNNING or pod.status.phase == PodPhase.PENDING)
                     )
                 )
+
                 if should_delete_pod:
                     self.log.info("Deleting pod: %s", pod.metadata.name)
                     self.pod_manager.delete_pod(pod)
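
Note: spelled out, the deletion decision now distinguishes three delete actions plus the keep default. A hypothetical restatement of the predicate (not part of the provider):

    def should_delete(action: str, phase: str, base_container_succeeded: bool) -> bool:
        if action == "delete_pod":
            return True
        if action == "delete_succeeded_pod":
            return phase == "Succeeded" or base_container_succeeded
        if action == "delete_active_pod":
            return phase in ("Pending", "Running")
        return False  # "keep_pod" and anything else
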
@@ -1198,11 +1241,16 @@ class KubernetesPodOperator(BaseOperator):
     def patch_already_checked(self, pod: k8s.V1Pod, *, reraise=True):
         """Add an "already checked" label to ensure we don't reattach on retries."""
         with _optionally_suppress(reraise=reraise):
-            self.client.patch_namespaced_pod(
-                name=pod.metadata.name,
-                namespace=pod.metadata.namespace,
-                body={"metadata": {"labels": {self.POD_CHECKED_KEY: "True"}}},
-            )
+
+            @generic_api_retry
+            def _patch_with_retry():
+                self.client.patch_namespaced_pod(
+                    name=pod.metadata.name,
+                    namespace=pod.metadata.namespace,
+                    body={"metadata": {"labels": {self.POD_CHECKED_KEY: "True"}}},
+                )
+
+            _patch_with_retry()
 
     def on_kill(self) -> None:
         self._killed = True
@@ -1215,8 +1263,12 @@ class KubernetesPodOperator(BaseOperator):
             if self.termination_grace_period is not None:
                 kwargs.update(grace_period_seconds=self.termination_grace_period)
 
-            try:
+            @generic_api_retry
+            def _delete_with_retry():
                 self.client.delete_namespaced_pod(**kwargs)
+
+            try:
+                _delete_with_retry()
             except kubernetes.client.exceptions.ApiException:
                 self.log.exception("Unable to delete pod %s", self.pod.metadata.name)
 
airflow/providers/cncf/kubernetes/operators/resource.py

@@ -23,16 +23,15 @@ from collections.abc import Sequence
 from functools import cached_property
 from typing import TYPE_CHECKING
 
-import tenacity
 import yaml
 from kubernetes.utils import create_from_yaml
 
-from airflow.exceptions import AirflowException
 from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
-from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import should_retry_creation
+from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import generic_api_retry
 from airflow.providers.cncf.kubernetes.utils.delete_from import delete_from_yaml
 from airflow.providers.cncf.kubernetes.utils.k8s_resource_iterator import k8s_resource_iterator
 from airflow.providers.cncf.kubernetes.version_compat import AIRFLOW_V_3_1_PLUS
+from airflow.providers.common.compat.sdk import AirflowException
 
 if AIRFLOW_V_3_1_PLUS:
     from airflow.sdk import BaseOperator
@@ -132,12 +131,7 @@ class KubernetesCreateResourceOperator(KubernetesResourceBaseOperator):
         else:
             self.custom_object_client.create_cluster_custom_object(group, version, plural, body)
 
-    @tenacity.retry(
-        stop=tenacity.stop_after_attempt(3),
-        wait=tenacity.wait_random_exponential(),
-        reraise=True,
-        retry=tenacity.retry_if_exception(should_retry_creation),
-    )
+    @generic_api_retry
     def _create_objects(self, objects):
         self.log.info("Starting resource creation")
         if not self.custom_resource_definition:
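
Note: the removed decorator documents the retry policy that generic_api_retry replaces; its actual implementation lives in kubernetes_helper_functions.py, which is not shown in this diff. A plausible sketch, assuming it keeps the same tenacity policy behind a broader transient-error predicate (the predicate and status codes here are assumptions):

    import tenacity
    from kubernetes.client.exceptions import ApiException

    def _is_transient(exc: BaseException) -> bool:
        # Assumption: retry only typically-transient Kubernetes API failures.
        return isinstance(exc, ApiException) and exc.status in (429, 500, 503)

    generic_api_retry = tenacity.retry(
        stop=tenacity.stop_after_attempt(3),
        wait=tenacity.wait_random_exponential(),
        reraise=True,
        retry=tenacity.retry_if_exception(_is_transient),
    )
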
airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py

@@ -23,7 +23,6 @@ from typing import TYPE_CHECKING, Any, cast
 
 from kubernetes.client import CoreV1Api, CustomObjectsApi, models as k8s
 
-from airflow.exceptions import AirflowException
 from airflow.providers.cncf.kubernetes import pod_generator
 from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook, _load_body_to_dict
 from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import add_unique_suffix
@@ -31,16 +30,13 @@ from airflow.providers.cncf.kubernetes.operators.custom_object_launcher import CustomObjectLauncher
 from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator
 from airflow.providers.cncf.kubernetes.pod_generator import MAX_LABEL_LEN, PodGenerator
 from airflow.providers.cncf.kubernetes.utils.pod_manager import PodManager
+from airflow.providers.common.compat.sdk import AirflowException
 from airflow.utils.helpers import prune_dict
 
 if TYPE_CHECKING:
     import jinja2
 
-    try:
-        from airflow.sdk.definitions.context import Context
-    except ImportError:
-        # TODO: Remove once provider drops support for Airflow 2
-        from airflow.utils.context import Context
+    from airflow.sdk import Context
 
 
 class SparkKubernetesOperator(KubernetesPodOperator):
@@ -286,6 +282,16 @@ class SparkKubernetesOperator(KubernetesPodOperator):
     def custom_obj_api(self) -> CustomObjectsApi:
         return CustomObjectsApi()
 
+    @cached_property
+    def launcher(self) -> CustomObjectLauncher:
+        return CustomObjectLauncher(
+            name=self.name,
+            namespace=self.namespace,
+            kube_client=self.client,
+            custom_obj_api=self.custom_obj_api,
+            template_body=self.template_body,
+        )
+
     def get_or_create_spark_crd(self, launcher: CustomObjectLauncher, context) -> k8s.V1Pod:
         if self.reattach_on_restart:
             driver_pod = self.find_spark_job(context)
@@ -323,6 +329,8 @@ class SparkKubernetesOperator(KubernetesPodOperator):
             )
             self.pod = existing_pod
             self.pod_request_obj = None
+            if self.pod.metadata.name.endswith("-driver"):
+                self.name = self.pod.metadata.name.removesuffix("-driver")
             return
 
         if "spark" not in template_body:
@@ -361,9 +369,12 @@ class SparkKubernetesOperator(KubernetesPodOperator):
         return self.find_spark_job(context, exclude_checked=exclude_checked)
 
     def on_kill(self) -> None:
-        if self.launcher:
-            self.log.debug("Deleting spark job for task %s", self.task_id)
-            self.launcher.delete_spark_job()
+        self.log.debug("Deleting spark job for task %s", self.task_id)
+        job_name = self.name
+        if self.pod and self.pod.metadata and self.pod.metadata.name:
+            if self.pod.metadata.name.endswith("-driver"):
+                job_name = self.pod.metadata.name.removesuffix("-driver")
+        self.launcher.delete_spark_job(spark_job_name=job_name)
 
     def patch_already_checked(self, pod: k8s.V1Pod, *, reraise=True):
         """Add an "already checked" annotation to ensure we don't reattach on retries."""
airflow/providers/cncf/kubernetes/resource_convert/env_variable.py

@@ -18,7 +18,7 @@ from __future__ import annotations
 
 from kubernetes.client import models as k8s
 
-from airflow.exceptions import AirflowException
+from airflow.providers.common.compat.sdk import AirflowException
 
 
 def convert_env_vars(env_vars) -> list[k8s.V1EnvVar]:
airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py

@@ -23,12 +23,11 @@ from typing import TYPE_CHECKING
 
 from kubernetes import client
 
-from airflow.exceptions import AirflowException
 from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
-from airflow.providers.common.compat.sdk import BaseSensorOperator
+from airflow.providers.common.compat.sdk import AirflowException, BaseSensorOperator
 
 if TYPE_CHECKING:
-    from airflow.utils.context import Context
+    from airflow.sdk import Context
 
 
 class SparkKubernetesSensor(BaseSensorOperator):
airflow/providers/cncf/kubernetes/template_rendering.py

@@ -22,10 +22,10 @@ from typing import TYPE_CHECKING
 from jinja2 import TemplateAssertionError, UndefinedError
 from kubernetes.client.api_client import ApiClient
 
-from airflow.exceptions import AirflowException
 from airflow.providers.cncf.kubernetes.kube_config import KubeConfig
 from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import create_unique_id
 from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator, generate_pod_command_args
+from airflow.providers.common.compat.sdk import AirflowException
 from airflow.utils.session import NEW_SESSION, provide_session
 
 if TYPE_CHECKING:
airflow/providers/cncf/kubernetes/triggers/pod.py

@@ -88,6 +88,7 @@ class KubernetesPodTrigger(BaseTrigger):
         trigger_start_time: datetime.datetime,
         base_container_name: str,
         kubernetes_conn_id: str | None = None,
+        connection_extras: dict | None = None,
         poll_interval: float = 2,
         cluster_context: str | None = None,
         config_dict: dict | None = None,
@@ -107,6 +108,7 @@ class KubernetesPodTrigger(BaseTrigger):
         self.trigger_start_time = trigger_start_time
         self.base_container_name = base_container_name
         self.kubernetes_conn_id = kubernetes_conn_id
+        self.connection_extras = connection_extras
         self.poll_interval = poll_interval
         self.cluster_context = cluster_context
         self.config_dict = config_dict
@@ -130,6 +132,7 @@ class KubernetesPodTrigger(BaseTrigger):
                 "pod_namespace": self.pod_namespace,
                 "base_container_name": self.base_container_name,
                 "kubernetes_conn_id": self.kubernetes_conn_id,
+                "connection_extras": self.connection_extras,
                 "poll_interval": self.poll_interval,
                 "cluster_context": self.cluster_context,
                 "config_dict": self.config_dict,
@@ -241,14 +244,25 @@ class KubernetesPodTrigger(BaseTrigger):
     async def _wait_for_pod_start(self) -> ContainerState:
         """Loops until pod phase leaves ``PENDING`` If timeout is reached, throws error."""
         pod = await self._get_pod()
-        events_task = self.pod_manager.watch_pod_events(pod, self.startup_check_interval)
-        pod_start_task = self.pod_manager.await_pod_start(
-            pod=pod,
-            schedule_timeout=self.schedule_timeout,
-            startup_timeout=self.startup_timeout,
-            check_interval=self.startup_check_interval,
-        )
-        await asyncio.gather(pod_start_task, events_task)
+        # Start event stream in background
+        events_task = asyncio.create_task(self.pod_manager.watch_pod_events(pod, self.startup_check_interval))
+
+        # Await pod start completion
+        try:
+            await self.pod_manager.await_pod_start(
+                pod=pod,
+                schedule_timeout=self.schedule_timeout,
+                startup_timeout=self.startup_timeout,
+                check_interval=self.startup_check_interval,
+            )
+        finally:
+            # Stop watching events
+            events_task.cancel()
+            try:
+                await events_task
+            except asyncio.CancelledError:
+                pass
+
         return self.define_container_state(await self._get_pod())
 
     async def _wait_for_container_completion(self) -> TriggerEvent:
@@ -313,6 +327,7 @@ class KubernetesPodTrigger(BaseTrigger):
             in_cluster=self.in_cluster,
             config_dict=self.config_dict,
             cluster_context=self.cluster_context,
+            connection_extras=self.connection_extras,
         )
     )
     @cached_property