apache-airflow-providers-cncf-kubernetes 8.0.1rc1__py3-none-any.whl → 8.1.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Files changed (28)
  1. airflow/providers/cncf/kubernetes/__init__.py +1 -1
  2. airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py +1 -0
  3. airflow/providers/cncf/kubernetes/executors/kubernetes_executor.py +1 -0
  4. airflow/providers/cncf/kubernetes/get_provider_info.py +6 -2
  5. airflow/providers/cncf/kubernetes/hooks/kubernetes.py +149 -0
  6. airflow/providers/cncf/kubernetes/k8s_model.py +1 -0
  7. airflow/providers/cncf/kubernetes/kube_client.py +1 -0
  8. airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py +1 -1
  9. airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py +4 -3
  10. airflow/providers/cncf/kubernetes/operators/job.py +239 -0
  11. airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py +1 -0
  12. airflow/providers/cncf/kubernetes/operators/pod.py +35 -16
  13. airflow/providers/cncf/kubernetes/operators/resource.py +47 -13
  14. airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py +27 -3
  15. airflow/providers/cncf/kubernetes/pod_generator.py +3 -1
  16. airflow/providers/cncf/kubernetes/pod_generator_deprecated.py +1 -0
  17. airflow/providers/cncf/kubernetes/pod_launcher_deprecated.py +1 -0
  18. airflow/providers/cncf/kubernetes/python_kubernetes_script.py +1 -0
  19. airflow/providers/cncf/kubernetes/secret.py +1 -0
  20. airflow/providers/cncf/kubernetes/triggers/job.py +101 -0
  21. airflow/providers/cncf/kubernetes/triggers/kubernetes_pod.py +1 -0
  22. airflow/providers/cncf/kubernetes/triggers/pod.py +11 -3
  23. airflow/providers/cncf/kubernetes/utils/pod_manager.py +2 -1
  24. airflow/providers/cncf/kubernetes/utils/xcom_sidecar.py +1 -0
  25. {apache_airflow_providers_cncf_kubernetes-8.0.1rc1.dist-info → apache_airflow_providers_cncf_kubernetes-8.1.0.dist-info}/METADATA +9 -8
  26. {apache_airflow_providers_cncf_kubernetes-8.0.1rc1.dist-info → apache_airflow_providers_cncf_kubernetes-8.1.0.dist-info}/RECORD +28 -27
  27. {apache_airflow_providers_cncf_kubernetes-8.0.1rc1.dist-info → apache_airflow_providers_cncf_kubernetes-8.1.0.dist-info}/WHEEL +0 -0
  28. {apache_airflow_providers_cncf_kubernetes-8.0.1rc1.dist-info → apache_airflow_providers_cncf_kubernetes-8.1.0.dist-info}/entry_points.txt +0 -0
airflow/providers/cncf/kubernetes/__init__.py
@@ -27,7 +27,7 @@ import packaging.version
 
 __all__ = ["__version__"]
 
-__version__ = "8.0.1"
+__version__ = "8.1.0"
 
 try:
     from airflow import __version__ as airflow_version
airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """Executes task in a Kubernetes POD."""
+
 from __future__ import annotations
 
 from kubernetes.client import ApiClient, models as k8s
airflow/providers/cncf/kubernetes/executors/kubernetes_executor.py
@@ -21,6 +21,7 @@ KubernetesExecutor.
 For more information on how the KubernetesExecutor works, take a look at the guide:
 :doc:`/kubernetes_executor`
 """
+
 from __future__ import annotations
 
 import contextlib
airflow/providers/cncf/kubernetes/get_provider_info.py
@@ -28,8 +28,9 @@ def get_provider_info():
         "name": "Kubernetes",
         "description": "`Kubernetes <https://kubernetes.io/>`__\n",
         "state": "ready",
-        "source-date-epoch": 1709555165,
+        "source-date-epoch": 1712665231,
         "versions": [
+            "8.1.0",
             "8.0.1",
             "8.0.0",
             "7.14.0",
@@ -135,7 +136,10 @@ def get_provider_info():
         "triggers": [
             {
                 "integration-name": "Kubernetes",
-                "python-modules": ["airflow.providers.cncf.kubernetes.triggers.pod"],
+                "python-modules": [
+                    "airflow.providers.cncf.kubernetes.triggers.pod",
+                    "airflow.providers.cncf.kubernetes.triggers.job",
+                ],
             }
         ],
         "connection-types": [
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
@@ -16,10 +16,12 @@
 # under the License.
 from __future__ import annotations
 
+import asyncio
 import contextlib
 import json
 import tempfile
 from functools import cached_property
+from time import sleep
 from typing import TYPE_CHECKING, Any, Generator
 
 import aiofiles
@@ -37,10 +39,18 @@ from airflow.providers.cncf.kubernetes.utils.pod_manager import PodOperatorHookProtocol
 from airflow.utils import yaml
 
 if TYPE_CHECKING:
+    from kubernetes.client import V1JobList
     from kubernetes.client.models import V1Deployment, V1Job, V1Pod
 
 LOADING_KUBE_CONFIG_FILE_RESOURCE = "Loading Kubernetes configuration file kube_config from {}..."
 
+JOB_FINAL_STATUS_CONDITION_TYPES = {
+    "Complete",
+    "Failed",
+}
+
+JOB_STATUS_CONDITION_TYPES = JOB_FINAL_STATUS_CONDITION_TYPES | {"Suspended"}
+
 
 def _load_body_to_dict(body: str) -> dict:
     try:
@@ -502,6 +512,114 @@ class KubernetesHook(BaseHook, PodOperatorHookProtocol):
             raise e
         return resp
 
+    def get_job(self, job_name: str, namespace: str) -> V1Job:
+        """Get Job of specified name and namespace.
+
+        :param job_name: Name of Job to fetch.
+        :param namespace: Namespace of the Job.
+        :return: Job object
+        """
+        return self.batch_v1_client.read_namespaced_job(name=job_name, namespace=namespace, pretty=True)
+
+    def get_job_status(self, job_name: str, namespace: str) -> V1Job:
+        """Get job with status of specified name and namespace.
+
+        :param job_name: Name of Job to fetch.
+        :param namespace: Namespace of the Job.
+        :return: Job object
+        """
+        return self.batch_v1_client.read_namespaced_job_status(
+            name=job_name, namespace=namespace, pretty=True
+        )
+
+    def wait_until_job_complete(self, job_name: str, namespace: str, job_poll_interval: float = 10) -> V1Job:
+        """Block job of specified name and namespace until it is complete or failed.
+
+        :param job_name: Name of Job to fetch.
+        :param namespace: Namespace of the Job.
+        :param job_poll_interval: Interval in seconds between polling the job status
+        :return: Job object
+        """
+        while True:
+            self.log.info("Requesting status for the job '%s' ", job_name)
+            job: V1Job = self.get_job_status(job_name=job_name, namespace=namespace)
+            if self.is_job_complete(job=job):
+                return job
+            self.log.info("The job '%s' is incomplete. Sleeping for %i sec.", job_name, job_poll_interval)
+            sleep(job_poll_interval)
+
+    def list_jobs_all_namespaces(self) -> V1JobList:
+        """Get list of Jobs from all namespaces.
+
+        :return: V1JobList object
+        """
+        return self.batch_v1_client.list_job_for_all_namespaces(pretty=True)
+
+    def list_jobs_from_namespace(self, namespace: str) -> V1JobList:
+        """Get list of Jobs from dedicated namespace.
+
+        :param namespace: Namespace of the Job.
+        :return: V1JobList object
+        """
+        return self.batch_v1_client.list_namespaced_job(namespace=namespace, pretty=True)
+
+    def is_job_complete(self, job: V1Job) -> bool:
+        """Check whether the given job is complete (with success or fail).
+
+        :return: Boolean indicating that the given job is complete.
+        """
+        if status := job.status:
+            if conditions := status.conditions:
+                if final_condition_types := list(
+                    c for c in conditions if c.type in JOB_FINAL_STATUS_CONDITION_TYPES and c.status
+                ):
+                    s = "s" if len(final_condition_types) > 1 else ""
+                    self.log.info(
+                        "The job '%s' state%s: %s",
+                        job.metadata.name,
+                        s,
+                        ", ".join(f"{c.type} at {c.last_transition_time}" for c in final_condition_types),
+                    )
+                    return True
+        return False
+
+    @staticmethod
+    def is_job_failed(job: V1Job) -> str | bool:
+        """Check whether the given job is failed.
+
+        :return: Error message if the job is failed, and False otherwise.
+        """
+        if status := job.status:
+            conditions = status.conditions or []
+            if fail_condition := next((c for c in conditions if c.type == "Failed" and c.status), None):
+                return fail_condition.reason
+        return False
+
+    @staticmethod
+    def is_job_successful(job: V1Job) -> str | bool:
+        """Check whether the given job is completed successfully..
+
+        :return: Error message if the job is failed, and False otherwise.
+        """
+        if status := job.status:
+            conditions = status.conditions or []
+            return bool(next((c for c in conditions if c.type == "Complete" and c.status), None))
+        return False
+
+    def patch_namespaced_job(self, job_name: str, namespace: str, body: object) -> V1Job:
+        """
+        Update the specified Job.
+
+        :param job_name: name of the Job
+        :param namespace: the namespace to run within kubernetes
+        :param body: json object with parameters for update
+        """
+        return self.batch_v1_client.patch_namespaced_job(
+            name=job_name,
+            namespace=namespace,
+            body=body,
+        )
+
 
 def _get_bool(val) -> bool | None:
     """Convert val to bool if can be done with certainty; if we cannot infer intention we return None."""
@@ -667,3 +785,34 @@ class AsyncKubernetesHook(KubernetesHook):
         except HTTPError:
             self.log.exception("There was an error reading the kubernetes API.")
             raise
+
+    async def get_job_status(self, name: str, namespace: str) -> V1Job:
+        """
+        Get job's status object.
+
+        :param name: Name of the pod.
+        :param namespace: Name of the pod's namespace.
+        """
+        async with self.get_conn() as connection:
+            v1_api = async_client.BatchV1Api(connection)
+            job: V1Job = await v1_api.read_namespaced_job_status(
+                name=name,
+                namespace=namespace,
+            )
+        return job
+
+    async def wait_until_job_complete(self, name: str, namespace: str, poll_interval: float = 10) -> V1Job:
+        """Block job of specified name and namespace until it is complete or failed.
+
+        :param name: Name of Job to fetch.
+        :param namespace: Namespace of the Job.
+        :param poll_interval: Interval in seconds between polling the job status
+        :return: Job object
+        """
+        while True:
+            self.log.info("Requesting status for the job '%s' ", name)
+            job: V1Job = await self.get_job_status(name=name, namespace=namespace)
+            if self.is_job_complete(job=job):
+                return job
+            self.log.info("The job '%s' is incomplete. Sleeping for %i sec.", name, poll_interval)
+            await asyncio.sleep(poll_interval)
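The async variants mirror the synchronous signatures and are what the new KubernetesJobTrigger builds on. A sketch of direct use, under the same hypothetical cluster and connection assumptions as above:

import asyncio

from airflow.providers.cncf.kubernetes.hooks.kubernetes import AsyncKubernetesHook


async def main() -> None:
    hook = AsyncKubernetesHook(conn_id="kubernetes_default")
    # Awaits a final condition, sleeping poll_interval seconds between status reads.
    job = await hook.wait_until_job_complete(name="pi", namespace="default", poll_interval=5)
    print(job.status.conditions)


asyncio.run(main())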
airflow/providers/cncf/kubernetes/k8s_model.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """Classes for interacting with Kubernetes API."""
+
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
airflow/providers/cncf/kubernetes/kube_client.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """Client for kubernetes communication."""
+
 from __future__ import annotations
 
 import logging
airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py
@@ -104,7 +104,7 @@ def create_unique_id(
         name += task_id
     base_name = slugify(name, lowercase=True)[:max_length].strip(".-")
     if unique:
-        return add_pod_suffix(pod_name=base_name, rand_len=8, max_len=max_length)
+        return add_unique_suffix(name=base_name, rand_len=8, max_len=max_length)
     else:
         return base_name
 
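create_unique_id is the helper behind generated pod (and now job) names; the rename swaps in add_unique_suffix without changing behavior. A hypothetical call, with the exact suffix being random:

from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import create_unique_id

# Slugifies the combined dag_id/task_id and appends an 8-character random
# suffix, yielding something like "my-dag-my-task-a1b2c3d4".
print(create_unique_id(dag_id="my_dag", task_id="my_task"))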
airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """Launches Custom object."""
+
 from __future__ import annotations
 
 import time
@@ -59,9 +60,9 @@ class SparkJobSpec:
         if self.spec.get("dynamicAllocation", {}).get("enabled"):
             if not all(
                 [
-                    self.spec["dynamicAllocation"]["initialExecutors"],
-                    self.spec["dynamicAllocation"]["minExecutors"],
-                    self.spec["dynamicAllocation"]["maxExecutors"],
+                    self.spec["dynamicAllocation"].get("initialExecutors"),
+                    self.spec["dynamicAllocation"].get("minExecutors"),
+                    self.spec["dynamicAllocation"].get("maxExecutors"),
                 ]
             ):
                 raise AirflowException("Make sure initial/min/max value for dynamic allocation is passed")
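The change above replaces direct key access with .get(), so a dynamicAllocation block that omits one of the executor counts now fails with the intended AirflowException rather than a bare KeyError. A standalone illustration of the new check (spec values hypothetical, ValueError standing in for AirflowException):

# Under 8.0.x, the missing "minExecutors" key raised KeyError before the
# validation message could be produced; with .get() the value is simply None.
spec = {"dynamicAllocation": {"enabled": True, "initialExecutors": 2, "maxExecutors": 10}}
if spec.get("dynamicAllocation", {}).get("enabled"):
    if not all(
        [
            spec["dynamicAllocation"].get("initialExecutors"),
            spec["dynamicAllocation"].get("minExecutors"),
            spec["dynamicAllocation"].get("maxExecutors"),
        ]
    ):
        raise ValueError("Make sure initial/min/max value for dynamic allocation is passed")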
airflow/providers/cncf/kubernetes/operators/job.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """Executes a Kubernetes Job."""
+
 from __future__ import annotations
 
 import copy
@@ -25,7 +26,11 @@ from typing import TYPE_CHECKING, Sequence
 
 from kubernetes.client import BatchV1Api, models as k8s
 from kubernetes.client.api_client import ApiClient
+from kubernetes.client.rest import ApiException
 
+from airflow.configuration import conf
+from airflow.exceptions import AirflowException
+from airflow.models import BaseOperator
 from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
 from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import (
     add_unique_suffix,
@@ -33,7 +38,9 @@ from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import (
 )
 from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator
 from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator, merge_objects
+from airflow.providers.cncf.kubernetes.triggers.job import KubernetesJobTrigger
 from airflow.utils import yaml
+from airflow.utils.context import Context
 
 if TYPE_CHECKING:
     from airflow.utils.context import Context
@@ -65,6 +72,12 @@ class KubernetesJobOperator(KubernetesPodOperator):
     :param selector: The selector of this V1JobSpec.
     :param suspend: Suspend specifies whether the Job controller should create Pods or not.
     :param ttl_seconds_after_finished: ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed).
+    :param wait_until_job_complete: Whether to wait until started job finished execution (either Complete or
+        Failed). Default is False.
+    :param job_poll_interval: Interval in seconds between polling the job status. Default is 10.
+        Used if the parameter `wait_until_job_complete` set True.
+    :param deferrable: Run operator in the deferrable mode. Note that the parameter
+        `wait_until_job_complete` must be set True.
     """
 
     template_fields: Sequence[str] = tuple({"job_template_file"} | set(KubernetesPodOperator.template_fields))
@@ -82,6 +95,9 @@ class KubernetesJobOperator(KubernetesPodOperator):
         selector: k8s.V1LabelSelector | None = None,
         suspend: bool | None = None,
         ttl_seconds_after_finished: int | None = None,
+        wait_until_job_complete: bool = False,
+        job_poll_interval: float = 10,
+        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
         **kwargs,
     ) -> None:
         super().__init__(**kwargs)
@@ -97,6 +113,9 @@ class KubernetesJobOperator(KubernetesPodOperator):
         self.selector = selector
         self.suspend = suspend
         self.ttl_seconds_after_finished = ttl_seconds_after_finished
+        self.wait_until_job_complete = wait_until_job_complete
+        self.job_poll_interval = job_poll_interval
+        self.deferrable = deferrable
 
     @cached_property
     def _incluster_namespace(self):
@@ -126,6 +145,11 @@ class KubernetesJobOperator(KubernetesPodOperator):
         return job_request_obj
 
     def execute(self, context: Context):
+        if self.deferrable and not self.wait_until_job_complete:
+            self.log.warning(
+                "Deferrable mode is available only with parameter `wait_until_job_complete=True`. "
+                "Please, set it up."
+            )
         self.job_request_obj = self.build_job_request_obj(context)
         self.job = self.create_job(  # must set `self.job` for `on_kill`
             job_request_obj=self.job_request_obj
@@ -135,6 +159,44 @@ class KubernetesJobOperator(KubernetesPodOperator):
         ti.xcom_push(key="job_name", value=self.job.metadata.name)
         ti.xcom_push(key="job_namespace", value=self.job.metadata.namespace)
 
+        if self.wait_until_job_complete and self.deferrable:
+            self.execute_deferrable()
+            return
+
+        if self.wait_until_job_complete:
+            self.job = self.hook.wait_until_job_complete(
+                job_name=self.job.metadata.name,
+                namespace=self.job.metadata.namespace,
+                job_poll_interval=self.job_poll_interval,
+            )
+
+        ti.xcom_push(key="job", value=self.job.to_dict())
+        if self.wait_until_job_complete:
+            if error_message := self.hook.is_job_failed(job=self.job):
+                raise AirflowException(
+                    f"Kubernetes job '{self.job.metadata.name}' is failed with error '{error_message}'"
+                )
+
+    def execute_deferrable(self):
+        self.defer(
+            trigger=KubernetesJobTrigger(
+                job_name=self.job.metadata.name,  # type: ignore[union-attr]
+                job_namespace=self.job.metadata.namespace,  # type: ignore[union-attr]
+                kubernetes_conn_id=self.kubernetes_conn_id,
+                cluster_context=self.cluster_context,
+                config_file=self.config_file,
+                in_cluster=self.in_cluster,
+                poll_interval=self.job_poll_interval,
+            ),
+            method_name="execute_complete",
+        )
+
+    def execute_complete(self, context: Context, event: dict, **kwargs):
+        ti = context["ti"]
+        ti.xcom_push(key="job", value=event["job"])
+        if event["status"] == "error":
+            raise AirflowException(event["message"])
+
     @staticmethod
     def deserialize_job_template_file(path: str) -> k8s.V1Job:
         """
@@ -163,6 +225,7 @@ class KubernetesJobOperator(KubernetesPodOperator):
         kwargs = {
             "name": job.metadata.name,
             "namespace": job.metadata.namespace,
+            "job": self.hook.batch_v1_client.api_client.sanitize_for_serialization(self.job),
         }
         if self.termination_grace_period is not None:
             kwargs.update(grace_period_seconds=self.termination_grace_period)
@@ -284,3 +347,179 @@ class KubernetesJobOperator(KubernetesPodOperator):
             return merge_objects(base_spec, client_spec)
 
         return None
+
+
+class KubernetesDeleteJobOperator(BaseOperator):
+    """
+    Delete a Kubernetes Job.
+
+    .. seealso::
+        For more information on how to use this operator, take a look at the guide:
+        :ref:`howto/operator:KubernetesDeleteJobOperator`
+
+    :param name: name of the Job.
+    :param namespace: the namespace to run within kubernetes.
+    :param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
+        for the Kubernetes cluster.
+    :param config_file: The path to the Kubernetes config file. (templated)
+        If not specified, default value is ``~/.kube/config``
+    :param in_cluster: run kubernetes client with in_cluster configuration.
+    :param cluster_context: context that points to kubernetes cluster.
+        Ignored when in_cluster is True. If None, current-context is used. (templated)
+    :param delete_on_status: Condition for performing delete operation depending on the job status. Values:
+        ``None`` - delete the job regardless of its status, "Complete" - delete only successfully completed
+        jobs, "Failed" - delete only failed jobs. (default: ``None``)
+    :param wait_for_completion: Whether to wait for the job to complete. (default: ``False``)
+    :param poll_interval: Interval in seconds between polling the job status. Used when the `delete_on_status`
+        parameter is set. (default: 10.0)
+    """
+
+    template_fields: Sequence[str] = (
+        "config_file",
+        "name",
+        "namespace",
+        "cluster_context",
+    )
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        namespace: str,
+        kubernetes_conn_id: str | None = KubernetesHook.default_conn_name,
+        config_file: str | None = None,
+        in_cluster: bool | None = None,
+        cluster_context: str | None = None,
+        delete_on_status: str | None = None,
+        wait_for_completion: bool = False,
+        poll_interval: float = 10.0,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        self.name = name
+        self.namespace = namespace
+        self.kubernetes_conn_id = kubernetes_conn_id
+        self.config_file = config_file
+        self.in_cluster = in_cluster
+        self.cluster_context = cluster_context
+        self.delete_on_status = delete_on_status
+        self.wait_for_completion = wait_for_completion
+        self.poll_interval = poll_interval
+
+    @cached_property
+    def hook(self) -> KubernetesHook:
+        return KubernetesHook(
+            conn_id=self.kubernetes_conn_id,
+            in_cluster=self.in_cluster,
+            config_file=self.config_file,
+            cluster_context=self.cluster_context,
+        )
+
+    @cached_property
+    def client(self) -> BatchV1Api:
+        return self.hook.batch_v1_client
+
+    def execute(self, context: Context):
+        try:
+            if self.delete_on_status not in ("Complete", "Failed", None):
+                raise AirflowException(
+                    "The `delete_on_status` parameter must be one of 'Complete', 'Failed' or None. "
+                    "The current value is %s",
+                    str(self.delete_on_status),
+                )
+
+            if self.wait_for_completion:
+                job = self.hook.wait_until_job_complete(
+                    job_name=self.name, namespace=self.namespace, job_poll_interval=self.poll_interval
+                )
+            else:
+                job = self.hook.get_job_status(job_name=self.name, namespace=self.namespace)
+
+            if (
+                self.delete_on_status is None
+                or (self.delete_on_status == "Complete" and self.hook.is_job_successful(job=job))
+                or (self.delete_on_status == "Failed" and self.hook.is_job_failed(job=job))
+            ):
+                self.log.info("Deleting kubernetes Job: %s", self.name)
+                self.client.delete_namespaced_job(name=self.name, namespace=self.namespace)
+                self.log.info("Kubernetes job was deleted.")
+            else:
+                self.log.info(
+                    "Deletion of the job %s was skipped due to settings of on_status=%s",
+                    self.name,
+                    self.delete_on_status,
+                )
+        except ApiException as e:
+            if e.status == 404:
+                self.log.info("The Kubernetes job %s does not exist.", self.name)
+            else:
+                raise e
+
+
+class KubernetesPatchJobOperator(BaseOperator):
+    """
+    Update a Kubernetes Job.
+
+    .. seealso::
+        For more information on how to use this operator, take a look at the guide:
+        :ref:`howto/operator:KubernetesPatchJobOperator`
+
+    :param name: name of the Job
+    :param namespace: the namespace to run within kubernetes
+    :param body: Job json object with parameters for update
+        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#job-v1-batch
+        e.g. ``{"spec": {"suspend": True}}``
+    :param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
+        for the Kubernetes cluster.
+    :param config_file: The path to the Kubernetes config file. (templated)
+        If not specified, default value is ``~/.kube/config``
+    :param in_cluster: run kubernetes client with in_cluster configuration.
+    :param cluster_context: context that points to kubernetes cluster.
+        Ignored when in_cluster is True. If None, current-context is used. (templated)
+    """
+
+    template_fields: Sequence[str] = (
+        "config_file",
+        "name",
+        "namespace",
+        "body",
+        "cluster_context",
+    )
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        namespace: str,
+        body: object,
+        kubernetes_conn_id: str | None = KubernetesHook.default_conn_name,
+        config_file: str | None = None,
+        in_cluster: bool | None = None,
+        cluster_context: str | None = None,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        self.name = name
+        self.namespace = namespace
+        self.body = body
+        self.kubernetes_conn_id = kubernetes_conn_id
+        self.config_file = config_file
+        self.in_cluster = in_cluster
+        self.cluster_context = cluster_context
+
+    @cached_property
+    def hook(self) -> KubernetesHook:
+        return KubernetesHook(
+            conn_id=self.kubernetes_conn_id,
+            in_cluster=self.in_cluster,
+            config_file=self.config_file,
+            cluster_context=self.cluster_context,
+        )
+
+    def execute(self, context: Context) -> dict:
+        self.log.info("Updating existing Job: %s", self.name)
+        job_object = self.hook.patch_namespaced_job(
+            job_name=self.name, namespace=self.namespace, body=self.body
+        )
+        self.log.info("Job was updated.")
+        return k8s.V1Job.to_dict(job_object)
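Putting the new pieces together: a sketch of a DAG that runs a Job in deferrable mode and then deletes it, pulling the created name and namespace from the XComs pushed by execute() above. The image, names, and schedule are hypothetical; the operator parameters come from the code in this release:

from datetime import datetime

from airflow import DAG
from airflow.providers.cncf.kubernetes.operators.job import (
    KubernetesDeleteJobOperator,
    KubernetesJobOperator,
)

with DAG(dag_id="kubernetes_job_example", start_date=datetime(2024, 1, 1), schedule=None):
    run_job = KubernetesJobOperator(
        task_id="run_job",
        name="pi",
        namespace="default",
        image="perl:5.34.0",
        cmds=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"],
        wait_until_job_complete=True,  # required for deferrable mode
        deferrable=True,
        job_poll_interval=10,
    )
    delete_job = KubernetesDeleteJobOperator(
        task_id="delete_job",
        name="{{ ti.xcom_pull(task_ids='run_job', key='job_name') }}",
        namespace="{{ ti.xcom_pull(task_ids='run_job', key='job_namespace') }}",
        delete_on_status="Complete",  # skip deletion unless the job succeeded
        trigger_rule="all_done",  # attempt cleanup even if run_job failed
    )
    run_job >> delete_job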
airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py
@@ -16,6 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """This module is deprecated. Please use :mod:`airflow.providers.cncf.kubernetes.operators.pod` instead."""
+
 from __future__ import annotations
 
 import warnings