apache-airflow-providers-cncf-kubernetes 3.1.0__py3-none-any.whl → 10.10.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- airflow/providers/cncf/kubernetes/__init__.py +18 -23
- airflow/providers/cncf/kubernetes/backcompat/__init__.py +17 -0
- airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py +31 -49
- airflow/providers/cncf/kubernetes/callbacks.py +200 -0
- airflow/providers/cncf/kubernetes/cli/__init__.py +16 -0
- airflow/providers/cncf/kubernetes/cli/kubernetes_command.py +195 -0
- airflow/providers/cncf/kubernetes/decorators/kubernetes.py +163 -0
- airflow/providers/cncf/kubernetes/decorators/kubernetes_cmd.py +118 -0
- airflow/providers/cncf/kubernetes/exceptions.py +37 -0
- airflow/providers/cncf/kubernetes/executors/__init__.py +17 -0
- airflow/providers/cncf/kubernetes/executors/kubernetes_executor.py +831 -0
- airflow/providers/cncf/kubernetes/executors/kubernetes_executor_types.py +91 -0
- airflow/providers/cncf/kubernetes/executors/kubernetes_executor_utils.py +736 -0
- airflow/providers/cncf/kubernetes/executors/local_kubernetes_executor.py +306 -0
- airflow/providers/cncf/kubernetes/get_provider_info.py +249 -50
- airflow/providers/cncf/kubernetes/hooks/kubernetes.py +846 -112
- airflow/providers/cncf/kubernetes/k8s_model.py +62 -0
- airflow/providers/cncf/kubernetes/kube_client.py +156 -0
- airflow/providers/cncf/kubernetes/kube_config.py +125 -0
- airflow/providers/cncf/kubernetes/kubernetes_executor_templates/__init__.py +16 -0
- airflow/providers/cncf/kubernetes/kubernetes_executor_templates/basic_template.yaml +79 -0
- airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py +165 -0
- airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py +368 -0
- airflow/providers/cncf/kubernetes/operators/job.py +646 -0
- airflow/providers/cncf/kubernetes/operators/kueue.py +132 -0
- airflow/providers/cncf/kubernetes/operators/pod.py +1417 -0
- airflow/providers/cncf/kubernetes/operators/resource.py +191 -0
- airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py +336 -35
- airflow/providers/cncf/kubernetes/pod_generator.py +592 -0
- airflow/providers/cncf/kubernetes/pod_template_file_examples/__init__.py +16 -0
- airflow/providers/cncf/kubernetes/pod_template_file_examples/dags_in_image_template.yaml +68 -0
- airflow/providers/cncf/kubernetes/pod_template_file_examples/dags_in_volume_template.yaml +74 -0
- airflow/providers/cncf/kubernetes/pod_template_file_examples/git_sync_template.yaml +95 -0
- airflow/providers/cncf/kubernetes/python_kubernetes_script.jinja2 +51 -0
- airflow/providers/cncf/kubernetes/python_kubernetes_script.py +92 -0
- airflow/providers/cncf/kubernetes/resource_convert/__init__.py +16 -0
- airflow/providers/cncf/kubernetes/resource_convert/configmap.py +52 -0
- airflow/providers/cncf/kubernetes/resource_convert/env_variable.py +39 -0
- airflow/providers/cncf/kubernetes/resource_convert/secret.py +40 -0
- airflow/providers/cncf/kubernetes/secret.py +128 -0
- airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py +30 -14
- airflow/providers/cncf/kubernetes/template_rendering.py +81 -0
- airflow/providers/cncf/kubernetes/triggers/__init__.py +16 -0
- airflow/providers/cncf/kubernetes/triggers/job.py +176 -0
- airflow/providers/cncf/kubernetes/triggers/pod.py +344 -0
- airflow/providers/cncf/kubernetes/utils/__init__.py +3 -0
- airflow/providers/cncf/kubernetes/utils/container.py +118 -0
- airflow/providers/cncf/kubernetes/utils/delete_from.py +154 -0
- airflow/providers/cncf/kubernetes/utils/k8s_resource_iterator.py +46 -0
- airflow/providers/cncf/kubernetes/utils/pod_manager.py +887 -152
- airflow/providers/cncf/kubernetes/utils/xcom_sidecar.py +25 -16
- airflow/providers/cncf/kubernetes/version_compat.py +38 -0
- apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/METADATA +125 -0
- apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/RECORD +62 -0
- {apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info → apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info}/WHEEL +1 -2
- apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/entry_points.txt +3 -0
- apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/licenses/NOTICE +5 -0
- airflow/providers/cncf/kubernetes/backcompat/pod.py +0 -119
- airflow/providers/cncf/kubernetes/backcompat/pod_runtime_info_env.py +0 -56
- airflow/providers/cncf/kubernetes/backcompat/volume.py +0 -62
- airflow/providers/cncf/kubernetes/backcompat/volume_mount.py +0 -58
- airflow/providers/cncf/kubernetes/example_dags/example_kubernetes.py +0 -163
- airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes.py +0 -66
- airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes_spark_pi.yaml +0 -57
- airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py +0 -622
- apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/METADATA +0 -452
- apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/NOTICE +0 -6
- apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/RECORD +0 -29
- apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/entry_points.txt +0 -3
- apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/top_level.txt +0 -1
- /airflow/providers/cncf/kubernetes/{example_dags → decorators}/__init__.py +0 -0
- {apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info → apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/licenses}/LICENSE +0 -0
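
Note the pod operator module rename in this range: operators/kubernetes_pod.py is removed and operators/pod.py is added. A minimal sketch of the import change DAG code would need across this upgrade, assuming the operator class keeps its KubernetesPodOperator name (which the upstream provider docs state, but this diff itself does not show):

    # Old import path, provider 3.1.0 (module removed in this diff):
    from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator

    # New import path, provider 10.10.0rc1 (module added in this diff):
    from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator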
airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py

@@ -15,13 +15,17 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-from
+from __future__ import annotations
+
+from collections.abc import Sequence
+from functools import cached_property
+from typing import TYPE_CHECKING

 from kubernetes import client

 from airflow.exceptions import AirflowException
 from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
-from airflow.
+from airflow.providers.common.compat.sdk import BaseSensorOperator

 if TYPE_CHECKING:
     from airflow.utils.context import Context
@@ -29,7 +33,7 @@ if TYPE_CHECKING:

 class SparkKubernetesSensor(BaseSensorOperator):
     """
-    Checks sparkApplication object in kubernetes cluster
+    Checks sparkApplication object in kubernetes cluster.

     .. seealso::
         For more detail about Spark Application Object have a look at the reference:
@@ -37,6 +41,7 @@ class SparkKubernetesSensor(BaseSensorOperator):

     :param application_name: spark Application resource name
     :param namespace: the kubernetes namespace where the sparkApplication reside in
+    :param container_name: the kubernetes container name where the sparkApplication reside in
     :param kubernetes_conn_id: The :ref:`kubernetes connection<howto/connection:kubernetes>`
         to Kubernetes cluster.
     :param attach_log: determines whether logs for driver pod should be appended to the sensor log
@@ -53,21 +58,26 @@ class SparkKubernetesSensor(BaseSensorOperator):
         *,
         application_name: str,
         attach_log: bool = False,
-        namespace:
+        namespace: str | None = None,
+        container_name: str = "spark-kubernetes-driver",
         kubernetes_conn_id: str = "kubernetes_default",
-        api_group: str =
-        api_version: str =
+        api_group: str = "sparkoperator.k8s.io",
+        api_version: str = "v1beta2",
         **kwargs,
     ) -> None:
         super().__init__(**kwargs)
         self.application_name = application_name
         self.attach_log = attach_log
         self.namespace = namespace
+        self.container_name = container_name
         self.kubernetes_conn_id = kubernetes_conn_id
-        self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id)
         self.api_group = api_group
         self.api_version = api_version

+    @cached_property
+    def hook(self) -> KubernetesHook:
+        return KubernetesHook(conn_id=self.kubernetes_conn_id)
+
     def _log_driver(self, application_state: str, response: dict) -> None:
         if not self.attach_log:
             return
@@ -82,7 +92,9 @@ class SparkKubernetesSensor(BaseSensorOperator):
         log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info
         try:
             log = ""
-            for line in self.hook.get_pod_logs(
+            for line in self.hook.get_pod_logs(
+                driver_pod_name, namespace=namespace, container=self.container_name
+            ):
                 log += line.decode()
             log_method(log)
         except client.rest.ApiException as e:
@@ -94,8 +106,9 @@ class SparkKubernetesSensor(BaseSensorOperator):
                 e,
             )

-    def poke(self, context:
+    def poke(self, context: Context) -> bool:
         self.log.info("Poking: %s", self.application_name)
+
         response = self.hook.get_custom_object(
             group=self.api_group,
             version=self.api_version,
@@ -103,17 +116,20 @@ class SparkKubernetesSensor(BaseSensorOperator):
             name=self.application_name,
             namespace=self.namespace,
         )
+
         try:
             application_state = response["status"]["applicationState"]["state"]
         except KeyError:
             return False
+
         if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES:
             self._log_driver(application_state, response)
+
         if application_state in self.FAILURE_STATES:
-
-
+            message = f"Spark application failed with state: {application_state}"
+            raise AirflowException(message)
+        if application_state in self.SUCCESS_STATES:
             self.log.info("Spark application ended successfully")
             return True
-
-
-        return False
+        self.log.info("Spark application is still in state: %s", application_state)
+        return False
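
A minimal usage sketch of the updated sensor, based only on the constructor shown above; the DAG id, application name, and namespace are illustrative:

    from airflow import DAG
    from airflow.providers.cncf.kubernetes.sensors.spark_kubernetes import SparkKubernetesSensor

    with DAG(dag_id="spark_pi_monitor") as dag:  # hypothetical DAG
        monitor = SparkKubernetesSensor(
            task_id="monitor_spark_app",
            application_name="spark-pi",               # hypothetical SparkApplication name
            namespace="spark-jobs",                    # now typed str | None, defaulting to None
            container_name="spark-kubernetes-driver",  # new parameter; default shown
            attach_log=True,  # append driver pod logs to the sensor log on completion
        )

The hook change is also worth noting: replacing the eager `self.hook = KubernetesHook(...)` in __init__ with a cached_property defers the connection lookup until the sensor actually pokes.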
airflow/providers/cncf/kubernetes/template_rendering.py (new file)

@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from jinja2 import TemplateAssertionError, UndefinedError
+from kubernetes.client.api_client import ApiClient
+
+from airflow.exceptions import AirflowException
+from airflow.providers.cncf.kubernetes.kube_config import KubeConfig
+from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import create_unique_id
+from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator, generate_pod_command_args
+from airflow.utils.session import NEW_SESSION, provide_session
+
+if TYPE_CHECKING:
+    from airflow.models.taskinstance import TaskInstance
+
+
+def render_k8s_pod_yaml(task_instance: TaskInstance) -> dict | None:
+    """Render k8s pod yaml."""
+    kube_config = KubeConfig()
+    if task_instance.executor_config and task_instance.executor_config.get("pod_template_file"):
+        # If a specific pod_template_file was passed to the executor, we make
+        # sure to render the k8s pod spec using this one, and not the default one.
+        pod_template_file = task_instance.executor_config["pod_template_file"]
+    else:
+        # If no such pod_template_file override was passed, we can simply render
+        # The pod spec using the default template.
+        pod_template_file = kube_config.pod_template_file
+
+    # Generate command args using shared utility function
+    command_args = generate_pod_command_args(task_instance)
+
+    pod = PodGenerator.construct_pod(
+        dag_id=task_instance.dag_id,
+        run_id=task_instance.run_id,
+        task_id=task_instance.task_id,
+        map_index=task_instance.map_index,
+        date=None,
+        pod_id=create_unique_id(task_instance.dag_id, task_instance.task_id),
+        try_number=task_instance.try_number,
+        kube_image=kube_config.kube_image,
+        args=command_args,
+        pod_override_object=PodGenerator.from_obj(task_instance.executor_config),
+        scheduler_job_id="0",
+        namespace=kube_config.executor_namespace,
+        base_worker_pod=PodGenerator.deserialize_model_file(pod_template_file),
+        with_mutation_hook=True,
+    )
+    sanitized_pod = ApiClient().sanitize_for_serialization(pod)
+    return sanitized_pod
+
+
+@provide_session
+def get_rendered_k8s_spec(task_instance: TaskInstance, session=NEW_SESSION) -> dict | None:
+    """Fetch rendered template fields from DB."""
+    from airflow.models.renderedtifields import RenderedTaskInstanceFields
+
+    rendered_k8s_spec = RenderedTaskInstanceFields.get_k8s_pod_yaml(task_instance, session=session)
+    if not rendered_k8s_spec:
+        try:
+            rendered_k8s_spec = render_k8s_pod_yaml(task_instance)
+        except (TemplateAssertionError, UndefinedError) as e:
+            raise AirflowException(f"Unable to render a k8s spec for this taskinstance: {e}") from e
+    return rendered_k8s_spec
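
A sketch of how these helpers might be called, assuming a TaskInstance `ti` obtained elsewhere (for example from a DagRun); both return the sanitized pod spec as a dict:

    from airflow.providers.cncf.kubernetes.template_rendering import (
        get_rendered_k8s_spec,
        render_k8s_pod_yaml,
    )

    pod_spec = render_k8s_pod_yaml(ti)       # always renders from the pod template
    stored_spec = get_rendered_k8s_spec(ti)  # prefers the spec persisted in
                                             # RenderedTaskInstanceFields, falling back
                                             # to render_k8s_pod_yaml if none is stored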
airflow/providers/cncf/kubernetes/triggers/__init__.py (new file)

@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
airflow/providers/cncf/kubernetes/triggers/job.py (new file)

@@ -0,0 +1,176 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+import asyncio
+import warnings
+from collections.abc import AsyncIterator
+from functools import cached_property
+from typing import TYPE_CHECKING, Any
+
+from airflow.exceptions import AirflowProviderDeprecationWarning
+from airflow.providers.cncf.kubernetes.hooks.kubernetes import AsyncKubernetesHook, KubernetesHook
+from airflow.providers.cncf.kubernetes.utils.pod_manager import PodManager
+from airflow.providers.cncf.kubernetes.utils.xcom_sidecar import PodDefaults
+from airflow.triggers.base import BaseTrigger, TriggerEvent
+
+if TYPE_CHECKING:
+    from kubernetes.client import V1Job
+
+
+class KubernetesJobTrigger(BaseTrigger):
+    """
+    KubernetesJobTrigger run on the trigger worker to check the state of Job.
+
+    :param job_name: The name of the job.
+    :param job_namespace: The namespace of the job.
+    :param pod_name: The name of the Pod. Parameter is deprecated, please use pod_names instead.
+    :param pod_names: The name of the Pods.
+    :param pod_namespace: The namespace of the Pod.
+    :param base_container_name: The name of the base container in the pod.
+    :param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
+        for the Kubernetes cluster.
+    :param cluster_context: Context that points to kubernetes cluster.
+    :param config_file: Path to kubeconfig file.
+    :param poll_interval: Polling period in seconds to check for the status.
+    :param in_cluster: run kubernetes client with in_cluster configuration.
+    :param get_logs: get the stdout of the base container as logs of the tasks.
+    :param do_xcom_push: If True, the content of the file
+        /airflow/xcom/return.json in the container will also be pushed to an
+        XCom when the container completes.
+    """
+
+    def __init__(
+        self,
+        job_name: str,
+        job_namespace: str,
+        pod_names: list[str],
+        pod_namespace: str,
+        base_container_name: str,
+        pod_name: str | None = None,
+        kubernetes_conn_id: str | None = None,
+        poll_interval: float = 10.0,
+        cluster_context: str | None = None,
+        config_file: str | None = None,
+        in_cluster: bool | None = None,
+        get_logs: bool = True,
+        do_xcom_push: bool = False,
+    ):
+        super().__init__()
+        self.job_name = job_name
+        self.job_namespace = job_namespace
+        if pod_name is not None:
+            self._pod_name = pod_name
+            self.pod_names = [
+                self.pod_name,
+            ]
+        else:
+            self.pod_names = pod_names
+        self.pod_namespace = pod_namespace
+        self.base_container_name = base_container_name
+        self.kubernetes_conn_id = kubernetes_conn_id
+        self.poll_interval = poll_interval
+        self.cluster_context = cluster_context
+        self.config_file = config_file
+        self.in_cluster = in_cluster
+        self.get_logs = get_logs
+        self.do_xcom_push = do_xcom_push
+
+    @property
+    def pod_name(self):
+        warnings.warn(
+            "`pod_name` parameter is deprecated, please use `pod_names`",
+            AirflowProviderDeprecationWarning,
+            stacklevel=2,
+        )
+        return self._pod_name
+
+    def serialize(self) -> tuple[str, dict[str, Any]]:
+        """Serialize KubernetesCreateJobTrigger arguments and classpath."""
+        return (
+            "airflow.providers.cncf.kubernetes.triggers.job.KubernetesJobTrigger",
+            {
+                "job_name": self.job_name,
+                "job_namespace": self.job_namespace,
+                "pod_names": self.pod_names,
+                "pod_namespace": self.pod_namespace,
+                "base_container_name": self.base_container_name,
+                "kubernetes_conn_id": self.kubernetes_conn_id,
+                "poll_interval": self.poll_interval,
+                "cluster_context": self.cluster_context,
+                "config_file": self.config_file,
+                "in_cluster": self.in_cluster,
+                "get_logs": self.get_logs,
+                "do_xcom_push": self.do_xcom_push,
+            },
+        )
+
+    async def run(self) -> AsyncIterator[TriggerEvent]:
+        """Get current job status and yield a TriggerEvent."""
+        if self.do_xcom_push:
+            xcom_results = []
+            for pod_name in self.pod_names:
+                pod = await self.hook.get_pod(name=pod_name, namespace=self.pod_namespace)
+                await self.hook.wait_until_container_complete(
+                    name=pod_name, namespace=self.pod_namespace, container_name=self.base_container_name
+                )
+                self.log.info("Checking if xcom sidecar container is started.")
+                await self.hook.wait_until_container_started(
+                    name=pod_name,
+                    namespace=self.pod_namespace,
+                    container_name=PodDefaults.SIDECAR_CONTAINER_NAME,
+                )
+                self.log.info("Extracting result from xcom sidecar container.")
+                loop = asyncio.get_running_loop()
+                xcom_result = await loop.run_in_executor(None, self.pod_manager.extract_xcom, pod)
+                xcom_results.append(xcom_result)
+        job: V1Job = await self.hook.wait_until_job_complete(name=self.job_name, namespace=self.job_namespace)
+        job_dict = job.to_dict()
+        error_message = self.hook.is_job_failed(job=job)
+        yield TriggerEvent(
+            {
+                "name": job.metadata.name,
+                "namespace": job.metadata.namespace,
+                "pod_names": [pod_name for pod_name in self.pod_names] if self.get_logs else None,
+                "pod_namespace": self.pod_namespace if self.get_logs else None,
+                "status": "error" if error_message else "success",
+                "message": f"Job failed with error: {error_message}"
+                if error_message
+                else "Job completed successfully",
+                "job": job_dict,
+                "xcom_result": xcom_results if self.do_xcom_push else None,
+            }
+        )
+
+    @cached_property
+    def hook(self) -> AsyncKubernetesHook:
+        return AsyncKubernetesHook(
+            conn_id=self.kubernetes_conn_id,
+            in_cluster=self.in_cluster,
+            config_file=self.config_file,
+            cluster_context=self.cluster_context,
+        )
+
+    @cached_property
+    def pod_manager(self) -> PodManager:
+        sync_hook = KubernetesHook(
+            conn_id=self.kubernetes_conn_id,
+            in_cluster=self.in_cluster,
+            config_file=self.config_file,
+            cluster_context=self.cluster_context,
+        )
+        return PodManager(kube_client=sync_hook.core_v1_client)