apache-airflow-providers-cncf-kubernetes 7.13.0rc1__py3-none-any.whl → 7.14.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of apache-airflow-providers-cncf-kubernetes might be problematic.
- airflow/providers/cncf/kubernetes/__init__.py +1 -1
- airflow/providers/cncf/kubernetes/callbacks.py +111 -0
- airflow/providers/cncf/kubernetes/get_provider_info.py +4 -2
- airflow/providers/cncf/kubernetes/hooks/kubernetes.py +4 -4
- airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py +367 -0
- airflow/providers/cncf/kubernetes/operators/pod.py +74 -13
- airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py +221 -136
- airflow/providers/cncf/kubernetes/pod_generator.py +13 -6
- airflow/providers/cncf/kubernetes/pod_launcher_deprecated.py +3 -3
- airflow/providers/cncf/kubernetes/resource_convert/__init__.py +16 -0
- airflow/providers/cncf/kubernetes/resource_convert/configmap.py +52 -0
- airflow/providers/cncf/kubernetes/resource_convert/env_variable.py +39 -0
- airflow/providers/cncf/kubernetes/resource_convert/secret.py +40 -0
- airflow/providers/cncf/kubernetes/utils/pod_manager.py +18 -4
- {apache_airflow_providers_cncf_kubernetes-7.13.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-7.14.0rc1.dist-info}/METADATA +6 -6
- {apache_airflow_providers_cncf_kubernetes-7.13.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-7.14.0rc1.dist-info}/RECORD +18 -12
- {apache_airflow_providers_cncf_kubernetes-7.13.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-7.14.0rc1.dist-info}/WHEEL +0 -0
- {apache_airflow_providers_cncf_kubernetes-7.13.0rc1.dist-info → apache_airflow_providers_cncf_kubernetes-7.14.0rc1.dist-info}/entry_points.txt +0 -0
airflow/providers/cncf/kubernetes/callbacks.py
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+from enum import Enum
+from typing import Union
+
+import kubernetes.client as k8s
+import kubernetes_asyncio.client as async_k8s
+
+client_type = Union[k8s.CoreV1Api, async_k8s.CoreV1Api]
+
+
+class ExecutionMode(str, Enum):
+    """Enum class for execution mode."""
+
+    SYNC = "sync"
+    ASYNC = "async"
+
+
+class KubernetesPodOperatorCallback:
+    """`KubernetesPodOperator` callbacks methods.
+
+    Currently, the callbacks methods are not called in the async mode, this support will be added
+    in the future.
+    """
+
+    @staticmethod
+    def on_sync_client_creation(*, client: k8s.CoreV1Api, **kwargs) -> None:
+        """Callback method called after creating the sync client.
+
+        :param client: the created `kubernetes.client.CoreV1Api` client.
+        """
+        pass
+
+    @staticmethod
+    def on_pod_creation(*, pod: k8s.V1Pod, client: client_type, mode: str, **kwargs) -> None:
+        """Callback method called after creating the pod.
+
+        :param pod: the created pod.
+        :param client: the Kubernetes client that can be used in the callback.
+        :param mode: the current execution mode, it's one of (`sync`, `async`).
+        """
+        pass
+
+    @staticmethod
+    def on_pod_starting(*, pod: k8s.V1Pod, client: client_type, mode: str, **kwargs) -> None:
+        """Callback method called when the pod starts.
+
+        :param pod: the started pod.
+        :param client: the Kubernetes client that can be used in the callback.
+        :param mode: the current execution mode, it's one of (`sync`, `async`).
+        """
+        pass
+
+    @staticmethod
+    def on_pod_completion(*, pod: k8s.V1Pod, client: client_type, mode: str, **kwargs) -> None:
+        """Callback method called when the pod completes.
+
+        :param pod: the completed pod.
+        :param client: the Kubernetes client that can be used in the callback.
+        :param mode: the current execution mode, it's one of (`sync`, `async`).
+        """
+        pass
+
+    @staticmethod
+    def on_pod_cleanup(*, pod: k8s.V1Pod, client: client_type, mode: str, **kwargs):
+        """Callback method called after cleaning/deleting the pod.
+
+        :param pod: the completed pod.
+        :param client: the Kubernetes client that can be used in the callback.
+        :param mode: the current execution mode, it's one of (`sync`, `async`).
+        """
+        pass
+
+    @staticmethod
+    def on_operator_resuming(
+        *, pod: k8s.V1Pod, event: dict, client: client_type, mode: str, **kwargs
+    ) -> None:
+        """Callback method called when resuming the `KubernetesPodOperator` from deferred state.
+
+        :param pod: the current state of the pod.
+        :param event: the returned event from the Trigger.
+        :param client: the Kubernetes client that can be used in the callback.
+        :param mode: the current execution mode, it's one of (`sync`, `async`).
+        """
+        pass
+
+    @staticmethod
+    def progress_callback(*, line: str, client: client_type, mode: str, **kwargs) -> None:
+        """Callback method to process pod container logs.
+
+        :param line: the read line of log.
+        :param client: the Kubernetes client that can be used in the callback.
+        :param mode: the current execution mode, it's one of (`sync`, `async`).
+        """
+        pass
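For orientation, here is a minimal usage sketch of the new callbacks API. It assumes the `callbacks` argument that the operators/pod.py change in this release (+74 -13) adds to `KubernetesPodOperator`; the task wiring below is illustrative, not part of the diff.

from airflow.providers.cncf.kubernetes.callbacks import KubernetesPodOperatorCallback
from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator


class AuditCallback(KubernetesPodOperatorCallback):
    """Every hook is a static method, so the class itself is passed, not an instance."""

    @staticmethod
    def on_pod_creation(*, pod, client, mode, **kwargs) -> None:
        print(f"pod {pod.metadata.name} created (mode={mode})")

    @staticmethod
    def progress_callback(*, line, client, mode, **kwargs) -> None:
        print(f"container log: {line}")


# Hypothetical task using the assumed `callbacks` argument:
hello = KubernetesPodOperator(
    task_id="hello",
    image="busybox",
    cmds=["sh", "-c", "echo hello"],
    callbacks=AuditCallback,
)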
airflow/providers/cncf/kubernetes/get_provider_info.py
@@ -27,9 +27,10 @@ def get_provider_info():
         "package-name": "apache-airflow-providers-cncf-kubernetes",
         "name": "Kubernetes",
         "description": "`Kubernetes <https://kubernetes.io/>`__\n",
-        "
-        "source-date-epoch":
+        "state": "ready",
+        "source-date-epoch": 1705911981,
         "versions": [
+            "7.14.0",
             "7.13.0",
             "7.12.0",
             "7.11.0",
@@ -109,6 +110,7 @@ def get_provider_info():
             {
                 "integration-name": "Kubernetes",
                 "python-modules": [
+                    "airflow.providers.cncf.kubernetes.operators.custom_object_launcher",
                     "airflow.providers.cncf.kubernetes.operators.pod",
                     "airflow.providers.cncf.kubernetes.operators.spark_kubernetes",
                     "airflow.providers.cncf.kubernetes.operators.resource",
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
@@ -89,15 +89,15 @@ class KubernetesHook(BaseHook, PodOperatorHookProtocol):
     @classmethod
     def get_connection_form_widgets(cls) -> dict[str, Any]:
         """Return connection widgets to add to connection form."""
-        from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
+        from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
         from flask_babel import lazy_gettext
-        from wtforms import BooleanField, StringField
+        from wtforms import BooleanField, PasswordField, StringField

         return {
             "in_cluster": BooleanField(lazy_gettext("In cluster configuration")),
             "kube_config_path": StringField(lazy_gettext("Kube config path"), widget=BS3TextFieldWidget()),
-            "kube_config": StringField(
-                lazy_gettext("Kube config (JSON format)"), widget=BS3TextFieldWidget()
+            "kube_config": PasswordField(
+                lazy_gettext("Kube config (JSON format)"), widget=BS3PasswordFieldWidget()
             ),
             "namespace": StringField(lazy_gettext("Namespace"), widget=BS3TextFieldWidget()),
             "cluster_context": StringField(lazy_gettext("Cluster context"), widget=BS3TextFieldWidget()),
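The practical effect of the widget change above is that a kubeconfig pasted into the connection form is now masked like a password. A minimal sketch of storing that value programmatically (the "kube_config" extra-field key is taken from the widget mapping above; the connection details are illustrative):

from airflow.models.connection import Connection

conn = Connection(
    conn_id="kubernetes_default",
    conn_type="kubernetes",
    # The kubeconfig content, in JSON form, goes into the extra field that the
    # form above now renders with BS3PasswordFieldWidget.
    extra='{"kube_config": "<kubeconfig content in JSON format>"}',
)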
airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py
@@ -0,0 +1,367 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Launches Custom object."""
+from __future__ import annotations
+
+import time
+from copy import deepcopy
+from datetime import datetime as dt
+from functools import cached_property
+
+import tenacity
+from kubernetes.client import CoreV1Api, CustomObjectsApi, models as k8s
+from kubernetes.client.rest import ApiException
+
+from airflow.exceptions import AirflowException
+from airflow.providers.cncf.kubernetes.resource_convert.configmap import (
+    convert_configmap,
+    convert_configmap_to_volume,
+)
+from airflow.providers.cncf.kubernetes.resource_convert.env_variable import convert_env_vars
+from airflow.providers.cncf.kubernetes.resource_convert.secret import (
+    convert_image_pull_secrets,
+    convert_secret,
+)
+from airflow.providers.cncf.kubernetes.utils.pod_manager import PodManager
+from airflow.utils.log.logging_mixin import LoggingMixin
+
+
+def should_retry_start_spark_job(exception: BaseException) -> bool:
+    """Check if an Exception indicates a transient error and warrants retrying."""
+    if isinstance(exception, ApiException):
+        return exception.status == 409
+    return False
+
+
+class SparkJobSpec:
+    """Spark job spec."""
+
+    def __init__(self, **entries):
+        self.__dict__.update(entries)
+        self.validate()
+        self.update_resources()
+
+    def validate(self):
+        if self.spec.get("dynamicAllocation", {}).get("enabled"):
+            if not all(
+                [
+                    self.spec["dynamicAllocation"]["initialExecutors"],
+                    self.spec["dynamicAllocation"]["minExecutors"],
+                    self.spec["dynamicAllocation"]["maxExecutors"],
+                ]
+            ):
+                raise AirflowException("Make sure initial/min/max value for dynamic allocation is passed")
+
+    def update_resources(self):
+        if self.spec["driver"].get("container_resources"):
+            spark_resources = SparkResources(
+                self.spec["driver"].pop("container_resources"),
+                self.spec["executor"].pop("container_resources"),
+            )
+            self.spec["driver"].update(spark_resources.resources["driver"])
+            self.spec["executor"].update(spark_resources.resources["executor"])
+
+
+class KubernetesSpec:
+    """Spark kubernetes spec."""
+
+    def __init__(self, **entries):
+        self.__dict__.update(entries)
+        self.set_attribute()
+
+    def set_attribute(self):
+        self.env_vars = convert_env_vars(self.env_vars) if self.env_vars else []
+        self.image_pull_secrets = (
+            convert_image_pull_secrets(self.image_pull_secrets) if self.image_pull_secrets else []
+        )
+        if self.config_map_mounts:
+            vols, vols_mounts = convert_configmap_to_volume(self.config_map_mounts)
+            self.volumes.extend(vols)
+            self.volume_mounts.extend(vols_mounts)
+        if self.from_env_config_map:
+            self.env_from.extend([convert_configmap(c_name) for c_name in self.from_env_config_map])
+        if self.from_env_secret:
+            self.env_from.extend([convert_secret(c) for c in self.from_env_secret])
+
+
+class SparkResources:
+    """spark resources."""
+
+    def __init__(
+        self,
+        driver: dict | None = None,
+        executor: dict | None = None,
+    ):
+        self.default = {
+            "gpu": {"name": None, "quantity": 0},
+            "cpu": {"request": None, "limit": None},
+            "memory": {"request": None, "limit": None},
+        }
+        self.driver = deepcopy(self.default)
+        self.executor = deepcopy(self.default)
+        if driver:
+            self.driver.update(driver)
+        if executor:
+            self.executor.update(executor)
+        self.convert_resources()
+
+    @property
+    def resources(self):
+        """Return job resources."""
+        return {"driver": self.driver_resources, "executor": self.executor_resources}
+
+    @property
+    def driver_resources(self):
+        """Return resources to use."""
+        driver = {}
+        if self.driver["cpu"].get("request"):
+            driver["cores"] = self.driver["cpu"]["request"]
+        if self.driver["cpu"].get("limit"):
+            driver["coreLimit"] = self.driver["cpu"]["limit"]
+        if self.driver["memory"].get("limit"):
+            driver["memory"] = self.driver["memory"]["limit"]
+        if self.driver["gpu"].get("name") and self.driver["gpu"].get("quantity"):
+            driver["gpu"] = {"name": self.driver["gpu"]["name"], "quantity": self.driver["gpu"]["quantity"]}
+        return driver
+
+    @property
+    def executor_resources(self):
+        """Return resources to use."""
+        executor = {}
+        if self.executor["cpu"].get("request"):
+            executor["cores"] = self.executor["cpu"]["request"]
+        if self.executor["cpu"].get("limit"):
+            executor["coreLimit"] = self.executor["cpu"]["limit"]
+        if self.executor["memory"].get("limit"):
+            executor["memory"] = self.executor["memory"]["limit"]
+        if self.executor["gpu"].get("name") and self.executor["gpu"].get("quantity"):
+            executor["gpu"] = {
+                "name": self.executor["gpu"]["name"],
+                "quantity": self.executor["gpu"]["quantity"],
+            }
+        return executor
+
+    def convert_resources(self):
+        if isinstance(self.driver["memory"].get("limit"), str):
+            if "G" in self.driver["memory"]["limit"] or "Gi" in self.driver["memory"]["limit"]:
+                self.driver["memory"]["limit"] = float(self.driver["memory"]["limit"].rstrip("Gi G")) * 1024
+            elif "m" in self.driver["memory"]["limit"]:
+                self.driver["memory"]["limit"] = float(self.driver["memory"]["limit"].rstrip("m"))
+            # Adjusting the memory value as operator adds 40% to the given value
+            self.driver["memory"]["limit"] = str(int(self.driver["memory"]["limit"] / 1.4)) + "m"
+
+        if isinstance(self.executor["memory"].get("limit"), str):
+            if "G" in self.executor["memory"]["limit"] or "Gi" in self.executor["memory"]["limit"]:
+                self.executor["memory"]["limit"] = (
+                    float(self.executor["memory"]["limit"].rstrip("Gi G")) * 1024
+                )
+            elif "m" in self.executor["memory"]["limit"]:
+                self.executor["memory"]["limit"] = float(self.executor["memory"]["limit"].rstrip("m"))
+            # Adjusting the memory value as operator adds 40% to the given value
+            self.executor["memory"]["limit"] = str(int(self.executor["memory"]["limit"] / 1.4)) + "m"
+
+        if self.driver["cpu"].get("request"):
+            self.driver["cpu"]["request"] = int(float(self.driver["cpu"]["request"]))
+        if self.driver["cpu"].get("limit"):
+            self.driver["cpu"]["limit"] = str(self.driver["cpu"]["limit"])
+        if self.executor["cpu"].get("request"):
+            self.executor["cpu"]["request"] = int(float(self.executor["cpu"]["request"]))
+        if self.executor["cpu"].get("limit"):
+            self.executor["cpu"]["limit"] = str(self.executor["cpu"]["limit"])
+
+        if self.driver["gpu"].get("quantity"):
+            self.driver["gpu"]["quantity"] = int(float(self.driver["gpu"]["quantity"]))
+        if self.executor["gpu"].get("quantity"):
+            self.executor["gpu"]["quantity"] = int(float(self.executor["gpu"]["quantity"]))
+
+
+class CustomObjectStatus:
+    """Status of the PODs."""
+
+    SUBMITTED = "SUBMITTED"
+    RUNNING = "RUNNING"
+    FAILED = "FAILED"
+    SUCCEEDED = "SUCCEEDED"
+
+
+class CustomObjectLauncher(LoggingMixin):
+    """Launches PODS."""
+
+    def __init__(
+        self,
+        name: str | None,
+        namespace: str | None,
+        kube_client: CoreV1Api,
+        custom_obj_api: CustomObjectsApi,
+        template_body: str | None = None,
+    ):
+        """
+        Creates custom object launcher(sparkapplications crd).
+
+        :param kube_client: kubernetes client.
+        """
+        super().__init__()
+        self.name = name
+        self.namespace = namespace
+        self.template_body = template_body
+        self.body: dict = self.get_body()
+        self.kind = self.body["kind"]
+        self.plural = f"{self.kind.lower()}s"
+        if self.body.get("apiVersion"):
+            self.api_group, self.api_version = self.body["apiVersion"].split("/")
+        else:
+            self.api_group = self.body["apiGroup"]
+            self.api_version = self.body["version"]
+        self._client = kube_client
+        self.custom_obj_api = custom_obj_api
+        self.spark_obj_spec: dict = {}
+        self.pod_spec: k8s.V1Pod | None = None
+
+    @cached_property
+    def pod_manager(self) -> PodManager:
+        return PodManager(kube_client=self._client)
+
+    def get_body(self):
+        self.body: dict = SparkJobSpec(**self.template_body["spark"])
+        self.body.metadata = {"name": self.name, "namespace": self.namespace}
+        if self.template_body.get("kubernetes"):
+            k8s_spec: dict = KubernetesSpec(**self.template_body["kubernetes"])
+            self.body.spec["volumes"] = k8s_spec.volumes
+            if k8s_spec.image_pull_secrets:
+                self.body.spec["imagePullSecrets"] = k8s_spec.image_pull_secrets
+            for item in ["driver", "executor"]:
+                # Env List
+                self.body.spec[item]["env"] = k8s_spec.env_vars
+                self.body.spec[item]["envFrom"] = k8s_spec.env_from
+                # Volumes
+                self.body.spec[item]["volumeMounts"] = k8s_spec.volume_mounts
+                # Add affinity
+                self.body.spec[item]["affinity"] = k8s_spec.affinity
+                self.body.spec[item]["tolerations"] = k8s_spec.tolerations
+                self.body.spec[item]["nodeSelector"] = k8s_spec.node_selector
+                # Labels
+                self.body.spec[item]["labels"] = self.body.spec["labels"]
+
+        return self.body.__dict__
+
+    @tenacity.retry(
+        stop=tenacity.stop_after_attempt(3),
+        wait=tenacity.wait_random_exponential(),
+        reraise=True,
+        retry=tenacity.retry_if_exception(should_retry_start_spark_job),
+    )
+    def start_spark_job(self, image=None, code_path=None, startup_timeout: int = 600):
+        """
+        Launches the pod synchronously and waits for completion.
+
+        :param image: image name
+        :param code_path: path to the .py file for python and jar file for scala
+        :param startup_timeout: Timeout for startup of the pod (if pod is pending for too long, fails task)
+        :return:
+        """
+        try:
+            if image:
+                self.body["spec"]["image"] = image
+            if code_path:
+                self.body["spec"]["mainApplicationFile"] = code_path
+            self.log.debug("Spark Job Creation Request Submitted")
+            self.spark_obj_spec = self.custom_obj_api.create_namespaced_custom_object(
+                group=self.api_group,
+                version=self.api_version,
+                namespace=self.namespace,
+                plural=self.plural,
+                body=self.body,
+            )
+            self.log.debug("Spark Job Creation Response: %s", self.spark_obj_spec)
+
+            # Wait for the driver pod to come alive
+            self.pod_spec = k8s.V1Pod(
+                metadata=k8s.V1ObjectMeta(
+                    labels=self.spark_obj_spec["spec"]["driver"]["labels"],
+                    name=self.spark_obj_spec["metadata"]["name"] + "-driver",
+                    namespace=self.namespace,
+                )
+            )
+            curr_time = dt.now()
+            while self.spark_job_not_running(self.spark_obj_spec):
+                self.log.warning(
+                    "Spark job submitted but not yet started. job_id: %s",
+                    self.spark_obj_spec["metadata"]["name"],
+                )
+                self.check_pod_start_failure()
+                delta = dt.now() - curr_time
+                if delta.total_seconds() >= startup_timeout:
+                    pod_status = self.pod_manager.read_pod(self.pod_spec).status.container_statuses
+                    raise AirflowException(f"Job took too long to start. pod status: {pod_status}")
+                time.sleep(10)
+        except Exception as e:
+            self.log.exception("Exception when attempting to create spark job")
+            raise e
+
+        return self.pod_spec, self.spark_obj_spec
+
+    def spark_job_not_running(self, spark_obj_spec):
+        """Tests if spark_obj_spec has not started."""
+        spark_job_info = self.custom_obj_api.get_namespaced_custom_object_status(
+            group=self.api_group,
+            version=self.api_version,
+            namespace=self.namespace,
+            name=spark_obj_spec["metadata"]["name"],
+            plural=self.plural,
+        )
+        driver_state = spark_job_info.get("status", {}).get("applicationState", {}).get("state", "SUBMITTED")
+        if driver_state == CustomObjectStatus.FAILED:
+            err = spark_job_info.get("status", {}).get("applicationState", {}).get("errorMessage", "N/A")
+            try:
+                self.pod_manager.fetch_container_logs(
+                    pod=self.pod_spec, container_name="spark-kubernetes-driver"
+                )
+            except Exception:
+                pass
+            raise AirflowException(f"Spark Job Failed. Error stack: {err}")
+        return driver_state == CustomObjectStatus.SUBMITTED
+
+    def check_pod_start_failure(self):
+        try:
+            waiting_status = (
+                self.pod_manager.read_pod(self.pod_spec).status.container_statuses[0].state.waiting
+            )
+            waiting_reason = waiting_status.reason
+            waiting_message = waiting_status.message
+        except Exception:
+            return
+        if waiting_reason != "ContainerCreating":
+            raise AirflowException(f"Spark Job Failed. Status: {waiting_reason}, Error: {waiting_message}")
+
+    def delete_spark_job(self, spark_job_name=None):
+        """Deletes spark job."""
+        spark_job_name = spark_job_name or self.spark_obj_spec.get("metadata", {}).get("name")
+        if not spark_job_name:
+            self.log.warning("Spark job not found: %s", spark_job_name)
+            return
+        try:
+            self.custom_obj_api.delete_namespaced_custom_object(
+                group=self.api_group,
+                version=self.api_version,
+                namespace=self.namespace,
+                plural=self.plural,
+                name=spark_job_name,
+            )
+        except ApiException as e:
+            # If the pod is already deleted
+            if e.status != 404:
+                raise