apache-airflow-providers-cncf-kubernetes 3.1.0__py3-none-any.whl → 10.10.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (72)
  1. airflow/providers/cncf/kubernetes/__init__.py +18 -23
  2. airflow/providers/cncf/kubernetes/backcompat/__init__.py +17 -0
  3. airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py +31 -49
  4. airflow/providers/cncf/kubernetes/callbacks.py +200 -0
  5. airflow/providers/cncf/kubernetes/cli/__init__.py +16 -0
  6. airflow/providers/cncf/kubernetes/cli/kubernetes_command.py +195 -0
  7. airflow/providers/cncf/kubernetes/decorators/kubernetes.py +163 -0
  8. airflow/providers/cncf/kubernetes/decorators/kubernetes_cmd.py +118 -0
  9. airflow/providers/cncf/kubernetes/exceptions.py +37 -0
  10. airflow/providers/cncf/kubernetes/executors/__init__.py +17 -0
  11. airflow/providers/cncf/kubernetes/executors/kubernetes_executor.py +831 -0
  12. airflow/providers/cncf/kubernetes/executors/kubernetes_executor_types.py +91 -0
  13. airflow/providers/cncf/kubernetes/executors/kubernetes_executor_utils.py +736 -0
  14. airflow/providers/cncf/kubernetes/executors/local_kubernetes_executor.py +306 -0
  15. airflow/providers/cncf/kubernetes/get_provider_info.py +249 -50
  16. airflow/providers/cncf/kubernetes/hooks/kubernetes.py +846 -112
  17. airflow/providers/cncf/kubernetes/k8s_model.py +62 -0
  18. airflow/providers/cncf/kubernetes/kube_client.py +156 -0
  19. airflow/providers/cncf/kubernetes/kube_config.py +125 -0
  20. airflow/providers/cncf/kubernetes/kubernetes_executor_templates/__init__.py +16 -0
  21. airflow/providers/cncf/kubernetes/kubernetes_executor_templates/basic_template.yaml +79 -0
  22. airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py +165 -0
  23. airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py +368 -0
  24. airflow/providers/cncf/kubernetes/operators/job.py +646 -0
  25. airflow/providers/cncf/kubernetes/operators/kueue.py +132 -0
  26. airflow/providers/cncf/kubernetes/operators/pod.py +1417 -0
  27. airflow/providers/cncf/kubernetes/operators/resource.py +191 -0
  28. airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py +336 -35
  29. airflow/providers/cncf/kubernetes/pod_generator.py +592 -0
  30. airflow/providers/cncf/kubernetes/pod_template_file_examples/__init__.py +16 -0
  31. airflow/providers/cncf/kubernetes/pod_template_file_examples/dags_in_image_template.yaml +68 -0
  32. airflow/providers/cncf/kubernetes/pod_template_file_examples/dags_in_volume_template.yaml +74 -0
  33. airflow/providers/cncf/kubernetes/pod_template_file_examples/git_sync_template.yaml +95 -0
  34. airflow/providers/cncf/kubernetes/python_kubernetes_script.jinja2 +51 -0
  35. airflow/providers/cncf/kubernetes/python_kubernetes_script.py +92 -0
  36. airflow/providers/cncf/kubernetes/resource_convert/__init__.py +16 -0
  37. airflow/providers/cncf/kubernetes/resource_convert/configmap.py +52 -0
  38. airflow/providers/cncf/kubernetes/resource_convert/env_variable.py +39 -0
  39. airflow/providers/cncf/kubernetes/resource_convert/secret.py +40 -0
  40. airflow/providers/cncf/kubernetes/secret.py +128 -0
  41. airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py +30 -14
  42. airflow/providers/cncf/kubernetes/template_rendering.py +81 -0
  43. airflow/providers/cncf/kubernetes/triggers/__init__.py +16 -0
  44. airflow/providers/cncf/kubernetes/triggers/job.py +176 -0
  45. airflow/providers/cncf/kubernetes/triggers/pod.py +344 -0
  46. airflow/providers/cncf/kubernetes/utils/__init__.py +3 -0
  47. airflow/providers/cncf/kubernetes/utils/container.py +118 -0
  48. airflow/providers/cncf/kubernetes/utils/delete_from.py +154 -0
  49. airflow/providers/cncf/kubernetes/utils/k8s_resource_iterator.py +46 -0
  50. airflow/providers/cncf/kubernetes/utils/pod_manager.py +887 -152
  51. airflow/providers/cncf/kubernetes/utils/xcom_sidecar.py +25 -16
  52. airflow/providers/cncf/kubernetes/version_compat.py +38 -0
  53. apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/METADATA +125 -0
  54. apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/RECORD +62 -0
  55. {apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info → apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info}/WHEEL +1 -2
  56. apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/entry_points.txt +3 -0
  57. apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/licenses/NOTICE +5 -0
  58. airflow/providers/cncf/kubernetes/backcompat/pod.py +0 -119
  59. airflow/providers/cncf/kubernetes/backcompat/pod_runtime_info_env.py +0 -56
  60. airflow/providers/cncf/kubernetes/backcompat/volume.py +0 -62
  61. airflow/providers/cncf/kubernetes/backcompat/volume_mount.py +0 -58
  62. airflow/providers/cncf/kubernetes/example_dags/example_kubernetes.py +0 -163
  63. airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes.py +0 -66
  64. airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes_spark_pi.yaml +0 -57
  65. airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py +0 -622
  66. apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/METADATA +0 -452
  67. apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/NOTICE +0 -6
  68. apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/RECORD +0 -29
  69. apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/entry_points.txt +0 -3
  70. apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info/top_level.txt +0 -1
  71. /airflow/providers/cncf/kubernetes/{example_dags → decorators}/__init__.py +0 -0
  72. {apache_airflow_providers_cncf_kubernetes-3.1.0.dist-info → apache_airflow_providers_cncf_kubernetes-10.10.0rc1.dist-info/licenses}/LICENSE +0 -0
airflow/providers/cncf/kubernetes/executors/local_kubernetes_executor.py (new file)
@@ -0,0 +1,306 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any
+
+from deprecated import deprecated
+
+from airflow.configuration import conf
+from airflow.exceptions import AirflowProviderDeprecationWarning
+from airflow.executors.base_executor import BaseExecutor
+from airflow.providers.cncf.kubernetes.executors.kubernetes_executor import KubernetesExecutor
+from airflow.providers.cncf.kubernetes.version_compat import AIRFLOW_V_3_0_PLUS
+
+if TYPE_CHECKING:
+    from airflow.callbacks.base_callback_sink import BaseCallbackSink
+    from airflow.callbacks.callback_requests import CallbackRequest
+    from airflow.executors.base_executor import EventBufferValueType
+    from airflow.executors.local_executor import LocalExecutor
+    from airflow.models.taskinstance import (  # type: ignore[attr-defined]
+        SimpleTaskInstance,
+        TaskInstance,
+        TaskInstanceKey,
+    )
+
+CommandType = Sequence[str]
+
+
+class LocalKubernetesExecutor(BaseExecutor):
+    """
+    Chooses between LocalExecutor and KubernetesExecutor based on the queue defined on the task.
+
+    When the task's queue is the value of ``kubernetes_queue`` in section ``[local_kubernetes_executor]``
+    of the configuration (default value: `kubernetes`), KubernetesExecutor is selected to run the task,
+    otherwise, LocalExecutor is used.
+    """
+
+    supports_ad_hoc_ti_run: bool = True
+    # TODO: Remove this attribute once providers rely on Airflow >=3.0.0
+    supports_pickling: bool = False
+    supports_sentry: bool = False
+
+    is_local: bool = False
+    is_single_threaded: bool = False
+    is_production: bool = True
+
+    serve_logs: bool = True
+
+    callback_sink: BaseCallbackSink | None = None
+
+    KUBERNETES_QUEUE = conf.get("local_kubernetes_executor", "kubernetes_queue")
+
+    def __init__(
+        self,
+        local_executor: LocalExecutor | None = None,
+        kubernetes_executor: KubernetesExecutor | None = None,
+    ):
+        if AIRFLOW_V_3_0_PLUS or not local_executor or not kubernetes_executor:
+            raise RuntimeError(
+                f"{self.__class__.__name__} does not support Airflow 3.0+. See "
+                "https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/executor/index.html#using-multiple-executors-concurrently"
+                " how to use multiple executors concurrently."
+            )
+
+        super().__init__()
+        self._job_id: int | str | None = None
+        self.local_executor = local_executor
+        self.kubernetes_executor = kubernetes_executor
+        self.kubernetes_executor.kubernetes_queue = self.KUBERNETES_QUEUE
+
+    @property
+    def _task_event_logs(self):
+        self.local_executor._task_event_logs += self.kubernetes_executor._task_event_logs
+        self.kubernetes_executor._task_event_logs.clear()
+        return self.local_executor._task_event_logs
+
+    @_task_event_logs.setter
+    def _task_event_logs(self, value):
+        """Not implemented for hybrid executors."""
+
+    @property
+    def queued_tasks(self) -> dict[TaskInstanceKey, Any]:
+        """Return queued tasks from local and kubernetes executor."""
+        queued_tasks = self.local_executor.queued_tasks.copy()
+        # TODO: fix this, there is misalignment between the types of queued_tasks so it is likely wrong
+        queued_tasks.update(self.kubernetes_executor.queued_tasks)  # type: ignore[arg-type]
+
+        return queued_tasks
+
+    @queued_tasks.setter
+    def queued_tasks(self, value) -> None:
+        """Not implemented for hybrid executors."""
+
+    @property
+    def running(self) -> set[TaskInstanceKey]:
+        """Return running tasks from local and kubernetes executor."""
+        return self.local_executor.running.union(self.kubernetes_executor.running)
+
+    @running.setter
+    def running(self, value) -> None:
+        """Not implemented for hybrid executors."""
+
+    @property
+    def job_id(self) -> int | str | None:
+        """
+        Inherited attribute from BaseExecutor.
+
+        Since this is not really an executor, but a wrapper of executors
+        we implemented it as property, so we can have custom setter.
+        """
+        return self._job_id

+    @job_id.setter
+    def job_id(self, value: int | str | None) -> None:
+        """Expose job ID for SchedulerJob."""
+        self._job_id = value
+        self.kubernetes_executor.job_id = value
+        self.local_executor.job_id = value
+
+    def start(self) -> None:
+        """Start local and kubernetes executor."""
+        self.log.info("Starting local and Kubernetes Executor")
+        self.local_executor.start()
+        self.kubernetes_executor.start()
+
+    @property
+    def slots_available(self) -> int:
+        """Number of new tasks this executor instance can accept."""
+        return self.local_executor.slots_available
+
+    @property
+    def slots_occupied(self):
+        """Number of tasks this executor instance is currently managing."""
+        return len(self.running) + len(self.queued_tasks)
+
+    def queue_command(
+        self,
+        task_instance: TaskInstance,
+        command: CommandType,
+        priority: int = 1,
+        queue: str | None = None,
+    ) -> None:
+        """Queues command via local or kubernetes executor."""
+        executor = self._router(task_instance)
+        self.log.debug("Using executor: %s for %s", executor.__class__.__name__, task_instance.key)
+        executor.queue_command(task_instance, command, priority, queue)  # type: ignore[union-attr]
+
+    def queue_task_instance(
+        self,
+        task_instance: TaskInstance,
+        mark_success: bool = False,
+        ignore_all_deps: bool = False,
+        ignore_depends_on_past: bool = False,
+        wait_for_past_depends_before_skipping: bool = False,
+        ignore_task_deps: bool = False,
+        ignore_ti_state: bool = False,
+        pool: str | None = None,
+        cfg_path: str | None = None,
+        **kwargs,
+    ) -> None:
+        """Queues task instance via local or kubernetes executor."""
+        from airflow.models.taskinstance import SimpleTaskInstance  # type: ignore[attr-defined]
+
+        executor = self._router(SimpleTaskInstance.from_ti(task_instance))
+        self.log.debug(
+            "Using executor: %s to queue_task_instance for %s", executor.__class__.__name__, task_instance.key
+        )
+
+        if not hasattr(task_instance, "pickle_id"):
+            del kwargs["pickle_id"]
+
+        executor.queue_task_instance(  # type: ignore[union-attr]
+            task_instance=task_instance,
+            mark_success=mark_success,
+            ignore_all_deps=ignore_all_deps,
+            ignore_depends_on_past=ignore_depends_on_past,
+            wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
+            ignore_task_deps=ignore_task_deps,
+            ignore_ti_state=ignore_ti_state,
+            pool=pool,
+            cfg_path=cfg_path,
+            **kwargs,
+        )
+
+    def get_task_log(self, ti: TaskInstance, try_number: int) -> tuple[list[str], list[str]]:
+        """Fetch task log from kubernetes executor."""
+        if ti.queue == self.kubernetes_executor.kubernetes_queue:
+            return self.kubernetes_executor.get_task_log(ti=ti, try_number=try_number)
+        return [], []
+
+    def has_task(self, task_instance: TaskInstance) -> bool:
+        """
+        Check if a task is either queued or running in either local or kubernetes executor.
+
+        :param task_instance: TaskInstance
+        :return: True if the task is known to this executor
+        """
+        return self.local_executor.has_task(task_instance) or self.kubernetes_executor.has_task(task_instance)
+
+    def heartbeat(self) -> None:
+        """Heartbeat sent to trigger new jobs in local and kubernetes executor."""
+        self.local_executor.heartbeat()
+        self.kubernetes_executor.heartbeat()
+
+    def get_event_buffer(
+        self, dag_ids: list[str] | None = None
+    ) -> dict[TaskInstanceKey, EventBufferValueType]:
+        """
+        Return and flush the event buffer from local and kubernetes executor.
+
+        :param dag_ids: dag_ids to return events for, if None returns all
+        :return: a dict of events
+        """
+        cleared_events_from_local = self.local_executor.get_event_buffer(dag_ids)
+        cleared_events_from_kubernetes = self.kubernetes_executor.get_event_buffer(dag_ids)
+
+        return {**cleared_events_from_local, **cleared_events_from_kubernetes}
+
+    def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]:
+        """
+        Try to adopt running task instances that have been abandoned by a SchedulerJob dying.
+
+        Anything that is not adopted will be cleared by the scheduler (and then become eligible for
+        re-scheduling)
+
+        :return: any TaskInstances that were unable to be adopted
+        """
+        local_tis = [ti for ti in tis if ti.queue != self.KUBERNETES_QUEUE]
+        kubernetes_tis = [ti for ti in tis if ti.queue == self.KUBERNETES_QUEUE]
+        return [
+            *self.local_executor.try_adopt_task_instances(local_tis),
+            *self.kubernetes_executor.try_adopt_task_instances(kubernetes_tis),
+        ]
+
+    @deprecated(
+        reason="Replaced by function `revoke_task`. Upgrade airflow core to make this go away.",
+        category=AirflowProviderDeprecationWarning,
+        action="ignore",  # ignoring since will get warning from the nested executors
+    )
+    def cleanup_stuck_queued_tasks(self, tis: list[TaskInstance]) -> list[str]:
+        # LocalExecutor doesn't have a cleanup_stuck_queued_tasks method, so we
+        # will only run KubernetesExecutor's
+        kubernetes_tis = [ti for ti in tis if ti.queue == self.KUBERNETES_QUEUE]
+        return self.kubernetes_executor.cleanup_stuck_queued_tasks(kubernetes_tis)
+
+    def revoke_task(self, *, ti: TaskInstance):
+        if ti.queue == self.KUBERNETES_QUEUE:
+            self.kubernetes_executor.revoke_task(ti=ti)
+
+    def end(self) -> None:
+        """End local and kubernetes executor."""
+        self.local_executor.end()
+        self.kubernetes_executor.end()
+
+    def terminate(self) -> None:
+        """Terminate local and kubernetes executor."""
+        self.local_executor.terminate()
+        self.kubernetes_executor.terminate()
+
+    def _router(self, simple_task_instance: SimpleTaskInstance) -> LocalExecutor | KubernetesExecutor:
+        """
+        Return either local_executor or kubernetes_executor.
+
+        :param simple_task_instance: SimpleTaskInstance
+        :return: local_executor or kubernetes_executor
+        """
+        if simple_task_instance.queue == self.KUBERNETES_QUEUE:
+            return self.kubernetes_executor
+        return self.local_executor
+
+    def debug_dump(self) -> None:
+        """Debug dump; called in response to SIGUSR2 by the scheduler."""
+        self.log.info("Dumping LocalExecutor state")
+        self.local_executor.debug_dump()
+        self.log.info("Dumping KubernetesExecutor state")
+        self.kubernetes_executor.debug_dump()
+
+    def send_callback(self, request: CallbackRequest) -> None:
+        """
+        Send callback for execution.
+
+        :param request: Callback request to be executed.
+        """
+        if not self.callback_sink:
+            raise ValueError("Callback sink is not ready.")
+        self.callback_sink.send(request)
+
+    @staticmethod
+    def get_cli_commands() -> list:
+        return KubernetesExecutor.get_cli_commands()
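
The `_router()` above keys entirely off the task's `queue` attribute: tasks queued on the configured `kubernetes_queue` (default `kubernetes`) go to the wrapped KubernetesExecutor, everything else to the wrapped LocalExecutor. As a minimal sketch of what that means for a DAG author, assuming an Airflow 2.x deployment with `executor = LocalKubernetesExecutor` set in `[core]` (the DAG and task ids below are hypothetical, not part of this package):

# Hypothetical DAG: only the `queue` argument decides which wrapped executor runs each task.
from datetime import datetime

from airflow import DAG
from airflow.operators.bash import BashOperator

with DAG(dag_id="routing_demo", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    # No queue set: _router() falls through to the LocalExecutor.
    on_local = BashOperator(task_id="on_local", bash_command="echo ran locally")

    # Queue matches KUBERNETES_QUEUE: _router() returns the KubernetesExecutor,
    # so this task is launched in its own worker pod.
    on_k8s = BashOperator(
        task_id="on_k8s",
        bash_command="echo ran in a pod",
        queue="kubernetes",
    )

    on_local >> on_k8s

Nothing else about the task definition changes; the two tasks differ only in which executor picks them up.
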
airflow/providers/cncf/kubernetes/get_provider_info.py
@@ -15,76 +15,275 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
-# OVERWRITTEN WHEN PREPARING PACKAGES.
+# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE OVERWRITTEN!
 #
-# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
-# `get_provider_info_TEMPLATE.py.jinja2` IN the `provider_packages` DIRECTORY
+# IF YOU WANT TO MODIFY THIS FILE, YOU SHOULD MODIFY THE TEMPLATE
+# `get_provider_info_TEMPLATE.py.jinja2` IN the `dev/breeze/src/airflow_breeze/templates` DIRECTORY
+
 
 def get_provider_info():
     return {
-        'package-name': 'apache-airflow-providers-cncf-kubernetes',
-        'name': 'Kubernetes',
-        'description': '`Kubernetes <https://kubernetes.io/>`__\n',
-        'versions': [
-            '3.1.0',
-            '3.0.2',
-            '3.0.1',
-            '3.0.0',
-            '2.2.0',
-            '2.1.0',
-            '2.0.3',
-            '2.0.2',
-            '2.0.1',
-            '2.0.0',
-            '1.2.0',
-            '1.1.0',
-            '1.0.2',
-            '1.0.1',
-            '1.0.0',
-        ],
-        'additional-dependencies': ['apache-airflow>=2.1.0'],
-        'integrations': [
+        "package-name": "apache-airflow-providers-cncf-kubernetes",
+        "name": "Kubernetes",
+        "description": "`Kubernetes <https://kubernetes.io/>`__\n",
+        "integrations": [
             {
-                'integration-name': 'Kubernetes',
-                'external-doc-url': 'https://kubernetes.io/',
-                'how-to-guide': ['/docs/apache-airflow-providers-cncf-kubernetes/operators.rst'],
-                'logo': '/integration-logos/kubernetes/Kubernetes.png',
-                'tags': ['software'],
+                "integration-name": "Kubernetes",
+                "external-doc-url": "https://kubernetes.io/",
+                "how-to-guide": ["/docs/apache-airflow-providers-cncf-kubernetes/operators.rst"],
+                "logo": "/docs/integration-logos/Kubernetes.png",
+                "tags": ["software"],
             },
             {
-                'integration-name': 'Spark on Kubernetes',
-                'external-doc-url': 'https://github.com/GoogleCloudPlatform/spark-on-k8s-operator',
-                'logo': '/integration-logos/kubernetes/Spark-On-Kubernetes.png',
-                'tags': ['software'],
+                "integration-name": "Spark on Kubernetes",
+                "external-doc-url": "https://github.com/GoogleCloudPlatform/spark-on-k8s-operator",
+                "logo": "/docs/integration-logos/Spark-On-Kubernetes.png",
+                "tags": ["software"],
             },
         ],
-        'operators': [
+        "operators": [
             {
-                'integration-name': 'Kubernetes',
-                'python-modules': [
-                    'airflow.providers.cncf.kubernetes.operators.kubernetes_pod',
-                    'airflow.providers.cncf.kubernetes.operators.spark_kubernetes',
+                "integration-name": "Kubernetes",
+                "python-modules": [
+                    "airflow.providers.cncf.kubernetes.operators.custom_object_launcher",
+                    "airflow.providers.cncf.kubernetes.operators.kueue",
+                    "airflow.providers.cncf.kubernetes.operators.pod",
+                    "airflow.providers.cncf.kubernetes.operators.spark_kubernetes",
+                    "airflow.providers.cncf.kubernetes.operators.resource",
+                    "airflow.providers.cncf.kubernetes.operators.job",
                 ],
             }
         ],
-        'sensors': [
+        "sensors": [
             {
-                'integration-name': 'Kubernetes',
-                'python-modules': ['airflow.providers.cncf.kubernetes.sensors.spark_kubernetes'],
+                "integration-name": "Kubernetes",
+                "python-modules": ["airflow.providers.cncf.kubernetes.sensors.spark_kubernetes"],
             }
         ],
-        'hooks': [
+        "hooks": [
             {
-                'integration-name': 'Kubernetes',
-                'python-modules': ['airflow.providers.cncf.kubernetes.hooks.kubernetes'],
+                "integration-name": "Kubernetes",
+                "python-modules": ["airflow.providers.cncf.kubernetes.hooks.kubernetes"],
             }
         ],
-        'hook-class-names': ['airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook'],
-        'connection-types': [
+        "triggers": [
             {
-                'hook-class-name': 'airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook',
-                'connection-type': 'kubernetes',
+                "integration-name": "Kubernetes",
+                "python-modules": [
+                    "airflow.providers.cncf.kubernetes.triggers.pod",
+                    "airflow.providers.cncf.kubernetes.triggers.job",
+                ],
             }
         ],
+        "connection-types": [
+            {
+                "hook-class-name": "airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook",
+                "connection-type": "kubernetes",
+            }
+        ],
+        "task-decorators": [
+            {
+                "class-name": "airflow.providers.cncf.kubernetes.decorators.kubernetes.kubernetes_task",
+                "name": "kubernetes",
+            },
+            {
+                "class-name": "airflow.providers.cncf.kubernetes.decorators.kubernetes_cmd.kubernetes_cmd_task",
+                "name": "kubernetes_cmd",
+            },
+        ],
+        "config": {
+            "local_kubernetes_executor": {
+                "description": "This section only applies if you are using the ``LocalKubernetesExecutor`` in\n``[core]`` section above\n",
+                "options": {
+                    "kubernetes_queue": {
+                        "description": "Define when to send a task to ``KubernetesExecutor`` when using ``LocalKubernetesExecutor``.\nWhen the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``),\nthe task is executed via ``KubernetesExecutor``,\notherwise via ``LocalExecutor``\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "kubernetes",
+                    }
+                },
+            },
+            "kubernetes_executor": {
+                "description": None,
+                "options": {
+                    "api_client_retry_configuration": {
+                        "description": "Kwargs to override the default urllib3 Retry used in the kubernetes API client\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": '{ "total": 3, "backoff_factor": 0.5 }',
+                        "default": "",
+                    },
+                    "logs_task_metadata": {
+                        "description": "Flag to control the information added to kubernetes executor logs for better traceability\n",
+                        "version_added": None,
+                        "type": "boolean",
+                        "example": None,
+                        "default": "False",
+                    },
+                    "pod_template_file": {
+                        "description": "Path to the YAML pod file that forms the basis for KubernetesExecutor workers.\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "",
+                        "see_also": ":ref:`concepts:pod_template_file`",
+                    },
+                    "worker_container_repository": {
+                        "description": "The repository of the Kubernetes Image for the Worker to Run\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "",
+                        "deprecated": True,
+                        "deprecation_reason": "This configuration is deprecated. Use `pod_template_file` to specify container image instead.\n",
+                    },
+                    "worker_container_tag": {
+                        "description": "The tag of the Kubernetes Image for the Worker to Run\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "",
+                        "deprecated": True,
+                        "deprecation_reason": "This configuration is deprecated. Use `pod_template_file` to specify the image tag instead.\n",
+                    },
+                    "namespace": {
+                        "description": "The Kubernetes namespace where airflow workers should be created. Defaults to ``default``\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "default",
+                        "deprecated": True,
+                        "deprecation_reason": "This configuration is deprecated. Use `pod_template_file` to specify namespace instead.\n",
+                    },
+                    "delete_worker_pods": {
+                        "description": "If True, all worker pods will be deleted upon termination\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "True",
+                    },
+                    "delete_worker_pods_on_failure": {
+                        "description": "If False (and delete_worker_pods is True),\nfailed worker pods will not be deleted so users can investigate them.\nThis only prevents removal of worker pods where the worker itself failed,\nnot when the task it ran failed.\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "False",
+                    },
+                    "worker_pod_pending_fatal_container_state_reasons": {
+                        "description": "If the worker pods are in a pending state due to a fatal container\nstate reasons, then fail the task and delete the worker pod\nif delete_worker_pods is True and delete_worker_pods_on_failure is True.\n",
+                        "version_added": "8.1.0",
+                        "type": "string",
+                        "example": None,
+                        "default": "CreateContainerConfigError,ErrImagePull,CreateContainerError,ImageInspectError,InvalidImageName",
+                    },
+                    "worker_pods_creation_batch_size": {
+                        "description": 'Number of Kubernetes Worker Pod creation calls per scheduler loop.\nNote that the current default of "1" will only launch a single pod\nper-heartbeat. It is HIGHLY recommended that users increase this\nnumber to match the tolerance of their kubernetes cluster for\nbetter performance.\n',
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "1",
+                    },
+                    "multi_namespace_mode": {
+                        "description": "Allows users to launch pods in multiple namespaces.\nWill require creating a cluster-role for the scheduler,\nor use multi_namespace_mode_namespace_list configuration.\n",
+                        "version_added": None,
+                        "type": "boolean",
+                        "example": None,
+                        "default": "False",
+                    },
+                    "multi_namespace_mode_namespace_list": {
+                        "description": "If multi_namespace_mode is True while scheduler does not have a cluster-role,\ngive the list of namespaces where the scheduler will schedule jobs\nScheduler needs to have the necessary permissions in these namespaces.\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "",
+                    },
+                    "in_cluster": {
+                        "description": "Use the service account kubernetes gives to pods to connect to kubernetes cluster.\nIt's intended for clients that expect to be running inside a pod running on kubernetes.\nIt will raise an exception if called from a process not running in a kubernetes environment.\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "True",
+                    },
+                    "cluster_context": {
+                        "description": "When running with in_cluster=False change the default cluster_context or config_file\noptions to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has.\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": None,
+                    },
+                    "config_file": {
+                        "description": "Path to the kubernetes configfile to be used when ``in_cluster`` is set to False\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": None,
+                    },
+                    "kube_client_request_args": {
+                        "description": "Keyword parameters to pass while calling a kubernetes client core_v1_api methods\nfrom Kubernetes Executor provided as a single line formatted JSON dictionary string.\nList of supported params are similar for all core_v1_apis, hence a single config\nvariable for all apis. See:\nhttps://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "",
+                    },
+                    "delete_option_kwargs": {
+                        "description": "Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client\n``core_v1_api`` method when using the Kubernetes Executor.\nThis should be an object and can contain any of the options listed in the ``v1DeleteOptions``\nclass defined here:\nhttps://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": '{"grace_period_seconds": 10}',
+                        "default": "",
+                    },
+                    "enable_tcp_keepalive": {
+                        "description": "Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely\nwhen idle connection is time-outed on services like cloud load balancers or firewalls.\n",
+                        "version_added": None,
+                        "type": "boolean",
+                        "example": None,
+                        "default": "True",
+                    },
+                    "tcp_keep_idle": {
+                        "description": "When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has\nbeen idle for `tcp_keep_idle` seconds.\n",
+                        "version_added": None,
+                        "type": "integer",
+                        "example": None,
+                        "default": "120",
+                    },
+                    "tcp_keep_intvl": {
+                        "description": "When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond\nto a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds.\n",
+                        "version_added": None,
+                        "type": "integer",
+                        "example": None,
+                        "default": "30",
+                    },
+                    "tcp_keep_cnt": {
+                        "description": "When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond\nto a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before\na connection is considered to be broken.\n",
+                        "version_added": None,
+                        "type": "integer",
+                        "example": None,
+                        "default": "6",
+                    },
+                    "verify_ssl": {
+                        "description": "Set this to false to skip verifying SSL certificate of Kubernetes python client.\n",
+                        "version_added": None,
+                        "type": "boolean",
+                        "example": None,
+                        "default": "True",
+                    },
+                    "ssl_ca_cert": {
+                        "description": "Path to a CA certificate to be used by the Kubernetes client to verify the server's SSL certificate.\n",
+                        "version_added": None,
+                        "type": "string",
+                        "example": None,
+                        "default": "",
+                    },
+                    "task_publish_max_retries": {
+                        "description": "The Maximum number of retries for queuing the task to the kubernetes scheduler when\nfailing due to Kube API exceeded quota errors before giving up and marking task as failed.\n-1 for unlimited times.\n",
+                        "version_added": None,
+                        "type": "integer",
+                        "example": None,
+                        "default": "0",
+                    },
+                },
+            },
+        },
+        "executors": ["airflow.providers.cncf.kubernetes.executors.kubernetes_executor.KubernetesExecutor"],
     }
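
Every entry in the `config` block above surfaces as a `[section]`/option pair in `airflow.cfg`, and the provider reads it back through Airflow's global config object, exactly as `LocalKubernetesExecutor.KUBERNETES_QUEUE` does in the first hunk. A small illustrative sketch (the variable names are arbitrary; the commented values are the declared defaults):

# Illustrative only: reading the options declared above at runtime.
from airflow.configuration import conf

queue = conf.get("local_kubernetes_executor", "kubernetes_queue")  # "kubernetes"
delete_pods = conf.getboolean("kubernetes_executor", "delete_worker_pods")  # True
batch_size = conf.getint("kubernetes_executor", "worker_pods_creation_batch_size")  # 1

print(queue, delete_pods, batch_size)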