apache-airflow-providers-google 12.0.0rc2__py3-none-any.whl → 13.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- airflow/providers/google/LICENSE +0 -52
- airflow/providers/google/__init__.py +1 -1
- airflow/providers/google/ads/hooks/ads.py +27 -13
- airflow/providers/google/ads/transfers/ads_to_gcs.py +18 -4
- airflow/providers/google/assets/bigquery.py +17 -0
- airflow/providers/google/cloud/_internal_client/secret_manager_client.py +2 -3
- airflow/providers/google/cloud/hooks/alloy_db.py +736 -8
- airflow/providers/google/cloud/hooks/automl.py +10 -4
- airflow/providers/google/cloud/hooks/bigquery.py +125 -22
- airflow/providers/google/cloud/hooks/bigquery_dts.py +8 -8
- airflow/providers/google/cloud/hooks/bigtable.py +2 -3
- airflow/providers/google/cloud/hooks/cloud_batch.py +3 -4
- airflow/providers/google/cloud/hooks/cloud_build.py +4 -5
- airflow/providers/google/cloud/hooks/cloud_composer.py +3 -4
- airflow/providers/google/cloud/hooks/cloud_memorystore.py +3 -4
- airflow/providers/google/cloud/hooks/cloud_run.py +3 -4
- airflow/providers/google/cloud/hooks/cloud_sql.py +7 -3
- airflow/providers/google/cloud/hooks/cloud_storage_transfer_service.py +119 -7
- airflow/providers/google/cloud/hooks/compute.py +3 -3
- airflow/providers/google/cloud/hooks/datacatalog.py +3 -4
- airflow/providers/google/cloud/hooks/dataflow.py +12 -12
- airflow/providers/google/cloud/hooks/dataform.py +2 -3
- airflow/providers/google/cloud/hooks/datafusion.py +2 -2
- airflow/providers/google/cloud/hooks/dataplex.py +1032 -11
- airflow/providers/google/cloud/hooks/dataproc.py +4 -5
- airflow/providers/google/cloud/hooks/dataproc_metastore.py +3 -4
- airflow/providers/google/cloud/hooks/dlp.py +3 -4
- airflow/providers/google/cloud/hooks/gcs.py +7 -6
- airflow/providers/google/cloud/hooks/kms.py +2 -3
- airflow/providers/google/cloud/hooks/kubernetes_engine.py +8 -8
- airflow/providers/google/cloud/hooks/life_sciences.py +1 -1
- airflow/providers/google/cloud/hooks/managed_kafka.py +482 -0
- airflow/providers/google/cloud/hooks/natural_language.py +2 -3
- airflow/providers/google/cloud/hooks/os_login.py +2 -3
- airflow/providers/google/cloud/hooks/pubsub.py +6 -6
- airflow/providers/google/cloud/hooks/secret_manager.py +2 -3
- airflow/providers/google/cloud/hooks/spanner.py +2 -2
- airflow/providers/google/cloud/hooks/speech_to_text.py +2 -3
- airflow/providers/google/cloud/hooks/stackdriver.py +4 -4
- airflow/providers/google/cloud/hooks/tasks.py +3 -4
- airflow/providers/google/cloud/hooks/text_to_speech.py +2 -3
- airflow/providers/google/cloud/hooks/translate.py +236 -5
- airflow/providers/google/cloud/hooks/vertex_ai/auto_ml.py +9 -4
- airflow/providers/google/cloud/hooks/vertex_ai/batch_prediction_job.py +3 -4
- airflow/providers/google/cloud/hooks/vertex_ai/custom_job.py +4 -5
- airflow/providers/google/cloud/hooks/vertex_ai/dataset.py +3 -4
- airflow/providers/google/cloud/hooks/vertex_ai/endpoint_service.py +2 -3
- airflow/providers/google/cloud/hooks/vertex_ai/feature_store.py +3 -4
- airflow/providers/google/cloud/hooks/vertex_ai/generative_model.py +1 -181
- airflow/providers/google/cloud/hooks/vertex_ai/hyperparameter_tuning_job.py +3 -4
- airflow/providers/google/cloud/hooks/vertex_ai/model_service.py +2 -3
- airflow/providers/google/cloud/hooks/vertex_ai/pipeline_job.py +3 -4
- airflow/providers/google/cloud/hooks/vertex_ai/prediction_service.py +2 -3
- airflow/providers/google/cloud/hooks/video_intelligence.py +2 -3
- airflow/providers/google/cloud/hooks/vision.py +3 -4
- airflow/providers/google/cloud/hooks/workflows.py +2 -3
- airflow/providers/google/cloud/links/alloy_db.py +46 -0
- airflow/providers/google/cloud/links/bigquery.py +25 -0
- airflow/providers/google/cloud/links/dataplex.py +172 -2
- airflow/providers/google/cloud/links/kubernetes_engine.py +1 -2
- airflow/providers/google/cloud/links/managed_kafka.py +104 -0
- airflow/providers/google/cloud/links/translate.py +28 -0
- airflow/providers/google/cloud/log/gcs_task_handler.py +3 -3
- airflow/providers/google/cloud/log/stackdriver_task_handler.py +11 -10
- airflow/providers/google/cloud/openlineage/facets.py +67 -0
- airflow/providers/google/cloud/openlineage/mixins.py +438 -173
- airflow/providers/google/cloud/openlineage/utils.py +394 -61
- airflow/providers/google/cloud/operators/alloy_db.py +980 -69
- airflow/providers/google/cloud/operators/automl.py +83 -245
- airflow/providers/google/cloud/operators/bigquery.py +377 -74
- airflow/providers/google/cloud/operators/bigquery_dts.py +126 -13
- airflow/providers/google/cloud/operators/bigtable.py +1 -3
- airflow/providers/google/cloud/operators/cloud_base.py +1 -2
- airflow/providers/google/cloud/operators/cloud_batch.py +2 -4
- airflow/providers/google/cloud/operators/cloud_build.py +3 -5
- airflow/providers/google/cloud/operators/cloud_composer.py +5 -7
- airflow/providers/google/cloud/operators/cloud_memorystore.py +4 -6
- airflow/providers/google/cloud/operators/cloud_run.py +6 -5
- airflow/providers/google/cloud/operators/cloud_sql.py +20 -8
- airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py +62 -8
- airflow/providers/google/cloud/operators/compute.py +3 -4
- airflow/providers/google/cloud/operators/datacatalog.py +9 -11
- airflow/providers/google/cloud/operators/dataflow.py +1 -112
- airflow/providers/google/cloud/operators/dataform.py +3 -5
- airflow/providers/google/cloud/operators/datafusion.py +1 -1
- airflow/providers/google/cloud/operators/dataplex.py +2046 -7
- airflow/providers/google/cloud/operators/dataproc.py +102 -17
- airflow/providers/google/cloud/operators/dataproc_metastore.py +7 -9
- airflow/providers/google/cloud/operators/dlp.py +17 -19
- airflow/providers/google/cloud/operators/gcs.py +14 -17
- airflow/providers/google/cloud/operators/kubernetes_engine.py +2 -2
- airflow/providers/google/cloud/operators/managed_kafka.py +788 -0
- airflow/providers/google/cloud/operators/natural_language.py +3 -5
- airflow/providers/google/cloud/operators/pubsub.py +39 -7
- airflow/providers/google/cloud/operators/speech_to_text.py +3 -5
- airflow/providers/google/cloud/operators/stackdriver.py +3 -5
- airflow/providers/google/cloud/operators/tasks.py +4 -6
- airflow/providers/google/cloud/operators/text_to_speech.py +2 -4
- airflow/providers/google/cloud/operators/translate.py +414 -5
- airflow/providers/google/cloud/operators/translate_speech.py +2 -4
- airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py +9 -8
- airflow/providers/google/cloud/operators/vertex_ai/batch_prediction_job.py +4 -6
- airflow/providers/google/cloud/operators/vertex_ai/custom_job.py +6 -8
- airflow/providers/google/cloud/operators/vertex_ai/dataset.py +4 -6
- airflow/providers/google/cloud/operators/vertex_ai/endpoint_service.py +4 -6
- airflow/providers/google/cloud/operators/vertex_ai/generative_model.py +0 -322
- airflow/providers/google/cloud/operators/vertex_ai/hyperparameter_tuning_job.py +4 -6
- airflow/providers/google/cloud/operators/vertex_ai/model_service.py +4 -6
- airflow/providers/google/cloud/operators/vertex_ai/pipeline_job.py +4 -6
- airflow/providers/google/cloud/operators/video_intelligence.py +3 -5
- airflow/providers/google/cloud/operators/vision.py +4 -6
- airflow/providers/google/cloud/operators/workflows.py +5 -7
- airflow/providers/google/cloud/secrets/secret_manager.py +1 -2
- airflow/providers/google/cloud/sensors/bigquery_dts.py +3 -5
- airflow/providers/google/cloud/sensors/bigtable.py +2 -3
- airflow/providers/google/cloud/sensors/cloud_composer.py +32 -8
- airflow/providers/google/cloud/sensors/cloud_storage_transfer_service.py +39 -1
- airflow/providers/google/cloud/sensors/dataplex.py +4 -6
- airflow/providers/google/cloud/sensors/dataproc.py +2 -3
- airflow/providers/google/cloud/sensors/dataproc_metastore.py +1 -2
- airflow/providers/google/cloud/sensors/gcs.py +2 -4
- airflow/providers/google/cloud/sensors/pubsub.py +2 -3
- airflow/providers/google/cloud/sensors/workflows.py +3 -5
- airflow/providers/google/cloud/transfers/bigquery_to_gcs.py +5 -5
- airflow/providers/google/cloud/transfers/gcs_to_bigquery.py +10 -12
- airflow/providers/google/cloud/transfers/gcs_to_gcs.py +1 -1
- airflow/providers/google/cloud/transfers/gcs_to_sftp.py +36 -4
- airflow/providers/google/cloud/transfers/mssql_to_gcs.py +27 -2
- airflow/providers/google/cloud/transfers/mysql_to_gcs.py +27 -2
- airflow/providers/google/cloud/transfers/postgres_to_gcs.py +27 -2
- airflow/providers/google/cloud/transfers/sftp_to_gcs.py +34 -5
- airflow/providers/google/cloud/transfers/sql_to_gcs.py +15 -0
- airflow/providers/google/cloud/transfers/trino_to_gcs.py +25 -2
- airflow/providers/google/cloud/triggers/bigquery_dts.py +1 -2
- airflow/providers/google/cloud/triggers/cloud_batch.py +1 -2
- airflow/providers/google/cloud/triggers/cloud_build.py +1 -2
- airflow/providers/google/cloud/triggers/cloud_composer.py +13 -3
- airflow/providers/google/cloud/triggers/cloud_storage_transfer_service.py +102 -4
- airflow/providers/google/cloud/triggers/dataflow.py +2 -3
- airflow/providers/google/cloud/triggers/dataplex.py +1 -2
- airflow/providers/google/cloud/triggers/dataproc.py +2 -3
- airflow/providers/google/cloud/triggers/kubernetes_engine.py +1 -1
- airflow/providers/google/cloud/triggers/pubsub.py +1 -2
- airflow/providers/google/cloud/triggers/vertex_ai.py +7 -8
- airflow/providers/google/cloud/utils/credentials_provider.py +15 -8
- airflow/providers/google/cloud/utils/external_token_supplier.py +1 -0
- airflow/providers/google/common/auth_backend/google_openid.py +4 -4
- airflow/providers/google/common/consts.py +1 -2
- airflow/providers/google/common/hooks/base_google.py +8 -7
- airflow/providers/google/get_provider_info.py +186 -134
- airflow/providers/google/marketing_platform/hooks/analytics_admin.py +2 -3
- airflow/providers/google/marketing_platform/hooks/search_ads.py +1 -1
- airflow/providers/google/marketing_platform/operators/analytics_admin.py +5 -7
- {apache_airflow_providers_google-12.0.0rc2.dist-info → apache_airflow_providers_google-13.0.0.dist-info}/METADATA +41 -58
- {apache_airflow_providers_google-12.0.0rc2.dist-info → apache_airflow_providers_google-13.0.0.dist-info}/RECORD +157 -159
- airflow/providers/google/cloud/example_dags/example_facebook_ads_to_gcs.py +0 -141
- airflow/providers/google/cloud/example_dags/example_looker.py +0 -64
- airflow/providers/google/cloud/example_dags/example_presto_to_gcs.py +0 -194
- airflow/providers/google/cloud/example_dags/example_salesforce_to_gcs.py +0 -129
- airflow/providers/google/marketing_platform/example_dags/__init__.py +0 -16
- airflow/providers/google/marketing_platform/example_dags/example_display_video.py +0 -213
- {apache_airflow_providers_google-12.0.0rc2.dist-info → apache_airflow_providers_google-13.0.0.dist-info}/WHEEL +0 -0
- {apache_airflow_providers_google-12.0.0rc2.dist-info → apache_airflow_providers_google-13.0.0.dist-info}/entry_points.txt +0 -0
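
The detailed hunks below are from airflow/providers/google/cloud/operators/alloy_db.py (the "+980 -69" entry above): 13.0.0 adds create/update/delete operators for AlloyDB instances, users, and backups, plus the new AlloyDBBackupsLink and AlloyDBUsersLink extra links. As a quick orientation, here is a minimal DAG sketch using the new operators. The import path, operator names, and constructor arguments come from the diff below; the project, region, cluster IDs, and the minimal Instance/User/Backup bodies are illustrative placeholders only.

    # Illustrative sketch only: resource IDs and configuration bodies are placeholders.
    from __future__ import annotations

    from datetime import datetime

    from airflow import DAG
    from airflow.providers.google.cloud.operators.alloy_db import (
        AlloyDBCreateBackupOperator,
        AlloyDBCreateInstanceOperator,
        AlloyDBCreateUserOperator,
    )

    PROJECT_ID = "my-project"          # placeholder
    LOCATION = "europe-west1"          # placeholder
    CLUSTER_ID = "my-alloydb-cluster"  # assumed to exist already

    with DAG(
        dag_id="example_alloy_db_provider_13",
        start_date=datetime(2025, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        create_instance = AlloyDBCreateInstanceOperator(
            task_id="create_instance",
            cluster_id=CLUSTER_ID,
            instance_id="primary-instance",
            # Minimal Instance body; see the alloydb_v1.Instance type linked in the docstrings.
            instance_configuration={"instance_type": "PRIMARY"},
            project_id=PROJECT_ID,
            location=LOCATION,
        )
        create_user = AlloyDBCreateUserOperator(
            task_id="create_user",
            user_id="analytics",
            # Minimal User body; see the alloydb_v1.User type linked in the docstrings.
            user_configuration={"user_type": "ALLOYDB_BUILT_IN", "password": "change-me"},
            cluster_id=CLUSTER_ID,
            project_id=PROJECT_ID,
            location=LOCATION,
        )
        create_backup = AlloyDBCreateBackupOperator(
            task_id="create_backup",
            backup_id="nightly-backup",
            # Minimal Backup body; cluster_name is the full resource name of the source cluster.
            backup_configuration={
                "cluster_name": f"projects/{PROJECT_ID}/locations/{LOCATION}/clusters/{CLUSTER_ID}",
            },
            project_id=PROJECT_ID,
            location=LOCATION,
        )

        create_instance >> create_user >> create_backup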
@@ -21,25 +21,28 @@ from __future__ import annotations

 from collections.abc import Sequence
 from functools import cached_property
-from typing import TYPE_CHECKING
-
-from google.api_core.exceptions import AlreadyExists, InvalidArgument
-from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
-from google.cloud import alloydb_v1
+from typing import TYPE_CHECKING

 from airflow.exceptions import AirflowException
 from airflow.providers.google.cloud.hooks.alloy_db import AlloyDbHook
-from airflow.providers.google.cloud.links.alloy_db import AlloyDBClusterLink
+from airflow.providers.google.cloud.links.alloy_db import (
+    AlloyDBBackupsLink,
+    AlloyDBClusterLink,
+    AlloyDBUsersLink,
+)
 from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
+from google.api_core.exceptions import NotFound
+from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
+from google.cloud import alloydb_v1

 if TYPE_CHECKING:
     import proto
+
+    from airflow.utils.context import Context
     from google.api_core.operation import Operation
     from google.api_core.retry import Retry
     from google.protobuf.field_mask_pb2 import FieldMask

-    from airflow.utils.context import Context
-

 class AlloyDBBaseOperator(GoogleCloudBaseOperator):
     """
@@ -139,10 +142,10 @@ class AlloyDBWriteBaseOperator(AlloyDBBaseOperator):
         If the `validate_request` parameter is set, then no operation is performed and thus nothing to wait.
         """
         if self.validate_request:
-
+            # Validation requests are only validated and aren't executed, thus no operation result is expected
+            return None
         else:
             return self.hook.wait_for_operation(timeout=self.timeout, operation=operation)
-        return None


 class AlloyDBCreateClusterOperator(AlloyDBWriteBaseOperator):
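
The hunk above documents the `validate_request` behavior: a validate-only request is checked by the API but never executed, so there is no long-running operation to wait for and the write operators return `None`. A minimal, illustrative task showing the flag on one of the delete operators from this file (IDs are placeholders) might look like:

    # Dry-run sketch: with validate_request=True the request is only validated by the API.
    validate_delete = AlloyDBDeleteClusterOperator(
        task_id="validate_delete_cluster",
        cluster_id="my-alloydb-cluster",  # placeholder
        project_id="my-project",          # placeholder
        location="europe-west1",          # placeholder
        validate_request=True,
    )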
@@ -189,7 +192,8 @@ class AlloyDBCreateClusterOperator(AlloyDBWriteBaseOperator):
     """

     template_fields: Sequence[str] = tuple(
-        {"cluster_id", "is_secondary"}
+        {"cluster_id", "cluster_configuration", "is_secondary"}
+        | set(AlloyDBWriteBaseOperator.template_fields)
     )
     operator_extra_links = (AlloyDBClusterLink(),)

@@ -206,13 +210,40 @@ class AlloyDBCreateClusterOperator(AlloyDBWriteBaseOperator):
         self.cluster_configuration = cluster_configuration
         self.is_secondary = is_secondary

-    def
-
-
-
-
+    def _get_cluster(self) -> proto.Message | None:
+        self.log.info("Checking if the cluster %s exists already...", self.cluster_id)
+        try:
+            cluster = self.hook.get_cluster(
+                cluster_id=self.cluster_id,
+                location=self.location,
+                project_id=self.project_id,
+            )
+        except NotFound:
+            self.log.info("The cluster %s does not exist yet.", self.cluster_id)
+        except Exception as ex:
+            raise AirflowException(ex) from ex
+        else:
+            self.log.info("AlloyDB cluster %s already exists.", self.cluster_id)
+            result = alloydb_v1.Cluster.to_dict(cluster)
+            return result
+        return None
+
+    def execute(self, context: Context) -> dict | None:
+        AlloyDBClusterLink.persist(
+            context=context,
+            task_instance=self,
+            location_id=self.location,
+            cluster_id=self.cluster_id,
+            project_id=self.project_id,
         )
-
+
+        if cluster := self._get_cluster():
+            return cluster
+
+        if self.validate_request:
+            self.log.info("Validating a Create AlloyDB cluster request.")
+        else:
+            self.log.info("Creating an AlloyDB cluster.")

         try:
             create_method = (
@@ -229,40 +260,12 @@ class AlloyDBCreateClusterOperator(AlloyDBWriteBaseOperator):
                 timeout=self.timeout,
                 metadata=self.metadata,
             )
-        except AlreadyExists:
-            self.log.info("AlloyDB cluster %s already exists.", self.cluster_id)
-            result = self.hook.get_cluster(
-                cluster_id=self.cluster_id,
-                location=self.location,
-                project_id=self.project_id,
-            )
-            result = alloydb_v1.Cluster.to_dict(result)
-        except InvalidArgument as ex:
-            if "cannot create more than one secondary cluster per primary cluster" in ex.message:
-                result = self.hook.get_cluster(
-                    cluster_id=self.cluster_id,
-                    location=self.location,
-                    project_id=self.project_id,
-                )
-                result = alloydb_v1.Cluster.to_dict(result)
-                self.log.info("AlloyDB cluster %s already exists.", result.get("name").split("/")[-1])
-            else:
-                raise AirflowException(ex.message)
         except Exception as ex:
             raise AirflowException(ex)
         else:
             operation_result = self.get_operation_result(operation)
             result = alloydb_v1.Cluster.to_dict(operation_result) if operation_result else None

-        if result:
-            AlloyDBClusterLink.persist(
-                context=context,
-                task_instance=self,
-                location_id=self.location,
-                cluster_id=self.cluster_id,
-                project_id=self.project_id,
-            )
-
         return result


@@ -274,7 +277,7 @@ class AlloyDBUpdateClusterOperator(AlloyDBWriteBaseOperator):
         For more information on how to use this operator, take a look at the guide:
         :ref:`howto/operator:AlloyDBUpdateClusterOperator`

-    :param cluster_id: Required. ID of the cluster to
+    :param cluster_id: Required. ID of the cluster to update.
     :param cluster_configuration: Required. Cluster to update. For more details please see API documentation:
         https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Cluster
     :param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
@@ -311,7 +314,8 @@ class AlloyDBUpdateClusterOperator(AlloyDBWriteBaseOperator):
     """

     template_fields: Sequence[str] = tuple(
-        {"cluster_id", "allow_missing"}
+        {"cluster_id", "cluster_configuration", "allow_missing"}
+        | set(AlloyDBWriteBaseOperator.template_fields)
     )
     operator_extra_links = (AlloyDBClusterLink(),)

@@ -330,13 +334,18 @@ class AlloyDBUpdateClusterOperator(AlloyDBWriteBaseOperator):
         self.update_mask = update_mask
         self.allow_missing = allow_missing

-    def execute(self, context: Context) ->
-
-
-
-
+    def execute(self, context: Context) -> dict | None:
+        AlloyDBClusterLink.persist(
+            context=context,
+            task_instance=self,
+            location_id=self.location,
+            cluster_id=self.cluster_id,
+            project_id=self.project_id,
         )
-        self.
+        if self.validate_request:
+            self.log.info("Validating an Update AlloyDB cluster request.")
+        else:
+            self.log.info("Updating an AlloyDB cluster.")

         try:
             operation = self.hook.update_cluster(
@@ -358,14 +367,6 @@ class AlloyDBUpdateClusterOperator(AlloyDBWriteBaseOperator):
             operation_result = self.get_operation_result(operation)
             result = alloydb_v1.Cluster.to_dict(operation_result) if operation_result else None

-        AlloyDBClusterLink.persist(
-            context=context,
-            task_instance=self,
-            location_id=self.location,
-            cluster_id=self.cluster_id,
-            project_id=self.project_id,
-        )
-
         if not self.validate_request:
             self.log.info("AlloyDB cluster %s was successfully updated.", self.cluster_id)
         return result
@@ -379,7 +380,7 @@ class AlloyDBDeleteClusterOperator(AlloyDBWriteBaseOperator):
         For more information on how to use this operator, take a look at the guide:
         :ref:`howto/operator:AlloyDBDeleteClusterOperator`

-    :param cluster_id: Required. ID of the cluster to
+    :param cluster_id: Required. ID of the cluster to delete.
     :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
         so that if you must retry your request, the server ignores the request if it has already been
         completed. The server guarantees that for at least 60 minutes since the first request.
@@ -429,13 +430,11 @@ class AlloyDBDeleteClusterOperator(AlloyDBWriteBaseOperator):
         self.etag = etag
         self.force = force

-    def execute(self, context: Context) ->
-
-            "Validating a Delete AlloyDB cluster request."
-
-
-        )
-        self.log.info(message)
+    def execute(self, context: Context) -> None:
+        if self.validate_request:
+            self.log.info("Validating a Delete AlloyDB cluster request.")
+        else:
+            self.log.info("Deleting an AlloyDB cluster.")

         try:
             operation = self.hook.delete_cluster(
@@ -457,3 +456,915 @@ class AlloyDBDeleteClusterOperator(AlloyDBWriteBaseOperator):
|
|
457
456
|
|
458
457
|
if not self.validate_request:
|
459
458
|
self.log.info("AlloyDB cluster %s was successfully removed.", self.cluster_id)
|
459
|
+
|
460
|
+
|
461
|
+
class AlloyDBCreateInstanceOperator(AlloyDBWriteBaseOperator):
|
462
|
+
"""
|
463
|
+
Create an Instance in an Alloy DB cluster.
|
464
|
+
|
465
|
+
.. seealso::
|
466
|
+
For more information on how to use this operator, take a look at the guide:
|
467
|
+
:ref:`howto/operator:AlloyDBCreateInstanceOperator`
|
468
|
+
|
469
|
+
:param cluster_id: Required. ID of the cluster for creating an instance in.
|
470
|
+
:param instance_id: Required. ID of the instance to create.
|
471
|
+
:param instance_configuration: Required. Instance to create. For more details please see API documentation:
|
472
|
+
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
|
473
|
+
:param is_secondary: Required. Specifies if the Instance to be created is Primary or Secondary.
|
474
|
+
Please note, if set True, then specify the `instance_type` field in the instance.
|
475
|
+
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
|
476
|
+
so that if you must retry your request, the server ignores the request if it has already been
|
477
|
+
completed. The server guarantees that for at least 60 minutes since the first request.
|
478
|
+
For example, consider a situation where you make an initial request and the request times out.
|
479
|
+
If you make the request again with the same request ID, the server can check if the original operation
|
480
|
+
with the same request ID was received, and if so, ignores the second request.
|
481
|
+
This prevents clients from accidentally creating duplicate commitments.
|
482
|
+
The request ID must be a valid UUID with the exception that zero UUID is not supported
|
483
|
+
(00000000-0000-0000-0000-000000000000).
|
484
|
+
:param validate_request: Optional. If set, performs request validation, but does not actually
|
485
|
+
execute the request.
|
486
|
+
:param project_id: Required. The ID of the Google Cloud project where the service is used.
|
487
|
+
:param location: Required. The ID of the Google Cloud region where the service is used.
|
488
|
+
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
|
489
|
+
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
|
490
|
+
be retried.
|
491
|
+
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
|
492
|
+
Note that if `retry` is specified, the timeout applies to each individual attempt.
|
493
|
+
:param metadata: Optional. Additional metadata that is provided to the method.
|
494
|
+
:param impersonation_chain: Optional service account to impersonate using short-term
|
495
|
+
credentials, or chained list of accounts required to get the access_token
|
496
|
+
of the last account in the list, which will be impersonated in the request.
|
497
|
+
If set as a string, the account must grant the originating account
|
498
|
+
the Service Account Token Creator IAM role.
|
499
|
+
If set as a sequence, the identities from the list must grant
|
500
|
+
Service Account Token Creator IAM role to the directly preceding identity, with first
|
501
|
+
account from the list granting this role to the originating account (templated).
|
502
|
+
"""
|
503
|
+
|
504
|
+
template_fields: Sequence[str] = tuple(
|
505
|
+
{"cluster_id", "instance_id", "is_secondary", "instance_configuration"}
|
506
|
+
| set(AlloyDBWriteBaseOperator.template_fields)
|
507
|
+
)
|
508
|
+
operator_extra_links = (AlloyDBClusterLink(),)
|
509
|
+
|
510
|
+
def __init__(
|
511
|
+
self,
|
512
|
+
cluster_id: str,
|
513
|
+
instance_id: str,
|
514
|
+
instance_configuration: alloydb_v1.Instance | dict,
|
515
|
+
is_secondary: bool = False,
|
516
|
+
*args,
|
517
|
+
**kwargs,
|
518
|
+
):
|
519
|
+
super().__init__(*args, **kwargs)
|
520
|
+
self.cluster_id = cluster_id
|
521
|
+
self.instance_id = instance_id
|
522
|
+
self.instance_configuration = instance_configuration
|
523
|
+
self.is_secondary = is_secondary
|
524
|
+
|
525
|
+
def _get_instance(self) -> proto.Message | None:
|
526
|
+
self.log.info("Checking if the instance %s exists already...", self.instance_id)
|
527
|
+
try:
|
528
|
+
instance = self.hook.get_instance(
|
529
|
+
cluster_id=self.cluster_id,
|
530
|
+
instance_id=self.instance_id,
|
531
|
+
location=self.location,
|
532
|
+
project_id=self.project_id,
|
533
|
+
)
|
534
|
+
except NotFound:
|
535
|
+
self.log.info("The instance %s does not exist yet.", self.instance_id)
|
536
|
+
except Exception as ex:
|
537
|
+
raise AirflowException(ex) from ex
|
538
|
+
else:
|
539
|
+
self.log.info(
|
540
|
+
"AlloyDB instance %s already exists in the cluster %s.",
|
541
|
+
self.cluster_id,
|
542
|
+
self.instance_id,
|
543
|
+
)
|
544
|
+
result = alloydb_v1.Instance.to_dict(instance)
|
545
|
+
return result
|
546
|
+
return None
|
547
|
+
|
548
|
+
def execute(self, context: Context) -> dict | None:
|
549
|
+
AlloyDBClusterLink.persist(
|
550
|
+
context=context,
|
551
|
+
task_instance=self,
|
552
|
+
location_id=self.location,
|
553
|
+
cluster_id=self.cluster_id,
|
554
|
+
project_id=self.project_id,
|
555
|
+
)
|
556
|
+
if instance := self._get_instance():
|
557
|
+
return instance
|
558
|
+
|
559
|
+
if self.validate_request:
|
560
|
+
self.log.info("Validating a Create AlloyDB instance request.")
|
561
|
+
else:
|
562
|
+
self.log.info("Creating an AlloyDB instance.")
|
563
|
+
|
564
|
+
try:
|
565
|
+
create_method = (
|
566
|
+
self.hook.create_secondary_instance if self.is_secondary else self.hook.create_instance
|
567
|
+
)
|
568
|
+
operation = create_method(
|
569
|
+
cluster_id=self.cluster_id,
|
570
|
+
instance_id=self.instance_id,
|
571
|
+
instance=self.instance_configuration,
|
572
|
+
location=self.location,
|
573
|
+
project_id=self.project_id,
|
574
|
+
request_id=self.request_id,
|
575
|
+
validate_only=self.validate_request,
|
576
|
+
retry=self.retry,
|
577
|
+
timeout=self.timeout,
|
578
|
+
metadata=self.metadata,
|
579
|
+
)
|
580
|
+
except Exception as ex:
|
581
|
+
raise AirflowException(ex)
|
582
|
+
else:
|
583
|
+
operation_result = self.get_operation_result(operation)
|
584
|
+
result = alloydb_v1.Instance.to_dict(operation_result) if operation_result else None
|
585
|
+
|
586
|
+
return result
|
587
|
+
|
588
|
+
|
589
|
+
class AlloyDBUpdateInstanceOperator(AlloyDBWriteBaseOperator):
|
590
|
+
"""
|
591
|
+
Update an Alloy DB instance.
|
592
|
+
|
593
|
+
.. seealso::
|
594
|
+
For more information on how to use this operator, take a look at the guide:
|
595
|
+
:ref:`howto/operator:AlloyDBUpdateInstanceOperator`
|
596
|
+
|
597
|
+
:param cluster_id: Required. ID of the cluster.
|
598
|
+
:param instance_id: Required. ID of the instance to update.
|
599
|
+
:param instance_configuration: Required. Instance to update. For more details please see API documentation:
|
600
|
+
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
|
601
|
+
:param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
|
602
|
+
Instance resource by the update.
|
603
|
+
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
|
604
|
+
so that if you must retry your request, the server ignores the request if it has already been
|
605
|
+
completed. The server guarantees that for at least 60 minutes since the first request.
|
606
|
+
For example, consider a situation where you make an initial request and the request times out.
|
607
|
+
If you make the request again with the same request ID, the server can check if the original operation
|
608
|
+
with the same request ID was received, and if so, ignores the second request.
|
609
|
+
This prevents clients from accidentally creating duplicate commitments.
|
610
|
+
The request ID must be a valid UUID with the exception that zero UUID is not supported
|
611
|
+
(00000000-0000-0000-0000-000000000000).
|
612
|
+
:param validate_request: Optional. If set, performs request validation, but does not actually
|
613
|
+
execute the request.
|
614
|
+
:param allow_missing: Optional. If set to true, update succeeds even if instance is not found.
|
615
|
+
In that case, a new instance is created and update_mask is ignored.
|
616
|
+
:param project_id: Required. The ID of the Google Cloud project where the service is used.
|
617
|
+
:param location: Required. The ID of the Google Cloud region where the service is used.
|
618
|
+
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
|
619
|
+
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
|
620
|
+
be retried.
|
621
|
+
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
|
622
|
+
Note that if `retry` is specified, the timeout applies to each individual attempt.
|
623
|
+
:param metadata: Optional. Additional metadata that is provided to the method.
|
624
|
+
:param impersonation_chain: Optional service account to impersonate using short-term
|
625
|
+
credentials, or chained list of accounts required to get the access_token
|
626
|
+
of the last account in the list, which will be impersonated in the request.
|
627
|
+
If set as a string, the account must grant the originating account
|
628
|
+
the Service Account Token Creator IAM role.
|
629
|
+
If set as a sequence, the identities from the list must grant
|
630
|
+
Service Account Token Creator IAM role to the directly preceding identity, with first
|
631
|
+
account from the list granting this role to the originating account (templated).
|
632
|
+
"""
|
633
|
+
|
634
|
+
template_fields: Sequence[str] = tuple(
|
635
|
+
{"cluster_id", "instance_id", "instance_configuration", "update_mask", "allow_missing"}
|
636
|
+
| set(AlloyDBWriteBaseOperator.template_fields)
|
637
|
+
)
|
638
|
+
operator_extra_links = (AlloyDBClusterLink(),)
|
639
|
+
|
640
|
+
def __init__(
|
641
|
+
self,
|
642
|
+
cluster_id: str,
|
643
|
+
instance_id: str,
|
644
|
+
instance_configuration: alloydb_v1.Instance | dict,
|
645
|
+
update_mask: FieldMask | dict | None = None,
|
646
|
+
allow_missing: bool = False,
|
647
|
+
*args,
|
648
|
+
**kwargs,
|
649
|
+
):
|
650
|
+
super().__init__(*args, **kwargs)
|
651
|
+
self.cluster_id = cluster_id
|
652
|
+
self.instance_id = instance_id
|
653
|
+
self.instance_configuration = instance_configuration
|
654
|
+
self.update_mask = update_mask
|
655
|
+
self.allow_missing = allow_missing
|
656
|
+
|
657
|
+
def execute(self, context: Context) -> dict | None:
|
658
|
+
AlloyDBClusterLink.persist(
|
659
|
+
context=context,
|
660
|
+
task_instance=self,
|
661
|
+
location_id=self.location,
|
662
|
+
cluster_id=self.cluster_id,
|
663
|
+
project_id=self.project_id,
|
664
|
+
)
|
665
|
+
if self.validate_request:
|
666
|
+
self.log.info("Validating an Update AlloyDB instance request.")
|
667
|
+
else:
|
668
|
+
self.log.info("Updating an AlloyDB instance.")
|
669
|
+
|
670
|
+
try:
|
671
|
+
operation = self.hook.update_instance(
|
672
|
+
cluster_id=self.cluster_id,
|
673
|
+
instance_id=self.instance_id,
|
674
|
+
project_id=self.project_id,
|
675
|
+
location=self.location,
|
676
|
+
instance=self.instance_configuration,
|
677
|
+
update_mask=self.update_mask,
|
678
|
+
allow_missing=self.allow_missing,
|
679
|
+
request_id=self.request_id,
|
680
|
+
validate_only=self.validate_request,
|
681
|
+
retry=self.retry,
|
682
|
+
timeout=self.timeout,
|
683
|
+
metadata=self.metadata,
|
684
|
+
)
|
685
|
+
except Exception as ex:
|
686
|
+
raise AirflowException(ex) from ex
|
687
|
+
else:
|
688
|
+
operation_result = self.get_operation_result(operation)
|
689
|
+
result = alloydb_v1.Instance.to_dict(operation_result) if operation_result else None
|
690
|
+
|
691
|
+
if not self.validate_request:
|
692
|
+
self.log.info("AlloyDB instance %s was successfully updated.", self.cluster_id)
|
693
|
+
return result
|
694
|
+
|
695
|
+
|
696
|
+
class AlloyDBDeleteInstanceOperator(AlloyDBWriteBaseOperator):
|
697
|
+
"""
|
698
|
+
Delete an Alloy DB instance.
|
699
|
+
|
700
|
+
.. seealso::
|
701
|
+
For more information on how to use this operator, take a look at the guide:
|
702
|
+
:ref:`howto/operator:AlloyDBDeleteInstanceOperator`
|
703
|
+
|
704
|
+
:param instance_id: Required. ID of the instance to delete.
|
705
|
+
:param cluster_id: Required. ID of the cluster.
|
706
|
+
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
|
707
|
+
so that if you must retry your request, the server ignores the request if it has already been
|
708
|
+
completed. The server guarantees that for at least 60 minutes since the first request.
|
709
|
+
For example, consider a situation where you make an initial request and the request times out.
|
710
|
+
If you make the request again with the same request ID, the server can check if the original operation
|
711
|
+
with the same request ID was received, and if so, ignores the second request.
|
712
|
+
This prevents clients from accidentally creating duplicate commitments.
|
713
|
+
The request ID must be a valid UUID with the exception that zero UUID is not supported
|
714
|
+
(00000000-0000-0000-0000-000000000000).
|
715
|
+
:param validate_request: Optional. If set, performs request validation, but does not actually
|
716
|
+
execute the request.
|
717
|
+
:param etag: Optional. The current etag of the Instance. If an etag is provided and does not match the
|
718
|
+
current etag of the Instance, deletion will be blocked and an ABORTED error will be returned.
|
719
|
+
:param project_id: Required. The ID of the Google Cloud project where the service is used.
|
720
|
+
:param location: Required. The ID of the Google Cloud region where the service is used.
|
721
|
+
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
|
722
|
+
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
|
723
|
+
be retried.
|
724
|
+
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
|
725
|
+
Note that if `retry` is specified, the timeout applies to each individual attempt.
|
726
|
+
:param metadata: Optional. Additional metadata that is provided to the method.
|
727
|
+
:param impersonation_chain: Optional service account to impersonate using short-term
|
728
|
+
credentials, or chained list of accounts required to get the access_token
|
729
|
+
of the last account in the list, which will be impersonated in the request.
|
730
|
+
If set as a string, the account must grant the originating account
|
731
|
+
the Service Account Token Creator IAM role.
|
732
|
+
If set as a sequence, the identities from the list must grant
|
733
|
+
Service Account Token Creator IAM role to the directly preceding identity, with first
|
734
|
+
account from the list granting this role to the originating account (templated).
|
735
|
+
"""
|
736
|
+
|
737
|
+
template_fields: Sequence[str] = tuple(
|
738
|
+
{"instance_id", "cluster_id", "etag"} | set(AlloyDBWriteBaseOperator.template_fields)
|
739
|
+
)
|
740
|
+
|
741
|
+
def __init__(
|
742
|
+
self,
|
743
|
+
instance_id: str,
|
744
|
+
cluster_id: str,
|
745
|
+
etag: str | None = None,
|
746
|
+
*args,
|
747
|
+
**kwargs,
|
748
|
+
):
|
749
|
+
super().__init__(*args, **kwargs)
|
750
|
+
self.instance_id = instance_id
|
751
|
+
self.cluster_id = cluster_id
|
752
|
+
self.etag = etag
|
753
|
+
|
754
|
+
def execute(self, context: Context) -> None:
|
755
|
+
if self.validate_request:
|
756
|
+
self.log.info("Validating a Delete AlloyDB instance request.")
|
757
|
+
else:
|
758
|
+
self.log.info("Deleting an AlloyDB instance.")
|
759
|
+
|
760
|
+
try:
|
761
|
+
operation = self.hook.delete_instance(
|
762
|
+
instance_id=self.instance_id,
|
763
|
+
cluster_id=self.cluster_id,
|
764
|
+
project_id=self.project_id,
|
765
|
+
location=self.location,
|
766
|
+
etag=self.etag,
|
767
|
+
request_id=self.request_id,
|
768
|
+
validate_only=self.validate_request,
|
769
|
+
retry=self.retry,
|
770
|
+
timeout=self.timeout,
|
771
|
+
metadata=self.metadata,
|
772
|
+
)
|
773
|
+
except Exception as ex:
|
774
|
+
raise AirflowException(ex) from ex
|
775
|
+
else:
|
776
|
+
self.get_operation_result(operation)
|
777
|
+
|
778
|
+
if not self.validate_request:
|
779
|
+
self.log.info("AlloyDB instance %s was successfully removed.", self.instance_id)
|
780
|
+
|
781
|
+
|
782
|
+
class AlloyDBCreateUserOperator(AlloyDBWriteBaseOperator):
|
783
|
+
"""
|
784
|
+
Create a User in an Alloy DB cluster.
|
785
|
+
|
786
|
+
.. seealso::
|
787
|
+
For more information on how to use this operator, take a look at the guide:
|
788
|
+
:ref:`howto/operator:AlloyDBCreateUserOperator`
|
789
|
+
|
790
|
+
:param user_id: Required. ID of the user to create.
|
791
|
+
:param user_configuration: Required. The user to create. For more details please see API documentation:
|
792
|
+
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.User
|
793
|
+
:param cluster_id: Required. ID of the cluster for creating a user in.
|
794
|
+
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
|
795
|
+
so that if you must retry your request, the server ignores the request if it has already been
|
796
|
+
completed. The server guarantees that for at least 60 minutes since the first request.
|
797
|
+
For example, consider a situation where you make an initial request and the request times out.
|
798
|
+
If you make the request again with the same request ID, the server can check if the original operation
|
799
|
+
with the same request ID was received, and if so, ignores the second request.
|
800
|
+
This prevents clients from accidentally creating duplicate commitments.
|
801
|
+
The request ID must be a valid UUID with the exception that zero UUID is not supported
|
802
|
+
(00000000-0000-0000-0000-000000000000).
|
803
|
+
:param validate_request: Optional. If set, performs request validation, but does not actually
|
804
|
+
execute the request.
|
805
|
+
:param project_id: Required. The ID of the Google Cloud project where the service is used.
|
806
|
+
:param location: Required. The ID of the Google Cloud region where the service is used.
|
807
|
+
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
|
808
|
+
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
|
809
|
+
be retried.
|
810
|
+
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
|
811
|
+
Note that if `retry` is specified, the timeout applies to each individual attempt.
|
812
|
+
:param metadata: Optional. Additional metadata that is provided to the method.
|
813
|
+
:param impersonation_chain: Optional service account to impersonate using short-term
|
814
|
+
credentials, or chained list of accounts required to get the access_token
|
815
|
+
of the last account in the list, which will be impersonated in the request.
|
816
|
+
If set as a string, the account must grant the originating account
|
817
|
+
the Service Account Token Creator IAM role.
|
818
|
+
If set as a sequence, the identities from the list must grant
|
819
|
+
Service Account Token Creator IAM role to the directly preceding identity, with first
|
820
|
+
account from the list granting this role to the originating account (templated).
|
821
|
+
"""
|
822
|
+
|
823
|
+
template_fields: Sequence[str] = tuple(
|
824
|
+
{"user_id", "user_configuration", "cluster_id"} | set(AlloyDBWriteBaseOperator.template_fields)
|
825
|
+
)
|
826
|
+
operator_extra_links = (AlloyDBUsersLink(),)
|
827
|
+
|
828
|
+
def __init__(
|
829
|
+
self,
|
830
|
+
user_id: str,
|
831
|
+
user_configuration: alloydb_v1.User | dict,
|
832
|
+
cluster_id: str,
|
833
|
+
*args,
|
834
|
+
**kwargs,
|
835
|
+
):
|
836
|
+
super().__init__(*args, **kwargs)
|
837
|
+
self.user_id = user_id
|
838
|
+
self.user_configuration = user_configuration
|
839
|
+
self.cluster_id = cluster_id
|
840
|
+
|
841
|
+
def _get_user(self) -> proto.Message | None:
|
842
|
+
self.log.info("Checking if the user %s exists already...", self.user_id)
|
843
|
+
try:
|
844
|
+
user = self.hook.get_user(
|
845
|
+
user_id=self.user_id,
|
846
|
+
cluster_id=self.cluster_id,
|
847
|
+
location=self.location,
|
848
|
+
project_id=self.project_id,
|
849
|
+
)
|
850
|
+
except NotFound:
|
851
|
+
self.log.info("The user %s does not exist yet.", self.user_id)
|
852
|
+
except Exception as ex:
|
853
|
+
raise AirflowException(ex) from ex
|
854
|
+
else:
|
855
|
+
self.log.info(
|
856
|
+
"AlloyDB user %s already exists in the cluster %s.",
|
857
|
+
self.user_id,
|
858
|
+
self.cluster_id,
|
859
|
+
)
|
860
|
+
result = alloydb_v1.User.to_dict(user)
|
861
|
+
return result
|
862
|
+
return None
|
863
|
+
|
864
|
+
def execute(self, context: Context) -> dict | None:
|
865
|
+
AlloyDBUsersLink.persist(
|
866
|
+
context=context,
|
867
|
+
task_instance=self,
|
868
|
+
location_id=self.location,
|
869
|
+
cluster_id=self.cluster_id,
|
870
|
+
project_id=self.project_id,
|
871
|
+
)
|
872
|
+
if (_user := self._get_user()) is not None:
|
873
|
+
return _user
|
874
|
+
|
875
|
+
if self.validate_request:
|
876
|
+
self.log.info("Validating a Create AlloyDB user request.")
|
877
|
+
else:
|
878
|
+
self.log.info("Creating an AlloyDB user.")
|
879
|
+
|
880
|
+
try:
|
881
|
+
user = self.hook.create_user(
|
882
|
+
user_id=self.user_id,
|
883
|
+
cluster_id=self.cluster_id,
|
884
|
+
user=self.user_configuration,
|
885
|
+
location=self.location,
|
886
|
+
project_id=self.project_id,
|
887
|
+
request_id=self.request_id,
|
888
|
+
validate_only=self.validate_request,
|
889
|
+
retry=self.retry,
|
890
|
+
timeout=self.timeout,
|
891
|
+
metadata=self.metadata,
|
892
|
+
)
|
893
|
+
except Exception as ex:
|
894
|
+
raise AirflowException(ex)
|
895
|
+
else:
|
896
|
+
result = alloydb_v1.User.to_dict(user) if not self.validate_request else None
|
897
|
+
|
898
|
+
if not self.validate_request:
|
899
|
+
self.log.info("AlloyDB user %s was successfully created.", self.user_id)
|
900
|
+
return result
|
901
|
+
|
902
|
+
|
903
|
+
class AlloyDBUpdateUserOperator(AlloyDBWriteBaseOperator):
|
904
|
+
"""
|
905
|
+
Update an Alloy DB user.
|
906
|
+
|
907
|
+
.. seealso::
|
908
|
+
For more information on how to use this operator, take a look at the guide:
|
909
|
+
:ref:`howto/operator:AlloyDBUpdateUserOperator`
|
910
|
+
|
911
|
+
:param user_id: Required. The ID of the user to update.
|
912
|
+
:param cluster_id: Required. ID of the cluster.
|
913
|
+
:param user_configuration: Required. User to update. For more details please see API documentation:
|
914
|
+
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.User
|
915
|
+
:param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
|
916
|
+
User resource by the update.
|
917
|
+
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
|
918
|
+
so that if you must retry your request, the server ignores the request if it has already been
|
919
|
+
completed. The server guarantees that for at least 60 minutes since the first request.
|
920
|
+
For example, consider a situation where you make an initial request and the request times out.
|
921
|
+
If you make the request again with the same request ID, the server can check if the original operation
|
922
|
+
with the same request ID was received, and if so, ignores the second request.
|
923
|
+
This prevents clients from accidentally creating duplicate commitments.
|
924
|
+
The request ID must be a valid UUID with the exception that zero UUID is not supported
|
925
|
+
(00000000-0000-0000-0000-000000000000).
|
926
|
+
:param validate_request: Optional. If set, performs request validation, but does not actually
|
927
|
+
execute the request.
|
928
|
+
:param allow_missing: Optional. If set to true, update succeeds even if instance is not found.
|
929
|
+
In that case, a new user is created and update_mask is ignored.
|
930
|
+
:param project_id: Required. The ID of the Google Cloud project where the service is used.
|
931
|
+
:param location: Required. The ID of the Google Cloud region where the service is used.
|
932
|
+
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
|
933
|
+
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
|
934
|
+
be retried.
|
935
|
+
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
|
936
|
+
Note that if `retry` is specified, the timeout applies to each individual attempt.
|
937
|
+
:param metadata: Optional. Additional metadata that is provided to the method.
|
938
|
+
:param impersonation_chain: Optional service account to impersonate using short-term
|
939
|
+
credentials, or chained list of accounts required to get the access_token
|
940
|
+
of the last account in the list, which will be impersonated in the request.
|
941
|
+
If set as a string, the account must grant the originating account
|
942
|
+
the Service Account Token Creator IAM role.
|
943
|
+
If set as a sequence, the identities from the list must grant
|
944
|
+
Service Account Token Creator IAM role to the directly preceding identity, with first
|
945
|
+
account from the list granting this role to the originating account (templated).
|
946
|
+
"""
|
947
|
+
|
948
|
+
template_fields: Sequence[str] = tuple(
|
949
|
+
{"cluster_id", "user_id", "user_configuration", "update_mask", "allow_missing"}
|
950
|
+
| set(AlloyDBWriteBaseOperator.template_fields)
|
951
|
+
)
|
952
|
+
operator_extra_links = (AlloyDBUsersLink(),)
|
953
|
+
|
954
|
+
def __init__(
|
955
|
+
self,
|
956
|
+
cluster_id: str,
|
957
|
+
user_id: str,
|
958
|
+
user_configuration: alloydb_v1.User | dict,
|
959
|
+
update_mask: FieldMask | dict | None = None,
|
960
|
+
allow_missing: bool = False,
|
961
|
+
*args,
|
962
|
+
**kwargs,
|
963
|
+
):
|
964
|
+
super().__init__(*args, **kwargs)
|
965
|
+
self.cluster_id = cluster_id
|
966
|
+
self.user_id = user_id
|
967
|
+
self.user_configuration = user_configuration
|
968
|
+
self.update_mask = update_mask
|
969
|
+
self.allow_missing = allow_missing
|
970
|
+
|
971
|
+
def execute(self, context: Context) -> dict | None:
|
972
|
+
AlloyDBUsersLink.persist(
|
973
|
+
context=context,
|
974
|
+
task_instance=self,
|
975
|
+
location_id=self.location,
|
976
|
+
cluster_id=self.cluster_id,
|
977
|
+
project_id=self.project_id,
|
978
|
+
)
|
979
|
+
if self.validate_request:
|
980
|
+
self.log.info("Validating an Update AlloyDB user request.")
|
981
|
+
else:
|
982
|
+
self.log.info("Updating an AlloyDB user.")
|
983
|
+
|
984
|
+
try:
|
985
|
+
user = self.hook.update_user(
|
986
|
+
cluster_id=self.cluster_id,
|
987
|
+
user_id=self.user_id,
|
988
|
+
project_id=self.project_id,
|
989
|
+
location=self.location,
|
990
|
+
user=self.user_configuration,
|
991
|
+
update_mask=self.update_mask,
|
992
|
+
allow_missing=self.allow_missing,
|
993
|
+
request_id=self.request_id,
|
994
|
+
validate_only=self.validate_request,
|
995
|
+
retry=self.retry,
|
996
|
+
timeout=self.timeout,
|
997
|
+
metadata=self.metadata,
|
998
|
+
)
|
999
|
+
except Exception as ex:
|
1000
|
+
raise AirflowException(ex) from ex
|
1001
|
+
else:
|
1002
|
+
result = alloydb_v1.User.to_dict(user) if not self.validate_request else None
|
1003
|
+
|
1004
|
+
if not self.validate_request:
|
1005
|
+
self.log.info("AlloyDB user %s was successfully updated.", self.user_id)
|
1006
|
+
return result
|
1007
|
+
|
1008
|
+
|
1009
|
+
class AlloyDBDeleteUserOperator(AlloyDBWriteBaseOperator):
|
1010
|
+
"""
|
1011
|
+
Delete an Alloy DB user.
|
1012
|
+
|
1013
|
+
.. seealso::
|
1014
|
+
For more information on how to use this operator, take a look at the guide:
|
1015
|
+
:ref:`howto/operator:AlloyDBDeleteUserOperator`
|
1016
|
+
|
1017
|
+
:param user_id: Required. ID of the user to delete.
|
1018
|
+
:param cluster_id: Required. ID of the cluster.
|
1019
|
+
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
|
1020
|
+
so that if you must retry your request, the server ignores the request if it has already been
|
1021
|
+
completed. The server guarantees that for at least 60 minutes since the first request.
|
1022
|
+
For example, consider a situation where you make an initial request and the request times out.
|
1023
|
+
If you make the request again with the same request ID, the server can check if the original operation
|
1024
|
+
with the same request ID was received, and if so, ignores the second request.
|
1025
|
+
This prevents clients from accidentally creating duplicate commitments.
|
1026
|
+
The request ID must be a valid UUID with the exception that zero UUID is not supported
|
1027
|
+
(00000000-0000-0000-0000-000000000000).
|
1028
|
+
:param validate_request: Optional. If set, performs request validation, but does not actually
|
1029
|
+
execute the request.
|
1030
|
+
:param project_id: Required. The ID of the Google Cloud project where the service is used.
|
1031
|
+
:param location: Required. The ID of the Google Cloud region where the service is used.
|
1032
|
+
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
|
1033
|
+
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
|
1034
|
+
be retried.
|
1035
|
+
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
|
1036
|
+
Note that if `retry` is specified, the timeout applies to each individual attempt.
|
1037
|
+
:param metadata: Optional. Additional metadata that is provided to the method.
|
1038
|
+
:param impersonation_chain: Optional service account to impersonate using short-term
|
1039
|
+
credentials, or chained list of accounts required to get the access_token
|
1040
|
+
of the last account in the list, which will be impersonated in the request.
|
1041
|
+
If set as a string, the account must grant the originating account
|
1042
|
+
the Service Account Token Creator IAM role.
|
1043
|
+
If set as a sequence, the identities from the list must grant
|
1044
|
+
Service Account Token Creator IAM role to the directly preceding identity, with first
|
1045
|
+
account from the list granting this role to the originating account (templated).
|
1046
|
+
"""
|
1047
|
+
|
1048
|
+
template_fields: Sequence[str] = tuple(
|
1049
|
+
{"user_id", "cluster_id"} | set(AlloyDBWriteBaseOperator.template_fields)
|
1050
|
+
)
|
1051
|
+
|
1052
|
+
def __init__(
|
1053
|
+
self,
|
1054
|
+
user_id: str,
|
1055
|
+
cluster_id: str,
|
1056
|
+
*args,
|
1057
|
+
**kwargs,
|
1058
|
+
):
|
1059
|
+
super().__init__(*args, **kwargs)
|
1060
|
+
self.user_id = user_id
|
1061
|
+
self.cluster_id = cluster_id
|
1062
|
+
|
1063
|
+
def execute(self, context: Context) -> None:
|
1064
|
+
if self.validate_request:
|
1065
|
+
self.log.info("Validating a Delete AlloyDB user request.")
|
1066
|
+
else:
|
1067
|
+
self.log.info("Deleting an AlloyDB user.")
|
1068
|
+
|
1069
|
+
try:
|
1070
|
+
self.hook.delete_user(
|
1071
|
+
user_id=self.user_id,
|
1072
|
+
cluster_id=self.cluster_id,
|
1073
|
+
project_id=self.project_id,
|
1074
|
+
location=self.location,
|
1075
|
+
request_id=self.request_id,
|
1076
|
+
validate_only=self.validate_request,
|
1077
|
+
retry=self.retry,
|
1078
|
+
timeout=self.timeout,
|
1079
|
+
metadata=self.metadata,
|
1080
|
+
)
|
1081
|
+
except Exception as ex:
|
1082
|
+
raise AirflowException(ex) from ex
|
1083
|
+
|
1084
|
+
if not self.validate_request:
|
1085
|
+
self.log.info("AlloyDB user %s was successfully removed.", self.user_id)
|
1086
|
+
|
1087
|
+
|
1088
|
+
class AlloyDBCreateBackupOperator(AlloyDBWriteBaseOperator):
    """
    Create a Backup in an Alloy DB cluster.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:AlloyDBCreateBackupOperator`

    :param backup_id: Required. ID of the backup to create.
    :param backup_configuration: Required. Backup to create. For more details please see API documentation:
        https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Backup
    :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
        so that if you must retry your request, the server ignores the request if it has already been
        completed. The server guarantees this for at least 60 minutes after the first request.
        For example, consider a situation where you make an initial request and the request times out.
        If you make the request again with the same request ID, the server can check if the original
        operation with the same request ID was received, and if so, ignores the second request.
        This prevents clients from accidentally creating duplicate commitments.
        The request ID must be a valid UUID, with the exception that the zero UUID
        (00000000-0000-0000-0000-000000000000) is not supported.
    :param validate_request: Optional. If set, performs request validation, but does not actually
        execute the request.
    :param project_id: Required. The ID of the Google Cloud project where the service is used.
    :param location: Required. The ID of the Google Cloud region where the backups should be saved.
    :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
    :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will
        not be retried.
    :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :param metadata: Optional. Additional metadata that is provided to the method.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = tuple(
        {"backup_id", "backup_configuration"} | set(AlloyDBWriteBaseOperator.template_fields)
    )
    operator_extra_links = (AlloyDBBackupsLink(),)

    def __init__(
        self,
        backup_id: str,
        backup_configuration: alloydb_v1.Backup | dict,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.backup_id = backup_id
        self.backup_configuration = backup_configuration

    def _get_backup(self) -> proto.Message | None:
        self.log.info("Checking if the backup %s exists already...", self.backup_id)
        try:
            backup = self.hook.get_backup(
                backup_id=self.backup_id,
                location=self.location,
                project_id=self.project_id,
            )
        except NotFound:
            self.log.info("The backup %s does not exist yet.", self.backup_id)
        except Exception as ex:
            raise AirflowException(ex) from ex
        else:
            self.log.info("AlloyDB backup %s already exists.", self.backup_id)
            result = alloydb_v1.Backup.to_dict(backup)
            return result
        return None

    def execute(self, context: Context) -> dict | None:
        AlloyDBBackupsLink.persist(
            context=context,
            task_instance=self,
            project_id=self.project_id,
        )
        if backup := self._get_backup():
            return backup

        if self.validate_request:
            self.log.info("Validating a Create AlloyDB backup request.")
        else:
            self.log.info("Creating an AlloyDB backup.")

        try:
            operation = self.hook.create_backup(
                backup_id=self.backup_id,
                backup=self.backup_configuration,
                location=self.location,
                project_id=self.project_id,
                request_id=self.request_id,
                validate_only=self.validate_request,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        except Exception as ex:
            raise AirflowException(ex) from ex
        else:
            operation_result = self.get_operation_result(operation)
            result = alloydb_v1.Backup.to_dict(operation_result) if operation_result else None

        return result

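As an illustration of how this operator might be wired into a DAG, here is a minimal sketch. The module path, project, region, cluster, and backup identifiers are assumptions made for the example rather than values taken from this diff; the `backup_configuration` fields follow the `alloydb_v1.Backup` type linked in the docstring.

```python
from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.alloy_db import AlloyDBCreateBackupOperator  # assumed module path

with DAG(dag_id="example_alloydb_create_backup", start_date=datetime(2025, 1, 1), schedule=None) as dag:
    create_backup = AlloyDBCreateBackupOperator(
        task_id="create_backup",
        backup_id="example-backup",  # placeholder backup ID
        backup_configuration={
            # Minimal Backup payload; see the alloydb_v1.Backup reference for the full schema.
            "cluster_name": "projects/example-project/locations/us-central1/clusters/example-cluster",
        },
        project_id="example-project",  # placeholder project
        location="us-central1",        # placeholder region
    )
```

Because `execute` first calls `_get_backup`, re-running this task against an existing backup simply returns the existing resource instead of attempting a duplicate creation.
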
class AlloyDBUpdateBackupOperator(AlloyDBWriteBaseOperator):
    """
    Update an Alloy DB backup.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:AlloyDBUpdateBackupOperator`

    :param backup_id: Required. ID of the backup to update.
    :param backup_configuration: Required. Backup to update. For more details please see API documentation:
        https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Backup
    :param update_mask: Optional. Field mask used to specify the fields to be overwritten in the
        Backup resource by the update.
    :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
        so that if you must retry your request, the server ignores the request if it has already been
        completed. The server guarantees this for at least 60 minutes after the first request.
        For example, consider a situation where you make an initial request and the request times out.
        If you make the request again with the same request ID, the server can check if the original
        operation with the same request ID was received, and if so, ignores the second request.
        This prevents clients from accidentally creating duplicate commitments.
        The request ID must be a valid UUID, with the exception that the zero UUID
        (00000000-0000-0000-0000-000000000000) is not supported.
    :param validate_request: Optional. If set, performs request validation, but does not actually
        execute the request.
    :param allow_missing: Optional. If set to true, the update succeeds even if the backup is not found.
        In that case, a new backup is created and update_mask is ignored.
    :param project_id: Required. The ID of the Google Cloud project where the service is used.
    :param location: Required. The ID of the Google Cloud region where the service is used.
    :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
    :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will
        not be retried.
    :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :param metadata: Optional. Additional metadata that is provided to the method.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = tuple(
        {"backup_id", "backup_configuration", "update_mask", "allow_missing"}
        | set(AlloyDBWriteBaseOperator.template_fields)
    )
    operator_extra_links = (AlloyDBBackupsLink(),)

    def __init__(
        self,
        backup_id: str,
        backup_configuration: alloydb_v1.Backup | dict,
        update_mask: FieldMask | dict | None = None,
        allow_missing: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.backup_id = backup_id
        self.backup_configuration = backup_configuration
        self.update_mask = update_mask
        self.allow_missing = allow_missing

    def execute(self, context: Context) -> dict | None:
        AlloyDBBackupsLink.persist(
            context=context,
            task_instance=self,
            project_id=self.project_id,
        )
        if self.validate_request:
            self.log.info("Validating an Update AlloyDB backup request.")
        else:
            self.log.info("Updating an AlloyDB backup.")

        try:
            operation = self.hook.update_backup(
                backup_id=self.backup_id,
                project_id=self.project_id,
                location=self.location,
                backup=self.backup_configuration,
                update_mask=self.update_mask,
                allow_missing=self.allow_missing,
                request_id=self.request_id,
                validate_only=self.validate_request,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        except Exception as ex:
            raise AirflowException(ex) from ex
        else:
            operation_result = self.get_operation_result(operation)
            result = alloydb_v1.Backup.to_dict(operation_result) if operation_result else None

        if not self.validate_request:
            self.log.info("AlloyDB backup %s was successfully updated.", self.backup_id)
        return result

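A short usage sketch for the update operator follows, under the same assumptions as the create example above (module path and identifiers are placeholders). The dict form of `update_mask` mirrors the `FieldMask | dict` type hint in the constructor.

```python
from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.alloy_db import AlloyDBUpdateBackupOperator  # assumed module path

with DAG(dag_id="example_alloydb_update_backup", start_date=datetime(2025, 1, 1), schedule=None) as dag:
    update_backup = AlloyDBUpdateBackupOperator(
        task_id="update_backup",
        backup_id="example-backup",  # placeholder backup ID
        backup_configuration={"labels": {"team": "data-platform"}},  # values for the fields being overwritten
        update_mask={"paths": ["labels"]},  # FieldMask passed in its dict form
        allow_missing=False,
        project_id="example-project",  # placeholder project
        location="us-central1",        # placeholder region
    )
```
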
class AlloyDBDeleteBackupOperator(AlloyDBWriteBaseOperator):
    """
    Delete an Alloy DB backup.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:AlloyDBDeleteBackupOperator`

    :param backup_id: Required. ID of the backup to delete.
    :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
        so that if you must retry your request, the server ignores the request if it has already been
        completed. The server guarantees this for at least 60 minutes after the first request.
        For example, consider a situation where you make an initial request and the request times out.
        If you make the request again with the same request ID, the server can check if the original
        operation with the same request ID was received, and if so, ignores the second request.
        This prevents clients from accidentally creating duplicate commitments.
        The request ID must be a valid UUID, with the exception that the zero UUID
        (00000000-0000-0000-0000-000000000000) is not supported.
    :param validate_request: Optional. If set, performs request validation, but does not actually
        execute the request.
    :param project_id: Required. The ID of the Google Cloud project where the service is used.
    :param location: Required. The ID of the Google Cloud region where the service is used.
    :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
    :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will
        not be retried.
    :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :param metadata: Optional. Additional metadata that is provided to the method.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = tuple({"backup_id"} | set(AlloyDBWriteBaseOperator.template_fields))

    def __init__(
        self,
        backup_id: str,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.backup_id = backup_id

    def execute(self, context: Context) -> None:
        if self.validate_request:
            self.log.info("Validating a Delete AlloyDB backup request.")
        else:
            self.log.info("Deleting an AlloyDB backup.")

        try:
            operation = self.hook.delete_backup(
                backup_id=self.backup_id,
                project_id=self.project_id,
                location=self.location,
                request_id=self.request_id,
                validate_only=self.validate_request,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        except Exception as ex:
            raise AirflowException(ex) from ex
        else:
            self.get_operation_result(operation)

        if not self.validate_request:
            self.log.info("AlloyDB backup %s was successfully removed.", self.backup_id)
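
Finally, a hedged sketch of the delete operator, using `validate_request` as a dry run before the real deletion. As with the previous sketches, the module path and all identifiers are placeholders rather than values from this diff.

```python
from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.alloy_db import AlloyDBDeleteBackupOperator  # assumed module path

with DAG(dag_id="example_alloydb_delete_backup", start_date=datetime(2025, 1, 1), schedule=None) as dag:
    validate_delete = AlloyDBDeleteBackupOperator(
        task_id="validate_delete_backup",
        backup_id="example-backup",    # placeholder backup ID
        project_id="example-project",  # placeholder project
        location="us-central1",        # placeholder region
        validate_request=True,         # dry run: the API only validates the request, nothing is deleted
    )

    delete_backup = AlloyDBDeleteBackupOperator(
        task_id="delete_backup",
        backup_id="example-backup",
        project_id="example-project",
        location="us-central1",
    )

    validate_delete >> delete_backup  # run the dry-run check before the actual delete
```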