apache-airflow-providers-google 11.0.0__py3-none-any.whl → 12.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. airflow/providers/google/__init__.py +3 -3
  2. airflow/providers/google/assets/gcs.py +1 -7
  3. airflow/providers/google/cloud/hooks/alloy_db.py +289 -0
  4. airflow/providers/google/cloud/hooks/cloud_batch.py +13 -5
  5. airflow/providers/google/cloud/hooks/dataproc.py +7 -3
  6. airflow/providers/google/cloud/hooks/dataproc_metastore.py +41 -22
  7. airflow/providers/google/cloud/hooks/kubernetes_engine.py +7 -38
  8. airflow/providers/google/cloud/hooks/translate.py +355 -0
  9. airflow/providers/google/cloud/hooks/vertex_ai/feature_store.py +147 -0
  10. airflow/providers/google/cloud/hooks/vertex_ai/generative_model.py +10 -0
  11. airflow/providers/google/cloud/links/alloy_db.py +55 -0
  12. airflow/providers/google/cloud/links/translate.py +98 -0
  13. airflow/providers/google/cloud/log/stackdriver_task_handler.py +1 -5
  14. airflow/providers/google/cloud/openlineage/mixins.py +4 -12
  15. airflow/providers/google/cloud/openlineage/utils.py +200 -22
  16. airflow/providers/google/cloud/operators/alloy_db.py +459 -0
  17. airflow/providers/google/cloud/operators/automl.py +55 -44
  18. airflow/providers/google/cloud/operators/bigquery.py +60 -15
  19. airflow/providers/google/cloud/operators/dataproc.py +12 -0
  20. airflow/providers/google/cloud/operators/gcs.py +5 -14
  21. airflow/providers/google/cloud/operators/kubernetes_engine.py +377 -705
  22. airflow/providers/google/cloud/operators/mlengine.py +41 -31
  23. airflow/providers/google/cloud/operators/translate.py +586 -1
  24. airflow/providers/google/cloud/operators/vertex_ai/feature_store.py +163 -0
  25. airflow/providers/google/cloud/operators/vertex_ai/generative_model.py +5 -0
  26. airflow/providers/google/cloud/sensors/dataproc.py +2 -2
  27. airflow/providers/google/cloud/sensors/vertex_ai/__init__.py +16 -0
  28. airflow/providers/google/cloud/sensors/vertex_ai/feature_store.py +112 -0
  29. airflow/providers/google/cloud/transfers/bigquery_to_gcs.py +6 -11
  30. airflow/providers/google/cloud/transfers/bigquery_to_mssql.py +3 -0
  31. airflow/providers/google/cloud/transfers/bigquery_to_mysql.py +3 -0
  32. airflow/providers/google/cloud/transfers/gcs_to_bigquery.py +5 -10
  33. airflow/providers/google/cloud/transfers/gcs_to_gcs.py +3 -15
  34. airflow/providers/google/cloud/transfers/gcs_to_local.py +9 -0
  35. airflow/providers/google/cloud/transfers/local_to_gcs.py +41 -6
  36. airflow/providers/google/cloud/transfers/s3_to_gcs.py +15 -0
  37. airflow/providers/google/get_provider_info.py +30 -18
  38. airflow/providers/google/version_compat.py +36 -0
  39. {apache_airflow_providers_google-11.0.0.dist-info → apache_airflow_providers_google-12.0.0.dist-info}/METADATA +16 -18
  40. {apache_airflow_providers_google-11.0.0.dist-info → apache_airflow_providers_google-12.0.0.dist-info}/RECORD +42 -37
  41. airflow/providers/google/cloud/hooks/datapipeline.py +0 -71
  42. airflow/providers/google/cloud/openlineage/BigQueryErrorRunFacet.json +0 -30
  43. airflow/providers/google/cloud/operators/datapipeline.py +0 -63
  44. {apache_airflow_providers_google-11.0.0.dist-info → apache_airflow_providers_google-12.0.0.dist-info}/WHEEL +0 -0
  45. {apache_airflow_providers_google-11.0.0.dist-info → apache_airflow_providers_google-12.0.0.dist-info}/entry_points.txt +0 -0

airflow/providers/google/cloud/links/translate.py
@@ -50,6 +50,12 @@ TRANSLATION_NATIVE_DATASET_LINK = (
 )
 TRANSLATION_NATIVE_LIST_LINK = TRANSLATION_BASE_LINK + "/datasets?project={project_id}"
 
+TRANSLATION_NATIVE_MODEL_LINK = (
+    TRANSLATION_BASE_LINK
+    + "/locations/{location}/datasets/{dataset_id}/evaluate;modelId={model_id}?project={project_id}"
+)
+TRANSLATION_MODELS_LIST_LINK = TRANSLATION_BASE_LINK + "/models/list?project={project_id}"
+
 
 class TranslationLegacyDatasetLink(BaseGoogleLink):
     """

@@ -270,3 +276,95 @@ class TranslationDatasetsListLink(BaseGoogleLink):
                 "project_id": project_id,
             },
         )
+
+
+class TranslationModelLink(BaseGoogleLink):
+    """
+    Helper class for constructing Translation Model link.
+
+    Link for legacy and native models.
+    """
+
+    name = "Translation Model"
+    key = "translation_model"
+    format_str = TRANSLATION_NATIVE_MODEL_LINK
+
+    @staticmethod
+    def persist(
+        context: Context,
+        task_instance,
+        dataset_id: str,
+        model_id: str,
+        project_id: str,
+    ):
+        task_instance.xcom_push(
+            context,
+            key=TranslationLegacyModelLink.key,
+            value={
+                "location": task_instance.location,
+                "dataset_id": dataset_id,
+                "model_id": model_id,
+                "project_id": project_id,
+            },
+        )
+
+
+class TranslationModelsListLink(BaseGoogleLink):
+    """
+    Helper class for constructing Translation Models List link.
+
+    Both legacy and native models are available under this link.
+    """
+
+    name = "Translation Models List"
+    key = "translation_models_list"
+    format_str = TRANSLATION_MODELS_LIST_LINK
+
+    @staticmethod
+    def persist(
+        context: Context,
+        task_instance,
+        project_id: str,
+    ):
+        task_instance.xcom_push(
+            context,
+            key=TranslationModelsListLink.key,
+            value={
+                "project_id": project_id,
+            },
+        )
+
+
+class TranslateResultByOutputConfigLink(BaseGoogleLink):
+    """
+    Helper class for constructing Translation results Link.
+
+    Provides link to gcs destination output translation results, by provided output_config
+    with gcs destination specified.
+    """
+
+    name = "Translate Results By Output Config"
+    key = "translate_results_by_output_config"
+    format_str = TRANSLATION_TRANSLATE_TEXT_BATCH
+
+    @staticmethod
+    def extract_output_uri_prefix(output_config):
+        return output_config["gcs_destination"]["output_uri_prefix"].rpartition("gs://")[-1]
+
+    @staticmethod
+    def persist(
+        context: Context,
+        task_instance,
+        project_id: str,
+        output_config: dict,
+    ):
+        task_instance.xcom_push(
+            context,
+            key=TranslateResultByOutputConfigLink.key,
+            value={
+                "project_id": project_id,
+                "output_uri_prefix": TranslateResultByOutputConfigLink.extract_output_uri_prefix(
+                    output_config
+                ),
+            },
+        )
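
For orientation, a hedged sketch of how an operator would typically expose the new TranslateResultByOutputConfigLink; the operator below is hypothetical, only the persist() signature is taken from the diff above:

    # Sketch only: wiring the new extra link into an operator's execute().
    from airflow.providers.google.cloud.links.translate import TranslateResultByOutputConfigLink
    from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator


    class ExampleBatchTranslateTextOperator(GoogleCloudBaseOperator):
        """Hypothetical operator; only the link API is taken from this release."""

        operator_extra_links = (TranslateResultByOutputConfigLink(),)

        def __init__(self, *, project_id: str, output_config: dict, **kwargs):
            super().__init__(**kwargs)
            self.project_id = project_id
            # Expects {"gcs_destination": {"output_uri_prefix": "gs://bucket/prefix/"}}.
            self.output_config = output_config

        def execute(self, context):
            # Persist enough state for the "Translate Results By Output Config"
            # button to render in the UI once the task has run.
            TranslateResultByOutputConfigLink.persist(
                context=context,
                task_instance=self,
                project_id=self.project_id,
                output_config=self.output_config,
            )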

airflow/providers/google/cloud/log/stackdriver_task_handler.py
@@ -30,18 +30,14 @@ from google.cloud.logging import Resource
 from google.cloud.logging.handlers.transports import BackgroundThreadTransport, Transport
 from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client
 from google.cloud.logging_v2.types import ListLogEntriesRequest, ListLogEntriesResponse
-from packaging.version import Version
 
-from airflow import __version__ as airflow_version
 from airflow.exceptions import RemovedInAirflow3Warning
 from airflow.providers.google.cloud.utils.credentials_provider import get_credentials_and_project_id
 from airflow.providers.google.common.consts import CLIENT_INFO
+from airflow.providers.google.version_compat import AIRFLOW_V_3_0_PLUS
 from airflow.utils.log.trigger_handler import ctx_indiv_trigger
 from airflow.utils.types import NOTSET, ArgNotSet
 
-AIRFLOW_VERSION = Version(airflow_version)
-AIRFLOW_V_3_0_PLUS = Version(AIRFLOW_VERSION.base_version) >= Version("3.0.0")
-
 if TYPE_CHECKING:
     from google.auth.credentials import Credentials
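
AIRFLOW_V_3_0_PLUS now comes from the new airflow/providers/google/version_compat.py module (file 38 in the list above), whose contents are not shown in this diff. A minimal sketch of such a shim, assuming it simply centralizes the check removed here:

    # Sketch only: mirrors the logic deleted from stackdriver_task_handler.py;
    # the real version_compat.py in 12.0.0 may expose additional flags.
    from packaging.version import Version

    from airflow import __version__ as airflow_version

    AIRFLOW_VERSION = Version(airflow_version)
    AIRFLOW_V_3_0_PLUS = Version(AIRFLOW_VERSION.base_version) >= Version("3.0.0")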
 

airflow/providers/google/cloud/openlineage/mixins.py
@@ -88,7 +88,7 @@ class _BigQueryOpenLineageMixin:
 
         job_facets = {"sql": SQLJobFacet(query=SQLParser.normalize_sql(self.sql))}
 
-        self.client = self.hook.get_client(project_id=self.hook.project_id)
+        self.client = self.hook.get_client(project_id=self.hook.project_id, location=self.location)
         job_ids = self.job_id
         if isinstance(self.job_id, str):
            job_ids = [self.job_id]
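
Passing the location matters when the BigQuery job ran in a single region: a client without a default location can generally only resolve jobs in the US/EU multi-regions. A hedged sketch of the lookup this change enables (connection and job id are hypothetical):

    # Sketch: fetching metadata of a regional BigQuery job via the hook's client.
    from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook

    hook = BigQueryHook(gcp_conn_id="google_cloud_default")
    client = hook.get_client(project_id=hook.project_id, location="europe-west3")

    job = client.get_job("example_job_id")  # resolved against the client's default location
    print(job.state)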

@@ -108,10 +108,7 @@ class _BigQueryOpenLineageMixin:
 
     def get_facets(self, job_id: str):
         from airflow.providers.common.compat.openlineage.facet import ErrorMessageRunFacet
-        from airflow.providers.google.cloud.openlineage.utils import (
-            BigQueryErrorRunFacet,
-            get_from_nullable_chain,
-        )
+        from airflow.providers.google.cloud.openlineage.utils import get_from_nullable_chain
 
         inputs = []
         outputs = []

@@ -125,8 +122,7 @@ class _BigQueryOpenLineageMixin:
             if get_from_nullable_chain(props, ["status", "state"]) != "DONE":
                 raise ValueError(f"Trying to extract data from running bigquery job: `{job_id}`")
 
-            # TODO: remove bigQuery_job in next release
-            run_facets["bigQuery_job"] = run_facets["bigQueryJob"] = self._get_bigquery_job_run_facet(props)
+            run_facets["bigQueryJob"] = self._get_bigquery_job_run_facet(props)
 
             if get_from_nullable_chain(props, ["statistics", "numChildJobs"]):
                 if hasattr(self, "log"):

@@ -145,16 +141,12 @@ class _BigQueryOpenLineageMixin:
             if hasattr(self, "log"):
                 self.log.warning("Cannot retrieve job details from BigQuery.Client. %s", e, exc_info=True)
             exception_msg = traceback.format_exc()
-            # TODO: remove BigQueryErrorRunFacet in next release
             run_facets.update(
                 {
                     "errorMessage": ErrorMessageRunFacet(
                         message=f"{e}: {exception_msg}",
                         programmingLanguage="python",
-                    ),
-                    "bigQuery_error": BigQueryErrorRunFacet(
-                        clientError=f"{e}: {exception_msg}",
-                    ),
+                    )
                 }
             )
         deduplicated_outputs = self._deduplicate_outputs(outputs)
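
For downstream consumers of these OpenLineage events, the deprecated duplicate facets are gone in 12.0.0: read "bigQueryJob" instead of "bigQuery_job", and the standard "errorMessage" facet instead of "bigQuery_error". A hedged sketch of the consumer-side adjustment:

    # Sketch: adapting code that inspects run facets emitted by the BigQuery mixin.
    def summarize_bigquery_run(run_facets: dict) -> str:
        # 11.x also exposed run_facets["bigQuery_job"]; 12.0.0 keeps only "bigQueryJob".
        job_facet = run_facets.get("bigQueryJob")

        # 11.x also exposed run_facets["bigQuery_error"].clientError; failures are now
        # reported only through the standard errorMessage facet.
        error_facet = run_facets.get("errorMessage")

        if error_facet is not None:
            return f"failed: {error_facet.message}"
        return f"ok: {job_facet}"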

airflow/providers/google/cloud/openlineage/utils.py
@@ -17,6 +17,9 @@
 # under the License.
 from __future__ import annotations
 
+import logging
+import os
+import pathlib
 from typing import TYPE_CHECKING, Any
 
 from attr import define, field

@@ -25,21 +28,84 @@ if TYPE_CHECKING:
     from google.cloud.bigquery.table import Table
 
     from airflow.providers.common.compat.openlineage.facet import Dataset
+    from airflow.utils.context import Context
 
 from airflow.providers.common.compat.openlineage.facet import (
     BaseFacet,
     ColumnLineageDatasetFacet,
     DocumentationDatasetFacet,
     Fields,
+    Identifier,
     InputField,
     RunFacet,
     SchemaDatasetFacet,
     SchemaDatasetFacetFields,
+    SymlinksDatasetFacet,
+)
+from airflow.providers.common.compat.openlineage.utils.spark import (
+    inject_parent_job_information_into_spark_properties,
 )
 from airflow.providers.google import __version__ as provider_version
+from airflow.providers.google.cloud.hooks.gcs import _parse_gcs_url
+
+log = logging.getLogger(__name__)
 
 BIGQUERY_NAMESPACE = "bigquery"
 BIGQUERY_URI = "bigquery"
+WILDCARD = "*"
+
+
+def extract_ds_name_from_gcs_path(path: str) -> str:
+    """
+    Extract and process the dataset name from a given path.
+
+    Args:
+        path: The path to process e.g. of a gcs file.
+
+    Returns:
+        The processed dataset name.
+
+    Examples:
+        >>> extract_ds_name_from_gcs_path("/dir/file.*")
+        'dir'
+        >>> extract_ds_name_from_gcs_path("/dir/pre_")
+        'dir'
+        >>> extract_ds_name_from_gcs_path("/dir/file.txt")
+        'dir/file.txt'
+        >>> extract_ds_name_from_gcs_path("/dir/file.")
+        'dir'
+        >>> extract_ds_name_from_gcs_path("/dir/")
+        'dir'
+        >>> extract_ds_name_from_gcs_path("")
+        '/'
+        >>> extract_ds_name_from_gcs_path("/")
+        '/'
+        >>> extract_ds_name_from_gcs_path(".")
+        '/'
+    """
+    if WILDCARD in path:
+        path = path.split(WILDCARD, maxsplit=1)[0]
+
+    # We want to end up with parent directory if the path:
+    # - does not refer to a file (no dot in the last segment)
+    #   and does not explicitly end with a slash, it is treated as a prefix and removed.
+    #   Example: "/dir/pre_" -> "/dir/"
+    # - contains a dot at the end, then it is treated as a prefix (created after removing the wildcard).
+    #   Example: "/dir/file." (was "/dir/file.*" with wildcard) -> "/dir/"
+    last_path_segment = os.path.basename(path).rstrip(".")
+    if "." not in last_path_segment and not path.endswith("/"):
+        path = pathlib.Path(path).parent.as_posix()
+
+    # Normalize the path:
+    # - Remove trailing slashes.
+    # - Remove leading slashes.
+    # - Handle edge cases for empty paths or single-dot paths.
+    path = path.rstrip("/")
+    path = path.lstrip("/")
+    if path in ("", "."):
+        path = "/"
+
+    return path
 
 
 def get_facets_from_bq_table(table: Table) -> dict[str, BaseFacet]:

@@ -57,6 +123,20 @@ def get_facets_from_bq_table(table: Table) -> dict[str, BaseFacet]:
     if table.description:
         facets["documentation"] = DocumentationDatasetFacet(description=table.description)
 
+    if table.external_data_configuration:
+        symlinks = set()
+        for uri in table.external_data_configuration.source_uris:
+            if uri.startswith("gs://"):
+                bucket, blob = _parse_gcs_url(uri)
+                blob = extract_ds_name_from_gcs_path(blob)
+                symlinks.add((f"gs://{bucket}", blob))
+
+        facets["symlink"] = SymlinksDatasetFacet(
+            identifiers=[
+                Identifier(namespace=namespace, name=name, type="file")
+                for namespace, name in sorted(symlinks)
+            ]
+        )
     return facets
 
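
To make the effect concrete: for a hypothetical external table whose external_data_configuration lists gs://bucket/dir/* and gs://bucket/other/file.csv as source URIs, the facet built above would resolve to one identifier per bucket/path pair (wildcards collapse to their parent directory):

    # Illustration only: expected shape of the symlink facet for a made-up external table.
    from airflow.providers.common.compat.openlineage.facet import Identifier, SymlinksDatasetFacet

    expected = SymlinksDatasetFacet(
        identifiers=[
            Identifier(namespace="gs://bucket", name="dir", type="file"),  # from gs://bucket/dir/*
            Identifier(namespace="gs://bucket", name="other/file.csv", type="file"),
        ]
    )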
 

@@ -145,28 +225,6 @@ class BigQueryJobRunFacet(RunFacet):
         )
 
 
-# TODO: remove BigQueryErrorRunFacet in next release
-@define
-class BigQueryErrorRunFacet(RunFacet):
-    """
-    Represents errors that can happen during execution of BigqueryExtractor.
-
-    :param clientError: represents errors originating in bigquery client
-    :param parserError: represents errors that happened during parsing SQL provided to bigquery
-    """
-
-    clientError: str | None = field(default=None)
-    parserError: str | None = field(default=None)
-
-    @staticmethod
-    def _get_schema() -> str:
-        return (
-            "https://raw.githubusercontent.com/apache/airflow/"
-            f"providers-google/{provider_version}/airflow/providers/google/"
-            "openlineage/BigQueryErrorRunFacet.json"
-        )
-
-
 def get_from_nullable_chain(source: Any, chain: list[str]) -> Any | None:
     """
     Get object from nested structure of objects, where it's not guaranteed that all keys in the nested structure exist.

@@ -208,3 +266,123 @@ def get_from_nullable_chain(source: Any, chain: list[str]) -> Any | None:
         return source
     except AttributeError:
         return None
+
+
+def _is_openlineage_provider_accessible() -> bool:
+    """
+    Check if the OpenLineage provider is accessible.
+
+    This function attempts to import the necessary OpenLineage modules and checks if the provider
+    is enabled and the listener is available.
+
+    Returns:
+        bool: True if the OpenLineage provider is accessible, False otherwise.
+    """
+    try:
+        from airflow.providers.openlineage.conf import is_disabled
+        from airflow.providers.openlineage.plugins.listener import get_openlineage_listener
+    except ImportError:
+        log.debug("OpenLineage provider could not be imported.")
+        return False
+
+    if is_disabled():
+        log.debug("OpenLineage provider is disabled.")
+        return False
+
+    if not get_openlineage_listener():
+        log.debug("OpenLineage listener could not be found.")
+        return False
+
+    return True
+
+
+def _extract_supported_job_type_from_dataproc_job(job: dict) -> str | None:
+    """
+    Extract job type from a Dataproc job definition.
+
+    Args:
+        job: The Dataproc job definition.
+
+    Returns:
+        The job type for which the automatic OL injection is supported, if found, otherwise None.
+    """
+    supported_job_types = ("sparkJob", "pysparkJob", "spark_job", "pyspark_job")
+    return next((job_type for job_type in supported_job_types if job_type in job), None)
+
+
+def _replace_dataproc_job_properties(job: dict, job_type: str, new_properties: dict) -> dict:
+    """
+    Replace the properties of a specific job type in a Dataproc job definition.
+
+    Args:
+        job: The original Dataproc job definition.
+        job_type: The key representing the job type (e.g., "sparkJob").
+        new_properties: The new properties to replace the existing ones.
+
+    Returns:
+        A modified copy of the job with updated properties.
+
+    Raises:
+        KeyError: If the job_type does not exist in the job or lacks a "properties" field.
+    """
+    if job_type not in job:
+        raise KeyError(f"Job type '{job_type}' is missing in the job definition.")
+
+    updated_job = job.copy()
+    updated_job[job_type] = job[job_type].copy()
+    updated_job[job_type]["properties"] = new_properties
+
+    return updated_job
+
+
+def inject_openlineage_properties_into_dataproc_job(
+    job: dict, context: Context, inject_parent_job_info: bool
+) -> dict:
+    """
+    Inject OpenLineage properties into Spark job definition.
+
+    Function is not removing any configuration or modifying the job in any other way,
+    apart from adding desired OpenLineage properties to Dataproc job definition if not already present.
+
+    Note:
+        Any modification to job will be skipped if:
+            - OpenLineage provider is not accessible.
+            - The job type is not supported.
+            - Automatic parent job information injection is disabled.
+            - Any OpenLineage properties with parent job information are already present
+              in the Spark job definition.
+
+    Args:
+        job: The original Dataproc job definition.
+        context: The Airflow context in which the job is running.
+        inject_parent_job_info: Flag indicating whether to inject parent job information.
+
+    Returns:
+        The modified job definition with OpenLineage properties injected, if applicable.
+    """
+    if not inject_parent_job_info:
+        log.debug("Automatic injection of OpenLineage information is disabled.")
+        return job
+
+    if not _is_openlineage_provider_accessible():
+        log.warning(
+            "Could not access OpenLineage provider for automatic OpenLineage "
+            "properties injection. No action will be performed."
+        )
+        return job
+
+    if (job_type := _extract_supported_job_type_from_dataproc_job(job)) is None:
+        log.warning(
+            "Could not find a supported Dataproc job type for automatic OpenLineage "
+            "properties injection. No action will be performed.",
+        )
+        return job
+
+    properties = job[job_type].get("properties", {})
+
+    properties = inject_parent_job_information_into_spark_properties(properties=properties, context=context)
+
+    job_with_ol_config = _replace_dataproc_job_properties(
+        job=job, job_type=job_type, new_properties=properties
+    )
+    return job_with_ol_config
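
A hedged sketch of how this helper is intended to be used from the Dataproc submit path; the wrapper below is illustrative, and the operator-level flag that drives inject_parent_job_info is not shown in this diff:

    # Sketch: preparing a Dataproc job definition before submission (values are made up).
    from airflow.providers.google.cloud.openlineage.utils import (
        inject_openlineage_properties_into_dataproc_job,
    )


    def prepare_dataproc_job(job: dict, context, inject_parent_job_info: bool = True) -> dict:
        """Illustrative wrapper mirroring what a submit operator would do before calling the API."""
        return inject_openlineage_properties_into_dataproc_job(
            job=job, context=context, inject_parent_job_info=inject_parent_job_info
        )


    example_job = {
        "placement": {"cluster_name": "example-cluster"},
        "pyspark_job": {
            "main_python_file_uri": "gs://bucket/jobs/etl.py",
            "properties": {"spark.executor.memory": "4g"},
        },
    }
    # After injection, example_job["pyspark_job"]["properties"] would typically also carry the
    # spark.openlineage.parent* settings added by inject_parent_job_information_into_spark_properties.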