apache-airflow-providers-google 10.19.0rc1-py3-none-any.whl → 10.20.0rc1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. airflow/providers/google/LICENSE +4 -4
  2. airflow/providers/google/__init__.py +1 -1
  3. airflow/providers/google/ads/hooks/ads.py +4 -4
  4. airflow/providers/google/cloud/hooks/cloud_storage_transfer_service.py +26 -0
  5. airflow/providers/google/cloud/hooks/dataflow.py +132 -1
  6. airflow/providers/google/cloud/hooks/datapipeline.py +22 -73
  7. airflow/providers/google/cloud/hooks/gcs.py +21 -0
  8. airflow/providers/google/cloud/hooks/pubsub.py +10 -1
  9. airflow/providers/google/cloud/hooks/vertex_ai/auto_ml.py +8 -0
  10. airflow/providers/google/cloud/hooks/vertex_ai/generative_model.py +15 -3
  11. airflow/providers/google/cloud/hooks/vertex_ai/hyperparameter_tuning_job.py +1 -1
  12. airflow/providers/google/cloud/links/dataflow.py +25 -0
  13. airflow/providers/google/cloud/openlineage/mixins.py +271 -0
  14. airflow/providers/google/cloud/openlineage/utils.py +5 -218
  15. airflow/providers/google/cloud/operators/bigquery.py +74 -20
  16. airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py +76 -0
  17. airflow/providers/google/cloud/operators/dataflow.py +235 -1
  18. airflow/providers/google/cloud/operators/datapipeline.py +29 -121
  19. airflow/providers/google/cloud/operators/dataplex.py +1 -1
  20. airflow/providers/google/cloud/operators/dataproc_metastore.py +17 -6
  21. airflow/providers/google/cloud/operators/kubernetes_engine.py +9 -6
  22. airflow/providers/google/cloud/operators/pubsub.py +18 -0
  23. airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py +6 -0
  24. airflow/providers/google/cloud/operators/vertex_ai/generative_model.py +16 -0
  25. airflow/providers/google/cloud/sensors/cloud_composer.py +171 -2
  26. airflow/providers/google/cloud/transfers/azure_blob_to_gcs.py +13 -0
  27. airflow/providers/google/cloud/transfers/bigquery_to_postgres.py +56 -1
  28. airflow/providers/google/cloud/transfers/gcs_to_gcs.py +6 -12
  29. airflow/providers/google/cloud/triggers/cloud_composer.py +115 -0
  30. airflow/providers/google/cloud/triggers/kubernetes_engine.py +2 -0
  31. airflow/providers/google/cloud/utils/credentials_provider.py +81 -6
  32. airflow/providers/google/cloud/utils/external_token_supplier.py +175 -0
  33. airflow/providers/google/common/hooks/base_google.py +35 -1
  34. airflow/providers/google/common/utils/id_token_credentials.py +1 -1
  35. airflow/providers/google/get_provider_info.py +19 -14
  36. {apache_airflow_providers_google-10.19.0rc1.dist-info → apache_airflow_providers_google-10.20.0rc1.dist-info}/METADATA +41 -35
  37. {apache_airflow_providers_google-10.19.0rc1.dist-info → apache_airflow_providers_google-10.20.0rc1.dist-info}/RECORD +39 -37
  38. {apache_airflow_providers_google-10.19.0rc1.dist-info → apache_airflow_providers_google-10.20.0rc1.dist-info}/WHEEL +0 -0
  39. {apache_airflow_providers_google-10.19.0rc1.dist-info → apache_airflow_providers_google-10.20.0rc1.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,271 @@
+ #
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+ from __future__ import annotations
+
+ import copy
+ import json
+ import traceback
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from openlineage.client.facet import (
+         BaseFacet,
+         OutputStatisticsOutputDatasetFacet,
+         SchemaDatasetFacet,
+     )
+     from openlineage.client.run import Dataset
+
+     from airflow.providers.google.cloud.openlineage.utils import BigQueryJobRunFacet
+
+
+ class _BigQueryOpenLineageMixin:
+     def get_openlineage_facets_on_complete(self, _):
+         """
+         Retrieve OpenLineage data for a COMPLETE BigQuery job.
+
+         This method retrieves statistics for the specified job_ids using the BigQueryDatasetsProvider.
+         It calls BigQuery API, retrieving input and output dataset info from it, as well as run-level
+         usage statistics.
+
+         Run facets should contain:
+             - ExternalQueryRunFacet
+             - BigQueryJobRunFacet
+
+         Run facets may contain:
+             - ErrorMessageRunFacet
+
+         Job facets should contain:
+             - SqlJobFacet if operator has self.sql
+
+         Input datasets should contain facets:
+             - DataSourceDatasetFacet
+             - SchemaDatasetFacet
+
+         Output datasets should contain facets:
+             - DataSourceDatasetFacet
+             - SchemaDatasetFacet
+             - OutputStatisticsOutputDatasetFacet
+         """
+         from openlineage.client.facet import ExternalQueryRunFacet, SqlJobFacet
+
+         from airflow.providers.openlineage.extractors import OperatorLineage
+         from airflow.providers.openlineage.sqlparser import SQLParser
+
+         if not self.job_id:
+             return OperatorLineage()
+
+         run_facets: dict[str, BaseFacet] = {
+             "externalQuery": ExternalQueryRunFacet(externalQueryId=self.job_id, source="bigquery")
+         }
+
+         job_facets = {"sql": SqlJobFacet(query=SQLParser.normalize_sql(self.sql))}
+
+         self.client = self.hook.get_client(project_id=self.hook.project_id)
+         job_ids = self.job_id
+         if isinstance(self.job_id, str):
+             job_ids = [self.job_id]
+         inputs, outputs = [], []
+         for job_id in job_ids:
+             inner_inputs, inner_outputs, inner_run_facets = self.get_facets(job_id=job_id)
+             inputs.extend(inner_inputs)
+             outputs.extend(inner_outputs)
+             run_facets.update(inner_run_facets)
+
+         return OperatorLineage(
+             inputs=inputs,
+             outputs=outputs,
+             run_facets=run_facets,
+             job_facets=job_facets,
+         )
+
+     def get_facets(self, job_id: str):
+         from openlineage.client.facet import ErrorMessageRunFacet
+
+         from airflow.providers.google.cloud.openlineage.utils import (
+             BigQueryErrorRunFacet,
+             get_from_nullable_chain,
+         )
+
+         inputs = []
+         outputs = []
+         run_facets: dict[str, BaseFacet] = {}
+         if hasattr(self, "log"):
+             self.log.debug("Extracting data from bigquery job: `%s`", job_id)
+         try:
+             job = self.client.get_job(job_id=job_id)  # type: ignore
+             props = job._properties
+
+             if get_from_nullable_chain(props, ["status", "state"]) != "DONE":
+                 raise ValueError(f"Trying to extract data from running bigquery job: `{job_id}`")
+
+             # TODO: remove bigQuery_job in next release
+             run_facets["bigQuery_job"] = run_facets["bigQueryJob"] = self._get_bigquery_job_run_facet(props)
+
+             if get_from_nullable_chain(props, ["statistics", "numChildJobs"]):
+                 if hasattr(self, "log"):
+                     self.log.debug("Found SCRIPT job. Extracting lineage from child jobs instead.")
+                 # SCRIPT job type has no input / output information but spawns child jobs that have one
+                 # https://cloud.google.com/bigquery/docs/information-schema-jobs#multi-statement_query_job
+                 for child_job_id in self.client.list_jobs(parent_job=job_id):
+                     child_job = self.client.get_job(job_id=child_job_id)  # type: ignore
+                     child_inputs, child_output = self._get_inputs_outputs_from_job(child_job._properties)
+                     inputs.extend(child_inputs)
+                     outputs.append(child_output)
+             else:
+                 inputs, _output = self._get_inputs_outputs_from_job(props)
+                 outputs.append(_output)
+         except Exception as e:
+             if hasattr(self, "log"):
+                 self.log.warning("Cannot retrieve job details from BigQuery.Client. %s", e, exc_info=True)
+             exception_msg = traceback.format_exc()
+             # TODO: remove BigQueryErrorRunFacet in next release
+             run_facets.update(
+                 {
+                     "errorMessage": ErrorMessageRunFacet(
+                         message=f"{e}: {exception_msg}",
+                         programmingLanguage="python",
+                     ),
+                     "bigQuery_error": BigQueryErrorRunFacet(
+                         clientError=f"{e}: {exception_msg}",
+                     ),
+                 }
+             )
+         deduplicated_outputs = self._deduplicate_outputs(outputs)
+         return inputs, deduplicated_outputs, run_facets
+
+     def _deduplicate_outputs(self, outputs: list[Dataset | None]) -> list[Dataset]:
+         # Sources are the same so we can compare only names
+         final_outputs = {}
+         for single_output in outputs:
+             if not single_output:
+                 continue
+             key = single_output.name
+             if key not in final_outputs:
+                 final_outputs[key] = single_output
+                 continue
+
+             # No OutputStatisticsOutputDatasetFacet is added to duplicated outputs as we can not determine
+             # if the rowCount or size can be summed together.
+             single_output.facets.pop("outputStatistics", None)
+             final_outputs[key] = single_output
+
+         return list(final_outputs.values())
+
+     def _get_inputs_outputs_from_job(self, properties: dict) -> tuple[list[Dataset], Dataset | None]:
+         from airflow.providers.google.cloud.openlineage.utils import get_from_nullable_chain
+
+         input_tables = get_from_nullable_chain(properties, ["statistics", "query", "referencedTables"]) or []
+         output_table = get_from_nullable_chain(properties, ["configuration", "query", "destinationTable"])
+         inputs = [self._get_dataset(input_table) for input_table in input_tables]
+         if output_table:
+             output = self._get_dataset(output_table)
+             dataset_stat_facet = self._get_statistics_dataset_facet(properties)
+             if dataset_stat_facet:
+                 output.facets.update({"outputStatistics": dataset_stat_facet})
+
+         return inputs, output
+
+     @staticmethod
+     def _get_bigquery_job_run_facet(properties: dict) -> BigQueryJobRunFacet:
+         from airflow.providers.google.cloud.openlineage.utils import (
+             BigQueryJobRunFacet,
+             get_from_nullable_chain,
+         )
+
+         if get_from_nullable_chain(properties, ["configuration", "query", "query"]):
+             # Exclude the query to avoid event size issues and duplicating SqlJobFacet information.
+             properties = copy.deepcopy(properties)
+             properties["configuration"]["query"].pop("query")
+         cache_hit = get_from_nullable_chain(properties, ["statistics", "query", "cacheHit"])
+         billed_bytes = get_from_nullable_chain(properties, ["statistics", "query", "totalBytesBilled"])
+         return BigQueryJobRunFacet(
+             cached=str(cache_hit).lower() == "true",
+             billedBytes=int(billed_bytes) if billed_bytes else None,
+             properties=json.dumps(properties),
+         )
+
+     @staticmethod
+     def _get_statistics_dataset_facet(properties) -> OutputStatisticsOutputDatasetFacet | None:
+         from openlineage.client.facet import OutputStatisticsOutputDatasetFacet
+
+         from airflow.providers.google.cloud.openlineage.utils import get_from_nullable_chain
+
+         query_plan = get_from_nullable_chain(properties, chain=["statistics", "query", "queryPlan"])
+         if not query_plan:
+             return None
+
+         out_stage = query_plan[-1]
+         out_rows = out_stage.get("recordsWritten", None)
+         out_bytes = out_stage.get("shuffleOutputBytes", None)
+         if out_bytes and out_rows:
+             return OutputStatisticsOutputDatasetFacet(rowCount=int(out_rows), size=int(out_bytes))
+         return None
+
+     def _get_dataset(self, table: dict) -> Dataset:
+         from openlineage.client.run import Dataset
+
+         BIGQUERY_NAMESPACE = "bigquery"
+
+         project = table.get("projectId")
+         dataset = table.get("datasetId")
+         table_name = table.get("tableId")
+         dataset_name = f"{project}.{dataset}.{table_name}"
+
+         dataset_schema = self._get_table_schema_safely(dataset_name)
+         return Dataset(
+             namespace=BIGQUERY_NAMESPACE,
+             name=dataset_name,
+             facets={
+                 "schema": dataset_schema,
+             }
+             if dataset_schema
+             else {},
+         )
+
+     def _get_table_schema_safely(self, table_name: str) -> SchemaDatasetFacet | None:
+         try:
+             return self._get_table_schema(table_name)
+         except Exception as e:
+             if hasattr(self, "log"):
+                 self.log.warning("Could not extract output schema from bigquery. %s", e)
+         return None
+
+     def _get_table_schema(self, table: str) -> SchemaDatasetFacet | None:
+         from openlineage.client.facet import SchemaDatasetFacet, SchemaField
+
+         from airflow.providers.google.cloud.openlineage.utils import get_from_nullable_chain
+
+         bq_table = self.client.get_table(table)
+
+         if not bq_table._properties:
+             return None
+
+         fields = get_from_nullable_chain(bq_table._properties, ["schema", "fields"])
+         if not fields:
+             return None
+
+         return SchemaDatasetFacet(
+             fields=[
+                 SchemaField(
+                     name=field.get("name"),
+                     type=field.get("type"),
+                     description=field.get("description"),
+                 )
+                 for field in fields
+             ]
+         )
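
For orientation, a minimal, hedged sketch of how the new mixin is meant to be consumed: any BigQuery operator that exposes the attributes the mixin reads above (self.sql, self.hook with get_client(), and self.job_id set at execution time) can inherit from it, and the OpenLineage provider then calls get_openlineage_facets_on_complete after the task finishes. The operator class below is hypothetical and not part of this diff; only the import paths and attribute names come from the code shown.

    # Hypothetical operator -- illustrates the attribute contract _BigQueryOpenLineageMixin relies on.
    from airflow.models.baseoperator import BaseOperator
    from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
    from airflow.providers.google.cloud.openlineage.mixins import _BigQueryOpenLineageMixin


    class MyBigQueryOperator(_BigQueryOpenLineageMixin, BaseOperator):  # hypothetical name
        def __init__(self, sql: str, **kwargs):
            super().__init__(**kwargs)
            self.sql = sql                  # used to build the SqlJobFacet
            self.hook = BigQueryHook()      # must expose get_client(project_id=...)
            self.job_id = None              # a job id (or list of ids) recorded by execute()

        def execute(self, context):
            # Submit the query via self.hook and store the resulting job id(s) in self.job_id;
            # the mixin then resolves them into datasets and run facets on completion.
            ...
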
@@ -17,9 +17,6 @@
  # under the License.
  from __future__ import annotations

- import copy
- import json
- import traceback
  from typing import TYPE_CHECKING, Any

  from attr import define, field
@@ -29,17 +26,15 @@ from openlineage.client.facet import (
      ColumnLineageDatasetFacetFieldsAdditional,
      ColumnLineageDatasetFacetFieldsAdditionalInputFields,
      DocumentationDatasetFacet,
-     ErrorMessageRunFacet,
-     OutputStatisticsOutputDatasetFacet,
      SchemaDatasetFacet,
      SchemaField,
  )
- from openlineage.client.run import Dataset

  from airflow.providers.google import __version__ as provider_version

  if TYPE_CHECKING:
      from google.cloud.bigquery.table import Table
+     from openlineage.client.run import Dataset


  BIGQUERY_NAMESPACE = "bigquery"
@@ -163,9 +158,13 @@ def get_from_nullable_chain(source: Any, chain: list[str]) -> Any | None:
          if not result:
              return None
      """
+     # chain.pop modifies passed list, this can be unexpected
+     chain = chain.copy()
      chain.reverse()
      try:
          while chain:
+             while isinstance(source, list) and len(source) == 1:
+                 source = source[0]
              next_key = chain.pop()
              if isinstance(source, dict):
                  source = source.get(next_key)
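
The two added branches above change the helper's observable behaviour in small but useful ways: the caller's chain list is no longer reversed and drained, and single-element lists encountered while walking the structure are stepped into transparently. A short hedged illustration follows; the sample dictionary is made up for demonstration, while the function and import path are from this provider.

    # Sample data is illustrative only.
    from airflow.providers.google.cloud.openlineage.utils import get_from_nullable_chain

    props = {"statistics": [{"query": {"cacheHit": "true"}}]}  # note the one-element list

    chain = ["statistics", "query", "cacheHit"]
    assert get_from_nullable_chain(props, chain) == "true"     # single-element list is unwrapped
    assert chain == ["statistics", "query", "cacheHit"]        # caller's list is left untouched
    assert get_from_nullable_chain(props, ["statistics", "query", "missing"]) is None
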
@@ -174,215 +173,3 @@ def get_from_nullable_chain(source: Any, chain: list[str]) -> Any | None:
-
-
- class _BigQueryOpenLineageMixin:
-     def get_openlineage_facets_on_complete(self, _):
-         """
-         Retrieve OpenLineage data for a COMPLETE BigQuery job.
-
-         This method retrieves statistics for the specified job_ids using the BigQueryDatasetsProvider.
-         It calls BigQuery API, retrieving input and output dataset info from it, as well as run-level
-         usage statistics.
-
-         Run facets should contain:
-             - ExternalQueryRunFacet
-             - BigQueryJobRunFacet
-
-         Run facets may contain:
-             - ErrorMessageRunFacet
-
-         Job facets should contain:
-             - SqlJobFacet if operator has self.sql
-
-         Input datasets should contain facets:
-             - DataSourceDatasetFacet
-             - SchemaDatasetFacet
-
-         Output datasets should contain facets:
-             - DataSourceDatasetFacet
-             - SchemaDatasetFacet
-             - OutputStatisticsOutputDatasetFacet
-         """
-         from openlineage.client.facet import ExternalQueryRunFacet, SqlJobFacet
-
-         from airflow.providers.openlineage.extractors import OperatorLineage
-         from airflow.providers.openlineage.sqlparser import SQLParser
-
-         if not self.job_id:
-             return OperatorLineage()
-
-         run_facets: dict[str, BaseFacet] = {
-             "externalQuery": ExternalQueryRunFacet(externalQueryId=self.job_id, source="bigquery")
-         }
-
-         job_facets = {"sql": SqlJobFacet(query=SQLParser.normalize_sql(self.sql))}
-
-         self.client = self.hook.get_client(project_id=self.hook.project_id)
-         job_ids = self.job_id
-         if isinstance(self.job_id, str):
-             job_ids = [self.job_id]
-         inputs, outputs = [], []
-         for job_id in job_ids:
-             inner_inputs, inner_outputs, inner_run_facets = self.get_facets(job_id=job_id)
-             inputs.extend(inner_inputs)
-             outputs.extend(inner_outputs)
-             run_facets.update(inner_run_facets)
-
-         return OperatorLineage(
-             inputs=inputs,
-             outputs=outputs,
-             run_facets=run_facets,
-             job_facets=job_facets,
-         )
-
-     def get_facets(self, job_id: str):
-         inputs = []
-         outputs = []
-         run_facets: dict[str, BaseFacet] = {}
-         if hasattr(self, "log"):
-             self.log.debug("Extracting data from bigquery job: `%s`", job_id)
-         try:
-             job = self.client.get_job(job_id=job_id)  # type: ignore
-             props = job._properties
-
-             if get_from_nullable_chain(props, ["status", "state"]) != "DONE":
-                 raise ValueError(f"Trying to extract data from running bigquery job: `{job_id}`")
-
-             # TODO: remove bigQuery_job in next release
-             run_facets["bigQuery_job"] = run_facets["bigQueryJob"] = self._get_bigquery_job_run_facet(props)
-
-             if get_from_nullable_chain(props, ["statistics", "numChildJobs"]):
-                 if hasattr(self, "log"):
-                     self.log.debug("Found SCRIPT job. Extracting lineage from child jobs instead.")
-                 # SCRIPT job type has no input / output information but spawns child jobs that have one
-                 # https://cloud.google.com/bigquery/docs/information-schema-jobs#multi-statement_query_job
-                 for child_job_id in self.client.list_jobs(parent_job=job_id):
-                     child_job = self.client.get_job(job_id=child_job_id)  # type: ignore
-                     child_inputs, child_output = self._get_inputs_outputs_from_job(child_job._properties)
-                     inputs.extend(child_inputs)
-                     outputs.append(child_output)
-             else:
-                 inputs, _output = self._get_inputs_outputs_from_job(props)
-                 outputs.append(_output)
-         except Exception as e:
-             if hasattr(self, "log"):
-                 self.log.warning("Cannot retrieve job details from BigQuery.Client. %s", e, exc_info=True)
-             exception_msg = traceback.format_exc()
-             # TODO: remove BigQueryErrorRunFacet in next release
-             run_facets.update(
-                 {
-                     "errorMessage": ErrorMessageRunFacet(
-                         message=f"{e}: {exception_msg}",
-                         programmingLanguage="python",
-                     ),
-                     "bigQuery_error": BigQueryErrorRunFacet(
-                         clientError=f"{e}: {exception_msg}",
-                     ),
-                 }
-             )
-         deduplicated_outputs = self._deduplicate_outputs(outputs)
-         return inputs, deduplicated_outputs, run_facets
-
-     def _deduplicate_outputs(self, outputs: list[Dataset | None]) -> list[Dataset]:
-         # Sources are the same so we can compare only names
-         final_outputs = {}
-         for single_output in outputs:
-             if not single_output:
-                 continue
-             key = single_output.name
-             if key not in final_outputs:
-                 final_outputs[key] = single_output
-                 continue
-
-             # No OutputStatisticsOutputDatasetFacet is added to duplicated outputs as we can not determine
-             # if the rowCount or size can be summed together.
-             single_output.facets.pop("outputStatistics", None)
-             final_outputs[key] = single_output
-
-         return list(final_outputs.values())
-
-     def _get_inputs_outputs_from_job(self, properties: dict) -> tuple[list[Dataset], Dataset | None]:
-         input_tables = get_from_nullable_chain(properties, ["statistics", "query", "referencedTables"]) or []
-         output_table = get_from_nullable_chain(properties, ["configuration", "query", "destinationTable"])
-         inputs = [self._get_dataset(input_table) for input_table in input_tables]
-         if output_table:
-             output = self._get_dataset(output_table)
-             dataset_stat_facet = self._get_statistics_dataset_facet(properties)
-             if dataset_stat_facet:
-                 output.facets.update({"outputStatistics": dataset_stat_facet})
-
-         return inputs, output
-
-     @staticmethod
-     def _get_bigquery_job_run_facet(properties: dict) -> BigQueryJobRunFacet:
-         if get_from_nullable_chain(properties, ["configuration", "query", "query"]):
-             # Exclude the query to avoid event size issues and duplicating SqlJobFacet information.
-             properties = copy.deepcopy(properties)
-             properties["configuration"]["query"].pop("query")
-         cache_hit = get_from_nullable_chain(properties, ["statistics", "query", "cacheHit"])
-         billed_bytes = get_from_nullable_chain(properties, ["statistics", "query", "totalBytesBilled"])
-         return BigQueryJobRunFacet(
-             cached=str(cache_hit).lower() == "true",
-             billedBytes=int(billed_bytes) if billed_bytes else None,
-             properties=json.dumps(properties),
-         )
-
-     @staticmethod
-     def _get_statistics_dataset_facet(properties) -> OutputStatisticsOutputDatasetFacet | None:
-         query_plan = get_from_nullable_chain(properties, chain=["statistics", "query", "queryPlan"])
-         if not query_plan:
-             return None
-
-         out_stage = query_plan[-1]
-         out_rows = out_stage.get("recordsWritten", None)
-         out_bytes = out_stage.get("shuffleOutputBytes", None)
-         if out_bytes and out_rows:
-             return OutputStatisticsOutputDatasetFacet(rowCount=int(out_rows), size=int(out_bytes))
-         return None
-
-     def _get_dataset(self, table: dict) -> Dataset:
-         project = table.get("projectId")
-         dataset = table.get("datasetId")
-         table_name = table.get("tableId")
-         dataset_name = f"{project}.{dataset}.{table_name}"
-
-         dataset_schema = self._get_table_schema_safely(dataset_name)
-         return Dataset(
-             namespace=BIGQUERY_NAMESPACE,
-             name=dataset_name,
-             facets={
-                 "schema": dataset_schema,
-             }
-             if dataset_schema
-             else {},
-         )
-
-     def _get_table_schema_safely(self, table_name: str) -> SchemaDatasetFacet | None:
-         try:
-             return self._get_table_schema(table_name)
-         except Exception as e:
-             if hasattr(self, "log"):
-                 self.log.warning("Could not extract output schema from bigquery. %s", e)
-         return None
-
-     def _get_table_schema(self, table: str) -> SchemaDatasetFacet | None:
-         bq_table = self.client.get_table(table)
-
-         if not bq_table._properties:
-             return None
-
-         fields = get_from_nullable_chain(bq_table._properties, ["schema", "fields"])
-         if not fields:
-             return None
-
-         return SchemaDatasetFacet(
-             fields=[
-                 SchemaField(
-                     name=field.get("name"),
-                     type=field.get("type"),
-                     description=field.get("description"),
-                 )
-                 for field in fields
-             ]
-         )