apache-airflow-providers-google 10.10.0__py3-none-any.whl → 10.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. airflow/providers/google/__init__.py +1 -1
  2. airflow/providers/google/cloud/hooks/cloud_run.py +4 -2
  3. airflow/providers/google/cloud/hooks/vertex_ai/auto_ml.py +131 -27
  4. airflow/providers/google/cloud/hooks/vertex_ai/batch_prediction_job.py +1 -9
  5. airflow/providers/google/cloud/hooks/vertex_ai/custom_job.py +121 -4
  6. airflow/providers/google/cloud/hooks/vertex_ai/endpoint_service.py +1 -11
  7. airflow/providers/google/cloud/hooks/vertex_ai/hyperparameter_tuning_job.py +1 -10
  8. airflow/providers/google/cloud/hooks/vertex_ai/model_service.py +220 -6
  9. airflow/providers/google/cloud/hooks/vertex_ai/pipeline_job.py +409 -0
  10. airflow/providers/google/cloud/links/vertex_ai.py +49 -0
  11. airflow/providers/google/cloud/operators/dataproc.py +32 -10
  12. airflow/providers/google/cloud/operators/gcs.py +1 -1
  13. airflow/providers/google/cloud/operators/mlengine.py +116 -0
  14. airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py +45 -0
  15. airflow/providers/google/cloud/operators/vertex_ai/batch_prediction_job.py +2 -8
  16. airflow/providers/google/cloud/operators/vertex_ai/custom_job.py +287 -201
  17. airflow/providers/google/cloud/operators/vertex_ai/endpoint_service.py +1 -9
  18. airflow/providers/google/cloud/operators/vertex_ai/hyperparameter_tuning_job.py +2 -9
  19. airflow/providers/google/cloud/operators/vertex_ai/model_service.py +451 -12
  20. airflow/providers/google/cloud/operators/vertex_ai/pipeline_job.py +464 -0
  21. airflow/providers/google/cloud/utils/mlengine_operator_utils.py +7 -1
  22. airflow/providers/google/get_provider_info.py +5 -0
  23. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/METADATA +6 -6
  24. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/RECORD +29 -27
  25. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/LICENSE +0 -0
  26. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/NOTICE +0 -0
  27. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/WHEEL +0 -0
  28. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/entry_points.txt +0 -0
  29. {apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/top_level.txt +0 -0
airflow/providers/google/cloud/operators/vertex_ai/pipeline_job.py
@@ -0,0 +1,464 @@
+ #
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+ """This module contains Google Vertex AI operators."""
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Sequence
+
+ from google.api_core.exceptions import NotFound
+ from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
+ from google.cloud.aiplatform_v1.types import PipelineJob
+
+ from airflow.providers.google.cloud.hooks.vertex_ai.pipeline_job import PipelineJobHook
+ from airflow.providers.google.cloud.links.vertex_ai import (
+     VertexAIPipelineJobLink,
+     VertexAIPipelineJobListLink,
+ )
+ from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
+
+ if TYPE_CHECKING:
+     from google.api_core.retry import Retry
+     from google.cloud.aiplatform.metadata import experiment_resources
+
+     from airflow.utils.context import Context
+
+
+ class RunPipelineJobOperator(GoogleCloudBaseOperator):
+     """
+     Run Pipeline job.
+
+     :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
+     :param region: Required. The ID of the Google Cloud region that the service belongs to.
+     :param display_name: Required. The user-defined name of this Pipeline.
+     :param template_path: Required. The path of PipelineJob or PipelineSpec JSON or YAML file. It can be
+         a local path, a Google Cloud Storage URI (e.g. "gs://project.name"), an Artifact Registry URI
+         (e.g. "https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"), or an HTTPS URI.
+     :param job_id: Optional. The unique ID of the job run. If not specified, pipeline name + timestamp
+         will be used.
+     :param pipeline_root: Optional. The root of the pipeline outputs. If not set, the staging bucket set
+         in aiplatform.init will be used. If that's not set, a pipeline-specific artifacts bucket will be
+         used.
+     :param parameter_values: Optional. The mapping from runtime parameter names to their values that
+         control the pipeline run.
+     :param input_artifacts: Optional. The mapping from the runtime parameter name for this artifact to
+         its resource id. For example: "vertex_model":"456". Note: the full resource name
+         ("projects/123/locations/us-central1/metadataStores/default/artifacts/456") cannot be used.
+     :param enable_caching: Optional. Whether to turn on caching for the run.
+         If this is not set, it defaults to the compile-time settings, which are True for all tasks by
+         default, while users may specify different caching options for individual tasks.
+         If this is set, the setting applies to all tasks in the pipeline and overrides the compile-time
+         settings.
+     :param encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer-managed
+         encryption key used to protect the job. Has the form:
+         ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
+         The key needs to be in the same region as where the compute resource is created. If this is set,
+         then all resources created by the PipelineJob will be encrypted with the provided encryption key.
+         Overrides encryption_spec_key_name set in aiplatform.init.
+     :param labels: Optional. The user-defined metadata to organize the PipelineJob.
+     :param failure_policy: Optional. The failure policy - "slow" or "fast". By default, a pipeline
+         continues to run until no more tasks can be executed, also known as
+         PIPELINE_FAILURE_POLICY_FAIL_SLOW (corresponds to "slow"). However, if a pipeline is set
+         to PIPELINE_FAILURE_POLICY_FAIL_FAST (corresponds to "fast"), it stops scheduling any new
+         tasks once a task has failed. Any already scheduled tasks will continue to completion.
+     :param service_account: Optional. Specifies the service account for the workload run-as account.
+         Users submitting jobs must have act-as permission on this run-as account.
+     :param network: Optional. The full name of the Compute Engine network to which the job should be
+         peered. For example, projects/12345/global/networks/myVPC.
+         Private services access must already be configured for the network. If left unspecified, the
+         network set in aiplatform.init will be used. Otherwise, the job is not peered with any network.
+     :param create_request_timeout: Optional. The timeout for the create request in seconds.
+     :param experiment: Optional. The Vertex AI experiment name or instance to associate with this
+         PipelineJob. Metrics produced by the PipelineJob as system.Metric Artifacts will be associated as
+         metrics to the current Experiment Run. Pipeline parameters will be associated as parameters to
+         the current Experiment Run.
+     :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
+     :param impersonation_chain: Optional service account to impersonate using short-term
+         credentials, or chained list of accounts required to get the access_token
+         of the last account in the list, which will be impersonated in the request.
+         If set as a string, the account must grant the originating account
+         the Service Account Token Creator IAM role.
+         If set as a sequence, the identities from the list must grant
+         Service Account Token Creator IAM role to the directly preceding identity, with first
+         account from the list granting this role to the originating account (templated).
+     """
+
+     template_fields = [
+         "region",
+         "project_id",
+         "input_artifacts",
+         "impersonation_chain",
+     ]
+     operator_extra_links = (VertexAIPipelineJobLink(),)
+
+     def __init__(
+         self,
+         *,
+         project_id: str,
+         region: str,
+         display_name: str,
+         template_path: str,
+         job_id: str | None = None,
+         pipeline_root: str | None = None,
+         parameter_values: dict[str, Any] | None = None,
+         input_artifacts: dict[str, str] | None = None,
+         enable_caching: bool | None = None,
+         encryption_spec_key_name: str | None = None,
+         labels: dict[str, str] | None = None,
+         failure_policy: str | None = None,
+         service_account: str | None = None,
+         network: str | None = None,
+         create_request_timeout: float | None = None,
+         experiment: str | experiment_resources.Experiment | None = None,
+         gcp_conn_id: str = "google_cloud_default",
+         impersonation_chain: str | Sequence[str] | None = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         self.region = region
+         self.project_id = project_id
+         self.display_name = display_name
+         self.template_path = template_path
+         self.job_id = job_id
+         self.pipeline_root = pipeline_root
+         self.parameter_values = parameter_values
+         self.input_artifacts = input_artifacts
+         self.enable_caching = enable_caching
+         self.encryption_spec_key_name = encryption_spec_key_name
+         self.labels = labels
+         self.failure_policy = failure_policy
+         self.service_account = service_account
+         self.network = network
+         self.create_request_timeout = create_request_timeout
+         self.experiment = experiment
+         self.gcp_conn_id = gcp_conn_id
+         self.impersonation_chain = impersonation_chain
+         self.hook: PipelineJobHook | None = None
+
+     def execute(self, context: Context):
+         self.log.info("Running Pipeline job")
+         self.hook = PipelineJobHook(
+             gcp_conn_id=self.gcp_conn_id,
+             impersonation_chain=self.impersonation_chain,
+         )
+         result = self.hook.run_pipeline_job(
+             project_id=self.project_id,
+             region=self.region,
+             display_name=self.display_name,
+             template_path=self.template_path,
+             job_id=self.job_id,
+             pipeline_root=self.pipeline_root,
+             parameter_values=self.parameter_values,
+             input_artifacts=self.input_artifacts,
+             enable_caching=self.enable_caching,
+             encryption_spec_key_name=self.encryption_spec_key_name,
+             labels=self.labels,
+             failure_policy=self.failure_policy,
+             service_account=self.service_account,
+             network=self.network,
+             create_request_timeout=self.create_request_timeout,
+             experiment=self.experiment,
+         )
+
+         pipeline_job = result.to_dict()
+         pipeline_job_id = self.hook.extract_pipeline_job_id(pipeline_job)
+         self.log.info("Pipeline job was created. Job id: %s", pipeline_job_id)
+
+         self.xcom_push(context, key="pipeline_job_id", value=pipeline_job_id)
+         VertexAIPipelineJobLink.persist(context=context, task_instance=self, pipeline_id=pipeline_job_id)
+         return pipeline_job
+
+     def on_kill(self) -> None:
+         """Callback called when the operator is killed; cancel any running job."""
+         if self.hook:
+             self.hook.cancel_pipeline_job()
+
+
+ class GetPipelineJobOperator(GoogleCloudBaseOperator):
+     """
+     Get a Pipeline job.
+
+     :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
+     :param region: Required. The ID of the Google Cloud region that the service belongs to.
+     :param pipeline_job_id: Required. The ID of the PipelineJob resource.
+     :param retry: Designation of what errors, if any, should be retried.
+     :param timeout: The timeout for this request.
+     :param metadata: Strings which should be sent along with the request as metadata.
+     :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
+     :param impersonation_chain: Optional service account to impersonate using short-term
+         credentials, or chained list of accounts required to get the access_token
+         of the last account in the list, which will be impersonated in the request.
+         If set as a string, the account must grant the originating account
+         the Service Account Token Creator IAM role.
+         If set as a sequence, the identities from the list must grant
+         Service Account Token Creator IAM role to the directly preceding identity, with first
+         account from the list granting this role to the originating account (templated).
+     """
+
+     template_fields = [
+         "region",
+         "pipeline_job_id",
+         "project_id",
+         "impersonation_chain",
+     ]
+     operator_extra_links = (VertexAIPipelineJobLink(),)
+
+     def __init__(
+         self,
+         *,
+         project_id: str,
+         region: str,
+         pipeline_job_id: str,
+         retry: Retry | _MethodDefault = DEFAULT,
+         timeout: float | None = None,
+         metadata: Sequence[tuple[str, str]] = (),
+         gcp_conn_id: str = "google_cloud_default",
+         impersonation_chain: str | Sequence[str] | None = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         self.region = region
+         self.project_id = project_id
+         self.pipeline_job_id = pipeline_job_id
+         self.retry = retry
+         self.timeout = timeout
+         self.metadata = metadata
+         self.gcp_conn_id = gcp_conn_id
+         self.impersonation_chain = impersonation_chain
+
+     def execute(self, context: Context):
+         hook = PipelineJobHook(
+             gcp_conn_id=self.gcp_conn_id,
+             impersonation_chain=self.impersonation_chain,
+         )
+
+         try:
+             self.log.info("Get Pipeline job: %s", self.pipeline_job_id)
+             result = hook.get_pipeline_job(
+                 project_id=self.project_id,
+                 region=self.region,
+                 pipeline_job_id=self.pipeline_job_id,
+                 retry=self.retry,
+                 timeout=self.timeout,
+                 metadata=self.metadata,
+             )
+             VertexAIPipelineJobLink.persist(
+                 context=context, task_instance=self, pipeline_id=self.pipeline_job_id
+             )
+             self.log.info("Pipeline job was retrieved.")
+             return PipelineJob.to_dict(result)
+         except NotFound:
+             self.log.info("The Pipeline job %s does not exist.", self.pipeline_job_id)
+
+
+ class ListPipelineJobOperator(GoogleCloudBaseOperator):
+     """Lists PipelineJobs in a Location.
+
+     :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
+     :param region: Required. The ID of the Google Cloud region that the service belongs to.
+     :param filter: Optional. Lists the PipelineJobs that match the filter expression. The
+         following fields are supported:
+
+         - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons.
+         - ``display_name``: Supports ``=``, ``!=`` comparisons, and ``:`` wildcard.
+         - ``pipeline_job_user_id``: Supports ``=``, ``!=`` comparisons, and ``:`` wildcard.
+           For example, you can check whether a pipeline's display_name contains *step* with
+           display_name:"*step*".
+         - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons.
+           Values must be in RFC 3339 format.
+         - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons.
+           Values must be in RFC 3339 format.
+         - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons.
+           Values must be in RFC 3339 format.
+         - ``labels``: Supports key-value equality and key presence.
+
+         Filter expressions can be combined together using logical
+         operators (``AND`` & ``OR``). For example:
+         ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``.
+
+         The syntax to define filter expression is based on
+         https://google.aip.dev/160.
+     :param page_size: Optional. The standard list page size.
+     :param page_token: Optional. The standard list page token. Typically obtained via
+         [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token]
+         of the previous
+         [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
+         call.
+     :param order_by: Optional. A comma-separated list of fields to order by. The default
+         sort order is ascending. Use "desc" after a field name for descending. You can provide
+         multiple order_by fields, e.g. "create_time desc, end_time" or "end_time,
+         start_time, update_time". For example, "create_time desc, end_time" orders results
+         by create time in descending order, and if multiple jobs have the same create time,
+         orders them by end time in ascending order. If order_by is not specified, results
+         are ordered by create time in descending order. Supported fields:
+
+         - ``create_time``
+         - ``update_time``
+         - ``end_time``
+         - ``start_time``
+     :param retry: Designation of what errors, if any, should be retried.
+     :param timeout: The timeout for this request.
+     :param metadata: Strings which should be sent along with the request as metadata.
+     :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
+     :param impersonation_chain: Optional service account to impersonate using short-term
+         credentials, or chained list of accounts required to get the access_token
+         of the last account in the list, which will be impersonated in the request.
+         If set as a string, the account must grant the originating account
+         the Service Account Token Creator IAM role.
+         If set as a sequence, the identities from the list must grant
+         Service Account Token Creator IAM role to the directly preceding identity, with first
+         account from the list granting this role to the originating account (templated).
+     """
+
+     template_fields = [
+         "region",
+         "project_id",
+         "impersonation_chain",
+     ]
+     operator_extra_links = [
+         VertexAIPipelineJobListLink(),
+     ]
+
+     def __init__(
+         self,
+         *,
+         region: str,
+         project_id: str,
+         page_size: int | None = None,
+         page_token: str | None = None,
+         filter: str | None = None,
+         order_by: str | None = None,
+         retry: Retry | _MethodDefault = DEFAULT,
+         timeout: float | None = None,
+         metadata: Sequence[tuple[str, str]] = (),
+         gcp_conn_id: str = "google_cloud_default",
+         impersonation_chain: str | Sequence[str] | None = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         self.region = region
+         self.project_id = project_id
+         self.page_size = page_size
+         self.page_token = page_token
+         self.filter = filter
+         self.order_by = order_by
+         self.retry = retry
+         self.timeout = timeout
+         self.metadata = metadata
+         self.gcp_conn_id = gcp_conn_id
+         self.impersonation_chain = impersonation_chain
+
+     def execute(self, context: Context):
+         hook = PipelineJobHook(
+             gcp_conn_id=self.gcp_conn_id,
+             impersonation_chain=self.impersonation_chain,
+         )
+         results = hook.list_pipeline_jobs(
+             region=self.region,
+             project_id=self.project_id,
+             page_size=self.page_size,
+             page_token=self.page_token,
+             filter=self.filter,
+             order_by=self.order_by,
+             retry=self.retry,
+             timeout=self.timeout,
+             metadata=self.metadata,
+         )
+         VertexAIPipelineJobListLink.persist(context=context, task_instance=self)
+         return [PipelineJob.to_dict(result) for result in results]
+
+
+ class DeletePipelineJobOperator(GoogleCloudBaseOperator):
+     """
+     Delete a Pipeline job.
+
+     :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
+     :param region: Required. The ID of the Google Cloud region that the service belongs to.
+     :param pipeline_job_id: Required. The ID of the PipelineJob resource to be deleted.
+     :param retry: Designation of what errors, if any, should be retried.
+     :param timeout: The timeout for this request.
+     :param metadata: Strings which should be sent along with the request as metadata.
+     :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
+     :param impersonation_chain: Optional service account to impersonate using short-term
+         credentials, or chained list of accounts required to get the access_token
+         of the last account in the list, which will be impersonated in the request.
+         If set as a string, the account must grant the originating account
+         the Service Account Token Creator IAM role.
+         If set as a sequence, the identities from the list must grant
+         Service Account Token Creator IAM role to the directly preceding identity, with first
+         account from the list granting this role to the originating account (templated).
+     """
+
+     template_fields = [
+         "region",
+         "project_id",
+         "pipeline_job_id",
+         "impersonation_chain",
+     ]
+
+     def __init__(
+         self,
+         *,
+         project_id: str,
+         region: str,
+         pipeline_job_id: str,
+         retry: Retry | _MethodDefault = DEFAULT,
+         timeout: float | None = None,
+         metadata: Sequence[tuple[str, str]] = (),
+         gcp_conn_id: str = "google_cloud_default",
+         impersonation_chain: str | Sequence[str] | None = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         self.region = region
+         self.project_id = project_id
+         self.pipeline_job_id = pipeline_job_id
+         self.retry = retry
+         self.timeout = timeout
+         self.metadata = metadata
+         self.gcp_conn_id = gcp_conn_id
+         self.impersonation_chain = impersonation_chain
+
+     def execute(self, context: Context):
+         hook = PipelineJobHook(
+             gcp_conn_id=self.gcp_conn_id,
+             impersonation_chain=self.impersonation_chain,
+         )
+         try:
+             self.log.info("Deleting Pipeline job: %s", self.pipeline_job_id)
+             operation = hook.delete_pipeline_job(
+                 region=self.region,
+                 project_id=self.project_id,
+                 pipeline_job_id=self.pipeline_job_id,
+                 retry=self.retry,
+                 timeout=self.timeout,
+                 metadata=self.metadata,
+             )
+             hook.wait_for_operation(timeout=self.timeout, operation=operation)
+             self.log.info("Pipeline job was deleted.")
+         except NotFound:
+             self.log.info("The Pipeline Job ID %s does not exist.", self.pipeline_job_id)
airflow/providers/google/cloud/utils/mlengine_operator_utils.py
@@ -56,9 +56,15 @@ def create_evaluate_ops(
      dag: DAG | None = None,
      py_interpreter="python3",
  ) -> tuple[MLEngineStartBatchPredictionJobOperator, BeamRunPythonPipelineOperator, PythonOperator]:
-     """
+     r"""
      Creates Operators needed for model evaluation and returns.
 
+     This function is deprecated. All the functionality of legacy MLEngine and new features are available
+     on the Vertex AI platform.
+
+     To create and view Model Evaluation, please check the documentation:
+     https://cloud.google.com/vertex-ai/docs/evaluation/using-model-evaluation#create_an_evaluation.
+
      It gets prediction over inputs via Cloud ML Engine BatchPrediction API by
      calling MLEngineBatchPredictionOperator, then summarize and validate
      the result via Cloud Dataflow using DataFlowPythonOperator.
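To illustrate the three-operator chain this (now deprecated) helper returns, a hedged sketch of a call. The paths, project, model/version names, and the metric and validation callables are all hypothetical, and it assumes an enclosing DAG object named dag:

# Sketch only: create_evaluate_ops is deprecated in favor of Vertex AI
# model evaluation; names and paths below are hypothetical.
from airflow.providers.google.cloud.utils.mlengine_operator_utils import create_evaluate_ops


def metric_fn(inst):
    # Hypothetical per-instance metric: squared error of prediction vs. label.
    return ((inst["prediction"] - inst["label"]) ** 2,)


def validate_err(summary):
    # Hypothetical validation: fail the task if mean squared error is too high.
    if summary["err"] > 0.2:
        raise ValueError(f"Too high err: {summary['err']}")
    return summary


evaluate_prediction, evaluate_summary, evaluate_validation = create_evaluate_ops(
    task_prefix="evaluate-ops",  # hypothetical; prefixes the three task IDs
    data_format="TEXT",
    input_paths=["gs://my-bucket/eval-data/*"],  # hypothetical
    prediction_path="gs://my-bucket/eval-output",  # hypothetical
    metric_fn_and_keys=(metric_fn, ["err"]),
    validate_fn=validate_err,
    project_id="my-project",  # hypothetical
    region="us-central1",  # hypothetical
    model_name="my_model",  # hypothetical
    version_name="v1",  # hypothetical
    dag=dag,  # assumes a DAG defined elsewhere
)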
airflow/providers/google/get_provider_info.py
@@ -29,6 +29,7 @@ def get_provider_info():
          "description": "Google services including:\n\n  - `Google Ads <https://ads.google.com/>`__\n  - `Google Cloud (GCP) <https://cloud.google.com/>`__\n  - `Google Firebase <https://firebase.google.com/>`__\n  - `Google LevelDB <https://github.com/google/leveldb/>`__\n  - `Google Marketing Platform <https://marketingplatform.google.com/>`__\n  - `Google Workspace <https://workspace.google.com/>`__ (formerly Google Suite)\n",
          "suspended": False,
          "versions": [
+             "10.10.1",
              "10.10.0",
              "10.9.0",
              "10.8.0",
@@ -773,6 +774,7 @@ def get_provider_info():
                  "airflow.providers.google.cloud.operators.vertex_ai.endpoint_service",
                  "airflow.providers.google.cloud.operators.vertex_ai.hyperparameter_tuning_job",
                  "airflow.providers.google.cloud.operators.vertex_ai.model_service",
+                 "airflow.providers.google.cloud.operators.vertex_ai.pipeline_job",
              ],
          },
          {
@@ -1097,6 +1099,7 @@ def get_provider_info():
                  "airflow.providers.google.cloud.hooks.vertex_ai.endpoint_service",
                  "airflow.providers.google.cloud.hooks.vertex_ai.hyperparameter_tuning_job",
                  "airflow.providers.google.cloud.hooks.vertex_ai.model_service",
+                 "airflow.providers.google.cloud.hooks.vertex_ai.pipeline_job",
              ],
          },
          {
@@ -1464,6 +1467,8 @@ def get_provider_info():
              "airflow.providers.google.cloud.links.vertex_ai.VertexAIBatchPredictionJobListLink",
              "airflow.providers.google.cloud.links.vertex_ai.VertexAIEndpointLink",
              "airflow.providers.google.cloud.links.vertex_ai.VertexAIEndpointListLink",
+             "airflow.providers.google.cloud.links.vertex_ai.VertexAIPipelineJobLink",
+             "airflow.providers.google.cloud.links.vertex_ai.VertexAIPipelineJobListLink",
              "airflow.providers.google.cloud.links.workflows.WorkflowsWorkflowDetailsLink",
              "airflow.providers.google.cloud.links.workflows.WorkflowsListOfWorkflowsLink",
              "airflow.providers.google.cloud.links.workflows.WorkflowsExecutionLink",
{apache_airflow_providers_google-10.10.0.dist-info → apache_airflow_providers_google-10.10.1.dist-info}/METADATA
@@ -1,14 +1,14 @@
  Metadata-Version: 2.1
  Name: apache-airflow-providers-google
- Version: 10.10.0
+ Version: 10.10.1
  Summary: Provider for Apache Airflow. Implements apache-airflow-providers-google package
  Home-page: https://airflow.apache.org/
  Download-URL: https://archive.apache.org/dist/airflow/providers
  Author: Apache Software Foundation
  Author-email: dev@airflow.apache.org
  License: Apache License 2.0
- Project-URL: Documentation, https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.0/
- Project-URL: Changelog, https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.0/changelog.html
+ Project-URL: Documentation, https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.1/
+ Project-URL: Changelog, https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.1/changelog.html
  Project-URL: Bug Tracker, https://github.com/apache/airflow/issues
  Project-URL: Source Code, https://github.com/apache/airflow
  Project-URL: Slack Chat, https://s.apache.org/airflow-slack
@@ -164,7 +164,7 @@ Requires-Dist: apache-airflow-providers-trino ; extra == 'trino'
 
  Package ``apache-airflow-providers-google``
 
- Release: ``10.10.0``
+ Release: ``10.10.1``
 
 
  Google services including:
@@ -184,7 +184,7 @@ This is a provider package for ``google`` provider. All classes for this provider
  are in ``airflow.providers.google`` python package.
 
  You can find package information and changelog for the provider
- in the `documentation <https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.0/>`_.
+ in the `documentation <https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.1/>`_.
 
 
  Installation
@@ -297,4 +297,4 @@ Dependent package
  ======================================================================================================================== ====================
 
  The changelog for the provider package can be found in the
- `changelog <https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.0/changelog.html>`_.
+ `changelog <https://airflow.apache.org/docs/apache-airflow-providers-google/10.10.1/changelog.html>`_.