google-cloud-dataproc-v1 0.14.0 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/client.rb +18 -6
  3. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service.rb +1 -1
  4. data/lib/google/cloud/dataproc/v1/batch_controller/client.rb +51 -15
  5. data/lib/google/cloud/dataproc/v1/batch_controller/operations.rb +12 -14
  6. data/lib/google/cloud/dataproc/v1/batch_controller/paths.rb +19 -0
  7. data/lib/google/cloud/dataproc/v1/batch_controller.rb +1 -1
  8. data/lib/google/cloud/dataproc/v1/batches_pb.rb +2 -0
  9. data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb +43 -31
  10. data/lib/google/cloud/dataproc/v1/cluster_controller/operations.rb +12 -14
  11. data/lib/google/cloud/dataproc/v1/cluster_controller/paths.rb +19 -0
  12. data/lib/google/cloud/dataproc/v1/cluster_controller.rb +1 -1
  13. data/lib/google/cloud/dataproc/v1/clusters_pb.rb +11 -9
  14. data/lib/google/cloud/dataproc/v1/job_controller/client.rb +22 -10
  15. data/lib/google/cloud/dataproc/v1/job_controller/operations.rb +12 -14
  16. data/lib/google/cloud/dataproc/v1/job_controller.rb +1 -1
  17. data/lib/google/cloud/dataproc/v1/jobs_pb.rb +13 -0
  18. data/lib/google/cloud/dataproc/v1/node_group_controller/client.rb +23 -9
  19. data/lib/google/cloud/dataproc/v1/node_group_controller/operations.rb +12 -14
  20. data/lib/google/cloud/dataproc/v1/node_group_controller.rb +1 -1
  21. data/lib/google/cloud/dataproc/v1/operations_pb.rb +1 -0
  22. data/lib/google/cloud/dataproc/v1/shared_pb.rb +24 -1
  23. data/lib/google/cloud/dataproc/v1/version.rb +1 -1
  24. data/lib/google/cloud/dataproc/v1/workflow_template_service/client.rb +28 -15
  25. data/lib/google/cloud/dataproc/v1/workflow_template_service/operations.rb +12 -14
  26. data/lib/google/cloud/dataproc/v1/workflow_template_service.rb +1 -1
  27. data/lib/google/cloud/dataproc/v1/workflow_templates_services_pb.rb +2 -1
  28. data/lib/google/cloud/dataproc/v1.rb +3 -3
  29. data/proto_docs/google/cloud/dataproc/v1/autoscaling_policies.rb +8 -8
  30. data/proto_docs/google/cloud/dataproc/v1/batches.rb +42 -14
  31. data/proto_docs/google/cloud/dataproc/v1/clusters.rb +70 -54
  32. data/proto_docs/google/cloud/dataproc/v1/jobs.rb +48 -0
  33. data/proto_docs/google/cloud/dataproc/v1/node_groups.rb +1 -1
  34. data/proto_docs/google/cloud/dataproc/v1/operations.rb +3 -0
  35. data/proto_docs/google/cloud/dataproc/v1/shared.rb +174 -56
  36. data/proto_docs/google/cloud/dataproc/v1/workflow_templates.rb +14 -11
  37. metadata +27 -7
@@ -27,12 +27,12 @@ module Google
  # Optional. Version of the batch runtime.
  # @!attribute [rw] container_image
  # @return [::String]
- # Optional. Optional custom container image for the job runtime environment. If
- # not specified, a default container image will be used.
+ # Optional. Optional custom container image for the job runtime environment.
+ # If not specified, a default container image will be used.
  # @!attribute [rw] properties
  # @return [::Google::Protobuf::Map{::String => ::String}]
- # Optional. A mapping of property names to values, which are used to configure workload
- # execution.
+ # Optional. A mapping of property names to values, which are used to
+ # configure workload execution.
  class RuntimeConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
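
For context, `RuntimeConfig` carries the custom container image and workload properties described above. A minimal sketch of building one in Ruby; the image path and property values are placeholders, not library defaults:

    require "google/cloud/dataproc/v1"

    # Hypothetical values; substitute your own image and Spark properties.
    runtime_config = Google::Cloud::Dataproc::V1::RuntimeConfig.new(
      container_image: "gcr.io/my-project/my-spark-image:latest", # custom job runtime image
      properties: { "spark.executor.cores" => "4" }               # workload configuration map
    )
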
@@ -75,6 +75,28 @@ module Google
  # @!attribute [rw] kms_key
  # @return [::String]
  # Optional. The Cloud KMS key to use for encryption.
+ # @!attribute [rw] ttl
+ # @return [::Google::Protobuf::Duration]
+ # Optional. The duration after which the workload will be terminated.
+ # When the workload passes this ttl, it will be unconditionally killed
+ # without waiting for ongoing work to finish.
+ # Minimum value is 10 minutes; maximum value is 14 days (see JSON
+ # representation of
+ # [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+ # If both ttl and idle_ttl are specified, the conditions are treated as
+ # an OR: the workload will be terminated when it has been idle for idle_ttl
+ # or when the ttl has passed, whichever comes first.
+ # If ttl is not specified for a session, it defaults to 24h.
+ # @!attribute [rw] staging_bucket
+ # @return [::String]
+ # Optional. A Cloud Storage bucket used to stage workload dependencies,
+ # config files, and store workload output and other ephemeral data, such as
+ # Spark history files. If you do not specify a staging bucket, Cloud Dataproc
+ # will determine a Cloud Storage location according to the region where your
+ # workload is running, and then create and manage project-level, per-location
+ # staging and temporary buckets.
+ # **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ # a Cloud Storage bucket.**
  class ExecutionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
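
The new `ttl` and `staging_bucket` fields slot into `ExecutionConfig` as sketched below; the bucket name is hypothetical, and note from the doc above that it must be a bucket name, not a `gs://` URI:

    require "google/cloud/dataproc/v1"

    execution_config = Google::Cloud::Dataproc::V1::ExecutionConfig.new(
      # Terminate the workload unconditionally after 4 hours (min 10 minutes, max 14 days).
      ttl: { seconds: 4 * 60 * 60 },
      # Bucket name only -- not a gs:// URI.
      staging_bucket: "my-dataproc-staging-bucket"
    )
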
@@ -83,8 +105,8 @@ module Google
  # Spark History Server configuration for the workload.
  # @!attribute [rw] dataproc_cluster
  # @return [::String]
- # Optional. Resource name of an existing Dataproc Cluster to act as a Spark History
- # Server for the workload.
+ # Optional. Resource name of an existing Dataproc Cluster to act as a Spark
+ # History Server for the workload.
  #
  # Example:
  #
@@ -113,14 +135,23 @@ module Google
  # Runtime information about workload execution.
  # @!attribute [r] endpoints
  # @return [::Google::Protobuf::Map{::String => ::String}]
- # Output only. Map of remote access endpoints (such as web interfaces and APIs) to their
- # URIs.
+ # Output only. Map of remote access endpoints (such as web interfaces and
+ # APIs) to their URIs.
  # @!attribute [r] output_uri
  # @return [::String]
- # Output only. A URI pointing to the location of the stdout and stderr of the workload.
+ # Output only. A URI pointing to the location of the stdout and stderr of the
+ # workload.
  # @!attribute [r] diagnostic_output_uri
  # @return [::String]
  # Output only. A URI pointing to the location of the diagnostics tarball.
+ # @!attribute [r] approximate_usage
+ # @return [::Google::Cloud::Dataproc::V1::UsageMetrics]
+ # Output only. Approximate workload resource usage calculated after workload
+ # finishes (see [Dataproc Serverless pricing]
+ # (https://cloud.google.com/dataproc-serverless/pricing)).
+ # @!attribute [r] current_usage
+ # @return [::Google::Cloud::Dataproc::V1::UsageSnapshot]
+ # Output only. Snapshot of current workload resource usage.
  class RuntimeInfo
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
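
Once a batch finishes, the new usage fields can be read from `RuntimeInfo`. A sketch using the `BatchController` client; the batch resource name is hypothetical:

    require "google/cloud/dataproc/v1"

    client = Google::Cloud::Dataproc::V1::BatchController::Client.new
    batch  = client.get_batch name: "projects/my-project/locations/us-central1/batches/my-batch"

    info = batch.runtime_info
    puts info.output_uri                            # location of stdout/stderr
    puts info.approximate_usage&.milli_dcu_seconds  # totals, set after the workload finishes
    puts info.current_usage&.shuffle_storage_gb     # snapshot while the workload is running
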
@@ -135,19 +166,56 @@ module Google
  end
  end

+ # Usage metrics represent approximate total resources consumed by a workload.
+ # @!attribute [rw] milli_dcu_seconds
+ # @return [::Integer]
+ # Optional. DCU (Dataproc Compute Units) usage in (`milliDCU` x `seconds`)
+ # (see [Dataproc Serverless pricing]
+ # (https://cloud.google.com/dataproc-serverless/pricing)).
+ # @!attribute [rw] shuffle_storage_gb_seconds
+ # @return [::Integer]
+ # Optional. Shuffle storage usage in (`GB` x `seconds`) (see
+ # [Dataproc Serverless pricing]
+ # (https://cloud.google.com/dataproc-serverless/pricing)).
+ class UsageMetrics
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The usage snapshot represents the resources consumed by a workload at a
+ # specified time.
+ # @!attribute [rw] milli_dcu
+ # @return [::Integer]
+ # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see
+ # [Dataproc Serverless pricing]
+ # (https://cloud.google.com/dataproc-serverless/pricing)).
+ # @!attribute [rw] shuffle_storage_gb
+ # @return [::Integer]
+ # Optional. Shuffle Storage in gigabytes (GB). (see [Dataproc Serverless
+ # pricing] (https://cloud.google.com/dataproc-serverless/pricing))
+ # @!attribute [rw] snapshot_time
+ # @return [::Google::Protobuf::Timestamp]
+ # Optional. The timestamp of the usage snapshot.
+ class UsageSnapshot
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
  # The cluster's GKE config.
  # @!attribute [rw] gke_cluster_target
  # @return [::String]
- # Optional. A target GKE cluster to deploy to. It must be in the same project and
- # region as the Dataproc cluster (the GKE cluster can be zonal or regional).
- # Format: 'projects/\\{project}/locations/\\{location}/clusters/\\{cluster_id}'
+ # Optional. A target GKE cluster to deploy to. It must be in the same project
+ # and region as the Dataproc cluster (the GKE cluster can be zonal or
+ # regional). Format:
+ # 'projects/\\{project}/locations/\\{location}/clusters/\\{cluster_id}'
  # @!attribute [rw] node_pool_target
  # @return [::Array<::Google::Cloud::Dataproc::V1::GkeNodePoolTarget>]
- # Optional. GKE NodePools where workloads will be scheduled. At least one node pool
- # must be assigned the 'default' role. Each role can be given to only a
- # single NodePoolTarget. All NodePools must have the same location settings.
- # If a nodePoolTarget is not specified, Dataproc constructs a default
- # nodePoolTarget.
+ # Optional. GKE node pools where workloads will be scheduled. At least one
+ # node pool must be assigned the `DEFAULT`
+ # {::Google::Cloud::Dataproc::V1::GkeNodePoolTarget::Role GkeNodePoolTarget.Role}.
+ # If a `GkeNodePoolTarget` is not specified, Dataproc constructs a `DEFAULT`
+ # `GkeNodePoolTarget`. Each role can be given to only one
+ # `GkeNodePoolTarget`. All node pools must have the same location settings.
  class GkeClusterConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
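
As a rough illustration of how `UsageMetrics` maps to billing, the sketch below converts the accumulated totals into unit-hours; the per-unit rates are placeholders for illustration only, not published prices (see the Dataproc Serverless pricing page). It continues the `batch` sketch above:

    usage = batch.runtime_info.approximate_usage

    # milli_dcu_seconds -> DCU-hours; shuffle_storage_gb_seconds -> GB-hours.
    dcu_hours     = usage.milli_dcu_seconds / 1000.0 / 3600.0
    shuffle_hours = usage.shuffle_storage_gb_seconds / 3600.0

    # Placeholder rates; look up current pricing instead of relying on these.
    estimated_cost = dcu_hours * 0.06 + shuffle_hours * 0.0001
    puts format("~%.4f USD (estimate)", estimated_cost)
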
@@ -156,16 +224,17 @@ module Google
  # The configuration for running the Dataproc cluster on Kubernetes.
  # @!attribute [rw] kubernetes_namespace
  # @return [::String]
- # Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
- # does not exist, it is created. If it exists, Dataproc
- # verifies that another Dataproc VirtualCluster is not installed
- # into it. If not specified, the name of the Dataproc Cluster is used.
+ # Optional. A namespace within the Kubernetes cluster to deploy into. If this
+ # namespace does not exist, it is created. If it exists, Dataproc verifies
+ # that another Dataproc VirtualCluster is not installed into it. If not
+ # specified, the name of the Dataproc Cluster is used.
  # @!attribute [rw] gke_cluster_config
  # @return [::Google::Cloud::Dataproc::V1::GkeClusterConfig]
  # Required. The configuration for running the Dataproc cluster on GKE.
  # @!attribute [rw] kubernetes_software_config
  # @return [::Google::Cloud::Dataproc::V1::KubernetesSoftwareConfig]
- # Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ # Optional. The software configuration for this Dataproc cluster running on
+ # Kubernetes.
  class KubernetesClusterConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
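
Putting the pieces above together, a `KubernetesClusterConfig` nests the GKE cluster config and an optional namespace. A minimal sketch; the project, cluster, namespace, and component version values are placeholders:

    kubernetes_cluster_config = Google::Cloud::Dataproc::V1::KubernetesClusterConfig.new(
      kubernetes_namespace: "dataproc-ns",   # created if it does not already exist
      gke_cluster_config: {
        gke_cluster_target: "projects/my-project/locations/us-central1/clusters/my-gke-cluster"
      },
      kubernetes_software_config: {
        component_version: { "SPARK" => "3.1-dataproc-7" }  # placeholder version string
      }
    )
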
@@ -213,55 +282,62 @@ module Google
  end
  end

- # GKE NodePools that Dataproc workloads run on.
+ # GKE node pools that Dataproc workloads run on.
  # @!attribute [rw] node_pool
  # @return [::String]
- # Required. The target GKE NodePool.
+ # Required. The target GKE node pool.
  # Format:
  # 'projects/\\{project}/locations/\\{location}/clusters/\\{cluster}/nodePools/\\{node_pool}'
  # @!attribute [rw] roles
  # @return [::Array<::Google::Cloud::Dataproc::V1::GkeNodePoolTarget::Role>]
- # Required. The types of role for a GKE NodePool
+ # Required. The roles associated with the GKE node pool.
  # @!attribute [rw] node_pool_config
  # @return [::Google::Cloud::Dataproc::V1::GkeNodePoolConfig]
- # Optional. The configuration for the GKE NodePool.
+ # Input only. The configuration for the GKE node pool.
  #
- # If specified, Dataproc attempts to create a NodePool with the
+ # If specified, Dataproc attempts to create a node pool with the
  # specified shape. If one with the same name already exists, it is
  # verified against all specified fields. If a field differs, the
  # virtual cluster creation will fail.
  #
- # If omitted, any NodePool with the specified name is used. If a
- # NodePool with the specified name does not exist, Dataproc create a NodePool
- # with default values.
+ # If omitted, any node pool with the specified name is used. If a
+ # node pool with the specified name does not exist, Dataproc creates a
+ # node pool with default values.
+ #
+ # This is an input only field. It will not be returned by the API.
  class GkeNodePoolTarget
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

- # `Role` specifies whose tasks will run on the NodePool. The roles can be
- # specific to workloads. Exactly one GkeNodePoolTarget within the
- # VirtualCluster must have 'default' role, which is used to run all workloads
- # that are not associated with a NodePool.
+ # `Role` specifies the tasks that will run on the node pool. Roles can be
+ # specific to workloads. Exactly one
+ # {::Google::Cloud::Dataproc::V1::GkeNodePoolTarget GkeNodePoolTarget} within the
+ # virtual cluster must have the `DEFAULT` role, which is used to run all
+ # workloads that are not associated with a node pool.
  module Role
  # Role is unspecified.
  ROLE_UNSPECIFIED = 0

- # Any roles that are not directly assigned to a NodePool run on the
- # `default` role's NodePool.
+ # At least one node pool must have the `DEFAULT` role.
+ # Work assigned to a role that is not associated with a node pool
+ # is assigned to the node pool with the `DEFAULT` role. For example,
+ # work assigned to the `CONTROLLER` role will be assigned to the node pool
+ # with the `DEFAULT` role if no node pool has the `CONTROLLER` role.
  DEFAULT = 1

- # Run controllers and webhooks.
+ # Run work associated with the Dataproc control plane (for example,
+ # controllers and webhooks). Very low resource requirements.
  CONTROLLER = 2

- # Run spark driver.
+ # Run work associated with a Spark driver of a job.
  SPARK_DRIVER = 3

- # Run spark executors.
+ # Run work associated with a Spark executor of a job.
  SPARK_EXECUTOR = 4
  end
  end

- # The configuration of a GKE NodePool used by a [Dataproc-on-GKE
+ # The configuration of a GKE node pool used by a [Dataproc-on-GKE
  # cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
  # @!attribute [rw] config
  # @return [::Google::Cloud::Dataproc::V1::GkeNodePoolConfig::GkeNodeConfig]
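
A sketch of node pool targets that follows the role rules above: exactly one target carries the `DEFAULT` role, and each role appears on at most one target. The resource names are hypothetical:

    default_pool = Google::Cloud::Dataproc::V1::GkeNodePoolTarget.new(
      node_pool: "projects/my-project/locations/us-central1/clusters/my-gke-cluster/nodePools/default-pool",
      roles: [:DEFAULT]            # required on exactly one target
    )

    driver_pool = Google::Cloud::Dataproc::V1::GkeNodePoolTarget.new(
      node_pool: "projects/my-project/locations/us-central1/clusters/my-gke-cluster/nodePools/driver-pool",
      roles: [:SPARK_DRIVER]       # unassigned roles fall back to the DEFAULT pool
    )
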
@@ -270,16 +346,19 @@ module Google
  # @return [::Array<::String>]
  # Optional. The list of Compute Engine
  # [zones](https://cloud.google.com/compute/docs/zones#available) where
- # NodePool's nodes will be located.
+ # node pool nodes associated with a Dataproc on GKE virtual cluster
+ # will be located.
  #
- # **Note:** Currently, only one zone may be specified.
+ # **Note:** All node pools associated with a virtual cluster
+ # must be located in the same region as the virtual cluster, and they must
+ # be located in the same zone within that region.
  #
- # If a location is not specified during NodePool creation, Dataproc will
- # choose a location.
+ # If a location is not specified during node pool creation, Dataproc on GKE
+ # will choose the zone.
  # @!attribute [rw] autoscaling
  # @return [::Google::Cloud::Dataproc::V1::GkeNodePoolConfig::GkeNodePoolAutoscalingConfig]
- # Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
- # only when a valid configuration is present.
+ # Optional. The autoscaler configuration for this node pool. The autoscaler
+ # is enabled only when a valid configuration is present.
  class GkeNodePoolConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -289,15 +368,23 @@ module Google
  # @return [::String]
  # Optional. The name of a Compute Engine [machine
  # type](https://cloud.google.com/compute/docs/machine-types).
- # @!attribute [rw] preemptible
- # @return [::Boolean]
- # Optional. Whether the nodes are created as [preemptible VM
- # instances](https://cloud.google.com/compute/docs/instances/preemptible).
  # @!attribute [rw] local_ssd_count
  # @return [::Integer]
- # Optional. The number of local SSD disks to attach to the node, which is limited by
- # the maximum number of disks allowable per zone (see [Adding Local
- # SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ # Optional. The number of local SSD disks to attach to the node, which is
+ # limited by the maximum number of disks allowable per zone (see [Adding
+ # Local SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ # @!attribute [rw] preemptible
+ # @return [::Boolean]
+ # Optional. Whether the nodes are created as legacy [preemptible VM
+ # instances] (https://cloud.google.com/compute/docs/instances/preemptible).
+ # Also see
+ # {::Google::Cloud::Dataproc::V1::GkeNodePoolConfig::GkeNodeConfig#spot Spot}
+ # VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot
+ # preemptible nodes cannot be used in a node pool with the `CONTROLLER`
+ # [role]
+ # (/dataproc/docs/reference/rest/v1/projects.regions.clusters#role)
+ # or in the DEFAULT node pool if the CONTROLLER role is not assigned (the
+ # DEFAULT node pool will assume the CONTROLLER role).
  # @!attribute [rw] accelerators
  # @return [::Array<::Google::Cloud::Dataproc::V1::GkeNodePoolConfig::GkeNodePoolAcceleratorConfig>]
  # Optional. A list of [hardware
@@ -310,19 +397,43 @@ module Google
  # to be used by this instance. The instance may be scheduled on the
  # specified or a newer CPU platform. Specify the friendly names of CPU
  # platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ # @!attribute [rw] boot_disk_kms_key
+ # @return [::String]
+ # Optional. The [Customer Managed Encryption Key (CMEK)]
+ # (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek)
+ # used to encrypt the boot disk attached to each node in the node pool.
+ # Specify the key using the following format:
+ # <code>projects/<var>KEY_PROJECT_ID</var>/locations/<var>LOCATION</var>/keyRings/<var>RING_NAME</var>/cryptoKeys/<var>KEY_NAME</var></code>.
+ # @!attribute [rw] spot
+ # @return [::Boolean]
+ # Optional. Whether the nodes are created as [Spot VM instances]
+ # (https://cloud.google.com/compute/docs/instances/spot).
+ # Spot VMs are the latest update to legacy
+ # [preemptible
+ # VMs][google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.preemptible].
+ # Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible
+ # nodes cannot be used in a node pool with the `CONTROLLER`
+ # [role](/dataproc/docs/reference/rest/v1/projects.regions.clusters#role)
+ # or in the DEFAULT node pool if the CONTROLLER role is not assigned (the
+ # DEFAULT node pool will assume the CONTROLLER role).
  class GkeNodeConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request
- # for a NodePool.
+ # for a node pool.
  # @!attribute [rw] accelerator_count
  # @return [::Integer]
  # The number of accelerator cards exposed to an instance.
  # @!attribute [rw] accelerator_type
  # @return [::String]
  # The accelerator type resource name (see GPUs on Compute Engine).
+ # @!attribute [rw] gpu_partition_size
+ # @return [::String]
+ # Size of partitions to create on the GPU. Valid values are described in
+ # the NVIDIA [mig user
+ # guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
  class GkeNodePoolAcceleratorConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
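
A sketch of the new `GkeNodeConfig` fields; the key ring, project, machine type, and accelerator values are placeholders. Per the docs above, `spot`/`preemptible` nodes should not be combined with the `CONTROLLER` role:

    node_config = Google::Cloud::Dataproc::V1::GkeNodePoolConfig::GkeNodeConfig.new(
      machine_type: "n1-standard-8",
      spot: true,                    # Spot VMs: preemptible, no maximum lifetime
      boot_disk_kms_key: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
      accelerators: [
        { accelerator_count: 1,
          accelerator_type: "nvidia-tesla-a100",
          gpu_partition_size: "1g.5gb" }   # MIG partition size
      ]
    )
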
@@ -332,11 +443,12 @@ module Google
  # adjust the size of the node pool to the current cluster usage.
  # @!attribute [rw] min_node_count
  # @return [::Integer]
- # The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ # The minimum number of nodes in the node pool. Must be >= 0 and <=
  # max_node_count.
  # @!attribute [rw] max_node_count
  # @return [::Integer]
- # The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ # The maximum number of nodes in the node pool. Must be >= min_node_count,
+ # and must be > 0.
  # **Note:** Quota must be sufficient to scale up the cluster.
  class GkeNodePoolAutoscalingConfig
  include ::Google::Protobuf::MessageExts
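
Building on the `node_config` sketch above, node config, location, and the autoscaler come together in `GkeNodePoolConfig`; the zone and bounds are arbitrary examples (`max_node_count` must be > 0 and >= `min_node_count`):

    node_pool_config = Google::Cloud::Dataproc::V1::GkeNodePoolConfig.new(
      config: node_config,                   # GkeNodeConfig from the previous sketch
      locations: ["us-central1-a"],          # zone for the pool's nodes
      autoscaling: { min_node_count: 0, max_node_count: 10 }
    )
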
@@ -371,12 +483,18 @@ module Google
  # The Hive Web HCatalog (the REST service for accessing HCatalog).
  HIVE_WEBHCAT = 3

+ # Hudi.
+ HUDI = 18
+
  # The Jupyter Notebook.
  JUPYTER = 1

  # The Presto query engine.
  PRESTO = 6

+ # The Trino query engine.
+ TRINO = 17
+
  # The Ranger service.
  RANGER = 12

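
On a standard cluster, the new `TRINO` and `HUDI` components are requested through `SoftwareConfig#optional_components`, assuming an image version that ships them; a minimal sketch with a placeholder image version:

    software_config = Google::Cloud::Dataproc::V1::SoftwareConfig.new(
      image_version: "2.1",                  # placeholder; pick an image that supports these
      optional_components: [:TRINO, :HUDI]
    )
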
@@ -193,8 +193,8 @@ module Google
  #
  # The step id is used as prefix for job id, as job
  # `goog-dataproc-workflow-step-id` label, and in
- # {::Google::Cloud::Dataproc::V1::OrderedJob#prerequisite_step_ids prerequisiteStepIds} field from other
- # steps.
+ # {::Google::Cloud::Dataproc::V1::OrderedJob#prerequisite_step_ids prerequisiteStepIds}
+ # field from other steps.
  #
  # The id must contain only letters (a-z, A-Z), numbers (0-9),
  # underscores (_), and hyphens (-). Cannot begin or end with underscore
@@ -280,10 +280,10 @@ module Google
  # A field is allowed to appear in at most one parameter's list of field
  # paths.
  #
- # A field path is similar in syntax to a {::Google::Protobuf::FieldMask google.protobuf.FieldMask}.
- # For example, a field path that references the zone field of a workflow
- # template's cluster selector would be specified as
- # `placement.clusterSelector.zone`.
+ # A field path is similar in syntax to a
+ # {::Google::Protobuf::FieldMask google.protobuf.FieldMask}. For example, a
+ # field path that references the zone field of a workflow template's cluster
+ # selector would be specified as `placement.clusterSelector.zone`.
  #
  # Also, field paths can reference fields using the following syntax:
  #
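
A `TemplateParameter` using the field-path syntax described above; the parameter name and description are hypothetical:

    parameter = Google::Cloud::Dataproc::V1::TemplateParameter.new(
      name: "ZONE",
      description: "Zone referenced by the managed cluster's cluster selector",
      fields: ["placement.clusterSelector.zone"]   # FieldMask-like field path
    )
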
@@ -410,16 +410,19 @@ module Google
  # Output only. The UUID of target cluster.
  # @!attribute [r] dag_timeout
  # @return [::Google::Protobuf::Duration]
- # Output only. The timeout duration for the DAG of jobs, expressed in seconds (see
- # [JSON representation of
+ # Output only. The timeout duration for the DAG of jobs, expressed in seconds
+ # (see [JSON representation of
  # duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
  # @!attribute [r] dag_start_time
  # @return [::Google::Protobuf::Timestamp]
- # Output only. DAG start time, only set for workflows with {::Google::Cloud::Dataproc::V1::WorkflowMetadata#dag_timeout dag_timeout} when DAG
- # begins.
+ # Output only. DAG start time, only set for workflows with
+ # {::Google::Cloud::Dataproc::V1::WorkflowMetadata#dag_timeout dag_timeout} when
+ # DAG begins.
  # @!attribute [r] dag_end_time
  # @return [::Google::Protobuf::Timestamp]
- # Output only. DAG end time, only set for workflows with {::Google::Cloud::Dataproc::V1::WorkflowMetadata#dag_timeout dag_timeout} when DAG ends.
+ # Output only. DAG end time, only set for workflows with
+ # {::Google::Cloud::Dataproc::V1::WorkflowMetadata#dag_timeout dag_timeout} when
+ # DAG ends.
  class WorkflowMetadata
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
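
These output-only fields are surfaced on the long-running operation returned by `instantiate_workflow_template`; a sketch, assuming a hypothetical template name:

    require "google/cloud/dataproc/v1"

    client = Google::Cloud::Dataproc::V1::WorkflowTemplateService::Client.new
    operation = client.instantiate_workflow_template(
      name: "projects/my-project/regions/us-central1/workflowTemplates/my-template"
    )
    operation.wait_until_done!

    metadata = operation.metadata       # a WorkflowMetadata message
    puts metadata.dag_timeout&.seconds  # timeout for the DAG of jobs, if configured
    puts metadata.dag_start_time        # only set when dag_timeout is configured
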
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-dataproc-v1
  version: !ruby/object:Gem::Version
- version: 0.14.0
+ version: 0.16.0
  platform: ruby
  authors:
  - Google LLC
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-12-14 00:00:00.000000000 Z
+ date: 2023-02-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0.12'
+ version: 0.17.1
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.a
@@ -26,7 +26,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0.12'
+ version: 0.17.1
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.a
@@ -44,20 +44,40 @@ dependencies:
  - - "~>"
  - !ruby/object:Gem::Version
  version: '1.0'
+ - !ruby/object:Gem::Dependency
+ name: google-iam-v1
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0.4'
+ - - "<"
+ - !ruby/object:Gem::Version
+ version: 2.a
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0.4'
+ - - "<"
+ - !ruby/object:Gem::Version
+ version: 2.a
  - !ruby/object:Gem::Dependency
  name: google-style
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 1.26.1
+ version: 1.26.3
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 1.26.1
+ version: 1.26.3
  - !ruby/object:Gem::Dependency
  name: minitest
  requirement: !ruby/object:Gem::Requirement
@@ -253,7 +273,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.3.14
+ rubygems_version: 3.4.2
  signing_key:
  specification_version: 4
  summary: API Client library for the Cloud Dataproc V1 API
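
To pick up these changes in an application, a typical Gemfile pin might look like the following; Bundler then resolves the gapic-common (>= 0.17.1) and google-iam-v1 requirements declared in the gemspec above:

    # Gemfile
    source "https://rubygems.org"

    gem "google-cloud-dataproc-v1", "~> 0.16"
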