google-cloud-dataproc-v1 1.0.2 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +31 -21
  3. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/client.rb +36 -6
  4. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/rest/client.rb +36 -6
  5. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/rest/service_stub.rb +54 -32
  6. data/lib/google/cloud/dataproc/v1/batch_controller/client.rb +37 -5
  7. data/lib/google/cloud/dataproc/v1/batch_controller/operations.rb +19 -15
  8. data/lib/google/cloud/dataproc/v1/batch_controller/rest/client.rb +37 -5
  9. data/lib/google/cloud/dataproc/v1/batch_controller/rest/operations.rb +50 -38
  10. data/lib/google/cloud/dataproc/v1/batch_controller/rest/service_stub.rb +46 -26
  11. data/lib/google/cloud/dataproc/v1/batches_pb.rb +1 -1
  12. data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb +53 -17
  13. data/lib/google/cloud/dataproc/v1/cluster_controller/operations.rb +19 -15
  14. data/lib/google/cloud/dataproc/v1/cluster_controller/paths.rb +21 -0
  15. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/client.rb +53 -17
  16. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/operations.rb +50 -38
  17. data/lib/google/cloud/dataproc/v1/cluster_controller/rest/service_stub.rb +78 -50
  18. data/lib/google/cloud/dataproc/v1/clusters_pb.rb +3 -1
  19. data/lib/google/cloud/dataproc/v1/job_controller/client.rb +37 -8
  20. data/lib/google/cloud/dataproc/v1/job_controller/operations.rb +19 -15
  21. data/lib/google/cloud/dataproc/v1/job_controller/rest/client.rb +37 -8
  22. data/lib/google/cloud/dataproc/v1/job_controller/rest/operations.rb +50 -38
  23. data/lib/google/cloud/dataproc/v1/job_controller/rest/service_stub.rb +70 -44
  24. data/lib/google/cloud/dataproc/v1/jobs_pb.rb +2 -1
  25. data/lib/google/cloud/dataproc/v1/node_group_controller/client.rb +37 -4
  26. data/lib/google/cloud/dataproc/v1/node_group_controller/operations.rb +19 -15
  27. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/client.rb +37 -4
  28. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/operations.rb +50 -38
  29. data/lib/google/cloud/dataproc/v1/node_group_controller/rest/service_stub.rb +38 -20
  30. data/lib/google/cloud/dataproc/v1/session_controller/client.rb +43 -9
  31. data/lib/google/cloud/dataproc/v1/session_controller/operations.rb +19 -15
  32. data/lib/google/cloud/dataproc/v1/session_controller/rest/client.rb +43 -9
  33. data/lib/google/cloud/dataproc/v1/session_controller/rest/operations.rb +50 -38
  34. data/lib/google/cloud/dataproc/v1/session_controller/rest/service_stub.rb +54 -32
  35. data/lib/google/cloud/dataproc/v1/session_template_controller/client.rb +36 -6
  36. data/lib/google/cloud/dataproc/v1/session_template_controller/rest/client.rb +36 -6
  37. data/lib/google/cloud/dataproc/v1/session_template_controller/rest/service_stub.rb +54 -32
  38. data/lib/google/cloud/dataproc/v1/session_templates_pb.rb +1 -1
  39. data/lib/google/cloud/dataproc/v1/sessions_pb.rb +2 -1
  40. data/lib/google/cloud/dataproc/v1/shared_pb.rb +1 -1
  41. data/lib/google/cloud/dataproc/v1/version.rb +1 -1
  42. data/lib/google/cloud/dataproc/v1/workflow_template_service/client.rb +38 -8
  43. data/lib/google/cloud/dataproc/v1/workflow_template_service/operations.rb +19 -15
  44. data/lib/google/cloud/dataproc/v1/workflow_template_service/paths.rb +21 -0
  45. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/client.rb +38 -8
  46. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/operations.rb +50 -38
  47. data/lib/google/cloud/dataproc/v1/workflow_template_service/rest/service_stub.rb +70 -44
  48. data/lib/google/cloud/dataproc/v1/workflow_templates_pb.rb +2 -1
  49. data/proto_docs/google/api/client.rb +47 -0
  50. data/proto_docs/google/cloud/dataproc/v1/batches.rb +17 -0
  51. data/proto_docs/google/cloud/dataproc/v1/clusters.rb +129 -23
  52. data/proto_docs/google/cloud/dataproc/v1/jobs.rb +130 -23
  53. data/proto_docs/google/cloud/dataproc/v1/session_templates.rb +7 -0
  54. data/proto_docs/google/cloud/dataproc/v1/sessions.rb +17 -3
  55. data/proto_docs/google/cloud/dataproc/v1/shared.rb +9 -4
  56. data/proto_docs/google/cloud/dataproc/v1/workflow_templates.rb +79 -0
  57. data/proto_docs/google/longrunning/operations.rb +23 -14
  58. metadata +6 -9
@@ -28,6 +28,9 @@ module Google
28
28
  # @!attribute [rw] destinations
29
29
  # @return [::Array<::Google::Api::ClientLibraryDestination>]
30
30
  # The destination where API teams want this client library to be published.
31
+ # @!attribute [rw] selective_gapic_generation
32
+ # @return [::Google::Api::SelectiveGapicGeneration]
33
+ # Configuration for which RPCs should be generated in the GAPIC client.
31
34
  class CommonLanguageSettings
32
35
  include ::Google::Protobuf::MessageExts
33
36
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -212,6 +215,12 @@ module Google
212
215
  # enabled. By default, asynchronous REST clients will not be generated.
213
216
  # This feature will be enabled by default 1 month after launching the
214
217
  # feature in preview packages.
218
+ # @!attribute [rw] protobuf_pythonic_types_enabled
219
+ # @return [::Boolean]
220
+ # Enables generation of protobuf code using new types that are more
221
+ # Pythonic which are included in `protobuf>=5.29.x`. This feature will be
222
+ # enabled by default 1 month after launching the feature in preview
223
+ # packages.
215
224
  class ExperimentalFeatures
216
225
  include ::Google::Protobuf::MessageExts
217
226
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -297,9 +306,28 @@ module Google
297
306
  # @!attribute [rw] common
298
307
  # @return [::Google::Api::CommonLanguageSettings]
299
308
  # Some settings.
309
+ # @!attribute [rw] renamed_services
310
+ # @return [::Google::Protobuf::Map{::String => ::String}]
311
+ # Map of service names to renamed services. Keys are the package relative
312
+ # service names and values are the name to be used for the service client
313
+ # and call options.
314
+ #
315
+ # publishing:
316
+ # go_settings:
317
+ # renamed_services:
318
+ # Publisher: TopicAdmin
300
319
  class GoSettings
301
320
  include ::Google::Protobuf::MessageExts
302
321
  extend ::Google::Protobuf::MessageExts::ClassMethods
322
+
323
+ # @!attribute [rw] key
324
+ # @return [::String]
325
+ # @!attribute [rw] value
326
+ # @return [::String]
327
+ class RenamedServicesEntry
328
+ include ::Google::Protobuf::MessageExts
329
+ extend ::Google::Protobuf::MessageExts::ClassMethods
330
+ end
303
331
  end
304
332
 
305
333
  # Describes the generator configuration for a method.
@@ -375,6 +403,25 @@ module Google
375
403
  end
376
404
  end
377
405
 
406
+ # This message is used to configure the generation of a subset of the RPCs in
407
+ # a service for client libraries.
408
+ # @!attribute [rw] methods
409
+ # @return [::Array<::String>]
410
+ # An allowlist of the fully qualified names of RPCs that should be included
411
+ # on public client surfaces.
412
+ # @!attribute [rw] generate_omitted_as_internal
413
+ # @return [::Boolean]
414
+ # Setting this to true indicates to the client generators that methods
415
+ # that would be excluded from the generation should instead be generated
416
+ # in a way that indicates these methods should not be consumed by
417
+ # end users. How this is expressed is up to individual language
418
+ # implementations to decide. Some examples may be: added annotations,
419
+ # obfuscated identifiers, or other language idiomatic patterns.
420
+ class SelectiveGapicGeneration
421
+ include ::Google::Protobuf::MessageExts
422
+ extend ::Google::Protobuf::MessageExts::ClassMethods
423
+ end
424
+
378
425
  # The organization for which the client libraries are being published.
379
426
  # Affects the url where generated docs are published, etc.
380
427
  module ClientLibraryOrganization
@@ -112,6 +112,11 @@ module Google
112
112
  # @return [::String]
113
113
  # A token, which can be sent as `page_token` to retrieve the next page.
114
114
  # If this field is omitted, there are no subsequent pages.
115
+ # @!attribute [r] unreachable
116
+ # @return [::Array<::String>]
117
+ # Output only. List of Batches that could not be included in the response.
118
+ # Attempting to get one of these resources may indicate why it was not
119
+ # included in the list response.
115
120
  class ListBatchesResponse
116
121
  include ::Google::Protobuf::MessageExts
117
122
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -142,15 +147,23 @@ module Google
142
147
  # @!attribute [rw] pyspark_batch
143
148
  # @return [::Google::Cloud::Dataproc::V1::PySparkBatch]
144
149
  # Optional. PySpark batch config.
150
+ #
151
+ # Note: The following fields are mutually exclusive: `pyspark_batch`, `spark_batch`, `spark_r_batch`, `spark_sql_batch`. If a field in that set is populated, all other fields in the set will automatically be cleared.
145
152
  # @!attribute [rw] spark_batch
146
153
  # @return [::Google::Cloud::Dataproc::V1::SparkBatch]
147
154
  # Optional. Spark batch config.
155
+ #
156
+ # Note: The following fields are mutually exclusive: `spark_batch`, `pyspark_batch`, `spark_r_batch`, `spark_sql_batch`. If a field in that set is populated, all other fields in the set will automatically be cleared.
148
157
  # @!attribute [rw] spark_r_batch
149
158
  # @return [::Google::Cloud::Dataproc::V1::SparkRBatch]
150
159
  # Optional. SparkR batch config.
160
+ #
161
+ # Note: The following fields are mutually exclusive: `spark_r_batch`, `pyspark_batch`, `spark_batch`, `spark_sql_batch`. If a field in that set is populated, all other fields in the set will automatically be cleared.
151
162
  # @!attribute [rw] spark_sql_batch
152
163
  # @return [::Google::Cloud::Dataproc::V1::SparkSqlBatch]
153
164
  # Optional. SparkSql batch config.
165
+ #
166
+ # Note: The following fields are mutually exclusive: `spark_sql_batch`, `pyspark_batch`, `spark_batch`, `spark_r_batch`. If a field in that set is populated, all other fields in the set will automatically be cleared.
154
167
  # @!attribute [r] runtime_info
155
168
  # @return [::Google::Cloud::Dataproc::V1::RuntimeInfo]
156
169
  # Output only. Runtime information about batch execution.
@@ -281,10 +294,14 @@ module Google
281
294
  # @!attribute [rw] main_jar_file_uri
282
295
  # @return [::String]
283
296
  # Optional. The HCFS URI of the jar file that contains the main class.
297
+ #
298
+ # Note: The following fields are mutually exclusive: `main_jar_file_uri`, `main_class`. If a field in that set is populated, all other fields in the set will automatically be cleared.
284
299
  # @!attribute [rw] main_class
285
300
  # @return [::String]
286
301
  # Optional. The name of the driver main class. The jar file that contains
287
302
  # the class must be in the classpath or specified in `jar_file_uris`.
303
+ #
304
+ # Note: The following fields are mutually exclusive: `main_class`, `main_jar_file_uri`. If a field in that set is populated, all other fields in the set will automatically be cleared.
288
305
  # @!attribute [rw] args
289
306
  # @return [::Array<::String>]
290
307
  # Optional. The arguments to pass to the driver. Do not include arguments
@@ -263,8 +263,39 @@ module Google
263
263
  # Encryption settings for the cluster.
264
264
  # @!attribute [rw] gce_pd_kms_key_name
265
265
  # @return [::String]
266
- # Optional. The Cloud KMS key name to use for PD disk encryption for all
267
- # instances in the cluster.
266
+ # Optional. The Cloud KMS key resource name to use for persistent disk
267
+ # encryption for all instances in the cluster. See [Use CMEK with cluster
268
+ # data]
269
+ # (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data)
270
+ # for more information.
271
+ # @!attribute [rw] kms_key
272
+ # @return [::String]
273
+ # Optional. The Cloud KMS key resource name to use for cluster persistent
274
+ # disk and job argument encryption. See [Use CMEK with cluster data]
275
+ # (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data)
276
+ # for more information.
277
+ #
278
+ # When this key resource name is provided, the following job arguments of
279
+ # the following job types submitted to the cluster are encrypted using CMEK:
280
+ #
281
+ # * [FlinkJob
282
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
283
+ # * [HadoopJob
284
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
285
+ # * [SparkJob
286
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
287
+ # * [SparkRJob
288
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
289
+ # * [PySparkJob
290
+ # args](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
291
+ # * [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
292
+ # scriptVariables and queryList.queries
293
+ # * [HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
294
+ # scriptVariables and queryList.queries
295
+ # * [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
296
+ # scriptVariables and queryList.queries
297
+ # * [PrestoJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
298
+ # scriptVariables and queryList.queries
268
299
  class EncryptionConfig
269
300
  include ::Google::Protobuf::MessageExts
270
301
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -309,12 +340,22 @@ module Google
309
340
  # * `sub0`
310
341
  # @!attribute [rw] internal_ip_only
311
342
  # @return [::Boolean]
312
- # Optional. If true, all instances in the cluster will only have internal IP
313
- # addresses. By default, clusters are not restricted to internal IP
314
- # addresses, and will have ephemeral external IP addresses assigned to each
315
- # instance. This `internal_ip_only` restriction can only be enabled for
316
- # subnetwork enabled networks, and all off-cluster dependencies must be
317
- # configured to be accessible without external IP addresses.
343
+ # Optional. This setting applies to subnetwork-enabled networks. It is set to
344
+ # `true` by default in clusters created with image versions 2.2.x.
345
+ #
346
+ # When set to `true`:
347
+ #
348
+ # * All cluster VMs have internal IP addresses.
349
+ # * [Private Google Access]
350
+ # (https://cloud.google.com/vpc/docs/private-google-access)
351
+ # must be enabled to access Dataproc and other Google Cloud APIs.
352
+ # * Off-cluster dependencies must be configured to be accessible
353
+ # without external IP addresses.
354
+ #
355
+ # When set to `false`:
356
+ #
357
+ # * Cluster VMs are not restricted to internal IP addresses.
358
+ # * Ephemeral external IP addresses are assigned to each cluster VM.
318
359
  # @!attribute [rw] private_ipv6_google_access
319
360
  # @return [::Google::Cloud::Dataproc::V1::GceClusterConfig::PrivateIpv6GoogleAccess]
320
361
  # Optional. The type of IPv6 access for a cluster.
@@ -349,8 +390,8 @@ module Google
349
390
  # * https://www.googleapis.com/auth/devstorage.full_control
350
391
  # @!attribute [rw] tags
351
392
  # @return [::Array<::String>]
352
- # The Compute Engine tags to add to all instances (see [Tagging
353
- # instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
393
+ # The Compute Engine network tags to add to all instances (see [Tagging
394
+ # instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)).
354
395
  # @!attribute [rw] metadata
355
396
  # @return [::Google::Protobuf::Map{::String => ::String}]
356
397
  # Optional. The Compute Engine metadata entries to add to all instances (see
@@ -651,6 +692,10 @@ module Google
651
692
 
652
693
  # Instance flexibility Policy allowing a mixture of VM shapes and provisioning
653
694
  # models.
695
+ # @!attribute [rw] provisioning_model_mix
696
+ # @return [::Google::Cloud::Dataproc::V1::InstanceFlexibilityPolicy::ProvisioningModelMix]
697
+ # Optional. Defines how the Group selects the provisioning model to ensure
698
+ # required reliability.
654
699
  # @!attribute [rw] instance_selection_list
655
700
  # @return [::Array<::Google::Cloud::Dataproc::V1::InstanceFlexibilityPolicy::InstanceSelection>]
656
701
  # Optional. List of instance selection options that the group will use when
@@ -662,6 +707,31 @@ module Google
662
707
  include ::Google::Protobuf::MessageExts
663
708
  extend ::Google::Protobuf::MessageExts::ClassMethods
664
709
 
710
+ # Defines how Dataproc should create VMs with a mixture of provisioning
711
+ # models.
712
+ # @!attribute [rw] standard_capacity_base
713
+ # @return [::Integer]
714
+ # Optional. The base capacity that will always use Standard VMs to avoid
715
+ # risk of more preemption than the minimum capacity you need. Dataproc will
716
+ # create only standard VMs until it reaches standard_capacity_base, then it
717
+ # will start using standard_capacity_percent_above_base to mix Spot with
718
+ # Standard VMs. For example, if 15 instances are requested and
719
+ # standard_capacity_base is 5, Dataproc will create 5 standard VMs and then
720
+ # start mixing spot and standard VMs for remaining 10 instances.
721
+ # @!attribute [rw] standard_capacity_percent_above_base
722
+ # @return [::Integer]
723
+ # Optional. The percentage of target capacity that should use Standard VM.
724
+ # The remaining percentage will use Spot VMs. The percentage applies only
725
+ # to the capacity above standard_capacity_base. For example, if 15 instances are
726
+ # requested and standard_capacity_base is 5 and
727
+ # standard_capacity_percent_above_base is 30, Dataproc will create 5
728
+ # standard VMs and then start mixing spot and standard VMs for remaining 10
729
+ # instances. The mix will be 30% standard and 70% spot.
730
+ class ProvisioningModelMix
731
+ include ::Google::Protobuf::MessageExts
732
+ extend ::Google::Protobuf::MessageExts::ClassMethods
733
+ end
734
+
665
735
  # Defines machines types and a rank to which the machines types belong.
666
736
  # @!attribute [rw] machine_types
667
737
  # @return [::Array<::String>]
@@ -704,15 +774,15 @@ module Google
704
774
  #
705
775
  # Examples:
706
776
  #
707
- # * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80`
708
- # * `projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80`
709
- # * `nvidia-tesla-k80`
777
+ # * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4`
778
+ # * `projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4`
779
+ # * `nvidia-tesla-t4`
710
780
  #
711
781
  # **Auto Zone Exception**: If you are using the Dataproc
712
782
  # [Auto Zone
713
783
  # Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
714
784
  # feature, you must use the short name of the accelerator type
715
- # resource, for example, `nvidia-tesla-k80`.
785
+ # resource, for example, `nvidia-tesla-t4`.
716
786
  # @!attribute [rw] accelerator_count
717
787
  # @return [::Integer]
718
788
  # The number of the accelerator cards of this type exposed to this instance.
@@ -750,6 +820,17 @@ module Google
750
820
  # "nvme" (Non-Volatile Memory Express).
751
821
  # See [local SSD
752
822
  # performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
823
+ # @!attribute [rw] boot_disk_provisioned_iops
824
+ # @return [::Integer]
825
+ # Optional. Indicates how many IOPS to provision for the disk. This sets the
826
+ # number of I/O operations per second that the disk can handle. Note: This
827
+ # field is only supported if boot_disk_type is hyperdisk-balanced.
828
+ # @!attribute [rw] boot_disk_provisioned_throughput
829
+ # @return [::Integer]
830
+ # Optional. Indicates how much throughput to provision for the disk. This
831
+ # sets the number of throughput mb per second that the disk can handle.
832
+ # Values must be greater than or equal to 1. Note: This field is only
833
+ # supported if boot_disk_type is hyperdisk-balanced.
753
834
  class DiskConfig
754
835
  include ::Google::Protobuf::MessageExts
755
836
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -940,7 +1021,7 @@ module Google
940
1021
  # principal password.
941
1022
  # @!attribute [rw] kms_key_uri
942
1023
  # @return [::String]
943
- # Optional. The uri of the KMS key used to encrypt various sensitive
1024
+ # Optional. The URI of the KMS key used to encrypt sensitive
944
1025
  # files.
945
1026
  # @!attribute [rw] keystore_uri
946
1027
  # @return [::String]
@@ -1026,7 +1107,7 @@ module Google
1026
1107
  # @return [::String]
1027
1108
  # Optional. The version of software inside the cluster. It must be one of the
1028
1109
  # supported [Dataproc
1029
- # Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),
1110
+ # Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions),
1030
1111
  # such as "1.2" (including a subminor version, such as "1.2.29"), or the
1031
1112
  # ["preview"
1032
1113
  # version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
@@ -1081,12 +1162,16 @@ module Google
1081
1162
  # Optional. The time when cluster will be auto-deleted (see JSON
1082
1163
  # representation of
1083
1164
  # [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
1165
+ #
1166
+ # Note: The following fields are mutually exclusive: `auto_delete_time`, `auto_delete_ttl`. If a field in that set is populated, all other fields in the set will automatically be cleared.
1084
1167
  # @!attribute [rw] auto_delete_ttl
1085
1168
  # @return [::Google::Protobuf::Duration]
1086
1169
  # Optional. The lifetime duration of cluster. The cluster will be
1087
1170
  # auto-deleted at the end of this period. Minimum value is 10 minutes;
1088
1171
  # maximum value is 14 days (see JSON representation of
1089
1172
  # [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
1173
+ #
1174
+ # Note: The following fields are mutually exclusive: `auto_delete_ttl`, `auto_delete_time`. If a field in that set is populated, all other fields in the set will automatically be cleared.
1090
1175
  # @!attribute [r] idle_start_time
1091
1176
  # @return [::Google::Protobuf::Timestamp]
1092
1177
  # Output only. The time when cluster became idle (most recent job finished)
@@ -1225,6 +1310,9 @@ module Google
1225
1310
 
1226
1311
  # hivemetastore metric source
1227
1312
  HIVEMETASTORE = 7
1313
+
1314
+ # flink metric source
1315
+ FLINK = 8
1228
1316
  end
1229
1317
  end
1230
1318
 
@@ -1494,12 +1582,12 @@ module Google
1494
1582
  # where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
1495
1583
  # and `[KEY]` is a label key. **value** can be `*` to match all values.
1496
1584
  # `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
1497
- # `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
1498
- # contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
1499
- # contains the `DELETING` and `ERROR` states.
1500
- # `clusterName` is the name of the cluster provided at creation time.
1501
- # Only the logical `AND` operator is supported; space-separated items are
1502
- # treated as having an implicit `AND` operator.
1585
+ # `CREATING`, `RUNNING`, `ERROR`, `DELETING`, `UPDATING`, `STOPPING`, or
1586
+ # `STOPPED`. `ACTIVE` contains the `CREATING`, `UPDATING`, and `RUNNING`
1587
+ # states. `INACTIVE` contains the `DELETING`, `ERROR`, `STOPPING`, and
1588
+ # `STOPPED` states. `clusterName` is the name of the cluster provided at
1589
+ # creation time. Only the logical `AND` operator is supported;
1590
+ # space-separated items are treated as having an implicit `AND` operator.
1503
1591
  #
1504
1592
  # Example filter:
1505
1593
  #
@@ -1543,9 +1631,13 @@ module Google
1543
1631
  # Required. The cluster name.
1544
1632
  # @!attribute [rw] tarball_gcs_dir
1545
1633
  # @return [::String]
1546
- # Optional. The output Cloud Storage directory for the diagnostic
1634
+ # Optional. (Optional) The output Cloud Storage directory for the diagnostic
1547
1635
  # tarball. If not specified, a task-specific directory in the cluster's
1548
1636
  # staging bucket will be used.
1637
+ # @!attribute [rw] tarball_access
1638
+ # @return [::Google::Cloud::Dataproc::V1::DiagnoseClusterRequest::TarballAccess]
1639
+ # Optional. (Optional) The access type to the diagnostic tarball. If not
1640
+ # specified, falls back to default access of the bucket
1549
1641
  # @!attribute [rw] diagnosis_interval
1550
1642
  # @return [::Google::Type::Interval]
1551
1643
  # Optional. Time interval in which diagnosis should be carried out on the
@@ -1561,6 +1653,20 @@ module Google
1561
1653
  class DiagnoseClusterRequest
1562
1654
  include ::Google::Protobuf::MessageExts
1563
1655
  extend ::Google::Protobuf::MessageExts::ClassMethods
1656
+
1657
+ # Defines who has access to the diagnostic tarball
1658
+ module TarballAccess
1659
+ # Tarball Access unspecified. Falls back to default access of the bucket
1660
+ TARBALL_ACCESS_UNSPECIFIED = 0
1661
+
1662
+ # Google Cloud Support group has read access to the
1663
+ # diagnostic tarball
1664
+ GOOGLE_CLOUD_SUPPORT = 1
1665
+
1666
+ # Google Cloud Dataproc Diagnose service account has read access to the
1667
+ # diagnostic tarball
1668
+ GOOGLE_DATAPROC_DIAGNOSE = 2
1669
+ end
1564
1670
  end
1565
1671
 
1566
1672
  # The location of diagnostic output.