google-cloud-dataproc-v1 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. checksums.yaml +4 -4
  2. data/AUTHENTICATION.md +8 -8
  3. data/README.md +1 -1
  4. data/lib/google/cloud/dataproc/v1/autoscaling_policies_pb.rb +3 -2
  5. data/lib/google/cloud/dataproc/v1/autoscaling_policies_services_pb.rb +1 -1
  6. data/lib/google/cloud/dataproc/v1/autoscaling_policy_service/client.rb +48 -54
  7. data/lib/google/cloud/dataproc/v1/batch_controller/client.rb +637 -0
  8. data/lib/google/cloud/dataproc/v1/batch_controller/credentials.rb +51 -0
  9. data/lib/google/cloud/dataproc/v1/batch_controller/operations.rb +664 -0
  10. data/lib/google/cloud/dataproc/v1/batch_controller/paths.rb +69 -0
  11. data/lib/google/cloud/dataproc/v1/batch_controller.rb +50 -0
  12. data/lib/google/cloud/dataproc/v1/batches_pb.rb +123 -0
  13. data/lib/google/cloud/dataproc/v1/batches_services_pb.rb +52 -0
  14. data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb +75 -80
  15. data/lib/google/cloud/dataproc/v1/cluster_controller/operations.rb +34 -25
  16. data/lib/google/cloud/dataproc/v1/clusters_pb.rb +9 -2
  17. data/lib/google/cloud/dataproc/v1/clusters_services_pb.rb +3 -1
  18. data/lib/google/cloud/dataproc/v1/job_controller/client.rb +58 -72
  19. data/lib/google/cloud/dataproc/v1/job_controller/operations.rb +34 -25
  20. data/lib/google/cloud/dataproc/v1/jobs_pb.rb +2 -2
  21. data/lib/google/cloud/dataproc/v1/jobs_services_pb.rb +1 -1
  22. data/lib/google/cloud/dataproc/v1/operations_pb.rb +18 -3
  23. data/lib/google/cloud/dataproc/v1/shared_pb.rb +40 -2
  24. data/lib/google/cloud/dataproc/v1/version.rb +1 -1
  25. data/lib/google/cloud/dataproc/v1/workflow_template_service/client.rb +59 -74
  26. data/lib/google/cloud/dataproc/v1/workflow_template_service/operations.rb +34 -25
  27. data/lib/google/cloud/dataproc/v1/workflow_templates_pb.rb +2 -2
  28. data/lib/google/cloud/dataproc/v1/workflow_templates_services_pb.rb +2 -3
  29. data/lib/google/cloud/dataproc/v1.rb +1 -0
  30. data/proto_docs/google/api/field_behavior.rb +7 -1
  31. data/proto_docs/google/cloud/dataproc/v1/autoscaling_policies.rb +18 -0
  32. data/proto_docs/google/cloud/dataproc/v1/batches.rb +339 -0
  33. data/proto_docs/google/cloud/dataproc/v1/clusters.rb +45 -22
  34. data/proto_docs/google/cloud/dataproc/v1/jobs.rb +8 -9
  35. data/proto_docs/google/cloud/dataproc/v1/operations.rb +48 -0
  36. data/proto_docs/google/cloud/dataproc/v1/shared.rb +117 -1
  37. data/proto_docs/google/cloud/dataproc/v1/workflow_templates.rb +11 -14
  38. metadata +21 -7
--- /dev/null
+++ b/data/lib/google/cloud/dataproc/v1/batch_controller/paths.rb
@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+module Google
+  module Cloud
+    module Dataproc
+      module V1
+        module BatchController
+          # Path helper methods for the BatchController API.
+          module Paths
+            ##
+            # Create a fully-qualified Batch resource string.
+            #
+            # The resource will be in the following format:
+            #
+            # `projects/{project}/locations/{location}/batches/{batch}`
+            #
+            # @param project [String]
+            # @param location [String]
+            # @param batch [String]
+            #
+            # @return [::String]
+            def batch_path project:, location:, batch:
+              raise ::ArgumentError, "project cannot contain /" if project.to_s.include? "/"
+              raise ::ArgumentError, "location cannot contain /" if location.to_s.include? "/"
+
+              "projects/#{project}/locations/#{location}/batches/#{batch}"
+            end
+
+            ##
+            # Create a fully-qualified Location resource string.
+            #
+            # The resource will be in the following format:
+            #
+            # `projects/{project}/locations/{location}`
+            #
+            # @param project [String]
+            # @param location [String]
+            #
+            # @return [::String]
+            def location_path project:, location:
+              raise ::ArgumentError, "project cannot contain /" if project.to_s.include? "/"
+
+              "projects/#{project}/locations/#{location}"
+            end
+
+            extend self
+          end
+        end
+      end
+    end
+  end
+end
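Note on usage: `Paths` ends with `extend self`, so the helpers are callable directly on the module as well as from the generated client, which mixes them in. A minimal sketch with placeholder project/location/batch values:

    require "google/cloud/dataproc/v1/batch_controller/paths"

    paths = Google::Cloud::Dataproc::V1::BatchController::Paths

    # => "projects/my-project/locations/us-central1/batches/my-batch"
    paths.batch_path project: "my-project", location: "us-central1", batch: "my-batch"

    # Only project and location are validated: a "/" in either raises
    # ArgumentError, while the batch segment is interpolated as given.
    paths.location_path project: "my-project", location: "us-central1"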
--- /dev/null
+++ b/data/lib/google/cloud/dataproc/v1/batch_controller.rb
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+require "gapic/common"
+require "gapic/config"
+require "gapic/config/method"
+
+require "google/cloud/dataproc/v1/version"
+
+require "google/cloud/dataproc/v1/batch_controller/credentials"
+require "google/cloud/dataproc/v1/batch_controller/paths"
+require "google/cloud/dataproc/v1/batch_controller/operations"
+require "google/cloud/dataproc/v1/batch_controller/client"
+
+module Google
+  module Cloud
+    module Dataproc
+      module V1
+        ##
+        # The BatchController provides methods to manage batch workloads.
+        #
+        # To load this service and instantiate a client:
+        #
+        #     require "google/cloud/dataproc/v1/batch_controller"
+        #     client = ::Google::Cloud::Dataproc::V1::BatchController::Client.new
+        #
+        module BatchController
+        end
+      end
+    end
+  end
+end
+
+helper_path = ::File.join __dir__, "batch_controller", "helpers.rb"
+require "google/cloud/dataproc/v1/batch_controller/helpers" if ::File.file? helper_path
--- /dev/null
+++ b/data/lib/google/cloud/dataproc/v1/batches_pb.rb
@@ -0,0 +1,123 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/cloud/dataproc/v1/batches.proto
+
+require 'google/api/annotations_pb'
+require 'google/api/client_pb'
+require 'google/api/field_behavior_pb'
+require 'google/api/resource_pb'
+require 'google/cloud/dataproc/v1/shared_pb'
+require 'google/longrunning/operations_pb'
+require 'google/protobuf/empty_pb'
+require 'google/protobuf/timestamp_pb'
+require 'google/protobuf'
+
+Google::Protobuf::DescriptorPool.generated_pool.build do
+  add_file("google/cloud/dataproc/v1/batches.proto", :syntax => :proto3) do
+    add_message "google.cloud.dataproc.v1.CreateBatchRequest" do
+      optional :parent, :string, 1
+      optional :batch, :message, 2, "google.cloud.dataproc.v1.Batch"
+      optional :batch_id, :string, 3
+      optional :request_id, :string, 4
+    end
+    add_message "google.cloud.dataproc.v1.GetBatchRequest" do
+      optional :name, :string, 1
+    end
+    add_message "google.cloud.dataproc.v1.ListBatchesRequest" do
+      optional :parent, :string, 1
+      optional :page_size, :int32, 2
+      optional :page_token, :string, 3
+    end
+    add_message "google.cloud.dataproc.v1.ListBatchesResponse" do
+      repeated :batches, :message, 1, "google.cloud.dataproc.v1.Batch"
+      optional :next_page_token, :string, 2
+    end
+    add_message "google.cloud.dataproc.v1.DeleteBatchRequest" do
+      optional :name, :string, 1
+    end
+    add_message "google.cloud.dataproc.v1.Batch" do
+      optional :name, :string, 1
+      optional :uuid, :string, 2
+      optional :create_time, :message, 3, "google.protobuf.Timestamp"
+      optional :runtime_info, :message, 8, "google.cloud.dataproc.v1.RuntimeInfo"
+      optional :state, :enum, 9, "google.cloud.dataproc.v1.Batch.State"
+      optional :state_message, :string, 10
+      optional :state_time, :message, 11, "google.protobuf.Timestamp"
+      optional :creator, :string, 12
+      map :labels, :string, :string, 13
+      optional :runtime_config, :message, 14, "google.cloud.dataproc.v1.RuntimeConfig"
+      optional :environment_config, :message, 15, "google.cloud.dataproc.v1.EnvironmentConfig"
+      optional :operation, :string, 16
+      repeated :state_history, :message, 17, "google.cloud.dataproc.v1.Batch.StateHistory"
+      oneof :batch_config do
+        optional :pyspark_batch, :message, 4, "google.cloud.dataproc.v1.PySparkBatch"
+        optional :spark_batch, :message, 5, "google.cloud.dataproc.v1.SparkBatch"
+        optional :spark_r_batch, :message, 6, "google.cloud.dataproc.v1.SparkRBatch"
+        optional :spark_sql_batch, :message, 7, "google.cloud.dataproc.v1.SparkSqlBatch"
+      end
+    end
+    add_message "google.cloud.dataproc.v1.Batch.StateHistory" do
+      optional :state, :enum, 1, "google.cloud.dataproc.v1.Batch.State"
+      optional :state_message, :string, 2
+      optional :state_start_time, :message, 3, "google.protobuf.Timestamp"
+    end
+    add_enum "google.cloud.dataproc.v1.Batch.State" do
+      value :STATE_UNSPECIFIED, 0
+      value :PENDING, 1
+      value :RUNNING, 2
+      value :CANCELLING, 3
+      value :CANCELLED, 4
+      value :SUCCEEDED, 5
+      value :FAILED, 6
+    end
+    add_message "google.cloud.dataproc.v1.PySparkBatch" do
+      optional :main_python_file_uri, :string, 1
+      repeated :args, :string, 2
+      repeated :python_file_uris, :string, 3
+      repeated :jar_file_uris, :string, 4
+      repeated :file_uris, :string, 5
+      repeated :archive_uris, :string, 6
+    end
+    add_message "google.cloud.dataproc.v1.SparkBatch" do
+      repeated :args, :string, 3
+      repeated :jar_file_uris, :string, 4
+      repeated :file_uris, :string, 5
+      repeated :archive_uris, :string, 6
+      oneof :driver do
+        optional :main_jar_file_uri, :string, 1
+        optional :main_class, :string, 2
+      end
+    end
+    add_message "google.cloud.dataproc.v1.SparkRBatch" do
+      optional :main_r_file_uri, :string, 1
+      repeated :args, :string, 2
+      repeated :file_uris, :string, 3
+      repeated :archive_uris, :string, 4
+    end
+    add_message "google.cloud.dataproc.v1.SparkSqlBatch" do
+      optional :query_file_uri, :string, 1
+      map :query_variables, :string, :string, 2
+      repeated :jar_file_uris, :string, 3
+    end
+  end
+end
+
+module Google
+  module Cloud
+    module Dataproc
+      module V1
+        CreateBatchRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.CreateBatchRequest").msgclass
+        GetBatchRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.GetBatchRequest").msgclass
+        ListBatchesRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.ListBatchesRequest").msgclass
+        ListBatchesResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.ListBatchesResponse").msgclass
+        DeleteBatchRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.DeleteBatchRequest").msgclass
+        Batch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.Batch").msgclass
+        Batch::StateHistory = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.Batch.StateHistory").msgclass
+        Batch::State = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.Batch.State").enummodule
+        PySparkBatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.PySparkBatch").msgclass
+        SparkBatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.SparkBatch").msgclass
+        SparkRBatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.SparkRBatch").msgclass
+        SparkSqlBatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.dataproc.v1.SparkSqlBatch").msgclass
+      end
+    end
+  end
+end
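The generated message classes accept nested hashes, so request payloads can be assembled straight from the descriptors above. A minimal sketch (bucket, project, and ID values are placeholders):

    require "google/cloud/dataproc/v1/batches_pb"

    # batch_config is a oneof: setting one of pyspark_batch, spark_batch,
    # spark_r_batch, or spark_sql_batch clears the others.
    batch = Google::Cloud::Dataproc::V1::Batch.new(
      pyspark_batch: {
        main_python_file_uri: "gs://my-bucket/job.py",
        args: ["--shard", "7"]
      },
      labels: { "team" => "data-eng" }
    )

    request = Google::Cloud::Dataproc::V1::CreateBatchRequest.new(
      parent:   "projects/my-project/locations/us-central1",
      batch:    batch,
      batch_id: "nightly-0042"
    )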
--- /dev/null
+++ b/data/lib/google/cloud/dataproc/v1/batches_services_pb.rb
@@ -0,0 +1,52 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# Source: google/cloud/dataproc/v1/batches.proto for package 'google.cloud.dataproc.v1'
+# Original file comments:
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'grpc'
+require 'google/cloud/dataproc/v1/batches_pb'
+
+module Google
+  module Cloud
+    module Dataproc
+      module V1
+        module BatchController
+          # The BatchController provides methods to manage batch workloads.
+          class Service
+
+            include ::GRPC::GenericService
+
+            self.marshal_class_method = :encode
+            self.unmarshal_class_method = :decode
+            self.service_name = 'google.cloud.dataproc.v1.BatchController'
+
+            # Creates a batch workload that executes asynchronously.
+            rpc :CreateBatch, ::Google::Cloud::Dataproc::V1::CreateBatchRequest, ::Google::Longrunning::Operation
+            # Gets the batch workload resource representation.
+            rpc :GetBatch, ::Google::Cloud::Dataproc::V1::GetBatchRequest, ::Google::Cloud::Dataproc::V1::Batch
+            # Lists batch workloads.
+            rpc :ListBatches, ::Google::Cloud::Dataproc::V1::ListBatchesRequest, ::Google::Cloud::Dataproc::V1::ListBatchesResponse
+            # Deletes the batch workload resource. If the batch is not in terminal state,
+            # the delete fails and the response returns `FAILED_PRECONDITION`.
+            rpc :DeleteBatch, ::Google::Cloud::Dataproc::V1::DeleteBatchRequest, ::Google::Protobuf::Empty
+          end
+
+          Stub = Service.rpc_stub_class
+        end
+      end
+    end
+  end
+end
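The GAPIC layer added alongside this stub (`batch_controller/client.rb`, also in this release) wraps these RPCs as snake_case methods, with `CreateBatch` surfaced as a long-running operation. A sketch assuming the generator's usual conventions, with placeholder resource names:

    require "google/cloud/dataproc/v1/batch_controller"

    client = ::Google::Cloud::Dataproc::V1::BatchController::Client.new

    operation = client.create_batch(
      parent:   "projects/my-project/locations/us-central1",
      batch_id: "nightly-0042",
      batch:    { pyspark_batch: { main_python_file_uri: "gs://my-bucket/job.py" } }
    )
    operation.wait_until_done!   # resolves once the batch reaches a terminal state
    batch = operation.response   # => Google::Cloud::Dataproc::V1::Batch

    # DeleteBatch succeeds only for a batch in a terminal state (CANCELLED,
    # SUCCEEDED, or FAILED); otherwise it fails with FAILED_PRECONDITION.
    client.delete_batch name: batch.name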
--- a/data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb
+++ b/data/lib/google/cloud/dataproc/v1/cluster_controller/client.rb
@@ -42,13 +42,12 @@ module Google
   # See {::Google::Cloud::Dataproc::V1::ClusterController::Client::Configuration}
   # for a description of the configuration fields.
   #
-  # ## Example
+  # @example
   #
-  # To modify the configuration for all ClusterController clients:
-  #
-  #     ::Google::Cloud::Dataproc::V1::ClusterController::Client.configure do |config|
-  #       config.timeout = 10.0
-  #     end
+  #   # Modify the configuration for all ClusterController clients
+  #   ::Google::Cloud::Dataproc::V1::ClusterController::Client.configure do |config|
+  #     config.timeout = 10.0
+  #   end
   #
   # @yield [config] Configure the Client client.
   # @yieldparam config [Client::Configuration]
@@ -68,50 +67,32 @@ module Google
 
   default_config.rpcs.create_cluster.timeout = 300.0
   default_config.rpcs.create_cluster.retry_policy = {
-    initial_delay: 0.1,
-    max_delay: 60.0,
-    multiplier: 1.3,
-    retry_codes: [14]
+    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
   }
 
   default_config.rpcs.update_cluster.timeout = 300.0
   default_config.rpcs.update_cluster.retry_policy = {
-    initial_delay: 0.1,
-    max_delay: 60.0,
-    multiplier: 1.3,
-    retry_codes: [14]
+    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
   }
 
   default_config.rpcs.delete_cluster.timeout = 300.0
   default_config.rpcs.delete_cluster.retry_policy = {
-    initial_delay: 0.1,
-    max_delay: 60.0,
-    multiplier: 1.3,
-    retry_codes: [14]
+    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
   }
 
   default_config.rpcs.get_cluster.timeout = 300.0
   default_config.rpcs.get_cluster.retry_policy = {
-    initial_delay: 0.1,
-    max_delay: 60.0,
-    multiplier: 1.3,
-    retry_codes: [13, 4, 14]
+    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [13, 4, 14]
   }
 
   default_config.rpcs.list_clusters.timeout = 300.0
   default_config.rpcs.list_clusters.retry_policy = {
-    initial_delay: 0.1,
-    max_delay: 60.0,
-    multiplier: 1.3,
-    retry_codes: [13, 4, 14]
+    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [13, 4, 14]
   }
 
   default_config.rpcs.diagnose_cluster.timeout = 300.0
   default_config.rpcs.diagnose_cluster.retry_policy = {
-    initial_delay: 0.1,
-    max_delay: 60.0,
-    multiplier: 1.3,
-    retry_codes: [14]
+    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
   }
 
   default_config
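Only the formatting of these retry policies changed; the values are identical. The numeric `retry_codes` are gRPC status codes: 4 is DEADLINE_EXCEEDED, 13 is INTERNAL, and 14 is UNAVAILABLE. Any of them can be overridden per RPC and per client, as a sketch:

    # Retry get_cluster more aggressively, for this client only.
    client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new do |config|
      config.rpcs.get_cluster.retry_policy = {
        initial_delay: 0.2,   # seconds before the first retry
        max_delay:     30.0,  # cap on exponential backoff
        multiplier:    1.5,   # delay growth factor per attempt
        retry_codes:   [4, 14]
      }
    end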
@@ -143,19 +124,15 @@ module Google
   ##
   # Create a new ClusterController client object.
   #
-  # ## Examples
-  #
-  # To create a new ClusterController client with the default
-  # configuration:
+  # @example
   #
-  #     client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new
+  #   # Create a client using the default configuration
+  #   client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new
   #
-  # To create a new ClusterController client with a custom
-  # configuration:
-  #
-  #     client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new do |config|
-  #       config.timeout = 10.0
-  #     end
+  #   # Create a client using a custom configuration
+  #   client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new do |config|
+  #     config.timeout = 10.0
+  #   end
   #
   # @yield [config] Configure the ClusterController client.
   # @yieldparam config [Client::Configuration]
@@ -175,14 +152,13 @@ module Google
 
   # Create credentials
   credentials = @config.credentials
-  # Use self-signed JWT if the scope and endpoint are unchanged from default,
+  # Use self-signed JWT if the endpoint is unchanged from default,
   # but only if the default endpoint does not have a region prefix.
-  enable_self_signed_jwt = @config.scope == Client.configure.scope &&
-                           @config.endpoint == Client.configure.endpoint &&
+  enable_self_signed_jwt = @config.endpoint == Client.configure.endpoint &&
                            !@config.endpoint.split(".").first.include?("-")
   credentials ||= Credentials.default scope: @config.scope,
                                       enable_self_signed_jwt: enable_self_signed_jwt
-  if credentials.is_a?(String) || credentials.is_a?(Hash)
+  if credentials.is_a?(::String) || credentials.is_a?(::Hash)
     credentials = Credentials.new credentials, scope: @config.scope
   end
   @quota_project_id = @config.quota_project
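Dropping the scope comparison means a custom OAuth scope no longer disables self-signed JWTs; only a non-default endpoint does. Regional Dataproc endpoints have the form `{region}-dataproc.googleapis.com`, whose first DNS label contains a hyphen, so the check above also turns self-signed JWTs off for them. A sketch with an illustrative region:

    client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new do |config|
      # "us-central1-dataproc" contains "-", so enable_self_signed_jwt is false
      # and the default credentials fall back to the OAuth token flow.
      config.endpoint = "us-central1-dataproc.googleapis.com"
    end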
@@ -226,7 +202,7 @@ module Google
   # @param options [::Gapic::CallOptions, ::Hash]
   #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
   #
-  # @overload create_cluster(project_id: nil, region: nil, cluster: nil, request_id: nil)
+  # @overload create_cluster(project_id: nil, region: nil, cluster: nil, request_id: nil, action_on_failed_primary_workers: nil)
   #   Pass arguments to `create_cluster` via keyword arguments. Note that at
   #   least one keyword argument is required. To specify no parameters, or to keep all
   #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -239,7 +215,7 @@ module Google
   # @param cluster [::Google::Cloud::Dataproc::V1::Cluster, ::Hash]
   #   Required. The cluster to create.
   # @param request_id [::String]
-  #   Optional. A unique id used to identify the request. If the server receives two
+  #   Optional. A unique ID used to identify the request. If the server receives two
   #   [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
   #   with the same id, then the second request will be ignored and the
   #   first {::Google::Longrunning::Operation google.longrunning.Operation} created and stored in the backend
@@ -248,8 +224,10 @@ module Google
   #   It is recommended to always set this value to a
   #   [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
   #
-  #   The id must contain only letters (a-z, A-Z), numbers (0-9),
+  #   The ID must contain only letters (a-z, A-Z), numbers (0-9),
   #   underscores (_), and hyphens (-). The maximum length is 40 characters.
+  # @param action_on_failed_primary_workers [::Google::Cloud::Dataproc::V1::FailureAction]
+  #   Optional. Failure action when primary worker creation fails.
   #
   # @yield [response, operation] Access the result along with the RPC operation
   # @yieldparam response [::Gapic::Operation]
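A sketch of a `create_cluster` call using the new keyword. `:DELETE` is assumed here to be one of the `FailureAction` values added to `shared.rb` in this release; the cluster hash is illustrative:

    require "securerandom"

    operation = client.create_cluster(
      project_id: "my-project",
      region:     "us-central1",
      cluster:    { cluster_name: "etl-cluster" },
      # Unique ID (letters, digits, _ and -, max 40 chars) so a retried
      # request is recognized and ignored by the server.
      request_id: SecureRandom.uuid,
      # Assumed enum value: delete the cluster if primary workers fail to start.
      action_on_failed_primary_workers: :DELETE
    )
    operation.wait_until_done!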
@@ -286,7 +264,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.create_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.create_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :create_cluster, request, options: options do |response, operation|
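This is a behavior fix repeated for every RPC below: previously the client-wide `timeout` never reached the call options, because only `metadata` and `retry_policy` were passed in the second `apply_defaults`. Since `apply_defaults` fills only values that are still unset, the effective precedence is per-call options, then the per-RPC config, then the client-wide config. A standalone sketch of that resolution rule (not the gem's code):

    # Mimic the defaulting: keep values already set, fill in what is still nil.
    def apply_defaults options, defaults
      defaults.merge(options) { |_key, default, explicit| explicit.nil? ? default : explicit }
    end

    call_opts   = { timeout: nil }      # nothing set for this call
    rpc_conf    = { timeout: 300.0 }    # create_cluster default
    client_conf = { timeout: 10.0 }     # client-wide default

    opts = apply_defaults call_opts, rpc_conf    # => { timeout: 300.0 }
    opts = apply_defaults opts, client_conf      # timeout stays 300.0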
@@ -302,6 +282,8 @@ module Google
   # Updates a cluster in a project. The returned
   # {::Google::Longrunning::Operation#metadata Operation.metadata} will be
   # [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+  # The cluster must be in a {::Google::Cloud::Dataproc::V1::ClusterStatus::State `RUNNING`} state or an error
+  # is returned.
   #
   # @overload update_cluster(request, options = nil)
   #   Pass arguments to `update_cluster` via a request object, either of type
@@ -390,7 +372,7 @@ module Google
   #   </tbody>
   #   </table>
   # @param request_id [::String]
-  #   Optional. A unique id used to identify the request. If the server
+  #   Optional. A unique ID used to identify the request. If the server
   #   receives two
   #   [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
   #   with the same id, then the second request will be ignored and the
@@ -400,7 +382,7 @@ module Google
   #   It is recommended to always set this value to a
   #   [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
   #
-  #   The id must contain only letters (a-z, A-Z), numbers (0-9),
+  #   The ID must contain only letters (a-z, A-Z), numbers (0-9),
   #   underscores (_), and hyphens (-). The maximum length is 40 characters.
   #
   # @yield [response, operation] Access the result along with the RPC operation
@@ -439,7 +421,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.update_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.update_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :update_cluster, request, options: options do |response, operation|
@@ -480,7 +464,7 @@ module Google
   #   Optional. Specifying the `cluster_uuid` means the RPC will fail
   #   (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
   # @param request_id [::String]
-  #   Optional. A unique id used to identify the request. If the server
+  #   Optional. A unique ID used to identify the request. If the server
   #   receives two
   #   [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
   #   with the same id, then the second request will be ignored and the
@@ -490,7 +474,7 @@ module Google
   #   Recommendation: Set this value to a
   #   [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
   #
-  #   The id must contain only letters (a-z, A-Z), numbers (0-9),
+  #   The ID must contain only letters (a-z, A-Z), numbers (0-9),
   #   underscores (_), and hyphens (-). The maximum length is 40 characters.
   #
   # @yield [response, operation] Access the result along with the RPC operation
@@ -529,7 +513,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.stop_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.stop_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :stop_cluster, request, options: options do |response, operation|
@@ -570,7 +556,7 @@ module Google
   #   Optional. Specifying the `cluster_uuid` means the RPC will fail
   #   (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
   # @param request_id [::String]
-  #   Optional. A unique id used to identify the request. If the server
+  #   Optional. A unique ID used to identify the request. If the server
   #   receives two
   #   [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
   #   with the same id, then the second request will be ignored and the
@@ -580,7 +566,7 @@ module Google
   #   Recommendation: Set this value to a
   #   [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
   #
-  #   The id must contain only letters (a-z, A-Z), numbers (0-9),
+  #   The ID must contain only letters (a-z, A-Z), numbers (0-9),
   #   underscores (_), and hyphens (-). The maximum length is 40 characters.
   #
   # @yield [response, operation] Access the result along with the RPC operation
@@ -619,7 +605,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.start_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.start_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :start_cluster, request, options: options do |response, operation|
@@ -662,7 +650,7 @@ module Google
   #   Optional. Specifying the `cluster_uuid` means the RPC should fail
   #   (with error NOT_FOUND) if cluster with specified UUID does not exist.
   # @param request_id [::String]
-  #   Optional. A unique id used to identify the request. If the server
+  #   Optional. A unique ID used to identify the request. If the server
   #   receives two
   #   [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
   #   with the same id, then the second request will be ignored and the
@@ -672,7 +660,7 @@ module Google
   #   It is recommended to always set this value to a
   #   [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
   #
-  #   The id must contain only letters (a-z, A-Z), numbers (0-9),
+  #   The ID must contain only letters (a-z, A-Z), numbers (0-9),
   #   underscores (_), and hyphens (-). The maximum length is 40 characters.
   #
   # @yield [response, operation] Access the result along with the RPC operation
@@ -711,7 +699,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.delete_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.delete_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :delete_cluster, request, options: options do |response, operation|
@@ -785,7 +775,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.get_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.get_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :get_cluster, request, options: options do |response, operation|
@@ -879,7 +871,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.list_clusters.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.list_clusters.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :list_clusters, request, options: options do |response, operation|
@@ -959,7 +953,9 @@ module Google
   options.apply_defaults timeout:      @config.rpcs.diagnose_cluster.timeout,
                          metadata:     metadata,
                          retry_policy: @config.rpcs.diagnose_cluster.retry_policy
-  options.apply_defaults metadata:     @config.metadata,
+
+  options.apply_defaults timeout:      @config.timeout,
+                         metadata:     @config.metadata,
                          retry_policy: @config.retry_policy
 
   @cluster_controller_stub.call_rpc :diagnose_cluster, request, options: options do |response, operation|
@@ -984,22 +980,21 @@ module Google
   # Configuration can be applied globally to all clients, or to a single client
   # on construction.
   #
-  # # Examples
-  #
-  # To modify the global config, setting the timeout for create_cluster
-  # to 20 seconds, and all remaining timeouts to 10 seconds:
-  #
-  #     ::Google::Cloud::Dataproc::V1::ClusterController::Client.configure do |config|
-  #       config.timeout = 10.0
-  #       config.rpcs.create_cluster.timeout = 20.0
-  #     end
-  #
-  # To apply the above configuration only to a new client:
-  #
-  #     client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new do |config|
-  #       config.timeout = 10.0
-  #       config.rpcs.create_cluster.timeout = 20.0
-  #     end
+  # @example
+  #
+  #   # Modify the global config, setting the timeout for
+  #   # create_cluster to 20 seconds,
+  #   # and all remaining timeouts to 10 seconds.
+  #   ::Google::Cloud::Dataproc::V1::ClusterController::Client.configure do |config|
+  #     config.timeout = 10.0
+  #     config.rpcs.create_cluster.timeout = 20.0
+  #   end
+  #
+  #   # Apply the above configuration only to a new client.
+  #   client = ::Google::Cloud::Dataproc::V1::ClusterController::Client.new do |config|
+  #     config.timeout = 10.0
+  #     config.rpcs.create_cluster.timeout = 20.0
+  #   end
   #
   # @!attribute [rw] endpoint
   #   The hostname or hostname:port of the service endpoint.