google-apis-dataproc_v1 0.15.0 → 0.19.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +19 -0
- data/OVERVIEW.md +3 -3
- data/lib/google/apis/dataproc_v1/classes.rb +690 -39
- data/lib/google/apis/dataproc_v1/gem_version.rb +3 -3
- data/lib/google/apis/dataproc_v1/representations.rb +246 -0
- data/lib/google/apis/dataproc_v1/service.rb +151 -2
- metadata +6 -6
data/lib/google/apis/dataproc_v1/classes.rb
@@ -98,6 +98,16 @@ module Google
         # @return [String]
         attr_accessor :id
 
+        # Optional. The labels to associate with this autoscaling policy. Label keys
+        # must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.
+        # ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must
+        # contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/
+        # rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling
+        # policy.
+        # Corresponds to the JSON property `labels`
+        # @return [Hash<String,String>]
+        attr_accessor :labels
+
         # Output only. The "resource name" of the autoscaling policy, as described in
         # https://cloud.google.com/apis/design/resource_names. For projects.regions.
         # autoscalingPolicies, the resource name of the policy has the following format:
@@ -129,6 +139,7 @@ module Google
         def update!(**args)
           @basic_algorithm = args[:basic_algorithm] if args.key?(:basic_algorithm)
           @id = args[:id] if args.key?(:id)
+          @labels = args[:labels] if args.key?(:labels)
           @name = args[:name] if args.key?(:name)
           @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
           @worker_config = args[:worker_config] if args.key?(:worker_config)
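
On the generated model the new labels field is a plain Hash. A minimal sketch of tagging an autoscaling policy (the id and label values below are hypothetical):

    require 'google/apis/dataproc_v1'

    # Hypothetical values; keys and non-empty values must conform to RFC 1035.
    policy = Google::Apis::DataprocV1::AutoscalingPolicy.new(
      id: 'example-policy',
      labels: { 'env' => 'dev', 'team' => 'data' }
    )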
@@ -146,6 +157,11 @@ module Google
         # @return [String]
         attr_accessor :cooldown_period
 
+        # Basic autoscaling configurations for Spark Standalone.
+        # Corresponds to the JSON property `sparkStandaloneConfig`
+        # @return [Google::Apis::DataprocV1::SparkStandaloneAutoscalingConfig]
+        attr_accessor :spark_standalone_config
+
         # Basic autoscaling configurations for YARN.
         # Corresponds to the JSON property `yarnConfig`
         # @return [Google::Apis::DataprocV1::BasicYarnAutoscalingConfig]
@@ -158,6 +174,7 @@ module Google
         # Update properties of this object
         def update!(**args)
           @cooldown_period = args[:cooldown_period] if args.key?(:cooldown_period)
+          @spark_standalone_config = args[:spark_standalone_config] if args.key?(:spark_standalone_config)
           @yarn_config = args[:yarn_config] if args.key?(:yarn_config)
         end
       end
@@ -228,6 +245,131 @@ module Google
         end
       end
 
+      # A representation of a batch workload in the service.
+      class Batch
+        include Google::Apis::Core::Hashable
+
+        # Output only. The time when the batch was created.
+        # Corresponds to the JSON property `createTime`
+        # @return [String]
+        attr_accessor :create_time
+
+        # Output only. The email address of the user who created the batch.
+        # Corresponds to the JSON property `creator`
+        # @return [String]
+        attr_accessor :creator
+
+        # Environment configuration for a workload.
+        # Corresponds to the JSON property `environmentConfig`
+        # @return [Google::Apis::DataprocV1::EnvironmentConfig]
+        attr_accessor :environment_config
+
+        # Optional. The labels to associate with this batch. Label keys must contain 1
+        # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
+        # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
+        # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
+        # . No more than 32 labels can be associated with a batch.
+        # Corresponds to the JSON property `labels`
+        # @return [Hash<String,String>]
+        attr_accessor :labels
+
+        # Output only. The resource name of the batch.
+        # Corresponds to the JSON property `name`
+        # @return [String]
+        attr_accessor :name
+
+        # Output only. The resource name of the operation associated with this batch.
+        # Corresponds to the JSON property `operation`
+        # @return [String]
+        attr_accessor :operation
+
+        # A configuration for running an Apache PySpark (https://spark.apache.org/docs/
+        # latest/api/python/getting_started/quickstart.html) batch workload.
+        # Corresponds to the JSON property `pysparkBatch`
+        # @return [Google::Apis::DataprocV1::PySparkBatch]
+        attr_accessor :pyspark_batch
+
+        # Runtime configuration for a workload.
+        # Corresponds to the JSON property `runtimeConfig`
+        # @return [Google::Apis::DataprocV1::RuntimeConfig]
+        attr_accessor :runtime_config
+
+        # Runtime information about workload execution.
+        # Corresponds to the JSON property `runtimeInfo`
+        # @return [Google::Apis::DataprocV1::RuntimeInfo]
+        attr_accessor :runtime_info
+
+        # A configuration for running an Apache Spark (http://spark.apache.org/) batch
+        # workload.
+        # Corresponds to the JSON property `sparkBatch`
+        # @return [Google::Apis::DataprocV1::SparkBatch]
+        attr_accessor :spark_batch
+
+        # A configuration for running an Apache SparkR (https://spark.apache.org/docs/
+        # latest/sparkr.html) batch workload.
+        # Corresponds to the JSON property `sparkRBatch`
+        # @return [Google::Apis::DataprocV1::SparkRBatch]
+        attr_accessor :spark_r_batch
+
+        # A configuration for running Apache Spark SQL (http://spark.apache.org/sql/)
+        # queries as a batch workload.
+        # Corresponds to the JSON property `sparkSqlBatch`
+        # @return [Google::Apis::DataprocV1::SparkSqlBatch]
+        attr_accessor :spark_sql_batch
+
+        # Output only. The state of the batch.
+        # Corresponds to the JSON property `state`
+        # @return [String]
+        attr_accessor :state
+
+        # Output only. Historical state information for the batch.
+        # Corresponds to the JSON property `stateHistory`
+        # @return [Array<Google::Apis::DataprocV1::StateHistory>]
+        attr_accessor :state_history
+
+        # Output only. Batch state details, such as a failure description if the state
+        # is FAILED.
+        # Corresponds to the JSON property `stateMessage`
+        # @return [String]
+        attr_accessor :state_message
+
+        # Output only. The time when the batch entered its current state.
+        # Corresponds to the JSON property `stateTime`
+        # @return [String]
+        attr_accessor :state_time
+
+        # Output only. A batch UUID (Universally Unique Identifier). The service generates
+        # this value when it creates the batch.
+        # Corresponds to the JSON property `uuid`
+        # @return [String]
+        attr_accessor :uuid
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @create_time = args[:create_time] if args.key?(:create_time)
+          @creator = args[:creator] if args.key?(:creator)
+          @environment_config = args[:environment_config] if args.key?(:environment_config)
+          @labels = args[:labels] if args.key?(:labels)
+          @name = args[:name] if args.key?(:name)
+          @operation = args[:operation] if args.key?(:operation)
+          @pyspark_batch = args[:pyspark_batch] if args.key?(:pyspark_batch)
+          @runtime_config = args[:runtime_config] if args.key?(:runtime_config)
+          @runtime_info = args[:runtime_info] if args.key?(:runtime_info)
+          @spark_batch = args[:spark_batch] if args.key?(:spark_batch)
+          @spark_r_batch = args[:spark_r_batch] if args.key?(:spark_r_batch)
+          @spark_sql_batch = args[:spark_sql_batch] if args.key?(:spark_sql_batch)
+          @state = args[:state] if args.key?(:state)
+          @state_history = args[:state_history] if args.key?(:state_history)
+          @state_message = args[:state_message] if args.key?(:state_message)
+          @state_time = args[:state_time] if args.key?(:state_time)
+          @uuid = args[:uuid] if args.key?(:uuid)
+        end
+      end
+
       # Metadata describing the Batch operation.
       class BatchOperationMetadata
         include Google::Apis::Core::Hashable
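
A Batch carries exactly one workload config (pyspark_batch, spark_batch, spark_r_batch, or spark_sql_batch) alongside optional labels and runtime/environment settings; the state*, runtime_info, operation, and uuid fields are output only. A minimal sketch of assembling one locally (the bucket URI and labels are placeholders):

    require 'google/apis/dataproc_v1'

    # Hypothetical sketch: a batch wrapping a single PySpark workload.
    batch = Google::Apis::DataprocV1::Batch.new(
      labels: { 'env' => 'dev' },
      pyspark_batch: Google::Apis::DataprocV1::PySparkBatch.new(
        main_python_file_uri: 'gs://example-bucket/job.py'  # placeholder URI
      )
    )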
@@ -289,7 +431,7 @@ module Google
         end
       end
 
-      # Associates members with a role.
+      # Associates members, or principals, with a role.
       class Binding
         include Google::Apis::Core::Hashable
 
@@ -312,7 +454,7 @@ module Google
         # @return [Google::Apis::DataprocV1::Expr]
         attr_accessor :condition
 
-        # Specifies the
+        # Specifies the principals requesting access for a Cloud Platform resource.
         # members can have the following values: allUsers: A special identifier that
         # represents anyone who is on the internet; with or without a Google account.
         # allAuthenticatedUsers: A special identifier that represents anyone who is
@@ -341,8 +483,8 @@ module Google
         # @return [Array<String>]
         attr_accessor :members
 
-        # Role that is assigned to members. For example,
-        # roles/owner.
+        # Role that is assigned to the list of members, or principals. For example,
+        # roles/viewer, roles/editor, or roles/owner.
         # Corresponds to the JSON property `role`
         # @return [String]
         attr_accessor :role
@@ -867,6 +1009,14 @@ module Google
         # @return [String]
         attr_accessor :boot_disk_type
 
+        # Optional. Interface type of local SSDs (default is "scsi"). Valid values: "
+        # scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express).
+        # See SSD Interface types (https://cloud.google.com/compute/docs/disks/local-ssd#
+        # performance).
+        # Corresponds to the JSON property `localSsdInterface`
+        # @return [String]
+        attr_accessor :local_ssd_interface
+
         # Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
         # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
         # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
@@ -884,6 +1034,7 @@ module Google
         def update!(**args)
           @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
           @boot_disk_type = args[:boot_disk_type] if args.key?(:boot_disk_type)
+          @local_ssd_interface = args[:local_ssd_interface] if args.key?(:local_ssd_interface)
           @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
         end
       end
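
The new local_ssd_interface string sits next to the existing disk fields, so requesting NVMe local SSDs is a one-line change. A hypothetical sketch:

    # Hypothetical sketch: two NVMe local SSDs on a node's disk config.
    disk = Google::Apis::DataprocV1::DiskConfig.new(
      boot_disk_size_gb: 500,
      num_local_ssds: 2,
      local_ssd_interface: 'nvme'  # valid values: "scsi" (default), "nvme"
    )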
@@ -953,6 +1104,74 @@ module Google
         end
       end
 
+      # Environment configuration for a workload.
+      class EnvironmentConfig
+        include Google::Apis::Core::Hashable
+
+        # Execution configuration for a workload.
+        # Corresponds to the JSON property `executionConfig`
+        # @return [Google::Apis::DataprocV1::ExecutionConfig]
+        attr_accessor :execution_config
+
+        # Auxiliary services configuration for a workload.
+        # Corresponds to the JSON property `peripheralsConfig`
+        # @return [Google::Apis::DataprocV1::PeripheralsConfig]
+        attr_accessor :peripherals_config
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @execution_config = args[:execution_config] if args.key?(:execution_config)
+          @peripherals_config = args[:peripherals_config] if args.key?(:peripherals_config)
+        end
+      end
+
+      # Execution configuration for a workload.
+      class ExecutionConfig
+        include Google::Apis::Core::Hashable
+
+        # Optional. The Cloud KMS key to use for encryption.
+        # Corresponds to the JSON property `kmsKey`
+        # @return [String]
+        attr_accessor :kms_key
+
+        # Optional. Tags used for network traffic control.
+        # Corresponds to the JSON property `networkTags`
+        # @return [Array<String>]
+        attr_accessor :network_tags
+
+        # Optional. Network URI to connect workload to.
+        # Corresponds to the JSON property `networkUri`
+        # @return [String]
+        attr_accessor :network_uri
+
+        # Optional. Service account used to execute the workload.
+        # Corresponds to the JSON property `serviceAccount`
+        # @return [String]
+        attr_accessor :service_account
+
+        # Optional. Subnetwork URI to connect workload to.
+        # Corresponds to the JSON property `subnetworkUri`
+        # @return [String]
+        attr_accessor :subnetwork_uri
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @kms_key = args[:kms_key] if args.key?(:kms_key)
+          @network_tags = args[:network_tags] if args.key?(:network_tags)
+          @network_uri = args[:network_uri] if args.key?(:network_uri)
+          @service_account = args[:service_account] if args.key?(:service_account)
+          @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
+        end
+      end
+
       # Represents a textual expression in the Common Expression Language (CEL) syntax.
       # CEL is a C-like expression language. The syntax and semantics of CEL are
       # documented at https://github.com/google/cel-spec.Example (Comparison): title: "
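
EnvironmentConfig is a thin wrapper pairing an ExecutionConfig (where the workload runs: network, service account, KMS key) with a PeripheralsConfig (what it talks to). A hypothetical sketch of the execution side (all resource names are placeholders):

    # Hypothetical sketch: pin a batch to a subnetwork and service account.
    env = Google::Apis::DataprocV1::EnvironmentConfig.new(
      execution_config: Google::Apis::DataprocV1::ExecutionConfig.new(
        subnetwork_uri: 'projects/example-project/regions/us-central1/subnetworks/default',
        service_account: 'batch-runner@example-project.iam.gserviceaccount.com',
        network_tags: ['dataproc-batch']
      )
    )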
@@ -1164,12 +1383,16 @@ module Google
       class GetPolicyOptions
         include Google::Apis::Core::Hashable
 
-        # Optional. The policy
-        # 3. Requests specifying an invalid value will be
-        # with any conditional bindings must specify
-        # conditional bindings may specify any valid
-        #
-        #
+        # Optional. The maximum policy version that will be used to format the policy.
+        # Valid values are 0, 1, and 3. Requests specifying an invalid value will be
+        # rejected.Requests for policies with any conditional role bindings must specify
+        # version 3. Policies with no conditional role bindings may specify any valid
+        # value or leave the field unset.The policy in the response might use the policy
+        # version that you specified, or it might use a lower policy version. For
+        # example, if you specify version 3, but the policy has no conditional role
+        # bindings, the response uses version 1.To learn which resources support
+        # conditions in their IAM policies, see the IAM documentation (https://cloud.
+        # google.com/iam/help/conditions/resource-policies).
         # Corresponds to the JSON property `requestedPolicyVersion`
         # @return [Fixnum]
         attr_accessor :requested_policy_version
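
The version negotiation described above reduces to a single integer on the request. A hypothetical sketch asking for the version 3 format:

    # Hypothetical sketch: request the v3 policy format (required when
    # the policy contains conditional role bindings).
    options = Google::Apis::DataprocV1::GetPolicyOptions.new(requested_policy_version: 3)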
@@ -1876,14 +2099,19 @@ module Google
         # Optional. Maximum number of times per hour a driver may be restarted as a
         # result of driver exiting with non-zero code before job is reported failed.A
         # job may be reported as thrashing if driver exits with non-zero code 4 times
-        # within 10 minute window.Maximum value is 10.
+        # within 10 minute window.Maximum value is 10.Note: Currently, this restartable
+        # job option is not supported in Dataproc workflow template (https://cloud.
+        # google.com/dataproc/docs/concepts/workflows/using-workflows#
+        # adding_jobs_to_a_template) jobs.
         # Corresponds to the JSON property `maxFailuresPerHour`
         # @return [Fixnum]
         attr_accessor :max_failures_per_hour
 
         # Optional. Maximum number of times in total a driver may be restarted as a
         # result of driver exiting with non-zero code before job is reported failed.
-        # Maximum value is 240.
+        # Maximum value is 240.Note: Currently, this restartable job option is not
+        # supported in Dataproc workflow template (https://cloud.google.com/dataproc/
+        # docs/concepts/workflows/using-workflows#adding_jobs_to_a_template) jobs.
         # Corresponds to the JSON property `maxFailuresTotal`
         # @return [Fixnum]
         attr_accessor :max_failures_total
@@ -2133,6 +2361,32 @@ module Google
         end
       end
 
+      # A list of batch workloads.
+      class ListBatchesResponse
+        include Google::Apis::Core::Hashable
+
+        # The batches from the specified collection.
+        # Corresponds to the JSON property `batches`
+        # @return [Array<Google::Apis::DataprocV1::Batch>]
+        attr_accessor :batches
+
+        # A token, which can be sent as page_token to retrieve the next page. If this
+        # field is omitted, there are no subsequent pages.
+        # Corresponds to the JSON property `nextPageToken`
+        # @return [String]
+        attr_accessor :next_page_token
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @batches = args[:batches] if args.key?(:batches)
+          @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
+        end
+      end
+
       # The list of all clusters in a project.
       class ListClustersResponse
         include Google::Apis::Core::Hashable
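
next_page_token follows the standard List pagination contract: feed it back as page_token until it comes back empty. A sketch of draining all pages, assuming the batches list method added to service.rb in this release is named list_project_location_batches (the parent string is a placeholder and authorization setup is omitted):

    require 'google/apis/dataproc_v1'

    service = Google::Apis::DataprocV1::DataprocService.new
    token = nil
    loop do
      # Assumed method name, matching the generated client's naming scheme.
      resp = service.list_project_location_batches(
        'projects/example-project/locations/us-central1',
        page_token: token
      )
      (resp.batches || []).each { |b| puts "#{b.name}: #{b.state}" }
      token = resp.next_page_token
      break if token.nil? || token.empty?
    end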
@@ -2619,6 +2873,32 @@ module Google
         end
       end
 
+      # Auxiliary services configuration for a workload.
+      class PeripheralsConfig
+        include Google::Apis::Core::Hashable
+
+        # Optional. Resource name of an existing Dataproc Metastore service.Example:
+        # projects/[project_id]/locations/[region]/services/[service_id]
+        # Corresponds to the JSON property `metastoreService`
+        # @return [String]
+        attr_accessor :metastore_service
+
+        # Spark History Server configuration for the workload.
+        # Corresponds to the JSON property `sparkHistoryServerConfig`
+        # @return [Google::Apis::DataprocV1::SparkHistoryServerConfig]
+        attr_accessor :spark_history_server_config
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @metastore_service = args[:metastore_service] if args.key?(:metastore_service)
+          @spark_history_server_config = args[:spark_history_server_config] if args.key?(:spark_history_server_config)
+        end
+      end
+
       # A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on
       # YARN.
       class PigJob
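
PeripheralsConfig is the "what the workload talks to" half of EnvironmentConfig: an existing Dataproc Metastore and/or a Spark History Server cluster. A hypothetical sketch (resource names are placeholders):

    # Hypothetical sketch: point a batch at an existing metastore and
    # persistent history server.
    peripherals = Google::Apis::DataprocV1::PeripheralsConfig.new(
      metastore_service: 'projects/example-project/locations/us-central1/services/example-metastore',
      spark_history_server_config: Google::Apis::DataprocV1::SparkHistoryServerConfig.new(
        dataproc_cluster: 'projects/example-project/regions/us-central1/clusters/phs-cluster'
      )
    )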
@@ -2685,16 +2965,16 @@ module Google
 
       # An Identity and Access Management (IAM) policy, which specifies access
       # controls for Google Cloud resources.A Policy is a collection of bindings. A
-      # binding binds one or more members to a single role.
-      # accounts, service accounts, Google groups, and domains (such as G
-      # role is a named list of permissions; each role can be an IAM
-      # or a user-created custom role.For some types of Google Cloud
-      # binding can also specify a condition, which is a logical
-      # allows access to a resource only if the expression evaluates
-      # condition can add constraints based on attributes of the request,
-      # or both. To learn which resources support conditions in their
-      # see the IAM documentation (https://cloud.google.com/iam/help/
-      # resource-policies).JSON example: ` "bindings": [ ` "role": "roles/
+      # binding binds one or more members, or principals, to a single role. Principals
+      # can be user accounts, service accounts, Google groups, and domains (such as G
+      # Suite). A role is a named list of permissions; each role can be an IAM
+      # predefined role or a user-created custom role.For some types of Google Cloud
+      # resources, a binding can also specify a condition, which is a logical
+      # expression that allows access to a resource only if the expression evaluates
+      # to true. A condition can add constraints based on attributes of the request,
+      # the resource, or both. To learn which resources support conditions in their
+      # IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/
+      # conditions/resource-policies).JSON example: ` "bindings": [ ` "role": "roles/
       # resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "
       # group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@
       # appspot.gserviceaccount.com" ] `, ` "role": "roles/resourcemanager.
@@ -2713,13 +2993,14 @@ module Google
       class Policy
         include Google::Apis::Core::Hashable
 
-        # Associates a list of members
-        # that determines how and when the bindings are applied.
-        # must contain at least one
-        # 500
-        # of a
-        # different roles to user:alice@example.
-        # you can add another 1,450
+        # Associates a list of members, or principals, with a role. Optionally, may
+        # specify a condition that determines how and when the bindings are applied.
+        # Each of the bindings must contain at least one principal.The bindings in a
+        # Policy can refer to up to 1,500 principals; up to 250 of these principals can
+        # be Google groups. Each occurrence of a principal counts towards these limits.
+        # For example, if the bindings grant 50 different roles to user:alice@example.
+        # com, and not to any other principal, then you can add another 1,450 principals
+        # to the bindings in the Policy.
         # Corresponds to the JSON property `bindings`
         # @return [Array<Google::Apis::DataprocV1::Binding>]
         attr_accessor :bindings
@@ -2833,6 +3114,63 @@ module Google
         end
       end
 
+      # A configuration for running an Apache PySpark (https://spark.apache.org/docs/
+      # latest/api/python/getting_started/quickstart.html) batch workload.
+      class PySparkBatch
+        include Google::Apis::Core::Hashable
+
+        # Optional. HCFS URIs of archives to be extracted into the working directory of
+        # each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+        # Corresponds to the JSON property `archiveUris`
+        # @return [Array<String>]
+        attr_accessor :archive_uris
+
+        # Optional. The arguments to pass to the driver. Do not include arguments that
+        # can be set as batch properties, such as --conf, since a collision can occur
+        # that causes an incorrect batch submission.
+        # Corresponds to the JSON property `args`
+        # @return [Array<String>]
+        attr_accessor :args
+
+        # Optional. HCFS URIs of files to be placed in the working directory of each
+        # executor.
+        # Corresponds to the JSON property `fileUris`
+        # @return [Array<String>]
+        attr_accessor :file_uris
+
+        # Optional. HCFS URIs of jar files to add to the classpath of the Spark driver
+        # and tasks.
+        # Corresponds to the JSON property `jarFileUris`
+        # @return [Array<String>]
+        attr_accessor :jar_file_uris
+
+        # Required. The HCFS URI of the main Python file to use as the Spark driver.
+        # Must be a .py file.
+        # Corresponds to the JSON property `mainPythonFileUri`
+        # @return [String]
+        attr_accessor :main_python_file_uri
+
+        # Optional. HCFS file URIs of Python files to pass to the PySpark framework.
+        # Supported file types: .py, .egg, and .zip.
+        # Corresponds to the JSON property `pythonFileUris`
+        # @return [Array<String>]
+        attr_accessor :python_file_uris
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+          @args = args[:args] if args.key?(:args)
+          @file_uris = args[:file_uris] if args.key?(:file_uris)
+          @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+          @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
+          @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
+        end
+      end
+
       # A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/
       # python-programming-guide.html) applications on YARN.
       class PySparkJob
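
Beyond the required main_python_file_uri, PySparkBatch mirrors the familiar PySparkJob fields for dependencies and arguments. A hypothetical sketch (all URIs are placeholders):

    # Hypothetical sketch: a PySpark batch with extra Python deps and args.
    pyspark = Google::Apis::DataprocV1::PySparkBatch.new(
      main_python_file_uri: 'gs://example-bucket/main.py',
      python_file_uris: ['gs://example-bucket/helpers.zip'],
      args: ['--input', 'gs://example-bucket/data/']  # avoid flags like --conf
    )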
@@ -3012,6 +3350,72 @@ module Google
         end
       end
 
+      # Runtime configuration for a workload.
+      class RuntimeConfig
+        include Google::Apis::Core::Hashable
+
+        # Optional. Custom container image for the job runtime environment. If
+        # not specified, a default container image will be used.
+        # Corresponds to the JSON property `containerImage`
+        # @return [String]
+        attr_accessor :container_image
+
+        # Optional. A mapping of property names to values, which are used to configure
+        # workload execution.
+        # Corresponds to the JSON property `properties`
+        # @return [Hash<String,String>]
+        attr_accessor :properties
+
+        # Optional. Version of the batch runtime.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @container_image = args[:container_image] if args.key?(:container_image)
+          @properties = args[:properties] if args.key?(:properties)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
+      # Runtime information about workload execution.
+      class RuntimeInfo
+        include Google::Apis::Core::Hashable
+
+        # Output only. A URI pointing to the location of the diagnostics tarball.
+        # Corresponds to the JSON property `diagnosticOutputUri`
+        # @return [String]
+        attr_accessor :diagnostic_output_uri
+
+        # Output only. Map of remote access endpoints (such as web interfaces and APIs)
+        # to their URIs.
+        # Corresponds to the JSON property `endpoints`
+        # @return [Hash<String,String>]
+        attr_accessor :endpoints
+
+        # Output only. A URI pointing to the location of the stdout and stderr of the
+        # workload.
+        # Corresponds to the JSON property `outputUri`
+        # @return [String]
+        attr_accessor :output_uri
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @diagnostic_output_uri = args[:diagnostic_output_uri] if args.key?(:diagnostic_output_uri)
+          @endpoints = args[:endpoints] if args.key?(:endpoints)
+          @output_uri = args[:output_uri] if args.key?(:output_uri)
+        end
+      end
+
       # Security related configuration, including encryption, Kerberos, etc.
       class SecurityConfig
         include Google::Apis::Core::Hashable
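
RuntimeConfig is the input half (container image, properties, runtime version) and RuntimeInfo the output half (endpoints, output URIs) of a batch's runtime story. A hypothetical sketch of the input side (image path and property value are placeholders):

    # Hypothetical sketch: custom image plus a Spark property override.
    runtime = Google::Apis::DataprocV1::RuntimeConfig.new(
      container_image: 'gcr.io/example-project/custom-spark:latest',
      properties: { 'spark.executor.cores' => '4' }
    )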
@@ -3105,16 +3509,16 @@ module Google
 
       # An Identity and Access Management (IAM) policy, which specifies access
       # controls for Google Cloud resources.A Policy is a collection of bindings. A
-      # binding binds one or more members to a single role.
-      # accounts, service accounts, Google groups, and domains (such as G
-      # role is a named list of permissions; each role can be an IAM
-      # or a user-created custom role.For some types of Google Cloud
-      # binding can also specify a condition, which is a logical
-      # allows access to a resource only if the expression evaluates
-      # condition can add constraints based on attributes of the request,
-      # or both. To learn which resources support conditions in their
-      # see the IAM documentation (https://cloud.google.com/iam/help/
-      # resource-policies).JSON example: ` "bindings": [ ` "role": "roles/
+      # binding binds one or more members, or principals, to a single role. Principals
+      # can be user accounts, service accounts, Google groups, and domains (such as G
+      # Suite). A role is a named list of permissions; each role can be an IAM
+      # predefined role or a user-created custom role.For some types of Google Cloud
+      # resources, a binding can also specify a condition, which is a logical
+      # expression that allows access to a resource only if the expression evaluates
+      # to true. A condition can add constraints based on attributes of the request,
+      # the resource, or both. To learn which resources support conditions in their
+      # IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/
+      # conditions/resource-policies).JSON example: ` "bindings": [ ` "role": "roles/
       # resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "
       # group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@
       # appspot.gserviceaccount.com" ] `, ` "role": "roles/resourcemanager.
@@ -3222,6 +3626,83 @@ module Google
         end
       end
 
+      # A configuration for running an Apache Spark (http://spark.apache.org/) batch
+      # workload.
+      class SparkBatch
+        include Google::Apis::Core::Hashable
+
+        # Optional. HCFS URIs of archives to be extracted into the working directory of
+        # each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+        # Corresponds to the JSON property `archiveUris`
+        # @return [Array<String>]
+        attr_accessor :archive_uris
+
+        # Optional. The arguments to pass to the driver. Do not include arguments that
+        # can be set as batch properties, such as --conf, since a collision can occur
+        # that causes an incorrect batch submission.
+        # Corresponds to the JSON property `args`
+        # @return [Array<String>]
+        attr_accessor :args
+
+        # Optional. HCFS URIs of files to be placed in the working directory of each
+        # executor.
+        # Corresponds to the JSON property `fileUris`
+        # @return [Array<String>]
+        attr_accessor :file_uris
+
+        # Optional. HCFS URIs of jar files to add to the classpath of the Spark driver
+        # and tasks.
+        # Corresponds to the JSON property `jarFileUris`
+        # @return [Array<String>]
+        attr_accessor :jar_file_uris
+
+        # Optional. The name of the driver main class. The jar file that contains the
+        # class must be in the classpath or specified in jar_file_uris.
+        # Corresponds to the JSON property `mainClass`
+        # @return [String]
+        attr_accessor :main_class
+
+        # Optional. The HCFS URI of the jar file that contains the main class.
+        # Corresponds to the JSON property `mainJarFileUri`
+        # @return [String]
+        attr_accessor :main_jar_file_uri
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+          @args = args[:args] if args.key?(:args)
+          @file_uris = args[:file_uris] if args.key?(:file_uris)
+          @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+          @main_class = args[:main_class] if args.key?(:main_class)
+          @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
+        end
+      end
+
+      # Spark History Server configuration for the workload.
+      class SparkHistoryServerConfig
+        include Google::Apis::Core::Hashable
+
+        # Optional. Resource name of an existing Dataproc Cluster to act as a Spark
+        # History Server for the workload.Example: projects/[project_id]/regions/[region]
+        # /clusters/[cluster_name]
+        # Corresponds to the JSON property `dataprocCluster`
+        # @return [String]
+        attr_accessor :dataproc_cluster
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @dataproc_cluster = args[:dataproc_cluster] if args.key?(:dataproc_cluster)
+        end
+      end
+
       # A Dataproc job for running Apache Spark (http://spark.apache.org/)
       # applications on YARN.
       class SparkJob
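
SparkBatch takes either a main_jar_file_uri or a main_class (with the jar on the classpath via jar_file_uris), mirroring SparkJob. A hypothetical sketch (URIs and class name are placeholders):

    # Hypothetical sketch: a Spark batch driven by a main class.
    spark = Google::Apis::DataprocV1::SparkBatch.new(
      main_class: 'com.example.WordCount',
      jar_file_uris: ['gs://example-bucket/wordcount.jar']
    )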
@@ -3293,6 +3774,49 @@ module Google
         end
       end
 
+      # A configuration for running an Apache SparkR (https://spark.apache.org/docs/
+      # latest/sparkr.html) batch workload.
+      class SparkRBatch
+        include Google::Apis::Core::Hashable
+
+        # Optional. HCFS URIs of archives to be extracted into the working directory of
+        # each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+        # Corresponds to the JSON property `archiveUris`
+        # @return [Array<String>]
+        attr_accessor :archive_uris
+
+        # Optional. The arguments to pass to the Spark driver. Do not include arguments
+        # that can be set as batch properties, such as --conf, since a collision can
+        # occur that causes an incorrect batch submission.
+        # Corresponds to the JSON property `args`
+        # @return [Array<String>]
+        attr_accessor :args
+
+        # Optional. HCFS URIs of files to be placed in the working directory of each
+        # executor.
+        # Corresponds to the JSON property `fileUris`
+        # @return [Array<String>]
+        attr_accessor :file_uris
+
+        # Required. The HCFS URI of the main R file to use as the driver. Must be a .R
+        # or .r file.
+        # Corresponds to the JSON property `mainRFileUri`
+        # @return [String]
+        attr_accessor :main_r_file_uri
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+          @args = args[:args] if args.key?(:args)
+          @file_uris = args[:file_uris] if args.key?(:file_uris)
+          @main_r_file_uri = args[:main_r_file_uri] if args.key?(:main_r_file_uri)
+        end
+      end
+
       # A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/
       # sparkr.html) applications on YARN.
       class SparkRJob
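
SparkRBatch is the slimmest of the four workload configs: a required .R/.r driver file plus the usual file, archive, and argument lists. A hypothetical sketch (URIs are placeholders):

    # Hypothetical sketch: a SparkR batch.
    spark_r = Google::Apis::DataprocV1::SparkRBatch.new(
      main_r_file_uri: 'gs://example-bucket/analysis.R',
      args: ['--output', 'gs://example-bucket/results/']
    )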
|
@@ -3351,6 +3875,40 @@ module Google
|
|
3351
3875
|
end
|
3352
3876
|
end
|
3353
3877
|
|
3878
|
+
# A configuration for running Apache Spark SQL (http://spark.apache.org/sql/)
|
3879
|
+
# queries as a batch workload.
|
3880
|
+
class SparkSqlBatch
|
3881
|
+
include Google::Apis::Core::Hashable
|
3882
|
+
|
3883
|
+
# Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
|
3884
|
+
# Corresponds to the JSON property `jarFileUris`
|
3885
|
+
# @return [Array<String>]
|
3886
|
+
attr_accessor :jar_file_uris
|
3887
|
+
|
3888
|
+
# Required. The HCFS URI of the script that contains Spark SQL queries to
|
3889
|
+
# execute.
|
3890
|
+
# Corresponds to the JSON property `queryFileUri`
|
3891
|
+
# @return [String]
|
3892
|
+
attr_accessor :query_file_uri
|
3893
|
+
|
3894
|
+
# Optional. Mapping of query variable names to values (equivalent to the Spark
|
3895
|
+
# SQL command: SET name="value";).
|
3896
|
+
# Corresponds to the JSON property `queryVariables`
|
3897
|
+
# @return [Hash<String,String>]
|
3898
|
+
attr_accessor :query_variables
|
3899
|
+
|
3900
|
+
def initialize(**args)
|
3901
|
+
update!(**args)
|
3902
|
+
end
|
3903
|
+
|
3904
|
+
# Update properties of this object
|
3905
|
+
def update!(**args)
|
3906
|
+
@jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
|
3907
|
+
@query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
|
3908
|
+
@query_variables = args[:query_variables] if args.key?(:query_variables)
|
3909
|
+
end
|
3910
|
+
end
|
3911
|
+
|
3354
3912
|
# A Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/)
|
3355
3913
|
# queries.
|
3356
3914
|
class SparkSqlJob
|
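
SparkSqlBatch executes a script of SQL statements; each query_variables entry behaves like the Spark SQL command SET name="value";. A hypothetical sketch (URI and variable are placeholders):

    # Hypothetical sketch: a parameterized Spark SQL batch.
    spark_sql = Google::Apis::DataprocV1::SparkSqlBatch.new(
      query_file_uri: 'gs://example-bucket/report.sql',
      query_variables: { 'run_date' => '2021-10-01' }
    )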
@@ -3404,6 +3962,68 @@ module Google
         end
       end
 
+      # Basic autoscaling configurations for Spark Standalone.
+      class SparkStandaloneAutoscalingConfig
+        include Google::Apis::Core::Hashable
+
+        # Required. Timeout for Spark graceful decommissioning of spark workers.
+        # Specifies the duration to wait for spark workers to complete spark
+        # decommissioning tasks before forcefully removing workers. Only applicable to
+        # downscaling operations.Bounds: 0s, 1d.
+        # Corresponds to the JSON property `gracefulDecommissionTimeout`
+        # @return [String]
+        attr_accessor :graceful_decommission_timeout
+
+        # Required. Fraction of required executors to remove from Spark Serverless
+        # clusters. A scale-down factor of 1.0 will result in scaling down so that there
+        # are no more executors for the Spark Job (more aggressive scaling). A scale-
+        # down factor closer to 0 will result in a smaller magnitude of scaling down (
+        # less aggressive scaling).Bounds: 0.0, 1.0.
+        # Corresponds to the JSON property `scaleDownFactor`
+        # @return [Float]
+        attr_accessor :scale_down_factor
+
+        # Optional. Minimum scale-down threshold as a fraction of total cluster size
+        # before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1
+        # means the autoscaler must recommend at least a 2 worker scale-down for the
+        # cluster to scale. A threshold of 0 means the autoscaler will scale down on any
+        # recommended change.Bounds: 0.0, 1.0. Default: 0.0.
+        # Corresponds to the JSON property `scaleDownMinWorkerFraction`
+        # @return [Float]
+        attr_accessor :scale_down_min_worker_fraction
+
+        # Required. Fraction of required workers to add to Spark Standalone clusters. A
+        # scale-up factor of 1.0 will result in scaling up so that there are no more
+        # required workers for the Spark Job (more aggressive scaling). A scale-up
+        # factor closer to 0 will result in a smaller magnitude of scaling up (less
+        # aggressive scaling).Bounds: 0.0, 1.0.
+        # Corresponds to the JSON property `scaleUpFactor`
+        # @return [Float]
+        attr_accessor :scale_up_factor
+
+        # Optional. Minimum scale-up threshold as a fraction of total cluster size
+        # before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1
+        # means the autoscaler must recommend at least a 2-worker scale-up for the
+        # cluster to scale. A threshold of 0 means the autoscaler will scale up on any
+        # recommended change.Bounds: 0.0, 1.0. Default: 0.0.
+        # Corresponds to the JSON property `scaleUpMinWorkerFraction`
+        # @return [Float]
+        attr_accessor :scale_up_min_worker_fraction
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @graceful_decommission_timeout = args[:graceful_decommission_timeout] if args.key?(:graceful_decommission_timeout)
+          @scale_down_factor = args[:scale_down_factor] if args.key?(:scale_down_factor)
+          @scale_down_min_worker_fraction = args[:scale_down_min_worker_fraction] if args.key?(:scale_down_min_worker_fraction)
+          @scale_up_factor = args[:scale_up_factor] if args.key?(:scale_up_factor)
+          @scale_up_min_worker_fraction = args[:scale_up_min_worker_fraction] if args.key?(:scale_up_min_worker_fraction)
+        end
+      end
+
       # A request to start a cluster.
       class StartClusterRequest
         include Google::Apis::Core::Hashable
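
With the new config class wired into the basic algorithm via spark_standalone_config (see the hunk at -146 above), a Spark Standalone policy is structurally identical to a YARN one. A hypothetical sketch of an aggressive-up, cautious-down tuning (assuming the containing class is the generated BasicAutoscalingAlgorithm):

    # Hypothetical sketch: Spark Standalone autoscaling tuning.
    standalone = Google::Apis::DataprocV1::SparkStandaloneAutoscalingConfig.new(
      graceful_decommission_timeout: '300s',  # wait up to 5 minutes on scale-down
      scale_up_factor: 1.0,                   # add all required workers at once
      scale_down_factor: 0.5,                 # remove half of the removable executors
      scale_down_min_worker_fraction: 0.1     # ignore scale-downs under 10% of cluster
    )
    algorithm = Google::Apis::DataprocV1::BasicAutoscalingAlgorithm.new(
      spark_standalone_config: standalone
    )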
@@ -3438,6 +4058,37 @@ module Google
         end
       end
 
+      # Historical state information.
+      class StateHistory
+        include Google::Apis::Core::Hashable
+
+        # Output only. The state of the batch at this point in history.
+        # Corresponds to the JSON property `state`
+        # @return [String]
+        attr_accessor :state
+
+        # Output only. Details about the state at this point in history.
+        # Corresponds to the JSON property `stateMessage`
+        # @return [String]
+        attr_accessor :state_message
+
+        # Output only. The time when the batch entered the historical state.
+        # Corresponds to the JSON property `stateStartTime`
+        # @return [String]
+        attr_accessor :state_start_time
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @state = args[:state] if args.key?(:state)
+          @state_message = args[:state_message] if args.key?(:state_message)
+          @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
+        end
+      end
+
       # The Status type defines a logical error model that is suitable for different
       # programming environments, including REST APIs and RPC APIs. It is used by gRPC
       # (https://github.com/grpc). Each Status message contains three pieces of data: