google-api-client 0.9.28 → 0.10.0

Files changed (138)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +7 -4
  3. data/CHANGELOG.md +10 -0
  4. data/api_names.yaml +36512 -36326
  5. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  6. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +41 -41
  7. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +13 -13
  8. data/generated/google/apis/adexchangebuyer2_v2beta1/service.rb +110 -110
  9. data/generated/google/apis/adsense_v1_4.rb +1 -1
  10. data/generated/google/apis/adsensehost_v4_1.rb +1 -1
  11. data/generated/google/apis/analyticsreporting_v4.rb +3 -3
  12. data/generated/google/apis/analyticsreporting_v4/classes.rb +845 -845
  13. data/generated/google/apis/analyticsreporting_v4/representations.rb +184 -184
  14. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  15. data/generated/google/apis/androidenterprise_v1/classes.rb +5 -5
  16. data/generated/google/apis/androidenterprise_v1/service.rb +3 -1
  17. data/generated/google/apis/appstate_v1.rb +1 -1
  18. data/generated/google/apis/calendar_v3.rb +1 -1
  19. data/generated/google/apis/classroom_v1.rb +22 -25
  20. data/generated/google/apis/classroom_v1/classes.rb +998 -907
  21. data/generated/google/apis/classroom_v1/representations.rb +240 -240
  22. data/generated/google/apis/classroom_v1/service.rb +1269 -1061
  23. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  24. data/generated/google/apis/cloudbuild_v1/classes.rb +322 -322
  25. data/generated/google/apis/cloudbuild_v1/representations.rb +88 -88
  26. data/generated/google/apis/cloudbuild_v1/service.rb +57 -57
  27. data/generated/google/apis/clouddebugger_v2.rb +4 -4
  28. data/generated/google/apis/clouddebugger_v2/classes.rb +311 -311
  29. data/generated/google/apis/clouddebugger_v2/representations.rb +92 -92
  30. data/generated/google/apis/clouddebugger_v2/service.rb +41 -41
  31. data/generated/google/apis/cloudkms_v1beta1.rb +1 -1
  32. data/generated/google/apis/cloudkms_v1beta1/classes.rb +611 -611
  33. data/generated/google/apis/cloudkms_v1beta1/representations.rb +124 -124
  34. data/generated/google/apis/cloudkms_v1beta1/service.rb +254 -248
  35. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  36. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +116 -116
  37. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +28 -28
  38. data/generated/google/apis/cloudresourcemanager_v1/service.rb +257 -257
  39. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +4 -4
  40. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +394 -394
  41. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +79 -79
  42. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +269 -269
  43. data/generated/google/apis/cloudtrace_v1.rb +8 -8
  44. data/generated/google/apis/cloudtrace_v1/classes.rb +90 -84
  45. data/generated/google/apis/cloudtrace_v1/representations.rb +22 -22
  46. data/generated/google/apis/cloudtrace_v1/service.rb +40 -35
  47. data/generated/google/apis/compute_beta.rb +1 -1
  48. data/generated/google/apis/compute_beta/classes.rb +406 -31
  49. data/generated/google/apis/compute_beta/representations.rb +154 -0
  50. data/generated/google/apis/compute_beta/service.rb +262 -9
  51. data/generated/google/apis/compute_v1.rb +1 -1
  52. data/generated/google/apis/compute_v1/classes.rb +17 -20
  53. data/generated/google/apis/content_v2.rb +1 -1
  54. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  55. data/generated/google/apis/dataflow_v1b3/classes.rb +3226 -2847
  56. data/generated/google/apis/dataflow_v1b3/representations.rb +825 -677
  57. data/generated/google/apis/dataflow_v1b3/service.rb +175 -175
  58. data/generated/google/apis/dataproc_v1.rb +2 -2
  59. data/generated/google/apis/dataproc_v1/classes.rb +1213 -1136
  60. data/generated/google/apis/dataproc_v1/representations.rb +262 -244
  61. data/generated/google/apis/dataproc_v1/service.rb +298 -243
  62. data/generated/google/apis/datastore_v1.rb +4 -4
  63. data/generated/google/apis/datastore_v1/classes.rb +728 -728
  64. data/generated/google/apis/datastore_v1/representations.rb +167 -167
  65. data/generated/google/apis/datastore_v1/service.rb +68 -68
  66. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  67. data/generated/google/apis/drive_v2.rb +1 -1
  68. data/generated/google/apis/drive_v3.rb +1 -1
  69. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  70. data/generated/google/apis/games_management_v1management.rb +1 -1
  71. data/generated/google/apis/games_v1.rb +1 -1
  72. data/generated/google/apis/genomics_v1.rb +7 -7
  73. data/generated/google/apis/genomics_v1/classes.rb +932 -932
  74. data/generated/google/apis/genomics_v1/representations.rb +205 -205
  75. data/generated/google/apis/genomics_v1/service.rb +1228 -1228
  76. data/generated/google/apis/gmail_v1.rb +1 -1
  77. data/generated/google/apis/iam_v1/classes.rb +440 -440
  78. data/generated/google/apis/iam_v1/representations.rb +94 -94
  79. data/generated/google/apis/iam_v1/service.rb +60 -60
  80. data/generated/google/apis/identitytoolkit_v3.rb +1 -1
  81. data/generated/google/apis/identitytoolkit_v3/classes.rb +21 -1
  82. data/generated/google/apis/identitytoolkit_v3/representations.rb +3 -0
  83. data/generated/google/apis/kgsearch_v1/classes.rb +6 -6
  84. data/generated/google/apis/kgsearch_v1/representations.rb +1 -1
  85. data/generated/google/apis/kgsearch_v1/service.rb +21 -21
  86. data/generated/google/apis/language_v1beta1.rb +1 -1
  87. data/generated/google/apis/language_v1beta1/classes.rb +232 -232
  88. data/generated/google/apis/language_v1beta1/representations.rb +75 -75
  89. data/generated/google/apis/logging_v2beta1.rb +1 -1
  90. data/generated/google/apis/logging_v2beta1/classes.rb +345 -337
  91. data/generated/google/apis/logging_v2beta1/representations.rb +55 -55
  92. data/generated/google/apis/logging_v2beta1/service.rb +331 -306
  93. data/generated/google/apis/monitoring_v3.rb +1 -1
  94. data/generated/google/apis/monitoring_v3/classes.rb +253 -253
  95. data/generated/google/apis/monitoring_v3/representations.rb +58 -58
  96. data/generated/google/apis/monitoring_v3/service.rb +135 -135
  97. data/generated/google/apis/people_v1.rb +15 -16
  98. data/generated/google/apis/people_v1/classes.rb +985 -855
  99. data/generated/google/apis/people_v1/representations.rb +227 -220
  100. data/generated/google/apis/people_v1/service.rb +58 -52
  101. data/generated/google/apis/plus_domains_v1.rb +1 -1
  102. data/generated/google/apis/plus_v1.rb +1 -1
  103. data/generated/google/apis/pubsub_v1.rb +1 -1
  104. data/generated/google/apis/pubsub_v1/classes.rb +257 -257
  105. data/generated/google/apis/pubsub_v1/representations.rb +83 -83
  106. data/generated/google/apis/pubsub_v1/service.rb +390 -390
  107. data/generated/google/apis/script_v1.rb +17 -17
  108. data/generated/google/apis/script_v1/classes.rb +149 -141
  109. data/generated/google/apis/script_v1/representations.rb +26 -27
  110. data/generated/google/apis/script_v1/service.rb +9 -8
  111. data/generated/google/apis/sheets_v4.rb +1 -1
  112. data/generated/google/apis/sheets_v4/classes.rb +4223 -4188
  113. data/generated/google/apis/sheets_v4/representations.rb +850 -834
  114. data/generated/google/apis/sheets_v4/service.rb +192 -192
  115. data/generated/google/apis/slides_v1.rb +1 -1
  116. data/generated/google/apis/slides_v1/classes.rb +927 -800
  117. data/generated/google/apis/slides_v1/representations.rb +253 -211
  118. data/generated/google/apis/speech_v1beta1.rb +1 -1
  119. data/generated/google/apis/speech_v1beta1/classes.rb +94 -107
  120. data/generated/google/apis/speech_v1beta1/representations.rb +24 -36
  121. data/generated/google/apis/speech_v1beta1/service.rb +51 -54
  122. data/generated/google/apis/storage_v1.rb +1 -1
  123. data/generated/google/apis/vision_v1.rb +1 -1
  124. data/generated/google/apis/vision_v1/classes.rb +888 -885
  125. data/generated/google/apis/vision_v1/representations.rb +139 -139
  126. data/generated/google/apis/youtube_analytics_v1.rb +1 -1
  127. data/generated/google/apis/youtube_analytics_v1/service.rb +5 -1
  128. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  129. data/generated/google/apis/youtubereporting_v1.rb +1 -1
  130. data/generated/google/apis/youtubereporting_v1/classes.rb +80 -80
  131. data/generated/google/apis/youtubereporting_v1/representations.rb +23 -23
  132. data/generated/google/apis/youtubereporting_v1/service.rb +51 -51
  133. data/google-api-client.gemspec +2 -2
  134. data/lib/google/apis/core/api_command.rb +1 -1
  135. data/lib/google/apis/core/json_representation.rb +2 -2
  136. data/lib/google/apis/version.rb +1 -1
  137. data/samples/cli/lib/samples/you_tube.rb +4 -1
  138. metadata +36 -30
data/generated/google/apis/dataproc_v1.rb
@@ -20,12 +20,12 @@ module Google
   module Apis
     # Google Cloud Dataproc API
     #
-    # An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.
+    # Manages Hadoop-based clusters and jobs on Google Cloud Platform.
     #
     # @see https://cloud.google.com/dataproc/
     module DataprocV1
       VERSION = 'V1'
-      REVISION = '20161102'
+      REVISION = '20170207'
 
       # View and manage your data across Google Cloud Platform services
       AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
data/generated/google/apis/dataproc_v1/classes.rb
@@ -22,58 +22,35 @@ module Google
   module Apis
     module DataprocV1
 
-      # Describes the identifying information, config, and status of a cluster of
-      # Google Compute Engine instances.
-      class Cluster
+      # A YARN application created by a job. Application information is a subset of <
+      # code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
+      # Beta Feature: This report is available for testing purposes only. It may be
+      # changed before final release.
+      class YarnApplication
         include Google::Apis::Core::Hashable
 
-        # [Required] The Google Cloud Platform project ID that the cluster belongs to.
-        # Corresponds to the JSON property `projectId`
+        # Required The application name.
+        # Corresponds to the JSON property `name`
         # @return [String]
-        attr_accessor :project_id
+        attr_accessor :name
 
-        # [Required] The cluster name. Cluster names within a project must be unique.
-        # Names of deleted clusters can be reused.
-        # Corresponds to the JSON property `clusterName`
+        # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
+        # TimelineServer that provides application-specific information. The URL uses
+        # the internal hostname, and requires a proxy server for resolution and,
+        # possibly, access.
+        # Corresponds to the JSON property `trackingUrl`
         # @return [String]
-        attr_accessor :cluster_name
-
-        # The cluster config.
-        # Corresponds to the JSON property `config`
-        # @return [Google::Apis::DataprocV1::ClusterConfig]
-        attr_accessor :config
-
-        # [Optional] The labels to associate with this cluster. Label **keys** must
-        # contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.
-        # org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must
-        # contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.
-        # org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
-        # Corresponds to the JSON property `labels`
-        # @return [Hash<String,String>]
-        attr_accessor :labels
-
-        # The status of a cluster and its instances.
-        # Corresponds to the JSON property `status`
-        # @return [Google::Apis::DataprocV1::ClusterStatus]
-        attr_accessor :status
+        attr_accessor :tracking_url
 
-        # [Output-only] The previous cluster status.
-        # Corresponds to the JSON property `statusHistory`
-        # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
-        attr_accessor :status_history
+        # Required The numerical progress of the application, from 1 to 100.
+        # Corresponds to the JSON property `progress`
+        # @return [Float]
+        attr_accessor :progress
 
-        # [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc
-        # generates this value when it creates the cluster.
-        # Corresponds to the JSON property `clusterUuid`
+        # Required The application state.
+        # Corresponds to the JSON property `state`
         # @return [String]
-        attr_accessor :cluster_uuid
-
-        # Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta Feature**:
-        # This report is available for testing purposes only. It may be changed before
-        # final release.
-        # Corresponds to the JSON property `metrics`
-        # @return [Google::Apis::DataprocV1::ClusterMetrics]
-        attr_accessor :metrics
+        attr_accessor :state
 
         def initialize(**args)
           update!(**args)
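
The YarnApplication report above is read-only job metadata rather than something callers construct. A minimal sketch of consuming it, assuming the Job resource exposes the reports as `yarn_applications` (that accessor is not shown in this diff, so treat the name as an assumption; the `job` value would come from a jobs.get call elsewhere):

    require 'google/apis/dataproc_v1'

    # `job` is assumed to be a Google::Apis::DataprocV1::Job fetched elsewhere.
    # Each entry is one of the new YarnApplication reports (Beta Feature).
    (job.yarn_applications || []).each do |app|
      puts "#{app.name}: #{app.state} (#{app.progress}%)"
    end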
@@ -81,70 +58,33 @@ module Google
 
         # Update properties of this object
         def update!(**args)
-          @project_id = args[:project_id] if args.key?(:project_id)
-          @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
-          @config = args[:config] if args.key?(:config)
-          @labels = args[:labels] if args.key?(:labels)
-          @status = args[:status] if args.key?(:status)
-          @status_history = args[:status_history] if args.key?(:status_history)
-          @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
-          @metrics = args[:metrics] if args.key?(:metrics)
+          @name = args[:name] if args.key?(:name)
+          @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
+          @progress = args[:progress] if args.key?(:progress)
+          @state = args[:state] if args.key?(:state)
         end
       end
 
-      # The cluster config.
-      class ClusterConfig
+      # A list of queries to run on a cluster.
+      class QueryList
         include Google::Apis::Core::Hashable
 
-        # [Optional] A Google Cloud Storage staging bucket used for sharing generated
-        # SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc
-        # will determine an appropriate Cloud Storage location (US, ASIA, or EU) for
-        # your cluster's staging bucket according to the Google Compute Engine zone
-        # where your cluster is deployed, and then it will create and manage this
-        # project-level, per-location bucket for you.
-        # Corresponds to the JSON property `configBucket`
-        # @return [String]
-        attr_accessor :config_bucket
-
-        # Common config settings for resources of Google Compute Engine cluster
-        # instances, applicable to all instances in the cluster.
-        # Corresponds to the JSON property `gceClusterConfig`
-        # @return [Google::Apis::DataprocV1::GceClusterConfig]
-        attr_accessor :gce_cluster_config
-
-        # [Optional] The config settings for Google Compute Engine resources in an
-        # instance group, such as a master or worker group.
-        # Corresponds to the JSON property `masterConfig`
-        # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
-        attr_accessor :master_config
-
-        # [Optional] The config settings for Google Compute Engine resources in an
-        # instance group, such as a master or worker group.
-        # Corresponds to the JSON property `workerConfig`
-        # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
-        attr_accessor :worker_config
-
-        # [Optional] The config settings for Google Compute Engine resources in an
-        # instance group, such as a master or worker group.
-        # Corresponds to the JSON property `secondaryWorkerConfig`
-        # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
-        attr_accessor :secondary_worker_config
-
-        # Specifies the selection and config of software inside the cluster.
-        # Corresponds to the JSON property `softwareConfig`
-        # @return [Google::Apis::DataprocV1::SoftwareConfig]
-        attr_accessor :software_config
-
-        # [Optional] Commands to execute on each node after config is completed. By
-        # default, executables are run on master and all worker nodes. You can test a
-        # node's role metadata to run an executable on a master or worker node, as shown
-        # below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:
-        # Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
-        # if [[ "$`ROLE`" == 'Master' ]]; then ... master specific actions ... else ...
-        # worker specific actions ... fi
-        # Corresponds to the JSON property `initializationActions`
-        # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
-        attr_accessor :initialization_actions
+        # Required The queries to execute. You do not need to terminate a query with a
+        # semicolon. Multiple queries can be specified in one string by separating each
+        # with a semicolon. Here is an example of a Cloud Dataproc API snippet that
+        # uses a QueryList to specify a HiveJob:
+        # "hiveJob": `
+        # "queryList": `
+        # "queries": [
+        # "query1",
+        # "query2",
+        # "query3;query4",
+        # ]
+        # `
+        # `
+        # Corresponds to the JSON property `queries`
+        # @return [Array<String>]
+        attr_accessor :queries
 
         def initialize(**args)
           update!(**args)
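
The queries docstring above gives the JSON shape; a rough Ruby equivalent with this gem's generated models (class and attribute names are taken from the diff, the query strings are placeholders):

    require 'google/apis/dataproc_v1'

    # 'query3;query4' runs as two statements: queries in one string
    # may be separated by semicolons.
    query_list = Google::Apis::DataprocV1::QueryList.new(
      queries: ['query1', 'query2', 'query3;query4']
    )
    hive_job = Google::Apis::DataprocV1::HiveJob.new(query_list: query_list)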
@@ -152,80 +92,69 @@ module Google
 
         # Update properties of this object
        def update!(**args)
-          @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
-          @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
-          @master_config = args[:master_config] if args.key?(:master_config)
-          @worker_config = args[:worker_config] if args.key?(:worker_config)
-          @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
-          @software_config = args[:software_config] if args.key?(:software_config)
-          @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
+          @queries = args[:queries] if args.key?(:queries)
         end
       end
 
-      # Common config settings for resources of Google Compute Engine cluster
-      # instances, applicable to all instances in the cluster.
-      class GceClusterConfig
+      # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
+      # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
+      # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
+      # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+      class HadoopJob
         include Google::Apis::Core::Hashable
 
-        # [Required] The zone where the Google Compute Engine cluster will be located.
-        # Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
-        # zone]`.
-        # Corresponds to the JSON property `zoneUri`
-        # @return [String]
-        attr_accessor :zone_uri
+        # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+        # Corresponds to the JSON property `jarFileUris`
+        # @return [Array<String>]
+        attr_accessor :jar_file_uris
 
-        # [Optional] The Google Compute Engine network to be used for machine
-        # communications. Cannot be specified with subnetwork_uri. If neither `
-        # network_uri` nor `subnetwork_uri` is specified, the "default" network of the
-        # project is used, if it exists. Cannot be a "Custom Subnet Network" (see [Using
-        # Subnetworks](/compute/docs/subnetworks) for more information). Example: `https:
-        # //www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`.
-        # Corresponds to the JSON property `networkUri`
-        # @return [String]
-        attr_accessor :network_uri
+        # The runtime logging config of the job.
+        # Corresponds to the JSON property `loggingConfig`
+        # @return [Google::Apis::DataprocV1::LoggingConfig]
+        attr_accessor :logging_config
 
-        # [Optional] The Google Compute Engine subnetwork to be used for machine
-        # communications. Cannot be specified with network_uri. Example: `https://www.
-        # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`.
-        # Corresponds to the JSON property `subnetworkUri`
-        # @return [String]
-        attr_accessor :subnetwork_uri
+        # Optional A mapping of property names to values, used to configure Hadoop.
+        # Properties that conflict with values set by the Cloud Dataproc API may be
+        # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
+        # in user code.
+        # Corresponds to the JSON property `properties`
+        # @return [Hash<String,String>]
+        attr_accessor :properties
 
-        # [Optional] If true, all instances in the cluster will only have internal IP
-        # addresses. By default, clusters are not restricted to internal IP addresses,
-        # and will have ephemeral external IP addresses assigned to each instance. This `
-        # internal_ip_only` restriction can only be enabled for subnetwork enabled
-        # networks, and all off-cluster dependencies must be configured to be accessible
-        # without external IP addresses.
-        # Corresponds to the JSON property `internalIpOnly`
-        # @return [Boolean]
-        attr_accessor :internal_ip_only
-        alias_method :internal_ip_only?, :internal_ip_only
+        # Optional The arguments to pass to the driver. Do not include arguments, such
+        # as -libjars or -Dfoo=bar, that can be set as job properties, since a collision
+        # may occur that causes an incorrect job submission.
+        # Corresponds to the JSON property `args`
+        # @return [Array<String>]
+        attr_accessor :args
 
-        # [Optional] The URIs of service account scopes to be included in Google Compute
-        # Engine instances. The following base set of scopes is always included: * https:
-        # //www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.
-        # com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write
-        # If no scopes are specified, the following defaults are also provided: * https:/
-        # /www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.
-        # admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.
-        # googleapis.com/auth/devstorage.full_control
-        # Corresponds to the JSON property `serviceAccountScopes`
+        # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the
+        # working directory of Hadoop drivers and distributed tasks. Useful for naively
+        # parallel tasks.
+        # Corresponds to the JSON property `fileUris`
         # @return [Array<String>]
-        attr_accessor :service_account_scopes
+        attr_accessor :file_uris
 
-        # The Google Compute Engine tags to add to all instances (see [Tagging instances]
-        # (/compute/docs/label-or-tag-resources#tags)).
-        # Corresponds to the JSON property `tags`
+        # The name of the driver's main class. The jar file containing the class must be
+        # in the default CLASSPATH or specified in jar_file_uris.
+        # Corresponds to the JSON property `mainClass`
+        # @return [String]
+        attr_accessor :main_class
+
+        # Optional HCFS URIs of archives to be extracted in the working directory of
+        # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
+        # zip.
+        # Corresponds to the JSON property `archiveUris`
         # @return [Array<String>]
-        attr_accessor :tags
+        attr_accessor :archive_uris
 
-        # The Google Compute Engine metadata entries to add to all instances (see [
-        # Project and instance metadata](https://cloud.google.com/compute/docs/storing-
-        # retrieving-metadata#project_and_instance_metadata)).
-        # Corresponds to the JSON property `metadata`
-        # @return [Hash<String,String>]
-        attr_accessor :metadata
+        # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
+        # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
+        # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
+        # mapreduce-examples.jar'
+        # Corresponds to the JSON property `mainJarFileUri`
+        # @return [String]
+        attr_accessor :main_jar_file_uri
 
         def initialize(**args)
           update!(**args)
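
As a sketch, the new HadoopJob model is built like any other Hashable in this gem; the jar URI and bucket paths below are hypothetical:

    require 'google/apis/dataproc_v1'

    hadoop_job = Google::Apis::DataprocV1::HadoopJob.new(
      # HCFS URI of the driver jar (hypothetical example path).
      main_jar_file_uri: 'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar',
      # Plain arguments only; flags such as -libjars belong in `properties`.
      args: ['wordcount', 'gs://my-bucket/input/', 'gs://my-bucket/output/']
    )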
@@ -233,76 +162,27 @@ module Google
 
         # Update properties of this object
         def update!(**args)
-          @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
-          @network_uri = args[:network_uri] if args.key?(:network_uri)
-          @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
-          @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
-          @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
-          @tags = args[:tags] if args.key?(:tags)
-          @metadata = args[:metadata] if args.key?(:metadata)
+          @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+          @logging_config = args[:logging_config] if args.key?(:logging_config)
+          @properties = args[:properties] if args.key?(:properties)
+          @args = args[:args] if args.key?(:args)
+          @file_uris = args[:file_uris] if args.key?(:file_uris)
+          @main_class = args[:main_class] if args.key?(:main_class)
+          @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+          @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
         end
       end
 
-      # [Optional] The config settings for Google Compute Engine resources in an
-      # instance group, such as a master or worker group.
-      class InstanceGroupConfig
+      # A request to collect cluster diagnostic information.
+      class DiagnoseClusterRequest
         include Google::Apis::Core::Hashable
 
-        # [Required] The number of VM instances in the instance group. For master
-        # instance groups, must be set to 1.
-        # Corresponds to the JSON property `numInstances`
-        # @return [Fixnum]
-        attr_accessor :num_instances
-
-        # [Optional] The list of instance names. Cloud Dataproc derives the names from `
-        # cluster_name`, `num_instances`, and the instance group if not set by user (
-        # recommended practice is to let Cloud Dataproc derive the name).
-        # Corresponds to the JSON property `instanceNames`
-        # @return [Array<String>]
-        attr_accessor :instance_names
-
-        # [Output-only] The Google Compute Engine image resource used for cluster
-        # instances. Inferred from `SoftwareConfig.image_version`.
-        # Corresponds to the JSON property `imageUri`
-        # @return [String]
-        attr_accessor :image_uri
-
-        # [Required] The Google Compute Engine machine type used for cluster instances.
-        # Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
-        # east1-a/machineTypes/n1-standard-2`.
-        # Corresponds to the JSON property `machineTypeUri`
-        # @return [String]
-        attr_accessor :machine_type_uri
-
-        # Specifies the config of disk options for a group of VM instances.
-        # Corresponds to the JSON property `diskConfig`
-        # @return [Google::Apis::DataprocV1::DiskConfig]
-        attr_accessor :disk_config
-
-        # [Optional] Specifies that this instance group contains preemptible instances.
-        # Corresponds to the JSON property `isPreemptible`
-        # @return [Boolean]
-        attr_accessor :is_preemptible
-        alias_method :is_preemptible?, :is_preemptible
-
-        # Specifies the resources used to actively manage an instance group.
-        # Corresponds to the JSON property `managedGroupConfig`
-        # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
-        attr_accessor :managed_group_config
-
         def initialize(**args)
           update!(**args)
         end
 
         # Update properties of this object
         def update!(**args)
-          @num_instances = args[:num_instances] if args.key?(:num_instances)
-          @instance_names = args[:instance_names] if args.key?(:instance_names)
-          @image_uri = args[:image_uri] if args.key?(:image_uri)
-          @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
-          @disk_config = args[:disk_config] if args.key?(:disk_config)
-          @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
-          @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
         end
       end
 
@@ -310,76 +190,74 @@ module Google
       class DiskConfig
         include Google::Apis::Core::Hashable
 
-        # [Optional] Size in GB of the boot disk (default is 500GB).
-        # Corresponds to the JSON property `bootDiskSizeGb`
-        # @return [Fixnum]
-        attr_accessor :boot_disk_size_gb
-
-        # [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are
-        # not attached, the boot disk is used to store runtime logs and [HDFS](https://
-        # hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs
-        # are attached, this runtime bulk data is spread across them, and the boot disk
+        # Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
+        # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
+        # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
+        # attached, this runtime bulk data is spread across them, and the boot disk
        # contains only basic config and installed binaries.
         # Corresponds to the JSON property `numLocalSsds`
         # @return [Fixnum]
         attr_accessor :num_local_ssds
 
+        # Optional Size in GB of the boot disk (default is 500GB).
+        # Corresponds to the JSON property `bootDiskSizeGb`
+        # @return [Fixnum]
+        attr_accessor :boot_disk_size_gb
+
         def initialize(**args)
           update!(**args)
         end
 
         # Update properties of this object
         def update!(**args)
-          @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
           @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
+          @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
         end
       end
 
-      # Specifies the resources used to actively manage an instance group.
-      class ManagedGroupConfig
+      # Metadata describing the operation.
+      class ClusterOperationMetadata
         include Google::Apis::Core::Hashable
 
-        # [Output-only] The name of the Instance Template used for the Managed Instance
-        # Group.
-        # Corresponds to the JSON property `instanceTemplateName`
-        # @return [String]
-        attr_accessor :instance_template_name
+        # Output-only Errors encountered during operation execution.
+        # Corresponds to the JSON property `warnings`
+        # @return [Array<String>]
+        attr_accessor :warnings
 
-        # [Output-only] The name of the Instance Group Manager for this group.
-        # Corresponds to the JSON property `instanceGroupManagerName`
-        # @return [String]
-        attr_accessor :instance_group_manager_name
+        # Output-only Labels associated with the operation
+        # Corresponds to the JSON property `labels`
+        # @return [Hash<String,String>]
+        attr_accessor :labels
 
-        def initialize(**args)
-          update!(**args)
-        end
+        # The status of the operation.
+        # Corresponds to the JSON property `status`
+        # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
+        attr_accessor :status
 
-        # Update properties of this object
-        def update!(**args)
-          @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
-          @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
-        end
-      end
+        # Output-only The previous operation status.
+        # Corresponds to the JSON property `statusHistory`
+        # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
+        attr_accessor :status_history
 
-      # Specifies the selection and config of software inside the cluster.
-      class SoftwareConfig
-        include Google::Apis::Core::Hashable
+        # Output-only Name of the cluster for the operation.
+        # Corresponds to the JSON property `clusterName`
+        # @return [String]
+        attr_accessor :cluster_name
 
-        # [Optional] The version of software inside the cluster. It must match the
-        # regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the latest
-        # version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
-        # Corresponds to the JSON property `imageVersion`
+        # Output-only Cluster UUID for the operation.
+        # Corresponds to the JSON property `clusterUuid`
         # @return [String]
-        attr_accessor :image_version
+        attr_accessor :cluster_uuid
 
-        # [Optional] The properties to set on daemon config files. Property keys are
-        # specified in `prefix:property` format, such as `core:fs.defaultFS`. The
-        # following are supported prefixes and their mappings: * core: `core-site.xml` *
-        # hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: `yarn-site.xml` *
-        # hive: `hive-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf`
-        # Corresponds to the JSON property `properties`
-        # @return [Hash<String,String>]
-        attr_accessor :properties
+        # Output-only The operation type.
+        # Corresponds to the JSON property `operationType`
+        # @return [String]
+        attr_accessor :operation_type
+
+        # Output-only Short description of operation.
+        # Corresponds to the JSON property `description`
+        # @return [String]
+        attr_accessor :description
 
         def initialize(**args)
           update!(**args)
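
The DiskConfig hunk above only reorders the two fields, so construction is unchanged; a minimal sketch:

    require 'google/apis/dataproc_v1'

    disk_config = Google::Apis::DataprocV1::DiskConfig.new(
      boot_disk_size_gb: 500, # default is 500GB
      num_local_ssds: 2       # 0-4; with SSDs attached, runtime bulk data moves off the boot disk
    )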
@@ -387,86 +265,78 @@ module Google
 
         # Update properties of this object
         def update!(**args)
-          @image_version = args[:image_version] if args.key?(:image_version)
-          @properties = args[:properties] if args.key?(:properties)
+          @warnings = args[:warnings] if args.key?(:warnings)
+          @labels = args[:labels] if args.key?(:labels)
+          @status = args[:status] if args.key?(:status)
+          @status_history = args[:status_history] if args.key?(:status_history)
+          @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
+          @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
+          @operation_type = args[:operation_type] if args.key?(:operation_type)
+          @description = args[:description] if args.key?(:description)
         end
       end
 
-      # Specifies an executable to run on a fully configured node and a timeout period
-      # for executable completion.
-      class NodeInitializationAction
+      # A generic empty message that you can re-use to avoid defining duplicated empty
+      # messages in your APIs. A typical example is to use it as the request or the
+      # response type of an API method. For instance:
+      # service Foo `
+      # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+      # `
+      # The JSON representation for Empty is empty JSON object ``.
+      class Empty
         include Google::Apis::Core::Hashable
 
-        # [Required] Google Cloud Storage URI of executable file.
-        # Corresponds to the JSON property `executableFile`
-        # @return [String]
-        attr_accessor :executable_file
-
-        # [Optional] Amount of time executable has to complete. Default is 10 minutes.
-        # Cluster creation fails with an explanatory error message (the name of the
-        # executable that caused the error and the exceeded timeout period) if the
-        # executable is not completed at end of the timeout period.
-        # Corresponds to the JSON property `executionTimeout`
-        # @return [String]
-        attr_accessor :execution_timeout
-
         def initialize(**args)
           update!(**args)
         end
 
         # Update properties of this object
         def update!(**args)
-          @executable_file = args[:executable_file] if args.key?(:executable_file)
-          @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
         end
       end
 
-      # The status of a cluster and its instances.
-      class ClusterStatus
+      # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
+      # queries on YARN.
+      class HiveJob
         include Google::Apis::Core::Hashable
 
-        # [Output-only] The cluster's state.
-        # Corresponds to the JSON property `state`
-        # @return [String]
-        attr_accessor :state
-
-        # [Output-only] Optional details of cluster's state.
-        # Corresponds to the JSON property `detail`
-        # @return [String]
-        attr_accessor :detail
+        # A list of queries to run on a cluster.
+        # Corresponds to the JSON property `queryList`
+        # @return [Google::Apis::DataprocV1::QueryList]
+        attr_accessor :query_list
 
-        # [Output-only] Time when this state was entered.
-        # Corresponds to the JSON property `stateStartTime`
+        # The HCFS URI of the script that contains Hive queries.
+        # Corresponds to the JSON property `queryFileUri`
         # @return [String]
-        attr_accessor :state_start_time
-
-        def initialize(**args)
-          update!(**args)
-        end
-
-        # Update properties of this object
-        def update!(**args)
-          @state = args[:state] if args.key?(:state)
-          @detail = args[:detail] if args.key?(:detail)
-          @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
-        end
-      end
+        attr_accessor :query_file_uri
 
-      # Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta Feature**:
-      # This report is available for testing purposes only. It may be changed before
-      # final release.
-      class ClusterMetrics
-        include Google::Apis::Core::Hashable
+        # Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and
+        # Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
+        # Corresponds to the JSON property `jarFileUris`
+        # @return [Array<String>]
+        attr_accessor :jar_file_uris
 
-        # The HDFS metrics.
-        # Corresponds to the JSON property `hdfsMetrics`
+        # Optional Mapping of query variable names to values (equivalent to the Hive
+        # command: SET name="value";).
+        # Corresponds to the JSON property `scriptVariables`
         # @return [Hash<String,String>]
-        attr_accessor :hdfs_metrics
+        attr_accessor :script_variables
 
-        # The YARN metrics.
-        # Corresponds to the JSON property `yarnMetrics`
+        # Optional A mapping of property names and values, used to configure Hive.
+        # Properties that conflict with values set by the Cloud Dataproc API may be
+        # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
+        # hive/conf/hive-site.xml, and classes in user code.
+        # Corresponds to the JSON property `properties`
         # @return [Hash<String,String>]
-        attr_accessor :yarn_metrics
+        attr_accessor :properties
+
+        # Optional Whether to continue executing queries if a query fails. The default
+        # value is false. Setting to true can be useful when executing independent
+        # parallel queries.
+        # Corresponds to the JSON property `continueOnFailure`
+        # @return [Boolean]
+        attr_accessor :continue_on_failure
+        alias_method :continue_on_failure?, :continue_on_failure
 
         def initialize(**args)
           update!(**args)
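
Putting the new HiveJob fields together, a hedged sketch (the script URI and variable values are hypothetical; a job would set either query_file_uri or query_list, not both):

    require 'google/apis/dataproc_v1'

    hive_job = Google::Apis::DataprocV1::HiveJob.new(
      query_file_uri: 'gs://my-bucket/queries/report.hql', # hypothetical script
      script_variables: { 'run_date' => '2017-02-07' },    # Hive: SET name="value";
      continue_on_failure: true # keep going when independent queries fail
    )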
@@ -474,84 +344,24 @@ module Google
 
         # Update properties of this object
         def update!(**args)
-          @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
-          @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
+          @query_list = args[:query_list] if args.key?(:query_list)
+          @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
+          @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+          @script_variables = args[:script_variables] if args.key?(:script_variables)
+          @properties = args[:properties] if args.key?(:properties)
+          @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
         end
       end
 
-      # This resource represents a long-running operation that is the result of a
-      # network API call.
-      class Operation
+      # The location of diagnostic output.
+      class DiagnoseClusterResults
         include Google::Apis::Core::Hashable
 
-        # The server-assigned name, which is only unique within the same service that
-        # originally returns it. If you use the default HTTP mapping, the `name` should
-        # have the format of `operations/some/unique/name`.
-        # Corresponds to the JSON property `name`
+        # Output-only The Google Cloud Storage URI of the diagnostic output. The output
+        # report is a plain text file with a summary of collected diagnostics.
+        # Corresponds to the JSON property `outputUri`
         # @return [String]
-        attr_accessor :name
-
-        # Service-specific metadata associated with the operation. It typically contains
-        # progress information and common metadata such as create time. Some services
-        # might not provide such metadata. Any method that returns a long-running
-        # operation should document the metadata type, if any.
-        # Corresponds to the JSON property `metadata`
-        # @return [Hash<String,Object>]
-        attr_accessor :metadata
-
-        # If the value is `false`, it means the operation is still in progress. If true,
-        # the operation is completed, and either `error` or `response` is available.
-        # Corresponds to the JSON property `done`
-        # @return [Boolean]
-        attr_accessor :done
-        alias_method :done?, :done
-
-        # The `Status` type defines a logical error model that is suitable for different
-        # programming environments, including REST APIs and RPC APIs. It is used by [
-        # gRPC](https://github.com/grpc). The error model is designed to be: - Simple to
-        # use and understand for most users - Flexible enough to meet unexpected needs #
-        # Overview The `Status` message contains three pieces of data: error code, error
-        # message, and error details. The error code should be an enum value of google.
-        # rpc.Code, but it may accept additional error codes if needed. The error
-        # message should be a developer-facing English message that helps developers *
-        # understand* and *resolve* the error. If a localized user-facing error message
-        # is needed, put the localized message in the error details or localize it in
-        # the client. The optional error details may contain arbitrary information about
-        # the error. There is a predefined set of error detail types in the package `
-        # google.rpc` which can be used for common error conditions. # Language mapping
-        # The `Status` message is the logical representation of the error model, but it
-        # is not necessarily the actual wire format. When the `Status` message is
-        # exposed in different client libraries and different wire protocols, it can be
-        # mapped differently. For example, it will likely be mapped to some exceptions
-        # in Java, but more likely mapped to some error codes in C. # Other uses The
-        # error model and the `Status` message can be used in a variety of environments,
-        # either with or without APIs, to provide a consistent developer experience
-        # across different environments. Example uses of this error model include: -
-        # Partial errors. If a service needs to return partial errors to the client, it
-        # may embed the `Status` in the normal response to indicate the partial errors. -
-        # Workflow errors. A typical workflow has multiple steps. Each step may have a `
-        # Status` message for error reporting purpose. - Batch operations. If a client
-        # uses batch request and batch response, the `Status` message should be used
-        # directly inside batch response, one for each error sub-response. -
-        # Asynchronous operations. If an API call embeds asynchronous operation results
-        # in its response, the status of those operations should be represented directly
-        # using the `Status` message. - Logging. If some API errors are stored in logs,
-        # the message `Status` could be used directly after any stripping needed for
-        # security/privacy reasons.
-        # Corresponds to the JSON property `error`
-        # @return [Google::Apis::DataprocV1::Status]
-        attr_accessor :error
-
-        # The normal response of the operation in case of success. If the original
-        # method returns no data on success, such as `Delete`, the response is `google.
-        # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
-        # the response should be the resource. For other methods, the response should
-        # have the type `XxxResponse`, where `Xxx` is the original method name. For
-        # example, if the original method name is `TakeSnapshot()`, the inferred
-        # response type is `TakeSnapshotResponse`.
-        # Corresponds to the JSON property `response`
-        # @return [Hash<String,Object>]
-        attr_accessor :response
+        attr_accessor :output_uri
 
         def initialize(**args)
           update!(**args)
@@ -559,94 +369,67 @@ module Google
 
         # Update properties of this object
         def update!(**args)
-          @name = args[:name] if args.key?(:name)
-          @metadata = args[:metadata] if args.key?(:metadata)
-          @done = args[:done] if args.key?(:done)
-          @error = args[:error] if args.key?(:error)
-          @response = args[:response] if args.key?(:response)
+          @output_uri = args[:output_uri] if args.key?(:output_uri)
         end
       end
 
-      # The `Status` type defines a logical error model that is suitable for different
-      # programming environments, including REST APIs and RPC APIs. It is used by [
-      # gRPC](https://github.com/grpc). The error model is designed to be: - Simple to
-      # use and understand for most users - Flexible enough to meet unexpected needs #
-      # Overview The `Status` message contains three pieces of data: error code, error
-      # message, and error details. The error code should be an enum value of google.
-      # rpc.Code, but it may accept additional error codes if needed. The error
-      # message should be a developer-facing English message that helps developers *
-      # understand* and *resolve* the error. If a localized user-facing error message
-      # is needed, put the localized message in the error details or localize it in
-      # the client. The optional error details may contain arbitrary information about
-      # the error. There is a predefined set of error detail types in the package `
-      # google.rpc` which can be used for common error conditions. # Language mapping
-      # The `Status` message is the logical representation of the error model, but it
-      # is not necessarily the actual wire format. When the `Status` message is
-      # exposed in different client libraries and different wire protocols, it can be
-      # mapped differently. For example, it will likely be mapped to some exceptions
-      # in Java, but more likely mapped to some error codes in C. # Other uses The
-      # error model and the `Status` message can be used in a variety of environments,
-      # either with or without APIs, to provide a consistent developer experience
-      # across different environments. Example uses of this error model include: -
-      # Partial errors. If a service needs to return partial errors to the client, it
-      # may embed the `Status` in the normal response to indicate the partial errors. -
-      # Workflow errors. A typical workflow has multiple steps. Each step may have a `
-      # Status` message for error reporting purpose. - Batch operations. If a client
-      # uses batch request and batch response, the `Status` message should be used
-      # directly inside batch response, one for each error sub-response. -
-      # Asynchronous operations. If an API call embeds asynchronous operation results
-      # in its response, the status of those operations should be represented directly
-      # using the `Status` message. - Logging. If some API errors are stored in logs,
-      # the message `Status` could be used directly after any stripping needed for
-      # security/privacy reasons.
-      class Status
+      # The cluster config.
+      class ClusterConfig
         include Google::Apis::Core::Hashable
 
-        # The status code, which should be an enum value of google.rpc.Code.
-        # Corresponds to the JSON property `code`
-        # @return [Fixnum]
-        attr_accessor :code
-
-        # A developer-facing error message, which should be in English. Any user-facing
-        # error message should be localized and sent in the google.rpc.Status.details
-        # field, or localized by the client.
-        # Corresponds to the JSON property `message`
-        # @return [String]
-        attr_accessor :message
-
-        # A list of messages that carry the error details. There will be a common set of
-        # message types for APIs to use.
-        # Corresponds to the JSON property `details`
-        # @return [Array<Hash<String,Object>>]
-        attr_accessor :details
+        # Common config settings for resources of Google Compute Engine cluster
+        # instances, applicable to all instances in the cluster.
+        # Corresponds to the JSON property `gceClusterConfig`
+        # @return [Google::Apis::DataprocV1::GceClusterConfig]
+        attr_accessor :gce_cluster_config
 
-        def initialize(**args)
-          update!(**args)
-        end
+        # Specifies the selection and config of software inside the cluster.
+        # Corresponds to the JSON property `softwareConfig`
+        # @return [Google::Apis::DataprocV1::SoftwareConfig]
+        attr_accessor :software_config
 
-        # Update properties of this object
-        def update!(**args)
-          @code = args[:code] if args.key?(:code)
-          @message = args[:message] if args.key?(:message)
-          @details = args[:details] if args.key?(:details)
-        end
-      end
+        # Optional The config settings for Google Compute Engine resources in an
+        # instance group, such as a master or worker group.
+        # Corresponds to the JSON property `masterConfig`
+        # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
+        attr_accessor :master_config
 
-      # The list of all clusters in a project.
-      class ListClustersResponse
-        include Google::Apis::Core::Hashable
+        # Optional The config settings for Google Compute Engine resources in an
+        # instance group, such as a master or worker group.
+        # Corresponds to the JSON property `secondaryWorkerConfig`
+        # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
+        attr_accessor :secondary_worker_config
 
-        # [Output-only] The clusters in the project.
-        # Corresponds to the JSON property `clusters`
-        # @return [Array<Google::Apis::DataprocV1::Cluster>]
-        attr_accessor :clusters
+        # Optional Commands to execute on each node after config is completed. By
+        # default, executables are run on master and all worker nodes. You can test a
+        # node's <code>role</code> metadata to run an executable on a master or worker
+        # node, as shown below using curl (you can also use wget):
+        # ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/
+        # instance/attributes/dataproc-role)
+        # if [[ "$`ROLE`" == 'Master' ]]; then
+        # ... master specific actions ...
+        # else
+        # ... worker specific actions ...
+        # fi
+        # Corresponds to the JSON property `initializationActions`
+        # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
+        attr_accessor :initialization_actions
 
-        # [Output-only] This token is included in the response if there are more results
-        # to fetch. To fetch additional results, provide this value as the `page_token`
-        # in a subsequent ListClustersRequest.
-        # Corresponds to the JSON property `nextPageToken`
+        # Optional A Google Cloud Storage staging bucket used for sharing generated SSH
+        # keys and config. If you do not specify a staging bucket, Cloud Dataproc will
+        # determine an appropriate Cloud Storage location (US, ASIA, or EU) for your
+        # cluster's staging bucket according to the Google Compute Engine zone where
+        # your cluster is deployed, and then it will create and manage this project-
+        # level, per-location bucket for you.
+        # Corresponds to the JSON property `configBucket`
         # @return [String]
-        attr_accessor :next_page_token
+        attr_accessor :config_bucket
+
+        # Optional The config settings for Google Compute Engine resources in an
+        # instance group, such as a master or worker group.
+        # Corresponds to the JSON property `workerConfig`
+        # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
+        attr_accessor :worker_config
 
         def initialize(**args)
           update!(**args)
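
The reshuffled ClusterConfig nests the same sub-configs as before; a minimal sketch with one initialization action (the bucket and script are hypothetical, and Dataproc creates the staging bucket itself when config_bucket is omitted):

    require 'google/apis/dataproc_v1'

    cluster_config = Google::Apis::DataprocV1::ClusterConfig.new(
      initialization_actions: [
        Google::Apis::DataprocV1::NodeInitializationAction.new(
          executable_file: 'gs://my-bucket/actions/setup.sh' # hypothetical
        )
      ]
    )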
@@ -654,32 +437,164 @@ module Google
654
437
 
655
438
  # Update properties of this object
656
439
  def update!(**args)
657
- @clusters = args[:clusters] if args.key?(:clusters)
658
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
440
+ @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
441
+ @software_config = args[:software_config] if args.key?(:software_config)
442
+ @master_config = args[:master_config] if args.key?(:master_config)
443
+ @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
444
+ @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
445
+ @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
446
+ @worker_config = args[:worker_config] if args.key?(:worker_config)
659
447
  end
660
448
  end

- # A request to collect cluster diagnostic information.
- class DiagnoseClusterRequest
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
+ # 0.9.0/python-programming-guide.html) applications on YARN.
+ class PySparkJob
  include Google::Apis::Core::Hashable

- def initialize(**args)
- update!(**args)
- end
+ # Required The HCFS URI of the main Python file to use as the driver. Must be a
+ # .py file.
+ # Corresponds to the JSON property `mainPythonFileUri`
+ # @return [String]
+ attr_accessor :main_python_file_uri
+
+ # Optional HCFS URIs of archives to be extracted in the working directory.
+ # Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+ # Corresponds to the JSON property `archiveUris`
+ # @return [Array<String>]
+ attr_accessor :archive_uris
+
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver
+ # and tasks.
+ # Corresponds to the JSON property `jarFileUris`
+ # @return [Array<String>]
+ attr_accessor :jar_file_uris
+
+ # The runtime logging config of the job.
+ # Corresponds to the JSON property `loggingConfig`
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
+ attr_accessor :logging_config
+
+ # Optional A mapping of property names to values, used to configure PySpark.
+ # Properties that conflict with values set by the Cloud Dataproc API may be
+ # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
+ # and classes in user code.
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties
+
+ # Optional The arguments to pass to the driver. Do not include arguments, such
+ # as --conf, that can be set as job properties, since a collision may occur that
+ # causes an incorrect job submission.
+ # Corresponds to the JSON property `args`
+ # @return [Array<String>]
+ attr_accessor :args
+
+ # Optional HCFS URIs of files to be copied to the working directory of Python
+ # drivers and distributed tasks. Useful for naively parallel tasks.
+ # Corresponds to the JSON property `fileUris`
+ # @return [Array<String>]
+ attr_accessor :file_uris
+
+ # Optional HCFS file URIs of Python files to pass to the PySpark framework.
+ # Supported file types: .py, .egg, and .zip.
+ # Corresponds to the JSON property `pythonFileUris`
+ # @return [Array<String>]
+ attr_accessor :python_file_uris
+
+ def initialize(**args)
+ update!(**args)
+ end

  # Update properties of this object
  def update!(**args)
+ @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @properties = args[:properties] if args.key?(:properties)
+ @args = args[:args] if args.key?(:args)
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
+ @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
  end
  end
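Since PySparkJob moves to this position in 0.10.0, a hedged usage sketch may help; every gs:// URI below is a placeholder:

```ruby
pyspark_job = Google::Apis::DataprocV1::PySparkJob.new(
  main_python_file_uri: 'gs://my-bucket/jobs/wordcount.py',  # must be a .py file
  python_file_uris: ['gs://my-bucket/jobs/helpers.py'],
  args: ['gs://my-bucket/input/'],                           # avoid flags like --conf here
  properties: { 'spark.executor.memory' => '2g' },
  logging_config: Google::Apis::DataprocV1::LoggingConfig.new(
    driver_log_levels: { 'root' => 'INFO' }
  )
)
```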

- # A request to submit a job.
- class SubmitJobRequest
+ # Common config settings for resources of Google Compute Engine cluster
+ # instances, applicable to all instances in the cluster.
+ class GceClusterConfig
  include Google::Apis::Core::Hashable

- # A Cloud Dataproc job resource.
- # Corresponds to the JSON property `job`
- # @return [Google::Apis::DataprocV1::Job]
- attr_accessor :job
+ # The Google Compute Engine tags to add to all instances (see Tagging instances).
+ # Corresponds to the JSON property `tags`
+ # @return [Array<String>]
+ attr_accessor :tags
+
+ # Optional The service account of the instances. Defaults to the default Google
+ # Compute Engine service account. Custom service accounts need permissions
+ # equivalent to the following IAM roles:
+ # roles/logging.logWriter
+ # roles/storage.objectAdmin (see https://cloud.google.com/compute/docs/access/
+ # service-accounts#custom_service_accounts for more information). Example: [
+ # account_id]@[project_id].iam.gserviceaccount.com
+ # Corresponds to the JSON property `serviceAccount`
+ # @return [String]
+ attr_accessor :service_account
+
+ # Optional The Google Compute Engine subnetwork to be used for machine
+ # communications. Cannot be specified with network_uri. Example: https://www.
+ # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
+ # Corresponds to the JSON property `subnetworkUri`
+ # @return [String]
+ attr_accessor :subnetwork_uri
+
+ # Optional The Google Compute Engine network to be used for machine
+ # communications. Cannot be specified with subnetwork_uri. If neither
+ # network_uri nor subnetwork_uri is specified, the "default" network of the
+ # project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using
+ # Subnetworks for more information). Example: https://www.googleapis.com/compute/
+ # v1/projects/[project_id]/regions/global/default.
+ # Corresponds to the JSON property `networkUri`
+ # @return [String]
+ attr_accessor :network_uri
+
+ # Required The zone where the Google Compute Engine cluster will be located.
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
+ # zone].
+ # Corresponds to the JSON property `zoneUri`
+ # @return [String]
+ attr_accessor :zone_uri
+
+ # Optional If true, all instances in the cluster will only have internal IP
+ # addresses. By default, clusters are not restricted to internal IP addresses,
+ # and will have ephemeral external IP addresses assigned to each instance. This
+ # internal_ip_only restriction can only be enabled for subnetwork enabled
+ # networks, and all off-cluster dependencies must be configured to be accessible
+ # without external IP addresses.
+ # Corresponds to the JSON property `internalIpOnly`
+ # @return [Boolean]
+ attr_accessor :internal_ip_only
+ alias_method :internal_ip_only?, :internal_ip_only
+
+ # The Google Compute Engine metadata entries to add to all instances (see
+ # Project and instance metadata (https://cloud.google.com/compute/docs/storing-
+ # retrieving-metadata#project_and_instance_metadata)).
+ # Corresponds to the JSON property `metadata`
+ # @return [Hash<String,String>]
+ attr_accessor :metadata
+
+ # Optional The URIs of service account scopes to be included in Google Compute
+ # Engine instances. The following base set of scopes is always included:
+ # https://www.googleapis.com/auth/cloud.useraccounts.readonly
+ # https://www.googleapis.com/auth/devstorage.read_write
+ # https://www.googleapis.com/auth/logging.write. If no scopes are specified, the
+ # following defaults are also provided:
+ # https://www.googleapis.com/auth/bigquery
+ # https://www.googleapis.com/auth/bigtable.admin.table
+ # https://www.googleapis.com/auth/bigtable.data
+ # https://www.googleapis.com/auth/devstorage.full_control
+ # Corresponds to the JSON property `serviceAccountScopes`
+ # @return [Array<String>]
+ attr_accessor :service_account_scopes

  def initialize(**args)
  update!(**args)
@@ -687,100 +602,198 @@ module Google

  # Update properties of this object
  def update!(**args)
- @job = args[:job] if args.key?(:job)
+ @tags = args[:tags] if args.key?(:tags)
+ @service_account = args[:service_account] if args.key?(:service_account)
+ @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
+ @network_uri = args[:network_uri] if args.key?(:network_uri)
+ @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
+ @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
+ @metadata = args[:metadata] if args.key?(:metadata)
+ @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
  end
  end
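A hedged sketch of the fields above; the project, zone, and subnetwork values are placeholders, and network_uri is deliberately left unset because it cannot be combined with subnetwork_uri:

```ruby
gce_config = Google::Apis::DataprocV1::GceClusterConfig.new(
  zone_uri: 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b',
  subnetwork_uri: 'https://www.googleapis.com/compute/v1/projects/my-project/regions/us-east1/sub0',
  internal_ip_only: false,           # default: ephemeral external IPs are assigned
  tags: ['dataproc'],
  metadata: { 'env' => 'staging' },
  service_account_scopes: ['https://www.googleapis.com/auth/cloud-platform']
)
```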

- # A Cloud Dataproc job resource.
- class Job
+ # Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature:
+ # This report is available for testing purposes only. It may be changed before
+ # final release.
+ class ClusterMetrics
  include Google::Apis::Core::Hashable

- # Encapsulates the full scoping used to reference a job.
- # Corresponds to the JSON property `reference`
- # @return [Google::Apis::DataprocV1::JobReference]
- attr_accessor :reference
+ # The HDFS metrics.
+ # Corresponds to the JSON property `hdfsMetrics`
+ # @return [Hash<String,String>]
+ attr_accessor :hdfs_metrics

- # Cloud Dataproc job config.
- # Corresponds to the JSON property `placement`
- # @return [Google::Apis::DataprocV1::JobPlacement]
- attr_accessor :placement
+ # The YARN metrics.
+ # Corresponds to the JSON property `yarnMetrics`
+ # @return [Hash<String,String>]
+ attr_accessor :yarn_metrics

- # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.
- # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
- # MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/
- # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
- # Corresponds to the JSON property `hadoopJob`
- # @return [Google::Apis::DataprocV1::HadoopJob]
- attr_accessor :hadoop_job
+ def initialize(**args)
+ update!(**args)
+ end

- # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
- # applications on YARN.
- # Corresponds to the JSON property `sparkJob`
- # @return [Google::Apis::DataprocV1::SparkJob]
- attr_accessor :spark_job
+ # Update properties of this object
+ def update!(**args)
+ @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
+ @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
+ end
+ end

- # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/
- # docs/0.9.0/python-programming-guide.html) applications on YARN.
- # Corresponds to the JSON property `pysparkJob`
- # @return [Google::Apis::DataprocV1::PySparkJob]
- attr_accessor :pyspark_job
+ # The runtime logging config of the job.
+ class LoggingConfig
+ include Google::Apis::Core::Hashable

- # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
- # queries on YARN.
- # Corresponds to the JSON property `hiveJob`
- # @return [Google::Apis::DataprocV1::HiveJob]
- attr_accessor :hive_job
+ # The per-package log levels for the driver. This may include "root" package
+ # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
+ # org.apache = DEBUG'
+ # Corresponds to the JSON property `driverLogLevels`
+ # @return [Hash<String,String>]
+ attr_accessor :driver_log_levels

- # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries
- # on YARN.
- # Corresponds to the JSON property `pigJob`
- # @return [Google::Apis::DataprocV1::PigJob]
- attr_accessor :pig_job
+ def initialize(**args)
+ update!(**args)
+ end

- # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/
- # sql/) queries.
- # Corresponds to the JSON property `sparkSqlJob`
- # @return [Google::Apis::DataprocV1::SparkSqlJob]
- attr_accessor :spark_sql_job
+ # Update properties of this object
+ def update!(**args)
+ @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
+ end
+ end
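The driverLogLevels examples above map directly onto a Ruby hash; a minimal sketch:

```ruby
logging = Google::Apis::DataprocV1::LoggingConfig.new(
  driver_log_levels: {
    'root'       => 'INFO',   # configures rootLogger
    'org.apache' => 'DEBUG',  # per-package override
    'com.google' => 'FATAL'
  }
)
```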

- # Cloud Dataproc job status.
- # Corresponds to the JSON property `status`
- # @return [Google::Apis::DataprocV1::JobStatus]
- attr_accessor :status
+ # The location where output from the diagnostic command can be found.
+ class DiagnoseClusterOutputLocation
+ include Google::Apis::Core::Hashable

- # [Output-only] The previous job status.
- # Corresponds to the JSON property `statusHistory`
- # @return [Array<Google::Apis::DataprocV1::JobStatus>]
- attr_accessor :status_history
+ # Output-only The Google Cloud Storage URI of the diagnostic output. This will
+ # be a plain text file with a summary of collected diagnostics.
+ # Corresponds to the JSON property `outputUri`
+ # @return [String]
+ attr_accessor :output_uri

- # [Output-only] The collection of YARN applications spun up by this job. **Beta**
- # Feature: This report is available for testing purposes only. It may be
- # changed before final release.
- # Corresponds to the JSON property `yarnApplications`
- # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
- attr_accessor :yarn_applications
+ def initialize(**args)
+ update!(**args)
+ end

- # [Output-only] A URI pointing to the location of the stdout of the job's driver
- # program.
- # Corresponds to the JSON property `driverOutputResourceUri`
+ # Update properties of this object
+ def update!(**args)
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
+ end
+ end
+
+ # This resource represents a long-running operation that is the result of a
+ # network API call.
+ class Operation
+ include Google::Apis::Core::Hashable
+
+ # If the value is false, it means the operation is still in progress. If true,
+ # the operation is completed, and either error or response is available.
+ # Corresponds to the JSON property `done`
+ # @return [Boolean]
+ attr_accessor :done
+ alias_method :done?, :done
+
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as Delete, the response is google.
+ # protobuf.Empty. If the original method is standard Get/Create/Update, the
+ # response should be the resource. For other methods, the response should have
+ # the type XxxResponse, where Xxx is the original method name. For example, if
+ # the original method name is TakeSnapshot(), the inferred response type is
+ # TakeSnapshotResponse.
+ # Corresponds to the JSON property `response`
+ # @return [Hash<String,Object>]
+ attr_accessor :response
+
+ # The server-assigned name, which is only unique within the same service that
+ # originally returns it. If you use the default HTTP mapping, the name should
+ # have the format of operations/some/unique/name.
+ # Corresponds to the JSON property `name`
  # @return [String]
- attr_accessor :driver_output_resource_uri
+ attr_accessor :name

- # [Output-only] If present, the location of miscellaneous control files which
- # may be used as part of job setup and handling. If not present, control files
- # may be placed in the same location as `driver_output_uri`.
- # Corresponds to the JSON property `driverControlFilesUri`
+ # The Status type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
+ # (https://github.com/grpc). The error model is designed to be:
+ # Simple to use and understand for most users
+ # Flexible enough to meet unexpected needs.
+ # Overview: The Status message contains
+ # three pieces of data: error code, error message, and error details. The error
+ # code should be an enum value of google.rpc.Code, but it may accept additional
+ # error codes if needed. The error message should be a developer-facing English
+ # message that helps developers understand and resolve the error. If a localized
+ # user-facing error message is needed, put the localized message in the error
+ # details or localize it in the client. The optional error details may contain
+ # arbitrary information about the error. There is a predefined set of error
+ # detail types in the package google.rpc which can be used for common error
+ # conditions.
+ # Language mapping: The Status message is the logical representation of
+ # the error model, but it is not necessarily the actual wire format. When the
+ # Status message is exposed in different client libraries and different wire
+ # protocols, it can be mapped differently. For example, it will likely be mapped
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
+ # Other uses: The error model and the Status message can be used in a variety of
+ # environments, either with or without APIs, to provide a consistent developer
+ # experience across different environments. Example uses of this error model
+ # include:
+ # Partial errors. If a service needs to return partial errors to the client, it
+ # may embed the Status in the normal response to indicate the partial errors.
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
+ # Status message for error reporting purposes.
+ # Batch operations. If a client uses batch request and batch response, the
+ # Status message should be used directly inside batch response, one for each
+ # error sub-response.
+ # Asynchronous operations. If an API call embeds asynchronous operation results
+ # in its response, the status of those operations should be represented directly
+ # using the Status message.
+ # Logging. If some API errors are stored in logs, the message Status could be
+ # used directly after any stripping needed for security/privacy reasons.
+ # Corresponds to the JSON property `error`
+ # @return [Google::Apis::DataprocV1::Status]
+ attr_accessor :error
+
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
+ # Corresponds to the JSON property `metadata`
+ # @return [Hash<String,Object>]
+ attr_accessor :metadata
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @done = args[:done] if args.key?(:done)
+ @response = args[:response] if args.key?(:response)
+ @name = args[:name] if args.key?(:name)
+ @error = args[:error] if args.key?(:error)
+ @metadata = args[:metadata] if args.key?(:metadata)
+ end
+ end
+
774
+ # The status of the operation.
775
+ class OperationStatus
776
+ include Google::Apis::Core::Hashable
777
+
778
+ # A message containing the operation state.
779
+ # Corresponds to the JSON property `state`
773
780
  # @return [String]
774
- attr_accessor :driver_control_files_uri
781
+ attr_accessor :state
775
782
 
776
- # [Optional] The labels to associate with this job. Label **keys** must contain
777
- # 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/
778
- # rfc1035.txt). Label **values** may be empty, but, if present, must contain 1
779
- # to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/
780
- # rfc1035.txt). No more than 32 labels can be associated with a job.
781
- # Corresponds to the JSON property `labels`
782
- # @return [Hash<String,String>]
783
- attr_accessor :labels
783
+ # A message containing any operation metadata details.
784
+ # Corresponds to the JSON property `details`
785
+ # @return [String]
786
+ attr_accessor :details
787
+
788
+ # A message containing the detailed operation state.
789
+ # Corresponds to the JSON property `innerState`
790
+ # @return [String]
791
+ attr_accessor :inner_state
792
+
793
+ # The time this state was entered.
794
+ # Corresponds to the JSON property `stateStartTime`
795
+ # @return [String]
796
+ attr_accessor :state_start_time
784
797
 
785
798
  def initialize(**args)
786
799
  update!(**args)
@@ -788,20 +801,10 @@ module Google
788
801
 
789
802
  # Update properties of this object
790
803
  def update!(**args)
791
- @reference = args[:reference] if args.key?(:reference)
792
- @placement = args[:placement] if args.key?(:placement)
793
- @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
794
- @spark_job = args[:spark_job] if args.key?(:spark_job)
795
- @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
796
- @hive_job = args[:hive_job] if args.key?(:hive_job)
797
- @pig_job = args[:pig_job] if args.key?(:pig_job)
798
- @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
799
- @status = args[:status] if args.key?(:status)
800
- @status_history = args[:status_history] if args.key?(:status_history)
801
- @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
802
- @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
803
- @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
804
- @labels = args[:labels] if args.key?(:labels)
804
+ @state = args[:state] if args.key?(:state)
805
+ @details = args[:details] if args.key?(:details)
806
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
807
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
805
808
  end
806
809
  end
807
810
 
@@ -809,12 +812,12 @@ module Google
809
812
  class JobReference
810
813
  include Google::Apis::Core::Hashable
811
814
 
812
- # [Required] The ID of the Google Cloud Platform project that the job belongs to.
815
+ # Required The ID of the Google Cloud Platform project that the job belongs to.
813
816
  # Corresponds to the JSON property `projectId`
814
817
  # @return [String]
815
818
  attr_accessor :project_id
816
819
 
817
- # [Optional] The job ID, which must be unique within the project. The job ID is
820
+ # Optional The job ID, which must be unique within the project. The job ID is
818
821
  # generated by the server upon job submission or provided by the user as a means
819
822
  # to perform retries without creating duplicate jobs. The ID must contain only
820
823
  # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
@@ -834,20 +837,79 @@ module Google
834
837
  end
835
838
  end
836
839
 
- # Cloud Dataproc job config.
- class JobPlacement
+ # A request to submit a job.
+ class SubmitJobRequest
  include Google::Apis::Core::Hashable

- # [Required] The name of the cluster where the job will be submitted.
- # Corresponds to the JSON property `clusterName`
- # @return [String]
- attr_accessor :cluster_name
+ # A Cloud Dataproc job resource.
+ # Corresponds to the JSON property `job`
+ # @return [Google::Apis::DataprocV1::Job]
+ attr_accessor :job

- # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the
- # job is submitted.
- # Corresponds to the JSON property `clusterUuid`
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @job = args[:job] if args.key?(:job)
+ end
+ end
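To tie the request wrapper to the job classes above, a hedged end-to-end sketch; the Job class itself is defined elsewhere in this file, the project, region, and cluster values are placeholders, and submit_job as the generated jobs.submit method name is an assumption:

```ruby
request = Google::Apis::DataprocV1::SubmitJobRequest.new(
  job: Google::Apis::DataprocV1::Job.new(
    placement: Google::Apis::DataprocV1::JobPlacement.new(cluster_name: 'my-cluster'),
    pyspark_job: pyspark_job  # e.g. the PySparkJob built in the earlier sketch
  )
)
# job = service.submit_job('my-project', 'global', request)  # assumed method name
```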
+
+ # The Status type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
+ # (https://github.com/grpc). The error model is designed to be:
+ # Simple to use and understand for most users
+ # Flexible enough to meet unexpected needs.
+ # Overview: The Status message contains
+ # three pieces of data: error code, error message, and error details. The error
+ # code should be an enum value of google.rpc.Code, but it may accept additional
+ # error codes if needed. The error message should be a developer-facing English
+ # message that helps developers understand and resolve the error. If a localized
+ # user-facing error message is needed, put the localized message in the error
+ # details or localize it in the client. The optional error details may contain
+ # arbitrary information about the error. There is a predefined set of error
+ # detail types in the package google.rpc which can be used for common error
+ # conditions.
+ # Language mapping: The Status message is the logical representation of
+ # the error model, but it is not necessarily the actual wire format. When the
+ # Status message is exposed in different client libraries and different wire
+ # protocols, it can be mapped differently. For example, it will likely be mapped
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
+ # Other uses: The error model and the Status message can be used in a variety of
+ # environments, either with or without APIs, to provide a consistent developer
+ # experience across different environments. Example uses of this error model
+ # include:
+ # Partial errors. If a service needs to return partial errors to the client, it
+ # may embed the Status in the normal response to indicate the partial errors.
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
+ # Status message for error reporting purposes.
+ # Batch operations. If a client uses batch request and batch response, the
+ # Status message should be used directly inside batch response, one for each
+ # error sub-response.
+ # Asynchronous operations. If an API call embeds asynchronous operation results
+ # in its response, the status of those operations should be represented directly
+ # using the Status message.
+ # Logging. If some API errors are stored in logs, the message Status could be
+ # used directly after any stripping needed for security/privacy reasons.
+ class Status
+ include Google::Apis::Core::Hashable
+
+ # The status code, which should be an enum value of google.rpc.Code.
+ # Corresponds to the JSON property `code`
+ # @return [Fixnum]
+ attr_accessor :code
+
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
+ # Corresponds to the JSON property `message`
  # @return [String]
- attr_accessor :cluster_uuid
+ attr_accessor :message
+
+ # A list of messages that carry the error details. There will be a common set of
+ # message types for APIs to use.
+ # Corresponds to the JSON property `details`
+ # @return [Array<Hash<String,Object>>]
+ attr_accessor :details

  def initialize(**args)
  update!(**args)
@@ -855,71 +917,142 @@ module Google

  # Update properties of this object
  def update!(**args)
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
+ @code = args[:code] if args.key?(:code)
+ @message = args[:message] if args.key?(:message)
+ @details = args[:details] if args.key?(:details)
  end
  end

- # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.
- # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
- # MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/
- # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
- class HadoopJob
+ # Job scheduling options. Beta Feature: These options are available for testing
+ # purposes only. They may be changed before final release.
+ class JobScheduling
  include Google::Apis::Core::Hashable

- # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
- # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
- # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
- # mapreduce-examples.jar'
- # Corresponds to the JSON property `mainJarFileUri`
+ # Optional Maximum number of times per hour a driver may be restarted as a
+ # result of the driver terminating with a non-zero code before the job is
+ # reported failed. A job may be reported as thrashing if the driver exits with a
+ # non-zero code 4 times within a 10-minute window. Maximum value is 10.
+ # Corresponds to the JSON property `maxFailuresPerHour`
+ # @return [Fixnum]
+ attr_accessor :max_failures_per_hour
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @max_failures_per_hour = args[:max_failures_per_hour] if args.key?(:max_failures_per_hour)
+ end
+ end
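For example, capping driver restarts at three per hour (a sketch; the Job field that carries this object is not shown in this hunk):

```ruby
scheduling = Google::Apis::DataprocV1::JobScheduling.new(max_failures_per_hour: 3)
```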
+
+ # Optional The config settings for Google Compute Engine resources in an
+ # instance group, such as a master or worker group.
+ class InstanceGroupConfig
+ include Google::Apis::Core::Hashable
+
+ # Specifies the config of disk options for a group of VM instances.
+ # Corresponds to the JSON property `diskConfig`
+ # @return [Google::Apis::DataprocV1::DiskConfig]
+ attr_accessor :disk_config
+
+ # Required The Google Compute Engine machine type used for cluster instances.
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
+ # east1-a/machineTypes/n1-standard-2.
+ # Corresponds to the JSON property `machineTypeUri`
  # @return [String]
- attr_accessor :main_jar_file_uri
+ attr_accessor :machine_type_uri

- # The name of the driver's main class. The jar file containing the class must be
- # in the default CLASSPATH or specified in `jar_file_uris`.
- # Corresponds to the JSON property `mainClass`
+ # Specifies the resources used to actively manage an instance group.
+ # Corresponds to the JSON property `managedGroupConfig`
+ # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
+ attr_accessor :managed_group_config
+
+ # Optional Specifies that this instance group contains preemptible instances.
+ # Corresponds to the JSON property `isPreemptible`
+ # @return [Boolean]
+ attr_accessor :is_preemptible
+ alias_method :is_preemptible?, :is_preemptible
+
+ # Output-only The Google Compute Engine image resource used for cluster
+ # instances. Inferred from SoftwareConfig.image_version.
+ # Corresponds to the JSON property `imageUri`
  # @return [String]
- attr_accessor :main_class
+ attr_accessor :image_uri

- # [Optional] The arguments to pass to the driver. Do not include arguments, such
- # as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a
- # collision may occur that causes an incorrect job submission.
- # Corresponds to the JSON property `args`
+ # Optional The list of instance names. Cloud Dataproc derives the names from
+ # cluster_name, num_instances, and the instance group if not set by user (
+ # recommended practice is to let Cloud Dataproc derive the name).
+ # Corresponds to the JSON property `instanceNames`
  # @return [Array<String>]
- attr_accessor :args
+ attr_accessor :instance_names

- # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and
- # tasks.
- # Corresponds to the JSON property `jarFileUris`
- # @return [Array<String>]
- attr_accessor :jar_file_uris
+ # Required The number of VM instances in the instance group. For master instance
+ # groups, must be set to 1.
+ # Corresponds to the JSON property `numInstances`
+ # @return [Fixnum]
+ attr_accessor :num_instances

- # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to
- # the working directory of Hadoop drivers and distributed tasks. Useful for
- # naively parallel tasks.
- # Corresponds to the JSON property `fileUris`
- # @return [Array<String>]
- attr_accessor :file_uris
+ def initialize(**args)
+ update!(**args)
+ end

- # [Optional] HCFS URIs of archives to be extracted in the working directory of
- # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
- # zip.
- # Corresponds to the JSON property `archiveUris`
- # @return [Array<String>]
- attr_accessor :archive_uris
+ # Update properties of this object
+ def update!(**args)
+ @disk_config = args[:disk_config] if args.key?(:disk_config)
+ @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
+ @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
+ @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
+ @image_uri = args[:image_uri] if args.key?(:image_uri)
+ @instance_names = args[:instance_names] if args.key?(:instance_names)
+ @num_instances = args[:num_instances] if args.key?(:num_instances)
+ end
+ end
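A hedged sketch of a two-node worker group; the machine type URI follows the example format above, and DiskConfig's own fields are defined elsewhere in this file:

```ruby
workers = Google::Apis::DataprocV1::InstanceGroupConfig.new(
  num_instances: 2,  # must be 1 only for master instance groups
  machine_type_uri: 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-a/machineTypes/n1-standard-2',
  is_preemptible: false,
  disk_config: Google::Apis::DataprocV1::DiskConfig.new  # fields defined elsewhere
)
```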

- # [Optional] A mapping of property names to values, used to configure Hadoop.
- # Properties that conflict with values set by the Cloud Dataproc API may be
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
- # in user code.
- # Corresponds to the JSON property `properties`
- # @return [Hash<String,String>]
- attr_accessor :properties
+ # A list of jobs in a project.
+ class ListJobsResponse
+ include Google::Apis::Core::Hashable
+
+ # Output-only Jobs list.
+ # Corresponds to the JSON property `jobs`
+ # @return [Array<Google::Apis::DataprocV1::Job>]
+ attr_accessor :jobs
+
+ # Optional This token is included in the response if there are more results to
+ # fetch. To fetch additional results, provide this value as the page_token in a
+ # subsequent ListJobsRequest.
+ # Corresponds to the JSON property `nextPageToken`
+ # @return [String]
+ attr_accessor :next_page_token
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @jobs = args[:jobs] if args.key?(:jobs)
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
+ end
+ end
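The nextPageToken contract above implies the usual pagination loop; a hedged sketch in which the service class name and list_jobs method name are assumptions about the generated service, and auth setup is omitted:

```ruby
require 'google/apis/dataproc_v1'

service = Google::Apis::DataprocV1::DataprocService.new  # plus authorization, not shown
jobs  = []
token = nil
loop do
  response = service.list_jobs('my-project', 'global', page_token: token)  # assumed method
  jobs.concat(response.jobs || [])
  token = response.next_page_token
  break if token.nil?  # absent token means no more results
end
```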
+
+ # Specifies an executable to run on a fully configured node and a timeout period
+ # for executable completion.
+ class NodeInitializationAction
+ include Google::Apis::Core::Hashable

- # The runtime logging config of the job.
- # Corresponds to the JSON property `loggingConfig`
- # @return [Google::Apis::DataprocV1::LoggingConfig]
- attr_accessor :logging_config
+ # Required Google Cloud Storage URI of the executable file.
+ # Corresponds to the JSON property `executableFile`
+ # @return [String]
+ attr_accessor :executable_file
+
+ # Optional Amount of time the executable has to complete. Default is 10 minutes.
+ # Cluster creation fails with an explanatory error message (the name of the
+ # executable that caused the error and the exceeded timeout period) if the
+ # executable is not completed by the end of the timeout period.
+ # Corresponds to the JSON property `executionTimeout`
+ # @return [String]
+ attr_accessor :execution_timeout

  def initialize(**args)
  update!(**args)
@@ -927,164 +1060,160 @@ module Google

  # Update properties of this object
  def update!(**args)
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
- @main_class = args[:main_class] if args.key?(:main_class)
- @args = args[:args] if args.key?(:args)
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
- @file_uris = args[:file_uris] if args.key?(:file_uris)
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
- @properties = args[:properties] if args.key?(:properties)
- @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @executable_file = args[:executable_file] if args.key?(:executable_file)
+ @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
  end
  end
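A hedged sketch of an initialization action; the gs:// path is a placeholder, and the '600s' duration string is an assumption about how this API serializes executionTimeout:

```ruby
init_action = Google::Apis::DataprocV1::NodeInitializationAction.new(
  executable_file: 'gs://my-bucket/scripts/install-deps.sh',  # placeholder
  execution_timeout: '600s'                                   # assumed duration format
)
```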

- # The runtime logging config of the job.
- class LoggingConfig
+ # A request to cancel a job.
+ class CancelJobRequest
  include Google::Apis::Core::Hashable

- # The per-package log levels for the driver. This may include "root" package
- # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
- # org.apache = DEBUG'
- # Corresponds to the JSON property `driverLogLevels`
- # @return [Hash<String,String>]
- attr_accessor :driver_log_levels
-
  def initialize(**args)
  update!(**args)
  end

  # Update properties of this object
  def update!(**args)
- @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
  end
  end

- # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
- # applications on YARN.
- class SparkJob
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/)
+ # queries.
+ class SparkSqlJob
  include Google::Apis::Core::Hashable

- # The HCFS URI of the jar file that contains the main class.
- # Corresponds to the JSON property `mainJarFileUri`
+ # The HCFS URI of the script that contains SQL queries.
+ # Corresponds to the JSON property `queryFileUri`
  # @return [String]
- attr_accessor :main_jar_file_uri
+ attr_accessor :query_file_uri

- # The name of the driver's main class. The jar file that contains the class must
- # be in the default CLASSPATH or specified in `jar_file_uris`.
- # Corresponds to the JSON property `mainClass`
- # @return [String]
- attr_accessor :main_class
+ # A list of queries to run on a cluster.
+ # Corresponds to the JSON property `queryList`
+ # @return [Google::Apis::DataprocV1::QueryList]
+ attr_accessor :query_list

- # [Optional] The arguments to pass to the driver. Do not include arguments, such
- # as `--conf`, that can be set as job properties, since a collision may occur
- # that causes an incorrect job submission.
- # Corresponds to the JSON property `args`
- # @return [Array<String>]
- attr_accessor :args
+ # Optional Mapping of query variable names to values (equivalent to the Spark
+ # SQL command: SET name="value";).
+ # Corresponds to the JSON property `scriptVariables`
+ # @return [Hash<String,String>]
+ attr_accessor :script_variables

- # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
- # and tasks.
+ # Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.
  # Corresponds to the JSON property `jarFileUris`
  # @return [Array<String>]
  attr_accessor :jar_file_uris

- # [Optional] HCFS URIs of files to be copied to the working directory of Spark
- # drivers and distributed tasks. Useful for naively parallel tasks.
- # Corresponds to the JSON property `fileUris`
- # @return [Array<String>]
- attr_accessor :file_uris
-
- # [Optional] HCFS URIs of archives to be extracted in the working directory of
- # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
- # zip.
- # Corresponds to the JSON property `archiveUris`
- # @return [Array<String>]
- attr_accessor :archive_uris
-
- # [Optional] A mapping of property names to values, used to configure Spark.
- # Properties that conflict with values set by the Cloud Dataproc API may be
- # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
- # and classes in user code.
- # Corresponds to the JSON property `properties`
- # @return [Hash<String,String>]
- attr_accessor :properties
-
  # The runtime logging config of the job.
  # Corresponds to the JSON property `loggingConfig`
  # @return [Google::Apis::DataprocV1::LoggingConfig]
  attr_accessor :logging_config

+ # Optional A mapping of property names to values, used to configure Spark SQL's
+ # SparkConf. Properties that conflict with values set by the Cloud Dataproc API
+ # may be overwritten.
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties
+
  def initialize(**args)
  update!(**args)
  end

  # Update properties of this object
  def update!(**args)
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
- @main_class = args[:main_class] if args.key?(:main_class)
- @args = args[:args] if args.key?(:args)
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
+ @query_list = args[:query_list] if args.key?(:query_list)
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
- @file_uris = args[:file_uris] if args.key?(:file_uris)
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
- @properties = args[:properties] if args.key?(:properties)
  @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @properties = args[:properties] if args.key?(:properties)
  end
  end
  end
1033
1133
 
1034
- # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/
1035
- # docs/0.9.0/python-programming-guide.html) applications on YARN.
1036
- class PySparkJob
1134
+ # Describes the identifying information, config, and status of a cluster of
1135
+ # Google Compute Engine instances.
1136
+ class Cluster
1037
1137
  include Google::Apis::Core::Hashable
1038
1138
 
1039
- # [Required] The HCFS URI of the main Python file to use as the driver. Must be
1040
- # a .py file.
1041
- # Corresponds to the JSON property `mainPythonFileUri`
1139
+ # Optional The labels to associate with this cluster. Label keys must contain 1
1140
+ # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
1141
+ # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
1142
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1143
+ # . No more than 32 labels can be associated with a cluster.
1144
+ # Corresponds to the JSON property `labels`
1145
+ # @return [Hash<String,String>]
1146
+ attr_accessor :labels
1147
+
1148
+ # The status of a cluster and its instances.
1149
+ # Corresponds to the JSON property `status`
1150
+ # @return [Google::Apis::DataprocV1::ClusterStatus]
1151
+ attr_accessor :status
1152
+
1153
+ # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
1154
+ # This report is available for testing purposes only. It may be changed before
1155
+ # final release.
1156
+ # Corresponds to the JSON property `metrics`
1157
+ # @return [Google::Apis::DataprocV1::ClusterMetrics]
1158
+ attr_accessor :metrics
1159
+
1160
+ # Output-only The previous cluster status.
1161
+ # Corresponds to the JSON property `statusHistory`
1162
+ # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
1163
+ attr_accessor :status_history
1164
+
1165
+ # The cluster config.
1166
+ # Corresponds to the JSON property `config`
1167
+ # @return [Google::Apis::DataprocV1::ClusterConfig]
1168
+ attr_accessor :config
1169
+
1170
+ # Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
1171
+ # generates this value when it creates the cluster.
1172
+ # Corresponds to the JSON property `clusterUuid`
1042
1173
  # @return [String]
1043
- attr_accessor :main_python_file_uri
1174
+ attr_accessor :cluster_uuid
1044
1175
 
1045
- # [Optional] The arguments to pass to the driver. Do not include arguments, such
1046
- # as `--conf`, that can be set as job properties, since a collision may occur
1047
- # that causes an incorrect job submission.
1048
- # Corresponds to the JSON property `args`
1049
- # @return [Array<String>]
1050
- attr_accessor :args
1176
+ # Required The cluster name. Cluster names within a project must be unique.
1177
+ # Names of deleted clusters can be reused.
1178
+ # Corresponds to the JSON property `clusterName`
1179
+ # @return [String]
1180
+ attr_accessor :cluster_name
1051
1181
 
1052
- # [Optional] HCFS file URIs of Python files to pass to the PySpark framework.
1053
- # Supported file types: .py, .egg, and .zip.
1054
- # Corresponds to the JSON property `pythonFileUris`
1055
- # @return [Array<String>]
1056
- attr_accessor :python_file_uris
1182
+ # Required The Google Cloud Platform project ID that the cluster belongs to.
1183
+ # Corresponds to the JSON property `projectId`
1184
+ # @return [String]
1185
+ attr_accessor :project_id
1057
1186
 
1058
- # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python
1059
- # driver and tasks.
1060
- # Corresponds to the JSON property `jarFileUris`
1061
- # @return [Array<String>]
1062
- attr_accessor :jar_file_uris
1187
+ def initialize(**args)
1188
+ update!(**args)
1189
+ end
1063
1190
 
1064
- # [Optional] HCFS URIs of files to be copied to the working directory of Python
1065
- # drivers and distributed tasks. Useful for naively parallel tasks.
1066
- # Corresponds to the JSON property `fileUris`
1067
- # @return [Array<String>]
1068
- attr_accessor :file_uris
1191
+ # Update properties of this object
1192
+ def update!(**args)
1193
+ @labels = args[:labels] if args.key?(:labels)
1194
+ @status = args[:status] if args.key?(:status)
1195
+ @metrics = args[:metrics] if args.key?(:metrics)
1196
+ @status_history = args[:status_history] if args.key?(:status_history)
1197
+ @config = args[:config] if args.key?(:config)
1198
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1199
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1200
+ @project_id = args[:project_id] if args.key?(:project_id)
1201
+ end
1202
+ end
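Pulling the pieces together, a hedged sketch of describing and creating a cluster; the project and name values are placeholders, create_cluster as the generated clusters.create method name is an assumption, and the call returns an Operation like the one sketched earlier:

```ruby
cluster = Google::Apis::DataprocV1::Cluster.new(
  project_id: 'my-project',
  cluster_name: 'analytics-prod',
  labels: { 'team' => 'data-eng' },  # keys/values per RFC 1035, at most 32 labels
  config: cluster_config             # e.g. the ClusterConfig built in the earlier sketch
)
# op = service.create_cluster('my-project', 'global', cluster)  # assumed method name
```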

- # [Optional] HCFS URIs of archives to be extracted in the working directory of .
- # jar, .tar, .tar.gz, .tgz, and .zip.
- # Corresponds to the JSON property `archiveUris`
- # @return [Array<String>]
- attr_accessor :archive_uris
+ # The response message for Operations.ListOperations.
+ class ListOperationsResponse
+ include Google::Apis::Core::Hashable

- # [Optional] A mapping of property names to values, used to configure PySpark.
- # Properties that conflict with values set by the Cloud Dataproc API may be
- # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
- # and classes in user code.
- # Corresponds to the JSON property `properties`
- # @return [Hash<String,String>]
- attr_accessor :properties
+ # The standard List next-page token.
+ # Corresponds to the JSON property `nextPageToken`
+ # @return [String]
+ attr_accessor :next_page_token

- # The runtime logging config of the job.
- # Corresponds to the JSON property `loggingConfig`
- # @return [Google::Apis::DataprocV1::LoggingConfig]
- attr_accessor :logging_config
+ # A list of operations that matches the specified filter in the request.
+ # Corresponds to the JSON property `operations`
+ # @return [Array<Google::Apis::DataprocV1::Operation>]
+ attr_accessor :operations

  def initialize(**args)
  update!(**args)
@@ -1092,59 +1221,79 @@ module Google

  # Update properties of this object
  def update!(**args)
- @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
- @args = args[:args] if args.key?(:args)
- @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
- @file_uris = args[:file_uris] if args.key?(:file_uris)
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
- @properties = args[:properties] if args.key?(:properties)
- @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
+ @operations = args[:operations] if args.key?(:operations)
  end
  end

- # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
- # queries on YARN.
- class HiveJob
+ # Metadata describing the operation.
+ class OperationMetadata
  include Google::Apis::Core::Hashable

- # The HCFS URI of the script that contains Hive queries.
- # Corresponds to the JSON property `queryFileUri`
+ # Output-only The operation type.
+ # Corresponds to the JSON property `operationType`
  # @return [String]
- attr_accessor :query_file_uri
+ attr_accessor :operation_type

- # A list of queries to run on a cluster.
- # Corresponds to the JSON property `queryList`
- # @return [Google::Apis::DataprocV1::QueryList]
- attr_accessor :query_list
+ # Output-only Short description of operation.
+ # Corresponds to the JSON property `description`
+ # @return [String]
+ attr_accessor :description

- # [Optional] Whether to continue executing queries if a query fails. The default
- # value is `false`. Setting to `true` can be useful when executing independent
- # parallel queries.
- # Corresponds to the JSON property `continueOnFailure`
- # @return [Boolean]
- attr_accessor :continue_on_failure
- alias_method :continue_on_failure?, :continue_on_failure
+ # The status of the operation.
+ # Corresponds to the JSON property `status`
+ # @return [Google::Apis::DataprocV1::OperationStatus]
+ attr_accessor :status
+
+ # A message containing any operation metadata details.
+ # Corresponds to the JSON property `details`
+ # @return [String]
+ attr_accessor :details
+
+ # A message containing the operation state.
+ # Corresponds to the JSON property `state`
+ # @return [String]
+ attr_accessor :state
+
+ # Name of the cluster for the operation.
+ # Corresponds to the JSON property `clusterName`
+ # @return [String]
+ attr_accessor :cluster_name
+
+ # Cluster UUID for the operation.
+ # Corresponds to the JSON property `clusterUuid`
+ # @return [String]
+ attr_accessor :cluster_uuid
+
+ # A message containing the detailed operation state.
+ # Corresponds to the JSON property `innerState`
+ # @return [String]
+ attr_accessor :inner_state
+
+ # The time that the operation completed.
+ # Corresponds to the JSON property `endTime`
+ # @return [String]
+ attr_accessor :end_time
+
+ # The time that the operation was started by the server.
+ # Corresponds to the JSON property `startTime`
+ # @return [String]
+ attr_accessor :start_time

- # [Optional] Mapping of query variable names to values (equivalent to the Hive
- # command: `SET name="value";`).
- # Corresponds to the JSON property `scriptVariables`
- # @return [Hash<String,String>]
- attr_accessor :script_variables
+ # Output-only Errors encountered during operation execution.
+ # Corresponds to the JSON property `warnings`
+ # @return [Array<String>]
+ attr_accessor :warnings

- # [Optional] A mapping of property names and values, used to configure Hive.
- # Properties that conflict with values set by the Cloud Dataproc API may be
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
- # hive/conf/hive-site.xml, and classes in user code.
- # Corresponds to the JSON property `properties`
- # @return [Hash<String,String>]
- attr_accessor :properties
+ # The time that the operation was requested.
+ # Corresponds to the JSON property `insertTime`
+ # @return [String]
+ attr_accessor :insert_time

- # [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive server
- # and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- # Corresponds to the JSON property `jarFileUris`
- # @return [Array<String>]
- attr_accessor :jar_file_uris
+ # Output-only Previous operation status.
+ # Corresponds to the JSON property `statusHistory`
+ # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
+ attr_accessor :status_history

  def initialize(**args)
  update!(**args)
@@ -1152,27 +1301,36 @@ module Google

  # Update properties of this object
  def update!(**args)
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
- @query_list = args[:query_list] if args.key?(:query_list)
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
- @script_variables = args[:script_variables] if args.key?(:script_variables)
- @properties = args[:properties] if args.key?(:properties)
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
+ @description = args[:description] if args.key?(:description)
+ @status = args[:status] if args.key?(:status)
+ @details = args[:details] if args.key?(:details)
+ @state = args[:state] if args.key?(:state)
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
+ @end_time = args[:end_time] if args.key?(:end_time)
+ @start_time = args[:start_time] if args.key?(:start_time)
+ @warnings = args[:warnings] if args.key?(:warnings)
+ @insert_time = args[:insert_time] if args.key?(:insert_time)
+ @status_history = args[:status_history] if args.key?(:status_history)
  end
  end

- # A list of queries to run on a cluster.
- class QueryList
+ # Cloud Dataproc job config.
+ class JobPlacement
  include Google::Apis::Core::Hashable

- # [Required] The queries to execute. You do not need to terminate a query with a
- # semicolon. Multiple queries can be specified in one string by separating each
- # with a semicolon. Here is an example of an Cloud Dataproc API snippet that
- # uses a QueryList to specify a HiveJob: "hiveJob": ` "queryList": ` "queries": [
- # "query1", "query2", "query3;query4", ] ` `
- # Corresponds to the JSON property `queries`
- # @return [Array<String>]
- attr_accessor :queries
+ # Required The name of the cluster where the job will be submitted.
+ # Corresponds to the JSON property `clusterName`
+ # @return [String]
+ attr_accessor :cluster_name
+
+ # Output-only A cluster UUID generated by the Cloud Dataproc service when the
+ # job is submitted.
+ # Corresponds to the JSON property `clusterUuid`
+ # @return [String]
+ attr_accessor :cluster_uuid

  def initialize(**args)
  update!(**args)
@@ -1180,214 +1338,154 @@ module Google

  # Update properties of this object
  def update!(**args)
- @queries = args[:queries] if args.key?(:queries)
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
  end
  end

- # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries
- # on YARN.
- class PigJob
+ # Specifies the selection and config of software inside the cluster.
+ class SoftwareConfig
  include Google::Apis::Core::Hashable

- # The HCFS URI of the script that contains the Pig queries.
- # Corresponds to the JSON property `queryFileUri`
+ # Optional The version of software inside the cluster. It must match the regular
+ # expression [0-9]+\.[0-9]+. If unspecified, it defaults to the latest version (
+ # see Cloud Dataproc Versioning).
+ # Corresponds to the JSON property `imageVersion`
  # @return [String]
- attr_accessor :query_file_uri
-
- # A list of queries to run on a cluster.
- # Corresponds to the JSON property `queryList`
- # @return [Google::Apis::DataprocV1::QueryList]
- attr_accessor :query_list
-
- # [Optional] Whether to continue executing queries if a query fails. The default
- # value is `false`. Setting to `true` can be useful when executing independent
- # parallel queries.
- # Corresponds to the JSON property `continueOnFailure`
- # @return [Boolean]
- attr_accessor :continue_on_failure
- alias_method :continue_on_failure?, :continue_on_failure
-
- # [Optional] Mapping of query variable names to values (equivalent to the Pig
- # command: `name=[value]`).
- # Corresponds to the JSON property `scriptVariables`
- # @return [Hash<String,String>]
- attr_accessor :script_variables
+ attr_accessor :image_version

- # [Optional] A mapping of property names to values, used to configure Pig.
- # Properties that conflict with values set by the Cloud Dataproc API may be
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
- # pig/conf/pig.properties, and classes in user code.
+ # Optional The properties to set on daemon config files. Property keys are
+ # specified in prefix:property format, such as core:fs.defaultFS. The following
+ # are supported prefixes and their mappings:
+ # core: core-site.xml
+ # hdfs: hdfs-site.xml
+ # mapred: mapred-site.xml
+ # yarn: yarn-site.xml
+ # hive: hive-site.xml
+ # pig: pig.properties
+ # spark: spark-defaults.conf
  # Corresponds to the JSON property `properties`
  # @return [Hash<String,String>]
  attr_accessor :properties

- # [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client
- # and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- # Corresponds to the JSON property `jarFileUris`
- # @return [Array<String>]
- attr_accessor :jar_file_uris
-
- # The runtime logging config of the job.
- # Corresponds to the JSON property `loggingConfig`
- # @return [Google::Apis::DataprocV1::LoggingConfig]
- attr_accessor :logging_config
-
  def initialize(**args)
  update!(**args)
  end

  # Update properties of this object
  def update!(**args)
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
- @query_list = args[:query_list] if args.key?(:query_list)
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
- @script_variables = args[:script_variables] if args.key?(:script_variables)
+ @image_version = args[:image_version] if args.key?(:image_version)
  @properties = args[:properties] if args.key?(:properties)
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
- @logging_config = args[:logging_config] if args.key?(:logging_config)
  end
  end
1250
1381
 
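A sketch of the prefix:property format documented above, assuming a config that overrides one Spark and one HDFS daemon setting (the version and values are illustrative only):

    require 'google/apis/dataproc_v1'

    software_config = Google::Apis::DataprocV1::SoftwareConfig.new(
      image_version: '1.1',                    # must match [0-9]+\.[0-9]+
      properties: {
        'spark:spark.executor.memory' => '4g', # lands in spark-defaults.conf
        'hdfs:dfs.replication'        => '2'   # lands in hdfs-site.xml
      }
    )
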
- # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/
- # sql/) queries.
- class SparkSqlJob
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
+ # on YARN.
+ class PigJob
  include Google::Apis::Core::Hashable

- # The HCFS URI of the script that contains SQL queries.
- # Corresponds to the JSON property `queryFileUri`
- # @return [String]
- attr_accessor :query_file_uri
-
  # A list of queries to run on a cluster.
  # Corresponds to the JSON property `queryList`
  # @return [Google::Apis::DataprocV1::QueryList]
  attr_accessor :query_list

- # [Optional] Mapping of query variable names to values (equivalent to the Spark
- # SQL command: SET `name="value";`).
- # Corresponds to the JSON property `scriptVariables`
- # @return [Hash<String,String>]
- attr_accessor :script_variables
-
- # [Optional] A mapping of property names to values, used to configure Spark SQL'
- # s SparkConf. Properties that conflict with values set by the Cloud Dataproc
- # API may be overwritten.
- # Corresponds to the JSON property `properties`
- # @return [Hash<String,String>]
- attr_accessor :properties
+ # The HCFS URI of the script that contains the Pig queries.
+ # Corresponds to the JSON property `queryFileUri`
+ # @return [String]
+ attr_accessor :query_file_uri

- # [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and
+ # Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
  # Corresponds to the JSON property `jarFileUris`
  # @return [Array<String>]
  attr_accessor :jar_file_uris

+ # Optional Mapping of query variable names to values (equivalent to the Pig
+ # command: name=[value]).
+ # Corresponds to the JSON property `scriptVariables`
+ # @return [Hash<String,String>]
+ attr_accessor :script_variables
+
  # The runtime logging config of the job.
  # Corresponds to the JSON property `loggingConfig`
  # @return [Google::Apis::DataprocV1::LoggingConfig]
  attr_accessor :logging_config

+ # Optional A mapping of property names to values, used to configure Pig.
+ # Properties that conflict with values set by the Cloud Dataproc API may be
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
+ # pig/conf/pig.properties, and classes in user code.
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties
+
+ # Optional Whether to continue executing queries if a query fails. The default
+ # value is false. Setting to true can be useful when executing independent
+ # parallel queries.
+ # Corresponds to the JSON property `continueOnFailure`
+ # @return [Boolean]
+ attr_accessor :continue_on_failure
+ alias_method :continue_on_failure?, :continue_on_failure
+
  def initialize(**args)
  update!(**args)
  end

  # Update properties of this object
  def update!(**args)
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
  @query_list = args[:query_list] if args.key?(:query_list)
- @script_variables = args[:script_variables] if args.key?(:script_variables)
- @properties = args[:properties] if args.key?(:properties)
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
  @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @properties = args[:properties] if args.key?(:properties)
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
  end
  end

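A sketch of building the PigJob above with inline queries; QueryList is the companion model behind queryList, query_file_uri is the mutually exclusive alternative to it, and the bucket path and variable values are hypothetical:

    require 'google/apis/dataproc_v1'

    pig_job = Google::Apis::DataprocV1::PigJob.new(
      query_list: Google::Apis::DataprocV1::QueryList.new(
        queries: ['fs -ls /', "sh echo 'done'"]
      ),
      script_variables: { 'input' => 'gs://example-bucket/data' }, # name=[value]
      continue_on_failure: true
    )
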
- # Cloud Dataproc job status.
- class JobStatus
+ # The status of a cluster and its instances.
+ class ClusterStatus
  include Google::Apis::Core::Hashable

- # [Output-only] A state message specifying the overall job state.
- # Corresponds to the JSON property `state`
- # @return [String]
- attr_accessor :state
-
- # [Output-only] Optional job state details, such as an error description if the
- # state is ERROR.
- # Corresponds to the JSON property `details`
- # @return [String]
- attr_accessor :details
-
- # [Output-only] The time when this state was entered.
+ # Output-only Time when this state was entered.
  # Corresponds to the JSON property `stateStartTime`
  # @return [String]
  attr_accessor :state_start_time

- def initialize(**args)
- update!(**args)
- end
-
- # Update properties of this object
- def update!(**args)
- @state = args[:state] if args.key?(:state)
- @details = args[:details] if args.key?(:details)
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
- end
- end
-
- # A YARN application created by a job. Application information is a subset of
- # org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature*
- # *: This report is available for testing purposes only. It may be changed
- # before final release.
- class YarnApplication
- include Google::Apis::Core::Hashable
-
- # [Required] The application name.
- # Corresponds to the JSON property `name`
+ # Output-only Optional details of cluster's state.
+ # Corresponds to the JSON property `detail`
  # @return [String]
- attr_accessor :name
+ attr_accessor :detail

- # [Required] The application state.
+ # Output-only The cluster's state.
  # Corresponds to the JSON property `state`
  # @return [String]
  attr_accessor :state

- # [Required] The numerical progress of the application, from 1 to 100.
- # Corresponds to the JSON property `progress`
- # @return [Float]
- attr_accessor :progress
-
- # [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or
- # TimelineServer that provides application-specific information. The URL uses
- # the internal hostname, and requires a proxy server for resolution and,
- # possibly, access.
- # Corresponds to the JSON property `trackingUrl`
- # @return [String]
- attr_accessor :tracking_url
-
  def initialize(**args)
  update!(**args)
  end

  # Update properties of this object
  def update!(**args)
- @name = args[:name] if args.key?(:name)
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
+ @detail = args[:detail] if args.key?(:detail)
  @state = args[:state] if args.key?(:state)
- @progress = args[:progress] if args.key?(:progress)
- @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
  end
  end

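All three ClusterStatus fields are output-only, so the model is read rather than built. A sketch, assuming `cluster` is a Google::Apis::DataprocV1::Cluster already fetched from the API:

    # Report a cluster that has entered the ERROR state.
    def report_error(cluster)
      status = cluster.status
      return unless status && status.state == 'ERROR'
      warn "#{cluster.cluster_name} failed at #{status.state_start_time}: #{status.detail}"
    end
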
- # A list of jobs in a project.
- class ListJobsResponse
+ # The list of all clusters in a project.
+ class ListClustersResponse
  include Google::Apis::Core::Hashable

- # [Output-only] Jobs list.
- # Corresponds to the JSON property `jobs`
- # @return [Array<Google::Apis::DataprocV1::Job>]
- attr_accessor :jobs
+ # Output-only The clusters in the project.
+ # Corresponds to the JSON property `clusters`
+ # @return [Array<Google::Apis::DataprocV1::Cluster>]
+ attr_accessor :clusters

- # [Optional] This token is included in the response if there are more results to
- # fetch. To fetch additional results, provide this value as the `page_token` in
- # a subsequent ListJobsRequest.
+ # Output-only This token is included in the response if there are more results
+ # to fetch. To fetch additional results, provide this value as the page_token in
+ # a subsequent <code>ListClustersRequest</code>.
  # Corresponds to the JSON property `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token
@@ -1398,75 +1496,107 @@ module Google

  # Update properties of this object
  def update!(**args)
- @jobs = args[:jobs] if args.key?(:jobs)
+ @clusters = args[:clusters] if args.key?(:clusters)
  @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
  end
  end

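A sketch of draining this paginated listing with next_page_token; list_clusters is assumed to be the generated DataprocService method in this gem (credentials setup omitted), and the project and region values are hypothetical:

    require 'google/apis/dataproc_v1'

    service = Google::Apis::DataprocV1::DataprocService.new # auth omitted
    page_token = nil
    loop do
      response = service.list_clusters('example-project', 'global',
                                       page_token: page_token)
      (response.clusters || []).each { |c| puts c.cluster_name }
      page_token = response.next_page_token
      break if page_token.nil?
    end
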
- # A request to cancel a job.
- class CancelJobRequest
+ # A Cloud Dataproc job resource.
+ class Job
  include Google::Apis::Core::Hashable

- def initialize(**args)
- update!(**args)
- end
+ # Output-only If present, the location of miscellaneous control files which may
+ # be used as part of job setup and handling. If not present, control files may
+ # be placed in the same location as driver_output_uri.
+ # Corresponds to the JSON property `driverControlFilesUri`
+ # @return [String]
+ attr_accessor :driver_control_files_uri

- # Update properties of this object
- def update!(**args)
- end
- end
+ # Job scheduling options. Beta Feature: These options are available for testing
+ # purposes only. They may be changed before final release.
+ # Corresponds to the JSON property `scheduling`
+ # @return [Google::Apis::DataprocV1::JobScheduling]
+ attr_accessor :scheduling

- # A generic empty message that you can re-use to avoid defining duplicated empty
- # messages in your APIs. A typical example is to use it as the request or the
- # response type of an API method. For instance: service Foo ` rpc Bar(google.
- # protobuf.Empty) returns (google.protobuf.Empty); ` The JSON representation for
- # `Empty` is empty JSON object ````.
- class Empty
- include Google::Apis::Core::Hashable
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
+ # on YARN.
+ # Corresponds to the JSON property `pigJob`
+ # @return [Google::Apis::DataprocV1::PigJob]
+ attr_accessor :pig_job

- def initialize(**args)
- update!(**args)
- end
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
+ # queries on YARN.
+ # Corresponds to the JSON property `hiveJob`
+ # @return [Google::Apis::DataprocV1::HiveJob]
+ attr_accessor :hive_job

- # Update properties of this object
- def update!(**args)
- end
- end
+ # Optional The labels to associate with this job. Label keys must contain 1 to
+ # 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.
+ # txt). Label values may be empty, but, if present, must contain 1 to 63
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
+ # . No more than 32 labels can be associated with a job.
+ # Corresponds to the JSON property `labels`
+ # @return [Hash<String,String>]
+ attr_accessor :labels

- # The response message for Operations.ListOperations.
- class ListOperationsResponse
- include Google::Apis::Core::Hashable
+ # Output-only A URI pointing to the location of the stdout of the job's driver
+ # program.
+ # Corresponds to the JSON property `driverOutputResourceUri`
+ # @return [String]
+ attr_accessor :driver_output_resource_uri
+
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
+ # ) queries.
+ # Corresponds to the JSON property `sparkSqlJob`
+ # @return [Google::Apis::DataprocV1::SparkSqlJob]
+ attr_accessor :spark_sql_job
+
+ # Output-only The previous job status.
+ # Corresponds to the JSON property `statusHistory`
+ # @return [Array<Google::Apis::DataprocV1::JobStatus>]
+ attr_accessor :status_history
+
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
+ # applications on YARN.
+ # Corresponds to the JSON property `sparkJob`
+ # @return [Google::Apis::DataprocV1::SparkJob]
+ attr_accessor :spark_job

- # A list of operations that matches the specified filter in the request.
- # Corresponds to the JSON property `operations`
- # @return [Array<Google::Apis::DataprocV1::Operation>]
- attr_accessor :operations
+ # Output-only The collection of YARN applications spun up by this job. Beta
+ # Feature: This report is available for testing purposes only. It may be changed
+ # before final release.
+ # Corresponds to the JSON property `yarnApplications`
+ # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
+ attr_accessor :yarn_applications

- # The standard List next-page token.
- # Corresponds to the JSON property `nextPageToken`
- # @return [String]
- attr_accessor :next_page_token
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
+ # 0.9.0/python-programming-guide.html) applications on YARN.
+ # Corresponds to the JSON property `pysparkJob`
+ # @return [Google::Apis::DataprocV1::PySparkJob]
+ attr_accessor :pyspark_job

- def initialize(**args)
- update!(**args)
- end
+ # Encapsulates the full scoping used to reference a job.
+ # Corresponds to the JSON property `reference`
+ # @return [Google::Apis::DataprocV1::JobReference]
+ attr_accessor :reference

- # Update properties of this object
- def update!(**args)
- @operations = args[:operations] if args.key?(:operations)
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
- end
- end
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+ # Corresponds to the JSON property `hadoopJob`
+ # @return [Google::Apis::DataprocV1::HadoopJob]
+ attr_accessor :hadoop_job

- # The location of diagnostic output.
- class DiagnoseClusterResults
- include Google::Apis::Core::Hashable
+ # Cloud Dataproc job status.
+ # Corresponds to the JSON property `status`
+ # @return [Google::Apis::DataprocV1::JobStatus]
+ attr_accessor :status

- # [Output-only] The Google Cloud Storage URI of the diagnostic output. The
- # output report is a plain text file with a summary of collected diagnostics.
- # Corresponds to the JSON property `outputUri`
- # @return [String]
- attr_accessor :output_uri
+ # Cloud Dataproc job config.
+ # Corresponds to the JSON property `placement`
+ # @return [Google::Apis::DataprocV1::JobPlacement]
+ attr_accessor :placement

  def initialize(**args)
  update!(**args)
@@ -1474,48 +1604,78 @@ module Google

  # Update properties of this object
  def update!(**args)
- @output_uri = args[:output_uri] if args.key?(:output_uri)
+ @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
+ @scheduling = args[:scheduling] if args.key?(:scheduling)
+ @pig_job = args[:pig_job] if args.key?(:pig_job)
+ @hive_job = args[:hive_job] if args.key?(:hive_job)
+ @labels = args[:labels] if args.key?(:labels)
+ @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
+ @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
+ @status_history = args[:status_history] if args.key?(:status_history)
+ @spark_job = args[:spark_job] if args.key?(:spark_job)
+ @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
+ @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
+ @reference = args[:reference] if args.key?(:reference)
+ @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
+ @status = args[:status] if args.key?(:status)
+ @placement = args[:placement] if args.key?(:placement)
  end
  end

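A sketch of assembling a Job from the models above and submitting it; SubmitJobRequest and submit_job are assumed from elsewhere in this gem, and the project, cluster, class, and jar names are hypothetical:

    require 'google/apis/dataproc_v1'

    job = Google::Apis::DataprocV1::Job.new(
      placement: Google::Apis::DataprocV1::JobPlacement.new(
        cluster_name: 'example-cluster'
      ),
      labels: { 'team' => 'analytics' }, # at most 32 labels, RFC 1035 keys
      spark_job: Google::Apis::DataprocV1::SparkJob.new(
        main_class:    'com.example.WordCount',
        jar_file_uris: ['gs://example-bucket/wordcount.jar']
      )
    )

    service = Google::Apis::DataprocV1::DataprocService.new # auth omitted
    request = Google::Apis::DataprocV1::SubmitJobRequest.new(job: job)
    service.submit_job('example-project', 'global', request)
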
- # Metadata describing the operation.
- class ClusterOperationMetadata
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
+ # applications on YARN.
+ class SparkJob
  include Google::Apis::Core::Hashable

- # [Output-only] Name of the cluster for the operation.
- # Corresponds to the JSON property `clusterName`
- # @return [String]
- attr_accessor :cluster_name
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
+ # and tasks.
+ # Corresponds to the JSON property `jarFileUris`
+ # @return [Array<String>]
+ attr_accessor :jar_file_uris

- # [Output-only] Cluster UUID for the operation.
- # Corresponds to the JSON property `clusterUuid`
- # @return [String]
- attr_accessor :cluster_uuid
+ # The runtime logging config of the job.
+ # Corresponds to the JSON property `loggingConfig`
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
+ attr_accessor :logging_config

- # The status of the operation.
- # Corresponds to the JSON property `status`
- # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
- attr_accessor :status
+ # Optional A mapping of property names to values, used to configure Spark.
+ # Properties that conflict with values set by the Cloud Dataproc API may be
+ # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
+ # and classes in user code.
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties

- # [Output-only] The previous operation status.
- # Corresponds to the JSON property `statusHistory`
- # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
- attr_accessor :status_history
+ # Optional The arguments to pass to the driver. Do not include arguments, such
+ # as --conf, that can be set as job properties, since a collision may occur that
+ # causes an incorrect job submission.
+ # Corresponds to the JSON property `args`
+ # @return [Array<String>]
+ attr_accessor :args

- # [Output-only] The operation type.
- # Corresponds to the JSON property `operationType`
- # @return [String]
- attr_accessor :operation_type
+ # Optional HCFS URIs of files to be copied to the working directory of Spark
+ # drivers and distributed tasks. Useful for naively parallel tasks.
+ # Corresponds to the JSON property `fileUris`
+ # @return [Array<String>]
+ attr_accessor :file_uris

- # [Output-only] Short description of operation.
- # Corresponds to the JSON property `description`
+ # The name of the driver's main class. The jar file that contains the class must
+ # be in the default CLASSPATH or specified in jar_file_uris.
+ # Corresponds to the JSON property `mainClass`
  # @return [String]
- attr_accessor :description
+ attr_accessor :main_class

- # [Output-only] labels associated with the operation
- # Corresponds to the JSON property `labels`
- # @return [Hash<String,String>]
- attr_accessor :labels
+ # Optional HCFS URIs of archives to be extracted in the working directory of
+ # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
+ # zip.
+ # Corresponds to the JSON property `archiveUris`
+ # @return [Array<String>]
+ attr_accessor :archive_uris
+
+ # The HCFS URI of the jar file that contains the main class.
+ # Corresponds to the JSON property `mainJarFileUri`
+ # @return [String]
+ attr_accessor :main_jar_file_uri

  def initialize(**args)
  update!(**args)
@@ -1523,36 +1683,33 @@ module Google

  # Update properties of this object
  def update!(**args)
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
- @status = args[:status] if args.key?(:status)
- @status_history = args[:status_history] if args.key?(:status_history)
- @operation_type = args[:operation_type] if args.key?(:operation_type)
- @description = args[:description] if args.key?(:description)
- @labels = args[:labels] if args.key?(:labels)
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @properties = args[:properties] if args.key?(:properties)
+ @args = args[:args] if args.key?(:args)
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
+ @main_class = args[:main_class] if args.key?(:main_class)
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
  end
  end

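main_class and main_jar_file_uri are the two ways of naming the driver entry point described in the comments above. A sketch of both (class and jar names hypothetical):

    # 1. By class name, with the jar supplied on the CLASSPATH:
    by_class = Google::Apis::DataprocV1::SparkJob.new(
      main_class:    'com.example.WordCount',
      jar_file_uris: ['gs://example-bucket/wordcount.jar']
    )

    # 2. By the jar that contains the main class:
    by_jar = Google::Apis::DataprocV1::SparkJob.new(
      main_jar_file_uri: 'gs://example-bucket/wordcount.jar'
    )
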
- # The status of the operation.
- class ClusterOperationStatus
+ # Cloud Dataproc job status.
+ class JobStatus
  include Google::Apis::Core::Hashable

- # [Output-only] A message containing the operation state.
+ # Output-only A state message specifying the overall job state.
  # Corresponds to the JSON property `state`
  # @return [String]
  attr_accessor :state

- # [Output-only] A message containing the detailed operation state.
- # Corresponds to the JSON property `innerState`
- # @return [String]
- attr_accessor :inner_state
-
- # [Output-only]A message containing any operation metadata details.
+ # Output-only Optional job state details, such as an error description if the
+ # state is <code>ERROR</code>.
  # Corresponds to the JSON property `details`
  # @return [String]
  attr_accessor :details

- # [Output-only] The time this state was entered.
+ # Output-only The time when this state was entered.
  # Corresponds to the JSON property `stateStartTime`
  # @return [String]
  attr_accessor :state_start_time
@@ -1564,21 +1721,25 @@ module Google
  # Update properties of this object
  def update!(**args)
  @state = args[:state] if args.key?(:state)
- @inner_state = args[:inner_state] if args.key?(:inner_state)
  @details = args[:details] if args.key?(:details)
  @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
  end
  end

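Since JobStatus is output-only, it is typically read back in a polling loop. A sketch, assuming the generated get_job method, hypothetical ids, and credentials setup omitted:

    require 'google/apis/dataproc_v1'

    service = Google::Apis::DataprocV1::DataprocService.new # auth omitted
    loop do
      job = service.get_job('example-project', 'global', 'example-job-id')
      puts "#{job.status.state} since #{job.status.state_start_time}"
      break if %w[DONE ERROR CANCELLED].include?(job.status.state)
      sleep 10
    end
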
- # The location where output from diagnostic command can be found.
- class DiagnoseClusterOutputLocation
+ # Specifies the resources used to actively manage an instance group.
+ class ManagedGroupConfig
  include Google::Apis::Core::Hashable

- # [Output-only] The Google Cloud Storage URI of the diagnostic output. This will
- # be a plain text file with summary of collected diagnostics.
- # Corresponds to the JSON property `outputUri`
+ # Output-only The name of the Instance Group Manager for this group.
+ # Corresponds to the JSON property `instanceGroupManagerName`
  # @return [String]
- attr_accessor :output_uri
+ attr_accessor :instance_group_manager_name
+
+ # Output-only The name of the Instance Template used for the Managed Instance
+ # Group.
+ # Corresponds to the JSON property `instanceTemplateName`
+ # @return [String]
+ attr_accessor :instance_template_name

  def initialize(**args)
  update!(**args)
@@ -1586,115 +1747,31 @@ module Google

  # Update properties of this object
  def update!(**args)
- @output_uri = args[:output_uri] if args.key?(:output_uri)
+ @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
+ @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
  end
  end

- # Metadata describing the operation.
- class OperationMetadata
+ # The status of the operation.
+ class ClusterOperationStatus
  include Google::Apis::Core::Hashable

- # A message containing the operation state.
+ # Output-only A message containing the operation state.
  # Corresponds to the JSON property `state`
  # @return [String]
  attr_accessor :state

- # A message containing the detailed operation state.
- # Corresponds to the JSON property `innerState`
- # @return [String]
- attr_accessor :inner_state
-
- # A message containing any operation metadata details.
+ # Output-only A message containing any operation metadata details.
  # Corresponds to the JSON property `details`
  # @return [String]
  attr_accessor :details

- # The time that the operation was requested.
- # Corresponds to the JSON property `insertTime`
- # @return [String]
- attr_accessor :insert_time
-
- # The time that the operation was started by the server.
- # Corresponds to the JSON property `startTime`
- # @return [String]
- attr_accessor :start_time
-
- # The time that the operation completed.
- # Corresponds to the JSON property `endTime`
- # @return [String]
- attr_accessor :end_time
-
- # Name of the cluster for the operation.
- # Corresponds to the JSON property `clusterName`
- # @return [String]
- attr_accessor :cluster_name
-
- # Cluster UUId for the operation.
- # Corresponds to the JSON property `clusterUuid`
- # @return [String]
- attr_accessor :cluster_uuid
-
- # The status of the operation.
- # Corresponds to the JSON property `status`
- # @return [Google::Apis::DataprocV1::OperationStatus]
- attr_accessor :status
-
- # [Output-only] Previous operation status.
- # Corresponds to the JSON property `statusHistory`
- # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
- attr_accessor :status_history
-
- # [Output-only] The operation type.
- # Corresponds to the JSON property `operationType`
- # @return [String]
- attr_accessor :operation_type
-
- # [Output-only] Short description of operation.
- # Corresponds to the JSON property `description`
- # @return [String]
- attr_accessor :description
-
- def initialize(**args)
- update!(**args)
- end
-
- # Update properties of this object
- def update!(**args)
- @state = args[:state] if args.key?(:state)
- @inner_state = args[:inner_state] if args.key?(:inner_state)
- @details = args[:details] if args.key?(:details)
- @insert_time = args[:insert_time] if args.key?(:insert_time)
- @start_time = args[:start_time] if args.key?(:start_time)
- @end_time = args[:end_time] if args.key?(:end_time)
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
- @status = args[:status] if args.key?(:status)
- @status_history = args[:status_history] if args.key?(:status_history)
- @operation_type = args[:operation_type] if args.key?(:operation_type)
- @description = args[:description] if args.key?(:description)
- end
- end
-
- # The status of the operation.
- class OperationStatus
- include Google::Apis::Core::Hashable
-
- # A message containing the operation state.
- # Corresponds to the JSON property `state`
- # @return [String]
- attr_accessor :state
-
- # A message containing the detailed operation state.
+ # Output-only A message containing the detailed operation state.
  # Corresponds to the JSON property `innerState`
  # @return [String]
  attr_accessor :inner_state

- # A message containing any operation metadata details.
- # Corresponds to the JSON property `details`
- # @return [String]
- attr_accessor :details
-
- # The time this state was entered.
+ # Output-only The time this state was entered.
  # Corresponds to the JSON property `stateStartTime`
  # @return [String]
  attr_accessor :state_start_time
@@ -1706,8 +1783,8 @@ module Google
  # Update properties of this object
  def update!(**args)
  @state = args[:state] if args.key?(:state)
- @inner_state = args[:inner_state] if args.key?(:inner_state)
  @details = args[:details] if args.key?(:details)
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
  @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
  end
  end