google-api-client 0.10.1 → 0.10.2

Files changed (176)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +3 -0
  3. data/api_names.yaml +395 -0
  4. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  5. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +254 -254
  6. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +65 -65
  7. data/generated/google/apis/adexchangebuyer2_v2beta1/service.rb +220 -220
  8. data/generated/google/apis/admin_directory_v1.rb +1 -1
  9. data/generated/google/apis/admin_directory_v1/classes.rb +106 -0
  10. data/generated/google/apis/admin_directory_v1/representations.rb +37 -0
  11. data/generated/google/apis/admin_reports_v1.rb +3 -3
  12. data/generated/google/apis/admin_reports_v1/service.rb +6 -6
  13. data/generated/google/apis/adsense_v1_4.rb +1 -1
  14. data/generated/google/apis/adsensehost_v4_1.rb +1 -1
  15. data/generated/google/apis/analytics_v3.rb +1 -1
  16. data/generated/google/apis/analytics_v3/service.rb +39 -0
  17. data/generated/google/apis/analyticsreporting_v4/classes.rb +920 -920
  18. data/generated/google/apis/analyticsreporting_v4/representations.rb +197 -197
  19. data/generated/google/apis/analyticsreporting_v4/service.rb +4 -4
  20. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  21. data/generated/google/apis/androidenterprise_v1/classes.rb +8 -13
  22. data/generated/google/apis/androidenterprise_v1/service.rb +3 -3
  23. data/generated/google/apis/appengine_v1beta5.rb +1 -1
  24. data/generated/google/apis/appengine_v1beta5/classes.rb +115 -5
  25. data/generated/google/apis/appengine_v1beta5/representations.rb +37 -0
  26. data/generated/google/apis/appengine_v1beta5/service.rb +12 -9
  27. data/generated/google/apis/appstate_v1.rb +1 -1
  28. data/generated/google/apis/bigquery_v2.rb +1 -1
  29. data/generated/google/apis/bigquery_v2/classes.rb +32 -37
  30. data/generated/google/apis/bigquery_v2/service.rb +10 -2
  31. data/generated/google/apis/calendar_v3.rb +1 -1
  32. data/generated/google/apis/calendar_v3/classes.rb +205 -0
  33. data/generated/google/apis/calendar_v3/representations.rb +97 -0
  34. data/generated/google/apis/classroom_v1.rb +22 -25
  35. data/generated/google/apis/classroom_v1/classes.rb +998 -907
  36. data/generated/google/apis/classroom_v1/representations.rb +240 -240
  37. data/generated/google/apis/classroom_v1/service.rb +1269 -1061
  38. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  39. data/generated/google/apis/cloudbuild_v1/classes.rb +164 -163
  40. data/generated/google/apis/cloudbuild_v1/representations.rb +31 -31
  41. data/generated/google/apis/cloudbuild_v1/service.rb +114 -114
  42. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  43. data/generated/google/apis/clouddebugger_v2/classes.rb +687 -687
  44. data/generated/google/apis/clouddebugger_v2/representations.rb +147 -147
  45. data/generated/google/apis/clouddebugger_v2/service.rb +132 -132
  46. data/generated/google/apis/cloudkms_v1.rb +1 -1
  47. data/generated/google/apis/cloudkms_v1/classes.rb +231 -248
  48. data/generated/google/apis/cloudkms_v1/representations.rb +74 -74
  49. data/generated/google/apis/cloudkms_v1/service.rb +228 -228
  50. data/generated/google/apis/cloudmonitoring_v2beta2.rb +1 -1
  51. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  52. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +738 -128
  53. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +245 -23
  54. data/generated/google/apis/cloudresourcemanager_v1/service.rb +1293 -249
  55. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +4 -4
  56. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +982 -372
  57. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +293 -71
  58. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +626 -277
  59. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  60. data/generated/google/apis/cloudtrace_v1/classes.rb +19 -19
  61. data/generated/google/apis/cloudtrace_v1/representations.rb +2 -2
  62. data/generated/google/apis/cloudtrace_v1/service.rb +30 -30
  63. data/generated/google/apis/compute_beta.rb +1 -1
  64. data/generated/google/apis/compute_beta/classes.rb +116 -0
  65. data/generated/google/apis/compute_beta/representations.rb +48 -0
  66. data/generated/google/apis/compute_beta/service.rb +46 -1
  67. data/generated/google/apis/compute_v1.rb +1 -1
  68. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  69. data/generated/google/apis/dataflow_v1b3/classes.rb +3276 -3320
  70. data/generated/google/apis/dataflow_v1b3/representations.rb +779 -781
  71. data/generated/google/apis/dataflow_v1b3/service.rb +225 -225
  72. data/generated/google/apis/dataproc_v1.rb +1 -1
  73. data/generated/google/apis/dataproc_v1/classes.rb +1221 -1207
  74. data/generated/google/apis/dataproc_v1/representations.rb +255 -253
  75. data/generated/google/apis/dataproc_v1/service.rb +100 -100
  76. data/generated/google/apis/deploymentmanager_v2.rb +1 -1
  77. data/generated/google/apis/deploymentmanager_v2/classes.rb +5 -5
  78. data/generated/google/apis/dns_v1.rb +1 -1
  79. data/generated/google/apis/dns_v2beta1.rb +1 -1
  80. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  81. data/generated/google/apis/drive_v2.rb +1 -1
  82. data/generated/google/apis/drive_v2/classes.rb +3 -1
  83. data/generated/google/apis/drive_v3.rb +1 -1
  84. data/generated/google/apis/drive_v3/classes.rb +3 -1
  85. data/generated/google/apis/fusiontables_v2.rb +1 -1
  86. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  87. data/generated/google/apis/games_management_v1management.rb +1 -1
  88. data/generated/google/apis/games_v1.rb +1 -1
  89. data/generated/google/apis/genomics_v1.rb +7 -7
  90. data/generated/google/apis/genomics_v1/classes.rb +959 -959
  91. data/generated/google/apis/genomics_v1/representations.rb +238 -238
  92. data/generated/google/apis/genomics_v1/service.rb +996 -996
  93. data/generated/google/apis/iam_v1.rb +1 -1
  94. data/generated/google/apis/iam_v1/classes.rb +440 -440
  95. data/generated/google/apis/iam_v1/representations.rb +94 -94
  96. data/generated/google/apis/iam_v1/service.rb +170 -173
  97. data/generated/google/apis/identitytoolkit_v3.rb +1 -1
  98. data/generated/google/apis/identitytoolkit_v3/classes.rb +55 -0
  99. data/generated/google/apis/identitytoolkit_v3/representations.rb +8 -0
  100. data/generated/google/apis/kgsearch_v1/service.rb +4 -4
  101. data/generated/google/apis/language_v1beta1.rb +1 -1
  102. data/generated/google/apis/language_v1beta1/classes.rb +427 -427
  103. data/generated/google/apis/language_v1beta1/representations.rb +113 -113
  104. data/generated/google/apis/language_v1beta1/service.rb +25 -24
  105. data/generated/google/apis/licensing_v1.rb +2 -2
  106. data/generated/google/apis/licensing_v1/classes.rb +14 -2
  107. data/generated/google/apis/licensing_v1/representations.rb +2 -0
  108. data/generated/google/apis/licensing_v1/service.rb +1 -1
  109. data/generated/google/apis/logging_v2beta1.rb +1 -1
  110. data/generated/google/apis/logging_v2beta1/classes.rb +864 -864
  111. data/generated/google/apis/logging_v2beta1/representations.rb +168 -168
  112. data/generated/google/apis/logging_v2beta1/service.rb +261 -261
  113. data/generated/google/apis/manufacturers_v1.rb +1 -1
  114. data/generated/google/apis/manufacturers_v1/classes.rb +452 -105
  115. data/generated/google/apis/manufacturers_v1/representations.rb +138 -18
  116. data/generated/google/apis/manufacturers_v1/service.rb +11 -11
  117. data/generated/google/apis/mirror_v1.rb +1 -1
  118. data/generated/google/apis/monitoring_v3.rb +7 -7
  119. data/generated/google/apis/monitoring_v3/classes.rb +668 -670
  120. data/generated/google/apis/monitoring_v3/representations.rb +140 -140
  121. data/generated/google/apis/monitoring_v3/service.rb +208 -208
  122. data/generated/google/apis/partners_v2.rb +1 -1
  123. data/generated/google/apis/partners_v2/classes.rb +505 -505
  124. data/generated/google/apis/partners_v2/representations.rb +118 -118
  125. data/generated/google/apis/partners_v2/service.rb +275 -275
  126. data/generated/google/apis/people_v1.rb +1 -1
  127. data/generated/google/apis/people_v1/classes.rb +1037 -1031
  128. data/generated/google/apis/people_v1/representations.rb +247 -246
  129. data/generated/google/apis/people_v1/service.rb +20 -20
  130. data/generated/google/apis/plus_domains_v1.rb +1 -1
  131. data/generated/google/apis/plus_v1.rb +1 -1
  132. data/generated/google/apis/proximitybeacon_v1beta1.rb +1 -1
  133. data/generated/google/apis/proximitybeacon_v1beta1/classes.rb +392 -392
  134. data/generated/google/apis/proximitybeacon_v1beta1/representations.rb +93 -93
  135. data/generated/google/apis/proximitybeacon_v1beta1/service.rb +381 -381
  136. data/generated/google/apis/pubsub_v1.rb +4 -4
  137. data/generated/google/apis/pubsub_v1/classes.rb +131 -132
  138. data/generated/google/apis/pubsub_v1/representations.rb +35 -35
  139. data/generated/google/apis/pubsub_v1/service.rb +399 -408
  140. data/generated/google/apis/reseller_v1.rb +1 -1
  141. data/generated/google/apis/reseller_v1/classes.rb +9 -0
  142. data/generated/google/apis/reseller_v1/representations.rb +1 -0
  143. data/generated/google/apis/script_v1.rb +9 -9
  144. data/generated/google/apis/script_v1/classes.rb +110 -110
  145. data/generated/google/apis/script_v1/representations.rb +26 -26
  146. data/generated/google/apis/sheets_v4.rb +4 -4
  147. data/generated/google/apis/sheets_v4/classes.rb +4329 -4329
  148. data/generated/google/apis/sheets_v4/representations.rb +856 -856
  149. data/generated/google/apis/sheets_v4/service.rb +106 -106
  150. data/generated/google/apis/slides_v1.rb +4 -4
  151. data/generated/google/apis/slides_v1/classes.rb +2923 -2841
  152. data/generated/google/apis/slides_v1/representations.rb +722 -691
  153. data/generated/google/apis/slides_v1/service.rb +58 -15
  154. data/generated/google/apis/speech_v1beta1.rb +1 -1
  155. data/generated/google/apis/speech_v1beta1/classes.rb +191 -191
  156. data/generated/google/apis/speech_v1beta1/representations.rb +57 -57
  157. data/generated/google/apis/speech_v1beta1/service.rb +70 -70
  158. data/generated/google/apis/storage_v1.rb +1 -1
  159. data/generated/google/apis/storage_v1/classes.rb +151 -0
  160. data/generated/google/apis/storage_v1/representations.rb +45 -0
  161. data/generated/google/apis/storage_v1/service.rb +248 -0
  162. data/generated/google/apis/vision_v1.rb +1 -1
  163. data/generated/google/apis/vision_v1/classes.rb +1227 -1221
  164. data/generated/google/apis/vision_v1/representations.rb +217 -215
  165. data/generated/google/apis/webmasters_v3.rb +1 -1
  166. data/generated/google/apis/youtube_analytics_v1.rb +1 -1
  167. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  168. data/generated/google/apis/youtube_partner_v1/classes.rb +78 -0
  169. data/generated/google/apis/youtube_partner_v1/representations.rb +34 -0
  170. data/generated/google/apis/youtube_partner_v1/service.rb +40 -0
  171. data/generated/google/apis/youtubereporting_v1.rb +4 -4
  172. data/generated/google/apis/youtubereporting_v1/classes.rb +65 -65
  173. data/generated/google/apis/youtubereporting_v1/representations.rb +18 -18
  174. data/generated/google/apis/youtubereporting_v1/service.rb +111 -111
  175. data/lib/google/apis/version.rb +1 -1
  176. metadata +2 -2
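Most of the churn in this release is regenerated API code; the excerpt that follows is from the Cloud Dataproc v1 surface (files 72-75 in the list above). For orientation, here is a minimal, illustrative Ruby sketch of how the regenerated Dataproc model classes are typically assembled. It uses only attributes that appear in the diff below; the cluster name, bucket paths, and driver class are placeholders, and the service call that would actually submit the job is outside this changeset and not shown.

require 'google/apis/dataproc_v1'

dataproc = Google::Apis::DataprocV1

# Describe a Spark driver using only fields defined in the regenerated SparkJob class.
spark_job = dataproc::SparkJob.new(
  main_class: 'org.example.WordCount',                  # hypothetical driver class
  jar_file_uris: ['gs://example-bucket/wordcount.jar'], # hypothetical bucket
  args: ['gs://example-bucket/input', 'gs://example-bucket/output']
)

# Wrap it in a Job with a placement pointing at an existing cluster.
job = dataproc::Job.new(
  placement: dataproc::JobPlacement.new(cluster_name: 'example-cluster'),
  spark_job: spark_job,
  labels: { 'env' => 'test' }
)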
data/generated/google/apis/dataproc_v1.rb

@@ -25,7 +25,7 @@ module Google
  # @see https://cloud.google.com/dataproc/
  module DataprocV1
  VERSION = 'V1'
- REVISION = '20170228'
+ REVISION = '20170321'

  # View and manage your data across Google Cloud Platform services
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'

data/generated/google/apis/dataproc_v1/classes.rb

@@ -22,21 +22,74 @@ module Google
  module Apis
  module DataprocV1

- # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
- # This report is available for testing purposes only. It may be changed before
- # final release.
- class ClusterMetrics
+ # Metadata describing the operation.
+ class OperationMetadata
  include Google::Apis::Core::Hashable

- # The YARN metrics.
- # Corresponds to the JSON property `yarnMetrics`
- # @return [Hash<String,String>]
- attr_accessor :yarn_metrics
+ # Output-only Short description of operation.
+ # Corresponds to the JSON property `description`
+ # @return [String]
+ attr_accessor :description

- # The HDFS metrics.
- # Corresponds to the JSON property `hdfsMetrics`
- # @return [Hash<String,String>]
- attr_accessor :hdfs_metrics
+ # The status of the operation.
+ # Corresponds to the JSON property `status`
+ # @return [Google::Apis::DataprocV1::OperationStatus]
+ attr_accessor :status
+
+ # A message containing any operation metadata details.
+ # Corresponds to the JSON property `details`
+ # @return [String]
+ attr_accessor :details
+
+ # A message containing the operation state.
+ # Corresponds to the JSON property `state`
+ # @return [String]
+ attr_accessor :state
+
+ # Name of the cluster for the operation.
+ # Corresponds to the JSON property `clusterName`
+ # @return [String]
+ attr_accessor :cluster_name
+
+ # Cluster UUId for the operation.
+ # Corresponds to the JSON property `clusterUuid`
+ # @return [String]
+ attr_accessor :cluster_uuid
+
+ # A message containing the detailed operation state.
+ # Corresponds to the JSON property `innerState`
+ # @return [String]
+ attr_accessor :inner_state
+
+ # The time that the operation completed.
+ # Corresponds to the JSON property `endTime`
+ # @return [String]
+ attr_accessor :end_time
+
+ # The time that the operation was started by the server.
+ # Corresponds to the JSON property `startTime`
+ # @return [String]
+ attr_accessor :start_time
+
+ # Output-only Errors encountered during operation execution.
+ # Corresponds to the JSON property `warnings`
+ # @return [Array<String>]
+ attr_accessor :warnings
+
+ # The time that the operation was requested.
+ # Corresponds to the JSON property `insertTime`
+ # @return [String]
+ attr_accessor :insert_time
+
+ # Output-only Previous operation status.
+ # Corresponds to the JSON property `statusHistory`
+ # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
+ attr_accessor :status_history
+
+ # Output-only The operation type.
+ # Corresponds to the JSON property `operationType`
+ # @return [String]
+ attr_accessor :operation_type

  def initialize(**args)
  update!(**args)
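The OperationMetadata class added above takes the place of ClusterMetrics at this position in the generated file. A small illustrative sketch of its accessors; in practice this metadata arrives on a long-running operation returned by the Dataproc service, but here it is populated by hand with placeholder values just to show the fields.

metadata = Google::Apis::DataprocV1::OperationMetadata.new(
  cluster_name: 'example-cluster',   # hypothetical cluster
  operation_type: 'CREATE',
  state: 'RUNNING'
)
puts "#{metadata.operation_type} on #{metadata.cluster_name}: #{metadata.state}"
puts "warnings: #{Array(metadata.warnings).inspect}"   # nil-safe: empty array when unset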
@@ -44,27 +97,46 @@ module Google

  # Update properties of this object
  def update!(**args)
- @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
- @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
+ @description = args[:description] if args.key?(:description)
+ @status = args[:status] if args.key?(:status)
+ @details = args[:details] if args.key?(:details)
+ @state = args[:state] if args.key?(:state)
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
+ @end_time = args[:end_time] if args.key?(:end_time)
+ @start_time = args[:start_time] if args.key?(:start_time)
+ @warnings = args[:warnings] if args.key?(:warnings)
+ @insert_time = args[:insert_time] if args.key?(:insert_time)
+ @status_history = args[:status_history] if args.key?(:status_history)
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
  end
  end

- # Specifies the type and number of accelerator cards attached to the instances
- # of an instance group (see GPUs on Compute Engine).
- class AcceleratorConfig
+ # Specifies the selection and config of software inside the cluster.
+ class SoftwareConfig
  include Google::Apis::Core::Hashable

- # Full or partial URI of the accelerator type resource to expose to this
- # instance. See Google Compute Engine AcceleratorTypes( /compute/docs/reference/
- # beta/acceleratorTypes)
- # Corresponds to the JSON property `acceleratorTypeUri`
+ # Optional The version of software inside the cluster. It must match the regular
+ # expression [0-9]+\.[0-9]+. If unspecified, it defaults to the latest version (
+ # see Cloud Dataproc Versioning).
+ # Corresponds to the JSON property `imageVersion`
  # @return [String]
- attr_accessor :accelerator_type_uri
+ attr_accessor :image_version

- # The number of the accelerator cards of this type exposed to this instance.
- # Corresponds to the JSON property `acceleratorCount`
- # @return [Fixnum]
- attr_accessor :accelerator_count
+ # Optional The properties to set on daemon config files.Property keys are
+ # specified in prefix:property format, such as core:fs.defaultFS. The following
+ # are supported prefixes and their mappings:
+ # core: core-site.xml
+ # hdfs: hdfs-site.xml
+ # mapred: mapred-site.xml
+ # yarn: yarn-site.xml
+ # hive: hive-site.xml
+ # pig: pig.properties
+ # spark: spark-defaults.conf
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties

  def initialize(**args)
@@ -72,21 +144,25 @@ module Google

  # Update properties of this object
  def update!(**args)
- @accelerator_type_uri = args[:accelerator_type_uri] if args.key?(:accelerator_type_uri)
- @accelerator_count = args[:accelerator_count] if args.key?(:accelerator_count)
+ @image_version = args[:image_version] if args.key?(:image_version)
+ @properties = args[:properties] if args.key?(:properties)
  end
  end

- # The runtime logging config of the job.
- class LoggingConfig
+ # Cloud Dataproc job config.
+ class JobPlacement
  include Google::Apis::Core::Hashable

- # The per-package log levels for the driver. This may include "root" package
- # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
- # org.apache = DEBUG'
- # Corresponds to the JSON property `driverLogLevels`
- # @return [Hash<String,String>]
- attr_accessor :driver_log_levels
+ # Required The name of the cluster where the job will be submitted.
+ # Corresponds to the JSON property `clusterName`
+ # @return [String]
+ attr_accessor :cluster_name
+
+ # Output-only A cluster UUID generated by the Cloud Dataproc service when the
+ # job is submitted.
+ # Corresponds to the JSON property `clusterUuid`
+ # @return [String]
+ attr_accessor :cluster_uuid

  def initialize(**args)
  update!(**args)
@@ -94,19 +170,58 @@ module Google

  # Update properties of this object
  def update!(**args)
- @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
  end
  end

- # The location where output from diagnostic command can be found.
- class DiagnoseClusterOutputLocation
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
+ # on YARN.
+ class PigJob
  include Google::Apis::Core::Hashable

- # Output-only The Google Cloud Storage URI of the diagnostic output. This will
- # be a plain text file with summary of collected diagnostics.
- # Corresponds to the JSON property `outputUri`
+ # Optional Whether to continue executing queries if a query fails. The default
+ # value is false. Setting to true can be useful when executing independent
+ # parallel queries.
+ # Corresponds to the JSON property `continueOnFailure`
+ # @return [Boolean]
+ attr_accessor :continue_on_failure
+ alias_method :continue_on_failure?, :continue_on_failure
+
+ # The HCFS URI of the script that contains the Pig queries.
+ # Corresponds to the JSON property `queryFileUri`
  # @return [String]
- attr_accessor :output_uri
+ attr_accessor :query_file_uri
+
+ # A list of queries to run on a cluster.
+ # Corresponds to the JSON property `queryList`
+ # @return [Google::Apis::DataprocV1::QueryList]
+ attr_accessor :query_list
+
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and
+ # Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+ # Corresponds to the JSON property `jarFileUris`
+ # @return [Array<String>]
+ attr_accessor :jar_file_uris
+
+ # Optional Mapping of query variable names to values (equivalent to the Pig
+ # command: name=[value]).
+ # Corresponds to the JSON property `scriptVariables`
+ # @return [Hash<String,String>]
+ attr_accessor :script_variables
+
+ # The runtime logging config of the job.
+ # Corresponds to the JSON property `loggingConfig`
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
+ attr_accessor :logging_config
+
+ # Optional A mapping of property names to values, used to configure Pig.
+ # Properties that conflict with values set by the Cloud Dataproc API may be
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
+ # pig/conf/pig.properties, and classes in user code.
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties

  def initialize(**args)
  update!(**args)
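The PigJob class added above accepts either a script URI or an inline query list, plus script variables that map to Pig's name=[value] parameters. A hedged sketch under those assumptions; every URI and variable name below is a placeholder.

pig_job = Google::Apis::DataprocV1::PigJob.new(
  query_file_uri: 'gs://example-bucket/scripts/transform.pig',       # placeholder script
  script_variables: { 'INPUT'  => 'gs://example-bucket/raw',
                      'OUTPUT' => 'gs://example-bucket/cleaned' },
  continue_on_failure: false,
  jar_file_uris: ['gs://example-bucket/udfs/custom-udfs.jar']         # optional Pig UDFs
)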
@@ -114,85 +229,40 @@ module Google

  # Update properties of this object
  def update!(**args)
- @output_uri = args[:output_uri] if args.key?(:output_uri)
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
+ @query_list = args[:query_list] if args.key?(:query_list)
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @properties = args[:properties] if args.key?(:properties)
  end
  end

- # This resource represents a long-running operation that is the result of a
- # network API call.
- class Operation
+ # The status of a cluster and its instances.
+ class ClusterStatus
  include Google::Apis::Core::Hashable

- # If the value is false, it means the operation is still in progress. If true,
- # the operation is completed, and either error or response is available.
- # Corresponds to the JSON property `done`
- # @return [Boolean]
- attr_accessor :done
- alias_method :done?, :done
+ # Output-only Optional details of cluster's state.
+ # Corresponds to the JSON property `detail`
+ # @return [String]
+ attr_accessor :detail

- # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as Delete, the response is google.
- # protobuf.Empty. If the original method is standard Get/Create/Update, the
- # response should be the resource. For other methods, the response should have
- # the type XxxResponse, where Xxx is the original method name. For example, if
- # the original method name is TakeSnapshot(), the inferred response type is
- # TakeSnapshotResponse.
- # Corresponds to the JSON property `response`
- # @return [Hash<String,Object>]
- attr_accessor :response
+ # Output-only The cluster's state.
+ # Corresponds to the JSON property `state`
+ # @return [String]
+ attr_accessor :state

- # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the name should
- # have the format of operations/some/unique/name.
- # Corresponds to the JSON property `name`
+ # Output-only Time when this state was entered.
+ # Corresponds to the JSON property `stateStartTime`
  # @return [String]
- attr_accessor :name
+ attr_accessor :state_start_time

- # The Status type defines a logical error model that is suitable for different
- # programming environments, including REST APIs and RPC APIs. It is used by gRPC
- # (https://github.com/grpc). The error model is designed to be:
- # Simple to use and understand for most users
- # Flexible enough to meet unexpected needsOverviewThe Status message contains
- # three pieces of data: error code, error message, and error details. The error
- # code should be an enum value of google.rpc.Code, but it may accept additional
- # error codes if needed. The error message should be a developer-facing English
- # message that helps developers understand and resolve the error. If a localized
- # user-facing error message is needed, put the localized message in the error
- # details or localize it in the client. The optional error details may contain
- # arbitrary information about the error. There is a predefined set of error
- # detail types in the package google.rpc which can be used for common error
- # conditions.Language mappingThe Status message is the logical representation of
- # the error model, but it is not necessarily the actual wire format. When the
- # Status message is exposed in different client libraries and different wire
- # protocols, it can be mapped differently. For example, it will likely be mapped
- # to some exceptions in Java, but more likely mapped to some error codes in C.
- # Other usesThe error model and the Status message can be used in a variety of
- # environments, either with or without APIs, to provide a consistent developer
- # experience across different environments.Example uses of this error model
- # include:
- # Partial errors. If a service needs to return partial errors to the client, it
- # may embed the Status in the normal response to indicate the partial errors.
- # Workflow errors. A typical workflow has multiple steps. Each step may have a
- # Status message for error reporting purpose.
- # Batch operations. If a client uses batch request and batch response, the
- # Status message should be used directly inside batch response, one for each
- # error sub-response.
- # Asynchronous operations. If an API call embeds asynchronous operation results
- # in its response, the status of those operations should be represented directly
- # using the Status message.
- # Logging. If some API errors are stored in logs, the message Status could be
- # used directly after any stripping needed for security/privacy reasons.
- # Corresponds to the JSON property `error`
- # @return [Google::Apis::DataprocV1::Status]
- attr_accessor :error
-
- # Service-specific metadata associated with the operation. It typically contains
- # progress information and common metadata such as create time. Some services
- # might not provide such metadata. Any method that returns a long-running
- # operation should document the metadata type, if any.
- # Corresponds to the JSON property `metadata`
- # @return [Hash<String,Object>]
- attr_accessor :metadata
+ # Output-only Additional state information that includes status reported by the
+ # agent.
+ # Corresponds to the JSON property `substate`
+ # @return [String]
+ attr_accessor :substate

  def initialize(**args)
  update!(**args)
@@ -200,37 +270,28 @@ module Google

  # Update properties of this object
  def update!(**args)
- @done = args[:done] if args.key?(:done)
- @response = args[:response] if args.key?(:response)
- @name = args[:name] if args.key?(:name)
- @error = args[:error] if args.key?(:error)
- @metadata = args[:metadata] if args.key?(:metadata)
+ @detail = args[:detail] if args.key?(:detail)
+ @state = args[:state] if args.key?(:state)
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
+ @substate = args[:substate] if args.key?(:substate)
  end
  end

- # The status of the operation.
- class OperationStatus
+ # The list of all clusters in a project.
+ class ListClustersResponse
  include Google::Apis::Core::Hashable

- # A message containing the detailed operation state.
- # Corresponds to the JSON property `innerState`
- # @return [String]
- attr_accessor :inner_state
-
- # The time this state was entered.
- # Corresponds to the JSON property `stateStartTime`
- # @return [String]
- attr_accessor :state_start_time
-
- # A message containing the operation state.
- # Corresponds to the JSON property `state`
- # @return [String]
- attr_accessor :state
+ # Output-only The clusters in the project.
+ # Corresponds to the JSON property `clusters`
+ # @return [Array<Google::Apis::DataprocV1::Cluster>]
+ attr_accessor :clusters

- # A message containing any operation metadata details.
- # Corresponds to the JSON property `details`
+ # Output-only This token is included in the response if there are more results
+ # to fetch. To fetch additional results, provide this value as the page_token in
+ # a subsequent <code>ListClustersRequest</code>.
+ # Corresponds to the JSON property `nextPageToken`
  # @return [String]
- attr_accessor :details
+ attr_accessor :next_page_token

  def initialize(**args)
  update!(**args)
@@ -238,115 +299,107 @@ module Google

  # Update properties of this object
  def update!(**args)
- @inner_state = args[:inner_state] if args.key?(:inner_state)
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
- @state = args[:state] if args.key?(:state)
- @details = args[:details] if args.key?(:details)
+ @clusters = args[:clusters] if args.key?(:clusters)
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
  end
  end

- # Encapsulates the full scoping used to reference a job.
- class JobReference
+ # A Cloud Dataproc job resource.
+ class Job
  include Google::Apis::Core::Hashable

- # Required The ID of the Google Cloud Platform project that the job belongs to.
- # Corresponds to the JSON property `projectId`
- # @return [String]
- attr_accessor :project_id
+ # Cloud Dataproc job status.
+ # Corresponds to the JSON property `status`
+ # @return [Google::Apis::DataprocV1::JobStatus]
+ attr_accessor :status

- # Optional The job ID, which must be unique within the project. The job ID is
- # generated by the server upon job submission or provided by the user as a means
- # to perform retries without creating duplicate jobs. The ID must contain only
- # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
- # maximum length is 100 characters.
- # Corresponds to the JSON property `jobId`
+ # Cloud Dataproc job config.
+ # Corresponds to the JSON property `placement`
+ # @return [Google::Apis::DataprocV1::JobPlacement]
+ attr_accessor :placement
+
+ # Output-only If present, the location of miscellaneous control files which may
+ # be used as part of job setup and handling. If not present, control files may
+ # be placed in the same location as driver_output_uri.
+ # Corresponds to the JSON property `driverControlFilesUri`
  # @return [String]
- attr_accessor :job_id
+ attr_accessor :driver_control_files_uri

- def initialize(**args)
- update!(**args)
- end
+ # Job scheduling options.Beta Feature: These options are available for testing
+ # purposes only. They may be changed before final release.
+ # Corresponds to the JSON property `scheduling`
+ # @return [Google::Apis::DataprocV1::JobScheduling]
+ attr_accessor :scheduling

- # Update properties of this object
- def update!(**args)
- @project_id = args[:project_id] if args.key?(:project_id)
- @job_id = args[:job_id] if args.key?(:job_id)
- end
- end
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
+ # on YARN.
+ # Corresponds to the JSON property `pigJob`
+ # @return [Google::Apis::DataprocV1::PigJob]
+ attr_accessor :pig_job

- # A request to submit a job.
- class SubmitJobRequest
- include Google::Apis::Core::Hashable
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
+ # queries on YARN.
+ # Corresponds to the JSON property `hiveJob`
+ # @return [Google::Apis::DataprocV1::HiveJob]
+ attr_accessor :hive_job

- # A Cloud Dataproc job resource.
- # Corresponds to the JSON property `job`
- # @return [Google::Apis::DataprocV1::Job]
- attr_accessor :job
+ # Optional The labels to associate with this job. Label keys must contain 1 to
+ # 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.
+ # txt). Label values may be empty, but, if present, must contain 1 to 63
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
+ # . No more than 32 labels can be associated with a job.
+ # Corresponds to the JSON property `labels`
+ # @return [Hash<String,String>]
+ attr_accessor :labels

- def initialize(**args)
- update!(**args)
- end
+ # Output-only A URI pointing to the location of the stdout of the job's driver
+ # program.
+ # Corresponds to the JSON property `driverOutputResourceUri`
+ # @return [String]
+ attr_accessor :driver_output_resource_uri

- # Update properties of this object
- def update!(**args)
- @job = args[:job] if args.key?(:job)
- end
- end
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
+ # applications on YARN.
+ # Corresponds to the JSON property `sparkJob`
+ # @return [Google::Apis::DataprocV1::SparkJob]
+ attr_accessor :spark_job

- # The Status type defines a logical error model that is suitable for different
- # programming environments, including REST APIs and RPC APIs. It is used by gRPC
- # (https://github.com/grpc). The error model is designed to be:
- # Simple to use and understand for most users
- # Flexible enough to meet unexpected needsOverviewThe Status message contains
- # three pieces of data: error code, error message, and error details. The error
- # code should be an enum value of google.rpc.Code, but it may accept additional
- # error codes if needed. The error message should be a developer-facing English
- # message that helps developers understand and resolve the error. If a localized
- # user-facing error message is needed, put the localized message in the error
- # details or localize it in the client. The optional error details may contain
- # arbitrary information about the error. There is a predefined set of error
- # detail types in the package google.rpc which can be used for common error
- # conditions.Language mappingThe Status message is the logical representation of
- # the error model, but it is not necessarily the actual wire format. When the
- # Status message is exposed in different client libraries and different wire
- # protocols, it can be mapped differently. For example, it will likely be mapped
- # to some exceptions in Java, but more likely mapped to some error codes in C.
- # Other usesThe error model and the Status message can be used in a variety of
- # environments, either with or without APIs, to provide a consistent developer
- # experience across different environments.Example uses of this error model
- # include:
- # Partial errors. If a service needs to return partial errors to the client, it
- # may embed the Status in the normal response to indicate the partial errors.
- # Workflow errors. A typical workflow has multiple steps. Each step may have a
- # Status message for error reporting purpose.
- # Batch operations. If a client uses batch request and batch response, the
- # Status message should be used directly inside batch response, one for each
- # error sub-response.
- # Asynchronous operations. If an API call embeds asynchronous operation results
- # in its response, the status of those operations should be represented directly
- # using the Status message.
- # Logging. If some API errors are stored in logs, the message Status could be
- # used directly after any stripping needed for security/privacy reasons.
- class Status
- include Google::Apis::Core::Hashable
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
+ # ) queries.
+ # Corresponds to the JSON property `sparkSqlJob`
+ # @return [Google::Apis::DataprocV1::SparkSqlJob]
+ attr_accessor :spark_sql_job

- # The status code, which should be an enum value of google.rpc.Code.
- # Corresponds to the JSON property `code`
- # @return [Fixnum]
- attr_accessor :code
+ # Output-only The previous job status.
+ # Corresponds to the JSON property `statusHistory`
+ # @return [Array<Google::Apis::DataprocV1::JobStatus>]
+ attr_accessor :status_history

- # A developer-facing error message, which should be in English. Any user-facing
- # error message should be localized and sent in the google.rpc.Status.details
- # field, or localized by the client.
- # Corresponds to the JSON property `message`
- # @return [String]
- attr_accessor :message
+ # Output-only The collection of YARN applications spun up by this job.Beta
+ # Feature: This report is available for testing purposes only. It may be changed
+ # before final release.
+ # Corresponds to the JSON property `yarnApplications`
+ # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
+ attr_accessor :yarn_applications

- # A list of messages that carry the error details. There will be a common set of
- # message types for APIs to use.
- # Corresponds to the JSON property `details`
- # @return [Array<Hash<String,Object>>]
- attr_accessor :details
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
+ # 0.9.0/python-programming-guide.html) applications on YARN.
+ # Corresponds to the JSON property `pysparkJob`
+ # @return [Google::Apis::DataprocV1::PySparkJob]
+ attr_accessor :pyspark_job
+
+ # Encapsulates the full scoping used to reference a job.
+ # Corresponds to the JSON property `reference`
+ # @return [Google::Apis::DataprocV1::JobReference]
+ attr_accessor :reference
+
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+ # Corresponds to the JSON property `hadoopJob`
+ # @return [Google::Apis::DataprocV1::HadoopJob]
+ attr_accessor :hadoop_job

  def initialize(**args)
  update!(**args)
@@ -354,95 +407,78 @@ module Google

  # Update properties of this object
  def update!(**args)
- @code = args[:code] if args.key?(:code)
- @message = args[:message] if args.key?(:message)
- @details = args[:details] if args.key?(:details)
+ @status = args[:status] if args.key?(:status)
+ @placement = args[:placement] if args.key?(:placement)
+ @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
+ @scheduling = args[:scheduling] if args.key?(:scheduling)
+ @pig_job = args[:pig_job] if args.key?(:pig_job)
+ @hive_job = args[:hive_job] if args.key?(:hive_job)
+ @labels = args[:labels] if args.key?(:labels)
+ @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
+ @spark_job = args[:spark_job] if args.key?(:spark_job)
+ @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
+ @status_history = args[:status_history] if args.key?(:status_history)
+ @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
+ @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
+ @reference = args[:reference] if args.key?(:reference)
+ @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
  end
  end

- # Optional The config settings for Google Compute Engine resources in an
- # instance group, such as a master or worker group.
- class InstanceGroupConfig
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
+ # applications on YARN.
+ class SparkJob
  include Google::Apis::Core::Hashable

- # Specifies the resources used to actively manage an instance group.
- # Corresponds to the JSON property `managedGroupConfig`
- # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
- attr_accessor :managed_group_config
-
- # Optional Specifies that this instance group contains preemptible instances.
- # Corresponds to the JSON property `isPreemptible`
- # @return [Boolean]
- attr_accessor :is_preemptible
- alias_method :is_preemptible?, :is_preemptible
-
- # Required The Google Compute Engine machine type used for cluster instances.
- # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
- # east1-a/machineTypes/n1-standard-2.
- # Corresponds to the JSON property `machineTypeUri`
- # @return [String]
- attr_accessor :machine_type_uri
-
- # Output-only The Google Compute Engine image resource used for cluster
- # instances. Inferred from SoftwareConfig.image_version.
- # Corresponds to the JSON property `imageUri`
- # @return [String]
- attr_accessor :image_uri
-
- # Optional The list of instance names. Cloud Dataproc derives the names from
- # cluster_name, num_instances, and the instance group if not set by user (
- # recommended practice is to let Cloud Dataproc derive the name).
- # Corresponds to the JSON property `instanceNames`
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
+ # and tasks.
+ # Corresponds to the JSON property `jarFileUris`
  # @return [Array<String>]
- attr_accessor :instance_names
+ attr_accessor :jar_file_uris

- # Optional The Google Compute Engine accelerator configuration for these
- # instances.Beta Feature: This feature is still under development. It may be
- # changed before final release.
- # Corresponds to the JSON property `accelerators`
- # @return [Array<Google::Apis::DataprocV1::AcceleratorConfig>]
- attr_accessor :accelerators
+ # The runtime logging config of the job.
+ # Corresponds to the JSON property `loggingConfig`
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
+ attr_accessor :logging_config

- # Required The number of VM instances in the instance group. For master instance
- # groups, must be set to 1.
- # Corresponds to the JSON property `numInstances`
- # @return [Fixnum]
- attr_accessor :num_instances
+ # Optional A mapping of property names to values, used to configure Spark.
+ # Properties that conflict with values set by the Cloud Dataproc API may be
+ # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
+ # and classes in user code.
+ # Corresponds to the JSON property `properties`
+ # @return [Hash<String,String>]
+ attr_accessor :properties

- # Specifies the config of disk options for a group of VM instances.
- # Corresponds to the JSON property `diskConfig`
- # @return [Google::Apis::DataprocV1::DiskConfig]
- attr_accessor :disk_config
+ # Optional The arguments to pass to the driver. Do not include arguments, such
+ # as --conf, that can be set as job properties, since a collision may occur that
+ # causes an incorrect job submission.
+ # Corresponds to the JSON property `args`
+ # @return [Array<String>]
+ attr_accessor :args

- def initialize(**args)
- update!(**args)
- end
+ # Optional HCFS URIs of files to be copied to the working directory of Spark
+ # drivers and distributed tasks. Useful for naively parallel tasks.
+ # Corresponds to the JSON property `fileUris`
+ # @return [Array<String>]
+ attr_accessor :file_uris

- # Update properties of this object
- def update!(**args)
- @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
- @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
- @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
- @image_uri = args[:image_uri] if args.key?(:image_uri)
- @instance_names = args[:instance_names] if args.key?(:instance_names)
- @accelerators = args[:accelerators] if args.key?(:accelerators)
- @num_instances = args[:num_instances] if args.key?(:num_instances)
- @disk_config = args[:disk_config] if args.key?(:disk_config)
- end
- end
+ # The name of the driver's main class. The jar file that contains the class must
+ # be in the default CLASSPATH or specified in jar_file_uris.
+ # Corresponds to the JSON property `mainClass`
+ # @return [String]
+ attr_accessor :main_class

- # Job scheduling options.Beta Feature: These options are available for testing
- # purposes only. They may be changed before final release.
- class JobScheduling
- include Google::Apis::Core::Hashable
+ # Optional HCFS URIs of archives to be extracted in the working directory of
+ # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
+ # zip.
+ # Corresponds to the JSON property `archiveUris`
+ # @return [Array<String>]
+ attr_accessor :archive_uris

- # Optional Maximum number of times per hour a driver may be restarted as a
- # result of driver terminating with non-zero code before job is reported failed.
- # A job may be reported as thrashing if driver exits with non-zero code 4 times
- # within 10 minute window.Maximum value is 10.
- # Corresponds to the JSON property `maxFailuresPerHour`
- # @return [Fixnum]
- attr_accessor :max_failures_per_hour
+ # The HCFS URI of the jar file that contains the main class.
+ # Corresponds to the JSON property `mainJarFileUri`
+ # @return [String]
+ attr_accessor :main_jar_file_uri

  def initialize(**args)
  update!(**args)
@@ -450,25 +486,42 @@ module Google

  # Update properties of this object
  def update!(**args)
- @max_failures_per_hour = args[:max_failures_per_hour] if args.key?(:max_failures_per_hour)
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
+ @properties = args[:properties] if args.key?(:properties)
+ @args = args[:args] if args.key?(:args)
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
+ @main_class = args[:main_class] if args.key?(:main_class)
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
  end
  end

- # A list of jobs in a project.
- class ListJobsResponse
+ # Cloud Dataproc job status.
+ class JobStatus
  include Google::Apis::Core::Hashable

- # Optional This token is included in the response if there are more results to
- # fetch. To fetch additional results, provide this value as the page_token in a
- # subsequent <code>ListJobsRequest</code>.
- # Corresponds to the JSON property `nextPageToken`
+ # Output-only A state message specifying the overall job state.
+ # Corresponds to the JSON property `state`
  # @return [String]
- attr_accessor :next_page_token
+ attr_accessor :state

- # Output-only Jobs list.
- # Corresponds to the JSON property `jobs`
- # @return [Array<Google::Apis::DataprocV1::Job>]
- attr_accessor :jobs
+ # Output-only Optional job state details, such as an error description if the
+ # state is <code>ERROR</code>.
+ # Corresponds to the JSON property `details`
+ # @return [String]
+ attr_accessor :details
+
+ # Output-only The time when this state was entered.
+ # Corresponds to the JSON property `stateStartTime`
+ # @return [String]
+ attr_accessor :state_start_time
+
+ # Output-only Additional state information, which includes status reported by
+ # the agent.
+ # Corresponds to the JSON property `substate`
+ # @return [String]
+ attr_accessor :substate

  def initialize(**args)
  update!(**args)
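The JobStatus class introduced above exposes the job's overall state plus optional details. A hedged sketch of how a caller might inspect it; the job variable is assumed to be a Google::Apis::DataprocV1::Job obtained from the service, which is not shown here.

status = job.status
if status && status.state == 'ERROR'
  # details carries an error description when state is ERROR, per the doc comment above
  warn "Job failed at #{status.state_start_time}: #{status.details}"
else
  puts "Job state: #{status && status.state} (substate: #{status && status.substate})"
end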
@@ -476,28 +529,27 @@ module Google
476
529
 
477
530
  # Update properties of this object
478
531
  def update!(**args)
479
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
480
- @jobs = args[:jobs] if args.key?(:jobs)
532
+ @state = args[:state] if args.key?(:state)
533
+ @details = args[:details] if args.key?(:details)
534
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
535
+ @substate = args[:substate] if args.key?(:substate)
481
536
  end
482
537
  end
483
538
 
484
- # Specifies an executable to run on a fully configured node and a timeout period
485
- # for executable completion.
486
- class NodeInitializationAction
539
+ # Specifies the resources used to actively manage an instance group.
540
+ class ManagedGroupConfig
487
541
  include Google::Apis::Core::Hashable
488
542
 
489
- # Optional Amount of time executable has to complete. Default is 10 minutes.
490
- # Cluster creation fails with an explanatory error message (the name of the
491
- # executable that caused the error and the exceeded timeout period) if the
492
- # executable is not completed at end of the timeout period.
493
- # Corresponds to the JSON property `executionTimeout`
543
+ # Output-only The name of the Instance Group Manager for this group.
544
+ # Corresponds to the JSON property `instanceGroupManagerName`
494
545
  # @return [String]
495
- attr_accessor :execution_timeout
546
+ attr_accessor :instance_group_manager_name
496
547
 
497
- # Required Google Cloud Storage URI of executable file.
498
- # Corresponds to the JSON property `executableFile`
548
+ # Output-only The name of the Instance Template used for the Managed Instance
549
+ # Group.
550
+ # Corresponds to the JSON property `instanceTemplateName`
499
551
  # @return [String]
500
- attr_accessor :executable_file
552
+ attr_accessor :instance_template_name
501
553
 
502
554
  def initialize(**args)
503
555
  update!(**args)
@@ -505,46 +557,77 @@ module Google
505
557
 
506
558
  # Update properties of this object
507
559
  def update!(**args)
508
- @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
509
- @executable_file = args[:executable_file] if args.key?(:executable_file)
560
+ @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
561
+ @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
510
562
  end
511
563
  end
512
564
 
513
- # A request to cancel a job.
514
- class CancelJobRequest
565
+ # The status of the operation.
566
+ class ClusterOperationStatus
515
567
  include Google::Apis::Core::Hashable
516
568
 
569
+ # Output-only A message containing the detailed operation state.
570
+ # Corresponds to the JSON property `innerState`
571
+ # @return [String]
572
+ attr_accessor :inner_state
573
+
574
+ # Output-only The time this state was entered.
575
+ # Corresponds to the JSON property `stateStartTime`
576
+ # @return [String]
577
+ attr_accessor :state_start_time
578
+
579
+ # Output-only A message containing the operation state.
580
+ # Corresponds to the JSON property `state`
581
+ # @return [String]
582
+ attr_accessor :state
583
+
584
+ # Output-onlyA message containing any operation metadata details.
585
+ # Corresponds to the JSON property `details`
586
+ # @return [String]
587
+ attr_accessor :details
588
+
517
589
  def initialize(**args)
518
590
  update!(**args)
519
591
  end
520
592
 
521
593
  # Update properties of this object
522
594
  def update!(**args)
595
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
596
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
597
+ @state = args[:state] if args.key?(:state)
598
+ @details = args[:details] if args.key?(:details)
523
599
  end
524
600
  end
525
601
 
526
- # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
527
- # ) queries.
528
- class SparkSqlJob
602
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
603
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
604
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
605
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
606
+ class HadoopJob
529
607
  include Google::Apis::Core::Hashable
530
608
 
531
- # The HCFS URI of the script that contains SQL queries.
532
- # Corresponds to the JSON property `queryFileUri`
609
+ # The name of the driver's main class. The jar file containing the class must be
610
+ # in the default CLASSPATH or specified in jar_file_uris.
611
+ # Corresponds to the JSON property `mainClass`
533
612
  # @return [String]
534
- attr_accessor :query_file_uri
613
+ attr_accessor :main_class
535
614
 
536
- # A list of queries to run on a cluster.
537
- # Corresponds to the JSON property `queryList`
538
- # @return [Google::Apis::DataprocV1::QueryList]
539
- attr_accessor :query_list
615
+ # Optional HCFS URIs of archives to be extracted in the working directory of
616
+ # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
617
+ # zip.
618
+ # Corresponds to the JSON property `archiveUris`
619
+ # @return [Array<String>]
620
+ attr_accessor :archive_uris
540
621
 
541
- # Optional Mapping of query variable names to values (equivalent to the Spark
542
- # SQL command: SET name="value";).
543
- # Corresponds to the JSON property `scriptVariables`
544
- # @return [Hash<String,String>]
545
- attr_accessor :script_variables
622
+ # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
623
+ # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
624
+ # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
625
+ # mapreduce-examples.jar'
626
+ # Corresponds to the JSON property `mainJarFileUri`
627
+ # @return [String]
628
+ attr_accessor :main_jar_file_uri
546
629
 
547
- # Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.
630
+ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
548
631
  # Corresponds to the JSON property `jarFileUris`
549
632
  # @return [Array<String>]
550
633
  attr_accessor :jar_file_uris
@@ -554,111 +637,65 @@ module Google
554
637
  # @return [Google::Apis::DataprocV1::LoggingConfig]
555
638
  attr_accessor :logging_config
556
639
 
557
- # Optional A mapping of property names to values, used to configure Spark SQL's
558
- # SparkConf. Properties that conflict with values set by the Cloud Dataproc API
559
- # may be overwritten.
640
+ # Optional A mapping of property names to values, used to configure Hadoop.
641
+ # Properties that conflict with values set by the Cloud Dataproc API may be
642
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
643
+ # in user code.
560
644
  # Corresponds to the JSON property `properties`
561
645
  # @return [Hash<String,String>]
562
646
  attr_accessor :properties
563
647
 
648
+ # Optional The arguments to pass to the driver. Do not include arguments, such
649
+ # as -libjars or -Dfoo=bar, that can be set as job properties, since a collision
650
+ # may occur that causes an incorrect job submission.
651
+ # Corresponds to the JSON property `args`
652
+ # @return [Array<String>]
653
+ attr_accessor :args
654
+
655
+ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the
656
+ # working directory of Hadoop drivers and distributed tasks. Useful for naively
657
+ # parallel tasks.
658
+ # Corresponds to the JSON property `fileUris`
659
+ # @return [Array<String>]
660
+ attr_accessor :file_uris
661
+
564
662
  def initialize(**args)
565
663
  update!(**args)
566
664
  end
567
665
 
568
666
  # Update properties of this object
569
667
  def update!(**args)
570
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
571
- @query_list = args[:query_list] if args.key?(:query_list)
572
- @script_variables = args[:script_variables] if args.key?(:script_variables)
668
+ @main_class = args[:main_class] if args.key?(:main_class)
669
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
670
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
573
671
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
574
672
  @logging_config = args[:logging_config] if args.key?(:logging_config)
575
673
  @properties = args[:properties] if args.key?(:properties)
674
+ @args = args[:args] if args.key?(:args)
675
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
576
676
  end
577
677
  end
578
678
 
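For reference, the HadoopJob model added above is built like any other generated class in this gem; a minimal sketch (the bucket, jar, and property values are placeholders, not part of this release):

require 'google/apis/dataproc_v1'

# Describe a jar-driven MapReduce job; only one of main_jar_file_uri or
# main_class is normally set.
hadoop_job = Google::Apis::DataprocV1::HadoopJob.new(
  main_jar_file_uri: 'gs://example-bucket/analytics/wordcount.jar',  # placeholder URI
  args: ['gs://example-bucket/input/', 'gs://example-bucket/output/'],
  properties: { 'mapreduce.job.reduces' => '2' }
)
puts hadoop_job.main_jar_file_uri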
579
- # Describes the identifying information, config, and status of a cluster of
580
- # Google Compute Engine instances.
581
- class Cluster
582
- include Google::Apis::Core::Hashable
583
-
584
- # Optional The labels to associate with this cluster. Label keys must contain 1
585
- # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
586
- # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
587
- # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
588
- # . No more than 32 labels can be associated with a cluster.
589
- # Corresponds to the JSON property `labels`
590
- # @return [Hash<String,String>]
591
- attr_accessor :labels
592
-
593
- # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
594
- # This report is available for testing purposes only. It may be changed before
595
- # final release.
596
- # Corresponds to the JSON property `metrics`
597
- # @return [Google::Apis::DataprocV1::ClusterMetrics]
598
- attr_accessor :metrics
599
-
600
- # The status of a cluster and its instances.
601
- # Corresponds to the JSON property `status`
602
- # @return [Google::Apis::DataprocV1::ClusterStatus]
603
- attr_accessor :status
604
-
605
- # The cluster config.
606
- # Corresponds to the JSON property `config`
607
- # @return [Google::Apis::DataprocV1::ClusterConfig]
608
- attr_accessor :config
609
-
610
- # Output-only The previous cluster status.
611
- # Corresponds to the JSON property `statusHistory`
612
- # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
613
- attr_accessor :status_history
614
-
615
- # Required The cluster name. Cluster names within a project must be unique.
616
- # Names of deleted clusters can be reused.
617
- # Corresponds to the JSON property `clusterName`
618
- # @return [String]
619
- attr_accessor :cluster_name
620
-
621
- # Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
622
- # generates this value when it creates the cluster.
623
- # Corresponds to the JSON property `clusterUuid`
624
- # @return [String]
625
- attr_accessor :cluster_uuid
626
-
627
- # Required The Google Cloud Platform project ID that the cluster belongs to.
628
- # Corresponds to the JSON property `projectId`
629
- # @return [String]
630
- attr_accessor :project_id
631
-
632
- def initialize(**args)
633
- update!(**args)
634
- end
635
-
636
- # Update properties of this object
637
- def update!(**args)
638
- @labels = args[:labels] if args.key?(:labels)
639
- @metrics = args[:metrics] if args.key?(:metrics)
640
- @status = args[:status] if args.key?(:status)
641
- @config = args[:config] if args.key?(:config)
642
- @status_history = args[:status_history] if args.key?(:status_history)
643
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
644
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
645
- @project_id = args[:project_id] if args.key?(:project_id)
646
- end
647
- end
648
-
649
- # The response message for Operations.ListOperations.
650
- class ListOperationsResponse
651
- include Google::Apis::Core::Hashable
652
-
653
- # The standard List next-page token.
654
- # Corresponds to the JSON property `nextPageToken`
655
- # @return [String]
656
- attr_accessor :next_page_token
679
+ # A list of queries to run on a cluster.
680
+ class QueryList
681
+ include Google::Apis::Core::Hashable
657
682
 
658
- # A list of operations that matches the specified filter in the request.
659
- # Corresponds to the JSON property `operations`
660
- # @return [Array<Google::Apis::DataprocV1::Operation>]
661
- attr_accessor :operations
683
+ # Required The queries to execute. You do not need to terminate a query with a
684
+ # semicolon. Multiple queries can be specified in one string by separating each
685
+ # with a semicolon. Here is an example of a Cloud Dataproc API snippet that
686
+ # uses a QueryList to specify a HiveJob:
687
+ # "hiveJob": `
688
+ # "queryList": `
689
+ # "queries": [
690
+ # "query1",
691
+ # "query2",
692
+ # "query3;query4",
693
+ # ]
694
+ # `
695
+ # `
696
+ # Corresponds to the JSON property `queries`
697
+ # @return [Array<String>]
698
+ attr_accessor :queries
662
699
 
663
700
  def initialize(**args)
664
701
  update!(**args)
@@ -666,79 +703,39 @@ module Google
666
703
 
667
704
  # Update properties of this object
668
705
  def update!(**args)
669
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
670
- @operations = args[:operations] if args.key?(:operations)
706
+ @queries = args[:queries] if args.key?(:queries)
671
707
  end
672
708
  end
673
709
 
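The comment above shows the JSON shape of a QueryList; the equivalent Ruby, using the class just defined (a minimal sketch, the query strings are placeholders):

require 'google/apis/dataproc_v1'

# Mirror of the "queries" example from the comment above.
query_list = Google::Apis::DataprocV1::QueryList.new(
  queries: ['query1', 'query2', 'query3;query4']
)
puts query_list.queries.length  # => 3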
674
- # Metadata describing the operation.
675
- class OperationMetadata
710
+ # A YARN application created by a job. Application information is a subset of <
711
+ # code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
712
+ # Beta Feature: This report is available for testing purposes only. It may be
713
+ # changed before final release.
714
+ class YarnApplication
676
715
  include Google::Apis::Core::Hashable
677
716
 
678
- # Output-only Previous operation status.
679
- # Corresponds to the JSON property `statusHistory`
680
- # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
681
- attr_accessor :status_history
682
-
683
- # Output-only The operation type.
684
- # Corresponds to the JSON property `operationType`
685
- # @return [String]
686
- attr_accessor :operation_type
687
-
688
- # Output-only Short description of operation.
689
- # Corresponds to the JSON property `description`
717
+ # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
718
+ # TimelineServer that provides application-specific information. The URL uses
719
+ # the internal hostname, and requires a proxy server for resolution and,
720
+ # possibly, access.
721
+ # Corresponds to the JSON property `trackingUrl`
690
722
  # @return [String]
691
- attr_accessor :description
692
-
693
- # The status of the operation.
694
- # Corresponds to the JSON property `status`
695
- # @return [Google::Apis::DataprocV1::OperationStatus]
696
- attr_accessor :status
723
+ attr_accessor :tracking_url
697
724
 
698
- # A message containing any operation metadata details.
699
- # Corresponds to the JSON property `details`
700
- # @return [String]
701
- attr_accessor :details
725
+ # Required The numerical progress of the application, from 1 to 100.
726
+ # Corresponds to the JSON property `progress`
727
+ # @return [Float]
728
+ attr_accessor :progress
702
729
 
703
- # A message containing the operation state.
730
+ # Required The application state.
704
731
  # Corresponds to the JSON property `state`
705
732
  # @return [String]
706
733
  attr_accessor :state
707
734
 
708
- # Name of the cluster for the operation.
709
- # Corresponds to the JSON property `clusterName`
710
- # @return [String]
711
- attr_accessor :cluster_name
712
-
713
- # Cluster UUId for the operation.
714
- # Corresponds to the JSON property `clusterUuid`
715
- # @return [String]
716
- attr_accessor :cluster_uuid
717
-
718
- # A message containing the detailed operation state.
719
- # Corresponds to the JSON property `innerState`
720
- # @return [String]
721
- attr_accessor :inner_state
722
-
723
- # The time that the operation completed.
724
- # Corresponds to the JSON property `endTime`
725
- # @return [String]
726
- attr_accessor :end_time
727
-
728
- # The time that the operation was started by the server.
729
- # Corresponds to the JSON property `startTime`
730
- # @return [String]
731
- attr_accessor :start_time
732
-
733
- # Output-only Errors encountered during operation execution.
734
- # Corresponds to the JSON property `warnings`
735
- # @return [Array<String>]
736
- attr_accessor :warnings
737
-
738
- # The time that the operation was requested.
739
- # Corresponds to the JSON property `insertTime`
735
+ # Required The application name.
736
+ # Corresponds to the JSON property `name`
740
737
  # @return [String]
741
- attr_accessor :insert_time
738
+ attr_accessor :name
742
739
 
743
740
  def initialize(**args)
744
741
  update!(**args)
@@ -746,72 +743,43 @@ module Google
746
743
 
747
744
  # Update properties of this object
748
745
  def update!(**args)
749
- @status_history = args[:status_history] if args.key?(:status_history)
750
- @operation_type = args[:operation_type] if args.key?(:operation_type)
751
- @description = args[:description] if args.key?(:description)
752
- @status = args[:status] if args.key?(:status)
753
- @details = args[:details] if args.key?(:details)
746
+ @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
747
+ @progress = args[:progress] if args.key?(:progress)
754
748
  @state = args[:state] if args.key?(:state)
755
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
756
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
757
- @inner_state = args[:inner_state] if args.key?(:inner_state)
758
- @end_time = args[:end_time] if args.key?(:end_time)
759
- @start_time = args[:start_time] if args.key?(:start_time)
760
- @warnings = args[:warnings] if args.key?(:warnings)
761
- @insert_time = args[:insert_time] if args.key?(:insert_time)
749
+ @name = args[:name] if args.key?(:name)
762
750
  end
763
751
  end
764
752
 
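YarnApplication is an output-only report that normally arrives on a job's list of YARN applications; building one by hand is only useful for tests or illustration (all values below are made up):

require 'google/apis/dataproc_v1'

app = Google::Apis::DataprocV1::YarnApplication.new(
  name: 'wordcount',
  state: 'RUNNING',
  progress: 42.0,
  tracking_url: 'http://example-master:8088/proxy/application_1/'  # placeholder URL
)
puts "#{app.name}: #{app.state} (#{app.progress}%)"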
765
- # Cloud Dataproc job config.
766
- class JobPlacement
753
+ # A request to collect cluster diagnostic information.
754
+ class DiagnoseClusterRequest
767
755
  include Google::Apis::Core::Hashable
768
756
 
769
- # Required The name of the cluster where the job will be submitted.
770
- # Corresponds to the JSON property `clusterName`
771
- # @return [String]
772
- attr_accessor :cluster_name
773
-
774
- # Output-only A cluster UUID generated by the Cloud Dataproc service when the
775
- # job is submitted.
776
- # Corresponds to the JSON property `clusterUuid`
777
- # @return [String]
778
- attr_accessor :cluster_uuid
779
-
780
757
  def initialize(**args)
781
758
  update!(**args)
782
759
  end
783
760
 
784
761
  # Update properties of this object
785
762
  def update!(**args)
786
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
787
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
788
763
  end
789
764
  end
790
765
 
791
- # Specifies the selection and config of software inside the cluster.
792
- class SoftwareConfig
766
+ # Specifies the config of disk options for a group of VM instances.
767
+ class DiskConfig
793
768
  include Google::Apis::Core::Hashable
794
769
 
795
- # Optional The version of software inside the cluster. It must match the regular
796
- # expression [0-9]+\.[0-9]+. If unspecified, it defaults to the latest version (
797
- # see Cloud Dataproc Versioning).
798
- # Corresponds to the JSON property `imageVersion`
799
- # @return [String]
800
- attr_accessor :image_version
770
+ # Optional Size in GB of the boot disk (default is 500GB).
771
+ # Corresponds to the JSON property `bootDiskSizeGb`
772
+ # @return [Fixnum]
773
+ attr_accessor :boot_disk_size_gb
801
774
 
802
- # Optional The properties to set on daemon config files.Property keys are
803
- # specified in prefix:property format, such as core:fs.defaultFS. The following
804
- # are supported prefixes and their mappings:
805
- # core: core-site.xml
806
- # hdfs: hdfs-site.xml
807
- # mapred: mapred-site.xml
808
- # yarn: yarn-site.xml
809
- # hive: hive-site.xml
810
- # pig: pig.properties
811
- # spark: spark-defaults.conf
812
- # Corresponds to the JSON property `properties`
813
- # @return [Hash<String,String>]
814
- attr_accessor :properties
775
+ # Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
776
+ # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
777
+ # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
778
+ # attached, this runtime bulk data is spread across them, and the boot disk
779
+ # contains only basic config and installed binaries.
780
+ # Corresponds to the JSON property `numLocalSsds`
781
+ # @return [Fixnum]
782
+ attr_accessor :num_local_ssds
815
783
 
816
784
  def initialize(**args)
817
785
  update!(**args)
@@ -819,58 +787,81 @@ module Google
819
787
 
820
788
  # Update properties of this object
821
789
  def update!(**args)
822
- @image_version = args[:image_version] if args.key?(:image_version)
823
- @properties = args[:properties] if args.key?(:properties)
790
+ @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
791
+ @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
824
792
  end
825
793
  end
826
794
 
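DiskConfig is plain data as well; a short sketch matching the defaults described above (500 GB boot disk, optional local SSDs):

require 'google/apis/dataproc_v1'

disk_config = Google::Apis::DataprocV1::DiskConfig.new(
  boot_disk_size_gb: 500,  # matches the documented default
  num_local_ssds: 1        # 0 to 4; HDFS data moves to the SSDs when any are attached
)
puts disk_config.boot_disk_size_gb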
827
- # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
828
- # on YARN.
829
- class PigJob
795
+ # Metadata describing the operation.
796
+ class ClusterOperationMetadata
830
797
  include Google::Apis::Core::Hashable
831
798
 
832
- # Optional Whether to continue executing queries if a query fails. The default
833
- # value is false. Setting to true can be useful when executing independent
834
- # parallel queries.
835
- # Corresponds to the JSON property `continueOnFailure`
836
- # @return [Boolean]
837
- attr_accessor :continue_on_failure
838
- alias_method :continue_on_failure?, :continue_on_failure
839
-
840
- # A list of queries to run on a cluster.
841
- # Corresponds to the JSON property `queryList`
842
- # @return [Google::Apis::DataprocV1::QueryList]
843
- attr_accessor :query_list
799
+ # Output-only The operation type.
800
+ # Corresponds to the JSON property `operationType`
801
+ # @return [String]
802
+ attr_accessor :operation_type
844
803
 
845
- # The HCFS URI of the script that contains the Pig queries.
846
- # Corresponds to the JSON property `queryFileUri`
804
+ # Output-only Short description of operation.
805
+ # Corresponds to the JSON property `description`
847
806
  # @return [String]
848
- attr_accessor :query_file_uri
807
+ attr_accessor :description
849
808
 
850
- # Optional Mapping of query variable names to values (equivalent to the Pig
851
- # command: name=[value]).
852
- # Corresponds to the JSON property `scriptVariables`
809
+ # Output-only Errors encountered during operation execution.
810
+ # Corresponds to the JSON property `warnings`
811
+ # @return [Array<String>]
812
+ attr_accessor :warnings
813
+
814
+ # Output-only Labels associated with the operation
815
+ # Corresponds to the JSON property `labels`
853
816
  # @return [Hash<String,String>]
854
- attr_accessor :script_variables
817
+ attr_accessor :labels
855
818
 
856
- # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and
857
- # Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
858
- # Corresponds to the JSON property `jarFileUris`
859
- # @return [Array<String>]
860
- attr_accessor :jar_file_uris
819
+ # The status of the operation.
820
+ # Corresponds to the JSON property `status`
821
+ # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
822
+ attr_accessor :status
861
823
 
862
- # The runtime logging config of the job.
863
- # Corresponds to the JSON property `loggingConfig`
864
- # @return [Google::Apis::DataprocV1::LoggingConfig]
865
- attr_accessor :logging_config
824
+ # Output-only The previous operation status.
825
+ # Corresponds to the JSON property `statusHistory`
826
+ # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
827
+ attr_accessor :status_history
828
+
829
+ # Output-only Cluster UUID for the operation.
830
+ # Corresponds to the JSON property `clusterUuid`
831
+ # @return [String]
832
+ attr_accessor :cluster_uuid
833
+
834
+ # Output-only Name of the cluster for the operation.
835
+ # Corresponds to the JSON property `clusterName`
836
+ # @return [String]
837
+ attr_accessor :cluster_name
838
+
839
+ def initialize(**args)
840
+ update!(**args)
841
+ end
842
+
843
+ # Update properties of this object
844
+ def update!(**args)
845
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
846
+ @description = args[:description] if args.key?(:description)
847
+ @warnings = args[:warnings] if args.key?(:warnings)
848
+ @labels = args[:labels] if args.key?(:labels)
849
+ @status = args[:status] if args.key?(:status)
850
+ @status_history = args[:status_history] if args.key?(:status_history)
851
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
852
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
853
+ end
854
+ end
866
855
 
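ClusterOperationMetadata is output-only and normally surfaces through the metadata of cluster Operations; a hand-built instance just to show the accessors (all values are invented):

require 'google/apis/dataproc_v1'

metadata = Google::Apis::DataprocV1::ClusterOperationMetadata.new(
  operation_type: 'CREATE',
  cluster_name: 'example-cluster',    # placeholder
  cluster_uuid: '0000-example-uuid',  # placeholder
  warnings: []
)
puts "#{metadata.operation_type} on #{metadata.cluster_name} (#{metadata.cluster_uuid})"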
867
- # Optional A mapping of property names to values, used to configure Pig.
868
- # Properties that conflict with values set by the Cloud Dataproc API may be
869
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
870
- # pig/conf/pig.properties, and classes in user code.
871
- # Corresponds to the JSON property `properties`
872
- # @return [Hash<String,String>]
873
- attr_accessor :properties
856
+ # A generic empty message that you can re-use to avoid defining duplicated empty
857
+ # messages in your APIs. A typical example is to use it as the request or the
858
+ # response type of an API method. For instance:
859
+ # service Foo `
860
+ # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
861
+ # `
862
+ # The JSON representation for Empty is empty JSON object ``.
863
+ class Empty
864
+ include Google::Apis::Core::Hashable
874
865
 
875
866
  def initialize(**args)
876
867
  update!(**args)
@@ -878,34 +869,51 @@ module Google
878
869
 
879
870
  # Update properties of this object
880
871
  def update!(**args)
881
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
882
- @query_list = args[:query_list] if args.key?(:query_list)
883
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
884
- @script_variables = args[:script_variables] if args.key?(:script_variables)
885
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
886
- @logging_config = args[:logging_config] if args.key?(:logging_config)
887
- @properties = args[:properties] if args.key?(:properties)
888
872
  end
889
873
  end
890
874
 
891
- # The status of a cluster and its instances.
892
- class ClusterStatus
875
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
876
+ # queries on YARN.
877
+ class HiveJob
893
878
  include Google::Apis::Core::Hashable
894
879
 
895
- # Output-only Time when this state was entered.
896
- # Corresponds to the JSON property `stateStartTime`
897
- # @return [String]
898
- attr_accessor :state_start_time
880
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and
881
+ # Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
882
+ # Corresponds to the JSON property `jarFileUris`
883
+ # @return [Array<String>]
884
+ attr_accessor :jar_file_uris
899
885
 
900
- # Output-only Optional details of cluster's state.
901
- # Corresponds to the JSON property `detail`
902
- # @return [String]
903
- attr_accessor :detail
886
+ # Optional Mapping of query variable names to values (equivalent to the Hive
887
+ # command: SET name="value";).
888
+ # Corresponds to the JSON property `scriptVariables`
889
+ # @return [Hash<String,String>]
890
+ attr_accessor :script_variables
904
891
 
905
- # Output-only The cluster's state.
906
- # Corresponds to the JSON property `state`
892
+ # Optional A mapping of property names and values, used to configure Hive.
893
+ # Properties that conflict with values set by the Cloud Dataproc API may be
894
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
895
+ # hive/conf/hive-site.xml, and classes in user code.
896
+ # Corresponds to the JSON property `properties`
897
+ # @return [Hash<String,String>]
898
+ attr_accessor :properties
899
+
900
+ # Optional Whether to continue executing queries if a query fails. The default
901
+ # value is false. Setting to true can be useful when executing independent
902
+ # parallel queries.
903
+ # Corresponds to the JSON property `continueOnFailure`
904
+ # @return [Boolean]
905
+ attr_accessor :continue_on_failure
906
+ alias_method :continue_on_failure?, :continue_on_failure
907
+
908
+ # The HCFS URI of the script that contains Hive queries.
909
+ # Corresponds to the JSON property `queryFileUri`
907
910
  # @return [String]
908
- attr_accessor :state
911
+ attr_accessor :query_file_uri
912
+
913
+ # A list of queries to run on a cluster.
914
+ # Corresponds to the JSON property `queryList`
915
+ # @return [Google::Apis::DataprocV1::QueryList]
916
+ attr_accessor :query_list
909
917
 
910
918
  def initialize(**args)
911
919
  update!(**args)
@@ -913,27 +921,24 @@ module Google
913
921
 
914
922
  # Update properties of this object
915
923
  def update!(**args)
916
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
917
- @detail = args[:detail] if args.key?(:detail)
918
- @state = args[:state] if args.key?(:state)
924
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
925
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
926
+ @properties = args[:properties] if args.key?(:properties)
927
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
928
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
929
+ @query_list = args[:query_list] if args.key?(:query_list)
919
930
  end
920
931
  end
921
932
 
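HiveJob ties the pieces above together; a minimal sketch that reuses QueryList (the queries and script variables are placeholders):

require 'google/apis/dataproc_v1'

hive_job = Google::Apis::DataprocV1::HiveJob.new(
  query_list: Google::Apis::DataprocV1::QueryList.new(
    queries: ['SHOW DATABASES', 'SHOW TABLES']
  ),
  script_variables: { 'name' => 'value' },  # applied as: SET name="value";
  continue_on_failure: true
)
puts hive_job.continue_on_failure?  # => true, via the generated alias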
922
- # The list of all clusters in a project.
923
- class ListClustersResponse
933
+ # The location of diagnostic output.
934
+ class DiagnoseClusterResults
924
935
  include Google::Apis::Core::Hashable
925
936
 
926
- # Output-only The clusters in the project.
927
- # Corresponds to the JSON property `clusters`
928
- # @return [Array<Google::Apis::DataprocV1::Cluster>]
929
- attr_accessor :clusters
930
-
931
- # Output-only This token is included in the response if there are more results
932
- # to fetch. To fetch additional results, provide this value as the page_token in
933
- # a subsequent <code>ListClustersRequest</code>.
934
- # Corresponds to the JSON property `nextPageToken`
937
+ # Output-only The Google Cloud Storage URI of the diagnostic output. The output
938
+ # report is a plain text file with a summary of collected diagnostics.
939
+ # Corresponds to the JSON property `outputUri`
935
940
  # @return [String]
936
- attr_accessor :next_page_token
941
+ attr_accessor :output_uri
937
942
 
938
943
  def initialize(**args)
939
944
  update!(**args)
@@ -941,150 +946,90 @@ module Google
941
946
 
942
947
  # Update properties of this object
943
948
  def update!(**args)
944
- @clusters = args[:clusters] if args.key?(:clusters)
945
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
949
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
946
950
  end
947
951
  end
948
952
 
949
- # A Cloud Dataproc job resource.
950
- class Job
953
+ # The cluster config.
954
+ class ClusterConfig
951
955
  include Google::Apis::Core::Hashable
952
956
 
953
- # Encapsulates the full scoping used to reference a job.
954
- # Corresponds to the JSON property `reference`
955
- # @return [Google::Apis::DataprocV1::JobReference]
956
- attr_accessor :reference
957
-
958
- # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
959
- # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
960
- # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
961
- # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
962
- # Corresponds to the JSON property `hadoopJob`
963
- # @return [Google::Apis::DataprocV1::HadoopJob]
964
- attr_accessor :hadoop_job
965
-
966
- # Cloud Dataproc job config.
967
- # Corresponds to the JSON property `placement`
968
- # @return [Google::Apis::DataprocV1::JobPlacement]
969
- attr_accessor :placement
970
-
971
- # Cloud Dataproc job status.
972
- # Corresponds to the JSON property `status`
973
- # @return [Google::Apis::DataprocV1::JobStatus]
974
- attr_accessor :status
975
-
976
- # Output-only If present, the location of miscellaneous control files which may
977
- # be used as part of job setup and handling. If not present, control files may
978
- # be placed in the same location as driver_output_uri.
979
- # Corresponds to the JSON property `driverControlFilesUri`
980
- # @return [String]
981
- attr_accessor :driver_control_files_uri
982
-
983
- # Job scheduling options.Beta Feature: These options are available for testing
984
- # purposes only. They may be changed before final release.
985
- # Corresponds to the JSON property `scheduling`
986
- # @return [Google::Apis::DataprocV1::JobScheduling]
987
- attr_accessor :scheduling
988
-
989
- # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
990
- # on YARN.
991
- # Corresponds to the JSON property `pigJob`
992
- # @return [Google::Apis::DataprocV1::PigJob]
993
- attr_accessor :pig_job
957
+ # Optional The config settings for Google Compute Engine resources in an
958
+ # instance group, such as a master or worker group.
959
+ # Corresponds to the JSON property `masterConfig`
960
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
961
+ attr_accessor :master_config
994
962
 
995
- # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
996
- # queries on YARN.
997
- # Corresponds to the JSON property `hiveJob`
998
- # @return [Google::Apis::DataprocV1::HiveJob]
999
- attr_accessor :hive_job
963
+ # Optional The config settings for Google Compute Engine resources in an
964
+ # instance group, such as a master or worker group.
965
+ # Corresponds to the JSON property `secondaryWorkerConfig`
966
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
967
+ attr_accessor :secondary_worker_config
1000
968
 
1001
- # Optional The labels to associate with this job. Label keys must contain 1 to
1002
- # 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.
1003
- # txt). Label values may be empty, but, if present, must contain 1 to 63
1004
- # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1005
- # . No more than 32 labels can be associated with a job.
1006
- # Corresponds to the JSON property `labels`
1007
- # @return [Hash<String,String>]
1008
- attr_accessor :labels
969
+ # Optional Commands to execute on each node after config is completed. By
970
+ # default, executables are run on master and all worker nodes. You can test a
971
+ # node's <code>role</code> metadata to run an executable on a master or worker
972
+ # node, as shown below using curl (you can also use wget):
973
+ # ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/
974
+ # instance/attributes/dataproc-role)
975
+ # if [[ "$`ROLE`" == 'Master' ]]; then
976
+ # ... master specific actions ...
977
+ # else
978
+ # ... worker specific actions ...
979
+ # fi
980
+ # Corresponds to the JSON property `initializationActions`
981
+ # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
982
+ attr_accessor :initialization_actions
1009
983
 
1010
- # Output-only A URI pointing to the location of the stdout of the job's driver
1011
- # program.
1012
- # Corresponds to the JSON property `driverOutputResourceUri`
984
+ # Optional A Google Cloud Storage staging bucket used for sharing generated SSH
985
+ # keys and config. If you do not specify a staging bucket, Cloud Dataproc will
986
+ # determine an appropriate Cloud Storage location (US, ASIA, or EU) for your
987
+ # cluster's staging bucket according to the Google Compute Engine zone where
988
+ # your cluster is deployed, and then it will create and manage this project-
989
+ # level, per-location bucket for you.
990
+ # Corresponds to the JSON property `configBucket`
1013
991
  # @return [String]
1014
- attr_accessor :driver_output_resource_uri
1015
-
1016
- # Output-only The previous job status.
1017
- # Corresponds to the JSON property `statusHistory`
1018
- # @return [Array<Google::Apis::DataprocV1::JobStatus>]
1019
- attr_accessor :status_history
1020
-
1021
- # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
1022
- # ) queries.
1023
- # Corresponds to the JSON property `sparkSqlJob`
1024
- # @return [Google::Apis::DataprocV1::SparkSqlJob]
1025
- attr_accessor :spark_sql_job
992
+ attr_accessor :config_bucket
1026
993
 
1027
- # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
1028
- # applications on YARN.
1029
- # Corresponds to the JSON property `sparkJob`
1030
- # @return [Google::Apis::DataprocV1::SparkJob]
1031
- attr_accessor :spark_job
994
+ # Optional The config settings for Google Compute Engine resources in an
995
+ # instance group, such as a master or worker group.
996
+ # Corresponds to the JSON property `workerConfig`
997
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
998
+ attr_accessor :worker_config
1032
999
 
1033
- # Output-only The collection of YARN applications spun up by this job.Beta
1034
- # Feature: This report is available for testing purposes only. It may be changed
1035
- # before final release.
1036
- # Corresponds to the JSON property `yarnApplications`
1037
- # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
1038
- attr_accessor :yarn_applications
1000
+ # Common config settings for resources of Google Compute Engine cluster
1001
+ # instances, applicable to all instances in the cluster.
1002
+ # Corresponds to the JSON property `gceClusterConfig`
1003
+ # @return [Google::Apis::DataprocV1::GceClusterConfig]
1004
+ attr_accessor :gce_cluster_config
1039
1005
 
1040
- # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1041
- # 0.9.0/python-programming-guide.html) applications on YARN.
1042
- # Corresponds to the JSON property `pysparkJob`
1043
- # @return [Google::Apis::DataprocV1::PySparkJob]
1044
- attr_accessor :pyspark_job
1006
+ # Specifies the selection and config of software inside the cluster.
1007
+ # Corresponds to the JSON property `softwareConfig`
1008
+ # @return [Google::Apis::DataprocV1::SoftwareConfig]
1009
+ attr_accessor :software_config
1045
1010
 
1046
1011
  def initialize(**args)
1047
1012
  update!(**args)
1048
1013
  end
1049
1014
 
1050
- # Update properties of this object
1051
- def update!(**args)
1052
- @reference = args[:reference] if args.key?(:reference)
1053
- @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
1054
- @placement = args[:placement] if args.key?(:placement)
1055
- @status = args[:status] if args.key?(:status)
1056
- @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
1057
- @scheduling = args[:scheduling] if args.key?(:scheduling)
1058
- @pig_job = args[:pig_job] if args.key?(:pig_job)
1059
- @hive_job = args[:hive_job] if args.key?(:hive_job)
1060
- @labels = args[:labels] if args.key?(:labels)
1061
- @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
1062
- @status_history = args[:status_history] if args.key?(:status_history)
1063
- @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
1064
- @spark_job = args[:spark_job] if args.key?(:spark_job)
1065
- @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
1066
- @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
1015
+ # Update properties of this object
1016
+ def update!(**args)
1017
+ @master_config = args[:master_config] if args.key?(:master_config)
1018
+ @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
1019
+ @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
1020
+ @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
1021
+ @worker_config = args[:worker_config] if args.key?(:worker_config)
1022
+ @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
1023
+ @software_config = args[:software_config] if args.key?(:software_config)
1067
1024
  end
1068
1025
  end
1069
1026
 
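ClusterConfig is the aggregate the other *Config classes plug into; a minimal sketch (the project, zone, and bucket names are placeholders, and most fields are optional):

require 'google/apis/dataproc_v1'

cluster_config = Google::Apis::DataprocV1::ClusterConfig.new(
  config_bucket: 'example-staging-bucket',  # placeholder; omit to let Cloud Dataproc pick one
  gce_cluster_config: Google::Apis::DataprocV1::GceClusterConfig.new(
    zone_uri: 'https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a'
  )
)
puts cluster_config.gce_cluster_config.zone_uri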
1070
- # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
1071
- # applications on YARN.
1072
- class SparkJob
1027
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1028
+ # 0.9.0/python-programming-guide.html) applications on YARN.
1029
+ class PySparkJob
1073
1030
  include Google::Apis::Core::Hashable
1074
1031
 
1075
- # Optional HCFS URIs of archives to be extracted in the working directory of
1076
- # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
1077
- # zip.
1078
- # Corresponds to the JSON property `archiveUris`
1079
- # @return [Array<String>]
1080
- attr_accessor :archive_uris
1081
-
1082
- # The HCFS URI of the jar file that contains the main class.
1083
- # Corresponds to the JSON property `mainJarFileUri`
1084
- # @return [String]
1085
- attr_accessor :main_jar_file_uri
1086
-
1087
- # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
1032
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver
1088
1033
  # and tasks.
1089
1034
  # Corresponds to the JSON property `jarFileUris`
1090
1035
  # @return [Array<String>]
@@ -1095,7 +1040,7 @@ module Google
1095
1040
  # @return [Google::Apis::DataprocV1::LoggingConfig]
1096
1041
  attr_accessor :logging_config
1097
1042
 
1098
- # Optional A mapping of property names to values, used to configure Spark.
1043
+ # Optional A mapping of property names to values, used to configure PySpark.
1099
1044
  # Properties that conflict with values set by the Cloud Dataproc API may be
1100
1045
  # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
1101
1046
  # and classes in user code.
@@ -1110,17 +1055,29 @@ module Google
1110
1055
  # @return [Array<String>]
1111
1056
  attr_accessor :args
1112
1057
 
1113
- # Optional HCFS URIs of files to be copied to the working directory of Spark
1058
+ # Optional HCFS URIs of files to be copied to the working directory of Python
1114
1059
  # drivers and distributed tasks. Useful for naively parallel tasks.
1115
1060
  # Corresponds to the JSON property `fileUris`
1116
1061
  # @return [Array<String>]
1117
1062
  attr_accessor :file_uris
1118
1063
 
1119
- # The name of the driver's main class. The jar file that contains the class must
1120
- # be in the default CLASSPATH or specified in jar_file_uris.
1121
- # Corresponds to the JSON property `mainClass`
1064
+ # Optional HCFS file URIs of Python files to pass to the PySpark framework.
1065
+ # Supported file types: .py, .egg, and .zip.
1066
+ # Corresponds to the JSON property `pythonFileUris`
1067
+ # @return [Array<String>]
1068
+ attr_accessor :python_file_uris
1069
+
1070
+ # Required The HCFS URI of the main Python file to use as the driver. Must be a .
1071
+ # py file.
1072
+ # Corresponds to the JSON property `mainPythonFileUri`
1122
1073
  # @return [String]
1123
- attr_accessor :main_class
1074
+ attr_accessor :main_python_file_uri
1075
+
1076
+ # Optional HCFS URIs of archives to be extracted in the working directory of .
1077
+ # jar, .tar, .tar.gz, .tgz, and .zip.
1078
+ # Corresponds to the JSON property `archiveUris`
1079
+ # @return [Array<String>]
1080
+ attr_accessor :archive_uris
1124
1081
 
1125
1082
  def initialize(**args)
1126
1083
  update!(**args)
@@ -1128,63 +1085,93 @@ module Google
1128
1085
 
1129
1086
  # Update properties of this object
1130
1087
  def update!(**args)
1131
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1132
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
1133
1088
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1134
1089
  @logging_config = args[:logging_config] if args.key?(:logging_config)
1135
1090
  @properties = args[:properties] if args.key?(:properties)
1136
1091
  @args = args[:args] if args.key?(:args)
1137
1092
  @file_uris = args[:file_uris] if args.key?(:file_uris)
1138
- @main_class = args[:main_class] if args.key?(:main_class)
1093
+ @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
1094
+ @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
1095
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1139
1096
  end
1140
1097
  end
1141
1098
 
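PySparkJob follows the same pattern; a minimal sketch (the Cloud Storage paths and property values are placeholders):

require 'google/apis/dataproc_v1'

pyspark_job = Google::Apis::DataprocV1::PySparkJob.new(
  main_python_file_uri: 'gs://example-bucket/jobs/main.py',   # required driver file
  python_file_uris: ['gs://example-bucket/jobs/helpers.py'],
  args: ['--date', '2017-03-01'],
  properties: { 'spark.executor.memory' => '2g' }
)
puts pyspark_job.main_python_file_uri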
1142
- # Cloud Dataproc job status.
1143
- class JobStatus
1099
+ # Common config settings for resources of Google Compute Engine cluster
1100
+ # instances, applicable to all instances in the cluster.
1101
+ class GceClusterConfig
1144
1102
  include Google::Apis::Core::Hashable
1145
1103
 
1146
- # Output-only The time when this state was entered.
1147
- # Corresponds to the JSON property `stateStartTime`
1148
- # @return [String]
1149
- attr_accessor :state_start_time
1104
+ # The Google Compute Engine metadata entries to add to all instances (see
1105
+ # Project and instance metadata (https://cloud.google.com/compute/docs/storing-
1106
+ # retrieving-metadata#project_and_instance_metadata)).
1107
+ # Corresponds to the JSON property `metadata`
1108
+ # @return [Hash<String,String>]
1109
+ attr_accessor :metadata
1150
1110
 
1151
- # Output-only A state message specifying the overall job state.
1152
- # Corresponds to the JSON property `state`
1153
- # @return [String]
1154
- attr_accessor :state
1111
+ # Optional If true, all instances in the cluster will only have internal IP
1112
+ # addresses. By default, clusters are not restricted to internal IP addresses,
1113
+ # and will have ephemeral external IP addresses assigned to each instance. This
1114
+ # internal_ip_only restriction can only be enabled for subnetwork enabled
1115
+ # networks, and all off-cluster dependencies must be configured to be accessible
1116
+ # without external IP addresses.
1117
+ # Corresponds to the JSON property `internalIpOnly`
1118
+ # @return [Boolean]
1119
+ attr_accessor :internal_ip_only
1120
+ alias_method :internal_ip_only?, :internal_ip_only
1155
1121
 
1156
- # Output-only Optional job state details, such as an error description if the
1157
- # state is <code>ERROR</code>.
1158
- # Corresponds to the JSON property `details`
1159
- # @return [String]
1160
- attr_accessor :details
1122
+ # Optional The URIs of service account scopes to be included in Google Compute
1123
+ # Engine instances. The following base set of scopes is always included:
1124
+ # https://www.googleapis.com/auth/cloud.useraccounts.readonly
1125
+ # https://www.googleapis.com/auth/devstorage.read_write
1126
+ # https://www.googleapis.com/auth/logging.write. If no scopes are specified, the
1127
+ # following defaults are also provided:
1128
+ # https://www.googleapis.com/auth/bigquery
1129
+ # https://www.googleapis.com/auth/bigtable.admin.table
1130
+ # https://www.googleapis.com/auth/bigtable.data
1131
+ # https://www.googleapis.com/auth/devstorage.full_control
1132
+ # Corresponds to the JSON property `serviceAccountScopes`
1133
+ # @return [Array<String>]
1134
+ attr_accessor :service_account_scopes
1161
1135
 
1162
- def initialize(**args)
1163
- update!(**args)
1164
- end
1136
+ # The Google Compute Engine tags to add to all instances (see Tagging instances).
1137
+ # Corresponds to the JSON property `tags`
1138
+ # @return [Array<String>]
1139
+ attr_accessor :tags
1165
1140
 
1166
- # Update properties of this object
1167
- def update!(**args)
1168
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1169
- @state = args[:state] if args.key?(:state)
1170
- @details = args[:details] if args.key?(:details)
1171
- end
1172
- end
1141
+ # Optional The service account of the instances. Defaults to the default Google
1142
+ # Compute Engine service account. Custom service accounts need permissions
1143
+ # equivalent to the following IAM roles:
1144
+ # roles/logging.logWriter
1145
+ # roles/storage.objectAdmin (see https://cloud.google.com/compute/docs/access/
1146
+ # service-accounts#custom_service_accounts for more information). Example: [
1147
+ # account_id]@[project_id].iam.gserviceaccount.com
1148
+ # Corresponds to the JSON property `serviceAccount`
1149
+ # @return [String]
1150
+ attr_accessor :service_account
1173
1151
 
1174
- # Specifies the resources used to actively manage an instance group.
1175
- class ManagedGroupConfig
1176
- include Google::Apis::Core::Hashable
1152
+ # Optional The Google Compute Engine subnetwork to be used for machine
1153
+ # communications. Cannot be specified with network_uri. Example: https://www.
1154
+ # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
1155
+ # Corresponds to the JSON property `subnetworkUri`
1156
+ # @return [String]
1157
+ attr_accessor :subnetwork_uri
1177
1158
 
1178
- # Output-only The name of the Instance Group Manager for this group.
1179
- # Corresponds to the JSON property `instanceGroupManagerName`
1159
+ # Optional The Google Compute Engine network to be used for machine
1160
+ # communications. Cannot be specified with subnetwork_uri. If neither
1161
+ # network_uri nor subnetwork_uri is specified, the "default" network of the
1162
+ # project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using
1163
+ # Subnetworks for more information). Example: https://www.googleapis.com/compute/
1164
+ # v1/projects/[project_id]/regions/global/default.
1165
+ # Corresponds to the JSON property `networkUri`
1180
1166
  # @return [String]
1181
- attr_accessor :instance_group_manager_name
1167
+ attr_accessor :network_uri
1182
1168
 
1183
- # Output-only The name of the Instance Template used for the Managed Instance
1184
- # Group.
1185
- # Corresponds to the JSON property `instanceTemplateName`
1169
+ # Required The zone where the Google Compute Engine cluster will be located.
1170
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
1171
+ # zone].
1172
+ # Corresponds to the JSON property `zoneUri`
1186
1173
  # @return [String]
1187
- attr_accessor :instance_template_name
1174
+ attr_accessor :zone_uri
1188
1175
 
1189
1176
  def initialize(**args)
1190
1177
  update!(**args)
@@ -1192,34 +1179,33 @@ module Google
1192
1179
 
1193
1180
  # Update properties of this object
1194
1181
  def update!(**args)
1195
- @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
1196
- @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
1182
+ @metadata = args[:metadata] if args.key?(:metadata)
1183
+ @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
1184
+ @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
1185
+ @tags = args[:tags] if args.key?(:tags)
1186
+ @service_account = args[:service_account] if args.key?(:service_account)
1187
+ @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
1188
+ @network_uri = args[:network_uri] if args.key?(:network_uri)
1189
+ @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
1197
1190
  end
1198
1191
  end
1199
1192
 
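GceClusterConfig carries the Compute Engine side of a cluster; a minimal sketch (the zone, tag, and metadata values are placeholders):

require 'google/apis/dataproc_v1'

gce_config = Google::Apis::DataprocV1::GceClusterConfig.new(
  zone_uri: 'https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a',
  tags: ['dataproc-example'],
  metadata: { 'team' => 'analytics' },
  internal_ip_only: false
)
puts gce_config.internal_ip_only?  # => false, via the generated alias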
1200
- # The status of the operation.
1201
- class ClusterOperationStatus
1193
+ # Specifies the type and number of accelerator cards attached to the instances
1194
+ # of an instance group (see GPUs on Compute Engine).
1195
+ class AcceleratorConfig
1202
1196
  include Google::Apis::Core::Hashable
1203
1197
 
1204
- # Output-only A message containing the operation state.
1205
- # Corresponds to the JSON property `state`
1206
- # @return [String]
1207
- attr_accessor :state
1208
-
1209
- # Output-onlyA message containing any operation metadata details.
1210
- # Corresponds to the JSON property `details`
1211
- # @return [String]
1212
- attr_accessor :details
1213
-
1214
- # Output-only A message containing the detailed operation state.
1215
- # Corresponds to the JSON property `innerState`
1216
- # @return [String]
1217
- attr_accessor :inner_state
1198
+ # The number of the accelerator cards of this type exposed to this instance.
1199
+ # Corresponds to the JSON property `acceleratorCount`
1200
+ # @return [Fixnum]
1201
+ attr_accessor :accelerator_count
1218
1202
 
1219
- # Output-only The time this state was entered.
1220
- # Corresponds to the JSON property `stateStartTime`
1203
+ # Full or partial URI of the accelerator type resource to expose to this
1204
+ # instance. See Google Compute Engine AcceleratorTypes (/compute/docs/reference/
1205
+ # beta/acceleratorTypes)
1206
+ # Corresponds to the JSON property `acceleratorTypeUri`
1221
1207
  # @return [String]
1222
- attr_accessor :state_start_time
1208
+ attr_accessor :accelerator_type_uri
1223
1209
 
1224
1210
  def initialize(**args)
1225
1211
  update!(**args)
@@ -1227,72 +1213,48 @@ module Google
1227
1213
 
1228
1214
  # Update properties of this object
1229
1215
  def update!(**args)
1230
- @state = args[:state] if args.key?(:state)
1231
- @details = args[:details] if args.key?(:details)
1232
- @inner_state = args[:inner_state] if args.key?(:inner_state)
1233
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1216
+ @accelerator_count = args[:accelerator_count] if args.key?(:accelerator_count)
1217
+ @accelerator_type_uri = args[:accelerator_type_uri] if args.key?(:accelerator_type_uri)
1234
1218
  end
1235
1219
  end
1236
1220
 
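AcceleratorConfig is a two-field value object; a sketch with a partial accelerator type URI (the project, zone, and GPU type are placeholders):

require 'google/apis/dataproc_v1'

accelerator = Google::Apis::DataprocV1::AcceleratorConfig.new(
  accelerator_type_uri: 'projects/example-project/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80',
  accelerator_count: 1
)
puts "#{accelerator.accelerator_count} x #{accelerator.accelerator_type_uri}"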
1237
- # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
1238
- # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
1239
- # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
1240
- # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
1241
- class HadoopJob
1221
+ # Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature:
1222
+ # This report is available for testing purposes only. It may be changed before
1223
+ # final release.
1224
+ class ClusterMetrics
1242
1225
  include Google::Apis::Core::Hashable
1243
1226
 
1244
- # Optional A mapping of property names to values, used to configure Hadoop.
1245
- # Properties that conflict with values set by the Cloud Dataproc API may be
1246
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
1247
- # in user code.
1248
- # Corresponds to the JSON property `properties`
1227
+ # The YARN metrics.
1228
+ # Corresponds to the JSON property `yarnMetrics`
1249
1229
  # @return [Hash<String,String>]
1250
- attr_accessor :properties
1251
-
1252
- # Optional The arguments to pass to the driver. Do not include arguments, such
1253
- # as -libjars or -Dfoo=bar, that can be set as job properties, since a collision
1254
- # may occur that causes an incorrect job submission.
1255
- # Corresponds to the JSON property `args`
1256
- # @return [Array<String>]
1257
- attr_accessor :args
1258
-
1259
- # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the
1260
- # working directory of Hadoop drivers and distributed tasks. Useful for naively
1261
- # parallel tasks.
1262
- # Corresponds to the JSON property `fileUris`
1263
- # @return [Array<String>]
1264
- attr_accessor :file_uris
1230
+ attr_accessor :yarn_metrics
1265
1231
 
1266
- # The name of the driver's main class. The jar file containing the class must be
1267
- # in the default CLASSPATH or specified in jar_file_uris.
1268
- # Corresponds to the JSON property `mainClass`
1269
- # @return [String]
1270
- attr_accessor :main_class
1232
+ # The HDFS metrics.
1233
+ # Corresponds to the JSON property `hdfsMetrics`
1234
+ # @return [Hash<String,String>]
1235
+ attr_accessor :hdfs_metrics
1271
1236
 
1272
- # Optional HCFS URIs of archives to be extracted in the working directory of
1273
- # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
1274
- # zip.
1275
- # Corresponds to the JSON property `archiveUris`
1276
- # @return [Array<String>]
1277
- attr_accessor :archive_uris
1237
+ def initialize(**args)
1238
+ update!(**args)
1239
+ end
1278
1240
 
1279
- # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
1280
- # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
1281
- # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
1282
- # mapreduce-examples.jar'
1283
- # Corresponds to the JSON property `mainJarFileUri`
1284
- # @return [String]
1285
- attr_accessor :main_jar_file_uri
1241
+ # Update properties of this object
1242
+ def update!(**args)
1243
+ @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
1244
+ @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
1245
+ end
1246
+ end
1286
1247
 
1287
- # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
1288
- # Corresponds to the JSON property `jarFileUris`
1289
- # @return [Array<String>]
1290
- attr_accessor :jar_file_uris
1248
+ # The runtime logging config of the job.
1249
+ class LoggingConfig
1250
+ include Google::Apis::Core::Hashable
1291
1251
 
1292
- # The runtime logging config of the job.
1293
- # Corresponds to the JSON property `loggingConfig`
1294
- # @return [Google::Apis::DataprocV1::LoggingConfig]
1295
- attr_accessor :logging_config
1252
+ # The per-package log levels for the driver. This may include "root" package
1253
+ # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
1254
+ # org.apache = DEBUG'
1255
+ # Corresponds to the JSON property `driverLogLevels`
1256
+ # @return [Hash<String,String>]
1257
+ attr_accessor :driver_log_levels
1296
1258
 
1297
1259
  def initialize(**args)
1298
1260
  update!(**args)
@@ -1300,37 +1262,19 @@ module Google
1300
1262
 
1301
1263
  # Update properties of this object
1302
1264
  def update!(**args)
1303
- @properties = args[:properties] if args.key?(:properties)
1304
- @args = args[:args] if args.key?(:args)
1305
- @file_uris = args[:file_uris] if args.key?(:file_uris)
1306
- @main_class = args[:main_class] if args.key?(:main_class)
1307
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1308
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
1309
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1310
- @logging_config = args[:logging_config] if args.key?(:logging_config)
1265
+ @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
1311
1266
  end
1312
1267
  end
1313
1268
 
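LoggingConfig holds only the per-package driver log levels; a sketch that mirrors the examples given in the comment above:

require 'google/apis/dataproc_v1'

logging_config = Google::Apis::DataprocV1::LoggingConfig.new(
  driver_log_levels: { 'root' => 'INFO', 'org.apache' => 'DEBUG' }
)
puts logging_config.driver_log_levels['org.apache']  # => "DEBUG"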
1314
- # A list of queries to run on a cluster.
1315
- class QueryList
1269
+ # The location where output from diagnostic command can be found.
1270
+ class DiagnoseClusterOutputLocation
1316
1271
  include Google::Apis::Core::Hashable
1317
1272
 
1318
- # Required The queries to execute. You do not need to terminate a query with a
1319
- # semicolon. Multiple queries can be specified in one string by separating each
1320
- # with a semicolon. Here is an example of an Cloud Dataproc API snippet that
1321
- # uses a QueryList to specify a HiveJob:
1322
- # "hiveJob": `
1323
- # "queryList": `
1324
- # "queries": [
1325
- # "query1",
1326
- # "query2",
1327
- # "query3;query4",
1328
- # ]
1329
- # `
1330
- # `
1331
- # Corresponds to the JSON property `queries`
1332
- # @return [Array<String>]
1333
- attr_accessor :queries
1273
+ # Output-only The Google Cloud Storage URI of the diagnostic output. This will
1274
+ # be a plain text file with summary of collected diagnostics.
1275
+ # Corresponds to the JSON property `outputUri`
1276
+ # @return [String]
1277
+ attr_accessor :output_uri
1334
1278
 
1335
1279
  def initialize(**args)
1336
1280
  update!(**args)
@@ -1338,83 +1282,154 @@ module Google
1338
1282
 
1339
1283
  # Update properties of this object
1340
1284
  def update!(**args)
1341
- @queries = args[:queries] if args.key?(:queries)
1285
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
1342
1286
  end
1343
1287
  end
1344
1288
 
1345
- # A YARN application created by a job. Application information is a subset of <
1346
- # code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
1347
- # Beta Feature: This report is available for testing purposes only. It may be
1348
- # changed before final release.
1349
- class YarnApplication
1289
+ # This resource represents a long-running operation that is the result of a
1290
+ # network API call.
1291
+ class Operation
1350
1292
  include Google::Apis::Core::Hashable
1351
1293
 
1352
- # Required The application state.
1353
- # Corresponds to the JSON property `state`
1354
- # @return [String]
1355
- attr_accessor :state
1294
+ # The Status type defines a logical error model that is suitable for different
1295
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
1296
+ # (https://github.com/grpc). The error model is designed to be:
1297
+ # Simple to use and understand for most users
1298
+ # Flexible enough to meet unexpected needs. Overview: The Status message contains
1299
+ # three pieces of data: error code, error message, and error details. The error
1300
+ # code should be an enum value of google.rpc.Code, but it may accept additional
1301
+ # error codes if needed. The error message should be a developer-facing English
1302
+ # message that helps developers understand and resolve the error. If a localized
1303
+ # user-facing error message is needed, put the localized message in the error
1304
+ # details or localize it in the client. The optional error details may contain
1305
+ # arbitrary information about the error. There is a predefined set of error
1306
+ # detail types in the package google.rpc which can be used for common error
1307
+ # conditions. Language mapping: The Status message is the logical representation of
1308
+ # the error model, but it is not necessarily the actual wire format. When the
1309
+ # Status message is exposed in different client libraries and different wire
1310
+ # protocols, it can be mapped differently. For example, it will likely be mapped
1311
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
1312
+ # Other uses: The error model and the Status message can be used in a variety of
1313
+ # environments, either with or without APIs, to provide a consistent developer
1314
+ # experience across different environments. Example uses of this error model
1315
+ # include:
1316
+ # Partial errors. If a service needs to return partial errors to the client, it
1317
+ # may embed the Status in the normal response to indicate the partial errors.
1318
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
1319
+ # Status message for error reporting purposes.
1320
+ # Batch operations. If a client uses batch request and batch response, the
1321
+ # Status message should be used directly inside batch response, one for each
1322
+ # error sub-response.
1323
+ # Asynchronous operations. If an API call embeds asynchronous operation results
1324
+ # in its response, the status of those operations should be represented directly
1325
+ # using the Status message.
1326
+ # Logging. If some API errors are stored in logs, the message Status could be
1327
+ # used directly after any stripping needed for security/privacy reasons.
1328
+ # Corresponds to the JSON property `error`
1329
+ # @return [Google::Apis::DataprocV1::Status]
1330
+ attr_accessor :error
1356
1331
 
1357
- # Required The application name.
1332
+ # Service-specific metadata associated with the operation. It typically contains
1333
+ # progress information and common metadata such as create time. Some services
1334
+ # might not provide such metadata. Any method that returns a long-running
1335
+ # operation should document the metadata type, if any.
1336
+ # Corresponds to the JSON property `metadata`
1337
+ # @return [Hash<String,Object>]
1338
+ attr_accessor :metadata
1339
+
1340
+ # If the value is false, it means the operation is still in progress. If true,
1341
+ # the operation is completed, and either error or response is available.
1342
+ # Corresponds to the JSON property `done`
1343
+ # @return [Boolean]
1344
+ attr_accessor :done
1345
+ alias_method :done?, :done
1346
+
1347
+ # The normal response of the operation in case of success. If the original
1348
+ # method returns no data on success, such as Delete, the response is google.
1349
+ # protobuf.Empty. If the original method is standard Get/Create/Update, the
1350
+ # response should be the resource. For other methods, the response should have
1351
+ # the type XxxResponse, where Xxx is the original method name. For example, if
1352
+ # the original method name is TakeSnapshot(), the inferred response type is
1353
+ # TakeSnapshotResponse.
1354
+ # Corresponds to the JSON property `response`
1355
+ # @return [Hash<String,Object>]
1356
+ attr_accessor :response
1357
+
1358
+ # The server-assigned name, which is only unique within the same service that
1359
+ # originally returns it. If you use the default HTTP mapping, the name should
1360
+ # have the format of operations/some/unique/name.
1358
1361
  # Corresponds to the JSON property `name`
1359
1362
  # @return [String]
1360
1363
  attr_accessor :name
1361
1364
 
1362
- # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
1363
- # TimelineServer that provides application-specific information. The URL uses
1364
- # the internal hostname, and requires a proxy server for resolution and,
1365
- # possibly, access.
1366
- # Corresponds to the JSON property `trackingUrl`
1367
- # @return [String]
1368
- attr_accessor :tracking_url
1369
-
1370
- # Required The numerical progress of the application, from 1 to 100.
1371
- # Corresponds to the JSON property `progress`
1372
- # @return [Float]
1373
- attr_accessor :progress
1374
-
1375
1365
  def initialize(**args)
1376
1366
  update!(**args)
1377
1367
  end
1378
1368
 
1379
1369
  # Update properties of this object
1380
1370
  def update!(**args)
1381
- @state = args[:state] if args.key?(:state)
1371
+ @error = args[:error] if args.key?(:error)
1372
+ @metadata = args[:metadata] if args.key?(:metadata)
1373
+ @done = args[:done] if args.key?(:done)
1374
+ @response = args[:response] if args.key?(:response)
1382
1375
  @name = args[:name] if args.key?(:name)
1383
- @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
1384
- @progress = args[:progress] if args.key?(:progress)
1385
1376
  end
1386
1377
  end
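The Operation message above is the backbone of every long-running call in this API: poll until done is true, then read exactly one of error or response. A minimal sketch of how a caller might inspect such an object (op is assumed to be a Google::Apis::DataprocV1::Operation returned by some earlier cluster or job call; the helper name is illustrative):

  require 'google/apis/dataproc_v1'

  # Inspect a long-running operation obtained from an earlier call.
  def report_operation(op)
    unless op.done?
      puts "still running: #{op.name}"   # server-assigned operations/... name
      return
    end
    if op.error                          # Google::Apis::DataprocV1::Status
      puts "failed with code #{op.error.code}: #{op.error.message}"
    else
      puts "succeeded, response: #{op.response.inspect}"  # Hash<String,Object>
    end
  end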
1387
1378
 
1388
- # A request to collect cluster diagnostic information.
1389
- class DiagnoseClusterRequest
1379
+ # The status of the operation.
1380
+ class OperationStatus
1390
1381
  include Google::Apis::Core::Hashable
1391
1382
 
1383
+ # A message containing the detailed operation state.
1384
+ # Corresponds to the JSON property `innerState`
1385
+ # @return [String]
1386
+ attr_accessor :inner_state
1387
+
1388
+ # The time this state was entered.
1389
+ # Corresponds to the JSON property `stateStartTime`
1390
+ # @return [String]
1391
+ attr_accessor :state_start_time
1392
+
1393
+ # A message containing the operation state.
1394
+ # Corresponds to the JSON property `state`
1395
+ # @return [String]
1396
+ attr_accessor :state
1397
+
1398
+ # A message containing any operation metadata details.
1399
+ # Corresponds to the JSON property `details`
1400
+ # @return [String]
1401
+ attr_accessor :details
1402
+
1392
1403
  def initialize(**args)
1393
1404
  update!(**args)
1394
1405
  end
1395
1406
 
1396
1407
  # Update properties of this object
1397
1408
  def update!(**args)
1409
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
1410
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1411
+ @state = args[:state] if args.key?(:state)
1412
+ @details = args[:details] if args.key?(:details)
1398
1413
  end
1399
1414
  end
1400
1415
 
1401
- # Specifies the config of disk options for a group of VM instances.
1402
- class DiskConfig
1416
+ # Encapsulates the full scoping used to reference a job.
1417
+ class JobReference
1403
1418
  include Google::Apis::Core::Hashable
1404
1419
 
1405
- # Optional Size in GB of the boot disk (default is 500GB).
1406
- # Corresponds to the JSON property `bootDiskSizeGb`
1407
- # @return [Fixnum]
1408
- attr_accessor :boot_disk_size_gb
1420
+ # Required The ID of the Google Cloud Platform project that the job belongs to.
1421
+ # Corresponds to the JSON property `projectId`
1422
+ # @return [String]
1423
+ attr_accessor :project_id
1409
1424
 
1410
- # Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
1411
- # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
1412
- # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
1413
- # attached, this runtime bulk data is spread across them, and the boot disk
1414
- # contains only basic config and installed binaries.
1415
- # Corresponds to the JSON property `numLocalSsds`
1416
- # @return [Fixnum]
1417
- attr_accessor :num_local_ssds
1425
+ # Optional The job ID, which must be unique within the project. The job ID is
1426
+ # generated by the server upon job submission or provided by the user as a means
1427
+ # to perform retries without creating duplicate jobs. The ID must contain only
1428
+ # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
1429
+ # maximum length is 100 characters.
1430
+ # Corresponds to the JSON property `jobId`
1431
+ # @return [String]
1432
+ attr_accessor :job_id
1418
1433
 
1419
1434
  def initialize(**args)
1420
1435
  update!(**args)
@@ -1422,54 +1437,84 @@ module Google
1422
1437
 
1423
1438
  # Update properties of this object
1424
1439
  def update!(**args)
1425
- @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
1426
- @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
1440
+ @project_id = args[:project_id] if args.key?(:project_id)
1441
+ @job_id = args[:job_id] if args.key?(:job_id)
1427
1442
  end
1428
1443
  end
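JobReference is what makes job submission retry-safe: when the client supplies its own job_id (letters, digits, underscores, and hyphens, at most 100 characters), resubmitting after a timeout cannot create a duplicate job. A small sketch with placeholder values:

  require 'google/apis/dataproc_v1'

  ref = Google::Apis::DataprocV1::JobReference.new(
    project_id: 'my-project',            # placeholder project ID
    job_id:     'wordcount-20170301-01'  # unique within the project; reuse it on retries
  )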
1429
1444
 
1430
- # Metadata describing the operation.
1431
- class ClusterOperationMetadata
1445
+ # A request to submit a job.
1446
+ class SubmitJobRequest
1432
1447
  include Google::Apis::Core::Hashable
1433
1448
 
1434
- # Output-only Short description of operation.
1435
- # Corresponds to the JSON property `description`
1436
- # @return [String]
1437
- attr_accessor :description
1438
-
1439
- # Output-only Errors encountered during operation execution.
1440
- # Corresponds to the JSON property `warnings`
1441
- # @return [Array<String>]
1442
- attr_accessor :warnings
1449
+ # A Cloud Dataproc job resource.
1450
+ # Corresponds to the JSON property `job`
1451
+ # @return [Google::Apis::DataprocV1::Job]
1452
+ attr_accessor :job
1443
1453
 
1444
- # Output-only Labels associated with the operation
1445
- # Corresponds to the JSON property `labels`
1446
- # @return [Hash<String,String>]
1447
- attr_accessor :labels
1454
+ def initialize(**args)
1455
+ update!(**args)
1456
+ end
1448
1457
 
1449
- # The status of the operation.
1450
- # Corresponds to the JSON property `status`
1451
- # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
1452
- attr_accessor :status
1458
+ # Update properties of this object
1459
+ def update!(**args)
1460
+ @job = args[:job] if args.key?(:job)
1461
+ end
1462
+ end
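SubmitJobRequest is a thin wrapper around a Job. A hedged sketch of submitting one: the Job fields used here (placement, spark_sql_job) and the DataprocService#submit_job(project_id, region, request) call are assumptions based on the rest of this gem's generated Dataproc surface, not guarantees from this hunk:

  require 'google/apis/dataproc_v1'

  dataproc = Google::Apis::DataprocV1::DataprocService.new
  # dataproc.authorization = ...   # credentials omitted in this sketch

  job = Google::Apis::DataprocV1::Job.new(
    # Assumed Job fields: placement names the target cluster, spark_sql_job carries the work.
    placement:     Google::Apis::DataprocV1::JobPlacement.new(cluster_name: 'example-cluster'),
    spark_sql_job: Google::Apis::DataprocV1::SparkSqlJob.new(query_file_uri: 'gs://my-bucket/query.sql')
  )
  request   = Google::Apis::DataprocV1::SubmitJobRequest.new(job: job)
  submitted = dataproc.submit_job('my-project', 'global', request)  # assumed method name and arguments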
1453
1463
 
1454
- # Output-only The previous operation status.
1455
- # Corresponds to the JSON property `statusHistory`
1456
- # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
1457
- attr_accessor :status_history
1464
+ # The Status type defines a logical error model that is suitable for different
1465
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
1466
+ # (https://github.com/grpc). The error model is designed to be:
1467
+ # Simple to use and understand for most users
1468
+ # Flexible enough to meet unexpected needs. Overview: The Status message contains
1469
+ # three pieces of data: error code, error message, and error details. The error
1470
+ # code should be an enum value of google.rpc.Code, but it may accept additional
1471
+ # error codes if needed. The error message should be a developer-facing English
1472
+ # message that helps developers understand and resolve the error. If a localized
1473
+ # user-facing error message is needed, put the localized message in the error
1474
+ # details or localize it in the client. The optional error details may contain
1475
+ # arbitrary information about the error. There is a predefined set of error
1476
+ # detail types in the package google.rpc which can be used for common error
1477
+ # conditions. Language mapping: The Status message is the logical representation of
1478
+ # the error model, but it is not necessarily the actual wire format. When the
1479
+ # Status message is exposed in different client libraries and different wire
1480
+ # protocols, it can be mapped differently. For example, it will likely be mapped
1481
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
1482
+ # Other uses: The error model and the Status message can be used in a variety of
1483
+ # environments, either with or without APIs, to provide a consistent developer
1484
+ # experience across different environments. Example uses of this error model
1485
+ # include:
1486
+ # Partial errors. If a service needs to return partial errors to the client, it
1487
+ # may embed the Status in the normal response to indicate the partial errors.
1488
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
1489
+ # Status message for error reporting purposes.
1490
+ # Batch operations. If a client uses batch request and batch response, the
1491
+ # Status message should be used directly inside batch response, one for each
1492
+ # error sub-response.
1493
+ # Asynchronous operations. If an API call embeds asynchronous operation results
1494
+ # in its response, the status of those operations should be represented directly
1495
+ # using the Status message.
1496
+ # Logging. If some API errors are stored in logs, the message Status could be
1497
+ # used directly after any stripping needed for security/privacy reasons.
1498
+ class Status
1499
+ include Google::Apis::Core::Hashable
1458
1500
 
1459
- # Output-only Cluster UUID for the operation.
1460
- # Corresponds to the JSON property `clusterUuid`
1461
- # @return [String]
1462
- attr_accessor :cluster_uuid
1501
+ # The status code, which should be an enum value of google.rpc.Code.
1502
+ # Corresponds to the JSON property `code`
1503
+ # @return [Fixnum]
1504
+ attr_accessor :code
1463
1505
 
1464
- # Output-only Name of the cluster for the operation.
1465
- # Corresponds to the JSON property `clusterName`
1506
+ # A developer-facing error message, which should be in English. Any user-facing
1507
+ # error message should be localized and sent in the google.rpc.Status.details
1508
+ # field, or localized by the client.
1509
+ # Corresponds to the JSON property `message`
1466
1510
  # @return [String]
1467
- attr_accessor :cluster_name
1511
+ attr_accessor :message
1468
1512
 
1469
- # Output-only The operation type.
1470
- # Corresponds to the JSON property `operationType`
1471
- # @return [String]
1472
- attr_accessor :operation_type
1513
+ # A list of messages that carry the error details. There will be a common set of
1514
+ # message types for APIs to use.
1515
+ # Corresponds to the JSON property `details`
1516
+ # @return [Array<Hash<String,Object>>]
1517
+ attr_accessor :details
1473
1518
 
1474
1519
  def initialize(**args)
1475
1520
  update!(**args)
@@ -1477,59 +1522,65 @@ module Google
1477
1522
 
1478
1523
  # Update properties of this object
1479
1524
  def update!(**args)
1480
- @description = args[:description] if args.key?(:description)
1481
- @warnings = args[:warnings] if args.key?(:warnings)
1482
- @labels = args[:labels] if args.key?(:labels)
1483
- @status = args[:status] if args.key?(:status)
1484
- @status_history = args[:status_history] if args.key?(:status_history)
1485
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1486
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1487
- @operation_type = args[:operation_type] if args.key?(:operation_type)
1525
+ @code = args[:code] if args.key?(:code)
1526
+ @message = args[:message] if args.key?(:message)
1527
+ @details = args[:details] if args.key?(:details)
1488
1528
  end
1489
1529
  end
1490
1530
 
1491
- # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
1492
- # queries on YARN.
1493
- class HiveJob
1531
+ # Optional The config settings for Google Compute Engine resources in an
1532
+ # instance group, such as a master or worker group.
1533
+ class InstanceGroupConfig
1494
1534
  include Google::Apis::Core::Hashable
1495
1535
 
1496
- # Optional Whether to continue executing queries if a query fails. The default
1497
- # value is false. Setting to true can be useful when executing independent
1498
- # parallel queries.
1499
- # Corresponds to the JSON property `continueOnFailure`
1536
+ # Specifies the resources used to actively manage an instance group.
1537
+ # Corresponds to the JSON property `managedGroupConfig`
1538
+ # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
1539
+ attr_accessor :managed_group_config
1540
+
1541
+ # Optional Specifies that this instance group contains preemptible instances.
1542
+ # Corresponds to the JSON property `isPreemptible`
1500
1543
  # @return [Boolean]
1501
- attr_accessor :continue_on_failure
1502
- alias_method :continue_on_failure?, :continue_on_failure
1544
+ attr_accessor :is_preemptible
1545
+ alias_method :is_preemptible?, :is_preemptible
1503
1546
 
1504
- # The HCFS URI of the script that contains Hive queries.
1505
- # Corresponds to the JSON property `queryFileUri`
1547
+ # Output-only The Google Compute Engine image resource used for cluster
1548
+ # instances. Inferred from SoftwareConfig.image_version.
1549
+ # Corresponds to the JSON property `imageUri`
1506
1550
  # @return [String]
1507
- attr_accessor :query_file_uri
1551
+ attr_accessor :image_uri
1508
1552
 
1509
- # A list of queries to run on a cluster.
1510
- # Corresponds to the JSON property `queryList`
1511
- # @return [Google::Apis::DataprocV1::QueryList]
1512
- attr_accessor :query_list
1553
+ # Required The Google Compute Engine machine type used for cluster instances.
1554
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
1555
+ # east1-a/machineTypes/n1-standard-2.
1556
+ # Corresponds to the JSON property `machineTypeUri`
1557
+ # @return [String]
1558
+ attr_accessor :machine_type_uri
1513
1559
 
1514
- # Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and
1515
- # Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
1516
- # Corresponds to the JSON property `jarFileUris`
1560
+ # Optional The list of instance names. Cloud Dataproc derives the names from
1561
+ # cluster_name, num_instances, and the instance group if not set by the user (
1562
+ # recommended practice is to let Cloud Dataproc derive the name).
1563
+ # Corresponds to the JSON property `instanceNames`
1517
1564
  # @return [Array<String>]
1518
- attr_accessor :jar_file_uris
1565
+ attr_accessor :instance_names
1519
1566
 
1520
- # Optional Mapping of query variable names to values (equivalent to the Hive
1521
- # command: SET name="value";).
1522
- # Corresponds to the JSON property `scriptVariables`
1523
- # @return [Hash<String,String>]
1524
- attr_accessor :script_variables
1567
+ # Optional The Google Compute Engine accelerator configuration for these
1568
+ # instances. Beta Feature: This feature is still under development. It may be
1569
+ # changed before final release.
1570
+ # Corresponds to the JSON property `accelerators`
1571
+ # @return [Array<Google::Apis::DataprocV1::AcceleratorConfig>]
1572
+ attr_accessor :accelerators
1525
1573
 
1526
- # Optional A mapping of property names and values, used to configure Hive.
1527
- # Properties that conflict with values set by the Cloud Dataproc API may be
1528
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
1529
- # hive/conf/hive-site.xml, and classes in user code.
1530
- # Corresponds to the JSON property `properties`
1531
- # @return [Hash<String,String>]
1532
- attr_accessor :properties
1574
+ # Required The number of VM instances in the instance group. For master instance
1575
+ # groups, must be set to 1.
1576
+ # Corresponds to the JSON property `numInstances`
1577
+ # @return [Fixnum]
1578
+ attr_accessor :num_instances
1579
+
1580
+ # Specifies the config of disk options for a group of VM instances.
1581
+ # Corresponds to the JSON property `diskConfig`
1582
+ # @return [Google::Apis::DataprocV1::DiskConfig]
1583
+ attr_accessor :disk_config
1533
1584
 
1534
1585
  def initialize(**args)
1535
1586
  update!(**args)
@@ -1537,43 +1588,55 @@ module Google
1537
1588
 
1538
1589
  # Update properties of this object
1539
1590
  def update!(**args)
1540
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
1541
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1542
- @query_list = args[:query_list] if args.key?(:query_list)
1543
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1544
- @script_variables = args[:script_variables] if args.key?(:script_variables)
1545
- @properties = args[:properties] if args.key?(:properties)
1591
+ @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
1592
+ @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
1593
+ @image_uri = args[:image_uri] if args.key?(:image_uri)
1594
+ @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
1595
+ @instance_names = args[:instance_names] if args.key?(:instance_names)
1596
+ @accelerators = args[:accelerators] if args.key?(:accelerators)
1597
+ @num_instances = args[:num_instances] if args.key?(:num_instances)
1598
+ @disk_config = args[:disk_config] if args.key?(:disk_config)
1546
1599
  end
1547
1600
  end
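InstanceGroupConfig is the per-group half of a cluster config. A sketch of a two-node worker group, using only the fields documented in this hunk plus the DiskConfig fields shown in the removed DiskConfig section above; all URIs and sizes are placeholders:

  require 'google/apis/dataproc_v1'

  workers = Google::Apis::DataprocV1::InstanceGroupConfig.new(
    num_instances:    2,
    machine_type_uri: 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-a/machineTypes/n1-standard-2',
    disk_config:      Google::Apis::DataprocV1::DiskConfig.new(boot_disk_size_gb: 100, num_local_ssds: 0),
    is_preemptible:   false
  )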
1548
1601
 
1549
- # A generic empty message that you can re-use to avoid defining duplicated empty
1550
- # messages in your APIs. A typical example is to use it as the request or the
1551
- # response type of an API method. For instance:
1552
- # service Foo `
1553
- # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
1554
- # `
1555
- # The JSON representation for Empty is empty JSON object ``.
1556
- class Empty
1602
+ # Job scheduling options. Beta Feature: These options are available for testing
1603
+ # purposes only. They may be changed before final release.
1604
+ class JobScheduling
1557
1605
  include Google::Apis::Core::Hashable
1558
1606
 
1607
+ # Optional Maximum number of times per hour a driver may be restarted as a
1608
+ # result of the driver terminating with a non-zero code before the job is
1609
+ # reported failed. A job may be reported as thrashing if the driver exits with a
1610
+ # non-zero code 4 times within a 10-minute window. Maximum value is 10.
1611
+ # Corresponds to the JSON property `maxFailuresPerHour`
1612
+ # @return [Fixnum]
1613
+ attr_accessor :max_failures_per_hour
1614
+
1559
1615
  def initialize(**args)
1560
1616
  update!(**args)
1561
1617
  end
1562
1618
 
1563
1619
  # Update properties of this object
1564
1620
  def update!(**args)
1621
+ @max_failures_per_hour = args[:max_failures_per_hour] if args.key?(:max_failures_per_hour)
1565
1622
  end
1566
1623
  end
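A one-line sketch of the scheduling knob described above; attaching it to a job via Job#scheduling is an assumption here, and the comment notes the documented cap:

  # Beta feature; max_failures_per_hour is capped at 10 per the comment above.
  scheduling = Google::Apis::DataprocV1::JobScheduling.new(max_failures_per_hour: 5)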
1567
1624
 
1568
- # The location of diagnostic output.
1569
- class DiagnoseClusterResults
1625
+ # A list of jobs in a project.
1626
+ class ListJobsResponse
1570
1627
  include Google::Apis::Core::Hashable
1571
1628
 
1572
- # Output-only The Google Cloud Storage URI of the diagnostic output. The output
1573
- # report is a plain text file with a summary of collected diagnostics.
1574
- # Corresponds to the JSON property `outputUri`
1629
+ # Output-only Jobs list.
1630
+ # Corresponds to the JSON property `jobs`
1631
+ # @return [Array<Google::Apis::DataprocV1::Job>]
1632
+ attr_accessor :jobs
1633
+
1634
+ # Optional This token is included in the response if there are more results to
1635
+ # fetch. To fetch additional results, provide this value as the page_token in a
1636
+ # subsequent <code>ListJobsRequest</code>.
1637
+ # Corresponds to the JSON property `nextPageToken`
1575
1638
  # @return [String]
1576
- attr_accessor :output_uri
1639
+ attr_accessor :next_page_token
1577
1640
 
1578
1641
  def initialize(**args)
1579
1642
  update!(**args)
@@ -1581,67 +1644,28 @@ module Google
1581
1644
 
1582
1645
  # Update properties of this object
1583
1646
  def update!(**args)
1584
- @output_uri = args[:output_uri] if args.key?(:output_uri)
1585
- end
1586
- end
1587
-
1588
- # The cluster config.
1589
- class ClusterConfig
1590
- include Google::Apis::Core::Hashable
1591
-
1592
- # Optional Commands to execute on each node after config is completed. By
1593
- # default, executables are run on master and all worker nodes. You can test a
1594
- # node's <code>role</code> metadata to run an executable on a master or worker
1595
- # node, as shown below using curl (you can also use wget):
1596
- # ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/
1597
- # instance/attributes/dataproc-role)
1598
- # if [[ "$`ROLE`" == 'Master' ]]; then
1599
- # ... master specific actions ...
1600
- # else
1601
- # ... worker specific actions ...
1602
- # fi
1603
- # Corresponds to the JSON property `initializationActions`
1604
- # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
1605
- attr_accessor :initialization_actions
1606
-
1607
- # Optional A Google Cloud Storage staging bucket used for sharing generated SSH
1608
- # keys and config. If you do not specify a staging bucket, Cloud Dataproc will
1609
- # determine an appropriate Cloud Storage location (US, ASIA, or EU) for your
1610
- # cluster's staging bucket according to the Google Compute Engine zone where
1611
- # your cluster is deployed, and then it will create and manage this project-
1612
- # level, per-location bucket for you.
1613
- # Corresponds to the JSON property `configBucket`
1614
- # @return [String]
1615
- attr_accessor :config_bucket
1616
-
1617
- # Optional The config settings for Google Compute Engine resources in an
1618
- # instance group, such as a master or worker group.
1619
- # Corresponds to the JSON property `workerConfig`
1620
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
1621
- attr_accessor :worker_config
1622
-
1623
- # Common config settings for resources of Google Compute Engine cluster
1624
- # instances, applicable to all instances in the cluster.
1625
- # Corresponds to the JSON property `gceClusterConfig`
1626
- # @return [Google::Apis::DataprocV1::GceClusterConfig]
1627
- attr_accessor :gce_cluster_config
1628
-
1629
- # Specifies the selection and config of software inside the cluster.
1630
- # Corresponds to the JSON property `softwareConfig`
1631
- # @return [Google::Apis::DataprocV1::SoftwareConfig]
1632
- attr_accessor :software_config
1647
+ @jobs = args[:jobs] if args.key?(:jobs)
1648
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1649
+ end
1650
+ end
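next_page_token drives the usual paging loop. A sketch, assuming the generated DataprocService exposes list_jobs(project_id, region, page_token: ...) and that Job carries a reference field; both names are assumptions, not shown in this hunk:

  require 'google/apis/dataproc_v1'

  dataproc = Google::Apis::DataprocV1::DataprocService.new
  # dataproc.authorization = ...

  page_token = nil
  loop do
    response = dataproc.list_jobs('my-project', 'global', page_token: page_token)  # assumed signature
    (response.jobs || []).each do |job|
      puts job.reference.job_id if job.reference   # assumed Job#reference
    end
    page_token = response.next_page_token
    break unless page_token
  end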
1633
1651
 
1634
- # Optional The config settings for Google Compute Engine resources in an
1635
- # instance group, such as a master or worker group.
1636
- # Corresponds to the JSON property `masterConfig`
1637
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
1638
- attr_accessor :master_config
1652
+ # Specifies an executable to run on a fully configured node and a timeout period
1653
+ # for executable completion.
1654
+ class NodeInitializationAction
1655
+ include Google::Apis::Core::Hashable
1639
1656
 
1640
- # Optional The config settings for Google Compute Engine resources in an
1641
- # instance group, such as a master or worker group.
1642
- # Corresponds to the JSON property `secondaryWorkerConfig`
1643
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
1644
- attr_accessor :secondary_worker_config
1657
+ # Required Google Cloud Storage URI of executable file.
1658
+ # Corresponds to the JSON property `executableFile`
1659
+ # @return [String]
1660
+ attr_accessor :executable_file
1661
+
1662
+ # Optional Amount of time executable has to complete. Default is 10 minutes.
1663
+ # Cluster creation fails with an explanatory error message (the name of the
1664
+ # executable that caused the error and the exceeded timeout period) if the
1665
+ # executable is not completed at end of the timeout period.
1666
+ # Corresponds to the JSON property `executionTimeout`
1667
+ # @return [String]
1668
+ attr_accessor :execution_timeout
1645
1669
 
1646
1670
  def initialize(**args)
1647
1671
  update!(**args)
@@ -1649,41 +1673,36 @@ module Google
1649
1673
 
1650
1674
  # Update properties of this object
1651
1675
  def update!(**args)
1652
- @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
1653
- @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
1654
- @worker_config = args[:worker_config] if args.key?(:worker_config)
1655
- @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
1656
- @software_config = args[:software_config] if args.key?(:software_config)
1657
- @master_config = args[:master_config] if args.key?(:master_config)
1658
- @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
1676
+ @executable_file = args[:executable_file] if args.key?(:executable_file)
1677
+ @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
1659
1678
  end
1660
1679
  end
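Initialization actions plug into ClusterConfig#initialization_actions (visible in the removed ClusterConfig hunk above). A sketch with placeholder URIs; the '600s' duration string follows the usual JSON encoding of a protobuf Duration and is an assumption here:

  init_action = Google::Apis::DataprocV1::NodeInitializationAction.new(
    executable_file:   'gs://my-bucket/bootstrap.sh',  # placeholder startup script
    execution_timeout: '600s'                          # assumed duration format; default is 10 minutes
  )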
1661
1680
 
1662
- # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1663
- # 0.9.0/python-programming-guide.html) applications on YARN.
1664
- class PySparkJob
1681
+ # A request to cancel a job.
1682
+ class CancelJobRequest
1665
1683
  include Google::Apis::Core::Hashable
1666
1684
 
1667
- # Optional HCFS file URIs of Python files to pass to the PySpark framework.
1668
- # Supported file types: .py, .egg, and .zip.
1669
- # Corresponds to the JSON property `pythonFileUris`
1670
- # @return [Array<String>]
1671
- attr_accessor :python_file_uris
1685
+ def initialize(**args)
1686
+ update!(**args)
1687
+ end
1672
1688
 
1673
- # Required The HCFS URI of the main Python file to use as the driver. Must be a .
1674
- # py file.
1675
- # Corresponds to the JSON property `mainPythonFileUri`
1676
- # @return [String]
1677
- attr_accessor :main_python_file_uri
1689
+ # Update properties of this object
1690
+ def update!(**args)
1691
+ end
1692
+ end
1678
1693
 
1679
- # Optional HCFS URIs of archives to be extracted in the working directory of .
1680
- # jar, .tar, .tar.gz, .tgz, and .zip.
1681
- # Corresponds to the JSON property `archiveUris`
1682
- # @return [Array<String>]
1683
- attr_accessor :archive_uris
1694
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
1695
+ # ) queries.
1696
+ class SparkSqlJob
1697
+ include Google::Apis::Core::Hashable
1684
1698
 
1685
- # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver
1686
- # and tasks.
1699
+ # Optional Mapping of query variable names to values (equivalent to the Spark
1700
+ # SQL command: SET name="value";).
1701
+ # Corresponds to the JSON property `scriptVariables`
1702
+ # @return [Hash<String,String>]
1703
+ attr_accessor :script_variables
1704
+
1705
+ # Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.
1687
1706
  # Corresponds to the JSON property `jarFileUris`
1688
1707
  # @return [Array<String>]
1689
1708
  attr_accessor :jar_file_uris
@@ -1693,26 +1712,22 @@ module Google
1693
1712
  # @return [Google::Apis::DataprocV1::LoggingConfig]
1694
1713
  attr_accessor :logging_config
1695
1714
 
1696
- # Optional A mapping of property names to values, used to configure PySpark.
1697
- # Properties that conflict with values set by the Cloud Dataproc API may be
1698
- # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
1699
- # and classes in user code.
1715
+ # Optional A mapping of property names to values, used to configure Spark SQL's
1716
+ # SparkConf. Properties that conflict with values set by the Cloud Dataproc API
1717
+ # may be overwritten.
1700
1718
  # Corresponds to the JSON property `properties`
1701
1719
  # @return [Hash<String,String>]
1702
1720
  attr_accessor :properties
1703
1721
 
1704
- # Optional The arguments to pass to the driver. Do not include arguments, such
1705
- # as --conf, that can be set as job properties, since a collision may occur that
1706
- # causes an incorrect job submission.
1707
- # Corresponds to the JSON property `args`
1708
- # @return [Array<String>]
1709
- attr_accessor :args
1722
+ # The HCFS URI of the script that contains SQL queries.
1723
+ # Corresponds to the JSON property `queryFileUri`
1724
+ # @return [String]
1725
+ attr_accessor :query_file_uri
1710
1726
 
1711
- # Optional HCFS URIs of files to be copied to the working directory of Python
1712
- # drivers and distributed tasks. Useful for naively parallel tasks.
1713
- # Corresponds to the JSON property `fileUris`
1714
- # @return [Array<String>]
1715
- attr_accessor :file_uris
1727
+ # A list of queries to run on a cluster.
1728
+ # Corresponds to the JSON property `queryList`
1729
+ # @return [Google::Apis::DataprocV1::QueryList]
1730
+ attr_accessor :query_list
1716
1731
 
1717
1732
  def initialize(**args)
1718
1733
  update!(**args)
@@ -1720,93 +1735,67 @@ module Google
1720
1735
 
1721
1736
  # Update properties of this object
1722
1737
  def update!(**args)
1723
- @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
1724
- @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
1725
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1738
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
1726
1739
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1727
1740
  @logging_config = args[:logging_config] if args.key?(:logging_config)
1728
1741
  @properties = args[:properties] if args.key?(:properties)
1729
- @args = args[:args] if args.key?(:args)
1730
- @file_uris = args[:file_uris] if args.key?(:file_uris)
1742
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1743
+ @query_list = args[:query_list] if args.key?(:query_list)
1731
1744
  end
1732
1745
  end
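SparkSqlJob takes either a query_file_uri or an inline QueryList, exactly like the HiveJob QueryList example in the removed comment near the top of this hunk. A sketch using inline queries and a script variable:

  sql_job = Google::Apis::DataprocV1::SparkSqlJob.new(
    query_list: Google::Apis::DataprocV1::QueryList.new(
      queries: ['SHOW DATABASES;', 'SELECT COUNT(*) FROM logs.events;']
    ),
    script_variables: { 'env' => 'prod' }   # equivalent to the Spark SQL command: SET env="prod";
  )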
1733
1746
 
1734
- # Common config settings for resources of Google Compute Engine cluster
1735
- # instances, applicable to all instances in the cluster.
1736
- class GceClusterConfig
1747
+ # Describes the identifying information, config, and status of a cluster of
1748
+ # Google Compute Engine instances.
1749
+ class Cluster
1737
1750
  include Google::Apis::Core::Hashable
1738
1751
 
1739
- # Optional The Google Compute Engine network to be used for machine
1740
- # communications. Cannot be specified with subnetwork_uri. If neither
1741
- # network_uri nor subnetwork_uri is specified, the "default" network of the
1742
- # project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using
1743
- # Subnetworks for more information). Example: https://www.googleapis.com/compute/
1744
- # v1/projects/[project_id]/regions/global/default.
1745
- # Corresponds to the JSON property `networkUri`
1746
- # @return [String]
1747
- attr_accessor :network_uri
1748
-
1749
- # Required The zone where the Google Compute Engine cluster will be located.
1750
- # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
1751
- # zone].
1752
- # Corresponds to the JSON property `zoneUri`
1752
+ # Required The Google Cloud Platform project ID that the cluster belongs to.
1753
+ # Corresponds to the JSON property `projectId`
1753
1754
  # @return [String]
1754
- attr_accessor :zone_uri
1755
+ attr_accessor :project_id
1755
1756
 
1756
- # The Google Compute Engine metadata entries to add to all instances (see
1757
- # Project and instance metadata (https://cloud.google.com/compute/docs/storing-
1758
- # retrieving-metadata#project_and_instance_metadata)).
1759
- # Corresponds to the JSON property `metadata`
1757
+ # Optional The labels to associate with this cluster. Label keys must contain 1
1758
+ # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
1759
+ # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
1760
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1761
+ # . No more than 32 labels can be associated with a cluster.
1762
+ # Corresponds to the JSON property `labels`
1760
1763
  # @return [Hash<String,String>]
1761
- attr_accessor :metadata
1764
+ attr_accessor :labels
1762
1765
 
1763
- # Optional If true, all instances in the cluster will only have internal IP
1764
- # addresses. By default, clusters are not restricted to internal IP addresses,
1765
- # and will have ephemeral external IP addresses assigned to each instance. This
1766
- # internal_ip_only restriction can only be enabled for subnetwork enabled
1767
- # networks, and all off-cluster dependencies must be configured to be accessible
1768
- # without external IP addresses.
1769
- # Corresponds to the JSON property `internalIpOnly`
1770
- # @return [Boolean]
1771
- attr_accessor :internal_ip_only
1772
- alias_method :internal_ip_only?, :internal_ip_only
1766
+ # The status of a cluster and its instances.
1767
+ # Corresponds to the JSON property `status`
1768
+ # @return [Google::Apis::DataprocV1::ClusterStatus]
1769
+ attr_accessor :status
1773
1770
 
1774
- # Optional The URIs of service account scopes to be included in Google Compute
1775
- # Engine instances. The following base set of scopes is always included:
1776
- # https://www.googleapis.com/auth/cloud.useraccounts.readonly
1777
- # https://www.googleapis.com/auth/devstorage.read_write
1778
- # https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the
1779
- # following defaults are also provided:
1780
- # https://www.googleapis.com/auth/bigquery
1781
- # https://www.googleapis.com/auth/bigtable.admin.table
1782
- # https://www.googleapis.com/auth/bigtable.data
1783
- # https://www.googleapis.com/auth/devstorage.full_control
1784
- # Corresponds to the JSON property `serviceAccountScopes`
1785
- # @return [Array<String>]
1786
- attr_accessor :service_account_scopes
1771
+ # Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature:
1772
+ # This report is available for testing purposes only. It may be changed before
1773
+ # final release.
1774
+ # Corresponds to the JSON property `metrics`
1775
+ # @return [Google::Apis::DataprocV1::ClusterMetrics]
1776
+ attr_accessor :metrics
1787
1777
 
1788
- # The Google Compute Engine tags to add to all instances (see Tagging instances).
1789
- # Corresponds to the JSON property `tags`
1790
- # @return [Array<String>]
1791
- attr_accessor :tags
1778
+ # Output-only The previous cluster status.
1779
+ # Corresponds to the JSON property `statusHistory`
1780
+ # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
1781
+ attr_accessor :status_history
1792
1782
 
1793
- # Optional The service account of the instances. Defaults to the default Google
1794
- # Compute Engine service account. Custom service accounts need permissions
1795
- # equivalent to the folloing IAM roles:
1796
- # roles/logging.logWriter
1797
- # roles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/
1798
- # service-accounts#custom_service_accounts for more information). Example: [
1799
- # account_id]@[project_id].iam.gserviceaccount.com
1800
- # Corresponds to the JSON property `serviceAccount`
1783
+ # The cluster config.
1784
+ # Corresponds to the JSON property `config`
1785
+ # @return [Google::Apis::DataprocV1::ClusterConfig]
1786
+ attr_accessor :config
1787
+
1788
+ # Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
1789
+ # generates this value when it creates the cluster.
1790
+ # Corresponds to the JSON property `clusterUuid`
1801
1791
  # @return [String]
1802
- attr_accessor :service_account
1792
+ attr_accessor :cluster_uuid
1803
1793
 
1804
- # Optional The Google Compute Engine subnetwork to be used for machine
1805
- # communications. Cannot be specified with network_uri. Example: https://www.
1806
- # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
1807
- # Corresponds to the JSON property `subnetworkUri`
1794
+ # Required The cluster name. Cluster names within a project must be unique.
1795
+ # Names of deleted clusters can be reused.
1796
+ # Corresponds to the JSON property `clusterName`
1808
1797
  # @return [String]
1809
- attr_accessor :subnetwork_uri
1798
+ attr_accessor :cluster_name
1810
1799
 
1811
1800
  def initialize(**args)
1812
1801
  update!(**args)
@@ -1814,14 +1803,39 @@ module Google
1814
1803
 
1815
1804
  # Update properties of this object
1816
1805
  def update!(**args)
1817
- @network_uri = args[:network_uri] if args.key?(:network_uri)
1818
- @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
1819
- @metadata = args[:metadata] if args.key?(:metadata)
1820
- @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
1821
- @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
1822
- @tags = args[:tags] if args.key?(:tags)
1823
- @service_account = args[:service_account] if args.key?(:service_account)
1824
- @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
1806
+ @project_id = args[:project_id] if args.key?(:project_id)
1807
+ @labels = args[:labels] if args.key?(:labels)
1808
+ @status = args[:status] if args.key?(:status)
1809
+ @metrics = args[:metrics] if args.key?(:metrics)
1810
+ @status_history = args[:status_history] if args.key?(:status_history)
1811
+ @config = args[:config] if args.key?(:config)
1812
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1813
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1814
+ end
1815
+ end
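Pulling the cluster pieces together: a Cluster is essentially a name plus a ClusterConfig. The nested config classes are the ones shown in the removed hunks above, and the create_cluster(project_id, region, cluster) call at the end is an assumption about the generated DataprocService, not something this hunk shows:

  require 'google/apis/dataproc_v1'

  cluster = Google::Apis::DataprocV1::Cluster.new(
    project_id:   'my-project',
    cluster_name: 'example-cluster',
    labels:       { 'env' => 'dev' },
    config:       Google::Apis::DataprocV1::ClusterConfig.new(
      gce_cluster_config: Google::Apis::DataprocV1::GceClusterConfig.new(
        zone_uri: 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-a'
      )
    )
  )

  dataproc = Google::Apis::DataprocV1::DataprocService.new
  # dataproc.authorization = ...
  operation = dataproc.create_cluster('my-project', 'global', cluster)  # assumed method name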
1816
+
1817
+ # The response message for Operations.ListOperations.
1818
+ class ListOperationsResponse
1819
+ include Google::Apis::Core::Hashable
1820
+
1821
+ # The standard List next-page token.
1822
+ # Corresponds to the JSON property `nextPageToken`
1823
+ # @return [String]
1824
+ attr_accessor :next_page_token
1825
+
1826
+ # A list of operations that matches the specified filter in the request.
1827
+ # Corresponds to the JSON property `operations`
1828
+ # @return [Array<Google::Apis::DataprocV1::Operation>]
1829
+ attr_accessor :operations
1830
+
1831
+ def initialize(**args)
1832
+ update!(**args)
1833
+ end
1834
+
1835
+ # Update properties of this object
1836
+ def update!(**args)
1837
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1838
+ @operations = args[:operations] if args.key?(:operations)
1825
1839
  end
1826
1840
  end
1827
1841
  end