google-cloud-dataproc 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.yardopts +8 -0
- data/LICENSE +201 -0
- data/README.md +54 -0
- data/lib/google/cloud/dataproc.rb +177 -0
- data/lib/google/cloud/dataproc/credentials.rb +30 -0
- data/lib/google/cloud/dataproc/v1.rb +171 -0
- data/lib/google/cloud/dataproc/v1/cluster_controller_client.rb +670 -0
- data/lib/google/cloud/dataproc/v1/cluster_controller_client_config.json +56 -0
- data/lib/google/cloud/dataproc/v1/clusters_pb.rb +166 -0
- data/lib/google/cloud/dataproc/v1/clusters_services_pb.rb +58 -0
- data/lib/google/cloud/dataproc/v1/doc/google/cloud/dataproc/v1/clusters.rb +565 -0
- data/lib/google/cloud/dataproc/v1/doc/google/cloud/dataproc/v1/jobs.rb +682 -0
- data/lib/google/cloud/dataproc/v1/doc/google/protobuf/any.rb +124 -0
- data/lib/google/cloud/dataproc/v1/doc/google/protobuf/duration.rb +90 -0
- data/lib/google/cloud/dataproc/v1/doc/google/protobuf/field_mask.rb +223 -0
- data/lib/google/cloud/dataproc/v1/doc/google/protobuf/timestamp.rb +106 -0
- data/lib/google/cloud/dataproc/v1/doc/google/rpc/status.rb +83 -0
- data/lib/google/cloud/dataproc/v1/doc/overview.rb +77 -0
- data/lib/google/cloud/dataproc/v1/job_controller_client.rb +487 -0
- data/lib/google/cloud/dataproc/v1/job_controller_client_config.json +56 -0
- data/lib/google/cloud/dataproc/v1/jobs_pb.rb +244 -0
- data/lib/google/cloud/dataproc/v1/jobs_services_pb.rb +59 -0
- data/lib/google/cloud/dataproc/v1/operations_pb.rb +45 -0
- metadata +123 -0
data/lib/google/cloud/dataproc/v1/doc/google/cloud/dataproc/v1/jobs.rb
@@ -0,0 +1,682 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

module Google
  module Cloud
    module Dataproc
      ##
      # # Google Cloud Dataproc API Contents
      #
      # | Class | Description |
      # | ----- | ----------- |
      # | [ClusterControllerClient][] | Manages Hadoop-based clusters on Google Cloud Platform. |
      # | [JobControllerClient][] | Manages Hadoop-based jobs on Google Cloud Platform. |
      # | [Data Types][] | Data types for Google::Cloud::Dataproc::V1 |
      #
      # [ClusterControllerClient]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-dataproc/latest/google/cloud/dataproc/v1/clustercontrollerclient
      # [JobControllerClient]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-dataproc/latest/google/cloud/dataproc/v1/jobcontrollerclient
      # [Data Types]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-dataproc/latest/google/cloud/dataproc/v1/datatypes
      #
      module V1
        # The runtime logging config of the job.
        # @!attribute [rw] driver_log_levels
        #   @return [Hash{String => Google::Cloud::Dataproc::V1::LoggingConfig::Level}]
        #     The per-package log levels for the driver. This may include the
        #     "root" package name to configure rootLogger.
        #     Examples:
        #       'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
        class LoggingConfig
          # The Log4j level for job execution. When running an
          # [Apache Hive](http://hive.apache.org/) job, Cloud
          # Dataproc configures the Hive client to an equivalent verbosity level.
          module Level
            # Level is unspecified. Use default level for log4j.
            LEVEL_UNSPECIFIED = 0

            # Use ALL level for log4j.
            ALL = 1

            # Use TRACE level for log4j.
            TRACE = 2

            # Use DEBUG level for log4j.
            DEBUG = 3

            # Use INFO level for log4j.
            INFO = 4

            # Use WARN level for log4j.
            WARN = 5

            # Use ERROR level for log4j.
            ERROR = 6

            # Use FATAL level for log4j.
            FATAL = 7

            # Turn off log4j.
            OFF = 8
          end
        end
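
        # A minimal illustrative sketch (not part of the generated file): building
        # a LoggingConfig. The package names and levels below are hypothetical;
        # the generated protobuf class accepts enum values as Ruby symbols and
        # map fields as plain hashes.
        #
        #   logging_config = Google::Cloud::Dataproc::V1::LoggingConfig.new(
        #     driver_log_levels: {
        #       "root"       => :INFO,
        #       "org.apache" => :DEBUG
        #     }
        #   )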

        # A Cloud Dataproc job for running
        # [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
        # jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
        # @!attribute [rw] main_jar_file_uri
        #   @return [String]
        #     The HCFS URI of the jar file containing the main class.
        #     Examples:
        #       'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
        #       'hdfs:/tmp/test-samples/custom-wordcount.jar'
        #       'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
        # @!attribute [rw] main_class
        #   @return [String]
        #     The name of the driver's main class. The jar file containing the class
        #     must be in the default CLASSPATH or specified in +jar_file_uris+.
        # @!attribute [rw] args
        #   @return [Array<String>]
        #     Optional. The arguments to pass to the driver. Do not
        #     include arguments, such as +-libjars+ or +-Dfoo=bar+, that can be set as job
        #     properties, since a collision may occur that causes an incorrect job
        #     submission.
        # @!attribute [rw] jar_file_uris
        #   @return [Array<String>]
        #     Optional. Jar file URIs to add to the CLASSPATHs of the
        #     Hadoop driver and tasks.
        # @!attribute [rw] file_uris
        #   @return [Array<String>]
        #     Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
        #     to the working directory of Hadoop drivers and distributed tasks. Useful
        #     for naively parallel tasks.
        # @!attribute [rw] archive_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of archives to be extracted in the working directory of
        #     Hadoop drivers and tasks. Supported file types:
        #     .jar, .tar, .tar.gz, .tgz, or .zip.
        # @!attribute [rw] properties
        #   @return [Hash{String => String}]
        #     Optional. A mapping of property names to values, used to configure Hadoop.
        #     Properties that conflict with values set by the Cloud Dataproc API may be
        #     overwritten. Can include properties set in /etc/hadoop/conf/*-site and
        #     classes in user code.
        # @!attribute [rw] logging_config
        #   @return [Google::Cloud::Dataproc::V1::LoggingConfig]
        #     Optional. The runtime log config for job execution.
        class HadoopJob; end
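
        # Illustrative sketch only: a HadoopJob pointing at a hypothetical jar in
        # Cloud Storage, reusing the +logging_config+ built above. Arguments are
        # passed through to the driver unchanged.
        #
        #   hadoop_job = Google::Cloud::Dataproc::V1::HadoopJob.new(
        #     main_jar_file_uri: "gs://my-bucket/wordcount.jar",
        #     args: ["gs://my-bucket/input/", "gs://my-bucket/output/"],
        #     logging_config: logging_config
        #   )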

        # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
        # applications on YARN.
        # @!attribute [rw] main_jar_file_uri
        #   @return [String]
        #     The HCFS URI of the jar file that contains the main class.
        # @!attribute [rw] main_class
        #   @return [String]
        #     The name of the driver's main class. The jar file that contains the class
        #     must be in the default CLASSPATH or specified in +jar_file_uris+.
        # @!attribute [rw] args
        #   @return [Array<String>]
        #     Optional. The arguments to pass to the driver. Do not include arguments,
        #     such as +--conf+, that can be set as job properties, since a collision may
        #     occur that causes an incorrect job submission.
        # @!attribute [rw] jar_file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
        #     Spark driver and tasks.
        # @!attribute [rw] file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of files to be copied to the working directory of
        #     Spark drivers and distributed tasks. Useful for naively parallel tasks.
        # @!attribute [rw] archive_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of archives to be extracted in the working directory
        #     of Spark drivers and tasks. Supported file types:
        #     .jar, .tar, .tar.gz, .tgz, and .zip.
        # @!attribute [rw] properties
        #   @return [Hash{String => String}]
        #     Optional. A mapping of property names to values, used to configure Spark.
        #     Properties that conflict with values set by the Cloud Dataproc API may be
        #     overwritten. Can include properties set in
        #     /etc/spark/conf/spark-defaults.conf and classes in user code.
        # @!attribute [rw] logging_config
        #   @return [Google::Cloud::Dataproc::V1::LoggingConfig]
        #     Optional. The runtime log config for job execution.
        class SparkJob; end
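
        # Illustrative sketch only (hypothetical class and jar names): a SparkJob
        # that names its entry point via +main_class+ rather than
        # +main_jar_file_uri+, so the class must be resolvable from
        # +jar_file_uris+ or the default CLASSPATH.
        #
        #   spark_job = Google::Cloud::Dataproc::V1::SparkJob.new(
        #     main_class: "com.example.SparkPi",
        #     jar_file_uris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        #     args: ["1000"]
        #   )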

        # A Cloud Dataproc job for running
        # [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
        # applications on YARN.
        # @!attribute [rw] main_python_file_uri
        #   @return [String]
        #     Required. The HCFS URI of the main Python file to use as the driver. Must
        #     be a .py file.
        # @!attribute [rw] args
        #   @return [Array<String>]
        #     Optional. The arguments to pass to the driver. Do not include arguments,
        #     such as +--conf+, that can be set as job properties, since a collision may
        #     occur that causes an incorrect job submission.
        # @!attribute [rw] python_file_uris
        #   @return [Array<String>]
        #     Optional. HCFS file URIs of Python files to pass to the PySpark
        #     framework. Supported file types: .py, .egg, and .zip.
        # @!attribute [rw] jar_file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
        #     Python driver and tasks.
        # @!attribute [rw] file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of files to be copied to the working directory of
        #     Python drivers and distributed tasks. Useful for naively parallel tasks.
        # @!attribute [rw] archive_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of archives to be extracted in the working directory of
        #     Python drivers and tasks. Supported file types:
        #     .jar, .tar, .tar.gz, .tgz, and .zip.
        # @!attribute [rw] properties
        #   @return [Hash{String => String}]
        #     Optional. A mapping of property names to values, used to configure PySpark.
        #     Properties that conflict with values set by the Cloud Dataproc API may be
        #     overwritten. Can include properties set in
        #     /etc/spark/conf/spark-defaults.conf and classes in user code.
        # @!attribute [rw] logging_config
        #   @return [Google::Cloud::Dataproc::V1::LoggingConfig]
        #     Optional. The runtime log config for job execution.
        class PySparkJob; end
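
        # Illustrative sketch only (hypothetical file names): the driver script is
        # the one required field; helper modules ride along via
        # +python_file_uris+.
        #
        #   pyspark_job = Google::Cloud::Dataproc::V1::PySparkJob.new(
        #     main_python_file_uri: "gs://my-bucket/jobs/etl.py",
        #     python_file_uris: ["gs://my-bucket/jobs/helpers.py"]
        #   )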

        # A list of queries to run on a cluster.
        # @!attribute [rw] queries
        #   @return [Array<String>]
        #     Required. The queries to execute. You do not need to terminate a query
        #     with a semicolon. Multiple queries can be specified in one string
        #     by separating each with a semicolon. Here is an example of a Cloud
        #     Dataproc API snippet that uses a QueryList to specify a HiveJob:
        #
        #         "hiveJob": {
        #           "queryList": {
        #             "queries": [
        #               "query1",
        #               "query2",
        #               "query3;query4"
        #             ]
        #           }
        #         }
        class QueryList; end
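
        # The same shape expressed with the generated Ruby class (illustrative
        # only; the query strings are hypothetical placeholders):
        #
        #   query_list = Google::Cloud::Dataproc::V1::QueryList.new(
        #     queries: ["query1", "query2", "query3;query4"]
        #   )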

        # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
        # queries on YARN.
        # @!attribute [rw] query_file_uri
        #   @return [String]
        #     The HCFS URI of the script that contains Hive queries.
        # @!attribute [rw] query_list
        #   @return [Google::Cloud::Dataproc::V1::QueryList]
        #     A list of queries.
        # @!attribute [rw] continue_on_failure
        #   @return [true, false]
        #     Optional. Whether to continue executing queries if a query fails.
        #     The default value is +false+. Setting to +true+ can be useful when executing
        #     independent parallel queries.
        # @!attribute [rw] script_variables
        #   @return [Hash{String => String}]
        #     Optional. Mapping of query variable names to values (equivalent to the
        #     Hive command: +SET name="value";+).
        # @!attribute [rw] properties
        #   @return [Hash{String => String}]
        #     Optional. A mapping of property names and values, used to configure Hive.
        #     Properties that conflict with values set by the Cloud Dataproc API may be
        #     overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
        #     /etc/hive/conf/hive-site.xml, and classes in user code.
        # @!attribute [rw] jar_file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of jar files to add to the CLASSPATH of the
        #     Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
        #     and UDFs.
        class HiveJob; end
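
        # Illustrative sketch only: a HiveJob driven by the QueryList built above,
        # with a substitution variable (the variable name and value are
        # hypothetical).
        #
        #   hive_job = Google::Cloud::Dataproc::V1::HiveJob.new(
        #     query_list: query_list,
        #     script_variables: { "env" => "staging" },
        #     continue_on_failure: true
        #   )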

        # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
        # queries.
        # @!attribute [rw] query_file_uri
        #   @return [String]
        #     The HCFS URI of the script that contains SQL queries.
        # @!attribute [rw] query_list
        #   @return [Google::Cloud::Dataproc::V1::QueryList]
        #     A list of queries.
        # @!attribute [rw] script_variables
        #   @return [Hash{String => String}]
        #     Optional. Mapping of query variable names to values (equivalent to the
        #     Spark SQL command: +SET name="value";+).
        # @!attribute [rw] properties
        #   @return [Hash{String => String}]
        #     Optional. A mapping of property names to values, used to configure
        #     Spark SQL's SparkConf. Properties that conflict with values set by the
        #     Cloud Dataproc API may be overwritten.
        # @!attribute [rw] jar_file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
        # @!attribute [rw] logging_config
        #   @return [Google::Cloud::Dataproc::V1::LoggingConfig]
        #     Optional. The runtime log config for job execution.
        class SparkSqlJob; end
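
        # Illustrative sketch only (hypothetical script location): a SparkSqlJob
        # reading its statements from a script rather than an inline QueryList.
        #
        #   spark_sql_job = Google::Cloud::Dataproc::V1::SparkSqlJob.new(
        #     query_file_uri: "gs://my-bucket/sql/report.sql",
        #     script_variables: { "date" => "2018-01-01" }
        #   )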

        # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
        # queries on YARN.
        # @!attribute [rw] query_file_uri
        #   @return [String]
        #     The HCFS URI of the script that contains the Pig queries.
        # @!attribute [rw] query_list
        #   @return [Google::Cloud::Dataproc::V1::QueryList]
        #     A list of queries.
        # @!attribute [rw] continue_on_failure
        #   @return [true, false]
        #     Optional. Whether to continue executing queries if a query fails.
        #     The default value is +false+. Setting to +true+ can be useful when executing
        #     independent parallel queries.
        # @!attribute [rw] script_variables
        #   @return [Hash{String => String}]
        #     Optional. Mapping of query variable names to values (equivalent to the Pig
        #     command: +name=[value]+).
        # @!attribute [rw] properties
        #   @return [Hash{String => String}]
        #     Optional. A mapping of property names to values, used to configure Pig.
        #     Properties that conflict with values set by the Cloud Dataproc API may be
        #     overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
        #     /etc/pig/conf/pig.properties, and classes in user code.
        # @!attribute [rw] jar_file_uris
        #   @return [Array<String>]
        #     Optional. HCFS URIs of jar files to add to the CLASSPATH of
        #     the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
        # @!attribute [rw] logging_config
        #   @return [Google::Cloud::Dataproc::V1::LoggingConfig]
        #     Optional. The runtime log config for job execution.
        class PigJob; end
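
        # Illustrative sketch only (hypothetical script and paths): a PigJob with
        # a parameter bound via +script_variables+.
        #
        #   pig_job = Google::Cloud::Dataproc::V1::PigJob.new(
        #     query_file_uri: "gs://my-bucket/pig/clean.pig",
        #     script_variables: { "input" => "gs://my-bucket/raw/" }
        #   )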

        # Cloud Dataproc job config.
        # @!attribute [rw] cluster_name
        #   @return [String]
        #     Required. The name of the cluster where the job will be submitted.
        # @!attribute [rw] cluster_uuid
        #   @return [String]
        #     Output-only. A cluster UUID generated by the Cloud Dataproc service when
        #     the job is submitted.
        class JobPlacement; end

        # Cloud Dataproc job status.
        # @!attribute [rw] state
        #   @return [Google::Cloud::Dataproc::V1::JobStatus::State]
        #     Output-only. A state message specifying the overall job state.
        # @!attribute [rw] details
        #   @return [String]
        #     Output-only. Optional job state details, such as an error
        #     description if the state is <code>ERROR</code>.
        # @!attribute [rw] state_start_time
        #   @return [Google::Protobuf::Timestamp]
        #     Output-only. The time when this state was entered.
        # @!attribute [rw] substate
        #   @return [Google::Cloud::Dataproc::V1::JobStatus::Substate]
        #     Output-only. Additional state information, which includes
        #     status reported by the agent.
        class JobStatus
          # The job state.
          module State
            # The job state is unknown.
            STATE_UNSPECIFIED = 0

            # The job is pending; it has been submitted, but is not yet running.
            PENDING = 1

            # Job has been received by the service and completed initial setup;
            # it will soon be submitted to the cluster.
            SETUP_DONE = 8

            # The job is running on the cluster.
            RUNNING = 2

            # A CancelJob request has been received, but is pending.
            CANCEL_PENDING = 3

            # Transient in-flight resources have been canceled, and the request to
            # cancel the running job has been issued to the cluster.
            CANCEL_STARTED = 7

            # The job cancellation was successful.
            CANCELLED = 4

            # The job has completed successfully.
            DONE = 5

            # The job has completed, but encountered an error.
            ERROR = 6

            # Job attempt has failed. The detail field contains failure details for
            # this attempt.
            #
            # Applies to restartable jobs only.
            ATTEMPT_FAILURE = 9
          end

          module Substate
            # The job substate is unknown.
            UNSPECIFIED = 0

            # The Job is submitted to the agent.
            #
            # Applies to RUNNING state.
            SUBMITTED = 1

            # The Job has been received and is awaiting execution (it may be waiting
            # for a condition to be met). See the "details" field for the reason for
            # the delay.
            #
            # Applies to RUNNING state.
            QUEUED = 2

            # The agent-reported status is out of date, which may be caused by a
            # loss of communication between the agent and Cloud Dataproc. If the
            # agent does not send a timely update, the job will fail.
            #
            # Applies to RUNNING state.
            STALE_STATUS = 3
          end
        end
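
        # Illustrative only: inspecting a returned job's state. Generated
        # protobuf enum fields read back as Ruby symbols; the +job+ variable is
        # assumed to be a Google::Cloud::Dataproc::V1::Job.
        #
        #   case job.status.state
        #   when :DONE  then puts "finished"
        #   when :ERROR then puts "failed: #{job.status.details}"
        #   end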

        # Encapsulates the full scoping used to reference a job.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] job_id
        #   @return [String]
        #     Optional. The job ID, which must be unique within the project. The job ID
        #     is generated by the server upon job submission or provided by the user as a
        #     means to perform retries without creating duplicate jobs. The ID must
        #     contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
        #     hyphens (-). The maximum length is 100 characters.
        class JobReference; end

        # A YARN application created by a job. Application information is a subset of
        # <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
        #
        # **Beta Feature**: This report is available for testing purposes only. It may
        # be changed before final release.
        # @!attribute [rw] name
        #   @return [String]
        #     Required. The application name.
        # @!attribute [rw] state
        #   @return [Google::Cloud::Dataproc::V1::YarnApplication::State]
        #     Required. The application state.
        # @!attribute [rw] progress
        #   @return [Float]
        #     Required. The numerical progress of the application, from 1 to 100.
        # @!attribute [rw] tracking_url
        #   @return [String]
        #     Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
        #     TimelineServer that provides application-specific information. The URL uses
        #     the internal hostname, and requires a proxy server for resolution and,
        #     possibly, access.
        class YarnApplication
          # The application state, corresponding to
          # <code>YarnProtos.YarnApplicationStateProto</code>.
          module State
            # Status is unspecified.
            STATE_UNSPECIFIED = 0

            # Status is NEW.
            NEW = 1

            # Status is NEW_SAVING.
            NEW_SAVING = 2

            # Status is SUBMITTED.
            SUBMITTED = 3

            # Status is ACCEPTED.
            ACCEPTED = 4

            # Status is RUNNING.
            RUNNING = 5

            # Status is FINISHED.
            FINISHED = 6

            # Status is FAILED.
            FAILED = 7

            # Status is KILLED.
            KILLED = 8
          end
        end

        # A Cloud Dataproc job resource.
        # @!attribute [rw] reference
        #   @return [Google::Cloud::Dataproc::V1::JobReference]
        #     Optional. The fully qualified reference to the job, which can be used to
        #     obtain the equivalent REST path of the job resource. If this property
        #     is not specified when a job is created, the server generates a
        #     <code>job_id</code>.
        # @!attribute [rw] placement
        #   @return [Google::Cloud::Dataproc::V1::JobPlacement]
        #     Required. Job information, including how, when, and where to
        #     run the job.
        # @!attribute [rw] hadoop_job
        #   @return [Google::Cloud::Dataproc::V1::HadoopJob]
        #     Job is a Hadoop job.
        # @!attribute [rw] spark_job
        #   @return [Google::Cloud::Dataproc::V1::SparkJob]
        #     Job is a Spark job.
        # @!attribute [rw] pyspark_job
        #   @return [Google::Cloud::Dataproc::V1::PySparkJob]
        #     Job is a PySpark job.
        # @!attribute [rw] hive_job
        #   @return [Google::Cloud::Dataproc::V1::HiveJob]
        #     Job is a Hive job.
        # @!attribute [rw] pig_job
        #   @return [Google::Cloud::Dataproc::V1::PigJob]
        #     Job is a Pig job.
        # @!attribute [rw] spark_sql_job
        #   @return [Google::Cloud::Dataproc::V1::SparkSqlJob]
        #     Job is a SparkSql job.
        # @!attribute [rw] status
        #   @return [Google::Cloud::Dataproc::V1::JobStatus]
        #     Output-only. The job status. Additional application-specific
        #     status information may be contained in the <code>type_job</code>
        #     and <code>yarn_applications</code> fields.
        # @!attribute [rw] status_history
        #   @return [Array<Google::Cloud::Dataproc::V1::JobStatus>]
        #     Output-only. The previous job status.
        # @!attribute [rw] yarn_applications
        #   @return [Array<Google::Cloud::Dataproc::V1::YarnApplication>]
        #     Output-only. The collection of YARN applications spun up by this job.
        #
        #     **Beta Feature**: This report is available for testing purposes only. It may
        #     be changed before final release.
        # @!attribute [rw] driver_output_resource_uri
        #   @return [String]
        #     Output-only. A URI pointing to the location of the stdout of the job's
        #     driver program.
        # @!attribute [rw] driver_control_files_uri
        #   @return [String]
        #     Output-only. If present, the location of miscellaneous control files
        #     which may be used as part of job setup and handling. If not present,
        #     control files may be placed in the same location as +driver_output_uri+.
        # @!attribute [rw] labels
        #   @return [Hash{String => String}]
        #     Optional. The labels to associate with this job.
        #     Label **keys** must contain 1 to 63 characters, and must conform to
        #     [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
        #     Label **values** may be empty, but, if present, must contain 1 to 63
        #     characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
        #     No more than 32 labels can be associated with a job.
        # @!attribute [rw] scheduling
        #   @return [Google::Cloud::Dataproc::V1::JobScheduling]
        #     Optional. Job scheduling configuration.
        class Job; end
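
        # Illustrative sketch only: assembling a Job from the pieces above.
        # Exactly one of the typed job fields (+hadoop_job+, +spark_job+, ...)
        # should be set; the cluster name below is hypothetical.
        #
        #   job = Google::Cloud::Dataproc::V1::Job.new(
        #     placement: Google::Cloud::Dataproc::V1::JobPlacement.new(
        #       cluster_name: "my-cluster"
        #     ),
        #     hadoop_job: hadoop_job,
        #     labels: { "env" => "staging" }
        #   )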

        # Job scheduling options.
        #
        # **Beta Feature**: These options are available for testing purposes only.
        # They may be changed before final release.
        # @!attribute [rw] max_failures_per_hour
        #   @return [Integer]
        #     Optional. Maximum number of times per hour a driver may be restarted as
        #     a result of the driver terminating with a non-zero code before the job
        #     is reported failed.
        #
        #     A job may be reported as thrashing if the driver exits with a non-zero
        #     code 4 times within a 10-minute window.
        #
        #     Maximum value is 10.
        class JobScheduling; end
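
        # Illustrative only: opting the job built above into restart-on-failure
        # by attaching a scheduling config (the value shown is an example; the
        # API caps it at 10).
        #
        #   job.scheduling = Google::Cloud::Dataproc::V1::JobScheduling.new(
        #     max_failures_per_hour: 3
        #   )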

        # A request to submit a job.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] region
        #   @return [String]
        #     Required. The Cloud Dataproc region in which to handle the request.
        # @!attribute [rw] job
        #   @return [Google::Cloud::Dataproc::V1::Job]
        #     Required. The job resource.
        class SubmitJobRequest; end
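
        # A sketch of submitting the job through the generated client, assuming
        # the conventional GAPIC signature in which the request's required fields
        # are passed positionally. The project and region are hypothetical.
        #
        #   require "google/cloud/dataproc"
        #
        #   job_client = Google::Cloud::Dataproc::V1::JobControllerClient.new
        #   submitted  = job_client.submit_job("my-project", "global", job)
        #   job_id     = submitted.reference.job_id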

        # A request to get the resource representation for a job in a project.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] region
        #   @return [String]
        #     Required. The Cloud Dataproc region in which to handle the request.
        # @!attribute [rw] job_id
        #   @return [String]
        #     Required. The job ID.
        class GetJobRequest; end

        # A request to list jobs in a project.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] region
        #   @return [String]
        #     Required. The Cloud Dataproc region in which to handle the request.
        # @!attribute [rw] page_size
        #   @return [Integer]
        #     Optional. The number of results to return in each response.
        # @!attribute [rw] page_token
        #   @return [String]
        #     Optional. The page token, returned by a previous call, to request the
        #     next page of results.
        # @!attribute [rw] cluster_name
        #   @return [String]
        #     Optional. If set, the returned jobs list includes only jobs that were
        #     submitted to the named cluster.
        # @!attribute [rw] job_state_matcher
        #   @return [Google::Cloud::Dataproc::V1::ListJobsRequest::JobStateMatcher]
        #     Optional. Specifies enumerated categories of jobs to list
        #     (default = match ALL jobs).
        #
        #     If +filter+ is provided, +jobStateMatcher+ will be ignored.
        # @!attribute [rw] filter
        #   @return [String]
        #     Optional. A filter constraining the jobs to list. Filters are
        #     case-sensitive and have the following syntax:
        #
        #         [field = value] AND [field [= value]] ...
        #
        #     where **field** is +status.state+ or +labels.[KEY]+, and +[KEY]+ is a label
        #     key. **value** can be +*+ to match all values.
        #     +status.state+ can be either +ACTIVE+ or +NON_ACTIVE+.
        #     Only the logical +AND+ operator is supported; space-separated items are
        #     treated as having an implicit +AND+ operator.
        #
        #     Example filter:
        #
        #         status.state = ACTIVE AND labels.env = staging AND labels.starred = *
        class ListJobsRequest
          # A matcher that specifies categories of job states.
          module JobStateMatcher
            # Match all jobs, regardless of state.
            ALL = 0

            # Only match jobs in non-terminal states: PENDING, RUNNING, or
            # CANCEL_PENDING.
            ACTIVE = 1

            # Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
            NON_ACTIVE = 2
          end
        end
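
        # A sketch of listing jobs with the filter syntax above, assuming the
        # generated client exposes +filter+ as an optional keyword argument and
        # returns a paged enumerable (project, region, and labels are
        # hypothetical).
        #
        #   job_client.list_jobs(
        #     "my-project", "global",
        #     filter: "status.state = ACTIVE AND labels.env = staging"
        #   ).each { |j| puts j.reference.job_id }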

        # A request to update a job.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] region
        #   @return [String]
        #     Required. The Cloud Dataproc region in which to handle the request.
        # @!attribute [rw] job_id
        #   @return [String]
        #     Required. The job ID.
        # @!attribute [rw] job
        #   @return [Google::Cloud::Dataproc::V1::Job]
        #     Required. The changes to the job.
        # @!attribute [rw] update_mask
        #   @return [Google::Protobuf::FieldMask]
        #     Required. Specifies the path, relative to <code>Job</code>, of
        #     the field to update. For example, to update the labels of a Job the
        #     <code>update_mask</code> parameter would be specified as
        #     <code>labels</code>, and the +PATCH+ request body would specify the new
        #     value. <strong>Note:</strong> Currently, <code>labels</code> is the only
        #     field that can be updated.
        class UpdateJobRequest; end
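
        # A sketch of the labels-only update described above (illustrative;
        # assumes the same positional-required-arguments convention as the
        # earlier client calls):
        #
        #   mask    = Google::Protobuf::FieldMask.new(paths: ["labels"])
        #   changes = Google::Cloud::Dataproc::V1::Job.new(labels: { "env" => "prod" })
        #   job_client.update_job("my-project", "global", job_id, changes, mask)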

        # A list of jobs in a project.
        # @!attribute [rw] jobs
        #   @return [Array<Google::Cloud::Dataproc::V1::Job>]
        #     Output-only. Jobs list.
        # @!attribute [rw] next_page_token
        #   @return [String]
        #     Optional. This token is included in the response if there are more results
        #     to fetch. To fetch additional results, provide this value as the
        #     +page_token+ in a subsequent <code>ListJobsRequest</code>.
        class ListJobsResponse; end

        # A request to cancel a job.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] region
        #   @return [String]
        #     Required. The Cloud Dataproc region in which to handle the request.
        # @!attribute [rw] job_id
        #   @return [String]
        #     Required. The job ID.
        class CancelJobRequest; end
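
        # Illustrative only (same positional convention as above): requesting
        # cancellation, then re-fetching the job to observe the state change.
        #
        #   job_client.cancel_job("my-project", "global", job_id)
        #   job = job_client.get_job("my-project", "global", job_id)
        #   puts job.status.state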

        # A request to delete a job.
        # @!attribute [rw] project_id
        #   @return [String]
        #     Required. The ID of the Google Cloud Platform project that the job
        #     belongs to.
        # @!attribute [rw] region
        #   @return [String]
        #     Required. The Cloud Dataproc region in which to handle the request.
        # @!attribute [rw] job_id
        #   @return [String]
        #     Required. The job ID.
        class DeleteJobRequest; end
      end
    end
  end
end