google-api-client 0.10.0 → 0.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (193):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +4 -0
  3. data/README.md +1 -1
  4. data/api_names.yaml +37747 -36512
  5. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  6. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +1016 -114
  7. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +426 -22
  8. data/generated/google/apis/adexchangebuyer2_v2beta1/service.rb +481 -95
  9. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  10. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +27 -10
  11. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +1 -0
  12. data/generated/google/apis/admin_reports_v1.rb +2 -2
  13. data/generated/google/apis/adsense_v1_4.rb +1 -1
  14. data/generated/google/apis/adsensehost_v4_1.rb +1 -1
  15. data/generated/google/apis/analytics_v3.rb +1 -1
  16. data/generated/google/apis/analyticsreporting_v4.rb +4 -4
  17. data/generated/google/apis/analyticsreporting_v4/classes.rb +428 -428
  18. data/generated/google/apis/analyticsreporting_v4/representations.rb +108 -108
  19. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  20. data/generated/google/apis/androidenterprise_v1/classes.rb +60 -66
  21. data/generated/google/apis/androidenterprise_v1/service.rb +12 -11
  22. data/generated/google/apis/androidpublisher_v2.rb +1 -1
  23. data/generated/google/apis/androidpublisher_v2/classes.rb +7 -0
  24. data/generated/google/apis/androidpublisher_v2/representations.rb +1 -0
  25. data/generated/google/apis/androidpublisher_v2/service.rb +1 -1
  26. data/generated/google/apis/appsactivity_v1.rb +3 -3
  27. data/generated/google/apis/appsactivity_v1/service.rb +1 -1
  28. data/generated/google/apis/appstate_v1.rb +1 -1
  29. data/generated/google/apis/bigquery_v2.rb +1 -1
  30. data/generated/google/apis/bigquery_v2/classes.rb +34 -8
  31. data/generated/google/apis/bigquery_v2/representations.rb +15 -0
  32. data/generated/google/apis/calendar_v3.rb +1 -1
  33. data/generated/google/apis/calendar_v3/classes.rb +3 -5
  34. data/generated/google/apis/classroom_v1.rb +25 -22
  35. data/generated/google/apis/classroom_v1/classes.rb +910 -1001
  36. data/generated/google/apis/classroom_v1/representations.rb +240 -240
  37. data/generated/google/apis/classroom_v1/service.rb +1064 -1272
  38. data/generated/google/apis/cloudbilling_v1.rb +3 -3
  39. data/generated/google/apis/cloudbilling_v1/classes.rb +76 -75
  40. data/generated/google/apis/cloudbilling_v1/representations.rb +17 -17
  41. data/generated/google/apis/cloudbilling_v1/service.rb +117 -110
  42. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  43. data/generated/google/apis/cloudbuild_v1/classes.rb +238 -238
  44. data/generated/google/apis/cloudbuild_v1/representations.rb +48 -48
  45. data/generated/google/apis/cloudbuild_v1/service.rb +176 -176
  46. data/generated/google/apis/clouddebugger_v2.rb +4 -4
  47. data/generated/google/apis/clouddebugger_v2/classes.rb +315 -315
  48. data/generated/google/apis/clouddebugger_v2/representations.rb +90 -90
  49. data/generated/google/apis/clouddebugger_v2/service.rb +152 -152
  50. data/generated/google/apis/cloudkms_v1.rb +35 -0
  51. data/generated/google/apis/cloudkms_v1/classes.rb +1039 -0
  52. data/generated/google/apis/cloudkms_v1/representations.rb +448 -0
  53. data/generated/google/apis/cloudkms_v1/service.rb +933 -0
  54. data/generated/google/apis/cloudkms_v1beta1.rb +1 -1
  55. data/generated/google/apis/cloudkms_v1beta1/classes.rb +645 -605
  56. data/generated/google/apis/cloudkms_v1beta1/representations.rb +136 -136
  57. data/generated/google/apis/cloudkms_v1beta1/service.rb +258 -264
  58. data/generated/google/apis/cloudmonitoring_v2beta2.rb +1 -1
  59. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  60. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +677 -456
  61. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +154 -89
  62. data/generated/google/apis/cloudresourcemanager_v1/service.rb +380 -279
  63. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +4 -4
  64. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +247 -114
  65. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +72 -40
  66. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +202 -208
  67. data/generated/google/apis/cloudtrace_v1.rb +4 -4
  68. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -39
  69. data/generated/google/apis/cloudtrace_v1/representations.rb +13 -13
  70. data/generated/google/apis/cloudtrace_v1/service.rb +18 -18
  71. data/generated/google/apis/compute_beta.rb +1 -1
  72. data/generated/google/apis/compute_beta/classes.rb +813 -82
  73. data/generated/google/apis/compute_beta/representations.rb +305 -0
  74. data/generated/google/apis/compute_beta/service.rb +971 -180
  75. data/generated/google/apis/compute_v1.rb +1 -1
  76. data/generated/google/apis/compute_v1/classes.rb +147 -21
  77. data/generated/google/apis/compute_v1/representations.rb +38 -0
  78. data/generated/google/apis/compute_v1/service.rb +347 -65
  79. data/generated/google/apis/content_v2.rb +1 -1
  80. data/generated/google/apis/content_v2/classes.rb +2 -1
  81. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  82. data/generated/google/apis/dataflow_v1b3/classes.rb +3352 -3110
  83. data/generated/google/apis/dataflow_v1b3/representations.rb +812 -730
  84. data/generated/google/apis/dataflow_v1b3/service.rb +264 -183
  85. data/generated/google/apis/dataproc_v1.rb +1 -1
  86. data/generated/google/apis/dataproc_v1/classes.rb +1200 -1164
  87. data/generated/google/apis/dataproc_v1/representations.rb +220 -204
  88. data/generated/google/apis/dataproc_v1/service.rb +299 -299
  89. data/generated/google/apis/datastore_v1.rb +4 -4
  90. data/generated/google/apis/datastore_v1/classes.rb +688 -688
  91. data/generated/google/apis/datastore_v1/representations.rb +167 -167
  92. data/generated/google/apis/datastore_v1/service.rb +68 -68
  93. data/generated/google/apis/deploymentmanager_v2.rb +1 -1
  94. data/generated/google/apis/deploymentmanager_v2/classes.rb +13 -3
  95. data/generated/google/apis/dns_v1.rb +1 -1
  96. data/generated/google/apis/dns_v2beta1.rb +1 -1
  97. data/generated/google/apis/doubleclickbidmanager_v1.rb +1 -1
  98. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +1 -1
  99. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  100. data/generated/google/apis/drive_v2.rb +1 -1
  101. data/generated/google/apis/drive_v2/classes.rb +487 -15
  102. data/generated/google/apis/drive_v2/representations.rb +120 -0
  103. data/generated/google/apis/drive_v2/service.rb +355 -38
  104. data/generated/google/apis/drive_v3.rb +1 -1
  105. data/generated/google/apis/drive_v3/classes.rb +416 -14
  106. data/generated/google/apis/drive_v3/representations.rb +99 -0
  107. data/generated/google/apis/drive_v3/service.rb +315 -28
  108. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  109. data/generated/google/apis/games_management_v1management.rb +1 -1
  110. data/generated/google/apis/games_v1.rb +1 -1
  111. data/generated/google/apis/genomics_v1.rb +7 -7
  112. data/generated/google/apis/genomics_v1/classes.rb +2301 -2301
  113. data/generated/google/apis/genomics_v1/representations.rb +239 -239
  114. data/generated/google/apis/genomics_v1/service.rb +599 -599
  115. data/generated/google/apis/gmail_v1.rb +2 -2
  116. data/generated/google/apis/gmail_v1/service.rb +18 -1
  117. data/generated/google/apis/groupssettings_v1.rb +1 -1
  118. data/generated/google/apis/iam_v1.rb +1 -1
  119. data/generated/google/apis/iam_v1/classes.rb +440 -440
  120. data/generated/google/apis/iam_v1/representations.rb +96 -96
  121. data/generated/google/apis/iam_v1/service.rb +150 -150
  122. data/generated/google/apis/kgsearch_v1/service.rb +13 -13
  123. data/generated/google/apis/language_v1beta1.rb +1 -1
  124. data/generated/google/apis/language_v1beta1/classes.rb +235 -235
  125. data/generated/google/apis/language_v1beta1/representations.rb +73 -73
  126. data/generated/google/apis/language_v1beta1/service.rb +49 -49
  127. data/generated/google/apis/licensing_v1.rb +1 -1
  128. data/generated/google/apis/logging_v2beta1.rb +1 -1
  129. data/generated/google/apis/logging_v2beta1/classes.rb +888 -879
  130. data/generated/google/apis/logging_v2beta1/representations.rb +129 -129
  131. data/generated/google/apis/logging_v2beta1/service.rb +315 -316
  132. data/generated/google/apis/manufacturers_v1/classes.rb +139 -139
  133. data/generated/google/apis/manufacturers_v1/representations.rb +30 -30
  134. data/generated/google/apis/manufacturers_v1/service.rb +32 -32
  135. data/generated/google/apis/monitoring_v3.rb +4 -4
  136. data/generated/google/apis/monitoring_v3/classes.rb +229 -229
  137. data/generated/google/apis/monitoring_v3/representations.rb +45 -45
  138. data/generated/google/apis/monitoring_v3/service.rb +141 -141
  139. data/generated/google/apis/partners_v2.rb +3 -3
  140. data/generated/google/apis/partners_v2/classes.rb +1837 -483
  141. data/generated/google/apis/partners_v2/representations.rb +614 -157
  142. data/generated/google/apis/partners_v2/service.rb +881 -150
  143. data/generated/google/apis/people_v1.rb +1 -1
  144. data/generated/google/apis/people_v1/classes.rb +517 -465
  145. data/generated/google/apis/people_v1/representations.rb +138 -107
  146. data/generated/google/apis/people_v1/service.rb +56 -49
  147. data/generated/google/apis/plus_domains_v1.rb +1 -1
  148. data/generated/google/apis/plus_v1.rb +1 -1
  149. data/generated/google/apis/proximitybeacon_v1beta1.rb +1 -1
  150. data/generated/google/apis/proximitybeacon_v1beta1/classes.rb +529 -458
  151. data/generated/google/apis/proximitybeacon_v1beta1/representations.rb +98 -97
  152. data/generated/google/apis/proximitybeacon_v1beta1/service.rb +494 -370
  153. data/generated/google/apis/pubsub_v1.rb +4 -4
  154. data/generated/google/apis/pubsub_v1/classes.rb +203 -203
  155. data/generated/google/apis/pubsub_v1/representations.rb +65 -65
  156. data/generated/google/apis/pubsub_v1/service.rb +226 -217
  157. data/generated/google/apis/reseller_v1.rb +1 -1
  158. data/generated/google/apis/reseller_v1/classes.rb +213 -61
  159. data/generated/google/apis/reseller_v1/representations.rb +27 -0
  160. data/generated/google/apis/reseller_v1/service.rb +240 -47
  161. data/generated/google/apis/script_v1.rb +14 -14
  162. data/generated/google/apis/script_v1/classes.rb +95 -95
  163. data/generated/google/apis/script_v1/representations.rb +25 -25
  164. data/generated/google/apis/sheets_v4.rb +1 -1
  165. data/generated/google/apis/sheets_v4/classes.rb +4346 -4300
  166. data/generated/google/apis/sheets_v4/representations.rb +853 -837
  167. data/generated/google/apis/sheets_v4/service.rb +32 -32
  168. data/generated/google/apis/slides_v1.rb +1 -1
  169. data/generated/google/apis/slides_v1/classes.rb +804 -748
  170. data/generated/google/apis/slides_v1/representations.rb +204 -187
  171. data/generated/google/apis/slides_v1/service.rb +16 -16
  172. data/generated/google/apis/speech_v1beta1.rb +1 -1
  173. data/generated/google/apis/speech_v1beta1/classes.rb +57 -57
  174. data/generated/google/apis/speech_v1beta1/representations.rb +19 -19
  175. data/generated/google/apis/speech_v1beta1/service.rb +66 -66
  176. data/generated/google/apis/sqladmin_v1beta4.rb +1 -1
  177. data/generated/google/apis/sqladmin_v1beta4/classes.rb +32 -7
  178. data/generated/google/apis/sqladmin_v1beta4/representations.rb +16 -0
  179. data/generated/google/apis/sqladmin_v1beta4/service.rb +9 -2
  180. data/generated/google/apis/storage_v1.rb +1 -1
  181. data/generated/google/apis/storage_v1/service.rb +10 -7
  182. data/generated/google/apis/vision_v1.rb +1 -1
  183. data/generated/google/apis/vision_v1/classes.rb +1393 -865
  184. data/generated/google/apis/vision_v1/representations.rb +367 -102
  185. data/generated/google/apis/vision_v1/service.rb +4 -4
  186. data/generated/google/apis/youtube_analytics_v1.rb +1 -1
  187. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  188. data/generated/google/apis/youtubereporting_v1.rb +4 -4
  189. data/generated/google/apis/youtubereporting_v1/classes.rb +93 -93
  190. data/generated/google/apis/youtubereporting_v1/representations.rb +25 -25
  191. data/generated/google/apis/youtubereporting_v1/service.rb +108 -108
  192. data/lib/google/apis/version.rb +1 -1
  193. metadata +34 -30
@@ -25,7 +25,7 @@ module Google
25
25
  # @see https://cloud.google.com/dataproc/
26
26
  module DataprocV1
27
27
  VERSION = 'V1'
28
- REVISION = '20170207'
28
+ REVISION = '20170228'
29
29
 
30
30
  # View and manage your data across Google Cloud Platform services
31
31
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -22,35 +22,21 @@ module Google
22
22
  module Apis
23
23
  module DataprocV1
24
24
 
25
- # A YARN application created by a job. Application information is a subset of <
26
- # code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
27
- # Beta Feature: This report is available for testing purposes only. It may be
28
- # changed before final release.
29
- class YarnApplication
25
+ # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
26
+ # This report is available for testing purposes only. It may be changed before
27
+ # final release.
28
+ class ClusterMetrics
30
29
  include Google::Apis::Core::Hashable
31
30
 
32
- # Required The application name.
33
- # Corresponds to the JSON property `name`
34
- # @return [String]
35
- attr_accessor :name
36
-
37
- # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
38
- # TimelineServer that provides application-specific information. The URL uses
39
- # the internal hostname, and requires a proxy server for resolution and,
40
- # possibly, access.
41
- # Corresponds to the JSON property `trackingUrl`
42
- # @return [String]
43
- attr_accessor :tracking_url
44
-
45
- # Required The numerical progress of the application, from 1 to 100.
46
- # Corresponds to the JSON property `progress`
47
- # @return [Float]
48
- attr_accessor :progress
31
+ # The YARN metrics.
32
+ # Corresponds to the JSON property `yarnMetrics`
33
+ # @return [Hash<String,String>]
34
+ attr_accessor :yarn_metrics
49
35
 
50
- # Required The application state.
51
- # Corresponds to the JSON property `state`
52
- # @return [String]
53
- attr_accessor :state
36
+ # The HDFS metrics.
37
+ # Corresponds to the JSON property `hdfsMetrics`
38
+ # @return [Hash<String,String>]
39
+ attr_accessor :hdfs_metrics
54
40
 
55
41
  def initialize(**args)
56
42
  update!(**args)
@@ -58,33 +44,27 @@ module Google
58
44
 
59
45
  # Update properties of this object
60
46
  def update!(**args)
61
- @name = args[:name] if args.key?(:name)
62
- @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
63
- @progress = args[:progress] if args.key?(:progress)
64
- @state = args[:state] if args.key?(:state)
47
+ @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
48
+ @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
65
49
  end
66
50
  end
67
51
 
68
- # A list of queries to run on a cluster.
69
- class QueryList
52
+ # Specifies the type and number of accelerator cards attached to the instances
53
+ # of an instance group (see GPUs on Compute Engine).
54
+ class AcceleratorConfig
70
55
  include Google::Apis::Core::Hashable
71
56
 
72
- # Required The queries to execute. You do not need to terminate a query with a
73
- # semicolon. Multiple queries can be specified in one string by separating each
74
- # with a semicolon. Here is an example of an Cloud Dataproc API snippet that
75
- # uses a QueryList to specify a HiveJob:
76
- # "hiveJob": `
77
- # "queryList": `
78
- # "queries": [
79
- # "query1",
80
- # "query2",
81
- # "query3;query4",
82
- # ]
83
- # `
84
- # `
85
- # Corresponds to the JSON property `queries`
86
- # @return [Array<String>]
87
- attr_accessor :queries
57
+ # Full or partial URI of the accelerator type resource to expose to this
58
+ # instance. See Google Compute Engine AcceleratorTypes( /compute/docs/reference/
59
+ # beta/acceleratorTypes)
60
+ # Corresponds to the JSON property `acceleratorTypeUri`
61
+ # @return [String]
62
+ attr_accessor :accelerator_type_uri
63
+
64
+ # The number of the accelerator cards of this type exposed to this instance.
65
+ # Corresponds to the JSON property `acceleratorCount`
66
+ # @return [Fixnum]
67
+ attr_accessor :accelerator_count
88
68
 
89
69
  def initialize(**args)
90
70
  update!(**args)
@@ -92,69 +72,21 @@ module Google
92
72
 
93
73
  # Update properties of this object
94
74
  def update!(**args)
95
- @queries = args[:queries] if args.key?(:queries)
75
+ @accelerator_type_uri = args[:accelerator_type_uri] if args.key?(:accelerator_type_uri)
76
+ @accelerator_count = args[:accelerator_count] if args.key?(:accelerator_count)
96
77
  end
97
78
  end
98
79
 
99
- # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
100
- # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
101
- # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
102
- # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
103
- class HadoopJob
80
+ # The runtime logging config of the job.
81
+ class LoggingConfig
104
82
  include Google::Apis::Core::Hashable
105
83
 
106
- # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
107
- # Corresponds to the JSON property `jarFileUris`
108
- # @return [Array<String>]
109
- attr_accessor :jar_file_uris
110
-
111
- # The runtime logging config of the job.
112
- # Corresponds to the JSON property `loggingConfig`
113
- # @return [Google::Apis::DataprocV1::LoggingConfig]
114
- attr_accessor :logging_config
115
-
116
- # Optional A mapping of property names to values, used to configure Hadoop.
117
- # Properties that conflict with values set by the Cloud Dataproc API may be
118
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
119
- # in user code.
120
- # Corresponds to the JSON property `properties`
84
+ # The per-package log levels for the driver. This may include "root" package
85
+ # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
86
+ # org.apache = DEBUG'
87
+ # Corresponds to the JSON property `driverLogLevels`
121
88
  # @return [Hash<String,String>]
122
- attr_accessor :properties
123
-
124
- # Optional The arguments to pass to the driver. Do not include arguments, such
125
- # as -libjars or -Dfoo=bar, that can be set as job properties, since a collision
126
- # may occur that causes an incorrect job submission.
127
- # Corresponds to the JSON property `args`
128
- # @return [Array<String>]
129
- attr_accessor :args
130
-
131
- # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the
132
- # working directory of Hadoop drivers and distributed tasks. Useful for naively
133
- # parallel tasks.
134
- # Corresponds to the JSON property `fileUris`
135
- # @return [Array<String>]
136
- attr_accessor :file_uris
137
-
138
- # The name of the driver's main class. The jar file containing the class must be
139
- # in the default CLASSPATH or specified in jar_file_uris.
140
- # Corresponds to the JSON property `mainClass`
141
- # @return [String]
142
- attr_accessor :main_class
143
-
144
- # Optional HCFS URIs of archives to be extracted in the working directory of
145
- # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
146
- # zip.
147
- # Corresponds to the JSON property `archiveUris`
148
- # @return [Array<String>]
149
- attr_accessor :archive_uris
150
-
151
- # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
152
- # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
153
- # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
154
- # mapreduce-examples.jar'
155
- # Corresponds to the JSON property `mainJarFileUri`
156
- # @return [String]
157
- attr_accessor :main_jar_file_uri
89
+ attr_accessor :driver_log_levels
158
90
 
159
91
  def initialize(**args)
160
92
  update!(**args)
@@ -162,47 +94,105 @@ module Google
162
94
 
163
95
  # Update properties of this object
164
96
  def update!(**args)
165
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
166
- @logging_config = args[:logging_config] if args.key?(:logging_config)
167
- @properties = args[:properties] if args.key?(:properties)
168
- @args = args[:args] if args.key?(:args)
169
- @file_uris = args[:file_uris] if args.key?(:file_uris)
170
- @main_class = args[:main_class] if args.key?(:main_class)
171
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
172
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
97
+ @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
173
98
  end
174
99
  end
175
100
 
176
- # A request to collect cluster diagnostic information.
177
- class DiagnoseClusterRequest
101
+ # The location where output from diagnostic command can be found.
102
+ class DiagnoseClusterOutputLocation
178
103
  include Google::Apis::Core::Hashable
179
104
 
105
+ # Output-only The Google Cloud Storage URI of the diagnostic output. This will
106
+ # be a plain text file with summary of collected diagnostics.
107
+ # Corresponds to the JSON property `outputUri`
108
+ # @return [String]
109
+ attr_accessor :output_uri
110
+
180
111
  def initialize(**args)
181
112
  update!(**args)
182
113
  end
183
114
 
184
115
  # Update properties of this object
185
116
  def update!(**args)
117
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
186
118
  end
187
119
  end
188
120
 
189
- # Specifies the config of disk options for a group of VM instances.
190
- class DiskConfig
121
+ # This resource represents a long-running operation that is the result of a
122
+ # network API call.
123
+ class Operation
191
124
  include Google::Apis::Core::Hashable
192
125
 
193
- # Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
194
- # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
195
- # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
196
- # attached, this runtime bulk data is spread across them, and the boot disk
197
- # contains only basic config and installed binaries.
198
- # Corresponds to the JSON property `numLocalSsds`
199
- # @return [Fixnum]
200
- attr_accessor :num_local_ssds
126
+ # If the value is false, it means the operation is still in progress. If true,
127
+ # the operation is completed, and either error or response is available.
128
+ # Corresponds to the JSON property `done`
129
+ # @return [Boolean]
130
+ attr_accessor :done
131
+ alias_method :done?, :done
201
132
 
202
- # Optional Size in GB of the boot disk (default is 500GB).
203
- # Corresponds to the JSON property `bootDiskSizeGb`
204
- # @return [Fixnum]
205
- attr_accessor :boot_disk_size_gb
133
+ # The normal response of the operation in case of success. If the original
134
+ # method returns no data on success, such as Delete, the response is google.
135
+ # protobuf.Empty. If the original method is standard Get/Create/Update, the
136
+ # response should be the resource. For other methods, the response should have
137
+ # the type XxxResponse, where Xxx is the original method name. For example, if
138
+ # the original method name is TakeSnapshot(), the inferred response type is
139
+ # TakeSnapshotResponse.
140
+ # Corresponds to the JSON property `response`
141
+ # @return [Hash<String,Object>]
142
+ attr_accessor :response
143
+
144
+ # The server-assigned name, which is only unique within the same service that
145
+ # originally returns it. If you use the default HTTP mapping, the name should
146
+ # have the format of operations/some/unique/name.
147
+ # Corresponds to the JSON property `name`
148
+ # @return [String]
149
+ attr_accessor :name
150
+
151
+ # The Status type defines a logical error model that is suitable for different
152
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
153
+ # (https://github.com/grpc). The error model is designed to be:
154
+ # Simple to use and understand for most users
155
+ # Flexible enough to meet unexpected needsOverviewThe Status message contains
156
+ # three pieces of data: error code, error message, and error details. The error
157
+ # code should be an enum value of google.rpc.Code, but it may accept additional
158
+ # error codes if needed. The error message should be a developer-facing English
159
+ # message that helps developers understand and resolve the error. If a localized
160
+ # user-facing error message is needed, put the localized message in the error
161
+ # details or localize it in the client. The optional error details may contain
162
+ # arbitrary information about the error. There is a predefined set of error
163
+ # detail types in the package google.rpc which can be used for common error
164
+ # conditions.Language mappingThe Status message is the logical representation of
165
+ # the error model, but it is not necessarily the actual wire format. When the
166
+ # Status message is exposed in different client libraries and different wire
167
+ # protocols, it can be mapped differently. For example, it will likely be mapped
168
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
169
+ # Other usesThe error model and the Status message can be used in a variety of
170
+ # environments, either with or without APIs, to provide a consistent developer
171
+ # experience across different environments.Example uses of this error model
172
+ # include:
173
+ # Partial errors. If a service needs to return partial errors to the client, it
174
+ # may embed the Status in the normal response to indicate the partial errors.
175
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
176
+ # Status message for error reporting purpose.
177
+ # Batch operations. If a client uses batch request and batch response, the
178
+ # Status message should be used directly inside batch response, one for each
179
+ # error sub-response.
180
+ # Asynchronous operations. If an API call embeds asynchronous operation results
181
+ # in its response, the status of those operations should be represented directly
182
+ # using the Status message.
183
+ # Logging. If some API errors are stored in logs, the message Status could be
184
+ # used directly after any stripping needed for security/privacy reasons.
185
+ # Corresponds to the JSON property `error`
186
+ # @return [Google::Apis::DataprocV1::Status]
187
+ attr_accessor :error
188
+
189
+ # Service-specific metadata associated with the operation. It typically contains
190
+ # progress information and common metadata such as create time. Some services
191
+ # might not provide such metadata. Any method that returns a long-running
192
+ # operation should document the metadata type, if any.
193
+ # Corresponds to the JSON property `metadata`
194
+ # @return [Hash<String,Object>]
195
+ attr_accessor :metadata
206
196
 
207
197
  def initialize(**args)
208
198
  update!(**args)
@@ -210,54 +200,68 @@ module Google
210
200
 
211
201
  # Update properties of this object
212
202
  def update!(**args)
213
- @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
214
- @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
203
+ @done = args[:done] if args.key?(:done)
204
+ @response = args[:response] if args.key?(:response)
205
+ @name = args[:name] if args.key?(:name)
206
+ @error = args[:error] if args.key?(:error)
207
+ @metadata = args[:metadata] if args.key?(:metadata)
215
208
  end
216
209
  end
217
210
 
218
- # Metadata describing the operation.
219
- class ClusterOperationMetadata
211
+ # The status of the operation.
212
+ class OperationStatus
220
213
  include Google::Apis::Core::Hashable
221
214
 
222
- # Output-only Errors encountered during operation execution.
223
- # Corresponds to the JSON property `warnings`
224
- # @return [Array<String>]
225
- attr_accessor :warnings
215
+ # A message containing the detailed operation state.
216
+ # Corresponds to the JSON property `innerState`
217
+ # @return [String]
218
+ attr_accessor :inner_state
226
219
 
227
- # Output-only Labels associated with the operation
228
- # Corresponds to the JSON property `labels`
229
- # @return [Hash<String,String>]
230
- attr_accessor :labels
231
-
232
- # The status of the operation.
233
- # Corresponds to the JSON property `status`
234
- # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
235
- attr_accessor :status
236
-
237
- # Output-only The previous operation status.
238
- # Corresponds to the JSON property `statusHistory`
239
- # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
240
- attr_accessor :status_history
220
+ # The time this state was entered.
221
+ # Corresponds to the JSON property `stateStartTime`
222
+ # @return [String]
223
+ attr_accessor :state_start_time
241
224
 
242
- # Output-only Name of the cluster for the operation.
243
- # Corresponds to the JSON property `clusterName`
225
+ # A message containing the operation state.
226
+ # Corresponds to the JSON property `state`
244
227
  # @return [String]
245
- attr_accessor :cluster_name
228
+ attr_accessor :state
246
229
 
247
- # Output-only Cluster UUID for the operation.
248
- # Corresponds to the JSON property `clusterUuid`
230
+ # A message containing any operation metadata details.
231
+ # Corresponds to the JSON property `details`
249
232
  # @return [String]
250
- attr_accessor :cluster_uuid
233
+ attr_accessor :details
251
234
 
252
- # Output-only The operation type.
253
- # Corresponds to the JSON property `operationType`
235
+ def initialize(**args)
236
+ update!(**args)
237
+ end
238
+
239
+ # Update properties of this object
240
+ def update!(**args)
241
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
242
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
243
+ @state = args[:state] if args.key?(:state)
244
+ @details = args[:details] if args.key?(:details)
245
+ end
246
+ end
247
+
248
+ # Encapsulates the full scoping used to reference a job.
249
+ class JobReference
250
+ include Google::Apis::Core::Hashable
251
+
252
+ # Required The ID of the Google Cloud Platform project that the job belongs to.
253
+ # Corresponds to the JSON property `projectId`
254
254
  # @return [String]
255
- attr_accessor :operation_type
255
+ attr_accessor :project_id
256
256
 
257
- # Output-only Short description of operation.
258
- # Corresponds to the JSON property `description`
257
+ # Optional The job ID, which must be unique within the project. The job ID is
258
+ # generated by the server upon job submission or provided by the user as a means
259
+ # to perform retries without creating duplicate jobs. The ID must contain only
260
+ # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
261
+ # maximum length is 100 characters.
262
+ # Corresponds to the JSON property `jobId`
259
263
  # @return [String]
260
- attr_accessor :description
264
+ attr_accessor :job_id
261
265
 
262
266
  def initialize(**args)
263
267
  update!(**args)
@@ -265,78 +269,84 @@ module Google
265
269
 
266
270
  # Update properties of this object
267
271
  def update!(**args)
268
- @warnings = args[:warnings] if args.key?(:warnings)
269
- @labels = args[:labels] if args.key?(:labels)
270
- @status = args[:status] if args.key?(:status)
271
- @status_history = args[:status_history] if args.key?(:status_history)
272
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
273
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
274
- @operation_type = args[:operation_type] if args.key?(:operation_type)
275
- @description = args[:description] if args.key?(:description)
272
+ @project_id = args[:project_id] if args.key?(:project_id)
273
+ @job_id = args[:job_id] if args.key?(:job_id)
276
274
  end
277
275
  end
278
276
 
279
- # A generic empty message that you can re-use to avoid defining duplicated empty
280
- # messages in your APIs. A typical example is to use it as the request or the
281
- # response type of an API method. For instance:
282
- # service Foo `
283
- # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
284
- # `
285
- # The JSON representation for Empty is empty JSON object ``.
286
- class Empty
277
+ # A request to submit a job.
278
+ class SubmitJobRequest
287
279
  include Google::Apis::Core::Hashable
288
280
 
281
+ # A Cloud Dataproc job resource.
282
+ # Corresponds to the JSON property `job`
283
+ # @return [Google::Apis::DataprocV1::Job]
284
+ attr_accessor :job
285
+
289
286
  def initialize(**args)
290
287
  update!(**args)
291
288
  end
292
289
 
293
290
  # Update properties of this object
294
291
  def update!(**args)
292
+ @job = args[:job] if args.key?(:job)
295
293
  end
296
294
  end
297
295
 
298
- # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
299
- # queries on YARN.
300
- class HiveJob
296
+ # The Status type defines a logical error model that is suitable for different
297
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
298
+ # (https://github.com/grpc). The error model is designed to be:
299
+ # Simple to use and understand for most users
300
+ # Flexible enough to meet unexpected needsOverviewThe Status message contains
301
+ # three pieces of data: error code, error message, and error details. The error
302
+ # code should be an enum value of google.rpc.Code, but it may accept additional
303
+ # error codes if needed. The error message should be a developer-facing English
304
+ # message that helps developers understand and resolve the error. If a localized
305
+ # user-facing error message is needed, put the localized message in the error
306
+ # details or localize it in the client. The optional error details may contain
307
+ # arbitrary information about the error. There is a predefined set of error
308
+ # detail types in the package google.rpc which can be used for common error
309
+ # conditions.Language mappingThe Status message is the logical representation of
310
+ # the error model, but it is not necessarily the actual wire format. When the
311
+ # Status message is exposed in different client libraries and different wire
312
+ # protocols, it can be mapped differently. For example, it will likely be mapped
313
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
314
+ # Other usesThe error model and the Status message can be used in a variety of
315
+ # environments, either with or without APIs, to provide a consistent developer
316
+ # experience across different environments.Example uses of this error model
317
+ # include:
318
+ # Partial errors. If a service needs to return partial errors to the client, it
319
+ # may embed the Status in the normal response to indicate the partial errors.
320
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
321
+ # Status message for error reporting purpose.
322
+ # Batch operations. If a client uses batch request and batch response, the
323
+ # Status message should be used directly inside batch response, one for each
324
+ # error sub-response.
325
+ # Asynchronous operations. If an API call embeds asynchronous operation results
326
+ # in its response, the status of those operations should be represented directly
327
+ # using the Status message.
328
+ # Logging. If some API errors are stored in logs, the message Status could be
329
+ # used directly after any stripping needed for security/privacy reasons.
330
+ class Status
301
331
  include Google::Apis::Core::Hashable
302
332
 
303
- # A list of queries to run on a cluster.
304
- # Corresponds to the JSON property `queryList`
305
- # @return [Google::Apis::DataprocV1::QueryList]
306
- attr_accessor :query_list
333
+ # The status code, which should be an enum value of google.rpc.Code.
334
+ # Corresponds to the JSON property `code`
335
+ # @return [Fixnum]
336
+ attr_accessor :code
307
337
 
308
- # The HCFS URI of the script that contains Hive queries.
309
- # Corresponds to the JSON property `queryFileUri`
338
+ # A developer-facing error message, which should be in English. Any user-facing
339
+ # error message should be localized and sent in the google.rpc.Status.details
340
+ # field, or localized by the client.
341
+ # Corresponds to the JSON property `message`
310
342
  # @return [String]
311
- attr_accessor :query_file_uri
312
-
313
- # Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and
314
- # Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
315
- # Corresponds to the JSON property `jarFileUris`
316
- # @return [Array<String>]
317
- attr_accessor :jar_file_uris
318
-
319
- # Optional Mapping of query variable names to values (equivalent to the Hive
320
- # command: SET name="value";).
321
- # Corresponds to the JSON property `scriptVariables`
322
- # @return [Hash<String,String>]
323
- attr_accessor :script_variables
324
-
325
- # Optional A mapping of property names and values, used to configure Hive.
326
- # Properties that conflict with values set by the Cloud Dataproc API may be
327
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
328
- # hive/conf/hive-site.xml, and classes in user code.
329
- # Corresponds to the JSON property `properties`
330
- # @return [Hash<String,String>]
331
- attr_accessor :properties
343
+ attr_accessor :message
332
344
 
333
- # Optional Whether to continue executing queries if a query fails. The default
334
- # value is false. Setting to true can be useful when executing independent
335
- # parallel queries.
336
- # Corresponds to the JSON property `continueOnFailure`
337
- # @return [Boolean]
338
- attr_accessor :continue_on_failure
339
- alias_method :continue_on_failure?, :continue_on_failure
345
+ # A list of messages that carry the error details. There will be a common set of
346
+ # message types for APIs to use.
347
+ # Corresponds to the JSON property `details`
348
+ # @return [Array<Hash<String,Object>>]
349
+ attr_accessor :details
340
350
 
341
351
  def initialize(**args)
342
352
  update!(**args)
@@ -344,24 +354,65 @@ module Google
344
354
 
345
355
  # Update properties of this object
346
356
  def update!(**args)
347
- @query_list = args[:query_list] if args.key?(:query_list)
348
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
349
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
350
- @script_variables = args[:script_variables] if args.key?(:script_variables)
351
- @properties = args[:properties] if args.key?(:properties)
352
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
357
+ @code = args[:code] if args.key?(:code)
358
+ @message = args[:message] if args.key?(:message)
359
+ @details = args[:details] if args.key?(:details)
353
360
  end
354
361
  end
355
362
 
356
- # The location of diagnostic output.
357
- class DiagnoseClusterResults
363
+ # Optional The config settings for Google Compute Engine resources in an
364
+ # instance group, such as a master or worker group.
365
+ class InstanceGroupConfig
358
366
  include Google::Apis::Core::Hashable
359
367
 
360
- # Output-only The Google Cloud Storage URI of the diagnostic output. The output
361
- # report is a plain text file with a summary of collected diagnostics.
362
- # Corresponds to the JSON property `outputUri`
368
+ # Specifies the resources used to actively manage an instance group.
369
+ # Corresponds to the JSON property `managedGroupConfig`
370
+ # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
371
+ attr_accessor :managed_group_config
372
+
373
+ # Optional Specifies that this instance group contains preemptible instances.
374
+ # Corresponds to the JSON property `isPreemptible`
375
+ # @return [Boolean]
376
+ attr_accessor :is_preemptible
377
+ alias_method :is_preemptible?, :is_preemptible
378
+
379
+ # Required The Google Compute Engine machine type used for cluster instances.
380
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
381
+ # east1-a/machineTypes/n1-standard-2.
382
+ # Corresponds to the JSON property `machineTypeUri`
363
383
  # @return [String]
364
- attr_accessor :output_uri
384
+ attr_accessor :machine_type_uri
385
+
386
+ # Output-only The Google Compute Engine image resource used for cluster
387
+ # instances. Inferred from SoftwareConfig.image_version.
388
+ # Corresponds to the JSON property `imageUri`
389
+ # @return [String]
390
+ attr_accessor :image_uri
391
+
392
+ # Optional The list of instance names. Cloud Dataproc derives the names from
393
+ # cluster_name, num_instances, and the instance group if not set by user (
394
+ # recommended practice is to let Cloud Dataproc derive the name).
395
+ # Corresponds to the JSON property `instanceNames`
396
+ # @return [Array<String>]
397
+ attr_accessor :instance_names
398
+
399
+ # Optional The Google Compute Engine accelerator configuration for these
400
+ # instances.Beta Feature: This feature is still under development. It may be
401
+ # changed before final release.
402
+ # Corresponds to the JSON property `accelerators`
403
+ # @return [Array<Google::Apis::DataprocV1::AcceleratorConfig>]
404
+ attr_accessor :accelerators
405
+
406
+ # Required The number of VM instances in the instance group. For master instance
407
+ # groups, must be set to 1.
408
+ # Corresponds to the JSON property `numInstances`
409
+ # @return [Fixnum]
410
+ attr_accessor :num_instances
411
+
412
+ # Specifies the config of disk options for a group of VM instances.
413
+ # Corresponds to the JSON property `diskConfig`
414
+ # @return [Google::Apis::DataprocV1::DiskConfig]
415
+ attr_accessor :disk_config
365
416
 
366
417
  def initialize(**args)
367
418
  update!(**args)
@@ -369,67 +420,84 @@ module Google
369
420
 
370
421
  # Update properties of this object
371
422
  def update!(**args)
372
- @output_uri = args[:output_uri] if args.key?(:output_uri)
423
+ @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
424
+ @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
425
+ @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
426
+ @image_uri = args[:image_uri] if args.key?(:image_uri)
427
+ @instance_names = args[:instance_names] if args.key?(:instance_names)
428
+ @accelerators = args[:accelerators] if args.key?(:accelerators)
429
+ @num_instances = args[:num_instances] if args.key?(:num_instances)
430
+ @disk_config = args[:disk_config] if args.key?(:disk_config)
373
431
  end
374
432
  end
375
433
 
376
- # The cluster config.
377
- class ClusterConfig
434
+ # Job scheduling options.Beta Feature: These options are available for testing
435
+ # purposes only. They may be changed before final release.
436
+ class JobScheduling
378
437
  include Google::Apis::Core::Hashable
379
438
 
380
- # Common config settings for resources of Google Compute Engine cluster
381
- # instances, applicable to all instances in the cluster.
382
- # Corresponds to the JSON property `gceClusterConfig`
383
- # @return [Google::Apis::DataprocV1::GceClusterConfig]
384
- attr_accessor :gce_cluster_config
439
+ # Optional Maximum number of times per hour a driver may be restarted as a
440
+ # result of driver terminating with non-zero code before job is reported failed.
441
+ # A job may be reported as thrashing if driver exits with non-zero code 4 times
442
+ # within 10 minute window.Maximum value is 10.
443
+ # Corresponds to the JSON property `maxFailuresPerHour`
444
+ # @return [Fixnum]
445
+ attr_accessor :max_failures_per_hour
385
446
 
386
- # Specifies the selection and config of software inside the cluster.
387
- # Corresponds to the JSON property `softwareConfig`
388
- # @return [Google::Apis::DataprocV1::SoftwareConfig]
389
- attr_accessor :software_config
447
+ def initialize(**args)
448
+ update!(**args)
449
+ end
390
450
 
391
- # Optional The config settings for Google Compute Engine resources in an
392
- # instance group, such as a master or worker group.
393
- # Corresponds to the JSON property `masterConfig`
394
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
395
- attr_accessor :master_config
451
+ # Update properties of this object
452
+ def update!(**args)
453
+ @max_failures_per_hour = args[:max_failures_per_hour] if args.key?(:max_failures_per_hour)
454
+ end
455
+ end
396
456
 
397
- # Optional The config settings for Google Compute Engine resources in an
398
- # instance group, such as a master or worker group.
399
- # Corresponds to the JSON property `secondaryWorkerConfig`
400
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
401
- attr_accessor :secondary_worker_config
457
+ # A list of jobs in a project.
458
+ class ListJobsResponse
459
+ include Google::Apis::Core::Hashable
402
460
 
403
- # Optional Commands to execute on each node after config is completed. By
404
- # default, executables are run on master and all worker nodes. You can test a
405
- # node's <code>role</code> metadata to run an executable on a master or worker
406
- # node, as shown below using curl (you can also use wget):
407
- # ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/
408
- # instance/attributes/dataproc-role)
409
- # if [[ "$`ROLE`" == 'Master' ]]; then
410
- # ... master specific actions ...
411
- # else
412
- # ... worker specific actions ...
413
- # fi
414
- # Corresponds to the JSON property `initializationActions`
415
- # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
416
- attr_accessor :initialization_actions
461
+ # Optional This token is included in the response if there are more results to
462
+ # fetch. To fetch additional results, provide this value as the page_token in a
463
+ # subsequent <code>ListJobsRequest</code>.
464
+ # Corresponds to the JSON property `nextPageToken`
465
+ # @return [String]
466
+ attr_accessor :next_page_token
417
467
 
418
- # Optional A Google Cloud Storage staging bucket used for sharing generated SSH
419
- # keys and config. If you do not specify a staging bucket, Cloud Dataproc will
420
- # determine an appropriate Cloud Storage location (US, ASIA, or EU) for your
421
- # cluster's staging bucket according to the Google Compute Engine zone where
422
- # your cluster is deployed, and then it will create and manage this project-
423
- # level, per-location bucket for you.
424
- # Corresponds to the JSON property `configBucket`
468
+ # Output-only Jobs list.
469
+ # Corresponds to the JSON property `jobs`
470
+ # @return [Array<Google::Apis::DataprocV1::Job>]
471
+ attr_accessor :jobs
472
+
473
+ def initialize(**args)
474
+ update!(**args)
475
+ end
476
+
477
+ # Update properties of this object
478
+ def update!(**args)
479
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
480
+ @jobs = args[:jobs] if args.key?(:jobs)
481
+ end
482
+ end
483
+
484
+ # Specifies an executable to run on a fully configured node and a timeout period
485
+ # for executable completion.
486
+ class NodeInitializationAction
487
+ include Google::Apis::Core::Hashable
488
+
489
+ # Optional Amount of time executable has to complete. Default is 10 minutes.
490
+ # Cluster creation fails with an explanatory error message (the name of the
491
+ # executable that caused the error and the exceeded timeout period) if the
492
+ # executable is not completed at end of the timeout period.
493
+ # Corresponds to the JSON property `executionTimeout`
425
494
  # @return [String]
426
- attr_accessor :config_bucket
495
+ attr_accessor :execution_timeout
427
496
 
428
- # Optional The config settings for Google Compute Engine resources in an
429
- # instance group, such as a master or worker group.
430
- # Corresponds to the JSON property `workerConfig`
431
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
432
- attr_accessor :worker_config
497
+ # Required Google Cloud Storage URI of executable file.
498
+ # Corresponds to the JSON property `executableFile`
499
+ # @return [String]
500
+ attr_accessor :executable_file
433
501
 
434
502
  def initialize(**args)
435
503
  update!(**args)
@@ -437,35 +505,46 @@ module Google
437
505
 
438
506
  # Update properties of this object
439
507
  def update!(**args)
440
- @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
441
- @software_config = args[:software_config] if args.key?(:software_config)
442
- @master_config = args[:master_config] if args.key?(:master_config)
443
- @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
444
- @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
445
- @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
446
- @worker_config = args[:worker_config] if args.key?(:worker_config)
508
+ @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
509
+ @executable_file = args[:executable_file] if args.key?(:executable_file)
447
510
  end
448
511
  end
449
512
 
450
- # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
451
- # 0.9.0/python-programming-guide.html) applications on YARN.
452
- class PySparkJob
513
+ # A request to cancel a job.
514
+ class CancelJobRequest
453
515
  include Google::Apis::Core::Hashable
454
516
 
455
- # Required The HCFS URI of the main Python file to use as the driver. Must be a .
456
- # py file.
457
- # Corresponds to the JSON property `mainPythonFileUri`
517
+ def initialize(**args)
518
+ update!(**args)
519
+ end
520
+
521
+ # Update properties of this object
522
+ def update!(**args)
523
+ end
524
+ end
525
+
526
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
527
+ # ) queries.
528
+ class SparkSqlJob
529
+ include Google::Apis::Core::Hashable
530
+
531
+ # The HCFS URI of the script that contains SQL queries.
532
+ # Corresponds to the JSON property `queryFileUri`
458
533
  # @return [String]
459
- attr_accessor :main_python_file_uri
534
+ attr_accessor :query_file_uri
460
535
 
461
- # Optional HCFS URIs of archives to be extracted in the working directory of .
462
- # jar, .tar, .tar.gz, .tgz, and .zip.
463
- # Corresponds to the JSON property `archiveUris`
464
- # @return [Array<String>]
465
- attr_accessor :archive_uris
536
+ # A list of queries to run on a cluster.
537
+ # Corresponds to the JSON property `queryList`
538
+ # @return [Google::Apis::DataprocV1::QueryList]
539
+ attr_accessor :query_list
466
540
 
467
- # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver
468
- # and tasks.
541
+ # Optional Mapping of query variable names to values (equivalent to the Spark
542
+ # SQL command: SET name="value";).
543
+ # Corresponds to the JSON property `scriptVariables`
544
+ # @return [Hash<String,String>]
545
+ attr_accessor :script_variables
546
+
547
+ # Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.
469
548
  # Corresponds to the JSON property `jarFileUris`
470
549
  # @return [Array<String>]
471
550
  attr_accessor :jar_file_uris
@@ -475,126 +554,111 @@ module Google
475
554
  # @return [Google::Apis::DataprocV1::LoggingConfig]
476
555
  attr_accessor :logging_config
477
556
 
478
- # Optional A mapping of property names to values, used to configure PySpark.
479
- # Properties that conflict with values set by the Cloud Dataproc API may be
480
- # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
481
- # and classes in user code.
557
+ # Optional A mapping of property names to values, used to configure Spark SQL's
558
+ # SparkConf. Properties that conflict with values set by the Cloud Dataproc API
559
+ # may be overwritten.
482
560
  # Corresponds to the JSON property `properties`
483
561
  # @return [Hash<String,String>]
484
562
  attr_accessor :properties
485
563
 
486
- # Optional The arguments to pass to the driver. Do not include arguments, such
487
- # as --conf, that can be set as job properties, since a collision may occur that
488
- # causes an incorrect job submission.
489
- # Corresponds to the JSON property `args`
490
- # @return [Array<String>]
491
- attr_accessor :args
492
-
493
- # Optional HCFS URIs of files to be copied to the working directory of Python
494
- # drivers and distributed tasks. Useful for naively parallel tasks.
495
- # Corresponds to the JSON property `fileUris`
496
- # @return [Array<String>]
497
- attr_accessor :file_uris
498
-
499
- # Optional HCFS file URIs of Python files to pass to the PySpark framework.
500
- # Supported file types: .py, .egg, and .zip.
501
- # Corresponds to the JSON property `pythonFileUris`
502
- # @return [Array<String>]
503
- attr_accessor :python_file_uris
504
-
505
564
  def initialize(**args)
506
565
  update!(**args)
507
566
  end
508
567
 
509
568
  # Update properties of this object
510
569
  def update!(**args)
511
- @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
512
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
570
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
571
+ @query_list = args[:query_list] if args.key?(:query_list)
572
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
513
573
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
514
574
  @logging_config = args[:logging_config] if args.key?(:logging_config)
515
575
  @properties = args[:properties] if args.key?(:properties)
516
- @args = args[:args] if args.key?(:args)
517
- @file_uris = args[:file_uris] if args.key?(:file_uris)
518
- @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
519
576
  end
520
577
  end
521
578
 
522
- # Common config settings for resources of Google Compute Engine cluster
523
- # instances, applicable to all instances in the cluster.
524
- class GceClusterConfig
579
+ # Describes the identifying information, config, and status of a cluster of
580
+ # Google Compute Engine instances.
581
+ class Cluster
525
582
  include Google::Apis::Core::Hashable
526
583
 
527
- # The Google Compute Engine tags to add to all instances (see Tagging instances).
528
- # Corresponds to the JSON property `tags`
529
- # @return [Array<String>]
530
- attr_accessor :tags
584
+ # Optional The labels to associate with this cluster. Label keys must contain 1
585
+ # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
586
+ # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
587
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
588
+ # . No more than 32 labels can be associated with a cluster.
589
+ # Corresponds to the JSON property `labels`
590
+ # @return [Hash<String,String>]
591
+ attr_accessor :labels
531
592
 
532
- # Optional The service account of the instances. Defaults to the default Google
533
- # Compute Engine service account. Custom service accounts need permissions
534
- # equivalent to the folloing IAM roles:
535
- # roles/logging.logWriter
536
- # roles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/
537
- # service-accounts#custom_service_accounts for more information). Example: [
538
- # account_id]@[project_id].iam.gserviceaccount.com
539
- # Corresponds to the JSON property `serviceAccount`
540
- # @return [String]
541
- attr_accessor :service_account
593
+ # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
594
+ # This report is available for testing purposes only. It may be changed before
595
+ # final release.
596
+ # Corresponds to the JSON property `metrics`
597
+ # @return [Google::Apis::DataprocV1::ClusterMetrics]
598
+ attr_accessor :metrics
542
599
 
543
- # Optional The Google Compute Engine subnetwork to be used for machine
544
- # communications. Cannot be specified with network_uri. Example: https://www.
545
- # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
546
- # Corresponds to the JSON property `subnetworkUri`
600
+ # The status of a cluster and its instances.
601
+ # Corresponds to the JSON property `status`
602
+ # @return [Google::Apis::DataprocV1::ClusterStatus]
603
+ attr_accessor :status
604
+
605
+ # The cluster config.
606
+ # Corresponds to the JSON property `config`
607
+ # @return [Google::Apis::DataprocV1::ClusterConfig]
608
+ attr_accessor :config
609
+
610
+ # Output-only The previous cluster status.
611
+ # Corresponds to the JSON property `statusHistory`
612
+ # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
613
+ attr_accessor :status_history
614
+
615
+ # Required The cluster name. Cluster names within a project must be unique.
616
+ # Names of deleted clusters can be reused.
617
+ # Corresponds to the JSON property `clusterName`
547
618
  # @return [String]
548
- attr_accessor :subnetwork_uri
619
+ attr_accessor :cluster_name
549
620
 
550
- # Optional The Google Compute Engine network to be used for machine
551
- # communications. Cannot be specified with subnetwork_uri. If neither
552
- # network_uri nor subnetwork_uri is specified, the "default" network of the
553
- # project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using
554
- # Subnetworks for more information). Example: https://www.googleapis.com/compute/
555
- # v1/projects/[project_id]/regions/global/default.
556
- # Corresponds to the JSON property `networkUri`
621
+ # Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
622
+ # generates this value when it creates the cluster.
623
+ # Corresponds to the JSON property `clusterUuid`
557
624
  # @return [String]
558
- attr_accessor :network_uri
625
+ attr_accessor :cluster_uuid
559
626
 
560
- # Required The zone where the Google Compute Engine cluster will be located.
561
- # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
562
- # zone].
563
- # Corresponds to the JSON property `zoneUri`
627
+ # Required The Google Cloud Platform project ID that the cluster belongs to.
628
+ # Corresponds to the JSON property `projectId`
564
629
  # @return [String]
565
- attr_accessor :zone_uri
630
+ attr_accessor :project_id
566
631
 
567
- # Optional If true, all instances in the cluster will only have internal IP
568
- # addresses. By default, clusters are not restricted to internal IP addresses,
569
- # and will have ephemeral external IP addresses assigned to each instance. This
570
- # internal_ip_only restriction can only be enabled for subnetwork enabled
571
- # networks, and all off-cluster dependencies must be configured to be accessible
572
- # without external IP addresses.
573
- # Corresponds to the JSON property `internalIpOnly`
574
- # @return [Boolean]
575
- attr_accessor :internal_ip_only
576
- alias_method :internal_ip_only?, :internal_ip_only
632
+ def initialize(**args)
633
+ update!(**args)
634
+ end
577
635
 
578
- # The Google Compute Engine metadata entries to add to all instances (see
579
- # Project and instance metadata (https://cloud.google.com/compute/docs/storing-
580
- # retrieving-metadata#project_and_instance_metadata)).
581
- # Corresponds to the JSON property `metadata`
582
- # @return [Hash<String,String>]
583
- attr_accessor :metadata
636
+ # Update properties of this object
637
+ def update!(**args)
638
+ @labels = args[:labels] if args.key?(:labels)
639
+ @metrics = args[:metrics] if args.key?(:metrics)
640
+ @status = args[:status] if args.key?(:status)
641
+ @config = args[:config] if args.key?(:config)
642
+ @status_history = args[:status_history] if args.key?(:status_history)
643
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
644
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
645
+ @project_id = args[:project_id] if args.key?(:project_id)
646
+ end
647
+ end
584
648
 
585
- # Optional The URIs of service account scopes to be included in Google Compute
586
- # Engine instances. The following base set of scopes is always included:
587
- # https://www.googleapis.com/auth/cloud.useraccounts.readonly
588
- # https://www.googleapis.com/auth/devstorage.read_write
589
- # https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the
590
- # following defaults are also provided:
591
- # https://www.googleapis.com/auth/bigquery
592
- # https://www.googleapis.com/auth/bigtable.admin.table
593
- # https://www.googleapis.com/auth/bigtable.data
594
- # https://www.googleapis.com/auth/devstorage.full_control
595
- # Corresponds to the JSON property `serviceAccountScopes`
596
- # @return [Array<String>]
597
- attr_accessor :service_account_scopes
649
+ # The response message for Operations.ListOperations.
650
+ class ListOperationsResponse
651
+ include Google::Apis::Core::Hashable
652
+
653
+ # The standard List next-page token.
654
+ # Corresponds to the JSON property `nextPageToken`
655
+ # @return [String]
656
+ attr_accessor :next_page_token
657
+
658
+ # A list of operations that matches the specified filter in the request.
659
+ # Corresponds to the JSON property `operations`
660
+ # @return [Array<Google::Apis::DataprocV1::Operation>]
661
+ attr_accessor :operations
598
662
 
599
663
  def initialize(**args)
600
664
  update!(**args)
@@ -602,32 +666,79 @@ module Google
602
666
 
603
667
  # Update properties of this object
604
668
  def update!(**args)
605
- @tags = args[:tags] if args.key?(:tags)
606
- @service_account = args[:service_account] if args.key?(:service_account)
607
- @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
608
- @network_uri = args[:network_uri] if args.key?(:network_uri)
609
- @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
610
- @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
611
- @metadata = args[:metadata] if args.key?(:metadata)
612
- @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
669
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
670
+ @operations = args[:operations] if args.key?(:operations)
613
671
  end
614
672
  end
615
673
 
616
- # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
617
- # This report is available for testing purposes only. It may be changed before
618
- # final release.
619
- class ClusterMetrics
674
+ # Metadata describing the operation.
675
+ class OperationMetadata
620
676
  include Google::Apis::Core::Hashable
621
677
 
622
- # The HDFS metrics.
623
- # Corresponds to the JSON property `hdfsMetrics`
624
- # @return [Hash<String,String>]
625
- attr_accessor :hdfs_metrics
678
+ # Output-only Previous operation status.
679
+ # Corresponds to the JSON property `statusHistory`
680
+ # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
681
+ attr_accessor :status_history
626
682
 
627
- # The YARN metrics.
628
- # Corresponds to the JSON property `yarnMetrics`
629
- # @return [Hash<String,String>]
630
- attr_accessor :yarn_metrics
683
+ # Output-only The operation type.
684
+ # Corresponds to the JSON property `operationType`
685
+ # @return [String]
686
+ attr_accessor :operation_type
687
+
688
+ # Output-only Short description of operation.
689
+ # Corresponds to the JSON property `description`
690
+ # @return [String]
691
+ attr_accessor :description
692
+
693
+ # The status of the operation.
694
+ # Corresponds to the JSON property `status`
695
+ # @return [Google::Apis::DataprocV1::OperationStatus]
696
+ attr_accessor :status
697
+
698
+ # A message containing any operation metadata details.
699
+ # Corresponds to the JSON property `details`
700
+ # @return [String]
701
+ attr_accessor :details
702
+
703
+ # A message containing the operation state.
704
+ # Corresponds to the JSON property `state`
705
+ # @return [String]
706
+ attr_accessor :state
707
+
708
+ # Name of the cluster for the operation.
709
+ # Corresponds to the JSON property `clusterName`
710
+ # @return [String]
711
+ attr_accessor :cluster_name
712
+
713
+ # Cluster UUId for the operation.
714
+ # Corresponds to the JSON property `clusterUuid`
715
+ # @return [String]
716
+ attr_accessor :cluster_uuid
717
+
718
+ # A message containing the detailed operation state.
719
+ # Corresponds to the JSON property `innerState`
720
+ # @return [String]
721
+ attr_accessor :inner_state
722
+
723
+ # The time that the operation completed.
724
+ # Corresponds to the JSON property `endTime`
725
+ # @return [String]
726
+ attr_accessor :end_time
727
+
728
+ # The time that the operation was started by the server.
729
+ # Corresponds to the JSON property `startTime`
730
+ # @return [String]
731
+ attr_accessor :start_time
732
+
733
+ # Output-only Errors encountered during operation execution.
734
+ # Corresponds to the JSON property `warnings`
735
+ # @return [Array<String>]
736
+ attr_accessor :warnings
737
+
738
+ # The time that the operation was requested.
739
+ # Corresponds to the JSON property `insertTime`
740
+ # @return [String]
741
+ attr_accessor :insert_time
631
742
 
632
743
  def initialize(**args)
633
744
  update!(**args)
@@ -635,21 +746,36 @@ module Google
635
746
 
636
747
  # Update properties of this object
637
748
  def update!(**args)
638
- @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
639
- @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
749
+ @status_history = args[:status_history] if args.key?(:status_history)
750
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
751
+ @description = args[:description] if args.key?(:description)
752
+ @status = args[:status] if args.key?(:status)
753
+ @details = args[:details] if args.key?(:details)
754
+ @state = args[:state] if args.key?(:state)
755
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
756
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
757
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
758
+ @end_time = args[:end_time] if args.key?(:end_time)
759
+ @start_time = args[:start_time] if args.key?(:start_time)
760
+ @warnings = args[:warnings] if args.key?(:warnings)
761
+ @insert_time = args[:insert_time] if args.key?(:insert_time)
640
762
  end
641
763
  end
642
764
 
643
- # The runtime logging config of the job.
644
- class LoggingConfig
765
+ # Cloud Dataproc job config.
766
+ class JobPlacement
645
767
  include Google::Apis::Core::Hashable
646
768
 
647
- # The per-package log levels for the driver. This may include "root" package
648
- # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
649
- # org.apache = DEBUG'
650
- # Corresponds to the JSON property `driverLogLevels`
651
- # @return [Hash<String,String>]
652
- attr_accessor :driver_log_levels
769
+ # Required The name of the cluster where the job will be submitted.
770
+ # Corresponds to the JSON property `clusterName`
771
+ # @return [String]
772
+ attr_accessor :cluster_name
773
+
774
+ # Output-only A cluster UUID generated by the Cloud Dataproc service when the
775
+ # job is submitted.
776
+ # Corresponds to the JSON property `clusterUuid`
777
+ # @return [String]
778
+ attr_accessor :cluster_uuid
653
779
 
654
780
  def initialize(**args)
655
781
  update!(**args)
@@ -657,19 +783,35 @@ module Google
657
783
 
658
784
  # Update properties of this object
659
785
  def update!(**args)
660
- @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
786
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
787
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
661
788
  end
662
789
  end
663
790
 
664
- # The location where output from diagnostic command can be found.
665
- class DiagnoseClusterOutputLocation
791
+ # Specifies the selection and config of software inside the cluster.
792
+ class SoftwareConfig
666
793
  include Google::Apis::Core::Hashable
667
794
 
668
- # Output-only The Google Cloud Storage URI of the diagnostic output. This will
669
- # be a plain text file with summary of collected diagnostics.
670
- # Corresponds to the JSON property `outputUri`
795
+ # Optional The version of software inside the cluster. It must match the regular
796
+ # expression [0-9]+\.[0-9]+. If unspecified, it defaults to the latest version (
797
+ # see Cloud Dataproc Versioning).
798
+ # Corresponds to the JSON property `imageVersion`
671
799
  # @return [String]
672
- attr_accessor :output_uri
800
+ attr_accessor :image_version
801
+
802
+ # Optional The properties to set on daemon config files.Property keys are
803
+ # specified in prefix:property format, such as core:fs.defaultFS. The following
804
+ # are supported prefixes and their mappings:
805
+ # core: core-site.xml
806
+ # hdfs: hdfs-site.xml
807
+ # mapred: mapred-site.xml
808
+ # yarn: yarn-site.xml
809
+ # hive: hive-site.xml
810
+ # pig: pig.properties
811
+ # spark: spark-defaults.conf
812
+ # Corresponds to the JSON property `properties`
813
+ # @return [Hash<String,String>]
814
+ attr_accessor :properties
673
815
 
674
816
  def initialize(**args)
675
817
  update!(**args)
@@ -677,85 +819,58 @@ module Google
677
819
 
678
820
  # Update properties of this object
679
821
  def update!(**args)
680
- @output_uri = args[:output_uri] if args.key?(:output_uri)
822
+ @image_version = args[:image_version] if args.key?(:image_version)
823
+ @properties = args[:properties] if args.key?(:properties)
681
824
  end
682
825
  end
683
826
 
684
- # This resource represents a long-running operation that is the result of a
685
- # network API call.
686
- class Operation
827
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
828
+ # on YARN.
829
+ class PigJob
687
830
  include Google::Apis::Core::Hashable
688
831
 
689
- # If the value is false, it means the operation is still in progress. If true,
690
- # the operation is completed, and either error or response is available.
691
- # Corresponds to the JSON property `done`
832
+ # Optional Whether to continue executing queries if a query fails. The default
833
+ # value is false. Setting to true can be useful when executing independent
834
+ # parallel queries.
835
+ # Corresponds to the JSON property `continueOnFailure`
692
836
  # @return [Boolean]
693
- attr_accessor :done
694
- alias_method :done?, :done
837
+ attr_accessor :continue_on_failure
838
+ alias_method :continue_on_failure?, :continue_on_failure
695
839
 
696
- # The normal response of the operation in case of success. If the original
697
- # method returns no data on success, such as Delete, the response is google.
698
- # protobuf.Empty. If the original method is standard Get/Create/Update, the
699
- # response should be the resource. For other methods, the response should have
700
- # the type XxxResponse, where Xxx is the original method name. For example, if
701
- # the original method name is TakeSnapshot(), the inferred response type is
702
- # TakeSnapshotResponse.
703
- # Corresponds to the JSON property `response`
704
- # @return [Hash<String,Object>]
705
- attr_accessor :response
840
+ # A list of queries to run on a cluster.
841
+ # Corresponds to the JSON property `queryList`
842
+ # @return [Google::Apis::DataprocV1::QueryList]
843
+ attr_accessor :query_list
706
844
 
707
- # The server-assigned name, which is only unique within the same service that
708
- # originally returns it. If you use the default HTTP mapping, the name should
709
- # have the format of operations/some/unique/name.
710
- # Corresponds to the JSON property `name`
845
+ # The HCFS URI of the script that contains the Pig queries.
846
+ # Corresponds to the JSON property `queryFileUri`
711
847
  # @return [String]
712
- attr_accessor :name
848
+ attr_accessor :query_file_uri
713
849
 
714
- # The Status type defines a logical error model that is suitable for different
715
- # programming environments, including REST APIs and RPC APIs. It is used by gRPC
716
- # (https://github.com/grpc). The error model is designed to be:
717
- # Simple to use and understand for most users
718
- # Flexible enough to meet unexpected needsOverviewThe Status message contains
719
- # three pieces of data: error code, error message, and error details. The error
720
- # code should be an enum value of google.rpc.Code, but it may accept additional
721
- # error codes if needed. The error message should be a developer-facing English
722
- # message that helps developers understand and resolve the error. If a localized
723
- # user-facing error message is needed, put the localized message in the error
724
- # details or localize it in the client. The optional error details may contain
725
- # arbitrary information about the error. There is a predefined set of error
726
- # detail types in the package google.rpc which can be used for common error
727
- # conditions.Language mappingThe Status message is the logical representation of
728
- # the error model, but it is not necessarily the actual wire format. When the
729
- # Status message is exposed in different client libraries and different wire
730
- # protocols, it can be mapped differently. For example, it will likely be mapped
731
- # to some exceptions in Java, but more likely mapped to some error codes in C.
732
- # Other usesThe error model and the Status message can be used in a variety of
733
- # environments, either with or without APIs, to provide a consistent developer
734
- # experience across different environments.Example uses of this error model
735
- # include:
736
- # Partial errors. If a service needs to return partial errors to the client, it
737
- # may embed the Status in the normal response to indicate the partial errors.
738
- # Workflow errors. A typical workflow has multiple steps. Each step may have a
739
- # Status message for error reporting purpose.
740
- # Batch operations. If a client uses batch request and batch response, the
741
- # Status message should be used directly inside batch response, one for each
742
- # error sub-response.
743
- # Asynchronous operations. If an API call embeds asynchronous operation results
744
- # in its response, the status of those operations should be represented directly
745
- # using the Status message.
746
- # Logging. If some API errors are stored in logs, the message Status could be
747
- # used directly after any stripping needed for security/privacy reasons.
748
- # Corresponds to the JSON property `error`
749
- # @return [Google::Apis::DataprocV1::Status]
750
- attr_accessor :error
850
+ # Optional Mapping of query variable names to values (equivalent to the Pig
851
+ # command: name=[value]).
852
+ # Corresponds to the JSON property `scriptVariables`
853
+ # @return [Hash<String,String>]
854
+ attr_accessor :script_variables
751
855
 
752
- # Service-specific metadata associated with the operation. It typically contains
753
- # progress information and common metadata such as create time. Some services
754
- # might not provide such metadata. Any method that returns a long-running
755
- # operation should document the metadata type, if any.
756
- # Corresponds to the JSON property `metadata`
757
- # @return [Hash<String,Object>]
758
- attr_accessor :metadata
856
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and
857
+ # Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
858
+ # Corresponds to the JSON property `jarFileUris`
859
+ # @return [Array<String>]
860
+ attr_accessor :jar_file_uris
861
+
862
+ # The runtime logging config of the job.
863
+ # Corresponds to the JSON property `loggingConfig`
864
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
865
+ attr_accessor :logging_config
866
+
867
+ # Optional A mapping of property names to values, used to configure Pig.
868
+ # Properties that conflict with values set by the Cloud Dataproc API may be
869
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
870
+ # pig/conf/pig.properties, and classes in user code.
871
+ # Corresponds to the JSON property `properties`
872
+ # @return [Hash<String,String>]
873
+ attr_accessor :properties
759
874
 
760
875
  def initialize(**args)
761
876
  update!(**args)
@@ -763,37 +878,34 @@ module Google
763
878
 
764
879
  # Update properties of this object
765
880
  def update!(**args)
766
- @done = args[:done] if args.key?(:done)
767
- @response = args[:response] if args.key?(:response)
768
- @name = args[:name] if args.key?(:name)
769
- @error = args[:error] if args.key?(:error)
770
- @metadata = args[:metadata] if args.key?(:metadata)
881
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
882
+ @query_list = args[:query_list] if args.key?(:query_list)
883
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
884
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
885
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
886
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
887
+ @properties = args[:properties] if args.key?(:properties)
771
888
  end
772
889
  end
773
890
 
774
- # The status of the operation.
775
- class OperationStatus
891
+ # The status of a cluster and its instances.
892
+ class ClusterStatus
776
893
  include Google::Apis::Core::Hashable
777
894
 
778
- # A message containing the operation state.
779
- # Corresponds to the JSON property `state`
780
- # @return [String]
781
- attr_accessor :state
782
-
783
- # A message containing any operation metadata details.
784
- # Corresponds to the JSON property `details`
895
+ # Output-only Time when this state was entered.
896
+ # Corresponds to the JSON property `stateStartTime`
785
897
  # @return [String]
786
- attr_accessor :details
898
+ attr_accessor :state_start_time
787
899
 
788
- # A message containing the detailed operation state.
789
- # Corresponds to the JSON property `innerState`
900
+ # Output-only Optional details of cluster's state.
901
+ # Corresponds to the JSON property `detail`
790
902
  # @return [String]
791
- attr_accessor :inner_state
903
+ attr_accessor :detail
792
904
 
793
- # The time this state was entered.
794
- # Corresponds to the JSON property `stateStartTime`
905
+ # Output-only The cluster's state.
906
+ # Corresponds to the JSON property `state`
795
907
  # @return [String]
796
- attr_accessor :state_start_time
908
+ attr_accessor :state
797
909
 
798
910
  def initialize(**args)
799
911
  update!(**args)
@@ -801,30 +913,27 @@ module Google
801
913
 
802
914
  # Update properties of this object
803
915
  def update!(**args)
804
- @state = args[:state] if args.key?(:state)
805
- @details = args[:details] if args.key?(:details)
806
- @inner_state = args[:inner_state] if args.key?(:inner_state)
807
916
  @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
917
+ @detail = args[:detail] if args.key?(:detail)
918
+ @state = args[:state] if args.key?(:state)
808
919
  end
809
920
  end
810
921
 
811
- # Encapsulates the full scoping used to reference a job.
812
- class JobReference
922
+ # The list of all clusters in a project.
923
+ class ListClustersResponse
813
924
  include Google::Apis::Core::Hashable
814
925
 
815
- # Required The ID of the Google Cloud Platform project that the job belongs to.
816
- # Corresponds to the JSON property `projectId`
817
- # @return [String]
818
- attr_accessor :project_id
926
+ # Output-only The clusters in the project.
927
+ # Corresponds to the JSON property `clusters`
928
+ # @return [Array<Google::Apis::DataprocV1::Cluster>]
929
+ attr_accessor :clusters
819
930
 
820
- # Optional The job ID, which must be unique within the project. The job ID is
821
- # generated by the server upon job submission or provided by the user as a means
822
- # to perform retries without creating duplicate jobs. The ID must contain only
823
- # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
824
- # maximum length is 512 characters.
825
- # Corresponds to the JSON property `jobId`
931
+ # Output-only This token is included in the response if there are more results
932
+ # to fetch. To fetch additional results, provide this value as the page_token in
933
+ # a subsequent <code>ListClustersRequest</code>.
934
+ # Corresponds to the JSON property `nextPageToken`
826
935
  # @return [String]
827
- attr_accessor :job_id
936
+ attr_accessor :next_page_token
828
937
 
829
938
  def initialize(**args)
830
939
  update!(**args)
@@ -832,109 +941,107 @@ module Google
832
941
 
833
942
  # Update properties of this object
834
943
  def update!(**args)
835
- @project_id = args[:project_id] if args.key?(:project_id)
836
- @job_id = args[:job_id] if args.key?(:job_id)
944
+ @clusters = args[:clusters] if args.key?(:clusters)
945
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
837
946
  end
838
947
  end
839
948
 
840
- # A request to submit a job.
841
- class SubmitJobRequest
949
+ # A Cloud Dataproc job resource.
950
+ class Job
842
951
  include Google::Apis::Core::Hashable
843
952
 
844
- # A Cloud Dataproc job resource.
845
- # Corresponds to the JSON property `job`
846
- # @return [Google::Apis::DataprocV1::Job]
847
- attr_accessor :job
848
-
849
- def initialize(**args)
850
- update!(**args)
851
- end
953
+ # Encapsulates the full scoping used to reference a job.
954
+ # Corresponds to the JSON property `reference`
955
+ # @return [Google::Apis::DataprocV1::JobReference]
956
+ attr_accessor :reference
852
957
 
853
- # Update properties of this object
854
- def update!(**args)
855
- @job = args[:job] if args.key?(:job)
856
- end
857
- end
958
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
959
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
960
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
961
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
962
+ # Corresponds to the JSON property `hadoopJob`
963
+ # @return [Google::Apis::DataprocV1::HadoopJob]
964
+ attr_accessor :hadoop_job
858
965
 
859
- # The Status type defines a logical error model that is suitable for different
860
- # programming environments, including REST APIs and RPC APIs. It is used by gRPC
861
- # (https://github.com/grpc). The error model is designed to be:
862
- # Simple to use and understand for most users
863
- # Flexible enough to meet unexpected needsOverviewThe Status message contains
864
- # three pieces of data: error code, error message, and error details. The error
865
- # code should be an enum value of google.rpc.Code, but it may accept additional
866
- # error codes if needed. The error message should be a developer-facing English
867
- # message that helps developers understand and resolve the error. If a localized
868
- # user-facing error message is needed, put the localized message in the error
869
- # details or localize it in the client. The optional error details may contain
870
- # arbitrary information about the error. There is a predefined set of error
871
- # detail types in the package google.rpc which can be used for common error
872
- # conditions.Language mappingThe Status message is the logical representation of
873
- # the error model, but it is not necessarily the actual wire format. When the
874
- # Status message is exposed in different client libraries and different wire
875
- # protocols, it can be mapped differently. For example, it will likely be mapped
876
- # to some exceptions in Java, but more likely mapped to some error codes in C.
877
- # Other usesThe error model and the Status message can be used in a variety of
878
- # environments, either with or without APIs, to provide a consistent developer
879
- # experience across different environments.Example uses of this error model
880
- # include:
881
- # Partial errors. If a service needs to return partial errors to the client, it
882
- # may embed the Status in the normal response to indicate the partial errors.
883
- # Workflow errors. A typical workflow has multiple steps. Each step may have a
884
- # Status message for error reporting purpose.
885
- # Batch operations. If a client uses batch request and batch response, the
886
- # Status message should be used directly inside batch response, one for each
887
- # error sub-response.
888
- # Asynchronous operations. If an API call embeds asynchronous operation results
889
- # in its response, the status of those operations should be represented directly
890
- # using the Status message.
891
- # Logging. If some API errors are stored in logs, the message Status could be
892
- # used directly after any stripping needed for security/privacy reasons.
893
- class Status
894
- include Google::Apis::Core::Hashable
966
+ # Cloud Dataproc job config.
967
+ # Corresponds to the JSON property `placement`
968
+ # @return [Google::Apis::DataprocV1::JobPlacement]
969
+ attr_accessor :placement
895
970
 
896
- # The status code, which should be an enum value of google.rpc.Code.
897
- # Corresponds to the JSON property `code`
898
- # @return [Fixnum]
899
- attr_accessor :code
971
+ # Cloud Dataproc job status.
972
+ # Corresponds to the JSON property `status`
973
+ # @return [Google::Apis::DataprocV1::JobStatus]
974
+ attr_accessor :status
900
975
 
901
- # A developer-facing error message, which should be in English. Any user-facing
902
- # error message should be localized and sent in the google.rpc.Status.details
903
- # field, or localized by the client.
904
- # Corresponds to the JSON property `message`
976
+ # Output-only If present, the location of miscellaneous control files which may
977
+ # be used as part of job setup and handling. If not present, control files may
978
+ # be placed in the same location as driver_output_uri.
979
+ # Corresponds to the JSON property `driverControlFilesUri`
905
980
  # @return [String]
906
- attr_accessor :message
981
+ attr_accessor :driver_control_files_uri
907
982
 
908
- # A list of messages that carry the error details. There will be a common set of
909
- # message types for APIs to use.
910
- # Corresponds to the JSON property `details`
911
- # @return [Array<Hash<String,Object>>]
912
- attr_accessor :details
983
+ # Job scheduling options.Beta Feature: These options are available for testing
984
+ # purposes only. They may be changed before final release.
985
+ # Corresponds to the JSON property `scheduling`
986
+ # @return [Google::Apis::DataprocV1::JobScheduling]
987
+ attr_accessor :scheduling
913
988
 
914
- def initialize(**args)
915
- update!(**args)
916
- end
989
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
990
+ # on YARN.
991
+ # Corresponds to the JSON property `pigJob`
992
+ # @return [Google::Apis::DataprocV1::PigJob]
993
+ attr_accessor :pig_job
917
994
 
918
- # Update properties of this object
919
- def update!(**args)
920
- @code = args[:code] if args.key?(:code)
921
- @message = args[:message] if args.key?(:message)
922
- @details = args[:details] if args.key?(:details)
923
- end
924
- end
995
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
996
+ # queries on YARN.
997
+ # Corresponds to the JSON property `hiveJob`
998
+ # @return [Google::Apis::DataprocV1::HiveJob]
999
+ attr_accessor :hive_job
925
1000
 
926
- # Job scheduling options.Beta Feature: These options are available for testing
927
- # purposes only. They may be changed before final release.
928
- class JobScheduling
929
- include Google::Apis::Core::Hashable
1001
+ # Optional The labels to associate with this job. Label keys must contain 1 to
1002
+ # 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.
1003
+ # txt). Label values may be empty, but, if present, must contain 1 to 63
1004
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1005
+ # . No more than 32 labels can be associated with a job.
1006
+ # Corresponds to the JSON property `labels`
1007
+ # @return [Hash<String,String>]
1008
+ attr_accessor :labels
930
1009
 
931
- # Optional Maximum number of times per hour a driver may be restarted as a
932
- # result of driver terminating with non-zero code before job is reported failed.
933
- # A job may be reported as thrashing if driver exits with non-zero code 4 times
934
- # within 10 minute window.Maximum value is 10.
935
- # Corresponds to the JSON property `maxFailuresPerHour`
936
- # @return [Fixnum]
937
- attr_accessor :max_failures_per_hour
1010
+ # Output-only A URI pointing to the location of the stdout of the job's driver
1011
+ # program.
1012
+ # Corresponds to the JSON property `driverOutputResourceUri`
1013
+ # @return [String]
1014
+ attr_accessor :driver_output_resource_uri
1015
+
1016
+ # Output-only The previous job status.
1017
+ # Corresponds to the JSON property `statusHistory`
1018
+ # @return [Array<Google::Apis::DataprocV1::JobStatus>]
1019
+ attr_accessor :status_history
1020
+
1021
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
1022
+ # ) queries.
1023
+ # Corresponds to the JSON property `sparkSqlJob`
1024
+ # @return [Google::Apis::DataprocV1::SparkSqlJob]
1025
+ attr_accessor :spark_sql_job
1026
+
1027
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
1028
+ # applications on YARN.
1029
+ # Corresponds to the JSON property `sparkJob`
1030
+ # @return [Google::Apis::DataprocV1::SparkJob]
1031
+ attr_accessor :spark_job
1032
+
1033
+ # Output-only The collection of YARN applications spun up by this job.Beta
1034
+ # Feature: This report is available for testing purposes only. It may be changed
1035
+ # before final release.
1036
+ # Corresponds to the JSON property `yarnApplications`
1037
+ # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
1038
+ attr_accessor :yarn_applications
1039
+
1040
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1041
+ # 0.9.0/python-programming-guide.html) applications on YARN.
1042
+ # Corresponds to the JSON property `pysparkJob`
1043
+ # @return [Google::Apis::DataprocV1::PySparkJob]
1044
+ attr_accessor :pyspark_job
938
1045
 
939
1046
  def initialize(**args)
940
1047
  update!(**args)
@@ -942,56 +1049,78 @@ module Google
942
1049
 
943
1050
  # Update properties of this object
944
1051
  def update!(**args)
945
- @max_failures_per_hour = args[:max_failures_per_hour] if args.key?(:max_failures_per_hour)
1052
+ @reference = args[:reference] if args.key?(:reference)
1053
+ @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
1054
+ @placement = args[:placement] if args.key?(:placement)
1055
+ @status = args[:status] if args.key?(:status)
1056
+ @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
1057
+ @scheduling = args[:scheduling] if args.key?(:scheduling)
1058
+ @pig_job = args[:pig_job] if args.key?(:pig_job)
1059
+ @hive_job = args[:hive_job] if args.key?(:hive_job)
1060
+ @labels = args[:labels] if args.key?(:labels)
1061
+ @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
1062
+ @status_history = args[:status_history] if args.key?(:status_history)
1063
+ @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
1064
+ @spark_job = args[:spark_job] if args.key?(:spark_job)
1065
+ @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
1066
+ @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
946
1067
  end
947
1068
  end
948
1069
 
949
- # Optional The config settings for Google Compute Engine resources in an
950
- # instance group, such as a master or worker group.
951
- class InstanceGroupConfig
1070
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
1071
+ # applications on YARN.
1072
+ class SparkJob
952
1073
  include Google::Apis::Core::Hashable
953
1074
 
954
- # Specifies the config of disk options for a group of VM instances.
955
- # Corresponds to the JSON property `diskConfig`
956
- # @return [Google::Apis::DataprocV1::DiskConfig]
957
- attr_accessor :disk_config
1075
+ # Optional HCFS URIs of archives to be extracted in the working directory of
1076
+ # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
1077
+ # zip.
1078
+ # Corresponds to the JSON property `archiveUris`
1079
+ # @return [Array<String>]
1080
+ attr_accessor :archive_uris
958
1081
 
959
- # Required The Google Compute Engine machine type used for cluster instances.
960
- # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
961
- # east1-a/machineTypes/n1-standard-2.
962
- # Corresponds to the JSON property `machineTypeUri`
1082
+ # The HCFS URI of the jar file that contains the main class.
1083
+ # Corresponds to the JSON property `mainJarFileUri`
963
1084
  # @return [String]
964
- attr_accessor :machine_type_uri
1085
+ attr_accessor :main_jar_file_uri
965
1086
 
966
- # Specifies the resources used to actively manage an instance group.
967
- # Corresponds to the JSON property `managedGroupConfig`
968
- # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
969
- attr_accessor :managed_group_config
1087
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
1088
+ # and tasks.
1089
+ # Corresponds to the JSON property `jarFileUris`
1090
+ # @return [Array<String>]
1091
+ attr_accessor :jar_file_uris
970
1092
 
971
- # Optional Specifies that this instance group contains preemptible instances.
972
- # Corresponds to the JSON property `isPreemptible`
973
- # @return [Boolean]
974
- attr_accessor :is_preemptible
975
- alias_method :is_preemptible?, :is_preemptible
1093
+ # The runtime logging config of the job.
1094
+ # Corresponds to the JSON property `loggingConfig`
1095
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
1096
+ attr_accessor :logging_config
976
1097
 
977
- # Output-only The Google Compute Engine image resource used for cluster
978
- # instances. Inferred from SoftwareConfig.image_version.
979
- # Corresponds to the JSON property `imageUri`
980
- # @return [String]
981
- attr_accessor :image_uri
1098
+ # Optional A mapping of property names to values, used to configure Spark.
1099
+ # Properties that conflict with values set by the Cloud Dataproc API may be
1100
+ # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
1101
+ # and classes in user code.
1102
+ # Corresponds to the JSON property `properties`
1103
+ # @return [Hash<String,String>]
1104
+ attr_accessor :properties
982
1105
 
983
- # Optional The list of instance names. Cloud Dataproc derives the names from
984
- # cluster_name, num_instances, and the instance group if not set by user (
985
- # recommended practice is to let Cloud Dataproc derive the name).
986
- # Corresponds to the JSON property `instanceNames`
1106
+ # Optional The arguments to pass to the driver. Do not include arguments, such
1107
+ # as --conf, that can be set as job properties, since a collision may occur that
1108
+ # causes an incorrect job submission.
1109
+ # Corresponds to the JSON property `args`
987
1110
  # @return [Array<String>]
988
- attr_accessor :instance_names
1111
+ attr_accessor :args
989
1112
 
990
- # Required The number of VM instances in the instance group. For master instance
991
- # groups, must be set to 1.
992
- # Corresponds to the JSON property `numInstances`
993
- # @return [Fixnum]
994
- attr_accessor :num_instances
1113
+ # Optional HCFS URIs of files to be copied to the working directory of Spark
1114
+ # drivers and distributed tasks. Useful for naively parallel tasks.
1115
+ # Corresponds to the JSON property `fileUris`
1116
+ # @return [Array<String>]
1117
+ attr_accessor :file_uris
1118
+
1119
+ # The name of the driver's main class. The jar file that contains the class must
1120
+ # be in the default CLASSPATH or specified in jar_file_uris.
1121
+ # Corresponds to the JSON property `mainClass`
1122
+ # @return [String]
1123
+ attr_accessor :main_class
995
1124
 
996
1125
  def initialize(**args)
997
1126
  update!(**args)
@@ -999,31 +1128,36 @@ module Google
999
1128
 
1000
1129
  # Update properties of this object
1001
1130
  def update!(**args)
1002
- @disk_config = args[:disk_config] if args.key?(:disk_config)
1003
- @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
1004
- @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
1005
- @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
1006
- @image_uri = args[:image_uri] if args.key?(:image_uri)
1007
- @instance_names = args[:instance_names] if args.key?(:instance_names)
1008
- @num_instances = args[:num_instances] if args.key?(:num_instances)
1131
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1132
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
1133
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1134
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
1135
+ @properties = args[:properties] if args.key?(:properties)
1136
+ @args = args[:args] if args.key?(:args)
1137
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
1138
+ @main_class = args[:main_class] if args.key?(:main_class)
1009
1139
  end
1010
1140
  end
1011
1141
 
1012
- # A list of jobs in a project.
1013
- class ListJobsResponse
1142
+ # Cloud Dataproc job status.
1143
+ class JobStatus
1014
1144
  include Google::Apis::Core::Hashable
1015
1145
 
1016
- # Output-only Jobs list.
1017
- # Corresponds to the JSON property `jobs`
1018
- # @return [Array<Google::Apis::DataprocV1::Job>]
1019
- attr_accessor :jobs
1146
+ # Output-only The time when this state was entered.
1147
+ # Corresponds to the JSON property `stateStartTime`
1148
+ # @return [String]
1149
+ attr_accessor :state_start_time
1020
1150
 
1021
- # Optional This token is included in the response if there are more results to
1022
- # fetch. To fetch additional results, provide this value as the page_token in a
1023
- # subsequent <code>ListJobsRequest</code>.
1024
- # Corresponds to the JSON property `nextPageToken`
1151
+ # Output-only A state message specifying the overall job state.
1152
+ # Corresponds to the JSON property `state`
1025
1153
  # @return [String]
1026
- attr_accessor :next_page_token
1154
+ attr_accessor :state
1155
+
1156
+ # Output-only Optional job state details, such as an error description if the
1157
+ # state is <code>ERROR</code>.
1158
+ # Corresponds to the JSON property `details`
1159
+ # @return [String]
1160
+ attr_accessor :details
1027
1161
 
1028
1162
  def initialize(**args)
1029
1163
  update!(**args)
@@ -1031,28 +1165,26 @@ module Google
1031
1165
 
1032
1166
  # Update properties of this object
1033
1167
  def update!(**args)
1034
- @jobs = args[:jobs] if args.key?(:jobs)
1035
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1168
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1169
+ @state = args[:state] if args.key?(:state)
1170
+ @details = args[:details] if args.key?(:details)
1036
1171
  end
1037
1172
  end
1038
1173
 
1039
- # Specifies an executable to run on a fully configured node and a timeout period
1040
- # for executable completion.
1041
- class NodeInitializationAction
1174
+ # Specifies the resources used to actively manage an instance group.
1175
+ class ManagedGroupConfig
1042
1176
  include Google::Apis::Core::Hashable
1043
1177
 
1044
- # Required Google Cloud Storage URI of executable file.
1045
- # Corresponds to the JSON property `executableFile`
1178
+ # Output-only The name of the Instance Group Manager for this group.
1179
+ # Corresponds to the JSON property `instanceGroupManagerName`
1046
1180
  # @return [String]
1047
- attr_accessor :executable_file
1181
+ attr_accessor :instance_group_manager_name
1048
1182
 
1049
- # Optional Amount of time executable has to complete. Default is 10 minutes.
1050
- # Cluster creation fails with an explanatory error message (the name of the
1051
- # executable that caused the error and the exceeded timeout period) if the
1052
- # executable is not completed at end of the timeout period.
1053
- # Corresponds to the JSON property `executionTimeout`
1183
+ # Output-only The name of the Instance Template used for the Managed Instance
1184
+ # Group.
1185
+ # Corresponds to the JSON property `instanceTemplateName`
1054
1186
  # @return [String]
1055
- attr_accessor :execution_timeout
1187
+ attr_accessor :instance_template_name
1056
1188
 
1057
1189
  def initialize(**args)
1058
1190
  update!(**args)
@@ -1060,46 +1192,99 @@ module Google
1060
1192
 
1061
1193
  # Update properties of this object
1062
1194
  def update!(**args)
1063
- @executable_file = args[:executable_file] if args.key?(:executable_file)
1064
- @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
1195
+ @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
1196
+ @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
1065
1197
  end
1066
1198
  end
1067
1199
 
1068
- # A request to cancel a job.
1069
- class CancelJobRequest
1200
+ # The status of the operation.
1201
+ class ClusterOperationStatus
1070
1202
  include Google::Apis::Core::Hashable
1071
1203
 
1204
+ # Output-only A message containing the operation state.
1205
+ # Corresponds to the JSON property `state`
1206
+ # @return [String]
1207
+ attr_accessor :state
1208
+
1209
+ # Output-onlyA message containing any operation metadata details.
1210
+ # Corresponds to the JSON property `details`
1211
+ # @return [String]
1212
+ attr_accessor :details
1213
+
1214
+ # Output-only A message containing the detailed operation state.
1215
+ # Corresponds to the JSON property `innerState`
1216
+ # @return [String]
1217
+ attr_accessor :inner_state
1218
+
1219
+ # Output-only The time this state was entered.
1220
+ # Corresponds to the JSON property `stateStartTime`
1221
+ # @return [String]
1222
+ attr_accessor :state_start_time
1223
+
1072
1224
  def initialize(**args)
1073
1225
  update!(**args)
1074
1226
  end
1075
1227
 
1076
1228
  # Update properties of this object
1077
1229
  def update!(**args)
1230
+ @state = args[:state] if args.key?(:state)
1231
+ @details = args[:details] if args.key?(:details)
1232
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
1233
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1078
1234
  end
1079
1235
  end
1080
1236
 
1081
- # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
1082
- # ) queries.
1083
- class SparkSqlJob
1237
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
1238
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
1239
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
1240
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
1241
+ class HadoopJob
1084
1242
  include Google::Apis::Core::Hashable
1085
1243
 
1086
- # The HCFS URI of the script that contains SQL queries.
1087
- # Corresponds to the JSON property `queryFileUri`
1244
+ # Optional A mapping of property names to values, used to configure Hadoop.
1245
+ # Properties that conflict with values set by the Cloud Dataproc API may be
1246
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
1247
+ # in user code.
1248
+ # Corresponds to the JSON property `properties`
1249
+ # @return [Hash<String,String>]
1250
+ attr_accessor :properties
1251
+
1252
+ # Optional The arguments to pass to the driver. Do not include arguments, such
1253
+ # as -libjars or -Dfoo=bar, that can be set as job properties, since a collision
1254
+ # may occur that causes an incorrect job submission.
1255
+ # Corresponds to the JSON property `args`
1256
+ # @return [Array<String>]
1257
+ attr_accessor :args
1258
+
1259
+ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the
1260
+ # working directory of Hadoop drivers and distributed tasks. Useful for naively
1261
+ # parallel tasks.
1262
+ # Corresponds to the JSON property `fileUris`
1263
+ # @return [Array<String>]
1264
+ attr_accessor :file_uris
1265
+
1266
+ # The name of the driver's main class. The jar file containing the class must be
1267
+ # in the default CLASSPATH or specified in jar_file_uris.
1268
+ # Corresponds to the JSON property `mainClass`
1088
1269
  # @return [String]
1089
- attr_accessor :query_file_uri
1270
+ attr_accessor :main_class
1090
1271
 
1091
- # A list of queries to run on a cluster.
1092
- # Corresponds to the JSON property `queryList`
1093
- # @return [Google::Apis::DataprocV1::QueryList]
1094
- attr_accessor :query_list
1272
+ # Optional HCFS URIs of archives to be extracted in the working directory of
1273
+ # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
1274
+ # zip.
1275
+ # Corresponds to the JSON property `archiveUris`
1276
+ # @return [Array<String>]
1277
+ attr_accessor :archive_uris
1095
1278
 
1096
- # Optional Mapping of query variable names to values (equivalent to the Spark
1097
- # SQL command: SET name="value";).
1098
- # Corresponds to the JSON property `scriptVariables`
1099
- # @return [Hash<String,String>]
1100
- attr_accessor :script_variables
1279
+ # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
1280
+ # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
1281
+ # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
1282
+ # mapreduce-examples.jar'
1283
+ # Corresponds to the JSON property `mainJarFileUri`
1284
+ # @return [String]
1285
+ attr_accessor :main_jar_file_uri
1101
1286
 
1102
- # Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.
1287
+ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
1103
1288
  # Corresponds to the JSON property `jarFileUris`
1104
1289
  # @return [Array<String>]
1105
1290
  attr_accessor :jar_file_uris
@@ -1109,80 +1294,83 @@ module Google
1109
1294
  # @return [Google::Apis::DataprocV1::LoggingConfig]
1110
1295
  attr_accessor :logging_config
1111
1296
 
1112
- # Optional A mapping of property names to values, used to configure Spark SQL's
1113
- # SparkConf. Properties that conflict with values set by the Cloud Dataproc API
1114
- # may be overwritten.
1115
- # Corresponds to the JSON property `properties`
1116
- # @return [Hash<String,String>]
1117
- attr_accessor :properties
1118
-
1119
1297
  def initialize(**args)
1120
1298
  update!(**args)
1121
1299
  end
1122
1300
 
1123
1301
  # Update properties of this object
1124
1302
  def update!(**args)
1125
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1126
- @query_list = args[:query_list] if args.key?(:query_list)
1127
- @script_variables = args[:script_variables] if args.key?(:script_variables)
1303
+ @properties = args[:properties] if args.key?(:properties)
1304
+ @args = args[:args] if args.key?(:args)
1305
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
1306
+ @main_class = args[:main_class] if args.key?(:main_class)
1307
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1308
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
1128
1309
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1129
1310
  @logging_config = args[:logging_config] if args.key?(:logging_config)
1130
- @properties = args[:properties] if args.key?(:properties)
1131
1311
  end
1132
1312
  end
1133
1313
 
1134
- # Describes the identifying information, config, and status of a cluster of
1135
- # Google Compute Engine instances.
1136
- class Cluster
1314
+ # A list of queries to run on a cluster.
1315
+ class QueryList
1137
1316
  include Google::Apis::Core::Hashable
1138
1317
 
1139
- # Optional The labels to associate with this cluster. Label keys must contain 1
1140
- # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
1141
- # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
1142
- # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1143
- # . No more than 32 labels can be associated with a cluster.
1144
- # Corresponds to the JSON property `labels`
1145
- # @return [Hash<String,String>]
1146
- attr_accessor :labels
1147
-
1148
- # The status of a cluster and its instances.
1149
- # Corresponds to the JSON property `status`
1150
- # @return [Google::Apis::DataprocV1::ClusterStatus]
1151
- attr_accessor :status
1318
+ # Required The queries to execute. You do not need to terminate a query with a
1319
+ # semicolon. Multiple queries can be specified in one string by separating each
1320
+ # with a semicolon. Here is an example of an Cloud Dataproc API snippet that
1321
+ # uses a QueryList to specify a HiveJob:
1322
+ # "hiveJob": `
1323
+ # "queryList": `
1324
+ # "queries": [
1325
+ # "query1",
1326
+ # "query2",
1327
+ # "query3;query4",
1328
+ # ]
1329
+ # `
1330
+ # `
1331
+ # Corresponds to the JSON property `queries`
1332
+ # @return [Array<String>]
1333
+ attr_accessor :queries
1152
1334
 
1153
- # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
1154
- # This report is available for testing purposes only. It may be changed before
1155
- # final release.
1156
- # Corresponds to the JSON property `metrics`
1157
- # @return [Google::Apis::DataprocV1::ClusterMetrics]
1158
- attr_accessor :metrics
1335
+ def initialize(**args)
1336
+ update!(**args)
1337
+ end
1159
1338
 
1160
- # Output-only The previous cluster status.
1161
- # Corresponds to the JSON property `statusHistory`
1162
- # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
1163
- attr_accessor :status_history
1339
+ # Update properties of this object
1340
+ def update!(**args)
1341
+ @queries = args[:queries] if args.key?(:queries)
1342
+ end
1343
+ end
1164
1344
 
1165
- # The cluster config.
1166
- # Corresponds to the JSON property `config`
1167
- # @return [Google::Apis::DataprocV1::ClusterConfig]
1168
- attr_accessor :config
1345
+ # A YARN application created by a job. Application information is a subset of <
1346
+ # code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
1347
+ # Beta Feature: This report is available for testing purposes only. It may be
1348
+ # changed before final release.
1349
+ class YarnApplication
1350
+ include Google::Apis::Core::Hashable
1169
1351
 
1170
- # Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
1171
- # generates this value when it creates the cluster.
1172
- # Corresponds to the JSON property `clusterUuid`
1352
+ # Required The application state.
1353
+ # Corresponds to the JSON property `state`
1173
1354
  # @return [String]
1174
- attr_accessor :cluster_uuid
1355
+ attr_accessor :state
1175
1356
 
1176
- # Required The cluster name. Cluster names within a project must be unique.
1177
- # Names of deleted clusters can be reused.
1178
- # Corresponds to the JSON property `clusterName`
1357
+ # Required The application name.
1358
+ # Corresponds to the JSON property `name`
1179
1359
  # @return [String]
1180
- attr_accessor :cluster_name
1360
+ attr_accessor :name
1181
1361
 
1182
- # Required The Google Cloud Platform project ID that the cluster belongs to.
1183
- # Corresponds to the JSON property `projectId`
1362
+ # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
1363
+ # TimelineServer that provides application-specific information. The URL uses
1364
+ # the internal hostname, and requires a proxy server for resolution and,
1365
+ # possibly, access.
1366
+ # Corresponds to the JSON property `trackingUrl`
1184
1367
  # @return [String]
1185
- attr_accessor :project_id
1368
+ attr_accessor :tracking_url
1369
+
1370
+ # Required The numerical progress of the application, from 1 to 100.
1371
+ # Corresponds to the JSON property `progress`
1372
+ # @return [Float]
1373
+ attr_accessor :progress
1186
1374
 
1187
1375
  def initialize(**args)
1188
1376
  update!(**args)
@@ -1190,30 +1378,43 @@ module Google
1190
1378
 
1191
1379
  # Update properties of this object
1192
1380
  def update!(**args)
1193
- @labels = args[:labels] if args.key?(:labels)
1194
- @status = args[:status] if args.key?(:status)
1195
- @metrics = args[:metrics] if args.key?(:metrics)
1196
- @status_history = args[:status_history] if args.key?(:status_history)
1197
- @config = args[:config] if args.key?(:config)
1198
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1199
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1200
- @project_id = args[:project_id] if args.key?(:project_id)
1381
+ @state = args[:state] if args.key?(:state)
1382
+ @name = args[:name] if args.key?(:name)
1383
+ @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
1384
+ @progress = args[:progress] if args.key?(:progress)
1201
1385
  end
1202
1386
  end
1203
1387
 
1204
- # The response message for Operations.ListOperations.
1205
- class ListOperationsResponse
1388
+ # A request to collect cluster diagnostic information.
1389
+ class DiagnoseClusterRequest
1206
1390
  include Google::Apis::Core::Hashable
1207
1391
 
1208
- # The standard List next-page token.
1209
- # Corresponds to the JSON property `nextPageToken`
1210
- # @return [String]
1211
- attr_accessor :next_page_token
1392
+ def initialize(**args)
1393
+ update!(**args)
1394
+ end
1212
1395
 
1213
- # A list of operations that matches the specified filter in the request.
1214
- # Corresponds to the JSON property `operations`
1215
- # @return [Array<Google::Apis::DataprocV1::Operation>]
1216
- attr_accessor :operations
1396
+ # Update properties of this object
1397
+ def update!(**args)
1398
+ end
1399
+ end
1400
+
1401
+ # Specifies the config of disk options for a group of VM instances.
1402
+ class DiskConfig
1403
+ include Google::Apis::Core::Hashable
1404
+
1405
+ # Optional Size in GB of the boot disk (default is 500GB).
1406
+ # Corresponds to the JSON property `bootDiskSizeGb`
1407
+ # @return [Fixnum]
1408
+ attr_accessor :boot_disk_size_gb
1409
+
1410
+ # Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
1411
+ # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
1412
+ # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
1413
+ # attached, this runtime bulk data is spread across them, and the boot disk
1414
+ # contains only basic config and installed binaries.
1415
+ # Corresponds to the JSON property `numLocalSsds`
1416
+ # @return [Fixnum]
1417
+ attr_accessor :num_local_ssds
1217
1418
 
1218
1419
  def initialize(**args)
1219
1420
  update!(**args)
@@ -1221,116 +1422,54 @@ module Google
1221
1422
 
1222
1423
  # Update properties of this object
1223
1424
  def update!(**args)
1224
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1225
- @operations = args[:operations] if args.key?(:operations)
1425
+ @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
1426
+ @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
1226
1427
  end
1227
1428
  end
1228
1429
 
1229
1430
  # Metadata describing the operation.
1230
- class OperationMetadata
1431
+ class ClusterOperationMetadata
1231
1432
  include Google::Apis::Core::Hashable
1232
1433
 
1233
- # Output-only The operation type.
1234
- # Corresponds to the JSON property `operationType`
1235
- # @return [String]
1236
- attr_accessor :operation_type
1237
-
1238
1434
  # Output-only Short description of operation.
1239
1435
  # Corresponds to the JSON property `description`
1240
1436
  # @return [String]
1241
1437
  attr_accessor :description
1242
1438
 
1243
- # The status of the operation.
1244
- # Corresponds to the JSON property `status`
1245
- # @return [Google::Apis::DataprocV1::OperationStatus]
1246
- attr_accessor :status
1247
-
1248
- # A message containing any operation metadata details.
1249
- # Corresponds to the JSON property `details`
1250
- # @return [String]
1251
- attr_accessor :details
1252
-
1253
- # A message containing the operation state.
1254
- # Corresponds to the JSON property `state`
1255
- # @return [String]
1256
- attr_accessor :state
1257
-
1258
- # Name of the cluster for the operation.
1259
- # Corresponds to the JSON property `clusterName`
1260
- # @return [String]
1261
- attr_accessor :cluster_name
1262
-
1263
- # Cluster UUId for the operation.
1264
- # Corresponds to the JSON property `clusterUuid`
1265
- # @return [String]
1266
- attr_accessor :cluster_uuid
1267
-
1268
- # A message containing the detailed operation state.
1269
- # Corresponds to the JSON property `innerState`
1270
- # @return [String]
1271
- attr_accessor :inner_state
1272
-
1273
- # The time that the operation completed.
1274
- # Corresponds to the JSON property `endTime`
1275
- # @return [String]
1276
- attr_accessor :end_time
1277
-
1278
- # The time that the operation was started by the server.
1279
- # Corresponds to the JSON property `startTime`
1280
- # @return [String]
1281
- attr_accessor :start_time
1282
-
1283
1439
  # Output-only Errors encountered during operation execution.
1284
1440
  # Corresponds to the JSON property `warnings`
1285
1441
  # @return [Array<String>]
1286
1442
  attr_accessor :warnings
1287
1443
 
1288
- # The time that the operation was requested.
1289
- # Corresponds to the JSON property `insertTime`
1290
- # @return [String]
1291
- attr_accessor :insert_time
1444
+ # Output-only Labels associated with the operation
1445
+ # Corresponds to the JSON property `labels`
1446
+ # @return [Hash<String,String>]
1447
+ attr_accessor :labels
1292
1448
 
1293
- # Output-only Previous operation status.
1449
+ # The status of the operation.
1450
+ # Corresponds to the JSON property `status`
1451
+ # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
1452
+ attr_accessor :status
1453
+
1454
+ # Output-only The previous operation status.
1294
1455
  # Corresponds to the JSON property `statusHistory`
1295
- # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
1456
+ # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
1296
1457
  attr_accessor :status_history
1297
1458
 
1298
- def initialize(**args)
1299
- update!(**args)
1300
- end
1301
-
1302
- # Update properties of this object
1303
- def update!(**args)
1304
- @operation_type = args[:operation_type] if args.key?(:operation_type)
1305
- @description = args[:description] if args.key?(:description)
1306
- @status = args[:status] if args.key?(:status)
1307
- @details = args[:details] if args.key?(:details)
1308
- @state = args[:state] if args.key?(:state)
1309
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1310
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1311
- @inner_state = args[:inner_state] if args.key?(:inner_state)
1312
- @end_time = args[:end_time] if args.key?(:end_time)
1313
- @start_time = args[:start_time] if args.key?(:start_time)
1314
- @warnings = args[:warnings] if args.key?(:warnings)
1315
- @insert_time = args[:insert_time] if args.key?(:insert_time)
1316
- @status_history = args[:status_history] if args.key?(:status_history)
1317
- end
1318
- end
1319
-
1320
- # Cloud Dataproc job config.
1321
- class JobPlacement
1322
- include Google::Apis::Core::Hashable
1459
+ # Output-only Cluster UUID for the operation.
1460
+ # Corresponds to the JSON property `clusterUuid`
1461
+ # @return [String]
1462
+ attr_accessor :cluster_uuid
1323
1463
 
1324
- # Required The name of the cluster where the job will be submitted.
1464
+ # Output-only Name of the cluster for the operation.
1325
1465
  # Corresponds to the JSON property `clusterName`
1326
1466
  # @return [String]
1327
1467
  attr_accessor :cluster_name
1328
1468
 
1329
- # Output-only A cluster UUID generated by the Cloud Dataproc service when the
1330
- # job is submitted.
1331
- # Corresponds to the JSON property `clusterUuid`
1469
+ # Output-only The operation type.
1470
+ # Corresponds to the JSON property `operationType`
1332
1471
  # @return [String]
1333
- attr_accessor :cluster_uuid
1472
+ attr_accessor :operation_type
1334
1473
 
1335
1474
  def initialize(**args)
1336
1475
  update!(**args)
@@ -1338,157 +1477,103 @@ module Google
1338
1477
 
1339
1478
  # Update properties of this object
1340
1479
  def update!(**args)
1341
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1480
+ @description = args[:description] if args.key?(:description)
1481
+ @warnings = args[:warnings] if args.key?(:warnings)
1482
+ @labels = args[:labels] if args.key?(:labels)
1483
+ @status = args[:status] if args.key?(:status)
1484
+ @status_history = args[:status_history] if args.key?(:status_history)
1342
1485
  @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1486
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1487
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
1343
1488
  end
1344
1489
  end
1345
1490
 
1346
- # Specifies the selection and config of software inside the cluster.
1347
- class SoftwareConfig
1491
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
1492
+ # queries on YARN.
1493
+ class HiveJob
1348
1494
  include Google::Apis::Core::Hashable
1349
1495
 
1350
- # Optional The version of software inside the cluster. It must match the regular
1351
- # expression [0-9]+\.[0-9]+. If unspecified, it defaults to the latest version (
1352
- # see Cloud Dataproc Versioning).
1353
- # Corresponds to the JSON property `imageVersion`
1354
- # @return [String]
1355
- attr_accessor :image_version
1356
-
1357
- # Optional The properties to set on daemon config files.Property keys are
1358
- # specified in prefix:property format, such as core:fs.defaultFS. The following
1359
- # are supported prefixes and their mappings:
1360
- # core: core-site.xml
1361
- # hdfs: hdfs-site.xml
1362
- # mapred: mapred-site.xml
1363
- # yarn: yarn-site.xml
1364
- # hive: hive-site.xml
1365
- # pig: pig.properties
1366
- # spark: spark-defaults.conf
1367
- # Corresponds to the JSON property `properties`
1368
- # @return [Hash<String,String>]
1369
- attr_accessor :properties
1370
-
1371
- def initialize(**args)
1372
- update!(**args)
1373
- end
1374
-
1375
- # Update properties of this object
1376
- def update!(**args)
1377
- @image_version = args[:image_version] if args.key?(:image_version)
1378
- @properties = args[:properties] if args.key?(:properties)
1379
- end
1380
- end
1496
+ # Optional Whether to continue executing queries if a query fails. The default
1497
+ # value is false. Setting to true can be useful when executing independent
1498
+ # parallel queries.
1499
+ # Corresponds to the JSON property `continueOnFailure`
1500
+ # @return [Boolean]
1501
+ attr_accessor :continue_on_failure
1502
+ alias_method :continue_on_failure?, :continue_on_failure
1381
1503
 
1382
- # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
1383
- # on YARN.
1384
- class PigJob
1385
- include Google::Apis::Core::Hashable
1504
+ # The HCFS URI of the script that contains Hive queries.
1505
+ # Corresponds to the JSON property `queryFileUri`
1506
+ # @return [String]
1507
+ attr_accessor :query_file_uri
1386
1508
 
1387
1509
  # A list of queries to run on a cluster.
1388
1510
  # Corresponds to the JSON property `queryList`
1389
1511
  # @return [Google::Apis::DataprocV1::QueryList]
1390
1512
  attr_accessor :query_list
1391
1513
 
1392
- # The HCFS URI of the script that contains the Pig queries.
1393
- # Corresponds to the JSON property `queryFileUri`
1394
- # @return [String]
1395
- attr_accessor :query_file_uri
1396
-
1397
- # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and
1398
- # Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
1514
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and
1515
+ # Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
1399
1516
  # Corresponds to the JSON property `jarFileUris`
1400
1517
  # @return [Array<String>]
1401
1518
  attr_accessor :jar_file_uris
1402
1519
 
1403
- # Optional Mapping of query variable names to values (equivalent to the Pig
1404
- # command: name=[value]).
1520
+ # Optional Mapping of query variable names to values (equivalent to the Hive
1521
+ # command: SET name="value";).
1405
1522
  # Corresponds to the JSON property `scriptVariables`
1406
1523
  # @return [Hash<String,String>]
1407
1524
  attr_accessor :script_variables
1408
1525
 
1409
- # The runtime logging config of the job.
1410
- # Corresponds to the JSON property `loggingConfig`
1411
- # @return [Google::Apis::DataprocV1::LoggingConfig]
1412
- attr_accessor :logging_config
1413
-
1414
- # Optional A mapping of property names to values, used to configure Pig.
1526
+ # Optional A mapping of property names and values, used to configure Hive.
1415
1527
  # Properties that conflict with values set by the Cloud Dataproc API may be
1416
1528
  # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
1417
- # pig/conf/pig.properties, and classes in user code.
1529
+ # hive/conf/hive-site.xml, and classes in user code.
1418
1530
  # Corresponds to the JSON property `properties`
1419
1531
  # @return [Hash<String,String>]
1420
1532
  attr_accessor :properties
1421
1533
 
1422
- # Optional Whether to continue executing queries if a query fails. The default
1423
- # value is false. Setting to true can be useful when executing independent
1424
- # parallel queries.
1425
- # Corresponds to the JSON property `continueOnFailure`
1426
- # @return [Boolean]
1427
- attr_accessor :continue_on_failure
1428
- alias_method :continue_on_failure?, :continue_on_failure
1429
-
1430
1534
  def initialize(**args)
1431
1535
  update!(**args)
1432
1536
  end
1433
1537
 
1434
1538
  # Update properties of this object
1435
1539
  def update!(**args)
1436
- @query_list = args[:query_list] if args.key?(:query_list)
1540
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
1437
1541
  @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1542
+ @query_list = args[:query_list] if args.key?(:query_list)
1438
1543
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1439
1544
  @script_variables = args[:script_variables] if args.key?(:script_variables)
1440
- @logging_config = args[:logging_config] if args.key?(:logging_config)
1441
1545
  @properties = args[:properties] if args.key?(:properties)
1442
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
1443
1546
  end
1444
1547
  end
1445
1548
 
1446
- # The status of a cluster and its instances.
1447
- class ClusterStatus
1549
+ # A generic empty message that you can re-use to avoid defining duplicated empty
1550
+ # messages in your APIs. A typical example is to use it as the request or the
1551
+ # response type of an API method. For instance:
1552
+ # service Foo `
1553
+ # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
1554
+ # `
1555
+ # The JSON representation for Empty is empty JSON object ``.
1556
+ class Empty
1448
1557
  include Google::Apis::Core::Hashable
1449
1558
 
1450
- # Output-only Time when this state was entered.
1451
- # Corresponds to the JSON property `stateStartTime`
1452
- # @return [String]
1453
- attr_accessor :state_start_time
1454
-
1455
- # Output-only Optional details of cluster's state.
1456
- # Corresponds to the JSON property `detail`
1457
- # @return [String]
1458
- attr_accessor :detail
1459
-
1460
- # Output-only The cluster's state.
1461
- # Corresponds to the JSON property `state`
1462
- # @return [String]
1463
- attr_accessor :state
1464
-
1465
1559
  def initialize(**args)
1466
1560
  update!(**args)
1467
1561
  end
1468
1562
 
1469
1563
  # Update properties of this object
1470
1564
  def update!(**args)
1471
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1472
- @detail = args[:detail] if args.key?(:detail)
1473
- @state = args[:state] if args.key?(:state)
1474
1565
  end
1475
1566
  end
1476
1567
 
1477
- # The list of all clusters in a project.
1478
- class ListClustersResponse
1568
+ # The location of diagnostic output.
1569
+ class DiagnoseClusterResults
1479
1570
  include Google::Apis::Core::Hashable
1480
1571
 
1481
- # Output-only The clusters in the project.
1482
- # Corresponds to the JSON property `clusters`
1483
- # @return [Array<Google::Apis::DataprocV1::Cluster>]
1484
- attr_accessor :clusters
1485
-
1486
- # Output-only This token is included in the response if there are more results
1487
- # to fetch. To fetch additional results, provide this value as the page_token in
1488
- # a subsequent <code>ListClustersRequest</code>.
1489
- # Corresponds to the JSON property `nextPageToken`
1572
+ # Output-only The Google Cloud Storage URI of the diagnostic output. The output
1573
+ # report is a plain text file with a summary of collected diagnostics.
1574
+ # Corresponds to the JSON property `outputUri`
1490
1575
  # @return [String]
1491
- attr_accessor :next_page_token
1576
+ attr_accessor :output_uri
1492
1577
 
1493
1578
  def initialize(**args)
1494
1579
  update!(**args)
@@ -1496,107 +1581,67 @@ module Google
1496
1581
 
1497
1582
  # Update properties of this object
1498
1583
  def update!(**args)
1499
- @clusters = args[:clusters] if args.key?(:clusters)
1500
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1584
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
1501
1585
  end
1502
1586
  end
1503
1587
 
1504
- # A Cloud Dataproc job resource.
1505
- class Job
1588
+ # The cluster config.
1589
+ class ClusterConfig
1506
1590
  include Google::Apis::Core::Hashable
1507
1591
 
1508
- # Output-only If present, the location of miscellaneous control files which may
1509
- # be used as part of job setup and handling. If not present, control files may
1510
- # be placed in the same location as driver_output_uri.
1511
- # Corresponds to the JSON property `driverControlFilesUri`
1512
- # @return [String]
1513
- attr_accessor :driver_control_files_uri
1514
-
1515
- # Job scheduling options.Beta Feature: These options are available for testing
1516
- # purposes only. They may be changed before final release.
1517
- # Corresponds to the JSON property `scheduling`
1518
- # @return [Google::Apis::DataprocV1::JobScheduling]
1519
- attr_accessor :scheduling
1520
-
1521
- # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
1522
- # on YARN.
1523
- # Corresponds to the JSON property `pigJob`
1524
- # @return [Google::Apis::DataprocV1::PigJob]
1525
- attr_accessor :pig_job
1526
-
1527
- # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
1528
- # queries on YARN.
1529
- # Corresponds to the JSON property `hiveJob`
1530
- # @return [Google::Apis::DataprocV1::HiveJob]
1531
- attr_accessor :hive_job
1532
-
1533
- # Optional The labels to associate with this job. Label keys must contain 1 to
1534
- # 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.
1535
- # txt). Label values may be empty, but, if present, must contain 1 to 63
1536
- # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1537
- # . No more than 32 labels can be associated with a job.
1538
- # Corresponds to the JSON property `labels`
1539
- # @return [Hash<String,String>]
1540
- attr_accessor :labels
1592
+ # Optional Commands to execute on each node after config is completed. By
1593
+ # default, executables are run on master and all worker nodes. You can test a
1594
+ # node's <code>role</code> metadata to run an executable on a master or worker
1595
+ # node, as shown below using curl (you can also use wget):
1596
+ # ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/
1597
+ # instance/attributes/dataproc-role)
1598
+ # if [[ "$`ROLE`" == 'Master' ]]; then
1599
+ # ... master specific actions ...
1600
+ # else
1601
+ # ... worker specific actions ...
1602
+ # fi
1603
+ # Corresponds to the JSON property `initializationActions`
1604
+ # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
1605
+ attr_accessor :initialization_actions
1541
1606
 
1542
- # Output-only A URI pointing to the location of the stdout of the job's driver
1543
- # program.
1544
- # Corresponds to the JSON property `driverOutputResourceUri`
1607
+ # Optional A Google Cloud Storage staging bucket used for sharing generated SSH
1608
+ # keys and config. If you do not specify a staging bucket, Cloud Dataproc will
1609
+ # determine an appropriate Cloud Storage location (US, ASIA, or EU) for your
1610
+ # cluster's staging bucket according to the Google Compute Engine zone where
1611
+ # your cluster is deployed, and then it will create and manage this project-
1612
+ # level, per-location bucket for you.
1613
+ # Corresponds to the JSON property `configBucket`
1545
1614
  # @return [String]
1546
- attr_accessor :driver_output_resource_uri
1547
-
1548
- # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
1549
- # ) queries.
1550
- # Corresponds to the JSON property `sparkSqlJob`
1551
- # @return [Google::Apis::DataprocV1::SparkSqlJob]
1552
- attr_accessor :spark_sql_job
1553
-
1554
- # Output-only The previous job status.
1555
- # Corresponds to the JSON property `statusHistory`
1556
- # @return [Array<Google::Apis::DataprocV1::JobStatus>]
1557
- attr_accessor :status_history
1558
-
1559
- # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
1560
- # applications on YARN.
1561
- # Corresponds to the JSON property `sparkJob`
1562
- # @return [Google::Apis::DataprocV1::SparkJob]
1563
- attr_accessor :spark_job
1564
-
1565
- # Output-only The collection of YARN applications spun up by this job.Beta
1566
- # Feature: This report is available for testing purposes only. It may be changed
1567
- # before final release.
1568
- # Corresponds to the JSON property `yarnApplications`
1569
- # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
1570
- attr_accessor :yarn_applications
1571
-
1572
- # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1573
- # 0.9.0/python-programming-guide.html) applications on YARN.
1574
- # Corresponds to the JSON property `pysparkJob`
1575
- # @return [Google::Apis::DataprocV1::PySparkJob]
1576
- attr_accessor :pyspark_job
1615
+ attr_accessor :config_bucket
1577
1616
 
1578
- # Encapsulates the full scoping used to reference a job.
1579
- # Corresponds to the JSON property `reference`
1580
- # @return [Google::Apis::DataprocV1::JobReference]
1581
- attr_accessor :reference
1617
+ # Optional The config settings for Google Compute Engine resources in an
1618
+ # instance group, such as a master or worker group.
1619
+ # Corresponds to the JSON property `workerConfig`
1620
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
1621
+ attr_accessor :worker_config
1582
1622
 
1583
- # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
1584
- # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
1585
- # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
1586
- # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
1587
- # Corresponds to the JSON property `hadoopJob`
1588
- # @return [Google::Apis::DataprocV1::HadoopJob]
1589
- attr_accessor :hadoop_job
1623
+ # Common config settings for resources of Google Compute Engine cluster
1624
+ # instances, applicable to all instances in the cluster.
1625
+ # Corresponds to the JSON property `gceClusterConfig`
1626
+ # @return [Google::Apis::DataprocV1::GceClusterConfig]
1627
+ attr_accessor :gce_cluster_config
1590
1628
 
1591
- # Cloud Dataproc job status.
1592
- # Corresponds to the JSON property `status`
1593
- # @return [Google::Apis::DataprocV1::JobStatus]
1594
- attr_accessor :status
1629
+ # Specifies the selection and config of software inside the cluster.
1630
+ # Corresponds to the JSON property `softwareConfig`
1631
+ # @return [Google::Apis::DataprocV1::SoftwareConfig]
1632
+ attr_accessor :software_config
1595
1633
 
1596
- # Cloud Dataproc job config.
1597
- # Corresponds to the JSON property `placement`
1598
- # @return [Google::Apis::DataprocV1::JobPlacement]
1599
- attr_accessor :placement
1634
+ # Optional The config settings for Google Compute Engine resources in an
1635
+ # instance group, such as a master or worker group.
1636
+ # Corresponds to the JSON property `masterConfig`
1637
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
1638
+ attr_accessor :master_config
1639
+
1640
+ # Optional The config settings for Google Compute Engine resources in an
1641
+ # instance group, such as a master or worker group.
1642
+ # Corresponds to the JSON property `secondaryWorkerConfig`
1643
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
1644
+ attr_accessor :secondary_worker_config
1600
1645
 
1601
1646
  def initialize(**args)
1602
1647
  update!(**args)
@@ -1604,30 +1649,40 @@ module Google
1604
1649
 
1605
1650
  # Update properties of this object
1606
1651
  def update!(**args)
1607
- @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
1608
- @scheduling = args[:scheduling] if args.key?(:scheduling)
1609
- @pig_job = args[:pig_job] if args.key?(:pig_job)
1610
- @hive_job = args[:hive_job] if args.key?(:hive_job)
1611
- @labels = args[:labels] if args.key?(:labels)
1612
- @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
1613
- @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
1614
- @status_history = args[:status_history] if args.key?(:status_history)
1615
- @spark_job = args[:spark_job] if args.key?(:spark_job)
1616
- @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
1617
- @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
1618
- @reference = args[:reference] if args.key?(:reference)
1619
- @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
1620
- @status = args[:status] if args.key?(:status)
1621
- @placement = args[:placement] if args.key?(:placement)
1652
+ @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
1653
+ @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
1654
+ @worker_config = args[:worker_config] if args.key?(:worker_config)
1655
+ @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
1656
+ @software_config = args[:software_config] if args.key?(:software_config)
1657
+ @master_config = args[:master_config] if args.key?(:master_config)
1658
+ @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
1622
1659
  end
1623
1660
  end
1624
1661
 
1625
- # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
1626
- # applications on YARN.
1627
- class SparkJob
1662
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1663
+ # 0.9.0/python-programming-guide.html) applications on YARN.
1664
+ class PySparkJob
1628
1665
  include Google::Apis::Core::Hashable
1629
1666
 
1630
- # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
1667
+ # Optional HCFS file URIs of Python files to pass to the PySpark framework.
1668
+ # Supported file types: .py, .egg, and .zip.
1669
+ # Corresponds to the JSON property `pythonFileUris`
1670
+ # @return [Array<String>]
1671
+ attr_accessor :python_file_uris
1672
+
1673
+ # Required The HCFS URI of the main Python file to use as the driver. Must be a .
1674
+ # py file.
1675
+ # Corresponds to the JSON property `mainPythonFileUri`
1676
+ # @return [String]
1677
+ attr_accessor :main_python_file_uri
1678
+
1679
+ # Optional HCFS URIs of archives to be extracted in the working directory of .
1680
+ # jar, .tar, .tar.gz, .tgz, and .zip.
1681
+ # Corresponds to the JSON property `archiveUris`
1682
+ # @return [Array<String>]
1683
+ attr_accessor :archive_uris
1684
+
1685
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver
1631
1686
  # and tasks.
1632
1687
  # Corresponds to the JSON property `jarFileUris`
1633
1688
  # @return [Array<String>]
@@ -1638,7 +1693,7 @@ module Google
1638
1693
  # @return [Google::Apis::DataprocV1::LoggingConfig]
1639
1694
  attr_accessor :logging_config
1640
1695
 
1641
- # Optional A mapping of property names to values, used to configure Spark.
1696
+ # Optional A mapping of property names to values, used to configure PySpark.
1642
1697
  # Properties that conflict with values set by the Cloud Dataproc API may be
1643
1698
  # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
1644
1699
  # and classes in user code.
@@ -1653,128 +1708,105 @@ module Google
1653
1708
  # @return [Array<String>]
1654
1709
  attr_accessor :args
1655
1710
 
1656
- # Optional HCFS URIs of files to be copied to the working directory of Spark
1711
+ # Optional HCFS URIs of files to be copied to the working directory of Python
1657
1712
  # drivers and distributed tasks. Useful for naively parallel tasks.
1658
1713
  # Corresponds to the JSON property `fileUris`
1659
1714
  # @return [Array<String>]
1660
1715
  attr_accessor :file_uris
1661
1716
 
1662
- # The name of the driver's main class. The jar file that contains the class must
1663
- # be in the default CLASSPATH or specified in jar_file_uris.
1664
- # Corresponds to the JSON property `mainClass`
1665
- # @return [String]
1666
- attr_accessor :main_class
1667
-
1668
- # Optional HCFS URIs of archives to be extracted in the working directory of
1669
- # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
1670
- # zip.
1671
- # Corresponds to the JSON property `archiveUris`
1672
- # @return [Array<String>]
1673
- attr_accessor :archive_uris
1674
-
1675
- # The HCFS URI of the jar file that contains the main class.
1676
- # Corresponds to the JSON property `mainJarFileUri`
1677
- # @return [String]
1678
- attr_accessor :main_jar_file_uri
1679
-
1680
1717
  def initialize(**args)
1681
1718
  update!(**args)
1682
1719
  end
1683
1720
 
1684
1721
  # Update properties of this object
1685
1722
  def update!(**args)
1723
+ @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
1724
+ @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
1725
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1686
1726
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1687
1727
  @logging_config = args[:logging_config] if args.key?(:logging_config)
1688
1728
  @properties = args[:properties] if args.key?(:properties)
1689
1729
  @args = args[:args] if args.key?(:args)
1690
1730
  @file_uris = args[:file_uris] if args.key?(:file_uris)
1691
- @main_class = args[:main_class] if args.key?(:main_class)
1692
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1693
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
1694
- end
1695
- end
1696
-
1697
- # Cloud Dataproc job status.
1698
- class JobStatus
1699
- include Google::Apis::Core::Hashable
1700
-
1701
- # Output-only A state message specifying the overall job state.
1702
- # Corresponds to the JSON property `state`
1703
- # @return [String]
1704
- attr_accessor :state
1705
-
1706
- # Output-only Optional job state details, such as an error description if the
1707
- # state is <code>ERROR</code>.
1708
- # Corresponds to the JSON property `details`
1709
- # @return [String]
1710
- attr_accessor :details
1711
-
1712
- # Output-only The time when this state was entered.
1713
- # Corresponds to the JSON property `stateStartTime`
1714
- # @return [String]
1715
- attr_accessor :state_start_time
1716
-
1717
- def initialize(**args)
1718
- update!(**args)
1719
- end
1720
-
1721
- # Update properties of this object
1722
- def update!(**args)
1723
- @state = args[:state] if args.key?(:state)
1724
- @details = args[:details] if args.key?(:details)
1725
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1726
1731
  end
1727
1732
  end
1728
1733
 
1729
- # Specifies the resources used to actively manage an instance group.
1730
- class ManagedGroupConfig
1734
+ # Common config settings for resources of Google Compute Engine cluster
1735
+ # instances, applicable to all instances in the cluster.
1736
+ class GceClusterConfig
1731
1737
  include Google::Apis::Core::Hashable
1732
1738
 
1733
- # Output-only The name of the Instance Group Manager for this group.
1734
- # Corresponds to the JSON property `instanceGroupManagerName`
1739
+ # Optional The Google Compute Engine network to be used for machine
1740
+ # communications. Cannot be specified with subnetwork_uri. If neither
1741
+ # network_uri nor subnetwork_uri is specified, the "default" network of the
1742
+ # project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using
1743
+ # Subnetworks for more information). Example: https://www.googleapis.com/compute/
1744
+ # v1/projects/[project_id]/regions/global/default.
1745
+ # Corresponds to the JSON property `networkUri`
1735
1746
  # @return [String]
1736
- attr_accessor :instance_group_manager_name
1747
+ attr_accessor :network_uri
1737
1748
 
1738
- # Output-only The name of the Instance Template used for the Managed Instance
1739
- # Group.
1740
- # Corresponds to the JSON property `instanceTemplateName`
1749
+ # Required The zone where the Google Compute Engine cluster will be located.
1750
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
1751
+ # zone].
1752
+ # Corresponds to the JSON property `zoneUri`
1741
1753
  # @return [String]
1742
- attr_accessor :instance_template_name
1743
-
1744
- def initialize(**args)
1745
- update!(**args)
1746
- end
1754
+ attr_accessor :zone_uri
1747
1755
 
1748
- # Update properties of this object
1749
- def update!(**args)
1750
- @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
1751
- @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
1752
- end
1753
- end
1756
+ # The Google Compute Engine metadata entries to add to all instances (see
1757
+ # Project and instance metadata (https://cloud.google.com/compute/docs/storing-
1758
+ # retrieving-metadata#project_and_instance_metadata)).
1759
+ # Corresponds to the JSON property `metadata`
1760
+ # @return [Hash<String,String>]
1761
+ attr_accessor :metadata
1754
1762
 
1755
- # The status of the operation.
1756
- class ClusterOperationStatus
1757
- include Google::Apis::Core::Hashable
1763
+ # Optional If true, all instances in the cluster will only have internal IP
1764
+ # addresses. By default, clusters are not restricted to internal IP addresses,
1765
+ # and will have ephemeral external IP addresses assigned to each instance. This
1766
+ # internal_ip_only restriction can only be enabled for subnetwork enabled
1767
+ # networks, and all off-cluster dependencies must be configured to be accessible
1768
+ # without external IP addresses.
1769
+ # Corresponds to the JSON property `internalIpOnly`
1770
+ # @return [Boolean]
1771
+ attr_accessor :internal_ip_only
1772
+ alias_method :internal_ip_only?, :internal_ip_only
1758
1773
 
1759
- # Output-only A message containing the operation state.
1760
- # Corresponds to the JSON property `state`
1761
- # @return [String]
1762
- attr_accessor :state
1774
+ # Optional The URIs of service account scopes to be included in Google Compute
1775
+ # Engine instances. The following base set of scopes is always included:
1776
+ # https://www.googleapis.com/auth/cloud.useraccounts.readonly
1777
+ # https://www.googleapis.com/auth/devstorage.read_write
1778
+ # https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the
1779
+ # following defaults are also provided:
1780
+ # https://www.googleapis.com/auth/bigquery
1781
+ # https://www.googleapis.com/auth/bigtable.admin.table
1782
+ # https://www.googleapis.com/auth/bigtable.data
1783
+ # https://www.googleapis.com/auth/devstorage.full_control
1784
+ # Corresponds to the JSON property `serviceAccountScopes`
1785
+ # @return [Array<String>]
1786
+ attr_accessor :service_account_scopes
1763
1787
 
1764
- # Output-onlyA message containing any operation metadata details.
1765
- # Corresponds to the JSON property `details`
1766
- # @return [String]
1767
- attr_accessor :details
1788
+ # The Google Compute Engine tags to add to all instances (see Tagging instances).
1789
+ # Corresponds to the JSON property `tags`
1790
+ # @return [Array<String>]
1791
+ attr_accessor :tags
1768
1792
 
1769
- # Output-only A message containing the detailed operation state.
1770
- # Corresponds to the JSON property `innerState`
1793
+ # Optional The service account of the instances. Defaults to the default Google
1794
+ # Compute Engine service account. Custom service accounts need permissions
1795
+ # equivalent to the folloing IAM roles:
1796
+ # roles/logging.logWriter
1797
+ # roles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/
1798
+ # service-accounts#custom_service_accounts for more information). Example: [
1799
+ # account_id]@[project_id].iam.gserviceaccount.com
1800
+ # Corresponds to the JSON property `serviceAccount`
1771
1801
  # @return [String]
1772
- attr_accessor :inner_state
1802
+ attr_accessor :service_account
1773
1803
 
1774
- # Output-only The time this state was entered.
1775
- # Corresponds to the JSON property `stateStartTime`
1804
+ # Optional The Google Compute Engine subnetwork to be used for machine
1805
+ # communications. Cannot be specified with network_uri. Example: https://www.
1806
+ # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
1807
+ # Corresponds to the JSON property `subnetworkUri`
1776
1808
  # @return [String]
1777
- attr_accessor :state_start_time
1809
+ attr_accessor :subnetwork_uri
1778
1810
 
1779
1811
  def initialize(**args)
1780
1812
  update!(**args)
@@ -1782,10 +1814,14 @@ module Google
1782
1814
 
1783
1815
  # Update properties of this object
1784
1816
  def update!(**args)
1785
- @state = args[:state] if args.key?(:state)
1786
- @details = args[:details] if args.key?(:details)
1787
- @inner_state = args[:inner_state] if args.key?(:inner_state)
1788
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1817
+ @network_uri = args[:network_uri] if args.key?(:network_uri)
1818
+ @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
1819
+ @metadata = args[:metadata] if args.key?(:metadata)
1820
+ @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
1821
+ @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
1822
+ @tags = args[:tags] if args.key?(:tags)
1823
+ @service_account = args[:service_account] if args.key?(:service_account)
1824
+ @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
1789
1825
  end
1790
1826
  end
1791
1827
  end