google-api-client 0.9.8 → 0.10.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (320) hide show
  1. checksums.yaml +4 -4
  2. data/.travis.yml +15 -8
  3. data/CHANGELOG.md +84 -0
  4. data/Gemfile +10 -2
  5. data/README.md +10 -2
  6. data/api_names.yaml +37172 -135
  7. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +35 -0
  8. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +1200 -0
  9. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +526 -0
  10. data/generated/google/apis/adexchangebuyer2_v2beta1/service.rb +844 -0
  11. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  12. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +310 -28
  13. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +84 -0
  14. data/generated/google/apis/adexchangebuyer_v1_4/service.rb +60 -8
  15. data/generated/google/apis/adexchangeseller_v2_0.rb +2 -3
  16. data/generated/google/apis/adexchangeseller_v2_0/service.rb +1 -2
  17. data/generated/google/apis/admin_directory_v1.rb +1 -1
  18. data/generated/google/apis/admin_directory_v1/classes.rb +206 -0
  19. data/generated/google/apis/admin_directory_v1/representations.rb +63 -0
  20. data/generated/google/apis/admin_directory_v1/service.rb +41 -2
  21. data/generated/google/apis/admin_reports_v1.rb +5 -5
  22. data/generated/google/apis/admin_reports_v1/service.rb +6 -6
  23. data/generated/google/apis/adsense_v1_4.rb +1 -1
  24. data/generated/google/apis/adsense_v1_4/classes.rb +1 -1
  25. data/generated/google/apis/adsensehost_v4_1.rb +1 -1
  26. data/generated/google/apis/adsensehost_v4_1/classes.rb +1 -2
  27. data/generated/google/apis/analytics_v3.rb +1 -1
  28. data/generated/google/apis/analytics_v3/classes.rb +379 -3
  29. data/generated/google/apis/analytics_v3/representations.rb +136 -0
  30. data/generated/google/apis/analytics_v3/service.rb +256 -0
  31. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  32. data/generated/google/apis/analyticsreporting_v4/classes.rb +858 -823
  33. data/generated/google/apis/analyticsreporting_v4/representations.rb +194 -193
  34. data/generated/google/apis/analyticsreporting_v4/service.rb +4 -4
  35. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  36. data/generated/google/apis/androidenterprise_v1/classes.rb +913 -220
  37. data/generated/google/apis/androidenterprise_v1/representations.rb +337 -30
  38. data/generated/google/apis/androidenterprise_v1/service.rb +1163 -561
  39. data/generated/google/apis/androidpublisher_v2.rb +1 -1
  40. data/generated/google/apis/androidpublisher_v2/classes.rb +230 -8
  41. data/generated/google/apis/androidpublisher_v2/representations.rb +90 -0
  42. data/generated/google/apis/androidpublisher_v2/service.rb +116 -3
  43. data/generated/google/apis/appengine_v1beta5.rb +7 -1
  44. data/generated/google/apis/appengine_v1beta5/classes.rb +810 -395
  45. data/generated/google/apis/appengine_v1beta5/representations.rb +188 -22
  46. data/generated/google/apis/appengine_v1beta5/service.rb +343 -39
  47. data/generated/google/apis/appsactivity_v1.rb +3 -3
  48. data/generated/google/apis/appsactivity_v1/classes.rb +7 -0
  49. data/generated/google/apis/appsactivity_v1/representations.rb +1 -0
  50. data/generated/google/apis/appsactivity_v1/service.rb +1 -1
  51. data/generated/google/apis/appstate_v1.rb +1 -1
  52. data/generated/google/apis/bigquery_v2.rb +1 -1
  53. data/generated/google/apis/bigquery_v2/classes.rb +360 -38
  54. data/generated/google/apis/bigquery_v2/representations.rb +106 -0
  55. data/generated/google/apis/bigquery_v2/service.rb +18 -4
  56. data/generated/google/apis/books_v1.rb +2 -2
  57. data/generated/google/apis/books_v1/classes.rb +83 -0
  58. data/generated/google/apis/books_v1/representations.rb +35 -0
  59. data/generated/google/apis/books_v1/service.rb +7 -3
  60. data/generated/google/apis/calendar_v3.rb +1 -1
  61. data/generated/google/apis/calendar_v3/classes.rb +208 -5
  62. data/generated/google/apis/calendar_v3/representations.rb +97 -0
  63. data/generated/google/apis/civicinfo_v2.rb +3 -2
  64. data/generated/google/apis/civicinfo_v2/classes.rb +377 -5
  65. data/generated/google/apis/civicinfo_v2/representations.rb +149 -0
  66. data/generated/google/apis/civicinfo_v2/service.rb +27 -6
  67. data/generated/google/apis/classroom_v1.rb +22 -25
  68. data/generated/google/apis/classroom_v1/classes.rb +998 -761
  69. data/generated/google/apis/classroom_v1/representations.rb +263 -198
  70. data/generated/google/apis/classroom_v1/service.rb +1349 -672
  71. data/generated/google/apis/cloudbilling_v1.rb +3 -3
  72. data/generated/google/apis/cloudbilling_v1/classes.rb +76 -75
  73. data/generated/google/apis/cloudbilling_v1/representations.rb +17 -17
  74. data/generated/google/apis/cloudbilling_v1/service.rb +117 -110
  75. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  76. data/generated/google/apis/cloudbuild_v1/classes.rb +661 -264
  77. data/generated/google/apis/cloudbuild_v1/representations.rb +198 -48
  78. data/generated/google/apis/cloudbuild_v1/service.rb +259 -44
  79. data/generated/google/apis/clouddebugger_v2.rb +5 -8
  80. data/generated/google/apis/clouddebugger_v2/classes.rb +686 -616
  81. data/generated/google/apis/clouddebugger_v2/representations.rb +151 -151
  82. data/generated/google/apis/clouddebugger_v2/service.rb +165 -159
  83. data/generated/google/apis/cloudkms_v1.rb +35 -0
  84. data/generated/google/apis/cloudkms_v1/classes.rb +1022 -0
  85. data/generated/google/apis/cloudkms_v1/representations.rb +448 -0
  86. data/generated/google/apis/cloudkms_v1/service.rb +933 -0
  87. data/generated/google/apis/cloudkms_v1beta1.rb +35 -0
  88. data/generated/google/apis/cloudkms_v1beta1/classes.rb +1039 -0
  89. data/generated/google/apis/cloudkms_v1beta1/representations.rb +448 -0
  90. data/generated/google/apis/cloudkms_v1beta1/service.rb +933 -0
  91. data/generated/google/apis/cloudmonitoring_v2beta2.rb +1 -1
  92. data/generated/google/apis/cloudresourcemanager_v1.rb +4 -4
  93. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +1529 -167
  94. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +491 -26
  95. data/generated/google/apis/cloudresourcemanager_v1/service.rb +1593 -135
  96. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  97. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +1155 -235
  98. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +344 -44
  99. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +702 -310
  100. data/generated/google/apis/cloudtrace_v1.rb +9 -9
  101. data/generated/google/apis/cloudtrace_v1/classes.rb +98 -92
  102. data/generated/google/apis/cloudtrace_v1/representations.rb +24 -24
  103. data/generated/google/apis/cloudtrace_v1/service.rb +42 -37
  104. data/generated/google/apis/compute_beta.rb +1 -1
  105. data/generated/google/apis/compute_beta/classes.rb +7007 -3648
  106. data/generated/google/apis/compute_beta/representations.rb +1459 -120
  107. data/generated/google/apis/compute_beta/service.rb +7943 -3726
  108. data/generated/google/apis/compute_v1.rb +1 -1
  109. data/generated/google/apis/compute_v1/classes.rb +2567 -370
  110. data/generated/google/apis/compute_v1/representations.rb +855 -0
  111. data/generated/google/apis/compute_v1/service.rb +6388 -2908
  112. data/generated/google/apis/container_v1.rb +1 -1
  113. data/generated/google/apis/container_v1/classes.rb +287 -16
  114. data/generated/google/apis/container_v1/representations.rb +113 -2
  115. data/generated/google/apis/container_v1/service.rb +130 -0
  116. data/generated/google/apis/content_v2.rb +1 -1
  117. data/generated/google/apis/content_v2/classes.rb +778 -10
  118. data/generated/google/apis/content_v2/representations.rb +339 -0
  119. data/generated/google/apis/content_v2/service.rb +364 -49
  120. data/generated/google/apis/dataflow_v1b3.rb +37 -0
  121. data/generated/google/apis/dataflow_v1b3/classes.rb +4941 -0
  122. data/generated/google/apis/dataflow_v1b3/representations.rb +2037 -0
  123. data/generated/google/apis/dataflow_v1b3/service.rb +957 -0
  124. data/generated/google/apis/dataproc_v1.rb +1 -1
  125. data/generated/google/apis/dataproc_v1/classes.rb +1235 -963
  126. data/generated/google/apis/dataproc_v1/representations.rb +299 -225
  127. data/generated/google/apis/dataproc_v1/service.rb +277 -192
  128. data/generated/google/apis/datastore_v1.rb +38 -0
  129. data/generated/google/apis/datastore_v1/classes.rb +1289 -0
  130. data/generated/google/apis/datastore_v1/representations.rb +572 -0
  131. data/generated/google/apis/datastore_v1/service.rb +259 -0
  132. data/generated/google/apis/datastore_v1beta3.rb +38 -0
  133. data/generated/google/apis/datastore_v1beta3/classes.rb +1284 -0
  134. data/generated/google/apis/datastore_v1beta3/representations.rb +572 -0
  135. data/generated/google/apis/datastore_v1beta3/service.rb +259 -0
  136. data/generated/google/apis/deploymentmanager_v2.rb +1 -1
  137. data/generated/google/apis/deploymentmanager_v2/classes.rb +436 -3
  138. data/generated/google/apis/deploymentmanager_v2/representations.rb +178 -0
  139. data/generated/google/apis/deploymentmanager_v2/service.rb +226 -61
  140. data/generated/google/apis/dfareporting_v2_5.rb +40 -0
  141. data/generated/google/apis/dfareporting_v2_5/classes.rb +11225 -0
  142. data/generated/google/apis/dfareporting_v2_5/representations.rb +3982 -0
  143. data/generated/google/apis/dfareporting_v2_5/service.rb +8755 -0
  144. data/generated/google/apis/dfareporting_v2_6.rb +40 -0
  145. data/generated/google/apis/dfareporting_v2_6/classes.rb +11586 -0
  146. data/generated/google/apis/dfareporting_v2_6/representations.rb +4119 -0
  147. data/generated/google/apis/dfareporting_v2_6/service.rb +9025 -0
  148. data/generated/google/apis/dfareporting_v2_7.rb +40 -0
  149. data/generated/google/apis/dfareporting_v2_7/classes.rb +11876 -0
  150. data/generated/google/apis/dfareporting_v2_7/representations.rb +4243 -0
  151. data/generated/google/apis/dfareporting_v2_7/service.rb +9095 -0
  152. data/generated/google/apis/dns_v1.rb +1 -1
  153. data/generated/google/apis/dns_v1/classes.rb +1 -1
  154. data/generated/google/apis/dns_v2beta1.rb +43 -0
  155. data/generated/google/apis/dns_v2beta1/classes.rb +915 -0
  156. data/generated/google/apis/dns_v2beta1/representations.rb +368 -0
  157. data/generated/google/apis/dns_v2beta1/service.rb +768 -0
  158. data/generated/google/apis/doubleclickbidmanager_v1.rb +1 -1
  159. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +79 -86
  160. data/generated/google/apis/doubleclickbidmanager_v1/representations.rb +25 -28
  161. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +10 -8
  162. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  163. data/generated/google/apis/doubleclicksearch_v2/classes.rb +6 -4
  164. data/generated/google/apis/drive_v2.rb +1 -1
  165. data/generated/google/apis/drive_v2/classes.rb +595 -37
  166. data/generated/google/apis/drive_v2/representations.rb +140 -0
  167. data/generated/google/apis/drive_v2/service.rb +406 -40
  168. data/generated/google/apis/drive_v3.rb +1 -1
  169. data/generated/google/apis/drive_v3/classes.rb +507 -40
  170. data/generated/google/apis/drive_v3/representations.rb +105 -0
  171. data/generated/google/apis/drive_v3/service.rb +326 -29
  172. data/generated/google/apis/fitness_v1.rb +38 -2
  173. data/generated/google/apis/fitness_v1/classes.rb +65 -7
  174. data/generated/google/apis/fitness_v1/representations.rb +20 -0
  175. data/generated/google/apis/fitness_v1/service.rb +17 -18
  176. data/generated/google/apis/fusiontables_v2.rb +1 -1
  177. data/generated/google/apis/fusiontables_v2/classes.rb +4 -3
  178. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  179. data/generated/google/apis/games_management_v1management.rb +1 -1
  180. data/generated/google/apis/games_v1.rb +1 -1
  181. data/generated/google/apis/genomics_v1.rb +10 -12
  182. data/generated/google/apis/genomics_v1/classes.rb +2276 -2165
  183. data/generated/google/apis/genomics_v1/representations.rb +468 -496
  184. data/generated/google/apis/genomics_v1/service.rb +978 -882
  185. data/generated/google/apis/gmail_v1.rb +11 -2
  186. data/generated/google/apis/gmail_v1/classes.rb +655 -5
  187. data/generated/google/apis/gmail_v1/representations.rb +261 -0
  188. data/generated/google/apis/gmail_v1/service.rb +1211 -48
  189. data/generated/google/apis/groupssettings_v1.rb +2 -2
  190. data/generated/google/apis/groupssettings_v1/classes.rb +12 -0
  191. data/generated/google/apis/groupssettings_v1/representations.rb +2 -0
  192. data/generated/google/apis/iam_v1.rb +2 -2
  193. data/generated/google/apis/iam_v1/classes.rb +446 -368
  194. data/generated/google/apis/iam_v1/representations.rb +109 -91
  195. data/generated/google/apis/iam_v1/service.rb +249 -162
  196. data/generated/google/apis/identitytoolkit_v3.rb +7 -1
  197. data/generated/google/apis/identitytoolkit_v3/classes.rb +254 -2
  198. data/generated/google/apis/identitytoolkit_v3/representations.rb +38 -0
  199. data/generated/google/apis/kgsearch_v1.rb +2 -3
  200. data/generated/google/apis/kgsearch_v1/classes.rb +13 -13
  201. data/generated/google/apis/kgsearch_v1/representations.rb +2 -2
  202. data/generated/google/apis/kgsearch_v1/service.rb +26 -24
  203. data/generated/google/apis/language_v1beta1.rb +36 -0
  204. data/generated/google/apis/language_v1beta1/classes.rb +757 -0
  205. data/generated/google/apis/language_v1beta1/representations.rb +339 -0
  206. data/generated/google/apis/language_v1beta1/service.rb +185 -0
  207. data/generated/google/apis/licensing_v1.rb +3 -3
  208. data/generated/google/apis/licensing_v1/classes.rb +14 -2
  209. data/generated/google/apis/licensing_v1/representations.rb +2 -0
  210. data/generated/google/apis/licensing_v1/service.rb +1 -1
  211. data/generated/google/apis/logging_v2beta1.rb +6 -6
  212. data/generated/google/apis/logging_v2beta1/classes.rb +900 -775
  213. data/generated/google/apis/logging_v2beta1/representations.rb +163 -142
  214. data/generated/google/apis/logging_v2beta1/service.rb +427 -155
  215. data/generated/google/apis/manufacturers_v1.rb +34 -0
  216. data/generated/google/apis/manufacturers_v1/classes.rb +605 -0
  217. data/generated/google/apis/manufacturers_v1/representations.rb +223 -0
  218. data/generated/google/apis/manufacturers_v1/service.rb +138 -0
  219. data/generated/google/apis/mirror_v1.rb +2 -2
  220. data/generated/google/apis/mirror_v1/service.rb +1 -1
  221. data/generated/google/apis/monitoring_v3.rb +45 -0
  222. data/generated/google/apis/monitoring_v3/classes.rb +1333 -0
  223. data/generated/google/apis/monitoring_v3/representations.rb +516 -0
  224. data/generated/google/apis/monitoring_v3/service.rb +706 -0
  225. data/generated/google/apis/oauth2_v2.rb +1 -1
  226. data/generated/google/apis/pagespeedonline_v2.rb +1 -1
  227. data/generated/google/apis/partners_v2.rb +3 -3
  228. data/generated/google/apis/partners_v2/classes.rb +1831 -477
  229. data/generated/google/apis/partners_v2/representations.rb +571 -114
  230. data/generated/google/apis/partners_v2/service.rb +898 -167
  231. data/generated/google/apis/people_v1.rb +15 -16
  232. data/generated/google/apis/people_v1/classes.rb +1047 -859
  233. data/generated/google/apis/people_v1/representations.rb +252 -213
  234. data/generated/google/apis/people_v1/service.rb +75 -62
  235. data/generated/google/apis/plus_domains_v1.rb +1 -1
  236. data/generated/google/apis/plus_v1.rb +1 -1
  237. data/generated/google/apis/proximitybeacon_v1beta1.rb +1 -1
  238. data/generated/google/apis/proximitybeacon_v1beta1/classes.rb +527 -456
  239. data/generated/google/apis/proximitybeacon_v1beta1/representations.rb +101 -100
  240. data/generated/google/apis/proximitybeacon_v1beta1/service.rb +467 -343
  241. data/generated/google/apis/pubsub_v1.rb +1 -1
  242. data/generated/google/apis/pubsub_v1/classes.rb +363 -310
  243. data/generated/google/apis/pubsub_v1/representations.rb +76 -76
  244. data/generated/google/apis/pubsub_v1/service.rb +464 -335
  245. data/generated/google/apis/qpx_express_v1.rb +1 -1
  246. data/generated/google/apis/qpx_express_v1/classes.rb +6 -0
  247. data/generated/google/apis/qpx_express_v1/representations.rb +1 -0
  248. data/generated/google/apis/replicapool_v1beta2.rb +1 -1
  249. data/generated/google/apis/replicapoolupdater_v1beta1.rb +4 -2
  250. data/generated/google/apis/replicapoolupdater_v1beta1/service.rb +3 -1
  251. data/generated/google/apis/reseller_v1.rb +1 -1
  252. data/generated/google/apis/reseller_v1/classes.rb +222 -61
  253. data/generated/google/apis/reseller_v1/representations.rb +28 -0
  254. data/generated/google/apis/reseller_v1/service.rb +240 -47
  255. data/generated/google/apis/script_v1.rb +19 -19
  256. data/generated/google/apis/script_v1/classes.rb +151 -137
  257. data/generated/google/apis/script_v1/representations.rb +26 -26
  258. data/generated/google/apis/script_v1/service.rb +9 -8
  259. data/generated/google/apis/sheets_v4.rb +7 -7
  260. data/generated/google/apis/sheets_v4/classes.rb +4530 -3506
  261. data/generated/google/apis/sheets_v4/representations.rb +988 -737
  262. data/generated/google/apis/sheets_v4/service.rb +266 -99
  263. data/generated/google/apis/site_verification_v1.rb +1 -1
  264. data/generated/google/apis/slides_v1.rb +49 -0
  265. data/generated/google/apis/slides_v1/classes.rb +4480 -0
  266. data/generated/google/apis/slides_v1/representations.rb +1926 -0
  267. data/generated/google/apis/slides_v1/service.rb +245 -0
  268. data/generated/google/apis/speech_v1beta1.rb +34 -0
  269. data/generated/google/apis/speech_v1beta1/classes.rb +498 -0
  270. data/generated/google/apis/speech_v1beta1/representations.rb +206 -0
  271. data/generated/google/apis/speech_v1beta1/service.rb +270 -0
  272. data/generated/google/apis/sqladmin_v1beta4.rb +1 -1
  273. data/generated/google/apis/sqladmin_v1beta4/classes.rb +131 -10
  274. data/generated/google/apis/sqladmin_v1beta4/representations.rb +50 -0
  275. data/generated/google/apis/sqladmin_v1beta4/service.rb +92 -2
  276. data/generated/google/apis/storage_v1.rb +1 -1
  277. data/generated/google/apis/storage_v1/classes.rb +205 -21
  278. data/generated/google/apis/storage_v1/representations.rb +51 -1
  279. data/generated/google/apis/storage_v1/service.rb +263 -9
  280. data/generated/google/apis/tagmanager_v1.rb +5 -5
  281. data/generated/google/apis/translate_v2.rb +2 -2
  282. data/generated/google/apis/translate_v2/service.rb +1 -1
  283. data/generated/google/apis/vision_v1.rb +1 -1
  284. data/generated/google/apis/vision_v1/classes.rb +1275 -731
  285. data/generated/google/apis/vision_v1/representations.rb +378 -111
  286. data/generated/google/apis/vision_v1/service.rb +4 -4
  287. data/generated/google/apis/webmasters_v3.rb +1 -1
  288. data/generated/google/apis/youtube_analytics_v1.rb +1 -1
  289. data/generated/google/apis/youtube_analytics_v1/classes.rb +0 -209
  290. data/generated/google/apis/youtube_analytics_v1/representations.rb +0 -99
  291. data/generated/google/apis/youtube_analytics_v1/service.rb +5 -77
  292. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  293. data/generated/google/apis/youtube_partner_v1/classes.rb +155 -0
  294. data/generated/google/apis/youtube_partner_v1/representations.rb +69 -0
  295. data/generated/google/apis/youtube_partner_v1/service.rb +107 -3
  296. data/generated/google/apis/youtube_v3.rb +1 -1
  297. data/generated/google/apis/youtube_v3/classes.rb +430 -22
  298. data/generated/google/apis/youtube_v3/representations.rb +171 -4
  299. data/generated/google/apis/youtube_v3/service.rb +100 -9
  300. data/generated/google/apis/youtubereporting_v1.rb +4 -4
  301. data/generated/google/apis/youtubereporting_v1/classes.rb +140 -131
  302. data/generated/google/apis/youtubereporting_v1/representations.rb +37 -37
  303. data/generated/google/apis/youtubereporting_v1/service.rb +139 -135
  304. data/google-api-client.gemspec +4 -3
  305. data/lib/google/apis/core/api_command.rb +16 -6
  306. data/lib/google/apis/core/base_service.rb +17 -9
  307. data/lib/google/apis/core/download.rb +7 -2
  308. data/lib/google/apis/core/http_command.rb +5 -1
  309. data/lib/google/apis/core/json_representation.rb +2 -2
  310. data/lib/google/apis/core/upload.rb +7 -1
  311. data/lib/google/apis/errors.rb +6 -2
  312. data/lib/google/apis/generator/annotator.rb +6 -0
  313. data/lib/google/apis/version.rb +1 -1
  314. data/samples/cli/lib/samples/bigquery.rb +101 -0
  315. data/samples/cli/lib/samples/calendar.rb +1 -1
  316. data/samples/cli/lib/samples/gmail.rb +74 -0
  317. data/samples/cli/lib/samples/sheets.rb +61 -0
  318. data/samples/cli/lib/samples/you_tube.rb +4 -1
  319. data/sync.rb +71 -0
  320. metadata +86 -18
@@ -25,7 +25,7 @@ module Google
25
25
  # @see https://cloud.google.com/dataproc/
26
26
  module DataprocV1
27
27
  VERSION = 'V1'
28
- REVISION = '20160503'
28
+ REVISION = '20170321'
29
29
 
30
30
  # View and manage your data across Google Cloud Platform services
31
31
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -22,42 +22,74 @@ module Google
22
22
  module Apis
23
23
  module DataprocV1
24
24
 
25
- # Describes the identifying information, config, and status of a cluster of
26
- # Google Compute Engine instances.
27
- class Cluster
25
+ # Metadata describing the operation.
26
+ class OperationMetadata
28
27
  include Google::Apis::Core::Hashable
29
28
 
30
- # [Required] The Google Cloud Platform project ID that the cluster belongs to.
31
- # Corresponds to the JSON property `projectId`
29
+ # Output-only Short description of operation.
30
+ # Corresponds to the JSON property `description`
32
31
  # @return [String]
33
- attr_accessor :project_id
32
+ attr_accessor :description
34
33
 
35
- # [Required] The cluster name. Cluster names within a project must be unique.
36
- # Names of deleted clusters can be reused.
34
+ # The status of the operation.
35
+ # Corresponds to the JSON property `status`
36
+ # @return [Google::Apis::DataprocV1::OperationStatus]
37
+ attr_accessor :status
38
+
39
+ # A message containing any operation metadata details.
40
+ # Corresponds to the JSON property `details`
41
+ # @return [String]
42
+ attr_accessor :details
43
+
44
+ # A message containing the operation state.
45
+ # Corresponds to the JSON property `state`
46
+ # @return [String]
47
+ attr_accessor :state
48
+
49
+ # Name of the cluster for the operation.
37
50
  # Corresponds to the JSON property `clusterName`
38
51
  # @return [String]
39
52
  attr_accessor :cluster_name
40
53
 
41
- # The cluster config.
42
- # Corresponds to the JSON property `config`
43
- # @return [Google::Apis::DataprocV1::ClusterConfig]
44
- attr_accessor :config
54
+ # Cluster UUID for the operation.
55
+ # Corresponds to the JSON property `clusterUuid`
56
+ # @return [String]
57
+ attr_accessor :cluster_uuid
45
58
 
46
- # The status of a cluster and its instances.
47
- # Corresponds to the JSON property `status`
48
- # @return [Google::Apis::DataprocV1::ClusterStatus]
49
- attr_accessor :status
59
+ # A message containing the detailed operation state.
60
+ # Corresponds to the JSON property `innerState`
61
+ # @return [String]
62
+ attr_accessor :inner_state
63
+
64
+ # The time that the operation completed.
65
+ # Corresponds to the JSON property `endTime`
66
+ # @return [String]
67
+ attr_accessor :end_time
50
68
 
51
- # [Output-only] The previous cluster status.
69
+ # The time that the operation was started by the server.
70
+ # Corresponds to the JSON property `startTime`
71
+ # @return [String]
72
+ attr_accessor :start_time
73
+
74
+ # Output-only Errors encountered during operation execution.
75
+ # Corresponds to the JSON property `warnings`
76
+ # @return [Array<String>]
77
+ attr_accessor :warnings
78
+
79
+ # The time that the operation was requested.
80
+ # Corresponds to the JSON property `insertTime`
81
+ # @return [String]
82
+ attr_accessor :insert_time
83
+
84
+ # Output-only Previous operation status.
52
85
  # Corresponds to the JSON property `statusHistory`
53
- # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
86
+ # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
54
87
  attr_accessor :status_history
55
88
 
56
- # [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc
57
- # generates this value when it creates the cluster.
58
- # Corresponds to the JSON property `clusterUuid`
89
+ # Output-only The operation type.
90
+ # Corresponds to the JSON property `operationType`
59
91
  # @return [String]
60
- attr_accessor :cluster_uuid
92
+ attr_accessor :operation_type
61
93
 
62
94
  def initialize(**args)
63
95
  update!(**args)
@@ -65,67 +97,72 @@ module Google
65
97
 
66
98
  # Update properties of this object
67
99
  def update!(**args)
68
- @project_id = args[:project_id] if args.key?(:project_id)
69
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
70
- @config = args[:config] if args.key?(:config)
100
+ @description = args[:description] if args.key?(:description)
71
101
  @status = args[:status] if args.key?(:status)
72
- @status_history = args[:status_history] if args.key?(:status_history)
102
+ @details = args[:details] if args.key?(:details)
103
+ @state = args[:state] if args.key?(:state)
104
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
73
105
  @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
106
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
107
+ @end_time = args[:end_time] if args.key?(:end_time)
108
+ @start_time = args[:start_time] if args.key?(:start_time)
109
+ @warnings = args[:warnings] if args.key?(:warnings)
110
+ @insert_time = args[:insert_time] if args.key?(:insert_time)
111
+ @status_history = args[:status_history] if args.key?(:status_history)
112
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
74
113
  end
75
114
  end
76
115
 
77
- # The cluster config.
78
- class ClusterConfig
116
+ # Specifies the selection and config of software inside the cluster.
117
+ class SoftwareConfig
79
118
  include Google::Apis::Core::Hashable
80
119
 
81
- # [Optional] A Google Cloud Storage staging bucket used for sharing generated
82
- # SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc
83
- # will determine an appropriate Cloud Storage location (US, ASIA, or EU) for
84
- # your cluster's staging bucket according to the Google Compute Engine zone
85
- # where your cluster is deployed, and then it will create and manage this
86
- # project-level, per-location bucket for you.
87
- # Corresponds to the JSON property `configBucket`
120
+ # Optional The version of software inside the cluster. It must match the regular
121
+ # expression [0-9]+\.[0-9]+. If unspecified, it defaults to the latest version (
122
+ # see Cloud Dataproc Versioning).
123
+ # Corresponds to the JSON property `imageVersion`
88
124
  # @return [String]
89
- attr_accessor :config_bucket
125
+ attr_accessor :image_version
90
126
 
91
- # Common config settings for resources of Google Compute Engine cluster
92
- # instances, applicable to all instances in the cluster.
93
- # Corresponds to the JSON property `gceClusterConfig`
94
- # @return [Google::Apis::DataprocV1::GceClusterConfig]
95
- attr_accessor :gce_cluster_config
127
+ # Optional The properties to set on daemon config files.Property keys are
128
+ # specified in prefix:property format, such as core:fs.defaultFS. The following
129
+ # are supported prefixes and their mappings:
130
+ # core: core-site.xml
131
+ # hdfs: hdfs-site.xml
132
+ # mapred: mapred-site.xml
133
+ # yarn: yarn-site.xml
134
+ # hive: hive-site.xml
135
+ # pig: pig.properties
136
+ # spark: spark-defaults.conf
137
+ # Corresponds to the JSON property `properties`
138
+ # @return [Hash<String,String>]
139
+ attr_accessor :properties
96
140
 
97
- # The config settings for Google Compute Engine resources in an instance group,
98
- # such as a master or worker group.
99
- # Corresponds to the JSON property `masterConfig`
100
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
101
- attr_accessor :master_config
141
+ def initialize(**args)
142
+ update!(**args)
143
+ end
102
144
 
103
- # The config settings for Google Compute Engine resources in an instance group,
104
- # such as a master or worker group.
105
- # Corresponds to the JSON property `workerConfig`
106
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
107
- attr_accessor :worker_config
145
+ # Update properties of this object
146
+ def update!(**args)
147
+ @image_version = args[:image_version] if args.key?(:image_version)
148
+ @properties = args[:properties] if args.key?(:properties)
149
+ end
150
+ end
108
151
 
109
- # The config settings for Google Compute Engine resources in an instance group,
110
- # such as a master or worker group.
111
- # Corresponds to the JSON property `secondaryWorkerConfig`
112
- # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
113
- attr_accessor :secondary_worker_config
152
+ # Cloud Dataproc job config.
153
+ class JobPlacement
154
+ include Google::Apis::Core::Hashable
114
155
 
115
- # Specifies the selection and config of software inside the cluster.
116
- # Corresponds to the JSON property `softwareConfig`
117
- # @return [Google::Apis::DataprocV1::SoftwareConfig]
118
- attr_accessor :software_config
156
+ # Required The name of the cluster where the job will be submitted.
157
+ # Corresponds to the JSON property `clusterName`
158
+ # @return [String]
159
+ attr_accessor :cluster_name
119
160
 
120
- # [Optional] Commands to execute on each node after config is completed. By
121
- # default, executables are run on master and all worker nodes. You can test a
122
- # node's role metadata to run an executable on a master or worker node, as shown
123
- # below: ROLE=$(/usr/share/google/get_metadata_value attributes/role) if [[ "$`
124
- # ROLE`" == 'Master' ]]; then ... master specific actions ... else ... worker
125
- # specific actions ... fi
126
- # Corresponds to the JSON property `initializationActions`
127
- # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
128
- attr_accessor :initialization_actions
161
+ # Output-only A cluster UUID generated by the Cloud Dataproc service when the
162
+ # job is submitted.
163
+ # Corresponds to the JSON property `clusterUuid`
164
+ # @return [String]
165
+ attr_accessor :cluster_uuid
129
166
 
130
167
  def initialize(**args)
131
168
  update!(**args)
@@ -133,66 +170,58 @@ module Google
133
170
 
134
171
  # Update properties of this object
135
172
  def update!(**args)
136
- @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
137
- @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
138
- @master_config = args[:master_config] if args.key?(:master_config)
139
- @worker_config = args[:worker_config] if args.key?(:worker_config)
140
- @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
141
- @software_config = args[:software_config] if args.key?(:software_config)
142
- @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
173
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
174
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
143
175
  end
144
176
  end
145
177
 
146
- # Common config settings for resources of Google Compute Engine cluster
147
- # instances, applicable to all instances in the cluster.
148
- class GceClusterConfig
178
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
179
+ # on YARN.
180
+ class PigJob
149
181
  include Google::Apis::Core::Hashable
150
182
 
151
- # [Required] The zone where the Google Compute Engine cluster will be located.
152
- # Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
153
- # zone]`.
154
- # Corresponds to the JSON property `zoneUri`
155
- # @return [String]
156
- attr_accessor :zone_uri
183
+ # Optional Whether to continue executing queries if a query fails. The default
184
+ # value is false. Setting to true can be useful when executing independent
185
+ # parallel queries.
186
+ # Corresponds to the JSON property `continueOnFailure`
187
+ # @return [Boolean]
188
+ attr_accessor :continue_on_failure
189
+ alias_method :continue_on_failure?, :continue_on_failure
157
190
 
158
- # The Google Compute Engine network to be used for machine communications.
159
- # Cannot be specified with subnetwork_uri. If neither network_uri nor
160
- # subnetwork_uri is specified, the "default" network of the project is used, if
161
- # it exists. Cannot be a "Custom Subnet Network" (see https://cloud.google.com/
162
- # compute/docs/subnetworks for more information). Example: `https://www.
163
- # googleapis.com/compute/v1/projects/[project_id]/regions/global/default`.
164
- # Corresponds to the JSON property `networkUri`
191
+ # The HCFS URI of the script that contains the Pig queries.
192
+ # Corresponds to the JSON property `queryFileUri`
165
193
  # @return [String]
166
- attr_accessor :network_uri
194
+ attr_accessor :query_file_uri
167
195
 
168
- # The Google Compute Engine subnetwork to be used for machine communications.
169
- # Cannot be specified with network_uri. Example: `https://www.googleapis.com/
170
- # compute/v1/projects/[project_id]/regions/us-east1/sub0`.
171
- # Corresponds to the JSON property `subnetworkUri`
172
- # @return [String]
173
- attr_accessor :subnetwork_uri
196
+ # A list of queries to run on a cluster.
197
+ # Corresponds to the JSON property `queryList`
198
+ # @return [Google::Apis::DataprocV1::QueryList]
199
+ attr_accessor :query_list
174
200
 
175
- # The URIs of service account scopes to be included in Google Compute Engine
176
- # instances. The following base set of scopes is always included: * https://www.
177
- # googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/
178
- # auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If
179
- # no scopes are specfied, the following defaults are also provided: * https://
180
- # www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.
181
- # admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.
182
- # googleapis.com/auth/devstorage.full_control
183
- # Corresponds to the JSON property `serviceAccountScopes`
201
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and
202
+ # Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
203
+ # Corresponds to the JSON property `jarFileUris`
184
204
  # @return [Array<String>]
185
- attr_accessor :service_account_scopes
205
+ attr_accessor :jar_file_uris
186
206
 
187
- # The Google Compute Engine tags to add to all instances.
188
- # Corresponds to the JSON property `tags`
189
- # @return [Array<String>]
190
- attr_accessor :tags
207
+ # Optional Mapping of query variable names to values (equivalent to the Pig
208
+ # command: name=[value]).
209
+ # Corresponds to the JSON property `scriptVariables`
210
+ # @return [Hash<String,String>]
211
+ attr_accessor :script_variables
191
212
 
192
- # The Google Compute Engine metadata entries to add to all instances.
193
- # Corresponds to the JSON property `metadata`
213
+ # The runtime logging config of the job.
214
+ # Corresponds to the JSON property `loggingConfig`
215
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
216
+ attr_accessor :logging_config
217
+
218
+ # Optional A mapping of property names to values, used to configure Pig.
219
+ # Properties that conflict with values set by the Cloud Dataproc API may be
220
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
221
+ # pig/conf/pig.properties, and classes in user code.
222
+ # Corresponds to the JSON property `properties`
194
223
  # @return [Hash<String,String>]
195
- attr_accessor :metadata
224
+ attr_accessor :properties
196
225
 
197
226
  def initialize(**args)
198
227
  update!(**args)
@@ -200,61 +229,40 @@ module Google
200
229
 
201
230
  # Update properties of this object
202
231
  def update!(**args)
203
- @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
204
- @network_uri = args[:network_uri] if args.key?(:network_uri)
205
- @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
206
- @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
207
- @tags = args[:tags] if args.key?(:tags)
208
- @metadata = args[:metadata] if args.key?(:metadata)
232
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
233
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
234
+ @query_list = args[:query_list] if args.key?(:query_list)
235
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
236
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
237
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
238
+ @properties = args[:properties] if args.key?(:properties)
209
239
  end
210
240
  end
211
241
 
212
- # The config settings for Google Compute Engine resources in an instance group,
213
- # such as a master or worker group.
214
- class InstanceGroupConfig
242
+ # The status of a cluster and its instances.
243
+ class ClusterStatus
215
244
  include Google::Apis::Core::Hashable
216
245
 
217
- # The number of VM instances in the instance group. For master instance groups,
218
- # must be set to 1.
219
- # Corresponds to the JSON property `numInstances`
220
- # @return [Fixnum]
221
- attr_accessor :num_instances
222
-
223
- # The list of instance names. Cloud Dataproc derives the names from `
224
- # cluster_name`, `num_instances`, and the instance group if not set by user (
225
- # recommended practice is to let Cloud Dataproc derive the name).
226
- # Corresponds to the JSON property `instanceNames`
227
- # @return [Array<String>]
228
- attr_accessor :instance_names
229
-
230
- # [Output-only] The Google Compute Engine image resource used for cluster
231
- # instances. Inferred from `SoftwareConfig.image_version`.
232
- # Corresponds to the JSON property `imageUri`
246
+ # Output-only Optional details of cluster's state.
247
+ # Corresponds to the JSON property `detail`
233
248
  # @return [String]
234
- attr_accessor :image_uri
249
+ attr_accessor :detail
235
250
 
236
- # The Google Compute Engine machine type used for cluster instances. Example: `
237
- # https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/
238
- # machineTypes/n1-standard-2`.
239
- # Corresponds to the JSON property `machineTypeUri`
251
+ # Output-only The cluster's state.
252
+ # Corresponds to the JSON property `state`
240
253
  # @return [String]
241
- attr_accessor :machine_type_uri
242
-
243
- # Specifies the config of disk options for a group of VM instances.
244
- # Corresponds to the JSON property `diskConfig`
245
- # @return [Google::Apis::DataprocV1::DiskConfig]
246
- attr_accessor :disk_config
254
+ attr_accessor :state
247
255
 
248
- # Specifies that this instance group contains Preemptible Instances.
249
- # Corresponds to the JSON property `isPreemptible`
250
- # @return [Boolean]
251
- attr_accessor :is_preemptible
252
- alias_method :is_preemptible?, :is_preemptible
256
+ # Output-only Time when this state was entered.
257
+ # Corresponds to the JSON property `stateStartTime`
258
+ # @return [String]
259
+ attr_accessor :state_start_time
253
260
 
254
- # Specifies the resources used to actively manage an instance group.
255
- # Corresponds to the JSON property `managedGroupConfig`
256
- # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
257
- attr_accessor :managed_group_config
261
+ # Output-only Additional state information that includes status reported by the
262
+ # agent.
263
+ # Corresponds to the JSON property `substate`
264
+ # @return [String]
265
+ attr_accessor :substate
258
266
 
259
267
  def initialize(**args)
260
268
  update!(**args)
@@ -262,32 +270,28 @@ module Google
262
270
 
263
271
  # Update properties of this object
264
272
  def update!(**args)
265
- @num_instances = args[:num_instances] if args.key?(:num_instances)
266
- @instance_names = args[:instance_names] if args.key?(:instance_names)
267
- @image_uri = args[:image_uri] if args.key?(:image_uri)
268
- @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
269
- @disk_config = args[:disk_config] if args.key?(:disk_config)
270
- @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
271
- @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
273
+ @detail = args[:detail] if args.key?(:detail)
274
+ @state = args[:state] if args.key?(:state)
275
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
276
+ @substate = args[:substate] if args.key?(:substate)
272
277
  end
273
278
  end
274
279
 
275
- # Specifies the config of disk options for a group of VM instances.
276
- class DiskConfig
280
+ # The list of all clusters in a project.
281
+ class ListClustersResponse
277
282
  include Google::Apis::Core::Hashable
278
283
 
279
- # [Optional] Size in GB of the boot disk (default is 500GB).
280
- # Corresponds to the JSON property `bootDiskSizeGb`
281
- # @return [Fixnum]
282
- attr_accessor :boot_disk_size_gb
284
+ # Output-only The clusters in the project.
285
+ # Corresponds to the JSON property `clusters`
286
+ # @return [Array<Google::Apis::DataprocV1::Cluster>]
287
+ attr_accessor :clusters
283
288
 
284
- # [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are
285
- # not attached, the boot disk is used to store runtime logs and HDFS data. If
286
- # one or more SSDs are attached, this runtime bulk data is spread across them,
287
- # and the boot disk contains only basic config and installed binaries.
288
- # Corresponds to the JSON property `numLocalSsds`
289
- # @return [Fixnum]
290
- attr_accessor :num_local_ssds
289
+ # Output-only This token is included in the response if there are more results
290
+ # to fetch. To fetch additional results, provide this value as the page_token in
291
+ # a subsequent <code>ListClustersRequest</code>.
292
+ # Corresponds to the JSON property `nextPageToken`
293
+ # @return [String]
294
+ attr_accessor :next_page_token
291
295
 
292
296
  def initialize(**args)
293
297
  update!(**args)
@@ -295,25 +299,107 @@ module Google
295
299
 
296
300
  # Update properties of this object
297
301
  def update!(**args)
298
- @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
299
- @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
302
+ @clusters = args[:clusters] if args.key?(:clusters)
303
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
300
304
  end
301
305
  end
302
306
 
303
- # Specifies the resources used to actively manage an instance group.
304
- class ManagedGroupConfig
307
+ # A Cloud Dataproc job resource.
308
+ class Job
305
309
  include Google::Apis::Core::Hashable
306
310
 
307
- # [Output-only] The name of the Instance Template used for the Managed Instance
308
- # Group.
309
- # Corresponds to the JSON property `instanceTemplateName`
311
+ # Cloud Dataproc job status.
312
+ # Corresponds to the JSON property `status`
313
+ # @return [Google::Apis::DataprocV1::JobStatus]
314
+ attr_accessor :status
315
+
316
+ # Cloud Dataproc job config.
317
+ # Corresponds to the JSON property `placement`
318
+ # @return [Google::Apis::DataprocV1::JobPlacement]
319
+ attr_accessor :placement
320
+
321
+ # Output-only If present, the location of miscellaneous control files which may
322
+ # be used as part of job setup and handling. If not present, control files may
323
+ # be placed in the same location as driver_output_uri.
324
+ # Corresponds to the JSON property `driverControlFilesUri`
310
325
  # @return [String]
311
- attr_accessor :instance_template_name
326
+ attr_accessor :driver_control_files_uri
312
327
 
313
- # [Output-only] The name of the Instance Group Manager for this group.
314
- # Corresponds to the JSON property `instanceGroupManagerName`
328
+ # Job scheduling options.Beta Feature: These options are available for testing
329
+ # purposes only. They may be changed before final release.
330
+ # Corresponds to the JSON property `scheduling`
331
+ # @return [Google::Apis::DataprocV1::JobScheduling]
332
+ attr_accessor :scheduling
333
+
334
+ # A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries
335
+ # on YARN.
336
+ # Corresponds to the JSON property `pigJob`
337
+ # @return [Google::Apis::DataprocV1::PigJob]
338
+ attr_accessor :pig_job
339
+
340
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
341
+ # queries on YARN.
342
+ # Corresponds to the JSON property `hiveJob`
343
+ # @return [Google::Apis::DataprocV1::HiveJob]
344
+ attr_accessor :hive_job
345
+
346
+ # Optional The labels to associate with this job. Label keys must contain 1 to
347
+ # 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.
348
+ # txt). Label values may be empty, but, if present, must contain 1 to 63
349
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
350
+ # . No more than 32 labels can be associated with a job.
351
+ # Corresponds to the JSON property `labels`
352
+ # @return [Hash<String,String>]
353
+ attr_accessor :labels
354
+
355
+ # Output-only A URI pointing to the location of the stdout of the job's driver
356
+ # program.
357
+ # Corresponds to the JSON property `driverOutputResourceUri`
315
358
  # @return [String]
316
- attr_accessor :instance_group_manager_name
359
+ attr_accessor :driver_output_resource_uri
360
+
361
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
362
+ # applications on YARN.
363
+ # Corresponds to the JSON property `sparkJob`
364
+ # @return [Google::Apis::DataprocV1::SparkJob]
365
+ attr_accessor :spark_job
366
+
367
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
368
+ # ) queries.
369
+ # Corresponds to the JSON property `sparkSqlJob`
370
+ # @return [Google::Apis::DataprocV1::SparkSqlJob]
371
+ attr_accessor :spark_sql_job
372
+
373
+ # Output-only The previous job status.
374
+ # Corresponds to the JSON property `statusHistory`
375
+ # @return [Array<Google::Apis::DataprocV1::JobStatus>]
376
+ attr_accessor :status_history
377
+
378
+ # Output-only The collection of YARN applications spun up by this job.Beta
379
+ # Feature: This report is available for testing purposes only. It may be changed
380
+ # before final release.
381
+ # Corresponds to the JSON property `yarnApplications`
382
+ # @return [Array<Google::Apis::DataprocV1::YarnApplication>]
383
+ attr_accessor :yarn_applications
384
+
385
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
386
+ # 0.9.0/python-programming-guide.html) applications on YARN.
387
+ # Corresponds to the JSON property `pysparkJob`
388
+ # @return [Google::Apis::DataprocV1::PySparkJob]
389
+ attr_accessor :pyspark_job
390
+
391
+ # Encapsulates the full scoping used to reference a job.
392
+ # Corresponds to the JSON property `reference`
393
+ # @return [Google::Apis::DataprocV1::JobReference]
394
+ attr_accessor :reference
395
+
396
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
397
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
398
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
399
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
400
+ # Corresponds to the JSON property `hadoopJob`
401
+ # @return [Google::Apis::DataprocV1::HadoopJob]
402
+ attr_accessor :hadoop_job
317
403
 
318
404
  def initialize(**args)
319
405
  update!(**args)
@@ -321,59 +407,121 @@ module Google
321
407
 
322
408
  # Update properties of this object
323
409
  def update!(**args)
324
- @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
325
- @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
410
+ @status = args[:status] if args.key?(:status)
411
+ @placement = args[:placement] if args.key?(:placement)
412
+ @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
413
+ @scheduling = args[:scheduling] if args.key?(:scheduling)
414
+ @pig_job = args[:pig_job] if args.key?(:pig_job)
415
+ @hive_job = args[:hive_job] if args.key?(:hive_job)
416
+ @labels = args[:labels] if args.key?(:labels)
417
+ @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
418
+ @spark_job = args[:spark_job] if args.key?(:spark_job)
419
+ @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
420
+ @status_history = args[:status_history] if args.key?(:status_history)
421
+ @yarn_applications = args[:yarn_applications] if args.key?(:yarn_applications)
422
+ @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
423
+ @reference = args[:reference] if args.key?(:reference)
424
+ @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
326
425
  end
327
426
  end
328
427
 
329
- # Specifies the selection and config of software inside the cluster.
330
- class SoftwareConfig
428
+ # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
429
+ # applications on YARN.
430
+ class SparkJob
331
431
  include Google::Apis::Core::Hashable
332
432
 
333
- # [Optional] The version of software inside the cluster. It must match the
334
- # regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the latest
335
- # version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
336
- # Corresponds to the JSON property `imageVersion`
337
- # @return [String]
338
- attr_accessor :image_version
433
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
434
+ # and tasks.
435
+ # Corresponds to the JSON property `jarFileUris`
436
+ # @return [Array<String>]
437
+ attr_accessor :jar_file_uris
438
+
439
+ # The runtime logging config of the job.
440
+ # Corresponds to the JSON property `loggingConfig`
441
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
442
+ attr_accessor :logging_config
339
443
 
340
- # [Optional] The properties to set on daemon config files. Property keys are
341
- # specified in `prefix:property` format, such as `core:fs.defaultFS`. The
342
- # following are supported prefixes and their mappings: * core: `core-site.xml` *
343
- # hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: `yarn-site.xml` *
344
- # hive: `hive-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf`
444
+ # Optional A mapping of property names to values, used to configure Spark.
445
+ # Properties that conflict with values set by the Cloud Dataproc API may be
446
+ # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
447
+ # and classes in user code.
345
448
  # Corresponds to the JSON property `properties`
346
449
  # @return [Hash<String,String>]
347
450
  attr_accessor :properties
348
451
 
452
+ # Optional The arguments to pass to the driver. Do not include arguments, such
453
+ # as --conf, that can be set as job properties, since a collision may occur that
454
+ # causes an incorrect job submission.
455
+ # Corresponds to the JSON property `args`
456
+ # @return [Array<String>]
457
+ attr_accessor :args
458
+
459
+ # Optional HCFS URIs of files to be copied to the working directory of Spark
460
+ # drivers and distributed tasks. Useful for naively parallel tasks.
461
+ # Corresponds to the JSON property `fileUris`
462
+ # @return [Array<String>]
463
+ attr_accessor :file_uris
464
+
465
+ # The name of the driver's main class. The jar file that contains the class must
466
+ # be in the default CLASSPATH or specified in jar_file_uris.
467
+ # Corresponds to the JSON property `mainClass`
468
+ # @return [String]
469
+ attr_accessor :main_class
470
+
471
+ # Optional HCFS URIs of archives to be extracted in the working directory of
472
+ # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
473
+ # zip.
474
+ # Corresponds to the JSON property `archiveUris`
475
+ # @return [Array<String>]
476
+ attr_accessor :archive_uris
477
+
478
+ # The HCFS URI of the jar file that contains the main class.
479
+ # Corresponds to the JSON property `mainJarFileUri`
480
+ # @return [String]
481
+ attr_accessor :main_jar_file_uri
482
+
349
483
  def initialize(**args)
350
484
  update!(**args)
351
485
  end
352
486
 
353
487
  # Update properties of this object
354
488
  def update!(**args)
355
- @image_version = args[:image_version] if args.key?(:image_version)
489
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
490
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
356
491
  @properties = args[:properties] if args.key?(:properties)
492
+ @args = args[:args] if args.key?(:args)
493
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
494
+ @main_class = args[:main_class] if args.key?(:main_class)
495
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
496
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
357
497
  end
358
498
  end
359
499
 
360
- # Specifies an executable to run on a fully configured node and a timeout period
361
- # for executable completion.
362
- class NodeInitializationAction
500
+ # Cloud Dataproc job status.
501
+ class JobStatus
363
502
  include Google::Apis::Core::Hashable
364
503
 
365
- # [Required] Google Cloud Storage URI of executable file.
366
- # Corresponds to the JSON property `executableFile`
504
+ # Output-only A state message specifying the overall job state.
505
+ # Corresponds to the JSON property `state`
367
506
  # @return [String]
368
- attr_accessor :executable_file
507
+ attr_accessor :state
369
508
 
370
- # [Optional] Amount of time executable has to complete. Default is 10 minutes.
371
- # Cluster creation fails with an explanatory error message (the name of the
372
- # executable that caused the error and the exceeded timeout period) if the
373
- # executable is not completed at end of the timeout period.
374
- # Corresponds to the JSON property `executionTimeout`
509
+ # Output-only Optional job state details, such as an error description if the
510
+ # state is <code>ERROR</code>.
511
+ # Corresponds to the JSON property `details`
375
512
  # @return [String]
376
- attr_accessor :execution_timeout
513
+ attr_accessor :details
514
+
515
+ # Output-only The time when this state was entered.
516
+ # Corresponds to the JSON property `stateStartTime`
517
+ # @return [String]
518
+ attr_accessor :state_start_time
519
+
520
+ # Output-only Additional state information, which includes status reported by
521
+ # the agent.
522
+ # Corresponds to the JSON property `substate`
523
+ # @return [String]
524
+ attr_accessor :substate
377
525
 
378
526
  def initialize(**args)
379
527
  update!(**args)
@@ -381,115 +529,135 @@ module Google
381
529
 
382
530
  # Update properties of this object
383
531
  def update!(**args)
384
- @executable_file = args[:executable_file] if args.key?(:executable_file)
385
- @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
532
+ @state = args[:state] if args.key?(:state)
533
+ @details = args[:details] if args.key?(:details)
534
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
535
+ @substate = args[:substate] if args.key?(:substate)
386
536
  end
387
537
  end
388
538
 
389
- # The status of a cluster and its instances.
390
- class ClusterStatus
539
+ # Specifies the resources used to actively manage an instance group.
540
+ class ManagedGroupConfig
391
541
  include Google::Apis::Core::Hashable
392
542
 
393
- # The cluster's state.
394
- # Corresponds to the JSON property `state`
543
+ # Output-only The name of the Instance Group Manager for this group.
544
+ # Corresponds to the JSON property `instanceGroupManagerName`
395
545
  # @return [String]
396
- attr_accessor :state
546
+ attr_accessor :instance_group_manager_name
397
547
 
398
- # Optional details of cluster's state.
399
- # Corresponds to the JSON property `detail`
548
+ # Output-only The name of the Instance Template used for the Managed Instance
549
+ # Group.
550
+ # Corresponds to the JSON property `instanceTemplateName`
400
551
  # @return [String]
401
- attr_accessor :detail
552
+ attr_accessor :instance_template_name
553
+
554
+ def initialize(**args)
555
+ update!(**args)
556
+ end
557
+
558
+ # Update properties of this object
559
+ def update!(**args)
560
+ @instance_group_manager_name = args[:instance_group_manager_name] if args.key?(:instance_group_manager_name)
561
+ @instance_template_name = args[:instance_template_name] if args.key?(:instance_template_name)
562
+ end
563
+ end
564
+
565
+ # The status of the operation.
566
+ class ClusterOperationStatus
567
+ include Google::Apis::Core::Hashable
402
568
 
403
- # Time when this state was entered.
569
+ # Output-only A message containing the detailed operation state.
570
+ # Corresponds to the JSON property `innerState`
571
+ # @return [String]
572
+ attr_accessor :inner_state
573
+
574
+ # Output-only The time this state was entered.
404
575
  # Corresponds to the JSON property `stateStartTime`
405
576
  # @return [String]
406
577
  attr_accessor :state_start_time
407
578
 
579
+ # Output-only A message containing the operation state.
580
+ # Corresponds to the JSON property `state`
581
+ # @return [String]
582
+ attr_accessor :state
583
+
584
+ # Output-onlyA message containing any operation metadata details.
585
+ # Corresponds to the JSON property `details`
586
+ # @return [String]
587
+ attr_accessor :details
588
+
408
589
  def initialize(**args)
409
590
  update!(**args)
410
591
  end
411
592
 
412
593
  # Update properties of this object
413
594
  def update!(**args)
414
- @state = args[:state] if args.key?(:state)
415
- @detail = args[:detail] if args.key?(:detail)
595
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
416
596
  @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
597
+ @state = args[:state] if args.key?(:state)
598
+ @details = args[:details] if args.key?(:details)
417
599
  end
418
600
  end
419
601
 
420
- # This resource represents a long-running operation that is the result of a
421
- # network API call.
422
- class Operation
602
+ # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.
603
+ # apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/
604
+ # MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/
605
+ # docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
606
+ class HadoopJob
423
607
  include Google::Apis::Core::Hashable
424
608
 
425
- # The server-assigned name, which is only unique within the same service that
426
- # originally returns it. If you use the default HTTP mapping, the `name` should
427
- # have the format of `operations/some/unique/name`.
428
- # Corresponds to the JSON property `name`
609
+ # The name of the driver's main class. The jar file containing the class must be
610
+ # in the default CLASSPATH or specified in jar_file_uris.
611
+ # Corresponds to the JSON property `mainClass`
429
612
  # @return [String]
430
- attr_accessor :name
613
+ attr_accessor :main_class
431
614
 
432
- # Service-specific metadata associated with the operation. It typically contains
433
- # progress information and common metadata such as create time. Some services
434
- # might not provide such metadata. Any method that returns a long-running
435
- # operation should document the metadata type, if any.
436
- # Corresponds to the JSON property `metadata`
437
- # @return [Hash<String,Object>]
438
- attr_accessor :metadata
615
+ # Optional HCFS URIs of archives to be extracted in the working directory of
616
+ # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
617
+ # zip.
618
+ # Corresponds to the JSON property `archiveUris`
619
+ # @return [Array<String>]
620
+ attr_accessor :archive_uris
621
+
622
+ # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
623
+ # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
624
+ # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
625
+ # mapreduce-examples.jar'
626
+ # Corresponds to the JSON property `mainJarFileUri`
627
+ # @return [String]
628
+ attr_accessor :main_jar_file_uri
629
+
630
+ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
631
+ # Corresponds to the JSON property `jarFileUris`
632
+ # @return [Array<String>]
633
+ attr_accessor :jar_file_uris
439
634
 
440
- # If the value is `false`, it means the operation is still in progress. If true,
441
- # the operation is completed, and either `error` or `response` is available.
442
- # Corresponds to the JSON property `done`
443
- # @return [Boolean]
444
- attr_accessor :done
445
- alias_method :done?, :done
635
+ # The runtime logging config of the job.
636
+ # Corresponds to the JSON property `loggingConfig`
637
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
638
+ attr_accessor :logging_config
446
639
 
447
- # The `Status` type defines a logical error model that is suitable for different
448
- # programming environments, including REST APIs and RPC APIs. It is used by [
449
- # gRPC](https://github.com/grpc). The error model is designed to be: - Simple to
450
- # use and understand for most users - Flexible enough to meet unexpected needs #
451
- # Overview The `Status` message contains three pieces of data: error code, error
452
- # message, and error details. The error code should be an enum value of google.
453
- # rpc.Code, but it may accept additional error codes if needed. The error
454
- # message should be a developer-facing English message that helps developers *
455
- # understand* and *resolve* the error. If a localized user-facing error message
456
- # is needed, put the localized message in the error details or localize it in
457
- # the client. The optional error details may contain arbitrary information about
458
- # the error. There is a predefined set of error detail types in the package `
459
- # google.rpc` which can be used for common error conditions. # Language mapping
460
- # The `Status` message is the logical representation of the error model, but it
461
- # is not necessarily the actual wire format. When the `Status` message is
462
- # exposed in different client libraries and different wire protocols, it can be
463
- # mapped differently. For example, it will likely be mapped to some exceptions
464
- # in Java, but more likely mapped to some error codes in C. # Other uses The
465
- # error model and the `Status` message can be used in a variety of environments,
466
- # either with or without APIs, to provide a consistent developer experience
467
- # across different environments. Example uses of this error model include: -
468
- # Partial errors. If a service needs to return partial errors to the client, it
469
- # may embed the `Status` in the normal response to indicate the partial errors. -
470
- # Workflow errors. A typical workflow has multiple steps. Each step may have a `
471
- # Status` message for error reporting purpose. - Batch operations. If a client
472
- # uses batch request and batch response, the `Status` message should be used
473
- # directly inside batch response, one for each error sub-response. -
474
- # Asynchronous operations. If an API call embeds asynchronous operation results
475
- # in its response, the status of those operations should be represented directly
476
- # using the `Status` message. - Logging. If some API errors are stored in logs,
477
- # the message `Status` could be used directly after any stripping needed for
478
- # security/privacy reasons.
479
- # Corresponds to the JSON property `error`
480
- # @return [Google::Apis::DataprocV1::Status]
481
- attr_accessor :error
640
+ # Optional A mapping of property names to values, used to configure Hadoop.
641
+ # Properties that conflict with values set by the Cloud Dataproc API may be
642
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
643
+ # in user code.
644
+ # Corresponds to the JSON property `properties`
645
+ # @return [Hash<String,String>]
646
+ attr_accessor :properties
482
647
 
483
- # The normal response of the operation in case of success. If the original
484
- # method returns no data on success, such as `Delete`, the response is `google.
485
- # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
486
- # the response should be the resource. For other methods, the response should
487
- # have the type `XxxResponse`, where `Xxx` is the original method name. For
488
- # example, if the original method name is `TakeSnapshot()`, the inferred
489
- # response type is `TakeSnapshotResponse`.
490
- # Corresponds to the JSON property `response`
491
- # @return [Hash<String,Object>]
492
- attr_accessor :response
648
+ # Optional The arguments to pass to the driver. Do not include arguments, such
649
+ # as -libjars or -Dfoo=bar, that can be set as job properties, since a collision
650
+ # may occur that causes an incorrect job submission.
651
+ # Corresponds to the JSON property `args`
652
+ # @return [Array<String>]
653
+ attr_accessor :args
654
+
655
+ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the
656
+ # working directory of Hadoop drivers and distributed tasks. Useful for naively
657
+ # parallel tasks.
658
+ # Corresponds to the JSON property `fileUris`
659
+ # @return [Array<String>]
660
+ attr_accessor :file_uris
493
661
 
494
662
  def initialize(**args)
495
663
  update!(**args)
@@ -497,66 +665,37 @@ module Google
497
665
 
498
666
  # Update properties of this object
499
667
  def update!(**args)
500
- @name = args[:name] if args.key?(:name)
501
- @metadata = args[:metadata] if args.key?(:metadata)
502
- @done = args[:done] if args.key?(:done)
503
- @error = args[:error] if args.key?(:error)
504
- @response = args[:response] if args.key?(:response)
668
+ @main_class = args[:main_class] if args.key?(:main_class)
669
+ @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
670
+ @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
671
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
672
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
673
+ @properties = args[:properties] if args.key?(:properties)
674
+ @args = args[:args] if args.key?(:args)
675
+ @file_uris = args[:file_uris] if args.key?(:file_uris)
505
676
  end
506
677
  end
507
678
 
508
- # The `Status` type defines a logical error model that is suitable for different
509
- # programming environments, including REST APIs and RPC APIs. It is used by [
510
- # gRPC](https://github.com/grpc). The error model is designed to be: - Simple to
511
- # use and understand for most users - Flexible enough to meet unexpected needs #
512
- # Overview The `Status` message contains three pieces of data: error code, error
513
- # message, and error details. The error code should be an enum value of google.
514
- # rpc.Code, but it may accept additional error codes if needed. The error
515
- # message should be a developer-facing English message that helps developers *
516
- # understand* and *resolve* the error. If a localized user-facing error message
517
- # is needed, put the localized message in the error details or localize it in
518
- # the client. The optional error details may contain arbitrary information about
519
- # the error. There is a predefined set of error detail types in the package `
520
- # google.rpc` which can be used for common error conditions. # Language mapping
521
- # The `Status` message is the logical representation of the error model, but it
522
- # is not necessarily the actual wire format. When the `Status` message is
523
- # exposed in different client libraries and different wire protocols, it can be
524
- # mapped differently. For example, it will likely be mapped to some exceptions
525
- # in Java, but more likely mapped to some error codes in C. # Other uses The
526
- # error model and the `Status` message can be used in a variety of environments,
527
- # either with or without APIs, to provide a consistent developer experience
528
- # across different environments. Example uses of this error model include: -
529
- # Partial errors. If a service needs to return partial errors to the client, it
530
- # may embed the `Status` in the normal response to indicate the partial errors. -
531
- # Workflow errors. A typical workflow has multiple steps. Each step may have a `
532
- # Status` message for error reporting purpose. - Batch operations. If a client
533
- # uses batch request and batch response, the `Status` message should be used
534
- # directly inside batch response, one for each error sub-response. -
535
- # Asynchronous operations. If an API call embeds asynchronous operation results
536
- # in its response, the status of those operations should be represented directly
537
- # using the `Status` message. - Logging. If some API errors are stored in logs,
538
- # the message `Status` could be used directly after any stripping needed for
539
- # security/privacy reasons.
540
- class Status
679
+ # A list of queries to run on a cluster.
680
+ class QueryList
541
681
  include Google::Apis::Core::Hashable
542
682
 
543
- # The status code, which should be an enum value of google.rpc.Code.
544
- # Corresponds to the JSON property `code`
545
- # @return [Fixnum]
546
- attr_accessor :code
547
-
548
- # A developer-facing error message, which should be in English. Any user-facing
549
- # error message should be localized and sent in the google.rpc.Status.details
550
- # field, or localized by the client.
551
- # Corresponds to the JSON property `message`
552
- # @return [String]
553
- attr_accessor :message
554
-
555
- # A list of messages that carry the error details. There will be a common set of
556
- # message types for APIs to use.
557
- # Corresponds to the JSON property `details`
558
- # @return [Array<Hash<String,Object>>]
559
- attr_accessor :details
683
+ # Required The queries to execute. You do not need to terminate a query with a
684
+ # semicolon. Multiple queries can be specified in one string by separating each
685
+ # with a semicolon. Here is an example of an Cloud Dataproc API snippet that
686
+ # uses a QueryList to specify a HiveJob:
687
+ # "hiveJob": `
688
+ # "queryList": `
689
+ # "queries": [
690
+ # "query1",
691
+ # "query2",
692
+ # "query3;query4",
693
+ # ]
694
+ # `
695
+ # `
696
+ # Corresponds to the JSON property `queries`
697
+ # @return [Array<String>]
698
+ attr_accessor :queries
560
699
 
561
700
  def initialize(**args)
562
701
  update!(**args)
@@ -564,27 +703,39 @@ module Google
564
703
 
565
704
  # Update properties of this object
566
705
  def update!(**args)
567
- @code = args[:code] if args.key?(:code)
568
- @message = args[:message] if args.key?(:message)
569
- @details = args[:details] if args.key?(:details)
706
+ @queries = args[:queries] if args.key?(:queries)
570
707
  end
571
708
  end
572
709
 
573
- # The list of all clusters in a project.
574
- class ListClustersResponse
710
+ # A YARN application created by a job. Application information is a subset of <
711
+ # code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
712
+ # Beta Feature: This report is available for testing purposes only. It may be
713
+ # changed before final release.
714
+ class YarnApplication
575
715
  include Google::Apis::Core::Hashable
576
716
 
577
- # [Output-only] The clusters in the project.
578
- # Corresponds to the JSON property `clusters`
579
- # @return [Array<Google::Apis::DataprocV1::Cluster>]
580
- attr_accessor :clusters
717
+ # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
718
+ # TimelineServer that provides application-specific information. The URL uses
719
+ # the internal hostname, and requires a proxy server for resolution and,
720
+ # possibly, access.
721
+ # Corresponds to the JSON property `trackingUrl`
722
+ # @return [String]
723
+ attr_accessor :tracking_url
581
724
 
582
- # [Optional] This token is included in the response if there are more results to
583
- # fetch. To fetch additional results, provide this value as the `page_token` in
584
- # a subsequent ListClustersRequest.
585
- # Corresponds to the JSON property `nextPageToken`
725
+ # Required The numerical progress of the application, from 1 to 100.
726
+ # Corresponds to the JSON property `progress`
727
+ # @return [Float]
728
+ attr_accessor :progress
729
+
730
+ # Required The application state.
731
+ # Corresponds to the JSON property `state`
586
732
  # @return [String]
587
- attr_accessor :next_page_token
733
+ attr_accessor :state
734
+
735
+ # Required The application name.
736
+ # Corresponds to the JSON property `name`
737
+ # @return [String]
738
+ attr_accessor :name
588
739
 
589
740
  def initialize(**args)
590
741
  update!(**args)
@@ -592,8 +743,10 @@ module Google
592
743
 
593
744
  # Update properties of this object
594
745
  def update!(**args)
595
- @clusters = args[:clusters] if args.key?(:clusters)
596
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
746
+ @tracking_url = args[:tracking_url] if args.key?(:tracking_url)
747
+ @progress = args[:progress] if args.key?(:progress)
748
+ @state = args[:state] if args.key?(:state)
749
+ @name = args[:name] if args.key?(:name)
597
750
  end
598
751
  end
599
752
 
@@ -610,14 +763,23 @@ module Google
610
763
  end
611
764
  end
612
765
 
613
- # A request to submit a job.
614
- class SubmitJobRequest
766
+ # Specifies the config of disk options for a group of VM instances.
767
+ class DiskConfig
615
768
  include Google::Apis::Core::Hashable
616
769
 
617
- # A Cloud Dataproc job resource.
618
- # Corresponds to the JSON property `job`
619
- # @return [Google::Apis::DataprocV1::Job]
620
- attr_accessor :job
770
+ # Optional Size in GB of the boot disk (default is 500GB).
771
+ # Corresponds to the JSON property `bootDiskSizeGb`
772
+ # @return [Fixnum]
773
+ attr_accessor :boot_disk_size_gb
774
+
775
+ # Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not
776
+ # attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.
777
+ # apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are
778
+ # attached, this runtime bulk data is spread across them, and the boot disk
779
+ # contains only basic config and installed binaries.
780
+ # Corresponds to the JSON property `numLocalSsds`
781
+ # @return [Fixnum]
782
+ attr_accessor :num_local_ssds
621
783
 
622
784
  def initialize(**args)
623
785
  update!(**args)
@@ -625,76 +787,54 @@ module Google
625
787
 
626
788
  # Update properties of this object
627
789
  def update!(**args)
628
- @job = args[:job] if args.key?(:job)
790
+ @boot_disk_size_gb = args[:boot_disk_size_gb] if args.key?(:boot_disk_size_gb)
791
+ @num_local_ssds = args[:num_local_ssds] if args.key?(:num_local_ssds)
629
792
  end
630
793
  end
631
794
 
632
- # A Cloud Dataproc job resource.
633
- class Job
795
+ # Metadata describing the operation.
796
+ class ClusterOperationMetadata
634
797
  include Google::Apis::Core::Hashable
635
798
 
636
- # Encapsulates the full scoping used to reference a job.
637
- # Corresponds to the JSON property `reference`
638
- # @return [Google::Apis::DataprocV1::JobReference]
639
- attr_accessor :reference
640
-
641
- # Cloud Dataproc job config.
642
- # Corresponds to the JSON property `placement`
643
- # @return [Google::Apis::DataprocV1::JobPlacement]
644
- attr_accessor :placement
645
-
646
- # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.
647
- # Corresponds to the JSON property `hadoopJob`
648
- # @return [Google::Apis::DataprocV1::HadoopJob]
649
- attr_accessor :hadoop_job
650
-
651
- # A Cloud Dataproc job for running Spark applications on YARN.
652
- # Corresponds to the JSON property `sparkJob`
653
- # @return [Google::Apis::DataprocV1::SparkJob]
654
- attr_accessor :spark_job
655
-
656
- # A Cloud Dataproc job for running PySpark applications on YARN.
657
- # Corresponds to the JSON property `pysparkJob`
658
- # @return [Google::Apis::DataprocV1::PySparkJob]
659
- attr_accessor :pyspark_job
799
+ # Output-only The operation type.
800
+ # Corresponds to the JSON property `operationType`
801
+ # @return [String]
802
+ attr_accessor :operation_type
660
803
 
661
- # A Cloud Dataproc job for running Hive queries on YARN.
662
- # Corresponds to the JSON property `hiveJob`
663
- # @return [Google::Apis::DataprocV1::HiveJob]
664
- attr_accessor :hive_job
804
+ # Output-only Short description of operation.
805
+ # Corresponds to the JSON property `description`
806
+ # @return [String]
807
+ attr_accessor :description
665
808
 
666
- # A Cloud Dataproc job for running Pig queries on YARN.
667
- # Corresponds to the JSON property `pigJob`
668
- # @return [Google::Apis::DataprocV1::PigJob]
669
- attr_accessor :pig_job
809
+ # Output-only Errors encountered during operation execution.
810
+ # Corresponds to the JSON property `warnings`
811
+ # @return [Array<String>]
812
+ attr_accessor :warnings
670
813
 
671
- # A Cloud Dataproc job for running Spark SQL queries.
672
- # Corresponds to the JSON property `sparkSqlJob`
673
- # @return [Google::Apis::DataprocV1::SparkSqlJob]
674
- attr_accessor :spark_sql_job
814
+ # Output-only Labels associated with the operation
815
+ # Corresponds to the JSON property `labels`
816
+ # @return [Hash<String,String>]
817
+ attr_accessor :labels
675
818
 
676
- # Cloud Dataproc job status.
819
+ # The status of the operation.
677
820
  # Corresponds to the JSON property `status`
678
- # @return [Google::Apis::DataprocV1::JobStatus]
821
+ # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
679
822
  attr_accessor :status
680
823
 
681
- # [Output-only] The previous job status.
824
+ # Output-only The previous operation status.
682
825
  # Corresponds to the JSON property `statusHistory`
683
- # @return [Array<Google::Apis::DataprocV1::JobStatus>]
826
+ # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
684
827
  attr_accessor :status_history
685
828
 
686
- # [Output-only] A URI pointing to the location of the stdout of the job's driver
687
- # program.
688
- # Corresponds to the JSON property `driverOutputResourceUri`
829
+ # Output-only Cluster UUID for the operation.
830
+ # Corresponds to the JSON property `clusterUuid`
689
831
  # @return [String]
690
- attr_accessor :driver_output_resource_uri
832
+ attr_accessor :cluster_uuid
691
833
 
692
- # [Output-only] If present, the location of miscellaneous control files which
693
- # may be used as part of job setup and handling. If not present, control files
694
- # may be placed in the same location as `driver_output_uri`.
695
- # Corresponds to the JSON property `driverControlFilesUri`
834
+ # Output-only Name of the cluster for the operation.
835
+ # Corresponds to the JSON property `clusterName`
696
836
  # @return [String]
697
- attr_accessor :driver_control_files_uri
837
+ attr_accessor :cluster_name
698
838
 
699
839
  def initialize(**args)
700
840
  update!(**args)
@@ -702,133 +842,78 @@ module Google
702
842
 
703
843
  # Update properties of this object
704
844
  def update!(**args)
705
- @reference = args[:reference] if args.key?(:reference)
706
- @placement = args[:placement] if args.key?(:placement)
707
- @hadoop_job = args[:hadoop_job] if args.key?(:hadoop_job)
708
- @spark_job = args[:spark_job] if args.key?(:spark_job)
709
- @pyspark_job = args[:pyspark_job] if args.key?(:pyspark_job)
710
- @hive_job = args[:hive_job] if args.key?(:hive_job)
711
- @pig_job = args[:pig_job] if args.key?(:pig_job)
712
- @spark_sql_job = args[:spark_sql_job] if args.key?(:spark_sql_job)
845
+ @operation_type = args[:operation_type] if args.key?(:operation_type)
846
+ @description = args[:description] if args.key?(:description)
847
+ @warnings = args[:warnings] if args.key?(:warnings)
848
+ @labels = args[:labels] if args.key?(:labels)
713
849
  @status = args[:status] if args.key?(:status)
714
850
  @status_history = args[:status_history] if args.key?(:status_history)
715
- @driver_output_resource_uri = args[:driver_output_resource_uri] if args.key?(:driver_output_resource_uri)
716
- @driver_control_files_uri = args[:driver_control_files_uri] if args.key?(:driver_control_files_uri)
717
- end
718
- end
719
-
720
- # Encapsulates the full scoping used to reference a job.
721
- class JobReference
722
- include Google::Apis::Core::Hashable
723
-
724
- # [Required] The ID of the Google Cloud Platform project that the job belongs to.
725
- # Corresponds to the JSON property `projectId`
726
- # @return [String]
727
- attr_accessor :project_id
728
-
729
- # [Required] The job ID, which must be unique within the project. The job ID is
730
- # generated by the server upon job submission or provided by the user as a means
731
- # to perform retries without creating duplicate jobs. The ID must contain only
732
- # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
733
- # maximum length is 512 characters.
734
- # Corresponds to the JSON property `jobId`
735
- # @return [String]
736
- attr_accessor :job_id
737
-
738
- def initialize(**args)
739
- update!(**args)
740
- end
741
-
742
- # Update properties of this object
743
- def update!(**args)
744
- @project_id = args[:project_id] if args.key?(:project_id)
745
- @job_id = args[:job_id] if args.key?(:job_id)
851
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
852
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
746
853
  end
747
854
  end
748
855
 
749
- # Cloud Dataproc job config.
750
- class JobPlacement
856
+ # A generic empty message that you can re-use to avoid defining duplicated empty
857
+ # messages in your APIs. A typical example is to use it as the request or the
858
+ # response type of an API method. For instance:
859
+ # service Foo `
860
+ # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
861
+ # `
862
+ # The JSON representation for Empty is empty JSON object ``.
863
+ class Empty
751
864
  include Google::Apis::Core::Hashable
752
865
 
753
- # [Required] The name of the cluster where the job will be submitted.
754
- # Corresponds to the JSON property `clusterName`
755
- # @return [String]
756
- attr_accessor :cluster_name
757
-
758
- # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the
759
- # job is submitted.
760
- # Corresponds to the JSON property `clusterUuid`
761
- # @return [String]
762
- attr_accessor :cluster_uuid
763
-
764
866
  def initialize(**args)
765
867
  update!(**args)
766
868
  end
767
869
 
768
870
  # Update properties of this object
769
871
  def update!(**args)
770
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
771
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
772
872
  end
773
873
  end
774
874
 
775
- # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.
776
- class HadoopJob
875
+ # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
876
+ # queries on YARN.
877
+ class HiveJob
777
878
  include Google::Apis::Core::Hashable
778
879
 
779
- # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-
780
- # bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-
781
- # samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-
782
- # mapreduce-examples.jar'
783
- # Corresponds to the JSON property `mainJarFileUri`
784
- # @return [String]
785
- attr_accessor :main_jar_file_uri
786
-
787
- # The name of the driver's main class. The jar file containing the class must be
788
- # in the default CLASSPATH or specified in `jar_file_uris`.
789
- # Corresponds to the JSON property `mainClass`
790
- # @return [String]
791
- attr_accessor :main_class
792
-
793
- # [Optional] The arguments to pass to the driver. Do not include arguments, such
794
- # as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a
795
- # collision may occur that causes an incorrect job submission.
796
- # Corresponds to the JSON property `args`
797
- # @return [Array<String>]
798
- attr_accessor :args
799
-
800
- # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and
801
- # tasks.
880
+ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and
881
+ # Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
802
882
  # Corresponds to the JSON property `jarFileUris`
803
883
  # @return [Array<String>]
804
- attr_accessor :jar_file_uris
805
-
806
- # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to
807
- # the working directory of Hadoop drivers and distributed tasks. Useful for
808
- # naively parallel tasks.
809
- # Corresponds to the JSON property `fileUris`
810
- # @return [Array<String>]
811
- attr_accessor :file_uris
812
-
813
- # [Optional] HCFS URIs of archives to be extracted in the working directory of
814
- # Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .
815
- # zip.
816
- # Corresponds to the JSON property `archiveUris`
817
- # @return [Array<String>]
818
- attr_accessor :archive_uris
884
+ attr_accessor :jar_file_uris
885
+
886
+ # Optional Mapping of query variable names to values (equivalent to the Hive
887
+ # command: SET name="value";).
888
+ # Corresponds to the JSON property `scriptVariables`
889
+ # @return [Hash<String,String>]
890
+ attr_accessor :script_variables
819
891
 
820
- # [Optional] A mapping of property names to values, used to configure Hadoop.
892
+ # Optional A mapping of property names and values, used to configure Hive.
821
893
  # Properties that conflict with values set by the Cloud Dataproc API may be
822
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes
823
- # in user code.
894
+ # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
895
+ # hive/conf/hive-site.xml, and classes in user code.
824
896
  # Corresponds to the JSON property `properties`
825
897
  # @return [Hash<String,String>]
826
898
  attr_accessor :properties
827
899
 
828
- # The runtime logging config of the job.
829
- # Corresponds to the JSON property `loggingConfig`
830
- # @return [Google::Apis::DataprocV1::LoggingConfig]
831
- attr_accessor :logging_config
900
+ # Optional Whether to continue executing queries if a query fails. The default
901
+ # value is false. Setting to true can be useful when executing independent
902
+ # parallel queries.
903
+ # Corresponds to the JSON property `continueOnFailure`
904
+ # @return [Boolean]
905
+ attr_accessor :continue_on_failure
906
+ alias_method :continue_on_failure?, :continue_on_failure
907
+
908
+ # The HCFS URI of the script that contains Hive queries.
909
+ # Corresponds to the JSON property `queryFileUri`
910
+ # @return [String]
911
+ attr_accessor :query_file_uri
912
+
913
+ # A list of queries to run on a cluster.
914
+ # Corresponds to the JSON property `queryList`
915
+ # @return [Google::Apis::DataprocV1::QueryList]
916
+ attr_accessor :query_list
832
917
 
833
918
  def initialize(**args)
834
919
  update!(**args)
@@ -836,27 +921,24 @@ module Google
836
921
 
837
922
  # Update properties of this object
838
923
  def update!(**args)
839
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
840
- @main_class = args[:main_class] if args.key?(:main_class)
841
- @args = args[:args] if args.key?(:args)
842
924
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
843
- @file_uris = args[:file_uris] if args.key?(:file_uris)
844
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
925
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
845
926
  @properties = args[:properties] if args.key?(:properties)
846
- @logging_config = args[:logging_config] if args.key?(:logging_config)
927
+ @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
928
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
929
+ @query_list = args[:query_list] if args.key?(:query_list)
847
930
  end
848
931
  end
849
932
 
850
- # The runtime logging config of the job.
851
- class LoggingConfig
933
+ # The location of diagnostic output.
934
+ class DiagnoseClusterResults
852
935
  include Google::Apis::Core::Hashable
853
936
 
854
- # The per-package log levels for the driver. This may include "root" package
855
- # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
856
- # org.apache = DEBUG'
857
- # Corresponds to the JSON property `driverLogLevels`
858
- # @return [Hash<String,String>]
859
- attr_accessor :driver_log_levels
937
+ # Output-only The Google Cloud Storage URI of the diagnostic output. The output
938
+ # report is a plain text file with a summary of collected diagnostics.
939
+ # Corresponds to the JSON property `outputUri`
940
+ # @return [String]
941
+ attr_accessor :output_uri
860
942
 
861
943
  def initialize(**args)
862
944
  update!(**args)
@@ -864,63 +946,67 @@ module Google
864
946
 
865
947
  # Update properties of this object
866
948
  def update!(**args)
867
- @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
949
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
868
950
  end
869
951
  end
870
952
 
871
- # A Cloud Dataproc job for running Spark applications on YARN.
872
- class SparkJob
953
+ # The cluster config.
954
+ class ClusterConfig
873
955
  include Google::Apis::Core::Hashable
874
956
 
875
- # The HCFS URI of the jar file that contains the main class.
876
- # Corresponds to the JSON property `mainJarFileUri`
877
- # @return [String]
878
- attr_accessor :main_jar_file_uri
879
-
880
- # The name of the driver's main class. The jar file that contains the class must
881
- # be in the default CLASSPATH or specified in `jar_file_uris`.
882
- # Corresponds to the JSON property `mainClass`
883
- # @return [String]
884
- attr_accessor :main_class
957
+ # Optional The config settings for Google Compute Engine resources in an
958
+ # instance group, such as a master or worker group.
959
+ # Corresponds to the JSON property `masterConfig`
960
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
961
+ attr_accessor :master_config
885
962
 
886
- # [Optional] The arguments to pass to the driver. Do not include arguments, such
887
- # as `--conf`, that can be set as job properties, since a collision may occur
888
- # that causes an incorrect job submission.
889
- # Corresponds to the JSON property `args`
890
- # @return [Array<String>]
891
- attr_accessor :args
963
+ # Optional The config settings for Google Compute Engine resources in an
964
+ # instance group, such as a master or worker group.
965
+ # Corresponds to the JSON property `secondaryWorkerConfig`
966
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
967
+ attr_accessor :secondary_worker_config
892
968
 
893
- # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver
894
- # and tasks.
895
- # Corresponds to the JSON property `jarFileUris`
896
- # @return [Array<String>]
897
- attr_accessor :jar_file_uris
969
+ # Optional Commands to execute on each node after config is completed. By
970
+ # default, executables are run on master and all worker nodes. You can test a
971
+ # node's <code>role</code> metadata to run an executable on a master or worker
972
+ # node, as shown below using curl (you can also use wget):
973
+ # ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/
974
+ # instance/attributes/dataproc-role)
975
+ # if [[ "$`ROLE`" == 'Master' ]]; then
976
+ # ... master specific actions ...
977
+ # else
978
+ # ... worker specific actions ...
979
+ # fi
980
+ # Corresponds to the JSON property `initializationActions`
981
+ # @return [Array<Google::Apis::DataprocV1::NodeInitializationAction>]
982
+ attr_accessor :initialization_actions
898
983
 
899
- # [Optional] HCFS URIs of files to be copied to the working directory of Spark
900
- # drivers and distributed tasks. Useful for naively parallel tasks.
901
- # Corresponds to the JSON property `fileUris`
902
- # @return [Array<String>]
903
- attr_accessor :file_uris
984
+ # Optional A Google Cloud Storage staging bucket used for sharing generated SSH
985
+ # keys and config. If you do not specify a staging bucket, Cloud Dataproc will
986
+ # determine an appropriate Cloud Storage location (US, ASIA, or EU) for your
987
+ # cluster's staging bucket according to the Google Compute Engine zone where
988
+ # your cluster is deployed, and then it will create and manage this project-
989
+ # level, per-location bucket for you.
990
+ # Corresponds to the JSON property `configBucket`
991
+ # @return [String]
992
+ attr_accessor :config_bucket
904
993
 
905
- # [Optional] HCFS URIs of archives to be extracted in the working directory of
906
- # Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .
907
- # zip.
908
- # Corresponds to the JSON property `archiveUris`
909
- # @return [Array<String>]
910
- attr_accessor :archive_uris
994
+ # Optional The config settings for Google Compute Engine resources in an
995
+ # instance group, such as a master or worker group.
996
+ # Corresponds to the JSON property `workerConfig`
997
+ # @return [Google::Apis::DataprocV1::InstanceGroupConfig]
998
+ attr_accessor :worker_config
911
999
 
912
- # [Optional] A mapping of property names to values, used to configure Spark.
913
- # Properties that conflict with values set by the Cloud Dataproc API may be
914
- # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
915
- # and classes in user code.
916
- # Corresponds to the JSON property `properties`
917
- # @return [Hash<String,String>]
918
- attr_accessor :properties
1000
+ # Common config settings for resources of Google Compute Engine cluster
1001
+ # instances, applicable to all instances in the cluster.
1002
+ # Corresponds to the JSON property `gceClusterConfig`
1003
+ # @return [Google::Apis::DataprocV1::GceClusterConfig]
1004
+ attr_accessor :gce_cluster_config
919
1005
 
920
- # The runtime logging config of the job.
921
- # Corresponds to the JSON property `loggingConfig`
922
- # @return [Google::Apis::DataprocV1::LoggingConfig]
923
- attr_accessor :logging_config
1006
+ # Specifies the selection and config of software inside the cluster.
1007
+ # Corresponds to the JSON property `softwareConfig`
1008
+ # @return [Google::Apis::DataprocV1::SoftwareConfig]
1009
+ attr_accessor :software_config
924
1010
 
925
1011
  def initialize(**args)
926
1012
  update!(**args)
@@ -928,129 +1014,198 @@ module Google
928
1014
 
929
1015
  # Update properties of this object
930
1016
  def update!(**args)
931
- @main_jar_file_uri = args[:main_jar_file_uri] if args.key?(:main_jar_file_uri)
932
- @main_class = args[:main_class] if args.key?(:main_class)
933
- @args = args[:args] if args.key?(:args)
934
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
935
- @file_uris = args[:file_uris] if args.key?(:file_uris)
936
- @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
937
- @properties = args[:properties] if args.key?(:properties)
938
- @logging_config = args[:logging_config] if args.key?(:logging_config)
1017
+ @master_config = args[:master_config] if args.key?(:master_config)
1018
+ @secondary_worker_config = args[:secondary_worker_config] if args.key?(:secondary_worker_config)
1019
+ @initialization_actions = args[:initialization_actions] if args.key?(:initialization_actions)
1020
+ @config_bucket = args[:config_bucket] if args.key?(:config_bucket)
1021
+ @worker_config = args[:worker_config] if args.key?(:worker_config)
1022
+ @gce_cluster_config = args[:gce_cluster_config] if args.key?(:gce_cluster_config)
1023
+ @software_config = args[:software_config] if args.key?(:software_config)
939
1024
  end
940
1025
  end
941
1026
 
942
- # A Cloud Dataproc job for running PySpark applications on YARN.
1027
+ # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/
1028
+ # 0.9.0/python-programming-guide.html) applications on YARN.
943
1029
  class PySparkJob
944
1030
  include Google::Apis::Core::Hashable
945
1031
 
946
- # [Required] The HCFS URI of the main Python file to use as the driver. Must be
947
- # a .py file.
948
- # Corresponds to the JSON property `mainPythonFileUri`
949
- # @return [String]
950
- attr_accessor :main_python_file_uri
1032
+ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver
1033
+ # and tasks.
1034
+ # Corresponds to the JSON property `jarFileUris`
1035
+ # @return [Array<String>]
1036
+ attr_accessor :jar_file_uris
1037
+
1038
+ # The runtime logging config of the job.
1039
+ # Corresponds to the JSON property `loggingConfig`
1040
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
1041
+ attr_accessor :logging_config
1042
+
1043
+ # Optional A mapping of property names to values, used to configure PySpark.
1044
+ # Properties that conflict with values set by the Cloud Dataproc API may be
1045
+ # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
1046
+ # and classes in user code.
1047
+ # Corresponds to the JSON property `properties`
1048
+ # @return [Hash<String,String>]
1049
+ attr_accessor :properties
951
1050
 
952
- # [Optional] The arguments to pass to the driver. Do not include arguments, such
953
- # as `--conf`, that can be set as job properties, since a collision may occur
954
- # that causes an incorrect job submission.
1051
+ # Optional The arguments to pass to the driver. Do not include arguments, such
1052
+ # as --conf, that can be set as job properties, since a collision may occur that
1053
+ # causes an incorrect job submission.
955
1054
  # Corresponds to the JSON property `args`
956
1055
  # @return [Array<String>]
957
1056
  attr_accessor :args
958
1057
 
959
- # [Optional] HCFS file URIs of Python files to pass to the PySpark framework.
1058
+ # Optional HCFS URIs of files to be copied to the working directory of Python
1059
+ # drivers and distributed tasks. Useful for naively parallel tasks.
1060
+ # Corresponds to the JSON property `fileUris`
1061
+ # @return [Array<String>]
1062
+ attr_accessor :file_uris
1063
+
1064
+ # Optional HCFS file URIs of Python files to pass to the PySpark framework.
960
1065
  # Supported file types: .py, .egg, and .zip.
961
1066
  # Corresponds to the JSON property `pythonFileUris`
962
1067
  # @return [Array<String>]
963
1068
  attr_accessor :python_file_uris
964
1069
 
965
- # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python
966
- # driver and tasks.
967
- # Corresponds to the JSON property `jarFileUris`
968
- # @return [Array<String>]
969
- attr_accessor :jar_file_uris
970
-
971
- # [Optional] HCFS URIs of files to be copied to the working directory of Python
972
- # drivers and distributed tasks. Useful for naively parallel tasks.
973
- # Corresponds to the JSON property `fileUris`
974
- # @return [Array<String>]
975
- attr_accessor :file_uris
1070
+ # Required The HCFS URI of the main Python file to use as the driver. Must be a .
1071
+ # py file.
1072
+ # Corresponds to the JSON property `mainPythonFileUri`
1073
+ # @return [String]
1074
+ attr_accessor :main_python_file_uri
976
1075
 
977
- # [Optional] HCFS URIs of archives to be extracted in the working directory of .
1076
+ # Optional HCFS URIs of archives to be extracted in the working directory of .
978
1077
  # jar, .tar, .tar.gz, .tgz, and .zip.
979
1078
  # Corresponds to the JSON property `archiveUris`
980
1079
  # @return [Array<String>]
981
1080
  attr_accessor :archive_uris
982
1081
 
983
- # [Optional] A mapping of property names to values, used to configure PySpark.
984
- # Properties that conflict with values set by the Cloud Dataproc API may be
985
- # overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf
986
- # and classes in user code.
987
- # Corresponds to the JSON property `properties`
988
- # @return [Hash<String,String>]
989
- attr_accessor :properties
990
-
991
- # The runtime logging config of the job.
992
- # Corresponds to the JSON property `loggingConfig`
993
- # @return [Google::Apis::DataprocV1::LoggingConfig]
994
- attr_accessor :logging_config
995
-
996
1082
  def initialize(**args)
997
1083
  update!(**args)
998
1084
  end
999
1085
 
1000
1086
  # Update properties of this object
1001
1087
  def update!(**args)
1002
- @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
1003
- @args = args[:args] if args.key?(:args)
1004
- @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
1005
1088
  @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1089
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
1090
+ @properties = args[:properties] if args.key?(:properties)
1091
+ @args = args[:args] if args.key?(:args)
1006
1092
  @file_uris = args[:file_uris] if args.key?(:file_uris)
1093
+ @python_file_uris = args[:python_file_uris] if args.key?(:python_file_uris)
1094
+ @main_python_file_uri = args[:main_python_file_uri] if args.key?(:main_python_file_uri)
1007
1095
  @archive_uris = args[:archive_uris] if args.key?(:archive_uris)
1008
- @properties = args[:properties] if args.key?(:properties)
1009
- @logging_config = args[:logging_config] if args.key?(:logging_config)
1010
1096
  end
1011
1097
  end
1012
1098
 
1013
- # A Cloud Dataproc job for running Hive queries on YARN.
1014
- class HiveJob
1099
+ # Common config settings for resources of Google Compute Engine cluster
1100
+ # instances, applicable to all instances in the cluster.
1101
+ class GceClusterConfig
1015
1102
  include Google::Apis::Core::Hashable
1016
1103
 
1017
- # The HCFS URI of the script that contains Hive queries.
1018
- # Corresponds to the JSON property `queryFileUri`
1104
+ # The Google Compute Engine metadata entries to add to all instances (see
1105
+ # Project and instance metadata (https://cloud.google.com/compute/docs/storing-
1106
+ # retrieving-metadata#project_and_instance_metadata)).
1107
+ # Corresponds to the JSON property `metadata`
1108
+ # @return [Hash<String,String>]
1109
+ attr_accessor :metadata
1110
+
1111
+ # Optional If true, all instances in the cluster will only have internal IP
1112
+ # addresses. By default, clusters are not restricted to internal IP addresses,
1113
+ # and will have ephemeral external IP addresses assigned to each instance. This
1114
+ # internal_ip_only restriction can only be enabled for subnetwork enabled
1115
+ # networks, and all off-cluster dependencies must be configured to be accessible
1116
+ # without external IP addresses.
1117
+ # Corresponds to the JSON property `internalIpOnly`
1118
+ # @return [Boolean]
1119
+ attr_accessor :internal_ip_only
1120
+ alias_method :internal_ip_only?, :internal_ip_only
1121
+
1122
+ # Optional The URIs of service account scopes to be included in Google Compute
1123
+ # Engine instances. The following base set of scopes is always included:
1124
+ # https://www.googleapis.com/auth/cloud.useraccounts.readonly
1125
+ # https://www.googleapis.com/auth/devstorage.read_write
1126
+ # https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the
1127
+ # following defaults are also provided:
1128
+ # https://www.googleapis.com/auth/bigquery
1129
+ # https://www.googleapis.com/auth/bigtable.admin.table
1130
+ # https://www.googleapis.com/auth/bigtable.data
1131
+ # https://www.googleapis.com/auth/devstorage.full_control
1132
+ # Corresponds to the JSON property `serviceAccountScopes`
1133
+ # @return [Array<String>]
1134
+ attr_accessor :service_account_scopes
1135
+
1136
+ # The Google Compute Engine tags to add to all instances (see Tagging instances).
1137
+ # Corresponds to the JSON property `tags`
1138
+ # @return [Array<String>]
1139
+ attr_accessor :tags
1140
+
1141
+ # Optional The service account of the instances. Defaults to the default Google
1142
+ # Compute Engine service account. Custom service accounts need permissions
1143
+ # equivalent to the folloing IAM roles:
1144
+ # roles/logging.logWriter
1145
+ # roles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/
1146
+ # service-accounts#custom_service_accounts for more information). Example: [
1147
+ # account_id]@[project_id].iam.gserviceaccount.com
1148
+ # Corresponds to the JSON property `serviceAccount`
1019
1149
  # @return [String]
1020
- attr_accessor :query_file_uri
1150
+ attr_accessor :service_account
1021
1151
 
1022
- # A list of queries to run on a cluster.
1023
- # Corresponds to the JSON property `queryList`
1024
- # @return [Google::Apis::DataprocV1::QueryList]
1025
- attr_accessor :query_list
1152
+ # Optional The Google Compute Engine subnetwork to be used for machine
1153
+ # communications. Cannot be specified with network_uri. Example: https://www.
1154
+ # googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
1155
+ # Corresponds to the JSON property `subnetworkUri`
1156
+ # @return [String]
1157
+ attr_accessor :subnetwork_uri
1026
1158
 
1027
- # [Optional] Whether to continue executing queries if a query fails. The default
1028
- # value is `false`. Setting to `true` can be useful when executing independent
1029
- # parallel queries.
1030
- # Corresponds to the JSON property `continueOnFailure`
1031
- # @return [Boolean]
1032
- attr_accessor :continue_on_failure
1033
- alias_method :continue_on_failure?, :continue_on_failure
1159
+ # Optional The Google Compute Engine network to be used for machine
1160
+ # communications. Cannot be specified with subnetwork_uri. If neither
1161
+ # network_uri nor subnetwork_uri is specified, the "default" network of the
1162
+ # project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using
1163
+ # Subnetworks for more information). Example: https://www.googleapis.com/compute/
1164
+ # v1/projects/[project_id]/regions/global/default.
1165
+ # Corresponds to the JSON property `networkUri`
1166
+ # @return [String]
1167
+ attr_accessor :network_uri
1034
1168
 
1035
- # [Optional] Mapping of query variable names to values (equivalent to the Hive
1036
- # command: `SET name="value";`).
1037
- # Corresponds to the JSON property `scriptVariables`
1038
- # @return [Hash<String,String>]
1039
- attr_accessor :script_variables
1169
+ # Required The zone where the Google Compute Engine cluster will be located.
1170
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[
1171
+ # zone].
1172
+ # Corresponds to the JSON property `zoneUri`
1173
+ # @return [String]
1174
+ attr_accessor :zone_uri
1040
1175
 
1041
- # [Optional] A mapping of property names and values, used to configure Hive.
1042
- # Properties that conflict with values set by the Cloud Dataproc API may be
1043
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
1044
- # hive/conf/hive-site.xml, and classes in user code.
1045
- # Corresponds to the JSON property `properties`
1046
- # @return [Hash<String,String>]
1047
- attr_accessor :properties
1176
+ def initialize(**args)
1177
+ update!(**args)
1178
+ end
1048
1179
 
1049
- # [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive server
1050
- # and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
1051
- # Corresponds to the JSON property `jarFileUris`
1052
- # @return [Array<String>]
1053
- attr_accessor :jar_file_uris
1180
+ # Update properties of this object
1181
+ def update!(**args)
1182
+ @metadata = args[:metadata] if args.key?(:metadata)
1183
+ @internal_ip_only = args[:internal_ip_only] if args.key?(:internal_ip_only)
1184
+ @service_account_scopes = args[:service_account_scopes] if args.key?(:service_account_scopes)
1185
+ @tags = args[:tags] if args.key?(:tags)
1186
+ @service_account = args[:service_account] if args.key?(:service_account)
1187
+ @subnetwork_uri = args[:subnetwork_uri] if args.key?(:subnetwork_uri)
1188
+ @network_uri = args[:network_uri] if args.key?(:network_uri)
1189
+ @zone_uri = args[:zone_uri] if args.key?(:zone_uri)
1190
+ end
1191
+ end
1192
+
1193
+ # Specifies the type and number of accelerator cards attached to the instances
1194
+ # of an instance group (see GPUs on Compute Engine).
1195
+ class AcceleratorConfig
1196
+ include Google::Apis::Core::Hashable
1197
+
1198
+ # The number of the accelerator cards of this type exposed to this instance.
1199
+ # Corresponds to the JSON property `acceleratorCount`
1200
+ # @return [Fixnum]
1201
+ attr_accessor :accelerator_count
1202
+
1203
+ # Full or partial URI of the accelerator type resource to expose to this
1204
+ # instance. See Google Compute Engine AcceleratorTypes( /compute/docs/reference/
1205
+ # beta/acceleratorTypes)
1206
+ # Corresponds to the JSON property `acceleratorTypeUri`
1207
+ # @return [String]
1208
+ attr_accessor :accelerator_type_uri
1054
1209
 
1055
1210
  def initialize(**args)
1056
1211
  update!(**args)
@@ -1058,27 +1213,26 @@ module Google
1058
1213
 
1059
1214
  # Update properties of this object
1060
1215
  def update!(**args)
1061
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1062
- @query_list = args[:query_list] if args.key?(:query_list)
1063
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
1064
- @script_variables = args[:script_variables] if args.key?(:script_variables)
1065
- @properties = args[:properties] if args.key?(:properties)
1066
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1216
+ @accelerator_count = args[:accelerator_count] if args.key?(:accelerator_count)
1217
+ @accelerator_type_uri = args[:accelerator_type_uri] if args.key?(:accelerator_type_uri)
1067
1218
  end
1068
1219
  end
1069
1220
 
1070
- # A list of queries to run on a cluster.
1071
- class QueryList
1221
+ # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
1222
+ # This report is available for testing purposes only. It may be changed before
1223
+ # final release.
1224
+ class ClusterMetrics
1072
1225
  include Google::Apis::Core::Hashable
1073
1226
 
1074
- # [Required] The queries to execute. You do not need to terminate a query with a
1075
- # semicolon. Multiple queries can be specified in one string by separating each
1076
- # with a semicolon. Here is an example of an Cloud Dataproc API snippet that
1077
- # uses a QueryList to specify a HiveJob: "hiveJob": ` "queryList": ` "queries": [
1078
- # "query1", "query2", "query3;query4", ] ` `
1079
- # Corresponds to the JSON property `queries`
1080
- # @return [Array<String>]
1081
- attr_accessor :queries
1227
+ # The YARN metrics.
1228
+ # Corresponds to the JSON property `yarnMetrics`
1229
+ # @return [Hash<String,String>]
1230
+ attr_accessor :yarn_metrics
1231
+
1232
+ # The HDFS metrics.
1233
+ # Corresponds to the JSON property `hdfsMetrics`
1234
+ # @return [Hash<String,String>]
1235
+ attr_accessor :hdfs_metrics
1082
1236
 
1083
1237
  def initialize(**args)
1084
1238
  update!(**args)
@@ -1086,56 +1240,41 @@ module Google
1086
1240
 
1087
1241
  # Update properties of this object
1088
1242
  def update!(**args)
1089
- @queries = args[:queries] if args.key?(:queries)
1243
+ @yarn_metrics = args[:yarn_metrics] if args.key?(:yarn_metrics)
1244
+ @hdfs_metrics = args[:hdfs_metrics] if args.key?(:hdfs_metrics)
1090
1245
  end
1091
1246
  end
1092
1247
 
1093
- # A Cloud Dataproc job for running Pig queries on YARN.
1094
- class PigJob
1248
+ # The runtime logging config of the job.
1249
+ class LoggingConfig
1095
1250
  include Google::Apis::Core::Hashable
1096
1251
 
1097
- # The HCFS URI of the script that contains the Pig queries.
1098
- # Corresponds to the JSON property `queryFileUri`
1099
- # @return [String]
1100
- attr_accessor :query_file_uri
1101
-
1102
- # A list of queries to run on a cluster.
1103
- # Corresponds to the JSON property `queryList`
1104
- # @return [Google::Apis::DataprocV1::QueryList]
1105
- attr_accessor :query_list
1106
-
1107
- # [Optional] Whether to continue executing queries if a query fails. The default
1108
- # value is `false`. Setting to `true` can be useful when executing independent
1109
- # parallel queries.
1110
- # Corresponds to the JSON property `continueOnFailure`
1111
- # @return [Boolean]
1112
- attr_accessor :continue_on_failure
1113
- alias_method :continue_on_failure?, :continue_on_failure
1114
-
1115
- # [Optional] Mapping of query variable names to values (equivalent to the Pig
1116
- # command: `name=[value]`).
1117
- # Corresponds to the JSON property `scriptVariables`
1252
+ # The per-package log levels for the driver. This may include "root" package
1253
+ # name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', '
1254
+ # org.apache = DEBUG'
1255
+ # Corresponds to the JSON property `driverLogLevels`
1118
1256
  # @return [Hash<String,String>]
1119
- attr_accessor :script_variables
1257
+ attr_accessor :driver_log_levels
1120
1258
 
1121
- # [Optional] A mapping of property names to values, used to configure Pig.
1122
- # Properties that conflict with values set by the Cloud Dataproc API may be
1123
- # overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/
1124
- # pig/conf/pig.properties, and classes in user code.
1125
- # Corresponds to the JSON property `properties`
1126
- # @return [Hash<String,String>]
1127
- attr_accessor :properties
1259
+ def initialize(**args)
1260
+ update!(**args)
1261
+ end
1128
1262
 
1129
- # [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client
1130
- # and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
1131
- # Corresponds to the JSON property `jarFileUris`
1132
- # @return [Array<String>]
1133
- attr_accessor :jar_file_uris
1263
+ # Update properties of this object
1264
+ def update!(**args)
1265
+ @driver_log_levels = args[:driver_log_levels] if args.key?(:driver_log_levels)
1266
+ end
1267
+ end
1134
1268
 
1135
- # The runtime logging config of the job.
1136
- # Corresponds to the JSON property `loggingConfig`
1137
- # @return [Google::Apis::DataprocV1::LoggingConfig]
1138
- attr_accessor :logging_config
1269
+ # The location where output from diagnostic command can be found.
1270
+ class DiagnoseClusterOutputLocation
1271
+ include Google::Apis::Core::Hashable
1272
+
1273
+ # Output-only The Google Cloud Storage URI of the diagnostic output. This will
1274
+ # be a plain text file with summary of collected diagnostics.
1275
+ # Corresponds to the JSON property `outputUri`
1276
+ # @return [String]
1277
+ attr_accessor :output_uri
1139
1278
 
1140
1279
  def initialize(**args)
1141
1280
  update!(**args)
@@ -1143,52 +1282,85 @@ module Google
1143
1282
 
1144
1283
  # Update properties of this object
1145
1284
  def update!(**args)
1146
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1147
- @query_list = args[:query_list] if args.key?(:query_list)
1148
- @continue_on_failure = args[:continue_on_failure] if args.key?(:continue_on_failure)
1149
- @script_variables = args[:script_variables] if args.key?(:script_variables)
1150
- @properties = args[:properties] if args.key?(:properties)
1151
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1152
- @logging_config = args[:logging_config] if args.key?(:logging_config)
1285
+ @output_uri = args[:output_uri] if args.key?(:output_uri)
1153
1286
  end
1154
1287
  end
1155
1288
 
1156
- # A Cloud Dataproc job for running Spark SQL queries.
1157
- class SparkSqlJob
1289
+ # This resource represents a long-running operation that is the result of a
1290
+ # network API call.
1291
+ class Operation
1158
1292
  include Google::Apis::Core::Hashable
1159
1293
 
1160
- # The HCFS URI of the script that contains SQL queries.
1161
- # Corresponds to the JSON property `queryFileUri`
1162
- # @return [String]
1163
- attr_accessor :query_file_uri
1164
-
1165
- # A list of queries to run on a cluster.
1166
- # Corresponds to the JSON property `queryList`
1167
- # @return [Google::Apis::DataprocV1::QueryList]
1168
- attr_accessor :query_list
1294
+ # The Status type defines a logical error model that is suitable for different
1295
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
1296
+ # (https://github.com/grpc). The error model is designed to be:
1297
+ # Simple to use and understand for most users
1298
+ # Flexible enough to meet unexpected needsOverviewThe Status message contains
1299
+ # three pieces of data: error code, error message, and error details. The error
1300
+ # code should be an enum value of google.rpc.Code, but it may accept additional
1301
+ # error codes if needed. The error message should be a developer-facing English
1302
+ # message that helps developers understand and resolve the error. If a localized
1303
+ # user-facing error message is needed, put the localized message in the error
1304
+ # details or localize it in the client. The optional error details may contain
1305
+ # arbitrary information about the error. There is a predefined set of error
1306
+ # detail types in the package google.rpc which can be used for common error
1307
+ # conditions.Language mappingThe Status message is the logical representation of
1308
+ # the error model, but it is not necessarily the actual wire format. When the
1309
+ # Status message is exposed in different client libraries and different wire
1310
+ # protocols, it can be mapped differently. For example, it will likely be mapped
1311
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
1312
+ # Other usesThe error model and the Status message can be used in a variety of
1313
+ # environments, either with or without APIs, to provide a consistent developer
1314
+ # experience across different environments.Example uses of this error model
1315
+ # include:
1316
+ # Partial errors. If a service needs to return partial errors to the client, it
1317
+ # may embed the Status in the normal response to indicate the partial errors.
1318
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
1319
+ # Status message for error reporting purpose.
1320
+ # Batch operations. If a client uses batch request and batch response, the
1321
+ # Status message should be used directly inside batch response, one for each
1322
+ # error sub-response.
1323
+ # Asynchronous operations. If an API call embeds asynchronous operation results
1324
+ # in its response, the status of those operations should be represented directly
1325
+ # using the Status message.
1326
+ # Logging. If some API errors are stored in logs, the message Status could be
1327
+ # used directly after any stripping needed for security/privacy reasons.
1328
+ # Corresponds to the JSON property `error`
1329
+ # @return [Google::Apis::DataprocV1::Status]
1330
+ attr_accessor :error
1169
1331
 
1170
- # [Optional] Mapping of query variable names to values (equivalent to the Spark
1171
- # SQL command: SET `name="value";`).
1172
- # Corresponds to the JSON property `scriptVariables`
1173
- # @return [Hash<String,String>]
1174
- attr_accessor :script_variables
1332
+ # Service-specific metadata associated with the operation. It typically contains
1333
+ # progress information and common metadata such as create time. Some services
1334
+ # might not provide such metadata. Any method that returns a long-running
1335
+ # operation should document the metadata type, if any.
1336
+ # Corresponds to the JSON property `metadata`
1337
+ # @return [Hash<String,Object>]
1338
+ attr_accessor :metadata
1175
1339
 
1176
- # [Optional] A mapping of property names to values, used to configure Spark SQL'
1177
- # s SparkConf. Properties that conflict with values set by the Cloud Dataproc
1178
- # API may be overwritten.
1179
- # Corresponds to the JSON property `properties`
1180
- # @return [Hash<String,String>]
1181
- attr_accessor :properties
1340
+ # If the value is false, it means the operation is still in progress. If true,
1341
+ # the operation is completed, and either error or response is available.
1342
+ # Corresponds to the JSON property `done`
1343
+ # @return [Boolean]
1344
+ attr_accessor :done
1345
+ alias_method :done?, :done
1182
1346
 
1183
- # [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.
1184
- # Corresponds to the JSON property `jarFileUris`
1185
- # @return [Array<String>]
1186
- attr_accessor :jar_file_uris
1347
+ # The normal response of the operation in case of success. If the original
1348
+ # method returns no data on success, such as Delete, the response is google.
1349
+ # protobuf.Empty. If the original method is standard Get/Create/Update, the
1350
+ # response should be the resource. For other methods, the response should have
1351
+ # the type XxxResponse, where Xxx is the original method name. For example, if
1352
+ # the original method name is TakeSnapshot(), the inferred response type is
1353
+ # TakeSnapshotResponse.
1354
+ # Corresponds to the JSON property `response`
1355
+ # @return [Hash<String,Object>]
1356
+ attr_accessor :response
1187
1357
 
1188
- # The runtime logging config of the job.
1189
- # Corresponds to the JSON property `loggingConfig`
1190
- # @return [Google::Apis::DataprocV1::LoggingConfig]
1191
- attr_accessor :logging_config
1358
+ # The server-assigned name, which is only unique within the same service that
1359
+ # originally returns it. If you use the default HTTP mapping, the name should
1360
+ # have the format of operations/some/unique/name.
1361
+ # Corresponds to the JSON property `name`
1362
+ # @return [String]
1363
+ attr_accessor :name
1192
1364
 
1193
1365
  def initialize(**args)
1194
1366
  update!(**args)
@@ -1196,62 +1368,68 @@ module Google
1196
1368
 
1197
1369
  # Update properties of this object
1198
1370
  def update!(**args)
1199
- @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1200
- @query_list = args[:query_list] if args.key?(:query_list)
1201
- @script_variables = args[:script_variables] if args.key?(:script_variables)
1202
- @properties = args[:properties] if args.key?(:properties)
1203
- @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1204
- @logging_config = args[:logging_config] if args.key?(:logging_config)
1371
+ @error = args[:error] if args.key?(:error)
1372
+ @metadata = args[:metadata] if args.key?(:metadata)
1373
+ @done = args[:done] if args.key?(:done)
1374
+ @response = args[:response] if args.key?(:response)
1375
+ @name = args[:name] if args.key?(:name)
1205
1376
  end
1206
1377
  end
1207
1378
 
1208
- # Cloud Dataproc job status.
1209
- class JobStatus
1379
+ # The status of the operation.
1380
+ class OperationStatus
1210
1381
  include Google::Apis::Core::Hashable
1211
1382
 
1212
- # [Required] A state message specifying the overall job state.
1383
+ # A message containing the detailed operation state.
1384
+ # Corresponds to the JSON property `innerState`
1385
+ # @return [String]
1386
+ attr_accessor :inner_state
1387
+
1388
+ # The time this state was entered.
1389
+ # Corresponds to the JSON property `stateStartTime`
1390
+ # @return [String]
1391
+ attr_accessor :state_start_time
1392
+
1393
+ # A message containing the operation state.
1213
1394
  # Corresponds to the JSON property `state`
1214
1395
  # @return [String]
1215
1396
  attr_accessor :state
1216
1397
 
1217
- # [Optional] Job state details, such as an error description if the state is
1218
- # ERROR.
1398
+ # A message containing any operation metadata details.
1219
1399
  # Corresponds to the JSON property `details`
1220
1400
  # @return [String]
1221
1401
  attr_accessor :details
1222
1402
 
1223
- # [Output-only] The time when this state was entered.
1224
- # Corresponds to the JSON property `stateStartTime`
1225
- # @return [String]
1226
- attr_accessor :state_start_time
1227
-
1228
1403
  def initialize(**args)
1229
1404
  update!(**args)
1230
1405
  end
1231
1406
 
1232
1407
  # Update properties of this object
1233
1408
  def update!(**args)
1409
+ @inner_state = args[:inner_state] if args.key?(:inner_state)
1410
+ @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1234
1411
  @state = args[:state] if args.key?(:state)
1235
1412
  @details = args[:details] if args.key?(:details)
1236
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1237
1413
  end
1238
1414
  end
1239
1415
 
1240
- # A list of jobs in a project.
1241
- class ListJobsResponse
1416
+ # Encapsulates the full scoping used to reference a job.
1417
+ class JobReference
1242
1418
  include Google::Apis::Core::Hashable
1243
1419
 
1244
- # [Output-only] Jobs list.
1245
- # Corresponds to the JSON property `jobs`
1246
- # @return [Array<Google::Apis::DataprocV1::Job>]
1247
- attr_accessor :jobs
1420
+ # Required The ID of the Google Cloud Platform project that the job belongs to.
1421
+ # Corresponds to the JSON property `projectId`
1422
+ # @return [String]
1423
+ attr_accessor :project_id
1248
1424
 
1249
- # [Optional] This token is included in the response if there are more results to
1250
- # fetch. To fetch additional results, provide this value as the `page_token` in
1251
- # a subsequent ListJobsRequest.
1252
- # Corresponds to the JSON property `nextPageToken`
1425
+ # Optional The job ID, which must be unique within the project. The job ID is
1426
+ # generated by the server upon job submission or provided by the user as a means
1427
+ # to perform retries without creating duplicate jobs. The ID must contain only
1428
+ # letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The
1429
+ # maximum length is 100 characters.
1430
+ # Corresponds to the JSON property `jobId`
1253
1431
  # @return [String]
1254
- attr_accessor :next_page_token
1432
+ attr_accessor :job_id
1255
1433
 
1256
1434
  def initialize(**args)
1257
1435
  update!(**args)
@@ -1259,54 +1437,150 @@ module Google
1259
1437
 
1260
1438
  # Update properties of this object
1261
1439
  def update!(**args)
1262
- @jobs = args[:jobs] if args.key?(:jobs)
1263
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1440
+ @project_id = args[:project_id] if args.key?(:project_id)
1441
+ @job_id = args[:job_id] if args.key?(:job_id)
1264
1442
  end
1265
1443
  end
1266
1444
 
1267
- # A request to cancel a job.
1268
- class CancelJobRequest
1445
+ # A request to submit a job.
1446
+ class SubmitJobRequest
1269
1447
  include Google::Apis::Core::Hashable
1270
1448
 
1449
+ # A Cloud Dataproc job resource.
1450
+ # Corresponds to the JSON property `job`
1451
+ # @return [Google::Apis::DataprocV1::Job]
1452
+ attr_accessor :job
1453
+
1271
1454
  def initialize(**args)
1272
1455
  update!(**args)
1273
1456
  end
1274
1457
 
1275
1458
  # Update properties of this object
1276
1459
  def update!(**args)
1460
+ @job = args[:job] if args.key?(:job)
1277
1461
  end
1278
1462
  end
1279
1463
 
1280
- # A generic empty message that you can re-use to avoid defining duplicated empty
1281
- # messages in your APIs. A typical example is to use it as the request or the
1282
- # response type of an API method. For instance: service Foo ` rpc Bar(google.
1283
- # protobuf.Empty) returns (google.protobuf.Empty); ` The JSON representation for
1284
- # `Empty` is empty JSON object ````.
1285
- class Empty
1464
+ # The Status type defines a logical error model that is suitable for different
1465
+ # programming environments, including REST APIs and RPC APIs. It is used by gRPC
1466
+ # (https://github.com/grpc). The error model is designed to be:
1467
+ # Simple to use and understand for most users
1468
+ # Flexible enough to meet unexpected needsOverviewThe Status message contains
1469
+ # three pieces of data: error code, error message, and error details. The error
1470
+ # code should be an enum value of google.rpc.Code, but it may accept additional
1471
+ # error codes if needed. The error message should be a developer-facing English
1472
+ # message that helps developers understand and resolve the error. If a localized
1473
+ # user-facing error message is needed, put the localized message in the error
1474
+ # details or localize it in the client. The optional error details may contain
1475
+ # arbitrary information about the error. There is a predefined set of error
1476
+ # detail types in the package google.rpc which can be used for common error
1477
+ # conditions.Language mappingThe Status message is the logical representation of
1478
+ # the error model, but it is not necessarily the actual wire format. When the
1479
+ # Status message is exposed in different client libraries and different wire
1480
+ # protocols, it can be mapped differently. For example, it will likely be mapped
1481
+ # to some exceptions in Java, but more likely mapped to some error codes in C.
1482
+ # Other usesThe error model and the Status message can be used in a variety of
1483
+ # environments, either with or without APIs, to provide a consistent developer
1484
+ # experience across different environments.Example uses of this error model
1485
+ # include:
1486
+ # Partial errors. If a service needs to return partial errors to the client, it
1487
+ # may embed the Status in the normal response to indicate the partial errors.
1488
+ # Workflow errors. A typical workflow has multiple steps. Each step may have a
1489
+ # Status message for error reporting purpose.
1490
+ # Batch operations. If a client uses batch request and batch response, the
1491
+ # Status message should be used directly inside batch response, one for each
1492
+ # error sub-response.
1493
+ # Asynchronous operations. If an API call embeds asynchronous operation results
1494
+ # in its response, the status of those operations should be represented directly
1495
+ # using the Status message.
1496
+ # Logging. If some API errors are stored in logs, the message Status could be
1497
+ # used directly after any stripping needed for security/privacy reasons.
1498
+ class Status
1286
1499
  include Google::Apis::Core::Hashable
1287
1500
 
1501
+ # The status code, which should be an enum value of google.rpc.Code.
1502
+ # Corresponds to the JSON property `code`
1503
+ # @return [Fixnum]
1504
+ attr_accessor :code
1505
+
1506
+ # A developer-facing error message, which should be in English. Any user-facing
1507
+ # error message should be localized and sent in the google.rpc.Status.details
1508
+ # field, or localized by the client.
1509
+ # Corresponds to the JSON property `message`
1510
+ # @return [String]
1511
+ attr_accessor :message
1512
+
1513
+ # A list of messages that carry the error details. There will be a common set of
1514
+ # message types for APIs to use.
1515
+ # Corresponds to the JSON property `details`
1516
+ # @return [Array<Hash<String,Object>>]
1517
+ attr_accessor :details
1518
+
1288
1519
  def initialize(**args)
1289
1520
  update!(**args)
1290
1521
  end
1291
1522
 
1292
1523
  # Update properties of this object
1293
1524
  def update!(**args)
1525
+ @code = args[:code] if args.key?(:code)
1526
+ @message = args[:message] if args.key?(:message)
1527
+ @details = args[:details] if args.key?(:details)
1294
1528
  end
1295
1529
  end
1296
1530
 
1297
- # The response message for Operations.ListOperations.
1298
- class ListOperationsResponse
1531
+ # Optional The config settings for Google Compute Engine resources in an
1532
+ # instance group, such as a master or worker group.
1533
+ class InstanceGroupConfig
1299
1534
  include Google::Apis::Core::Hashable
1300
1535
 
1301
- # A list of operations that matches the specified filter in the request.
1302
- # Corresponds to the JSON property `operations`
1303
- # @return [Array<Google::Apis::DataprocV1::Operation>]
1304
- attr_accessor :operations
1536
+ # Specifies the resources used to actively manage an instance group.
1537
+ # Corresponds to the JSON property `managedGroupConfig`
1538
+ # @return [Google::Apis::DataprocV1::ManagedGroupConfig]
1539
+ attr_accessor :managed_group_config
1540
+
1541
+ # Optional Specifies that this instance group contains preemptible instances.
1542
+ # Corresponds to the JSON property `isPreemptible`
1543
+ # @return [Boolean]
1544
+ attr_accessor :is_preemptible
1545
+ alias_method :is_preemptible?, :is_preemptible
1546
+
1547
+ # Output-only The Google Compute Engine image resource used for cluster
1548
+ # instances. Inferred from SoftwareConfig.image_version.
1549
+ # Corresponds to the JSON property `imageUri`
1550
+ # @return [String]
1551
+ attr_accessor :image_uri
1552
+
1553
+ # Required The Google Compute Engine machine type used for cluster instances.
1554
+ # Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
1555
+ # east1-a/machineTypes/n1-standard-2.
1556
+ # Corresponds to the JSON property `machineTypeUri`
1557
+ # @return [String]
1558
+ attr_accessor :machine_type_uri
1559
+
1560
+ # Optional The list of instance names. Cloud Dataproc derives the names from
1561
+ # cluster_name, num_instances, and the instance group if not set by user (
1562
+ # recommended practice is to let Cloud Dataproc derive the name).
1563
+ # Corresponds to the JSON property `instanceNames`
1564
+ # @return [Array<String>]
1565
+ attr_accessor :instance_names
1566
+
1567
+ # Optional The Google Compute Engine accelerator configuration for these
1568
+ # instances.Beta Feature: This feature is still under development. It may be
1569
+ # changed before final release.
1570
+ # Corresponds to the JSON property `accelerators`
1571
+ # @return [Array<Google::Apis::DataprocV1::AcceleratorConfig>]
1572
+ attr_accessor :accelerators
1573
+
1574
+ # Required The number of VM instances in the instance group. For master instance
1575
+ # groups, must be set to 1.
1576
+ # Corresponds to the JSON property `numInstances`
1577
+ # @return [Fixnum]
1578
+ attr_accessor :num_instances
1305
1579
 
1306
- # The standard List next-page token.
1307
- # Corresponds to the JSON property `nextPageToken`
1308
- # @return [String]
1309
- attr_accessor :next_page_token
1580
+ # Specifies the config of disk options for a group of VM instances.
1581
+ # Corresponds to the JSON property `diskConfig`
1582
+ # @return [Google::Apis::DataprocV1::DiskConfig]
1583
+ attr_accessor :disk_config
1310
1584
 
1311
1585
  def initialize(**args)
1312
1586
  update!(**args)
@@ -1314,20 +1588,29 @@ module Google
1314
1588
 
1315
1589
  # Update properties of this object
1316
1590
  def update!(**args)
1317
- @operations = args[:operations] if args.key?(:operations)
1318
- @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1591
+ @managed_group_config = args[:managed_group_config] if args.key?(:managed_group_config)
1592
+ @is_preemptible = args[:is_preemptible] if args.key?(:is_preemptible)
1593
+ @image_uri = args[:image_uri] if args.key?(:image_uri)
1594
+ @machine_type_uri = args[:machine_type_uri] if args.key?(:machine_type_uri)
1595
+ @instance_names = args[:instance_names] if args.key?(:instance_names)
1596
+ @accelerators = args[:accelerators] if args.key?(:accelerators)
1597
+ @num_instances = args[:num_instances] if args.key?(:num_instances)
1598
+ @disk_config = args[:disk_config] if args.key?(:disk_config)
1319
1599
  end
1320
1600
  end
1321
1601
 
1322
- # The location of diagnostic output.
1323
- class DiagnoseClusterResults
1602
+ # Job scheduling options.Beta Feature: These options are available for testing
1603
+ # purposes only. They may be changed before final release.
1604
+ class JobScheduling
1324
1605
  include Google::Apis::Core::Hashable
1325
1606
 
1326
- # [Output-only] The Google Cloud Storage URI of the diagnostic output. This is a
1327
- # plain text file with a summary of collected diagnostics.
1328
- # Corresponds to the JSON property `outputUri`
1329
- # @return [String]
1330
- attr_accessor :output_uri
1607
+ # Optional Maximum number of times per hour a driver may be restarted as a
1608
+ # result of driver terminating with non-zero code before job is reported failed.
1609
+ # A job may be reported as thrashing if driver exits with non-zero code 4 times
1610
+ # within 10 minute window.Maximum value is 10.
1611
+ # Corresponds to the JSON property `maxFailuresPerHour`
1612
+ # @return [Fixnum]
1613
+ attr_accessor :max_failures_per_hour
1331
1614
 
1332
1615
  def initialize(**args)
1333
1616
  update!(**args)
@@ -1335,43 +1618,25 @@ module Google
1335
1618
 
1336
1619
  # Update properties of this object
1337
1620
  def update!(**args)
1338
- @output_uri = args[:output_uri] if args.key?(:output_uri)
1621
+ @max_failures_per_hour = args[:max_failures_per_hour] if args.key?(:max_failures_per_hour)
1339
1622
  end
1340
1623
  end
1341
1624
 
1342
- # Metadata describing the operation.
1343
- class ClusterOperationMetadata
1625
+ # A list of jobs in a project.
1626
+ class ListJobsResponse
1344
1627
  include Google::Apis::Core::Hashable
1345
1628
 
1346
- # Name of the cluster for the operation.
1347
- # Corresponds to the JSON property `clusterName`
1348
- # @return [String]
1349
- attr_accessor :cluster_name
1350
-
1351
- # Cluster UUId for the operation.
1352
- # Corresponds to the JSON property `clusterUuid`
1353
- # @return [String]
1354
- attr_accessor :cluster_uuid
1355
-
1356
- # The status of the operation.
1357
- # Corresponds to the JSON property `status`
1358
- # @return [Google::Apis::DataprocV1::ClusterOperationStatus]
1359
- attr_accessor :status
1360
-
1361
- # [Output-only] The previous operation status.
1362
- # Corresponds to the JSON property `statusHistory`
1363
- # @return [Array<Google::Apis::DataprocV1::ClusterOperationStatus>]
1364
- attr_accessor :status_history
1365
-
1366
- # [Output-only] The operation type.
1367
- # Corresponds to the JSON property `operationType`
1368
- # @return [String]
1369
- attr_accessor :operation_type
1629
+ # Output-only Jobs list.
1630
+ # Corresponds to the JSON property `jobs`
1631
+ # @return [Array<Google::Apis::DataprocV1::Job>]
1632
+ attr_accessor :jobs
1370
1633
 
1371
- # [Output-only] Short description of operation.
1372
- # Corresponds to the JSON property `description`
1634
+ # Optional This token is included in the response if there are more results to
1635
+ # fetch. To fetch additional results, provide this value as the page_token in a
1636
+ # subsequent <code>ListJobsRequest</code>.
1637
+ # Corresponds to the JSON property `nextPageToken`
1373
1638
  # @return [String]
1374
- attr_accessor :description
1639
+ attr_accessor :next_page_token
1375
1640
 
1376
1641
  def initialize(**args)
1377
1642
  update!(**args)
@@ -1379,38 +1644,28 @@ module Google
1379
1644
 
1380
1645
  # Update properties of this object
1381
1646
  def update!(**args)
1382
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1383
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1384
- @status = args[:status] if args.key?(:status)
1385
- @status_history = args[:status_history] if args.key?(:status_history)
1386
- @operation_type = args[:operation_type] if args.key?(:operation_type)
1387
- @description = args[:description] if args.key?(:description)
1647
+ @jobs = args[:jobs] if args.key?(:jobs)
1648
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1388
1649
  end
1389
1650
  end
1390
1651
 
1391
- # The status of the operation.
1392
- class ClusterOperationStatus
1652
+ # Specifies an executable to run on a fully configured node and a timeout period
1653
+ # for executable completion.
1654
+ class NodeInitializationAction
1393
1655
  include Google::Apis::Core::Hashable
1394
1656
 
1395
- # A message containing the operation state.
1396
- # Corresponds to the JSON property `state`
1397
- # @return [String]
1398
- attr_accessor :state
1399
-
1400
- # A message containing the detailed operation state.
1401
- # Corresponds to the JSON property `innerState`
1402
- # @return [String]
1403
- attr_accessor :inner_state
1404
-
1405
- # A message containing any operation metadata details.
1406
- # Corresponds to the JSON property `details`
1657
+ # Required Google Cloud Storage URI of executable file.
1658
+ # Corresponds to the JSON property `executableFile`
1407
1659
  # @return [String]
1408
- attr_accessor :details
1660
+ attr_accessor :executable_file
1409
1661
 
1410
- # The time this state was entered.
1411
- # Corresponds to the JSON property `stateStartTime`
1662
+ # Optional Amount of time executable has to complete. Default is 10 minutes.
1663
+ # Cluster creation fails with an explanatory error message (the name of the
1664
+ # executable that caused the error and the exceeded timeout period) if the
1665
+ # executable is not completed at end of the timeout period.
1666
+ # Corresponds to the JSON property `executionTimeout`
1412
1667
  # @return [String]
1413
- attr_accessor :state_start_time
1668
+ attr_accessor :execution_timeout
1414
1669
 
1415
1670
  def initialize(**args)
1416
1671
  update!(**args)
@@ -1418,96 +1673,129 @@ module Google
1418
1673
 
1419
1674
  # Update properties of this object
1420
1675
  def update!(**args)
1421
- @state = args[:state] if args.key?(:state)
1422
- @inner_state = args[:inner_state] if args.key?(:inner_state)
1423
- @details = args[:details] if args.key?(:details)
1424
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1676
+ @executable_file = args[:executable_file] if args.key?(:executable_file)
1677
+ @execution_timeout = args[:execution_timeout] if args.key?(:execution_timeout)
1425
1678
  end
1426
1679
  end
1427
1680
 
1428
- # The location where output from diagnostic command can be found.
1429
- class DiagnoseClusterOutputLocation
1681
+ # A request to cancel a job.
1682
+ class CancelJobRequest
1430
1683
  include Google::Apis::Core::Hashable
1431
1684
 
1432
- # [Output-only] The Google Cloud Storage URI of the diagnostic output. This will
1433
- # be a plain text file with summary of collected diagnostics.
1434
- # Corresponds to the JSON property `outputUri`
1435
- # @return [String]
1436
- attr_accessor :output_uri
1437
-
1438
1685
  def initialize(**args)
1439
1686
  update!(**args)
1440
1687
  end
1441
1688
 
1442
1689
  # Update properties of this object
1443
1690
  def update!(**args)
1444
- @output_uri = args[:output_uri] if args.key?(:output_uri)
1445
1691
  end
1446
1692
  end
1447
1693
 
1448
- # Metadata describing the operation.
1449
- class OperationMetadata
1694
+ # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/
1695
+ # ) queries.
1696
+ class SparkSqlJob
1450
1697
  include Google::Apis::Core::Hashable
1451
1698
 
1452
- # A message containing the operation state.
1453
- # Corresponds to the JSON property `state`
1454
- # @return [String]
1455
- attr_accessor :state
1699
+ # Optional Mapping of query variable names to values (equivalent to the Spark
1700
+ # SQL command: SET name="value";).
1701
+ # Corresponds to the JSON property `scriptVariables`
1702
+ # @return [Hash<String,String>]
1703
+ attr_accessor :script_variables
1456
1704
 
1457
- # A message containing the detailed operation state.
1458
- # Corresponds to the JSON property `innerState`
1459
- # @return [String]
1460
- attr_accessor :inner_state
1705
+ # Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.
1706
+ # Corresponds to the JSON property `jarFileUris`
1707
+ # @return [Array<String>]
1708
+ attr_accessor :jar_file_uris
1461
1709
 
1462
- # A message containing any operation metadata details.
1463
- # Corresponds to the JSON property `details`
1464
- # @return [String]
1465
- attr_accessor :details
1710
+ # The runtime logging config of the job.
1711
+ # Corresponds to the JSON property `loggingConfig`
1712
+ # @return [Google::Apis::DataprocV1::LoggingConfig]
1713
+ attr_accessor :logging_config
1466
1714
 
1467
- # The time that the operation was requested.
1468
- # Corresponds to the JSON property `insertTime`
1469
- # @return [String]
1470
- attr_accessor :insert_time
1715
+ # Optional A mapping of property names to values, used to configure Spark SQL's
1716
+ # SparkConf. Properties that conflict with values set by the Cloud Dataproc API
1717
+ # may be overwritten.
1718
+ # Corresponds to the JSON property `properties`
1719
+ # @return [Hash<String,String>]
1720
+ attr_accessor :properties
1471
1721
 
1472
- # The time that the operation was started by the server.
1473
- # Corresponds to the JSON property `startTime`
1722
+ # The HCFS URI of the script that contains SQL queries.
1723
+ # Corresponds to the JSON property `queryFileUri`
1474
1724
  # @return [String]
1475
- attr_accessor :start_time
1725
+ attr_accessor :query_file_uri
1476
1726
 
1477
- # The time that the operation completed.
1478
- # Corresponds to the JSON property `endTime`
1479
- # @return [String]
1480
- attr_accessor :end_time
1727
+ # A list of queries to run on a cluster.
1728
+ # Corresponds to the JSON property `queryList`
1729
+ # @return [Google::Apis::DataprocV1::QueryList]
1730
+ attr_accessor :query_list
1481
1731
 
1482
- # Name of the cluster for the operation.
1483
- # Corresponds to the JSON property `clusterName`
1484
- # @return [String]
1485
- attr_accessor :cluster_name
1732
+ def initialize(**args)
1733
+ update!(**args)
1734
+ end
1486
1735
 
1487
- # Cluster UUId for the operation.
1488
- # Corresponds to the JSON property `clusterUuid`
1736
+ # Update properties of this object
1737
+ def update!(**args)
1738
+ @script_variables = args[:script_variables] if args.key?(:script_variables)
1739
+ @jar_file_uris = args[:jar_file_uris] if args.key?(:jar_file_uris)
1740
+ @logging_config = args[:logging_config] if args.key?(:logging_config)
1741
+ @properties = args[:properties] if args.key?(:properties)
1742
+ @query_file_uri = args[:query_file_uri] if args.key?(:query_file_uri)
1743
+ @query_list = args[:query_list] if args.key?(:query_list)
1744
+ end
1745
+ end
1746
+
1747
+ # Describes the identifying information, config, and status of a cluster of
1748
+ # Google Compute Engine instances.
1749
+ class Cluster
1750
+ include Google::Apis::Core::Hashable
1751
+
1752
+ # Required The Google Cloud Platform project ID that the cluster belongs to.
1753
+ # Corresponds to the JSON property `projectId`
1489
1754
  # @return [String]
1490
- attr_accessor :cluster_uuid
1755
+ attr_accessor :project_id
1491
1756
 
1492
- # The status of the operation.
1757
+ # Optional The labels to associate with this cluster. Label keys must contain 1
1758
+ # to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/
1759
+ # rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63
1760
+ # characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt)
1761
+ # . No more than 32 labels can be associated with a cluster.
1762
+ # Corresponds to the JSON property `labels`
1763
+ # @return [Hash<String,String>]
1764
+ attr_accessor :labels
1765
+
1766
+ # The status of a cluster and its instances.
1493
1767
  # Corresponds to the JSON property `status`
1494
- # @return [Google::Apis::DataprocV1::OperationStatus]
1768
+ # @return [Google::Apis::DataprocV1::ClusterStatus]
1495
1769
  attr_accessor :status
1496
1770
 
1497
- # [Output-only] Previous operation status.
1771
+ # Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
1772
+ # This report is available for testing purposes only. It may be changed before
1773
+ # final release.
1774
+ # Corresponds to the JSON property `metrics`
1775
+ # @return [Google::Apis::DataprocV1::ClusterMetrics]
1776
+ attr_accessor :metrics
1777
+
1778
+ # Output-only The previous cluster status.
1498
1779
  # Corresponds to the JSON property `statusHistory`
1499
- # @return [Array<Google::Apis::DataprocV1::OperationStatus>]
1780
+ # @return [Array<Google::Apis::DataprocV1::ClusterStatus>]
1500
1781
  attr_accessor :status_history
1501
1782
 
1502
- # [Output-only] The operation type.
1503
- # Corresponds to the JSON property `operationType`
1783
+ # The cluster config.
1784
+ # Corresponds to the JSON property `config`
1785
+ # @return [Google::Apis::DataprocV1::ClusterConfig]
1786
+ attr_accessor :config
1787
+
1788
+ # Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
1789
+ # generates this value when it creates the cluster.
1790
+ # Corresponds to the JSON property `clusterUuid`
1504
1791
  # @return [String]
1505
- attr_accessor :operation_type
1792
+ attr_accessor :cluster_uuid
1506
1793
 
1507
- # [Output-only] Short description of operation.
1508
- # Corresponds to the JSON property `description`
1794
+ # Required The cluster name. Cluster names within a project must be unique.
1795
+ # Names of deleted clusters can be reused.
1796
+ # Corresponds to the JSON property `clusterName`
1509
1797
  # @return [String]
1510
- attr_accessor :description
1798
+ attr_accessor :cluster_name
1511
1799
 
1512
1800
  def initialize(**args)
1513
1801
  update!(**args)
@@ -1515,44 +1803,30 @@ module Google
1515
1803
 
1516
1804
  # Update properties of this object
1517
1805
  def update!(**args)
1518
- @state = args[:state] if args.key?(:state)
1519
- @inner_state = args[:inner_state] if args.key?(:inner_state)
1520
- @details = args[:details] if args.key?(:details)
1521
- @insert_time = args[:insert_time] if args.key?(:insert_time)
1522
- @start_time = args[:start_time] if args.key?(:start_time)
1523
- @end_time = args[:end_time] if args.key?(:end_time)
1524
- @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1525
- @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1806
+ @project_id = args[:project_id] if args.key?(:project_id)
1807
+ @labels = args[:labels] if args.key?(:labels)
1526
1808
  @status = args[:status] if args.key?(:status)
1809
+ @metrics = args[:metrics] if args.key?(:metrics)
1527
1810
  @status_history = args[:status_history] if args.key?(:status_history)
1528
- @operation_type = args[:operation_type] if args.key?(:operation_type)
1529
- @description = args[:description] if args.key?(:description)
1811
+ @config = args[:config] if args.key?(:config)
1812
+ @cluster_uuid = args[:cluster_uuid] if args.key?(:cluster_uuid)
1813
+ @cluster_name = args[:cluster_name] if args.key?(:cluster_name)
1530
1814
  end
1531
1815
  end
1532
1816
 
1533
- # The status of the operation.
1534
- class OperationStatus
1817
+ # The response message for Operations.ListOperations.
1818
+ class ListOperationsResponse
1535
1819
  include Google::Apis::Core::Hashable
1536
1820
 
1537
- # A message containing the operation state.
1538
- # Corresponds to the JSON property `state`
1539
- # @return [String]
1540
- attr_accessor :state
1541
-
1542
- # A message containing the detailed operation state.
1543
- # Corresponds to the JSON property `innerState`
1544
- # @return [String]
1545
- attr_accessor :inner_state
1546
-
1547
- # A message containing any operation metadata details.
1548
- # Corresponds to the JSON property `details`
1821
+ # The standard List next-page token.
1822
+ # Corresponds to the JSON property `nextPageToken`
1549
1823
  # @return [String]
1550
- attr_accessor :details
1824
+ attr_accessor :next_page_token
1551
1825
 
1552
- # The time this state was entered.
1553
- # Corresponds to the JSON property `stateStartTime`
1554
- # @return [String]
1555
- attr_accessor :state_start_time
1826
+ # A list of operations that matches the specified filter in the request.
1827
+ # Corresponds to the JSON property `operations`
1828
+ # @return [Array<Google::Apis::DataprocV1::Operation>]
1829
+ attr_accessor :operations
1556
1830
 
1557
1831
  def initialize(**args)
1558
1832
  update!(**args)
@@ -1560,10 +1834,8 @@ module Google
1560
1834
 
1561
1835
  # Update properties of this object
1562
1836
  def update!(**args)
1563
- @state = args[:state] if args.key?(:state)
1564
- @inner_state = args[:inner_state] if args.key?(:inner_state)
1565
- @details = args[:details] if args.key?(:details)
1566
- @state_start_time = args[:state_start_time] if args.key?(:state_start_time)
1837
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
1838
+ @operations = args[:operations] if args.key?(:operations)
1567
1839
  end
1568
1840
  end
1569
1841
  end