google-api-client 0.43.0 → 0.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (964)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/autoapprove.yml +49 -0
  3. data/.github/workflows/release-please.yml +77 -0
  4. data/.gitignore +2 -0
  5. data/.kokoro/trampoline.sh +0 -0
  6. data/CHANGELOG.md +1066 -184
  7. data/Gemfile +1 -0
  8. data/Rakefile +31 -3
  9. data/api_list_config.yaml +8 -0
  10. data/api_names.yaml +1 -0
  11. data/bin/generate-api +77 -15
  12. data/docs/oauth-server.md +4 -6
  13. data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
  14. data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
  15. data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
  16. data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
  17. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  18. data/generated/google/apis/accessapproval_v1.rb +1 -1
  19. data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
  20. data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
  21. data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
  22. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  23. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  24. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  25. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  26. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
  27. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  28. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  29. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
  30. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
  31. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  32. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  33. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  34. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  35. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  36. data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
  37. data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
  38. data/generated/google/apis/admin_directory_v1/service.rb +607 -998
  39. data/generated/google/apis/admin_directory_v1.rb +6 -8
  40. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  41. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  42. data/generated/google/apis/admin_reports_v1.rb +6 -5
  43. data/generated/google/apis/admob_v1/classes.rb +31 -31
  44. data/generated/google/apis/admob_v1/service.rb +2 -1
  45. data/generated/google/apis/admob_v1.rb +6 -2
  46. data/generated/google/apis/adsense_v1_4/service.rb +4 -1
  47. data/generated/google/apis/adsense_v1_4.rb +1 -1
  48. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  49. data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
  50. data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
  51. data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
  52. data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
  53. data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
  54. data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
  55. data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
  56. data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
  57. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  58. data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
  59. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  60. data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
  61. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  62. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  63. data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
  64. data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
  65. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  66. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  67. data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
  68. data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
  69. data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
  70. data/generated/google/apis/apigateway_v1beta.rb +34 -0
  71. data/generated/google/apis/apigee_v1/classes.rb +630 -88
  72. data/generated/google/apis/apigee_v1/representations.rb +209 -1
  73. data/generated/google/apis/apigee_v1/service.rb +401 -74
  74. data/generated/google/apis/apigee_v1.rb +6 -7
  75. data/generated/google/apis/appengine_v1/classes.rb +96 -59
  76. data/generated/google/apis/appengine_v1/representations.rb +17 -0
  77. data/generated/google/apis/appengine_v1/service.rb +38 -47
  78. data/generated/google/apis/appengine_v1.rb +1 -1
  79. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  80. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  81. data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
  82. data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
  83. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  84. data/generated/google/apis/appengine_v1beta.rb +1 -1
  85. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  86. data/generated/google/apis/appsmarket_v2.rb +1 -1
  87. data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
  88. data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
  89. data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
  90. data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
  91. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
  92. data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
  93. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  94. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  95. data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
  96. data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
  97. data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
  98. data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
  99. data/generated/google/apis/bigquery_v2/classes.rb +593 -576
  100. data/generated/google/apis/bigquery_v2/representations.rb +85 -0
  101. data/generated/google/apis/bigquery_v2/service.rb +79 -41
  102. data/generated/google/apis/bigquery_v2.rb +1 -1
  103. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  104. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  105. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  106. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  107. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  108. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  109. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  110. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  111. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  112. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  113. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  114. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  115. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  116. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  117. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  118. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  119. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  120. data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
  121. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  122. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  123. data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
  124. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  125. data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
  126. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  127. data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
  128. data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
  129. data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
  130. data/generated/google/apis/billingbudgets_v1.rb +38 -0
  131. data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
  132. data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
  133. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  134. data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
  135. data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
  136. data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
  137. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  138. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
  139. data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
  140. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
  141. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  142. data/generated/google/apis/books_v1/service.rb +54 -54
  143. data/generated/google/apis/books_v1.rb +1 -1
  144. data/generated/google/apis/calendar_v3/classes.rb +13 -10
  145. data/generated/google/apis/calendar_v3.rb +1 -1
  146. data/generated/google/apis/chat_v1/classes.rb +173 -116
  147. data/generated/google/apis/chat_v1/representations.rb +36 -0
  148. data/generated/google/apis/chat_v1/service.rb +30 -42
  149. data/generated/google/apis/chat_v1.rb +1 -1
  150. data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
  151. data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
  152. data/generated/google/apis/civicinfo_v2.rb +1 -1
  153. data/generated/google/apis/classroom_v1/classes.rb +153 -21
  154. data/generated/google/apis/classroom_v1/representations.rb +43 -0
  155. data/generated/google/apis/classroom_v1/service.rb +240 -0
  156. data/generated/google/apis/classroom_v1.rb +7 -1
  157. data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
  158. data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
  159. data/generated/google/apis/cloudasset_v1/service.rb +296 -167
  160. data/generated/google/apis/cloudasset_v1.rb +1 -1
  161. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  162. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  163. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  164. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  165. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  166. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  167. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
  168. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  169. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  170. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  171. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  172. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  173. data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
  174. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  175. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  176. data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
  177. data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
  178. data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
  179. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  180. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
  181. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
  182. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  183. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  184. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
  185. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
  186. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  187. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  188. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  189. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  190. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  191. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  192. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  193. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  194. data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
  195. data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
  196. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  197. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  198. data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
  199. data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
  200. data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
  201. data/generated/google/apis/cloudidentity_v1.rb +4 -1
  202. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
  203. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
  204. data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
  205. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  206. data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
  207. data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
  208. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  209. data/generated/google/apis/cloudiot_v1.rb +1 -1
  210. data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
  211. data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
  212. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  213. data/generated/google/apis/cloudkms_v1.rb +1 -1
  214. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  215. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  216. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  217. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
  218. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
  219. data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
  220. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  221. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
  222. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
  223. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
  224. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  225. data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
  226. data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
  227. data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
  228. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  229. data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
  230. data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
  231. data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
  232. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  233. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  234. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  235. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  236. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  237. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  238. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  239. data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
  240. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  241. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  242. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  243. data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
  244. data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
  245. data/generated/google/apis/cloudshell_v1/service.rb +198 -25
  246. data/generated/google/apis/cloudshell_v1.rb +1 -1
  247. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  248. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  249. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  250. data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
  251. data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
  252. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  253. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  254. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
  255. data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
  256. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  257. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  258. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
  259. data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
  260. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  261. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  262. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  263. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  264. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  265. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  266. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  267. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  268. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  269. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  270. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  271. data/generated/google/apis/composer_v1/classes.rb +189 -242
  272. data/generated/google/apis/composer_v1/service.rb +79 -150
  273. data/generated/google/apis/composer_v1.rb +1 -1
  274. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  275. data/generated/google/apis/composer_v1beta1/service.rb +94 -179
  276. data/generated/google/apis/composer_v1beta1.rb +1 -1
  277. data/generated/google/apis/compute_alpha/classes.rb +1227 -186
  278. data/generated/google/apis/compute_alpha/representations.rb +235 -8
  279. data/generated/google/apis/compute_alpha/service.rb +2009 -1024
  280. data/generated/google/apis/compute_alpha.rb +1 -1
  281. data/generated/google/apis/compute_beta/classes.rb +1080 -108
  282. data/generated/google/apis/compute_beta/representations.rb +212 -2
  283. data/generated/google/apis/compute_beta/service.rb +1413 -741
  284. data/generated/google/apis/compute_beta.rb +1 -1
  285. data/generated/google/apis/compute_v1/classes.rb +1512 -106
  286. data/generated/google/apis/compute_v1/representations.rb +470 -1
  287. data/generated/google/apis/compute_v1/service.rb +1625 -285
  288. data/generated/google/apis/compute_v1.rb +1 -1
  289. data/generated/google/apis/container_v1/classes.rb +982 -965
  290. data/generated/google/apis/container_v1/representations.rb +60 -0
  291. data/generated/google/apis/container_v1/service.rb +435 -502
  292. data/generated/google/apis/container_v1.rb +1 -1
  293. data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
  294. data/generated/google/apis/container_v1beta1/representations.rb +91 -0
  295. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  296. data/generated/google/apis/container_v1beta1.rb +1 -1
  297. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
  298. data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
  299. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  300. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  301. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
  302. data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
  303. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  304. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  305. data/generated/google/apis/content_v2/classes.rb +515 -1219
  306. data/generated/google/apis/content_v2/service.rb +377 -650
  307. data/generated/google/apis/content_v2.rb +3 -4
  308. data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
  309. data/generated/google/apis/content_v2_1/representations.rb +288 -0
  310. data/generated/google/apis/content_v2_1/service.rb +987 -795
  311. data/generated/google/apis/content_v2_1.rb +3 -4
  312. data/generated/google/apis/customsearch_v1/service.rb +2 -2
  313. data/generated/google/apis/customsearch_v1.rb +1 -1
  314. data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
  315. data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
  316. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  317. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  318. data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
  319. data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
  320. data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
  321. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  322. data/generated/google/apis/datafusion_v1/classes.rb +283 -397
  323. data/generated/google/apis/datafusion_v1/representations.rb +5 -0
  324. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  325. data/generated/google/apis/datafusion_v1.rb +5 -8
  326. data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
  327. data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
  328. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  329. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  330. data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
  331. data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
  332. data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
  333. data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
  334. data/generated/google/apis/dataproc_v1/classes.rb +97 -13
  335. data/generated/google/apis/dataproc_v1/representations.rb +34 -0
  336. data/generated/google/apis/dataproc_v1.rb +1 -1
  337. data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
  338. data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
  339. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  340. data/generated/google/apis/datastore_v1/classes.rb +334 -476
  341. data/generated/google/apis/datastore_v1/service.rb +52 -63
  342. data/generated/google/apis/datastore_v1.rb +1 -1
  343. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  344. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  345. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  346. data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
  347. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  348. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  349. data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
  350. data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
  351. data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
  352. data/generated/google/apis/deploymentmanager_v2.rb +6 -4
  353. data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
  354. data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
  355. data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
  356. data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
  357. data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
  358. data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
  359. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  360. data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
  361. data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
  362. data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
  363. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  364. data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
  365. data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
  366. data/generated/google/apis/dialogflow_v2.rb +1 -1
  367. data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
  368. data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
  369. data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
  370. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  371. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
  372. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
  373. data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
  374. data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
  375. data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
  376. data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
  377. data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
  378. data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
  379. data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
  380. data/generated/google/apis/displayvideo_v1/service.rb +287 -32
  381. data/generated/google/apis/displayvideo_v1.rb +1 -1
  382. data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
  383. data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
  384. data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
  385. data/generated/google/apis/displayvideo_v1beta.rb +38 -0
  386. data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
  387. data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
  388. data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
  389. data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
  390. data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
  391. data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
  392. data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
  393. data/generated/google/apis/displayvideo_v1dev.rb +38 -0
  394. data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
  395. data/generated/google/apis/dlp_v2/representations.rb +16 -0
  396. data/generated/google/apis/dlp_v2/service.rb +962 -905
  397. data/generated/google/apis/dlp_v2.rb +1 -1
  398. data/generated/google/apis/dns_v1/classes.rb +356 -198
  399. data/generated/google/apis/dns_v1/representations.rb +83 -0
  400. data/generated/google/apis/dns_v1/service.rb +83 -98
  401. data/generated/google/apis/dns_v1.rb +2 -2
  402. data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
  403. data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
  404. data/generated/google/apis/dns_v1beta2/service.rb +83 -98
  405. data/generated/google/apis/dns_v1beta2.rb +2 -2
  406. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  407. data/generated/google/apis/docs_v1/service.rb +17 -22
  408. data/generated/google/apis/docs_v1.rb +1 -1
  409. data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
  410. data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
  411. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  412. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  413. data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
  414. data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
  415. data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
  416. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
  417. data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
  418. data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
  419. data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
  420. data/generated/google/apis/domains_v1alpha2.rb +34 -0
  421. data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
  422. data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
  423. data/generated/google/apis/domains_v1beta1/service.rb +805 -0
  424. data/generated/google/apis/domains_v1beta1.rb +34 -0
  425. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  426. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  427. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  428. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
  429. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  430. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  431. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  432. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  433. data/generated/google/apis/drive_v2/classes.rb +18 -7
  434. data/generated/google/apis/drive_v2/representations.rb +1 -0
  435. data/generated/google/apis/drive_v2/service.rb +79 -15
  436. data/generated/google/apis/drive_v2.rb +1 -1
  437. data/generated/google/apis/drive_v3/classes.rb +18 -8
  438. data/generated/google/apis/drive_v3/representations.rb +1 -0
  439. data/generated/google/apis/drive_v3/service.rb +59 -11
  440. data/generated/google/apis/drive_v3.rb +1 -1
  441. data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
  442. data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
  443. data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
  444. data/generated/google/apis/eventarc_v1beta1.rb +34 -0
  445. data/generated/google/apis/file_v1/classes.rb +155 -174
  446. data/generated/google/apis/file_v1/service.rb +43 -52
  447. data/generated/google/apis/file_v1.rb +1 -1
  448. data/generated/google/apis/file_v1beta1/classes.rb +335 -194
  449. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  450. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  451. data/generated/google/apis/file_v1beta1.rb +1 -1
  452. data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
  453. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  454. data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
  455. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  456. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  457. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
  458. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
  459. data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
  460. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  461. data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
  462. data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
  463. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  464. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  465. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  466. data/generated/google/apis/firebaserules_v1.rb +1 -1
  467. data/generated/google/apis/firestore_v1/classes.rb +406 -502
  468. data/generated/google/apis/firestore_v1/service.rb +165 -201
  469. data/generated/google/apis/firestore_v1.rb +1 -1
  470. data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
  471. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  472. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  473. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  474. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  475. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  476. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  477. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  478. data/generated/google/apis/fitness_v1/service.rb +628 -0
  479. data/generated/google/apis/fitness_v1.rb +97 -0
  480. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  481. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  482. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  483. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  484. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  485. data/generated/google/apis/games_management_v1management.rb +2 -3
  486. data/generated/google/apis/games_v1/classes.rb +376 -83
  487. data/generated/google/apis/games_v1/representations.rb +118 -0
  488. data/generated/google/apis/games_v1/service.rb +118 -90
  489. data/generated/google/apis/games_v1.rb +2 -3
  490. data/generated/google/apis/gameservices_v1/classes.rb +22 -14
  491. data/generated/google/apis/gameservices_v1/representations.rb +1 -0
  492. data/generated/google/apis/gameservices_v1/service.rb +54 -51
  493. data/generated/google/apis/gameservices_v1.rb +1 -1
  494. data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
  495. data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
  496. data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
  497. data/generated/google/apis/gameservices_v1beta.rb +1 -1
  498. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  499. data/generated/google/apis/genomics_v1/service.rb +28 -43
  500. data/generated/google/apis/genomics_v1.rb +1 -1
  501. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  502. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  503. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  504. data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
  505. data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
  506. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  507. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  508. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  509. data/generated/google/apis/gmail_v1/service.rb +5 -4
  510. data/generated/google/apis/gmail_v1.rb +1 -1
  511. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
  512. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  513. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  514. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  515. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  516. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  517. data/generated/google/apis/healthcare_v1/classes.rb +637 -826
  518. data/generated/google/apis/healthcare_v1/representations.rb +32 -0
  519. data/generated/google/apis/healthcare_v1/service.rb +842 -855
  520. data/generated/google/apis/healthcare_v1.rb +1 -1
  521. data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
  522. data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
  523. data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
  524. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  525. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  526. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  527. data/generated/google/apis/homegraph_v1.rb +4 -1
  528. data/generated/google/apis/iam_v1/classes.rb +395 -592
  529. data/generated/google/apis/iam_v1/representations.rb +1 -0
  530. data/generated/google/apis/iam_v1/service.rb +427 -555
  531. data/generated/google/apis/iam_v1.rb +1 -1
  532. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  533. data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
  534. data/generated/google/apis/iamcredentials_v1.rb +3 -2
  535. data/generated/google/apis/iap_v1/classes.rb +253 -355
  536. data/generated/google/apis/iap_v1/representations.rb +1 -0
  537. data/generated/google/apis/iap_v1/service.rb +61 -71
  538. data/generated/google/apis/iap_v1.rb +1 -1
  539. data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
  540. data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
  541. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  542. data/generated/google/apis/iap_v1beta1.rb +1 -1
  543. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  544. data/generated/google/apis/indexing_v3.rb +1 -1
  545. data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
  546. data/generated/google/apis/jobs_v2/representations.rb +272 -0
  547. data/generated/google/apis/jobs_v2/service.rb +85 -126
  548. data/generated/google/apis/jobs_v2.rb +1 -1
  549. data/generated/google/apis/jobs_v3/classes.rb +1559 -980
  550. data/generated/google/apis/jobs_v3/representations.rb +272 -0
  551. data/generated/google/apis/jobs_v3/service.rb +101 -139
  552. data/generated/google/apis/jobs_v3.rb +1 -1
  553. data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
  554. data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
  555. data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
  556. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  557. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  558. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  559. data/generated/google/apis/kgsearch_v1.rb +1 -1
  560. data/generated/google/apis/licensing_v1/classes.rb +1 -1
  561. data/generated/google/apis/licensing_v1/service.rb +56 -86
  562. data/generated/google/apis/licensing_v1.rb +4 -3
  563. data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
  564. data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
  565. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  566. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  567. data/generated/google/apis/localservices_v1/classes.rb +426 -0
  568. data/generated/google/apis/localservices_v1/representations.rb +174 -0
  569. data/generated/google/apis/localservices_v1/service.rb +199 -0
  570. data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
  571. data/generated/google/apis/logging_v2/classes.rb +306 -232
  572. data/generated/google/apis/logging_v2/representations.rb +79 -0
  573. data/generated/google/apis/logging_v2/service.rb +3307 -1579
  574. data/generated/google/apis/logging_v2.rb +1 -1
  575. data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
  576. data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
  577. data/generated/google/apis/managedidentities_v1/service.rb +1 -4
  578. data/generated/google/apis/managedidentities_v1.rb +1 -1
  579. data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
  580. data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
  581. data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
  582. data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
  583. data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
  584. data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
  585. data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
  586. data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
  587. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  588. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  589. data/generated/google/apis/manufacturers_v1.rb +1 -1
  590. data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
  591. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  592. data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
  593. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  594. data/generated/google/apis/ml_v1/classes.rb +1122 -1149
  595. data/generated/google/apis/ml_v1/representations.rb +82 -0
  596. data/generated/google/apis/ml_v1/service.rb +194 -253
  597. data/generated/google/apis/ml_v1.rb +1 -1
  598. data/generated/google/apis/monitoring_v1/classes.rb +107 -26
  599. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  600. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  601. data/generated/google/apis/monitoring_v1.rb +1 -1
  602. data/generated/google/apis/monitoring_v3/classes.rb +303 -345
  603. data/generated/google/apis/monitoring_v3/representations.rb +18 -0
  604. data/generated/google/apis/monitoring_v3/service.rb +176 -146
  605. data/generated/google/apis/monitoring_v3.rb +1 -1
  606. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  607. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  608. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  609. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
  610. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
  611. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  612. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  613. data/generated/google/apis/osconfig_v1/classes.rb +154 -902
  614. data/generated/google/apis/osconfig_v1/representations.rb +0 -337
  615. data/generated/google/apis/osconfig_v1/service.rb +26 -31
  616. data/generated/google/apis/osconfig_v1.rb +3 -3
  617. data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
  618. data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
  619. data/generated/google/apis/osconfig_v1beta.rb +3 -3
  620. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  621. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  622. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  623. data/generated/google/apis/oslogin_v1.rb +1 -1
  624. data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
  625. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  626. data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
  627. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  628. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  629. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  630. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  631. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  632. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  633. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  634. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  635. data/generated/google/apis/people_v1/classes.rb +173 -63
  636. data/generated/google/apis/people_v1/representations.rb +41 -0
  637. data/generated/google/apis/people_v1/service.rb +63 -61
  638. data/generated/google/apis/people_v1.rb +1 -1
  639. data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
  640. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  641. data/generated/google/apis/playablelocations_v3.rb +1 -1
  642. data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
  643. data/generated/google/apis/playcustomapp_v1.rb +1 -1
  644. data/generated/google/apis/poly_v1/classes.rb +65 -79
  645. data/generated/google/apis/poly_v1/service.rb +50 -63
  646. data/generated/google/apis/poly_v1.rb +3 -4
  647. data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
  648. data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
  649. data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
  650. data/generated/google/apis/privateca_v1beta1.rb +34 -0
  651. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
  652. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
  653. data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
  654. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  655. data/generated/google/apis/pubsub_v1/classes.rb +399 -518
  656. data/generated/google/apis/pubsub_v1/representations.rb +2 -0
  657. data/generated/google/apis/pubsub_v1/service.rb +221 -247
  658. data/generated/google/apis/pubsub_v1.rb +1 -1
  659. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  660. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  661. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  662. data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
  663. data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
  664. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  665. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  666. data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
  667. data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
  668. data/generated/google/apis/pubsublite_v1/service.rb +558 -0
  669. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  670. data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
  671. data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
  672. data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
  673. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  674. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
  675. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
  676. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  677. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  678. data/generated/google/apis/recommender_v1/classes.rb +1 -1
  679. data/generated/google/apis/recommender_v1/service.rb +4 -2
  680. data/generated/google/apis/recommender_v1.rb +1 -1
  681. data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
  682. data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
  683. data/generated/google/apis/recommender_v1beta1.rb +1 -1
  684. data/generated/google/apis/redis_v1/classes.rb +91 -513
  685. data/generated/google/apis/redis_v1/representations.rb +0 -139
  686. data/generated/google/apis/redis_v1/service.rb +92 -109
  687. data/generated/google/apis/redis_v1.rb +1 -1
  688. data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
  689. data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
  690. data/generated/google/apis/redis_v1beta1/service.rb +126 -109
  691. data/generated/google/apis/redis_v1beta1.rb +1 -1
  692. data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
  693. data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
  694. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  695. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  696. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
  697. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
  698. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  699. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  700. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
  701. data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
  702. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  703. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  704. data/generated/google/apis/reseller_v1/classes.rb +151 -219
  705. data/generated/google/apis/reseller_v1/service.rb +122 -173
  706. data/generated/google/apis/reseller_v1.rb +2 -2
  707. data/generated/google/apis/run_v1/classes.rb +19 -138
  708. data/generated/google/apis/run_v1/representations.rb +1 -62
  709. data/generated/google/apis/run_v1/service.rb +0 -342
  710. data/generated/google/apis/run_v1.rb +1 -1
  711. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  712. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  713. data/generated/google/apis/run_v1alpha1.rb +1 -1
  714. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  715. data/generated/google/apis/run_v1beta1.rb +1 -1
  716. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
  717. data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
  718. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  719. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  720. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  721. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  722. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  723. data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
  724. data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
  725. data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
  726. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  727. data/generated/google/apis/script_v1/classes.rb +88 -111
  728. data/generated/google/apis/script_v1/service.rb +63 -69
  729. data/generated/google/apis/script_v1.rb +1 -1
  730. data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
  731. data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
  732. data/generated/google/apis/searchconsole_v1/service.rb +287 -0
  733. data/generated/google/apis/searchconsole_v1.rb +7 -1
  734. data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
  735. data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
  736. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  737. data/generated/google/apis/secretmanager_v1.rb +1 -1
  738. data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
  739. data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
  740. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  741. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  742. data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
  743. data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
  744. data/generated/google/apis/securitycenter_v1.rb +1 -1
  745. data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
  746. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
  747. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  748. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
  749. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
  750. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  751. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  752. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
  753. data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
  754. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
  755. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  756. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
  757. data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
  758. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  759. data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
  760. data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
  761. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  762. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  763. data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
  764. data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
  765. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  766. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  767. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
  768. data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
  769. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  770. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  771. data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
  772. data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
  773. data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
  774. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  775. data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
  776. data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
  777. data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
  778. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  779. data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
  780. data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
  781. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  782. data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
  783. data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
  784. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  785. data/generated/google/apis/serviceusage_v1.rb +1 -1
  786. data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
  787. data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
  788. data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
  789. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  790. data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
  791. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  792. data/generated/google/apis/sheets_v4/service.rb +113 -149
  793. data/generated/google/apis/sheets_v4.rb +1 -1
  794. data/generated/google/apis/site_verification_v1.rb +1 -1
  795. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  796. data/generated/google/apis/slides_v1/service.rb +23 -30
  797. data/generated/google/apis/slides_v1.rb +1 -1
  798. data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
  799. data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
  800. data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
  801. data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
  802. data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
  803. data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
  804. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  805. data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
  806. data/generated/google/apis/spanner_v1/representations.rb +1 -0
  807. data/generated/google/apis/spanner_v1/service.rb +443 -618
  808. data/generated/google/apis/spanner_v1.rb +1 -1
  809. data/generated/google/apis/speech_v1/classes.rb +174 -220
  810. data/generated/google/apis/speech_v1/service.rb +27 -32
  811. data/generated/google/apis/speech_v1.rb +1 -1
  812. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  813. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  814. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  815. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  816. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  817. data/generated/google/apis/speech_v2beta1.rb +1 -1
  818. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
  819. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
  820. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
  821. data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
  822. data/generated/google/apis/storage_v1/classes.rb +10 -17
  823. data/generated/google/apis/storage_v1/representations.rb +2 -3
  824. data/generated/google/apis/storage_v1/service.rb +3 -2
  825. data/generated/google/apis/storage_v1.rb +1 -1
  826. data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
  827. data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
  828. data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
  829. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  830. data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
  831. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  832. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  833. data/generated/google/apis/sts_v1/classes.rb +121 -0
  834. data/generated/google/apis/sts_v1/representations.rb +59 -0
  835. data/generated/google/apis/sts_v1/service.rb +90 -0
  836. data/generated/google/apis/sts_v1.rb +32 -0
  837. data/generated/google/apis/sts_v1beta/classes.rb +191 -0
  838. data/generated/google/apis/sts_v1beta/representations.rb +61 -0
  839. data/generated/google/apis/sts_v1beta/service.rb +92 -0
  840. data/generated/google/apis/sts_v1beta.rb +32 -0
  841. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  842. data/generated/google/apis/tagmanager_v1.rb +1 -1
  843. data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
  844. data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
  845. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  846. data/generated/google/apis/tagmanager_v2.rb +1 -1
  847. data/generated/google/apis/tasks_v1/classes.rb +21 -22
  848. data/generated/google/apis/tasks_v1/service.rb +19 -19
  849. data/generated/google/apis/tasks_v1.rb +1 -1
  850. data/generated/google/apis/testing_v1/classes.rb +384 -390
  851. data/generated/google/apis/testing_v1/representations.rb +23 -0
  852. data/generated/google/apis/testing_v1/service.rb +22 -28
  853. data/generated/google/apis/testing_v1.rb +1 -1
  854. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  855. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  856. data/generated/google/apis/texttospeech_v1.rb +1 -1
  857. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  858. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  859. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  860. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  861. data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
  862. data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
  863. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  864. data/generated/google/apis/tpu_v1/classes.rb +57 -3
  865. data/generated/google/apis/tpu_v1/representations.rb +19 -0
  866. data/generated/google/apis/tpu_v1/service.rb +8 -8
  867. data/generated/google/apis/tpu_v1.rb +1 -1
  868. data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
  869. data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
  870. data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
  871. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  872. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  873. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  874. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  875. data/generated/google/apis/trafficdirector_v2.rb +34 -0
  876. data/generated/google/apis/translate_v3/classes.rb +151 -177
  877. data/generated/google/apis/translate_v3/service.rb +122 -151
  878. data/generated/google/apis/translate_v3.rb +1 -1
  879. data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
  880. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  881. data/generated/google/apis/translate_v3beta1.rb +1 -1
  882. data/generated/google/apis/vault_v1/classes.rb +413 -103
  883. data/generated/google/apis/vault_v1/representations.rb +162 -0
  884. data/generated/google/apis/vault_v1/service.rb +182 -37
  885. data/generated/google/apis/vault_v1.rb +1 -1
  886. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  887. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  888. data/generated/google/apis/vectortile_v1.rb +1 -1
  889. data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
  890. data/generated/google/apis/verifiedaccess_v1.rb +1 -1
  891. data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
  892. data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
  893. data/generated/google/apis/videointelligence_v1/service.rb +38 -77
  894. data/generated/google/apis/videointelligence_v1.rb +1 -1
  895. data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
  896. data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
  897. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  898. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  899. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
  900. data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
  901. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  902. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  903. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
  904. data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
  905. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  906. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  907. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
  908. data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
  909. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  910. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  911. data/generated/google/apis/vision_v1/classes.rb +16 -16
  912. data/generated/google/apis/vision_v1.rb +1 -1
  913. data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
  914. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  915. data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
  916. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  917. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  918. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  919. data/generated/google/apis/webfonts_v1.rb +2 -3
  920. data/generated/google/apis/websecurityscanner_v1.rb +1 -1
  921. data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
  922. data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
  923. data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
  924. data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
  925. data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
  926. data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
  927. data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
  928. data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
  929. data/generated/google/apis/workflows_v1beta/service.rb +438 -0
  930. data/generated/google/apis/workflows_v1beta.rb +35 -0
  931. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  932. data/generated/google/apis/youtube_v3/classes.rb +0 -586
  933. data/generated/google/apis/youtube_v3/representations.rb +0 -269
  934. data/generated/google/apis/youtube_v3/service.rb +3 -120
  935. data/generated/google/apis/youtube_v3.rb +1 -1
  936. data/google-api-client.gemspec +25 -24
  937. data/lib/google/apis/core/api_command.rb +1 -0
  938. data/lib/google/apis/core/http_command.rb +2 -1
  939. data/lib/google/apis/options.rb +8 -5
  940. data/lib/google/apis/version.rb +1 -1
  941. data/synth.py +40 -0
  942. metadata +134 -41
  943. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  944. data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
  945. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  946. data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
  947. data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
  948. data/generated/google/apis/appsactivity_v1/service.rb +0 -126
  949. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  950. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  951. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  952. data/generated/google/apis/dns_v2beta1.rb +0 -43
  953. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  954. data/generated/google/apis/plus_v1/classes.rb +0 -2094
  955. data/generated/google/apis/plus_v1/representations.rb +0 -907
  956. data/generated/google/apis/plus_v1/service.rb +0 -451
  957. data/generated/google/apis/plus_v1.rb +0 -43
  958. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  959. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  960. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  961. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  962. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  963. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
  964. data/generated/google/apis/storage_v1beta2.rb +0 -40
@@ -23,120 +23,107 @@ module Google
23
23
  module RemotebuildexecutionV1
24
24
 
25
25
  # An `Action` captures all the information about an execution which is required
26
- # to reproduce it.
27
- # `Action`s are the core component of the [Execution] service. A single
28
- # `Action` represents a repeatable action that can be performed by the
26
+ # to reproduce it. `Action`s are the core component of the [Execution] service.
27
+ # A single `Action` represents a repeatable action that can be performed by the
29
28
  # execution service. `Action`s can be succinctly identified by the digest of
30
29
  # their wire format encoding and, once an `Action` has been executed, will be
31
30
  # cached in the action cache. Future requests can then use the cached result
32
- # rather than needing to run afresh.
33
- # When a server completes execution of an
34
- # Action, it MAY choose to
35
- # cache the result in
36
- # the ActionCache unless
37
- # `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
38
- # default, future calls to
39
- # Execute the same
40
- # `Action` will also serve their results from the cache. Clients must take care
41
- # to understand the caching behaviour. Ideally, all `Action`s will be
42
- # reproducible so that serving a result from cache is always desirable and
43
- # correct.
31
+ # rather than needing to run afresh. When a server completes execution of an
32
+ # Action, it MAY choose to cache the result in the ActionCache unless `
33
+ # do_not_cache` is `true`. Clients SHOULD expect the server to do so. By default,
34
+ # future calls to Execute the same `Action` will also serve their results from
35
+ # the cache. Clients must take care to understand the caching behaviour. Ideally,
36
+ # all `Action`s will be reproducible so that serving a result from cache is
37
+ # always desirable and correct.
44
38
  class BuildBazelRemoteExecutionV2Action
45
39
  include Google::Apis::Core::Hashable
46
40
 
47
41
  # A content digest. A digest for a given blob consists of the size of the blob
48
- # and its hash. The hash algorithm to use is defined by the server.
49
- # The size is considered to be an integral part of the digest and cannot be
50
- # separated. That is, even if the `hash` field is correctly specified but
51
- # `size_bytes` is not, the server MUST reject the request.
52
- # The reason for including the size in the digest is as follows: in a great
53
- # many cases, the server needs to know the size of the blob it is about to work
54
- # with prior to starting an operation with it, such as flattening Merkle tree
55
- # structures or streaming it to a worker. Technically, the server could
56
- # implement a separate metadata store, but this results in a significantly more
57
- # complicated implementation as opposed to having the client specify the size
58
- # up-front (or storing the size along with the digest in every message where
59
- # digests are embedded). This does mean that the API leaks some implementation
60
- # details of (what we consider to be) a reasonable server implementation, but
61
- # we consider this to be a worthwhile tradeoff.
62
- # When a `Digest` is used to refer to a proto message, it always refers to the
63
- # message in binary encoded form. To ensure consistent hashing, clients and
64
- # servers MUST ensure that they serialize messages according to the following
65
- # rules, even if there are alternate valid encodings for the same message:
66
- # * Fields are serialized in tag order.
67
- # * There are no unknown fields.
68
- # * There are no duplicate fields.
69
- # * Fields are serialized according to the default semantics for their type.
70
- # Most protocol buffer implementations will always follow these rules when
71
- # serializing, but care should be taken to avoid shortcuts. For instance,
72
- # concatenating two messages to merge them may produce duplicate fields.
42
+ # and its hash. The hash algorithm to use is defined by the server. The size is
43
+ # considered to be an integral part of the digest and cannot be separated. That
44
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
45
+ # the server MUST reject the request. The reason for including the size in the
46
+ # digest is as follows: in a great many cases, the server needs to know the size
47
+ # of the blob it is about to work with prior to starting an operation with it,
48
+ # such as flattening Merkle tree structures or streaming it to a worker.
49
+ # Technically, the server could implement a separate metadata store, but this
50
+ # results in a significantly more complicated implementation as opposed to
51
+ # having the client specify the size up-front (or storing the size along with
52
+ # the digest in every message where digests are embedded). This does mean that
53
+ # the API leaks some implementation details of (what we consider to be) a
54
+ # reasonable server implementation, but we consider this to be a worthwhile
55
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
56
+ # refers to the message in binary encoded form. To ensure consistent hashing,
57
+ # clients and servers MUST ensure that they serialize messages according to the
58
+ # following rules, even if there are alternate valid encodings for the same
59
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
60
+ # There are no duplicate fields. * Fields are serialized according to the
61
+ # default semantics for their type. Most protocol buffer implementations will
62
+ # always follow these rules when serializing, but care should be taken to avoid
63
+ # shortcuts. For instance, concatenating two messages to merge them may produce
64
+ # duplicate fields.
73
65
  # Corresponds to the JSON property `commandDigest`
74
66
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
75
67
  attr_accessor :command_digest
76
68
 
77
- # If true, then the `Action`'s result cannot be cached, and in-flight
78
- # requests for the same `Action` may not be merged.
69
+ # If true, then the `Action`'s result cannot be cached, and in-flight requests
70
+ # for the same `Action` may not be merged.
79
71
  # Corresponds to the JSON property `doNotCache`
80
72
  # @return [Boolean]
81
73
  attr_accessor :do_not_cache
82
74
  alias_method :do_not_cache?, :do_not_cache
83
75
 
84
76
  # A content digest. A digest for a given blob consists of the size of the blob
85
- # and its hash. The hash algorithm to use is defined by the server.
86
- # The size is considered to be an integral part of the digest and cannot be
87
- # separated. That is, even if the `hash` field is correctly specified but
88
- # `size_bytes` is not, the server MUST reject the request.
89
- # The reason for including the size in the digest is as follows: in a great
90
- # many cases, the server needs to know the size of the blob it is about to work
91
- # with prior to starting an operation with it, such as flattening Merkle tree
92
- # structures or streaming it to a worker. Technically, the server could
93
- # implement a separate metadata store, but this results in a significantly more
94
- # complicated implementation as opposed to having the client specify the size
95
- # up-front (or storing the size along with the digest in every message where
96
- # digests are embedded). This does mean that the API leaks some implementation
97
- # details of (what we consider to be) a reasonable server implementation, but
98
- # we consider this to be a worthwhile tradeoff.
99
- # When a `Digest` is used to refer to a proto message, it always refers to the
100
- # message in binary encoded form. To ensure consistent hashing, clients and
101
- # servers MUST ensure that they serialize messages according to the following
102
- # rules, even if there are alternate valid encodings for the same message:
103
- # * Fields are serialized in tag order.
104
- # * There are no unknown fields.
105
- # * There are no duplicate fields.
106
- # * Fields are serialized according to the default semantics for their type.
107
- # Most protocol buffer implementations will always follow these rules when
108
- # serializing, but care should be taken to avoid shortcuts. For instance,
109
- # concatenating two messages to merge them may produce duplicate fields.
77
+ # and its hash. The hash algorithm to use is defined by the server. The size is
78
+ # considered to be an integral part of the digest and cannot be separated. That
79
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
80
+ # the server MUST reject the request. The reason for including the size in the
81
+ # digest is as follows: in a great many cases, the server needs to know the size
82
+ # of the blob it is about to work with prior to starting an operation with it,
83
+ # such as flattening Merkle tree structures or streaming it to a worker.
84
+ # Technically, the server could implement a separate metadata store, but this
85
+ # results in a significantly more complicated implementation as opposed to
86
+ # having the client specify the size up-front (or storing the size along with
87
+ # the digest in every message where digests are embedded). This does mean that
88
+ # the API leaks some implementation details of (what we consider to be) a
89
+ # reasonable server implementation, but we consider this to be a worthwhile
90
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
91
+ # refers to the message in binary encoded form. To ensure consistent hashing,
92
+ # clients and servers MUST ensure that they serialize messages according to the
93
+ # following rules, even if there are alternate valid encodings for the same
94
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
95
+ # There are no duplicate fields. * Fields are serialized according to the
96
+ # default semantics for their type. Most protocol buffer implementations will
97
+ # always follow these rules when serializing, but care should be taken to avoid
98
+ # shortcuts. For instance, concatenating two messages to merge them may produce
99
+ # duplicate fields.
110
100
  # Corresponds to the JSON property `inputRootDigest`
111
101
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
112
102
  attr_accessor :input_root_digest
113
103
 
114
- # List of required supported NodeProperty
115
- # keys. In order to ensure that equivalent `Action`s always hash to the same
116
- # value, the supported node properties MUST be lexicographically sorted by name.
117
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
118
- # The interpretation of these properties is server-dependent. If a property is
119
- # not recognized by the server, the server will return an `INVALID_ARGUMENT`
120
- # error.
104
+ # List of required supported NodeProperty keys. In order to ensure that
105
+ # equivalent `Action`s always hash to the same value, the supported node
106
+ # properties MUST be lexicographically sorted by name. Sorting of strings is
107
+ # done by code point, equivalently, by the UTF-8 bytes. The interpretation of
108
+ # these properties is server-dependent. If a property is not recognized by the
109
+ # server, the server will return an `INVALID_ARGUMENT` error.
121
110
  # Corresponds to the JSON property `outputNodeProperties`
122
111
  # @return [Array<String>]
123
112
  attr_accessor :output_node_properties
124
113
 
125
- # A timeout after which the execution should be killed. If the timeout is
126
- # absent, then the client is specifying that the execution should continue
127
- # as long as the server will let it. The server SHOULD impose a timeout if
128
- # the client does not specify one, however, if the client does specify a
129
- # timeout that is longer than the server's maximum timeout, the server MUST
130
- # reject the request.
131
- # The timeout is a part of the
132
- # Action message, and
133
- # therefore two `Actions` with different timeouts are different, even if they
134
- # are otherwise identical. This is because, if they were not, running an
135
- # `Action` with a lower timeout than is required might result in a cache hit
136
- # from an execution run with a longer timeout, hiding the fact that the
137
- # timeout is too short. By encoding it directly in the `Action`, a lower
138
- # timeout will result in a cache miss and the execution timeout will fail
139
- # immediately, rather than whenever the cache entry gets evicted.
114
+ # A timeout after which the execution should be killed. If the timeout is absent,
115
+ # then the client is specifying that the execution should continue as long as
116
+ # the server will let it. The server SHOULD impose a timeout if the client does
117
+ # not specify one, however, if the client does specify a timeout that is longer
118
+ # than the server's maximum timeout, the server MUST reject the request. The
119
+ # timeout is a part of the Action message, and therefore two `Actions` with
120
+ # different timeouts are different, even if they are otherwise identical. This
121
+ # is because, if they were not, running an `Action` with a lower timeout than is
122
+ # required might result in a cache hit from an execution run with a longer
123
+ # timeout, hiding the fact that the timeout is too short. By encoding it
124
+ # directly in the `Action`, a lower timeout will result in a cache miss and the
125
+ # execution timeout will fail immediately, rather than whenever the cache entry
126
+ # gets evicted.
140
127
  # Corresponds to the JSON property `timeout`
141
128
  # @return [String]
142
129
  attr_accessor :timeout
@@ -155,8 +142,7 @@ module Google
155
142
  end
156
143
  end
157
144
 
158
- # An ActionResult represents the result of an
159
- # Action being run.
145
+ # An ActionResult represents the result of an Action being run.
160
146
  class BuildBazelRemoteExecutionV2ActionResult
161
147
  include Google::Apis::Core::Hashable
162
148
 
@@ -170,84 +156,41 @@ module Google
170
156
  # @return [Fixnum]
171
157
  attr_accessor :exit_code
172
158
 
173
- # The output directories of the action. For each output directory requested
174
- # in the `output_directories` or `output_paths` field of the Action, if the
159
+ # The output directories of the action. For each output directory requested in
160
+ # the `output_directories` or `output_paths` field of the Action, if the
175
161
  # corresponding directory existed after the action completed, a single entry
176
- # will be present in the output list, which will contain the digest of a
177
- # Tree message containing the
178
- # directory tree, and the path equal exactly to the corresponding Action
179
- # output_directories member.
180
- # As an example, suppose the Action had an output directory `a/b/dir` and the
181
- # execution produced the following contents in `a/b/dir`: a file named `bar`
182
- # and a directory named `foo` with an executable file named `baz`. Then,
183
- # output_directory will contain (hashes shortened for readability):
184
- # ```json
185
- # // OutputDirectory proto:
186
- # `
187
- # path: "a/b/dir"
188
- # tree_digest: `
189
- # hash: "4a73bc9d03...",
190
- # size: 55
191
- # `
192
- # `
193
- # // Tree proto with hash "4a73bc9d03..." and size 55:
194
- # `
195
- # root: `
196
- # files: [
197
- # `
198
- # name: "bar",
199
- # digest: `
200
- # hash: "4a73bc9d03...",
201
- # size: 65534
202
- # `
203
- # `
204
- # ],
205
- # directories: [
206
- # `
207
- # name: "foo",
208
- # digest: `
209
- # hash: "4cf2eda940...",
210
- # size: 43
211
- # `
212
- # `
213
- # ]
214
- # `
215
- # children : `
216
- # // (Directory proto with hash "4cf2eda940..." and size 43)
217
- # files: [
218
- # `
219
- # name: "baz",
220
- # digest: `
221
- # hash: "b2c941073e...",
222
- # size: 1294,
223
- # `,
224
- # is_executable: true
225
- # `
226
- # ]
227
- # `
228
- # `
229
- # ```
230
- # If an output of the same name as listed in `output_files` of
231
- # the Command was found in `output_directories`, but was not a directory, the
232
- # server will return a FAILED_PRECONDITION.
162
+ # will be present in the output list, which will contain the digest of a Tree
163
+ # message containing the directory tree, and the path equal exactly to the
164
+ # corresponding Action output_directories member. As an example, suppose the
165
+ # Action had an output directory `a/b/dir` and the execution produced the
166
+ # following contents in `a/b/dir`: a file named `bar` and a directory named `foo`
167
+ # with an executable file named `baz`. Then, output_directory will contain (
168
+ # hashes shortened for readability): ```json // OutputDirectory proto: ` path: "
169
+ # a/b/dir" tree_digest: ` hash: "4a73bc9d03...", size: 55 ` ` // Tree proto with
170
+ # hash "4a73bc9d03..." and size 55: ` root: ` files: [ ` name: "bar", digest: `
171
+ # hash: "4a73bc9d03...", size: 65534 ` ` ], directories: [ ` name: "foo", digest:
172
+ # ` hash: "4cf2eda940...", size: 43 ` ` ] ` children : ` // (Directory proto
173
+ # with hash "4cf2eda940..." and size 43) files: [ ` name: "baz", digest: ` hash:
174
+ # "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ` ``` If an output
175
+ # of the same name as listed in `output_files` of the Command was found in `
176
+ # output_directories`, but was not a directory, the server will return a
177
+ # FAILED_PRECONDITION.
233
178
  # Corresponds to the JSON property `outputDirectories`
234
179
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputDirectory>]
235
180
  attr_accessor :output_directories
236
181
 
237
182
  # The output directories of the action that are symbolic links to other
238
183
  # directories. Those may be links to other output directories, or input
239
- # directories, or even absolute paths outside of the working directory,
240
- # if the server supports
241
- # SymlinkAbsolutePathStrategy.ALLOWED.
242
- # For each output directory requested in the `output_directories` field of
243
- # the Action, if the directory existed after the action completed, a
244
- # single entry will be present either in this field, or in the
245
- # `output_directories` field, if the directory was not a symbolic link.
246
- # If an output of the same name was found, but was a symbolic link to a file
247
- # instead of a directory, the server will return a FAILED_PRECONDITION.
248
- # If the action does not produce the requested output, then that output
249
- # will be omitted from the list. The server is free to arrange the output
250
- # list as desired; clients MUST NOT assume that the output list is sorted.
184
+ # directories, or even absolute paths outside of the working directory, if the
185
+ # server supports SymlinkAbsolutePathStrategy.ALLOWED. For each output directory
186
+ # requested in the `output_directories` field of the Action, if the directory
187
+ # existed after the action completed, a single entry will be present either in
188
+ # this field, or in the `output_directories` field, if the directory was not a
189
+ # symbolic link. If an output of the same name was found, but was a symbolic
190
+ # link to a file instead of a directory, the server will return a
191
+ # FAILED_PRECONDITION. If the action does not produce the requested output, then
192
+ # that output will be omitted from the list. The server is free to arrange the
193
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
251
194
  # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
252
195
  # should still populate this field in addition to `output_symlinks`.
253
196
  # Corresponds to the JSON property `outputDirectorySymlinks`
@@ -257,131 +200,119 @@ module Google
257
200
  # The output files of the action that are symbolic links to other files. Those
258
201
  # may be links to other output files, or input files, or even absolute paths
259
202
  # outside of the working directory, if the server supports
260
- # SymlinkAbsolutePathStrategy.ALLOWED.
261
- # For each output file requested in the `output_files` or `output_paths`
262
- # field of the Action, if the corresponding file existed after
263
- # the action completed, a single entry will be present either in this field,
264
- # or in the `output_files` field, if the file was not a symbolic link.
265
- # If an output symbolic link of the same name as listed in `output_files` of
266
- # the Command was found, but its target type was not a regular file, the
267
- # server will return a FAILED_PRECONDITION.
268
- # If the action does not produce the requested output, then that output
269
- # will be omitted from the list. The server is free to arrange the output
270
- # list as desired; clients MUST NOT assume that the output list is sorted.
271
- # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
272
- # should still populate this field in addition to `output_symlinks`.
203
+ # SymlinkAbsolutePathStrategy.ALLOWED. For each output file requested in the `
204
+ # output_files` or `output_paths` field of the Action, if the corresponding file
205
+ # existed after the action completed, a single entry will be present either in
206
+ # this field, or in the `output_files` field, if the file was not a symbolic
207
+ # link. If an output symbolic link of the same name as listed in `output_files`
208
+ # of the Command was found, but its target type was not a regular file, the
209
+ # server will return a FAILED_PRECONDITION. If the action does not produce the
210
+ # requested output, then that output will be omitted from the list. The server
211
+ # is free to arrange the output list as desired; clients MUST NOT assume that
212
+ # the output list is sorted. DEPRECATED as of v2.1. Servers that wish to be
213
+ # compatible with v2.0 API should still populate this field in addition to `
214
+ # output_symlinks`.
273
215
  # Corresponds to the JSON property `outputFileSymlinks`
274
216
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
275
217
  attr_accessor :output_file_symlinks
276
218
 
277
- # The output files of the action. For each output file requested in the
278
- # `output_files` or `output_paths` field of the Action, if the corresponding
279
- # file existed after the action completed, a single entry will be present
280
- # either in this field, or the `output_file_symlinks` field if the file was
281
- # a symbolic link to another file (`output_symlinks` field after v2.1).
282
- # If an output listed in `output_files` was found, but was a directory rather
283
- # than a regular file, the server will return a FAILED_PRECONDITION.
284
- # If the action does not produce the requested output, then that output
285
- # will be omitted from the list. The server is free to arrange the output
286
- # list as desired; clients MUST NOT assume that the output list is sorted.
219
+ # The output files of the action. For each output file requested in the `
220
+ # output_files` or `output_paths` field of the Action, if the corresponding file
221
+ # existed after the action completed, a single entry will be present either in
222
+ # this field, or the `output_file_symlinks` field if the file was a symbolic
223
+ # link to another file (`output_symlinks` field after v2.1). If an output listed
224
+ # in `output_files` was found, but was a directory rather than a regular file,
225
+ # the server will return a FAILED_PRECONDITION. If the action does not produce
226
+ # the requested output, then that output will be omitted from the list. The
227
+ # server is free to arrange the output list as desired; clients MUST NOT assume
228
+ # that the output list is sorted.
287
229
  # Corresponds to the JSON property `outputFiles`
288
230
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputFile>]
289
231
  attr_accessor :output_files
290
232
 
291
- # New in v2.1: this field will only be populated if the command
292
- # `output_paths` field was used, and not the pre v2.1 `output_files` or
293
- # `output_directories` fields.
294
- # The output paths of the action that are symbolic links to other paths. Those
295
- # may be links to other outputs, or inputs, or even absolute paths
296
- # outside of the working directory, if the server supports
297
- # SymlinkAbsolutePathStrategy.ALLOWED.
298
- # A single entry for each output requested in `output_paths`
299
- # field of the Action, if the corresponding path existed after
300
- # the action completed and was a symbolic link.
301
- # If the action does not produce a requested output, then that output
302
- # will be omitted from the list. The server is free to arrange the output
303
- # list as desired; clients MUST NOT assume that the output list is sorted.
233
+ # New in v2.1: this field will only be populated if the command `output_paths`
234
+ # field was used, and not the pre v2.1 `output_files` or `output_directories`
235
+ # fields. The output paths of the action that are symbolic links to other paths.
236
+ # Those may be links to other outputs, or inputs, or even absolute paths outside
237
+ # of the working directory, if the server supports SymlinkAbsolutePathStrategy.
238
+ # ALLOWED. A single entry for each output requested in `output_paths` field of
239
+ # the Action, if the corresponding path existed after the action completed and
240
+ # was a symbolic link. If the action does not produce a requested output, then
241
+ # that output will be omitted from the list. The server is free to arrange the
242
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
304
243
  # Corresponds to the JSON property `outputSymlinks`
305
244
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
306
245
  attr_accessor :output_symlinks
307
246
 
308
247
  # A content digest. A digest for a given blob consists of the size of the blob
309
- # and its hash. The hash algorithm to use is defined by the server.
310
- # The size is considered to be an integral part of the digest and cannot be
311
- # separated. That is, even if the `hash` field is correctly specified but
312
- # `size_bytes` is not, the server MUST reject the request.
313
- # The reason for including the size in the digest is as follows: in a great
314
- # many cases, the server needs to know the size of the blob it is about to work
315
- # with prior to starting an operation with it, such as flattening Merkle tree
316
- # structures or streaming it to a worker. Technically, the server could
317
- # implement a separate metadata store, but this results in a significantly more
318
- # complicated implementation as opposed to having the client specify the size
319
- # up-front (or storing the size along with the digest in every message where
320
- # digests are embedded). This does mean that the API leaks some implementation
321
- # details of (what we consider to be) a reasonable server implementation, but
322
- # we consider this to be a worthwhile tradeoff.
323
- # When a `Digest` is used to refer to a proto message, it always refers to the
324
- # message in binary encoded form. To ensure consistent hashing, clients and
325
- # servers MUST ensure that they serialize messages according to the following
326
- # rules, even if there are alternate valid encodings for the same message:
327
- # * Fields are serialized in tag order.
328
- # * There are no unknown fields.
329
- # * There are no duplicate fields.
330
- # * Fields are serialized according to the default semantics for their type.
331
- # Most protocol buffer implementations will always follow these rules when
332
- # serializing, but care should be taken to avoid shortcuts. For instance,
333
- # concatenating two messages to merge them may produce duplicate fields.
248
+ # and its hash. The hash algorithm to use is defined by the server. The size is
249
+ # considered to be an integral part of the digest and cannot be separated. That
250
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
251
+ # the server MUST reject the request. The reason for including the size in the
252
+ # digest is as follows: in a great many cases, the server needs to know the size
253
+ # of the blob it is about to work with prior to starting an operation with it,
254
+ # such as flattening Merkle tree structures or streaming it to a worker.
255
+ # Technically, the server could implement a separate metadata store, but this
256
+ # results in a significantly more complicated implementation as opposed to
257
+ # having the client specify the size up-front (or storing the size along with
258
+ # the digest in every message where digests are embedded). This does mean that
259
+ # the API leaks some implementation details of (what we consider to be) a
260
+ # reasonable server implementation, but we consider this to be a worthwhile
261
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
262
+ # refers to the message in binary encoded form. To ensure consistent hashing,
263
+ # clients and servers MUST ensure that they serialize messages according to the
264
+ # following rules, even if there are alternate valid encodings for the same
265
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
266
+ # There are no duplicate fields. * Fields are serialized according to the
267
+ # default semantics for their type. Most protocol buffer implementations will
268
+ # always follow these rules when serializing, but care should be taken to avoid
269
+ # shortcuts. For instance, concatenating two messages to merge them may produce
270
+ # duplicate fields.
334
271
  # Corresponds to the JSON property `stderrDigest`
335
272
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
336
273
  attr_accessor :stderr_digest
337
274
 
338
- # The standard error buffer of the action. The server SHOULD NOT inline
339
- # stderr unless requested by the client in the
340
- # GetActionResultRequest
341
- # message. The server MAY omit inlining, even if requested, and MUST do so if
342
- # inlining
343
- # would cause the response to exceed message size limits.
275
+ # The standard error buffer of the action. The server SHOULD NOT inline stderr
276
+ # unless requested by the client in the GetActionResultRequest message. The
277
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
278
+ # cause the response to exceed message size limits.
344
279
  # Corresponds to the JSON property `stderrRaw`
345
280
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
346
281
  # @return [String]
347
282
  attr_accessor :stderr_raw
348
283
 
349
284
  # A content digest. A digest for a given blob consists of the size of the blob
350
- # and its hash. The hash algorithm to use is defined by the server.
351
- # The size is considered to be an integral part of the digest and cannot be
352
- # separated. That is, even if the `hash` field is correctly specified but
353
- # `size_bytes` is not, the server MUST reject the request.
354
- # The reason for including the size in the digest is as follows: in a great
355
- # many cases, the server needs to know the size of the blob it is about to work
356
- # with prior to starting an operation with it, such as flattening Merkle tree
357
- # structures or streaming it to a worker. Technically, the server could
358
- # implement a separate metadata store, but this results in a significantly more
359
- # complicated implementation as opposed to having the client specify the size
360
- # up-front (or storing the size along with the digest in every message where
361
- # digests are embedded). This does mean that the API leaks some implementation
362
- # details of (what we consider to be) a reasonable server implementation, but
363
- # we consider this to be a worthwhile tradeoff.
364
- # When a `Digest` is used to refer to a proto message, it always refers to the
365
- # message in binary encoded form. To ensure consistent hashing, clients and
366
- # servers MUST ensure that they serialize messages according to the following
367
- # rules, even if there are alternate valid encodings for the same message:
368
- # * Fields are serialized in tag order.
369
- # * There are no unknown fields.
370
- # * There are no duplicate fields.
371
- # * Fields are serialized according to the default semantics for their type.
372
- # Most protocol buffer implementations will always follow these rules when
373
- # serializing, but care should be taken to avoid shortcuts. For instance,
374
- # concatenating two messages to merge them may produce duplicate fields.
285
+ # and its hash. The hash algorithm to use is defined by the server. The size is
286
+ # considered to be an integral part of the digest and cannot be separated. That
287
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
288
+ # the server MUST reject the request. The reason for including the size in the
289
+ # digest is as follows: in a great many cases, the server needs to know the size
290
+ # of the blob it is about to work with prior to starting an operation with it,
291
+ # such as flattening Merkle tree structures or streaming it to a worker.
292
+ # Technically, the server could implement a separate metadata store, but this
293
+ # results in a significantly more complicated implementation as opposed to
294
+ # having the client specify the size up-front (or storing the size along with
295
+ # the digest in every message where digests are embedded). This does mean that
296
+ # the API leaks some implementation details of (what we consider to be) a
297
+ # reasonable server implementation, but we consider this to be a worthwhile
298
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
299
+ # refers to the message in binary encoded form. To ensure consistent hashing,
300
+ # clients and servers MUST ensure that they serialize messages according to the
301
+ # following rules, even if there are alternate valid encodings for the same
302
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
303
+ # There are no duplicate fields. * Fields are serialized according to the
304
+ # default semantics for their type. Most protocol buffer implementations will
305
+ # always follow these rules when serializing, but care should be taken to avoid
306
+ # shortcuts. For instance, concatenating two messages to merge them may produce
307
+ # duplicate fields.
375
308
  # Corresponds to the JSON property `stdoutDigest`
376
309
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
377
310
  attr_accessor :stdout_digest
378
311
 
379
- # The standard output buffer of the action. The server SHOULD NOT inline
380
- # stdout unless requested by the client in the
381
- # GetActionResultRequest
382
- # message. The server MAY omit inlining, even if requested, and MUST do so if
383
- # inlining
384
- # would cause the response to exceed message size limits.
312
+ # The standard output buffer of the action. The server SHOULD NOT inline stdout
313
+ # unless requested by the client in the GetActionResultRequest message. The
314
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
315
+ # cause the response to exceed message size limits.
385
316
  # Corresponds to the JSON property `stdoutRaw`
386
317
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
387
318
  # @return [String]
@@ -407,12 +338,11 @@ module Google
407
338
  end
408
339
  end
409
340
 
410
- # A `Command` is the actual command executed by a worker running an
411
- # Action and specifications of its
412
- # environment.
413
- # Except as otherwise required, the environment (such as which system
414
- # libraries or binaries are available, and what filesystems are mounted where)
415
- # is defined by and specific to the implementation of the remote execution API.
341
+ # A `Command` is the actual command executed by a worker running an Action and
342
+ # specifications of its environment. Except as otherwise required, the
343
+ # environment (such as which system libraries or binaries are available, and
344
+ # what filesystems are mounted where) is defined by and specific to the
345
+ # implementation of the remote execution API.
416
346
  class BuildBazelRemoteExecutionV2Command
417
347
  include Google::Apis::Core::Hashable
418
348
 
@@ -425,105 +355,90 @@ module Google
425
355
 
426
356
  # The environment variables to set when running the program. The worker may
427
357
  # provide its own default environment variables; these defaults can be
428
- # overridden using this field. Additional variables can also be specified.
429
- # In order to ensure that equivalent
430
- # Commands always hash to the same
431
- # value, the environment variables MUST be lexicographically sorted by name.
432
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
358
+ # overridden using this field. Additional variables can also be specified. In
359
+ # order to ensure that equivalent Commands always hash to the same value, the
360
+ # environment variables MUST be lexicographically sorted by name. Sorting of
361
+ # strings is done by code point, equivalently, by the UTF-8 bytes.
433
362
  # Corresponds to the JSON property `environmentVariables`
434
363
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2CommandEnvironmentVariable>]
435
364
  attr_accessor :environment_variables
436
365
 
437
- # A list of the output directories that the client expects to retrieve from
438
- # the action. Only the listed directories will be returned (an entire
439
- # directory structure will be returned as a
440
- # Tree message digest, see
441
- # OutputDirectory), as
442
- # well as files listed in `output_files`. Other files or directories that
443
- # may be created during command execution are discarded.
444
- # The paths are relative to the working directory of the action execution.
445
- # The paths are specified using a single forward slash (`/`) as a path
446
- # separator, even if the execution platform natively uses a different
447
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
448
- # being a relative path. The special value of empty string is allowed,
449
- # although not recommended, and can be used to capture the entire working
450
- # directory tree, including inputs.
451
- # In order to ensure consistent hashing of the same Action, the output paths
452
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
453
- # bytes).
454
- # An output directory cannot be duplicated or have the same path as any of
455
- # the listed output files. An output directory is allowed to be a parent of
456
- # another output directory.
366
+ # A list of the output directories that the client expects to retrieve from the
367
+ # action. Only the listed directories will be returned (an entire directory
368
+ # structure will be returned as a Tree message digest, see OutputDirectory), as
369
+ # well as files listed in `output_files`. Other files or directories that may be
370
+ # created during command execution are discarded. The paths are relative to the
371
+ # working directory of the action execution. The paths are specified using a
372
+ # single forward slash (`/`) as a path separator, even if the execution platform
373
+ # natively uses a different separator. The path MUST NOT include a trailing
374
+ # slash, nor a leading slash, being a relative path. The special value of empty
375
+ # string is allowed, although not recommended, and can be used to capture the
376
+ # entire working directory tree, including inputs. In order to ensure consistent
377
+ # hashing of the same Action, the output paths MUST be sorted lexicographically
378
+ # by code point (or, equivalently, by UTF-8 bytes). An output directory cannot
379
+ # be duplicated or have the same path as any of the listed output files. An
380
+ # output directory is allowed to be a parent of another output directory.
457
381
  # Directories leading up to the output directories (but not the output
458
- # directories themselves) are created by the worker prior to execution, even
459
- # if they are not explicitly part of the input root.
460
- # DEPRECATED since 2.1: Use `output_paths` instead.
382
+ # directories themselves) are created by the worker prior to execution, even if
383
+ # they are not explicitly part of the input root. DEPRECATED since 2.1: Use `
384
+ # output_paths` instead.
461
385
  # Corresponds to the JSON property `outputDirectories`
462
386
  # @return [Array<String>]
463
387
  attr_accessor :output_directories
464
388
 
465
- # A list of the output files that the client expects to retrieve from the
466
- # action. Only the listed files, as well as directories listed in
467
- # `output_directories`, will be returned to the client as output.
468
- # Other files or directories that may be created during command execution
469
- # are discarded.
470
- # The paths are relative to the working directory of the action execution.
471
- # The paths are specified using a single forward slash (`/`) as a path
472
- # separator, even if the execution platform natively uses a different
473
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
474
- # being a relative path.
475
- # In order to ensure consistent hashing of the same Action, the output paths
476
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
477
- # bytes).
478
- # An output file cannot be duplicated, be a parent of another output file, or
479
- # have the same path as any of the listed output directories.
480
- # Directories leading up to the output files are created by the worker prior
481
- # to execution, even if they are not explicitly part of the input root.
482
- # DEPRECATED since v2.1: Use `output_paths` instead.
389
+ # A list of the output files that the client expects to retrieve from the action.
390
+ # Only the listed files, as well as directories listed in `output_directories`,
391
+ # will be returned to the client as output. Other files or directories that may
392
+ # be created during command execution are discarded. The paths are relative to
393
+ # the working directory of the action execution. The paths are specified using a
394
+ # single forward slash (`/`) as a path separator, even if the execution platform
395
+ # natively uses a different separator. The path MUST NOT include a trailing
396
+ # slash, nor a leading slash, being a relative path. In order to ensure
397
+ # consistent hashing of the same Action, the output paths MUST be sorted
398
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes). An output
399
+ # file cannot be duplicated, be a parent of another output file, or have the
400
+ # same path as any of the listed output directories. Directories leading up to
401
+ # the output files are created by the worker prior to execution, even if they
402
+ # are not explicitly part of the input root. DEPRECATED since v2.1: Use `
403
+ # output_paths` instead.
483
404
  # Corresponds to the JSON property `outputFiles`
484
405
  # @return [Array<String>]
485
406
  attr_accessor :output_files
486
407
 
487
- # A list of the output paths that the client expects to retrieve from the
488
- # action. Only the listed paths will be returned to the client as output.
489
- # The type of the output (file or directory) is not specified, and will be
490
- # determined by the server after action execution. If the resulting path is
491
- # a file, it will be returned in an
492
- # OutputFile) typed field.
493
- # If the path is a directory, the entire directory structure will be returned
494
- # as a Tree message digest, see
495
- # OutputDirectory)
496
- # Other files or directories that may be created during command execution
497
- # are discarded.
498
- # The paths are relative to the working directory of the action execution.
499
- # The paths are specified using a single forward slash (`/`) as a path
500
- # separator, even if the execution platform natively uses a different
501
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
502
- # being a relative path.
503
- # In order to ensure consistent hashing of the same Action, the output paths
504
- # MUST be deduplicated and sorted lexicographically by code point (or,
505
- # equivalently, by UTF-8 bytes).
506
- # Directories leading up to the output paths are created by the worker prior
507
- # to execution, even if they are not explicitly part of the input root.
508
- # New in v2.1: this field supersedes the DEPRECATED `output_files` and
509
- # `output_directories` fields. If `output_paths` is used, `output_files` and
510
- # `output_directories` will be ignored!
408
+ # A list of the output paths that the client expects to retrieve from the action.
409
+ # Only the listed paths will be returned to the client as output. The type of
410
+ # the output (file or directory) is not specified, and will be determined by the
411
+ # server after action execution. If the resulting path is a file, it will be
412
+ # returned in an OutputFile) typed field. If the path is a directory, the entire
413
+ # directory structure will be returned as a Tree message digest, see
414
+ # OutputDirectory) Other files or directories that may be created during command
415
+ # execution are discarded. The paths are relative to the working directory of
416
+ # the action execution. The paths are specified using a single forward slash (`/`
417
+ # ) as a path separator, even if the execution platform natively uses a
418
+ # different separator. The path MUST NOT include a trailing slash, nor a leading
419
+ # slash, being a relative path. In order to ensure consistent hashing of the
420
+ # same Action, the output paths MUST be deduplicated and sorted
421
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes).
422
+ # Directories leading up to the output paths are created by the worker prior to
423
+ # execution, even if they are not explicitly part of the input root. New in v2.1:
424
+ # this field supersedes the DEPRECATED `output_files` and `output_directories`
425
+ # fields. If `output_paths` is used, `output_files` and `output_directories`
426
+ # will be ignored!
511
427
  # Corresponds to the JSON property `outputPaths`
512
428
  # @return [Array<String>]
513
429
  attr_accessor :output_paths
514
430
 
515
431
  # A `Platform` is a set of requirements, such as hardware, operating system, or
516
- # compiler toolchain, for an
517
- # Action's execution
518
- # environment. A `Platform` is represented as a series of key-value pairs
519
- # representing the properties that are required of the platform.
432
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
433
+ # represented as a series of key-value pairs representing the properties that
434
+ # are required of the platform.
520
435
  # Corresponds to the JSON property `platform`
521
436
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Platform]
522
437
  attr_accessor :platform
523
438
 
524
- # The working directory, relative to the input root, for the command to run
525
- # in. It must be a directory which exists in the input tree. If it is left
526
- # empty, then the action is run in the input root.
439
+ # The working directory, relative to the input root, for the command to run in.
440
+ # It must be a directory which exists in the input tree. If it is left empty,
441
+ # then the action is run in the input root.
527
442
  # Corresponds to the JSON property `workingDirectory`
528
443
  # @return [String]
529
444
  attr_accessor :working_directory
@@ -571,31 +486,29 @@ module Google
571
486
  end
572
487
 
573
488
  # A content digest. A digest for a given blob consists of the size of the blob
574
- # and its hash. The hash algorithm to use is defined by the server.
575
- # The size is considered to be an integral part of the digest and cannot be
576
- # separated. That is, even if the `hash` field is correctly specified but
577
- # `size_bytes` is not, the server MUST reject the request.
578
- # The reason for including the size in the digest is as follows: in a great
579
- # many cases, the server needs to know the size of the blob it is about to work
580
- # with prior to starting an operation with it, such as flattening Merkle tree
581
- # structures or streaming it to a worker. Technically, the server could
582
- # implement a separate metadata store, but this results in a significantly more
583
- # complicated implementation as opposed to having the client specify the size
584
- # up-front (or storing the size along with the digest in every message where
585
- # digests are embedded). This does mean that the API leaks some implementation
586
- # details of (what we consider to be) a reasonable server implementation, but
587
- # we consider this to be a worthwhile tradeoff.
588
- # When a `Digest` is used to refer to a proto message, it always refers to the
589
- # message in binary encoded form. To ensure consistent hashing, clients and
590
- # servers MUST ensure that they serialize messages according to the following
591
- # rules, even if there are alternate valid encodings for the same message:
592
- # * Fields are serialized in tag order.
593
- # * There are no unknown fields.
594
- # * There are no duplicate fields.
595
- # * Fields are serialized according to the default semantics for their type.
596
- # Most protocol buffer implementations will always follow these rules when
597
- # serializing, but care should be taken to avoid shortcuts. For instance,
598
- # concatenating two messages to merge them may produce duplicate fields.
489
+ # and its hash. The hash algorithm to use is defined by the server. The size is
490
+ # considered to be an integral part of the digest and cannot be separated. That
491
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
492
+ # the server MUST reject the request. The reason for including the size in the
493
+ # digest is as follows: in a great many cases, the server needs to know the size
494
+ # of the blob it is about to work with prior to starting an operation with it,
495
+ # such as flattening Merkle tree structures or streaming it to a worker.
496
+ # Technically, the server could implement a separate metadata store, but this
497
+ # results in a significantly more complicated implementation as opposed to
498
+ # having the client specify the size up-front (or storing the size along with
499
+ # the digest in every message where digests are embedded). This does mean that
500
+ # the API leaks some implementation details of (what we consider to be) a
501
+ # reasonable server implementation, but we consider this to be a worthwhile
502
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
503
+ # refers to the message in binary encoded form. To ensure consistent hashing,
504
+ # clients and servers MUST ensure that they serialize messages according to the
505
+ # following rules, even if there are alternate valid encodings for the same
506
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
507
+ # There are no duplicate fields. * Fields are serialized according to the
508
+ # default semantics for their type. Most protocol buffer implementations will
509
+ # always follow these rules when serializing, but care should be taken to avoid
510
+ # shortcuts. For instance, concatenating two messages to merge them may produce
511
+ # duplicate fields.
599
512
  class BuildBazelRemoteExecutionV2Digest
600
513
  include Google::Apis::Core::Hashable
601
514
 
@@ -622,75 +535,31 @@ module Google
622
535
  end
623
536
 
624
537
  # A `Directory` represents a directory node in a file tree, containing zero or
625
- # more children FileNodes,
626
- # DirectoryNodes and
627
- # SymlinkNodes.
628
- # Each `Node` contains its name in the directory, either the digest of its
629
- # content (either a file blob or a `Directory` proto) or a symlink target, as
630
- # well as possibly some metadata about the file or directory.
631
- # In order to ensure that two equivalent directory trees hash to the same
632
- # value, the following restrictions MUST be obeyed when constructing a
633
- # a `Directory`:
634
- # * Every child in the directory must have a path of exactly one segment.
635
- # Multiple levels of directory hierarchy may not be collapsed.
636
- # * Each child in the directory must have a unique path segment (file name).
637
- # Note that while the API itself is case-sensitive, the environment where
638
- # the Action is executed may or may not be case-sensitive. That is, it is
639
- # legal to call the API with a Directory that has both "Foo" and "foo" as
640
- # children, but the Action may be rejected by the remote system upon
641
- # execution.
642
- # * The files, directories and symlinks in the directory must each be sorted
643
- # in lexicographical order by path. The path strings must be sorted by code
644
- # point, equivalently, by UTF-8 bytes.
645
- # * The NodeProperties of files,
646
- # directories, and symlinks must be sorted in lexicographical order by
647
- # property name.
648
- # A `Directory` that obeys the restrictions is said to be in canonical form.
649
- # As an example, the following could be used for a file named `bar` and a
538
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
539
+ # its name in the directory, either the digest of its content (either a file
540
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
541
+ # metadata about the file or directory. In order to ensure that two equivalent
542
+ # directory trees hash to the same value, the following restrictions MUST be
543
+ # obeyed when constructing a a `Directory`: * Every child in the directory must
544
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
545
+ # not be collapsed. * Each child in the directory must have a unique path
546
+ # segment (file name). Note that while the API itself is case-sensitive, the
547
+ # environment where the Action is executed may or may not be case-sensitive.
548
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
549
+ # foo" as children, but the Action may be rejected by the remote system upon
550
+ # execution. * The files, directories and symlinks in the directory must each be
551
+ # sorted in lexicographical order by path. The path strings must be sorted by
552
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
553
+ # directories, and symlinks must be sorted in lexicographical order by property
554
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
555
+ # form. As an example, the following could be used for a file named `bar` and a
650
556
  # directory named `foo` with an executable file named `baz` (hashes shortened
651
- # for readability):
652
- # ```json
653
- # // (Directory proto)
654
- # `
655
- # files: [
656
- # `
657
- # name: "bar",
658
- # digest: `
659
- # hash: "4a73bc9d03...",
660
- # size: 65534
661
- # `,
662
- # node_properties: [
663
- # `
664
- # "name": "MTime",
665
- # "value": "2017-01-15T01:30:15.01Z"
666
- # `
667
- # ]
668
- # `
669
- # ],
670
- # directories: [
671
- # `
672
- # name: "foo",
673
- # digest: `
674
- # hash: "4cf2eda940...",
675
- # size: 43
676
- # `
677
- # `
678
- # ]
679
- # `
680
- # // (Directory proto with hash "4cf2eda940..." and size 43)
681
- # `
682
- # files: [
683
- # `
684
- # name: "baz",
685
- # digest: `
686
- # hash: "b2c941073e...",
687
- # size: 1294,
688
- # `,
689
- # is_executable: true
690
- # `
691
- # ]
692
- # `
693
- # ```
557
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
558
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
559
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
560
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
561
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
562
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
694
563
  class BuildBazelRemoteExecutionV2Directory
695
564
  include Google::Apis::Core::Hashable
696
565
 
@@ -727,38 +596,35 @@ module Google
727
596
  end
728
597
  end
729
598
 
730
- # A `DirectoryNode` represents a child of a
731
- # Directory which is itself
732
- # a `Directory` and its associated metadata.
599
+ # A `DirectoryNode` represents a child of a Directory which is itself a `
600
+ # Directory` and its associated metadata.
733
601
  class BuildBazelRemoteExecutionV2DirectoryNode
734
602
  include Google::Apis::Core::Hashable
735
603
 
736
604
  # A content digest. A digest for a given blob consists of the size of the blob
737
- # and its hash. The hash algorithm to use is defined by the server.
738
- # The size is considered to be an integral part of the digest and cannot be
739
- # separated. That is, even if the `hash` field is correctly specified but
740
- # `size_bytes` is not, the server MUST reject the request.
741
- # The reason for including the size in the digest is as follows: in a great
742
- # many cases, the server needs to know the size of the blob it is about to work
743
- # with prior to starting an operation with it, such as flattening Merkle tree
744
- # structures or streaming it to a worker. Technically, the server could
745
- # implement a separate metadata store, but this results in a significantly more
746
- # complicated implementation as opposed to having the client specify the size
747
- # up-front (or storing the size along with the digest in every message where
748
- # digests are embedded). This does mean that the API leaks some implementation
749
- # details of (what we consider to be) a reasonable server implementation, but
750
- # we consider this to be a worthwhile tradeoff.
751
- # When a `Digest` is used to refer to a proto message, it always refers to the
752
- # message in binary encoded form. To ensure consistent hashing, clients and
753
- # servers MUST ensure that they serialize messages according to the following
754
- # rules, even if there are alternate valid encodings for the same message:
755
- # * Fields are serialized in tag order.
756
- # * There are no unknown fields.
757
- # * There are no duplicate fields.
758
- # * Fields are serialized according to the default semantics for their type.
759
- # Most protocol buffer implementations will always follow these rules when
760
- # serializing, but care should be taken to avoid shortcuts. For instance,
761
- # concatenating two messages to merge them may produce duplicate fields.
605
+ # and its hash. The hash algorithm to use is defined by the server. The size is
606
+ # considered to be an integral part of the digest and cannot be separated. That
607
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
608
+ # the server MUST reject the request. The reason for including the size in the
609
+ # digest is as follows: in a great many cases, the server needs to know the size
610
+ # of the blob it is about to work with prior to starting an operation with it,
611
+ # such as flattening Merkle tree structures or streaming it to a worker.
612
+ # Technically, the server could implement a separate metadata store, but this
613
+ # results in a significantly more complicated implementation as opposed to
614
+ # having the client specify the size up-front (or storing the size along with
615
+ # the digest in every message where digests are embedded). This does mean that
616
+ # the API leaks some implementation details of (what we consider to be) a
617
+ # reasonable server implementation, but we consider this to be a worthwhile
618
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
619
+ # refers to the message in binary encoded form. To ensure consistent hashing,
620
+ # clients and servers MUST ensure that they serialize messages according to the
621
+ # following rules, even if there are alternate valid encodings for the same
622
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
623
+ # There are no duplicate fields. * Fields are serialized according to the
624
+ # default semantics for their type. Most protocol buffer implementations will
625
+ # always follow these rules when serializing, but care should be taken to avoid
626
+ # shortcuts. For instance, concatenating two messages to merge them may produce
627
+ # duplicate fields.
762
628
  # Corresponds to the JSON property `digest`
763
629
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
764
630
  attr_accessor :digest
@@ -779,40 +645,35 @@ module Google
779
645
  end
780
646
  end
781
647
 
782
- # Metadata about an ongoing
783
- # execution, which
784
- # will be contained in the metadata
785
- # field of the
786
- # Operation.
648
+ # Metadata about an ongoing execution, which will be contained in the metadata
649
+ # field of the Operation.
787
650
  class BuildBazelRemoteExecutionV2ExecuteOperationMetadata
788
651
  include Google::Apis::Core::Hashable
789
652
 
790
653
  # A content digest. A digest for a given blob consists of the size of the blob
791
- # and its hash. The hash algorithm to use is defined by the server.
792
- # The size is considered to be an integral part of the digest and cannot be
793
- # separated. That is, even if the `hash` field is correctly specified but
794
- # `size_bytes` is not, the server MUST reject the request.
795
- # The reason for including the size in the digest is as follows: in a great
796
- # many cases, the server needs to know the size of the blob it is about to work
797
- # with prior to starting an operation with it, such as flattening Merkle tree
798
- # structures or streaming it to a worker. Technically, the server could
799
- # implement a separate metadata store, but this results in a significantly more
800
- # complicated implementation as opposed to having the client specify the size
801
- # up-front (or storing the size along with the digest in every message where
802
- # digests are embedded). This does mean that the API leaks some implementation
803
- # details of (what we consider to be) a reasonable server implementation, but
804
- # we consider this to be a worthwhile tradeoff.
805
- # When a `Digest` is used to refer to a proto message, it always refers to the
806
- # message in binary encoded form. To ensure consistent hashing, clients and
807
- # servers MUST ensure that they serialize messages according to the following
808
- # rules, even if there are alternate valid encodings for the same message:
809
- # * Fields are serialized in tag order.
810
- # * There are no unknown fields.
811
- # * There are no duplicate fields.
812
- # * Fields are serialized according to the default semantics for their type.
813
- # Most protocol buffer implementations will always follow these rules when
814
- # serializing, but care should be taken to avoid shortcuts. For instance,
815
- # concatenating two messages to merge them may produce duplicate fields.
654
+ # and its hash. The hash algorithm to use is defined by the server. The size is
655
+ # considered to be an integral part of the digest and cannot be separated. That
656
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
657
+ # the server MUST reject the request. The reason for including the size in the
658
+ # digest is as follows: in a great many cases, the server needs to know the size
659
+ # of the blob it is about to work with prior to starting an operation with it,
660
+ # such as flattening Merkle tree structures or streaming it to a worker.
661
+ # Technically, the server could implement a separate metadata store, but this
662
+ # results in a significantly more complicated implementation as opposed to
663
+ # having the client specify the size up-front (or storing the size along with
664
+ # the digest in every message where digests are embedded). This does mean that
665
+ # the API leaks some implementation details of (what we consider to be) a
666
+ # reasonable server implementation, but we consider this to be a worthwhile
667
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
668
+ # refers to the message in binary encoded form. To ensure consistent hashing,
669
+ # clients and servers MUST ensure that they serialize messages according to the
670
+ # following rules, even if there are alternate valid encodings for the same
671
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
672
+ # There are no duplicate fields. * Fields are serialized according to the
673
+ # default semantics for their type. Most protocol buffer implementations will
674
+ # always follow these rules when serializing, but care should be taken to avoid
675
+ # shortcuts. For instance, concatenating two messages to merge them may produce
676
+ # duplicate fields.
816
677
  # Corresponds to the JSON property `actionDigest`
817
678
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
818
679
  attr_accessor :action_digest
@@ -822,15 +683,13 @@ module Google
822
683
  # @return [String]
823
684
  attr_accessor :stage
824
685
 
825
- # If set, the client can use this name with
826
- # ByteStream.Read to stream the
686
+ # If set, the client can use this name with ByteStream.Read to stream the
827
687
  # standard error.
828
688
  # Corresponds to the JSON property `stderrStreamName`
829
689
  # @return [String]
830
690
  attr_accessor :stderr_stream_name
831
691
 
832
- # If set, the client can use this name with
833
- # ByteStream.Read to stream the
692
+ # If set, the client can use this name with ByteStream.Read to stream the
834
693
  # standard output.
835
694
  # Corresponds to the JSON property `stdoutStreamName`
836
695
  # @return [String]
@@ -849,11 +708,8 @@ module Google
849
708
  end
850
709
  end
851
710
 
852
- # The response message for
853
- # Execution.Execute,
854
- # which will be contained in the response
855
- # field of the
856
- # Operation.
711
+ # The response message for Execution.Execute, which will be contained in the
712
+ # response field of the Operation.
857
713
  class BuildBazelRemoteExecutionV2ExecuteResponse
858
714
  include Google::Apis::Core::Hashable
859
715
 
@@ -869,29 +725,27 @@ module Google
869
725
  # @return [String]
870
726
  attr_accessor :message
871
727
 
872
- # An ActionResult represents the result of an
873
- # Action being run.
728
+ # An ActionResult represents the result of an Action being run.
874
729
  # Corresponds to the JSON property `result`
875
730
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2ActionResult]
876
731
  attr_accessor :result
877
732
 
878
733
  # An optional list of additional log outputs the server wishes to provide. A
879
- # server can use this to return execution-specific logs however it wishes.
880
- # This is intended primarily to make it easier for users to debug issues that
881
- # may be outside of the actual job execution, such as by identifying the
882
- # worker executing the action or by providing logs from the worker's setup
883
- # phase. The keys SHOULD be human readable so that a client can display them
884
- # to a user.
734
+ # server can use this to return execution-specific logs however it wishes. This
735
+ # is intended primarily to make it easier for users to debug issues that may be
736
+ # outside of the actual job execution, such as by identifying the worker
737
+ # executing the action or by providing logs from the worker's setup phase. The
738
+ # keys SHOULD be human readable so that a client can display them to a user.
885
739
  # Corresponds to the JSON property `serverLogs`
886
740
  # @return [Hash<String,Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2LogFile>]
887
741
  attr_accessor :server_logs
888
742
 
889
- # The `Status` type defines a logical error model that is suitable for
890
- # different programming environments, including REST APIs and RPC APIs. It is
891
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
892
- # three pieces of data: error code, error message, and error details.
893
- # You can find out more about this error model and how to work with it in the
894
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
743
+ # The `Status` type defines a logical error model that is suitable for different
744
+ # programming environments, including REST APIs and RPC APIs. It is used by [
745
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
746
+ # data: error code, error message, and error details. You can find out more
747
+ # about this error model and how to work with it in the [API Design Guide](https:
748
+ # //cloud.google.com/apis/design/errors).
895
749
  # Corresponds to the JSON property `status`
896
750
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus]
897
751
  attr_accessor :status
@@ -988,31 +842,29 @@ module Google
988
842
  include Google::Apis::Core::Hashable
989
843
 
990
844
  # A content digest. A digest for a given blob consists of the size of the blob
991
- # and its hash. The hash algorithm to use is defined by the server.
992
- # The size is considered to be an integral part of the digest and cannot be
993
- # separated. That is, even if the `hash` field is correctly specified but
994
- # `size_bytes` is not, the server MUST reject the request.
995
- # The reason for including the size in the digest is as follows: in a great
996
- # many cases, the server needs to know the size of the blob it is about to work
997
- # with prior to starting an operation with it, such as flattening Merkle tree
998
- # structures or streaming it to a worker. Technically, the server could
999
- # implement a separate metadata store, but this results in a significantly more
1000
- # complicated implementation as opposed to having the client specify the size
1001
- # up-front (or storing the size along with the digest in every message where
1002
- # digests are embedded). This does mean that the API leaks some implementation
1003
- # details of (what we consider to be) a reasonable server implementation, but
1004
- # we consider this to be a worthwhile tradeoff.
1005
- # When a `Digest` is used to refer to a proto message, it always refers to the
1006
- # message in binary encoded form. To ensure consistent hashing, clients and
1007
- # servers MUST ensure that they serialize messages according to the following
1008
- # rules, even if there are alternate valid encodings for the same message:
1009
- # * Fields are serialized in tag order.
1010
- # * There are no unknown fields.
1011
- # * There are no duplicate fields.
1012
- # * Fields are serialized according to the default semantics for their type.
1013
- # Most protocol buffer implementations will always follow these rules when
1014
- # serializing, but care should be taken to avoid shortcuts. For instance,
1015
- # concatenating two messages to merge them may produce duplicate fields.
845
+ # and its hash. The hash algorithm to use is defined by the server. The size is
846
+ # considered to be an integral part of the digest and cannot be separated. That
847
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
848
+ # the server MUST reject the request. The reason for including the size in the
849
+ # digest is as follows: in a great many cases, the server needs to know the size
850
+ # of the blob it is about to work with prior to starting an operation with it,
851
+ # such as flattening Merkle tree structures or streaming it to a worker.
852
+ # Technically, the server could implement a separate metadata store, but this
853
+ # results in a significantly more complicated implementation as opposed to
854
+ # having the client specify the size up-front (or storing the size along with
855
+ # the digest in every message where digests are embedded). This does mean that
856
+ # the API leaks some implementation details of (what we consider to be) a
857
+ # reasonable server implementation, but we consider this to be a worthwhile
858
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
859
+ # refers to the message in binary encoded form. To ensure consistent hashing,
860
+ # clients and servers MUST ensure that they serialize messages according to the
861
+ # following rules, even if there are alternate valid encodings for the same
862
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
863
+ # There are no duplicate fields. * Fields are serialized according to the
864
+ # default semantics for their type. Most protocol buffer implementations will
865
+ # always follow these rules when serializing, but care should be taken to avoid
866
+ # shortcuts. For instance, concatenating two messages to merge them may produce
867
+ # duplicate fields.
1016
868
  # Corresponds to the JSON property `digest`
1017
869
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
1018
870
  attr_accessor :digest
@@ -1051,40 +903,38 @@ module Google
1051
903
  include Google::Apis::Core::Hashable
1052
904
 
1053
905
  # A content digest. A digest for a given blob consists of the size of the blob
1054
- # and its hash. The hash algorithm to use is defined by the server.
1055
- # The size is considered to be an integral part of the digest and cannot be
1056
- # separated. That is, even if the `hash` field is correctly specified but
1057
- # `size_bytes` is not, the server MUST reject the request.
1058
- # The reason for including the size in the digest is as follows: in a great
1059
- # many cases, the server needs to know the size of the blob it is about to work
1060
- # with prior to starting an operation with it, such as flattening Merkle tree
1061
- # structures or streaming it to a worker. Technically, the server could
1062
- # implement a separate metadata store, but this results in a significantly more
1063
- # complicated implementation as opposed to having the client specify the size
1064
- # up-front (or storing the size along with the digest in every message where
1065
- # digests are embedded). This does mean that the API leaks some implementation
1066
- # details of (what we consider to be) a reasonable server implementation, but
1067
- # we consider this to be a worthwhile tradeoff.
1068
- # When a `Digest` is used to refer to a proto message, it always refers to the
1069
- # message in binary encoded form. To ensure consistent hashing, clients and
1070
- # servers MUST ensure that they serialize messages according to the following
1071
- # rules, even if there are alternate valid encodings for the same message:
1072
- # * Fields are serialized in tag order.
1073
- # * There are no unknown fields.
1074
- # * There are no duplicate fields.
1075
- # * Fields are serialized according to the default semantics for their type.
1076
- # Most protocol buffer implementations will always follow these rules when
1077
- # serializing, but care should be taken to avoid shortcuts. For instance,
1078
- # concatenating two messages to merge them may produce duplicate fields.
906
+ # and its hash. The hash algorithm to use is defined by the server. The size is
907
+ # considered to be an integral part of the digest and cannot be separated. That
908
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
909
+ # the server MUST reject the request. The reason for including the size in the
910
+ # digest is as follows: in a great many cases, the server needs to know the size
911
+ # of the blob it is about to work with prior to starting an operation with it,
912
+ # such as flattening Merkle tree structures or streaming it to a worker.
913
+ # Technically, the server could implement a separate metadata store, but this
914
+ # results in a significantly more complicated implementation as opposed to
915
+ # having the client specify the size up-front (or storing the size along with
916
+ # the digest in every message where digests are embedded). This does mean that
917
+ # the API leaks some implementation details of (what we consider to be) a
918
+ # reasonable server implementation, but we consider this to be a worthwhile
919
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
920
+ # refers to the message in binary encoded form. To ensure consistent hashing,
921
+ # clients and servers MUST ensure that they serialize messages according to the
922
+ # following rules, even if there are alternate valid encodings for the same
923
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
924
+ # There are no duplicate fields. * Fields are serialized according to the
925
+ # default semantics for their type. Most protocol buffer implementations will
926
+ # always follow these rules when serializing, but care should be taken to avoid
927
+ # shortcuts. For instance, concatenating two messages to merge them may produce
928
+ # duplicate fields.
1079
929
  # Corresponds to the JSON property `digest`
1080
930
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
1081
931
  attr_accessor :digest
1082
932
 
1083
- # This is a hint as to the purpose of the log, and is set to true if the log
1084
- # is human-readable text that can be usefully displayed to a user, and false
1085
- # otherwise. For instance, if a command-line client wishes to print the
1086
- # server logs to the terminal for a failed action, this allows it to avoid
1087
- # displaying a binary file.
933
+ # This is a hint as to the purpose of the log, and is set to true if the log is
934
+ # human-readable text that can be usefully displayed to a user, and false
935
+ # otherwise. For instance, if a command-line client wishes to print the server
936
+ # logs to the terminal for a failed action, this allows it to avoid displaying a
937
+ # binary file.
1088
938
  # Corresponds to the JSON property `humanReadable`
1089
939
  # @return [Boolean]
1090
940
  attr_accessor :human_readable
@@ -1101,10 +951,8 @@ module Google
1101
951
  end
1102
952
  end
1103
953
 
1104
- # A single property for FileNodes,
1105
- # DirectoryNodes, and
1106
- # SymlinkNodes. The server is
1107
- # responsible for specifying the property `name`s that it accepts. If
954
+ # A single property for FileNodes, DirectoryNodes, and SymlinkNodes. The server
955
+ # is responsible for specifying the property `name`s that it accepts. If
1108
956
  # permitted by the server, the same `name` may occur multiple times.
1109
957
  class BuildBazelRemoteExecutionV2NodeProperty
1110
958
  include Google::Apis::Core::Hashable
@@ -1136,39 +984,37 @@ module Google
1136
984
  include Google::Apis::Core::Hashable
1137
985
 
1138
986
  # The full path of the directory relative to the working directory. The path
1139
- # separator is a forward slash `/`. Since this is a relative path, it MUST
1140
- # NOT begin with a leading forward slash. The empty string value is allowed,
1141
- # and it denotes the entire working directory.
987
+ # separator is a forward slash `/`. Since this is a relative path, it MUST NOT
988
+ # begin with a leading forward slash. The empty string value is allowed, and it
989
+ # denotes the entire working directory.
1142
990
  # Corresponds to the JSON property `path`
1143
991
  # @return [String]
1144
992
  attr_accessor :path
1145
993
 
1146
994
  # A content digest. A digest for a given blob consists of the size of the blob
1147
- # and its hash. The hash algorithm to use is defined by the server.
1148
- # The size is considered to be an integral part of the digest and cannot be
1149
- # separated. That is, even if the `hash` field is correctly specified but
1150
- # `size_bytes` is not, the server MUST reject the request.
1151
- # The reason for including the size in the digest is as follows: in a great
1152
- # many cases, the server needs to know the size of the blob it is about to work
1153
- # with prior to starting an operation with it, such as flattening Merkle tree
1154
- # structures or streaming it to a worker. Technically, the server could
1155
- # implement a separate metadata store, but this results in a significantly more
1156
- # complicated implementation as opposed to having the client specify the size
1157
- # up-front (or storing the size along with the digest in every message where
1158
- # digests are embedded). This does mean that the API leaks some implementation
1159
- # details of (what we consider to be) a reasonable server implementation, but
1160
- # we consider this to be a worthwhile tradeoff.
1161
- # When a `Digest` is used to refer to a proto message, it always refers to the
1162
- # message in binary encoded form. To ensure consistent hashing, clients and
1163
- # servers MUST ensure that they serialize messages according to the following
1164
- # rules, even if there are alternate valid encodings for the same message:
1165
- # * Fields are serialized in tag order.
1166
- # * There are no unknown fields.
1167
- # * There are no duplicate fields.
1168
- # * Fields are serialized according to the default semantics for their type.
1169
- # Most protocol buffer implementations will always follow these rules when
1170
- # serializing, but care should be taken to avoid shortcuts. For instance,
1171
- # concatenating two messages to merge them may produce duplicate fields.
995
+ # and its hash. The hash algorithm to use is defined by the server. The size is
996
+ # considered to be an integral part of the digest and cannot be separated. That
997
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
998
+ # the server MUST reject the request. The reason for including the size in the
999
+ # digest is as follows: in a great many cases, the server needs to know the size
1000
+ # of the blob it is about to work with prior to starting an operation with it,
1001
+ # such as flattening Merkle tree structures or streaming it to a worker.
1002
+ # Technically, the server could implement a separate metadata store, but this
1003
+ # results in a significantly more complicated implementation as opposed to
1004
+ # having the client specify the size up-front (or storing the size along with
1005
+ # the digest in every message where digests are embedded). This does mean that
1006
+ # the API leaks some implementation details of (what we consider to be) a
1007
+ # reasonable server implementation, but we consider this to be a worthwhile
1008
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1009
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1010
+ # clients and servers MUST ensure that they serialize messages according to the
1011
+ # following rules, even if there are alternate valid encodings for the same
1012
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1013
+ # There are no duplicate fields. * Fields are serialized according to the
1014
+ # default semantics for their type. Most protocol buffer implementations will
1015
+ # always follow these rules when serializing, but care should be taken to avoid
1016
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1017
+ # duplicate fields.
1172
1018
  # Corresponds to the JSON property `treeDigest`
1173
1019
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
1174
1020
  attr_accessor :tree_digest
@@ -1184,51 +1030,45 @@ module Google
1184
1030
  end
1185
1031
  end
1186
1032
 
1187
- # An `OutputFile` is similar to a
1188
- # FileNode, but it is used as an
1189
- # output in an `ActionResult`. It allows a full file path rather than
1190
- # only a name.
1033
+ # An `OutputFile` is similar to a FileNode, but it is used as an output in an `
1034
+ # ActionResult`. It allows a full file path rather than only a name.
1191
1035
  class BuildBazelRemoteExecutionV2OutputFile
1192
1036
  include Google::Apis::Core::Hashable
1193
1037
 
1194
1038
  # The contents of the file if inlining was requested. The server SHOULD NOT
1195
- # inline
1196
- # file contents unless requested by the client in the
1197
- # GetActionResultRequest
1198
- # message. The server MAY omit inlining, even if requested, and MUST do so if
1199
- # inlining
1200
- # would cause the response to exceed message size limits.
1039
+ # inline file contents unless requested by the client in the
1040
+ # GetActionResultRequest message. The server MAY omit inlining, even if
1041
+ # requested, and MUST do so if inlining would cause the response to exceed
1042
+ # message size limits.
1201
1043
  # Corresponds to the JSON property `contents`
1202
1044
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
1203
1045
  # @return [String]
1204
1046
  attr_accessor :contents
1205
1047
 
1206
1048
  # A content digest. A digest for a given blob consists of the size of the blob
1207
- # and its hash. The hash algorithm to use is defined by the server.
1208
- # The size is considered to be an integral part of the digest and cannot be
1209
- # separated. That is, even if the `hash` field is correctly specified but
1210
- # `size_bytes` is not, the server MUST reject the request.
1211
- # The reason for including the size in the digest is as follows: in a great
1212
- # many cases, the server needs to know the size of the blob it is about to work
1213
- # with prior to starting an operation with it, such as flattening Merkle tree
1214
- # structures or streaming it to a worker. Technically, the server could
1215
- # implement a separate metadata store, but this results in a significantly more
1216
- # complicated implementation as opposed to having the client specify the size
1217
- # up-front (or storing the size along with the digest in every message where
1218
- # digests are embedded). This does mean that the API leaks some implementation
1219
- # details of (what we consider to be) a reasonable server implementation, but
1220
- # we consider this to be a worthwhile tradeoff.
1221
- # When a `Digest` is used to refer to a proto message, it always refers to the
1222
- # message in binary encoded form. To ensure consistent hashing, clients and
1223
- # servers MUST ensure that they serialize messages according to the following
1224
- # rules, even if there are alternate valid encodings for the same message:
1225
- # * Fields are serialized in tag order.
1226
- # * There are no unknown fields.
1227
- # * There are no duplicate fields.
1228
- # * Fields are serialized according to the default semantics for their type.
1229
- # Most protocol buffer implementations will always follow these rules when
1230
- # serializing, but care should be taken to avoid shortcuts. For instance,
1231
- # concatenating two messages to merge them may produce duplicate fields.
1049
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1050
+ # considered to be an integral part of the digest and cannot be separated. That
1051
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1052
+ # the server MUST reject the request. The reason for including the size in the
1053
+ # digest is as follows: in a great many cases, the server needs to know the size
1054
+ # of the blob it is about to work with prior to starting an operation with it,
1055
+ # such as flattening Merkle tree structures or streaming it to a worker.
1056
+ # Technically, the server could implement a separate metadata store, but this
1057
+ # results in a significantly more complicated implementation as opposed to
1058
+ # having the client specify the size up-front (or storing the size along with
1059
+ # the digest in every message where digests are embedded). This does mean that
1060
+ # the API leaks some implementation details of (what we consider to be) a
1061
+ # reasonable server implementation, but we consider this to be a worthwhile
1062
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1063
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1064
+ # clients and servers MUST ensure that they serialize messages according to the
1065
+ # following rules, even if there are alternate valid encodings for the same
1066
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1067
+ # There are no duplicate fields. * Fields are serialized according to the
1068
+ # default semantics for their type. Most protocol buffer implementations will
1069
+ # always follow these rules when serializing, but care should be taken to avoid
1070
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1071
+ # duplicate fields.
1232
1072
  # Corresponds to the JSON property `digest`
1233
1073
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
1234
1074
  attr_accessor :digest
@@ -1245,8 +1085,8 @@ module Google
1245
1085
  attr_accessor :node_properties
1246
1086
 
1247
1087
  # The full path of the file relative to the working directory, including the
1248
- # filename. The path separator is a forward slash `/`. Since this is a
1249
- # relative path, it MUST NOT begin with a leading forward slash.
1088
+ # filename. The path separator is a forward slash `/`. Since this is a relative
1089
+ # path, it MUST NOT begin with a leading forward slash.
1250
1090
  # Corresponds to the JSON property `path`
1251
1091
  # @return [String]
1252
1092
  attr_accessor :path
@@ -1265,32 +1105,29 @@ module Google
1265
1105
  end
1266
1106
  end
1267
1107
 
1268
- # An `OutputSymlink` is similar to a
1269
- # Symlink, but it is used as an
1270
- # output in an `ActionResult`.
1271
- # `OutputSymlink` is binary-compatible with `SymlinkNode`.
1108
+ # An `OutputSymlink` is similar to a Symlink, but it is used as an output in an `
1109
+ # ActionResult`. `OutputSymlink` is binary-compatible with `SymlinkNode`.
1272
1110
  class BuildBazelRemoteExecutionV2OutputSymlink
1273
1111
  include Google::Apis::Core::Hashable
1274
1112
 
1275
- # The supported node properties of the OutputSymlink, if requested by the
1276
- # Action.
1113
+ # The supported node properties of the OutputSymlink, if requested by the Action.
1277
1114
  # Corresponds to the JSON property `nodeProperties`
1278
1115
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
1279
1116
  attr_accessor :node_properties
1280
1117
 
1281
1118
  # The full path of the symlink relative to the working directory, including the
1282
- # filename. The path separator is a forward slash `/`. Since this is a
1283
- # relative path, it MUST NOT begin with a leading forward slash.
1119
+ # filename. The path separator is a forward slash `/`. Since this is a relative
1120
+ # path, it MUST NOT begin with a leading forward slash.
1284
1121
  # Corresponds to the JSON property `path`
1285
1122
  # @return [String]
1286
1123
  attr_accessor :path
1287
1124
 
1288
- # The target path of the symlink. The path separator is a forward slash `/`.
1289
- # The target path can be relative to the parent directory of the symlink or
1290
- # it can be an absolute path starting with `/`. Support for absolute paths
1291
- # can be checked using the Capabilities
1292
- # API. The canonical form forbids the substrings `/./` and `//` in the target
1293
- # path. `..` components are allowed anywhere in the target path.
1125
+ # The target path of the symlink. The path separator is a forward slash `/`. The
1126
+ # target path can be relative to the parent directory of the symlink or it can
1127
+ # be an absolute path starting with `/`. Support for absolute paths can be
1128
+ # checked using the Capabilities API. The canonical form forbids the substrings `
1129
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
1130
+ # target path.
1294
1131
  # Corresponds to the JSON property `target`
1295
1132
  # @return [String]
1296
1133
  attr_accessor :target
@@ -1308,17 +1145,16 @@ module Google
1308
1145
  end
1309
1146
 
1310
1147
  # A `Platform` is a set of requirements, such as hardware, operating system, or
1311
- # compiler toolchain, for an
1312
- # Action's execution
1313
- # environment. A `Platform` is represented as a series of key-value pairs
1314
- # representing the properties that are required of the platform.
1148
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
1149
+ # represented as a series of key-value pairs representing the properties that
1150
+ # are required of the platform.
1315
1151
  class BuildBazelRemoteExecutionV2Platform
1316
1152
  include Google::Apis::Core::Hashable
1317
1153
 
1318
- # The properties that make up this platform. In order to ensure that
1319
- # equivalent `Platform`s always hash to the same value, the properties MUST
1320
- # be lexicographically sorted by name, and then by value. Sorting of strings
1321
- # is done by code point, equivalently, by the UTF-8 bytes.
1154
+ # The properties that make up this platform. In order to ensure that equivalent `
1155
+ # Platform`s always hash to the same value, the properties MUST be
1156
+ # lexicographically sorted by name, and then by value. Sorting of strings is
1157
+ # done by code point, equivalently, by the UTF-8 bytes.
1322
1158
  # Corresponds to the JSON property `properties`
1323
1159
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2PlatformProperty>]
1324
1160
  attr_accessor :properties
@@ -1335,19 +1171,16 @@ module Google
1335
1171
 
1336
1172
  # A single property for the environment. The server is responsible for
1337
1173
  # specifying the property `name`s that it accepts. If an unknown `name` is
1338
- # provided in the requirements for an
1339
- # Action, the server SHOULD
1340
- # reject the execution request. If permitted by the server, the same `name`
1341
- # may occur multiple times.
1342
- # The server is also responsible for specifying the interpretation of
1343
- # property `value`s. For instance, a property describing how much RAM must be
1344
- # available may be interpreted as allowing a worker with 16GB to fulfill a
1345
- # request for 8GB, while a property describing the OS environment on which
1346
- # the action must be performed may require an exact match with the worker's
1347
- # OS.
1348
- # The server MAY use the `value` of one or more properties to determine how
1349
- # it sets up the execution environment, such as by making specific system
1350
- # files available to the worker.
1174
+ # provided in the requirements for an Action, the server SHOULD reject the
1175
+ # execution request. If permitted by the server, the same `name` may occur
1176
+ # multiple times. The server is also responsible for specifying the
1177
+ # interpretation of property `value`s. For instance, a property describing how
1178
+ # much RAM must be available may be interpreted as allowing a worker with 16GB
1179
+ # to fulfill a request for 8GB, while a property describing the OS environment
1180
+ # on which the action must be performed may require an exact match with the
1181
+ # worker's OS. The server MAY use the `value` of one or more properties to
1182
+ # determine how it sets up the execution environment, such as by making specific
1183
+ # system files available to the worker.
1351
1184
  class BuildBazelRemoteExecutionV2PlatformProperty
1352
1185
  include Google::Apis::Core::Hashable
1353
1186
 
@@ -1375,27 +1208,25 @@ module Google
1375
1208
  # An optional Metadata to attach to any RPC request to tell the server about an
1376
1209
  # external context of the request. The server may use this for logging or other
1377
1210
  # purposes. To use it, the client attaches the header to the call using the
1378
- # canonical proto serialization:
1379
- # * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
1380
- # * contents: the base64 encoded binary `RequestMetadata` message.
1381
- # Note: the gRPC library serializes binary headers encoded in base 64 by
1382
- # default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
1383
- # requests).
1384
- # Therefore, if the gRPC library is used to pass/retrieve this
1211
+ # canonical proto serialization: * name: `build.bazel.remote.execution.v2.
1212
+ # requestmetadata-bin` * contents: the base64 encoded binary `RequestMetadata`
1213
+ # message. Note: the gRPC library serializes binary headers encoded in base 64
1214
+ # by default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
1215
+ # requests). Therefore, if the gRPC library is used to pass/retrieve this
1385
1216
  # metadata, the user may ignore the base64 encoding and assume it is simply
1386
1217
  # serialized as a binary message.
1387
1218
  class BuildBazelRemoteExecutionV2RequestMetadata
1388
1219
  include Google::Apis::Core::Hashable
1389
1220
 
1390
- # An identifier that ties multiple requests to the same action.
1391
- # For example, multiple requests to the CAS, Action Cache, and Execution
1392
- # API are used in order to compile foo.cc.
1221
+ # An identifier that ties multiple requests to the same action. For example,
1222
+ # multiple requests to the CAS, Action Cache, and Execution API are used in
1223
+ # order to compile foo.cc.
1393
1224
  # Corresponds to the JSON property `actionId`
1394
1225
  # @return [String]
1395
1226
  attr_accessor :action_id
1396
1227
 
1397
- # An identifier to tie multiple tool invocations together. For example,
1398
- # runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
1228
+ # An identifier to tie multiple tool invocations together. For example, runs of
1229
+ # foo_test, bar_test and baz_test on a post-submit of a given patch.
1399
1230
  # Corresponds to the JSON property `correlatedInvocationsId`
1400
1231
  # @return [String]
1401
1232
  attr_accessor :correlated_invocations_id
@@ -1405,8 +1236,8 @@ module Google
1405
1236
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2ToolDetails]
1406
1237
  attr_accessor :tool_details
1407
1238
 
1408
- # An identifier that ties multiple actions together to a final result.
1409
- # For example, multiple actions are required to build and run foo_test.
1239
+ # An identifier that ties multiple actions together to a final result. For
1240
+ # example, multiple actions are required to build and run foo_test.
1410
1241
  # Corresponds to the JSON property `toolInvocationId`
1411
1242
  # @return [String]
1412
1243
  attr_accessor :tool_invocation_id
@@ -1438,12 +1269,12 @@ module Google
1438
1269
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
1439
1270
  attr_accessor :node_properties
1440
1271
 
1441
- # The target path of the symlink. The path separator is a forward slash `/`.
1442
- # The target path can be relative to the parent directory of the symlink or
1443
- # it can be an absolute path starting with `/`. Support for absolute paths
1444
- # can be checked using the Capabilities
1445
- # API. The canonical form forbids the substrings `/./` and `//` in the target
1446
- # path. `..` components are allowed anywhere in the target path.
1272
+ # The target path of the symlink. The path separator is a forward slash `/`. The
1273
+ # target path can be relative to the parent directory of the symlink or it can
1274
+ # be an absolute path starting with `/`. Support for absolute paths can be
1275
+ # checked using the Capabilities API. The canonical form forbids the substrings `
1276
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
1277
+ # target path.
1447
1278
  # Corresponds to the JSON property `target`
1448
1279
  # @return [String]
1449
1280
  attr_accessor :target
@@ -1485,90 +1316,45 @@ module Google
1485
1316
  end
1486
1317
  end
1487
1318
 
1488
- # A `Tree` contains all the
1489
- # Directory protos in a
1490
- # single directory Merkle tree, compressed into one message.
1319
+ # A `Tree` contains all the Directory protos in a single directory Merkle tree,
1320
+ # compressed into one message.
1491
1321
  class BuildBazelRemoteExecutionV2Tree
1492
1322
  include Google::Apis::Core::Hashable
1493
1323
 
1494
1324
  # All the child directories: the directories referred to by the root and,
1495
- # recursively, all its children. In order to reconstruct the directory tree,
1496
- # the client must take the digests of each of the child directories and then
1497
- # build up a tree starting from the `root`.
1325
+ # recursively, all its children. In order to reconstruct the directory tree, the
1326
+ # client must take the digests of each of the child directories and then build
1327
+ # up a tree starting from the `root`.
1498
1328
  # Corresponds to the JSON property `children`
1499
1329
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Directory>]
1500
1330
  attr_accessor :children
1501
1331
 
1502
1332
  # A `Directory` represents a directory node in a file tree, containing zero or
1503
- # more children FileNodes,
1504
- # DirectoryNodes and
1505
- # SymlinkNodes.
1506
- # Each `Node` contains its name in the directory, either the digest of its
1507
- # content (either a file blob or a `Directory` proto) or a symlink target, as
1508
- # well as possibly some metadata about the file or directory.
1509
- # In order to ensure that two equivalent directory trees hash to the same
1510
- # value, the following restrictions MUST be obeyed when constructing a
1511
- # a `Directory`:
1512
- # * Every child in the directory must have a path of exactly one segment.
1513
- # Multiple levels of directory hierarchy may not be collapsed.
1514
- # * Each child in the directory must have a unique path segment (file name).
1515
- # Note that while the API itself is case-sensitive, the environment where
1516
- # the Action is executed may or may not be case-sensitive. That is, it is
1517
- # legal to call the API with a Directory that has both "Foo" and "foo" as
1518
- # children, but the Action may be rejected by the remote system upon
1519
- # execution.
1520
- # * The files, directories and symlinks in the directory must each be sorted
1521
- # in lexicographical order by path. The path strings must be sorted by code
1522
- # point, equivalently, by UTF-8 bytes.
1523
- # * The NodeProperties of files,
1524
- # directories, and symlinks must be sorted in lexicographical order by
1525
- # property name.
1526
- # A `Directory` that obeys the restrictions is said to be in canonical form.
1527
- # As an example, the following could be used for a file named `bar` and a
1333
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
1334
+ # its name in the directory, either the digest of its content (either a file
1335
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
1336
+ # metadata about the file or directory. In order to ensure that two equivalent
1337
+ # directory trees hash to the same value, the following restrictions MUST be
1338
+ # obeyed when constructing a a `Directory`: * Every child in the directory must
1339
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
1340
+ # not be collapsed. * Each child in the directory must have a unique path
1341
+ # segment (file name). Note that while the API itself is case-sensitive, the
1342
+ # environment where the Action is executed may or may not be case-sensitive.
1343
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
1344
+ # foo" as children, but the Action may be rejected by the remote system upon
1345
+ # execution. * The files, directories and symlinks in the directory must each be
1346
+ # sorted in lexicographical order by path. The path strings must be sorted by
1347
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
1348
+ # directories, and symlinks must be sorted in lexicographical order by property
1349
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
1350
+ # form. As an example, the following could be used for a file named `bar` and a
1528
1351
  # directory named `foo` with an executable file named `baz` (hashes shortened
1529
- # for readability):
1530
- # ```json
1531
- # // (Directory proto)
1532
- # `
1533
- # files: [
1534
- # `
1535
- # name: "bar",
1536
- # digest: `
1537
- # hash: "4a73bc9d03...",
1538
- # size: 65534
1539
- # `,
1540
- # node_properties: [
1541
- # `
1542
- # "name": "MTime",
1543
- # "value": "2017-01-15T01:30:15.01Z"
1544
- # `
1545
- # ]
1546
- # `
1547
- # ],
1548
- # directories: [
1549
- # `
1550
- # name: "foo",
1551
- # digest: `
1552
- # hash: "4cf2eda940...",
1553
- # size: 43
1554
- # `
1555
- # `
1556
- # ]
1557
- # `
1558
- # // (Directory proto with hash "4cf2eda940..." and size 43)
1559
- # `
1560
- # files: [
1561
- # `
1562
- # name: "baz",
1563
- # digest: `
1564
- # hash: "b2c941073e...",
1565
- # size: 1294,
1566
- # `,
1567
- # is_executable: true
1568
- # `
1569
- # ]
1570
- # `
1571
- # ```
1352
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
1353
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
1354
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
1355
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
1356
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
1357
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
1572
1358
  # Corresponds to the JSON property `root`
1573
1359
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Directory]
1574
1360
  attr_accessor :root
@@ -1608,8 +1394,8 @@ module Google
1608
1394
  class GoogleDevtoolsRemotebuildbotCommandDurations
1609
1395
  include Google::Apis::Core::Hashable
1610
1396
 
1611
- # The time spent preparing the command to be run in a Docker container
1612
- # (includes pulling the Docker image, if necessary).
1397
+ # The time spent preparing the command to be run in a Docker container (includes
1398
+ # pulling the Docker image, if necessary).
1613
1399
  # Corresponds to the JSON property `dockerPrep`
1614
1400
  # @return [String]
1615
1401
  attr_accessor :docker_prep
@@ -1685,18 +1471,23 @@ module Google
1685
1471
  end
1686
1472
  end
1687
1473
 
1688
- # CommandEvents contains counters for the number of warnings and errors
1689
- # that occurred during the execution of a command.
1474
+ # CommandEvents contains counters for the number of warnings and errors that
1475
+ # occurred during the execution of a command.
1690
1476
  class GoogleDevtoolsRemotebuildbotCommandEvents
1691
1477
  include Google::Apis::Core::Hashable
1692
1478
 
1693
- # Indicates whether we are using a cached Docker image (true) or had to pull
1694
- # the Docker image (false) for this command.
1479
+ # Indicates whether we are using a cached Docker image (true) or had to pull the
1480
+ # Docker image (false) for this command.
1695
1481
  # Corresponds to the JSON property `dockerCacheHit`
1696
1482
  # @return [Boolean]
1697
1483
  attr_accessor :docker_cache_hit
1698
1484
  alias_method :docker_cache_hit?, :docker_cache_hit
1699
1485
 
1486
+ # Docker Image name.
1487
+ # Corresponds to the JSON property `dockerImageName`
1488
+ # @return [String]
1489
+ attr_accessor :docker_image_name
1490
+
1700
1491
  # The input cache miss ratio.
1701
1492
  # Corresponds to the JSON property `inputCacheMiss`
1702
1493
  # @return [Float]
@@ -1719,6 +1510,7 @@ module Google
1719
1510
  # Update properties of this object
1720
1511
  def update!(**args)
1721
1512
  @docker_cache_hit = args[:docker_cache_hit] if args.key?(:docker_cache_hit)
1513
+ @docker_image_name = args[:docker_image_name] if args.key?(:docker_image_name)
1722
1514
  @input_cache_miss = args[:input_cache_miss] if args.key?(:input_cache_miss)
1723
1515
  @num_errors = args[:num_errors] if args.key?(:num_errors)
1724
1516
  @num_warnings = args[:num_warnings] if args.key?(:num_warnings)
@@ -1861,28 +1653,24 @@ module Google
1861
1653
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateInstanceRequest
1862
1654
  include Google::Apis::Core::Hashable
1863
1655
 
1864
- # Instance conceptually encapsulates all Remote Build Execution resources
1865
- # for remote builds.
1866
- # An instance consists of storage and compute resources (for example,
1867
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
1868
- # running remote builds.
1869
- # All Remote Build Execution API calls are scoped to an instance.
1656
+ # Instance conceptually encapsulates all Remote Build Execution resources for
1657
+ # remote builds. An instance consists of storage and compute resources (for
1658
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
1659
+ # running remote builds. All Remote Build Execution API calls are scoped to an
1660
+ # instance.
1870
1661
  # Corresponds to the JSON property `instance`
1871
1662
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
1872
1663
  attr_accessor :instance
1873
1664
 
1874
- # ID of the created instance.
1875
- # A valid `instance_id` must:
1876
- # be 6-50 characters long,
1877
- # contain only lowercase letters, digits, hyphens and underscores,
1878
- # start with a lowercase letter, and
1879
- # end with a lowercase letter or a digit.
1665
+ # ID of the created instance. A valid `instance_id` must: be 6-50 characters
1666
+ # long, contain only lowercase letters, digits, hyphens and underscores, start
1667
+ # with a lowercase letter, and end with a lowercase letter or a digit.
1880
1668
  # Corresponds to the JSON property `instanceId`
1881
1669
  # @return [String]
1882
1670
  attr_accessor :instance_id
1883
1671
 
1884
- # Resource name of the project containing the instance.
1885
- # Format: `projects/[PROJECT_ID]`.
1672
+ # Resource name of the project containing the instance. Format: `projects/[
1673
+ # PROJECT_ID]`.
1886
1674
  # Corresponds to the JSON property `parent`
1887
1675
  # @return [String]
1888
1676
  attr_accessor :parent
@@ -1903,18 +1691,15 @@ module Google
1903
1691
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateWorkerPoolRequest
1904
1692
  include Google::Apis::Core::Hashable
1905
1693
 
1906
- # Resource name of the instance in which to create the new worker pool.
1907
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1694
+ # Resource name of the instance in which to create the new worker pool. Format: `
1695
+ # projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1908
1696
  # Corresponds to the JSON property `parent`
1909
1697
  # @return [String]
1910
1698
  attr_accessor :parent
1911
1699
 
1912
- # ID of the created worker pool.
1913
- # A valid pool ID must:
1914
- # be 6-50 characters long,
1915
- # contain only lowercase letters, digits, hyphens and underscores,
1916
- # start with a lowercase letter, and
1917
- # end with a lowercase letter or a digit.
1700
+ # ID of the created worker pool. A valid pool ID must: be 6-50 characters long,
1701
+ # contain only lowercase letters, digits, hyphens and underscores, start with a
1702
+ # lowercase letter, and end with a lowercase letter or a digit.
1918
1703
  # Corresponds to the JSON property `poolId`
1919
1704
  # @return [String]
1920
1705
  attr_accessor :pool_id
@@ -1940,8 +1725,8 @@ module Google
1940
1725
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteInstanceRequest
1941
1726
  include Google::Apis::Core::Hashable
1942
1727
 
1943
- # Name of the instance to delete.
1944
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1728
+ # Name of the instance to delete. Format: `projects/[PROJECT_ID]/instances/[
1729
+ # INSTANCE_ID]`.
1945
1730
  # Corresponds to the JSON property `name`
1946
1731
  # @return [String]
1947
1732
  attr_accessor :name
@@ -1960,9 +1745,8 @@ module Google
1960
1745
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteWorkerPoolRequest
1961
1746
  include Google::Apis::Core::Hashable
1962
1747
 
1963
- # Name of the worker pool to delete.
1964
- # Format:
1965
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
1748
+ # Name of the worker pool to delete. Format: `projects/[PROJECT_ID]/instances/[
1749
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
1966
1750
  # Corresponds to the JSON property `name`
1967
1751
  # @return [String]
1968
1752
  attr_accessor :name
@@ -1977,12 +1761,107 @@ module Google
1977
1761
  end
1978
1762
  end
1979
1763
 
1764
+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
1765
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
1766
+ # usage time.
1767
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy
1768
+ include Google::Apis::Core::Hashable
1769
+
1770
+ # Defines whether a feature can be used or what values are accepted.
1771
+ # Corresponds to the JSON property `containerImageSources`
1772
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1773
+ attr_accessor :container_image_sources
1774
+
1775
+ # Defines whether a feature can be used or what values are accepted.
1776
+ # Corresponds to the JSON property `dockerAddCapabilities`
1777
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1778
+ attr_accessor :docker_add_capabilities
1779
+
1780
+ # Defines whether a feature can be used or what values are accepted.
1781
+ # Corresponds to the JSON property `dockerChrootPath`
1782
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1783
+ attr_accessor :docker_chroot_path
1784
+
1785
+ # Defines whether a feature can be used or what values are accepted.
1786
+ # Corresponds to the JSON property `dockerNetwork`
1787
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1788
+ attr_accessor :docker_network
1789
+
1790
+ # Defines whether a feature can be used or what values are accepted.
1791
+ # Corresponds to the JSON property `dockerPrivileged`
1792
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1793
+ attr_accessor :docker_privileged
1794
+
1795
+ # Defines whether a feature can be used or what values are accepted.
1796
+ # Corresponds to the JSON property `dockerRunAsRoot`
1797
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1798
+ attr_accessor :docker_run_as_root
1799
+
1800
+ # Defines whether a feature can be used or what values are accepted.
1801
+ # Corresponds to the JSON property `dockerRuntime`
1802
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1803
+ attr_accessor :docker_runtime
1804
+
1805
+ # Defines whether a feature can be used or what values are accepted.
1806
+ # Corresponds to the JSON property `dockerSiblingContainers`
1807
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1808
+ attr_accessor :docker_sibling_containers
1809
+
1810
+ # linux_isolation allows overriding the docker runtime used for containers
1811
+ # started on Linux.
1812
+ # Corresponds to the JSON property `linuxIsolation`
1813
+ # @return [String]
1814
+ attr_accessor :linux_isolation
1815
+
1816
+ def initialize(**args)
1817
+ update!(**args)
1818
+ end
1819
+
1820
+ # Update properties of this object
1821
+ def update!(**args)
1822
+ @container_image_sources = args[:container_image_sources] if args.key?(:container_image_sources)
1823
+ @docker_add_capabilities = args[:docker_add_capabilities] if args.key?(:docker_add_capabilities)
1824
+ @docker_chroot_path = args[:docker_chroot_path] if args.key?(:docker_chroot_path)
1825
+ @docker_network = args[:docker_network] if args.key?(:docker_network)
1826
+ @docker_privileged = args[:docker_privileged] if args.key?(:docker_privileged)
1827
+ @docker_run_as_root = args[:docker_run_as_root] if args.key?(:docker_run_as_root)
1828
+ @docker_runtime = args[:docker_runtime] if args.key?(:docker_runtime)
1829
+ @docker_sibling_containers = args[:docker_sibling_containers] if args.key?(:docker_sibling_containers)
1830
+ @linux_isolation = args[:linux_isolation] if args.key?(:linux_isolation)
1831
+ end
1832
+ end
1833
+
1834
+ # Defines whether a feature can be used or what values are accepted.
1835
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature
1836
+ include Google::Apis::Core::Hashable
1837
+
1838
+ # A list of acceptable values. Only effective when the policy is `RESTRICTED`.
1839
+ # Corresponds to the JSON property `allowedValues`
1840
+ # @return [Array<String>]
1841
+ attr_accessor :allowed_values
1842
+
1843
+ # The policy of the feature.
1844
+ # Corresponds to the JSON property `policy`
1845
+ # @return [String]
1846
+ attr_accessor :policy
1847
+
1848
+ def initialize(**args)
1849
+ update!(**args)
1850
+ end
1851
+
1852
+ # Update properties of this object
1853
+ def update!(**args)
1854
+ @allowed_values = args[:allowed_values] if args.key?(:allowed_values)
1855
+ @policy = args[:policy] if args.key?(:policy)
1856
+ end
1857
+ end
1858
+
1980
1859
  # The request used for `GetInstance`.
1981
1860
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest
1982
1861
  include Google::Apis::Core::Hashable
1983
1862
 
1984
- # Name of the instance to retrieve.
1985
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1863
+ # Name of the instance to retrieve. Format: `projects/[PROJECT_ID]/instances/[
1864
+ # INSTANCE_ID]`.
1986
1865
  # Corresponds to the JSON property `name`
1987
1866
  # @return [String]
1988
1867
  attr_accessor :name
@@ -2001,9 +1880,8 @@ module Google
2001
1880
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetWorkerPoolRequest
2002
1881
  include Google::Apis::Core::Hashable
2003
1882
 
2004
- # Name of the worker pool to retrieve.
2005
- # Format:
2006
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
1883
+ # Name of the worker pool to retrieve. Format: `projects/[PROJECT_ID]/instances/[
1884
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
2007
1885
  # Corresponds to the JSON property `name`
2008
1886
  # @return [String]
2009
1887
  attr_accessor :name
@@ -2018,15 +1896,21 @@ module Google
2018
1896
  end
2019
1897
  end
2020
1898
 
2021
- # Instance conceptually encapsulates all Remote Build Execution resources
2022
- # for remote builds.
2023
- # An instance consists of storage and compute resources (for example,
2024
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2025
- # running remote builds.
2026
- # All Remote Build Execution API calls are scoped to an instance.
1899
+ # Instance conceptually encapsulates all Remote Build Execution resources for
1900
+ # remote builds. An instance consists of storage and compute resources (for
1901
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
1902
+ # running remote builds. All Remote Build Execution API calls are scoped to an
1903
+ # instance.
2027
1904
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance
2028
1905
  include Google::Apis::Core::Hashable
2029
1906
 
1907
+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
1908
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
1909
+ # usage time.
1910
+ # Corresponds to the JSON property `featurePolicy`
1911
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy]
1912
+ attr_accessor :feature_policy
1913
+
2030
1914
  # The location is a GCP region. Currently only `us-central1` is supported.
2031
1915
  # Corresponds to the JSON property `location`
2032
1916
  # @return [String]
@@ -2038,10 +1922,9 @@ module Google
2038
1922
  attr_accessor :logging_enabled
2039
1923
  alias_method :logging_enabled?, :logging_enabled
2040
1924
 
2041
- # Output only. Instance resource name formatted as:
2042
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2043
- # Name should not be populated when creating an instance since it is provided
2044
- # in the `instance_id` field.
1925
+ # Output only. Instance resource name formatted as: `projects/[PROJECT_ID]/
1926
+ # instances/[INSTANCE_ID]`. Name should not be populated when creating an
1927
+ # instance since it is provided in the `instance_id` field.
2045
1928
  # Corresponds to the JSON property `name`
2046
1929
  # @return [String]
2047
1930
  attr_accessor :name
@@ -2057,6 +1940,7 @@ module Google
2057
1940
 
2058
1941
  # Update properties of this object
2059
1942
  def update!(**args)
1943
+ @feature_policy = args[:feature_policy] if args.key?(:feature_policy)
2060
1944
  @location = args[:location] if args.key?(:location)
2061
1945
  @logging_enabled = args[:logging_enabled] if args.key?(:logging_enabled)
2062
1946
  @name = args[:name] if args.key?(:name)
@@ -2068,8 +1952,7 @@ module Google
2068
1952
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListInstancesRequest
2069
1953
  include Google::Apis::Core::Hashable
2070
1954
 
2071
- # Resource name of the project.
2072
- # Format: `projects/[PROJECT_ID]`.
1955
+ # Resource name of the project. Format: `projects/[PROJECT_ID]`.
2073
1956
  # Corresponds to the JSON property `parent`
2074
1957
  # @return [String]
2075
1958
  attr_accessor :parent
@@ -2107,32 +1990,26 @@ module Google
2107
1990
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest
2108
1991
  include Google::Apis::Core::Hashable
2109
1992
 
2110
- # Optional. A filter expression that filters resources listed in
2111
- # the response. The expression must specify the field name, a comparison
2112
- # operator, and the value that you want to use for filtering. The value
2113
- # must be a string, a number, or a boolean. String values are
2114
- # case-insensitive.
2115
- # The comparison operator must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or
2116
- # `<`.
2117
- # The `:` operator can be used with string fields to match substrings.
2118
- # For non-string fields it is equivalent to the `=` operator.
2119
- # The `:*` comparison can be used to test whether a key has been defined.
2120
- # You can also filter on nested fields.
2121
- # To filter on multiple expressions, you can separate expression using
2122
- # `AND` and `OR` operators, using parentheses to specify precedence. If
2123
- # neither operator is specified, `AND` is assumed.
2124
- # Examples:
2125
- # Include only pools with more than 100 reserved workers:
2126
- # `(worker_count > 100) (worker_config.reserved = true)`
2127
- # Include only pools with a certain label or machines of the n1-standard
2128
- # family:
1993
+ # Optional. A filter expression that filters resources listed in the response.
1994
+ # The expression must specify the field name, a comparison operator, and the
1995
+ # value that you want to use for filtering. The value must be a string, a number,
1996
+ # or a boolean. String values are case-insensitive. The comparison operator
1997
+ # must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or `<`. The `:` operator can be
1998
+ # used with string fields to match substrings. For non-string fields it is
1999
+ # equivalent to the `=` operator. The `:*` comparison can be used to test
2000
+ # whether a key has been defined. You can also filter on nested fields. To
2001
+ # filter on multiple expressions, you can separate expression using `AND` and `
2002
+ # OR` operators, using parentheses to specify precedence. If neither operator is
2003
+ # specified, `AND` is assumed. Examples: Include only pools with more than 100
2004
+ # reserved workers: `(worker_count > 100) (worker_config.reserved = true)`
2005
+ # Include only pools with a certain label or machines of the n1-standard family:
2129
2006
  # `worker_config.labels.key1 : * OR worker_config.machine_type: n1-standard`
2130
2007
  # Corresponds to the JSON property `filter`
2131
2008
  # @return [String]
2132
2009
  attr_accessor :filter
2133
2010
 
2134
- # Resource name of the instance.
2135
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2011
+ # Resource name of the instance. Format: `projects/[PROJECT_ID]/instances/[
2012
+ # INSTANCE_ID]`.
2136
2013
  # Corresponds to the JSON property `parent`
2137
2014
  # @return [String]
2138
2015
  attr_accessor :parent
@@ -2167,40 +2044,62 @@ module Google
2167
2044
  end
2168
2045
  end
2169
2046
 
2047
+ # SoleTenancyConfig specifies information required to host a pool on STNs.
2048
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig
2049
+ include Google::Apis::Core::Hashable
2050
+
2051
+ # The sole-tenant node type to host the pool's workers on.
2052
+ # Corresponds to the JSON property `nodeType`
2053
+ # @return [String]
2054
+ attr_accessor :node_type
2055
+
2056
+ # Zone in which STNs are reserved.
2057
+ # Corresponds to the JSON property `nodesZone`
2058
+ # @return [String]
2059
+ attr_accessor :nodes_zone
2060
+
2061
+ def initialize(**args)
2062
+ update!(**args)
2063
+ end
2064
+
2065
+ # Update properties of this object
2066
+ def update!(**args)
2067
+ @node_type = args[:node_type] if args.key?(:node_type)
2068
+ @nodes_zone = args[:nodes_zone] if args.key?(:nodes_zone)
2069
+ end
2070
+ end
2071
+
2170
2072
  # The request used for `UpdateInstance`.
2171
2073
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest
2172
2074
  include Google::Apis::Core::Hashable
2173
2075
 
2174
- # Instance conceptually encapsulates all Remote Build Execution resources
2175
- # for remote builds.
2176
- # An instance consists of storage and compute resources (for example,
2177
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2178
- # running remote builds.
2179
- # All Remote Build Execution API calls are scoped to an instance.
2076
+ # Instance conceptually encapsulates all Remote Build Execution resources for
2077
+ # remote builds. An instance consists of storage and compute resources (for
2078
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2079
+ # running remote builds. All Remote Build Execution API calls are scoped to an
2080
+ # instance.
2180
2081
  # Corresponds to the JSON property `instance`
2181
2082
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
2182
2083
  attr_accessor :instance
2183
2084
 
2184
- # Deprecated, use instance.logging_enabled instead.
2185
- # Whether to enable Stackdriver logging for this instance.
2085
+ # Deprecated, use instance.logging_enabled instead. Whether to enable
2086
+ # Stackdriver logging for this instance.
2186
2087
  # Corresponds to the JSON property `loggingEnabled`
2187
2088
  # @return [Boolean]
2188
2089
  attr_accessor :logging_enabled
2189
2090
  alias_method :logging_enabled?, :logging_enabled
2190
2091
 
2191
- # Deprecated, use instance.Name instead.
2192
- # Name of the instance to update.
2193
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2092
+ # Deprecated, use instance.Name instead. Name of the instance to update. Format:
2093
+ # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2194
2094
  # Corresponds to the JSON property `name`
2195
2095
  # @return [String]
2196
2096
  attr_accessor :name
2197
2097
 
2198
- # The update mask applies to instance. For the `FieldMask` definition, see
2199
- # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2200
- # fieldmask
2201
- # If an empty update_mask is provided, only the non-default valued field in
2202
- # the worker pool field will be updated. Note that in order to update a field
2203
- # to the default value (zero, false, empty string) an explicit update_mask
2098
+ # The update mask applies to instance. For the `FieldMask` definition, see https:
2099
+ # //developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2100
+ # fieldmask If an empty update_mask is provided, only the non-default valued
2101
+ # field in the worker pool field will be updated. Note that in order to update a
2102
+ # field to the default value (zero, false, empty string) an explicit update_mask
2204
2103
  # must be provided.
2205
2104
  # Corresponds to the JSON property `updateMask`
2206
2105
  # @return [String]
@@ -2223,13 +2122,11 @@ module Google
2223
2122
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateWorkerPoolRequest
2224
2123
  include Google::Apis::Core::Hashable
2225
2124
 
2226
- # The update mask applies to worker_pool. For the `FieldMask` definition,
2227
- # see
2125
+ # The update mask applies to worker_pool. For the `FieldMask` definition, see
2228
2126
  # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2229
- # fieldmask
2230
- # If an empty update_mask is provided, only the non-default valued field in
2231
- # the worker pool field will be updated. Note that in order to update a field
2232
- # to the default value (zero, false, empty string) an explicit update_mask
2127
+ # fieldmask If an empty update_mask is provided, only the non-default valued
2128
+ # field in the worker pool field will be updated. Note that in order to update a
2129
+ # field to the default value (zero, false, empty string) an explicit update_mask
2233
2130
  # must be provided.
2234
2131
  # Corresponds to the JSON property `updateMask`
2235
2132
  # @return [String]
@@ -2251,8 +2148,7 @@ module Google
2251
2148
  end
2252
2149
  end
2253
2150
 
2254
- # Defines the configuration to be used for a creating workers in
2255
- # the worker pool.
2151
+ # Defines the configuration to be used for creating workers in the worker pool.
2256
2152
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig
2257
2153
  include Google::Apis::Core::Hashable
2258
2154
 
@@ -2261,34 +2157,31 @@ module Google
2261
2157
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfig]
2262
2158
  attr_accessor :accelerator
2263
2159
 
2264
- # Required. Size of the disk attached to the worker, in GB.
2265
- # See https://cloud.google.com/compute/docs/disks/
2160
+ # Required. Size of the disk attached to the worker, in GB. See https://cloud.
2161
+ # google.com/compute/docs/disks/
2266
2162
  # Corresponds to the JSON property `diskSizeGb`
2267
2163
  # @return [Fixnum]
2268
2164
  attr_accessor :disk_size_gb
2269
2165
 
2270
- # Required. Disk Type to use for the worker.
2271
- # See [Storage
2272
- # options](https://cloud.google.com/compute/docs/disks/#introduction).
2273
- # Currently only `pd-standard` and `pd-ssd` are supported.
2166
+ # Required. Disk Type to use for the worker. See [Storage options](https://cloud.
2167
+ # google.com/compute/docs/disks/#introduction). Currently only `pd-standard` and
2168
+ # `pd-ssd` are supported.
2274
2169
  # Corresponds to the JSON property `diskType`
2275
2170
  # @return [String]
2276
2171
  attr_accessor :disk_type
2277
2172
 
2278
- # Labels associated with the workers.
2279
- # Label keys and values can be no longer than 63 characters, can only contain
2280
- # lowercase letters, numeric characters, underscores and dashes.
2281
- # International letters are permitted. Label keys must start with a letter.
2282
- # Label values are optional.
2283
- # There can not be more than 64 labels per resource.
2173
+ # Labels associated with the workers. Label keys and values can be no longer
2174
+ # than 63 characters, can only contain lowercase letters, numeric characters,
2175
+ # underscores and dashes. International letters are permitted. Label keys must
2176
+ # start with a letter. Label values are optional. There can not be more than 64
2177
+ # labels per resource.
2284
2178
  # Corresponds to the JSON property `labels`
2285
2179
  # @return [Hash<String,String>]
2286
2180
  attr_accessor :labels
2287
2181
 
2288
- # Required. Machine type of the worker, such as `n1-standard-2`.
2289
- # See https://cloud.google.com/compute/docs/machine-types for a list of
2290
- # supported machine types. Note that `f1-micro` and `g1-small` are not yet
2291
- # supported.
2182
+ # Required. Machine type of the worker, such as `n1-standard-2`. See https://
2183
+ # cloud.google.com/compute/docs/machine-types for a list of supported machine
2184
+ # types. Note that `f1-micro` and `g1-small` are not yet supported.
2292
2185
  # Corresponds to the JSON property `machineType`
2293
2186
  # @return [String]
2294
2187
  attr_accessor :machine_type
@@ -2298,30 +2191,34 @@ module Google
2298
2191
  # @return [Fixnum]
2299
2192
  attr_accessor :max_concurrent_actions
2300
2193
 
2301
- # Minimum CPU platform to use when creating the worker.
2302
- # See [CPU Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
2194
+ # Minimum CPU platform to use when creating the worker. See [CPU Platforms](
2195
+ # https://cloud.google.com/compute/docs/cpu-platforms).
2303
2196
  # Corresponds to the JSON property `minCpuPlatform`
2304
2197
  # @return [String]
2305
2198
  attr_accessor :min_cpu_platform
2306
2199
 
2307
- # Determines the type of network access granted to workers. Possible values:
2308
- # - "public": Workers can connect to the public internet.
2309
- # - "private": Workers can only connect to Google APIs and services.
2310
- # - "restricted-private": Workers can only connect to Google APIs that are
2311
- # reachable through `restricted.googleapis.com` (`199.36.153.4/30`).
2200
+ # Determines the type of network access granted to workers. Possible values: - "
2201
+ # public": Workers can connect to the public internet. - "private": Workers can
2202
+ # only connect to Google APIs and services. - "restricted-private": Workers can
2203
+ # only connect to Google APIs that are reachable through `restricted.googleapis.
2204
+ # com` (`199.36.153.4/30`).
2312
2205
  # Corresponds to the JSON property `networkAccess`
2313
2206
  # @return [String]
2314
2207
  attr_accessor :network_access
2315
2208
 
2316
- # Determines whether the worker is reserved (equivalent to a Compute Engine
2317
- # on-demand VM and therefore won't be preempted).
2318
- # See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more
2319
- # details.
2209
+ # Determines whether the worker is reserved (equivalent to a Compute Engine on-
2210
+ # demand VM and therefore won't be preempted). See [Preemptible VMs](https://
2211
+ # cloud.google.com/preemptible-vms/) for more details.
2320
2212
  # Corresponds to the JSON property `reserved`
2321
2213
  # @return [Boolean]
2322
2214
  attr_accessor :reserved
2323
2215
  alias_method :reserved?, :reserved
2324
2216
 
2217
+ # SoleTenancyConfig specifies information required to host a pool on STNs.
2218
+ # Corresponds to the JSON property `soleTenancy`
2219
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig]
2220
+ attr_accessor :sole_tenancy
2221
+
2325
2222
  # The name of the image used by each VM.
2326
2223
  # Corresponds to the JSON property `vmImage`
2327
2224
  # @return [String]
@@ -2342,6 +2239,7 @@ module Google
2342
2239
  @min_cpu_platform = args[:min_cpu_platform] if args.key?(:min_cpu_platform)
2343
2240
  @network_access = args[:network_access] if args.key?(:network_access)
2344
2241
  @reserved = args[:reserved] if args.key?(:reserved)
2242
+ @sole_tenancy = args[:sole_tenancy] if args.key?(:sole_tenancy)
2345
2243
  @vm_image = args[:vm_image] if args.key?(:vm_image)
2346
2244
  end
2347
2245
  end
@@ -2360,10 +2258,9 @@ module Google
2360
2258
  # @return [String]
2361
2259
  attr_accessor :channel
2362
2260
 
2363
- # WorkerPool resource name formatted as:
2364
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
2365
- # name should not be populated when creating a worker pool since it is
2366
- # provided in the `poolId` field.
2261
+ # WorkerPool resource name formatted as: `projects/[PROJECT_ID]/instances/[
2262
+ # INSTANCE_ID]/workerpools/[POOL_ID]`. name should not be populated when
2263
+ # creating a worker pool since it is provided in the `poolId` field.
2367
2264
  # Corresponds to the JSON property `name`
2368
2265
  # @return [String]
2369
2266
  attr_accessor :name
@@ -2373,14 +2270,13 @@ module Google
2373
2270
  # @return [String]
2374
2271
  attr_accessor :state
2375
2272
 
2376
- # Defines the configuration to be used for a creating workers in
2377
- # the worker pool.
2273
+ # Defines the configuration to be used for creating workers in the worker pool.
2378
2274
  # Corresponds to the JSON property `workerConfig`
2379
2275
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig]
2380
2276
  attr_accessor :worker_config
2381
2277
 
2382
- # The desired number of workers in the worker pool. Must be a value between
2383
- # 0 and 15000.
2278
+ # The desired number of workers in the worker pool. Must be a value between 0
2279
+ # and 15000.
2384
2280
  # Corresponds to the JSON property `workerCount`
2385
2281
  # @return [Fixnum]
2386
2282
  attr_accessor :worker_count
@@ -2402,14 +2298,13 @@ module Google
2402
2298
 
2403
2299
  # AdminTemp is a prelimiary set of administration tasks. It's called "Temp"
2404
2300
  # because we do not yet know the best way to represent admin tasks; it's
2405
- # possible that this will be entirely replaced in later versions of this API.
2406
- # If this message proves to be sufficient, it will be renamed in the alpha or
2407
- # beta release of this API.
2408
- # This message (suitably marshalled into a protobuf.Any) can be used as the
2409
- # inline_assignment field in a lease; the lease assignment field should simply
2410
- # be `"admin"` in these cases.
2411
- # This message is heavily based on Swarming administration tasks from the LUCI
2412
- # project (http://github.com/luci/luci-py/appengine/swarming).
2301
+ # possible that this will be entirely replaced in later versions of this API. If
2302
+ # this message proves to be sufficient, it will be renamed in the alpha or beta
2303
+ # release of this API. This message (suitably marshalled into a protobuf.Any)
2304
+ # can be used as the inline_assignment field in a lease; the lease assignment
2305
+ # field should simply be `"admin"` in these cases. This message is heavily based
2306
+ # on Swarming administration tasks from the LUCI project (http://github.com/luci/
2307
+ # luci-py/appengine/swarming).
2413
2308
  class GoogleDevtoolsRemoteworkersV1test2AdminTemp
2414
2309
  include Google::Apis::Core::Hashable
2415
2310
 
@@ -2445,13 +2340,12 @@ module Google
2445
2340
  attr_accessor :contents
2446
2341
 
2447
2342
  # The CommandTask and CommandResult messages assume the existence of a service
2448
- # that can serve blobs of content, identified by a hash and size known as a
2449
- # "digest." The method by which these blobs may be retrieved is not specified
2450
- # here, but a model implementation is in the Remote Execution API's
2451
- # "ContentAddressibleStorage" interface.
2452
- # In the context of the RWAPI, a Digest will virtually always refer to the
2453
- # contents of a file or a directory. The latter is represented by the
2454
- # byte-encoded Directory message.
2343
+ # that can serve blobs of content, identified by a hash and size known as a "
2344
+ # digest." The method by which these blobs may be retrieved is not specified
2345
+ # here, but a model implementation is in the Remote Execution API's "
2346
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2347
+ # will virtually always refer to the contents of a file or a directory. The
2348
+ # latter is represented by the byte-encoded Directory message.
2455
2349
  # Corresponds to the JSON property `digest`
2456
2350
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2457
2351
  attr_accessor :digest
@@ -2467,27 +2361,26 @@ module Google
2467
2361
  end
2468
2362
  end
2469
2363
 
2470
- # DEPRECATED - use CommandResult instead.
2471
- # Describes the actual outputs from the task.
2364
+ # DEPRECATED - use CommandResult instead. Describes the actual outputs from the
2365
+ # task.
2472
2366
  class GoogleDevtoolsRemoteworkersV1test2CommandOutputs
2473
2367
  include Google::Apis::Core::Hashable
2474
2368
 
2475
2369
  # exit_code is only fully reliable if the status' code is OK. If the task
2476
- # exceeded its deadline or was cancelled, the process may still produce an
2477
- # exit code as it is cancelled, and this will be populated, but a successful
2478
- # (zero) is unlikely to be correct unless the status code is OK.
2370
+ # exceeded its deadline or was cancelled, the process may still produce an exit
2371
+ # code as it is cancelled, and this will be populated, but a successful (zero)
2372
+ # is unlikely to be correct unless the status code is OK.
2479
2373
  # Corresponds to the JSON property `exitCode`
2480
2374
  # @return [Fixnum]
2481
2375
  attr_accessor :exit_code
2482
2376
 
2483
2377
  # The CommandTask and CommandResult messages assume the existence of a service
2484
- # that can serve blobs of content, identified by a hash and size known as a
2485
- # "digest." The method by which these blobs may be retrieved is not specified
2486
- # here, but a model implementation is in the Remote Execution API's
2487
- # "ContentAddressibleStorage" interface.
2488
- # In the context of the RWAPI, a Digest will virtually always refer to the
2489
- # contents of a file or a directory. The latter is represented by the
2490
- # byte-encoded Directory message.
2378
+ # that can serve blobs of content, identified by a hash and size known as a "
2379
+ # digest." The method by which these blobs may be retrieved is not specified
2380
+ # here, but a model implementation is in the Remote Execution API's "
2381
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2382
+ # will virtually always refer to the contents of a file or a directory. The
2383
+ # latter is represented by the byte-encoded Directory message.
2491
2384
  # Corresponds to the JSON property `outputs`
2492
2385
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2493
2386
  attr_accessor :outputs
@@ -2503,9 +2396,8 @@ module Google
2503
2396
  end
2504
2397
  end
2505
2398
 
2506
- # DEPRECATED - use CommandResult instead.
2507
- # Can be used as part of CompleteRequest.metadata, or are part of a more
2508
- # sophisticated message.
2399
+ # DEPRECATED - use CommandResult instead. Can be used as part of CompleteRequest.
2400
+ # metadata, or are part of a more sophisticated message.
2509
2401
  class GoogleDevtoolsRemoteworkersV1test2CommandOverhead
2510
2402
  include Google::Apis::Core::Hashable
2511
2403
 
@@ -2516,8 +2408,8 @@ module Google
2516
2408
  # @return [String]
2517
2409
  attr_accessor :duration
2518
2410
 
2519
- # The amount of time *not* spent executing the command (ie
2520
- # uploading/downloading files).
2411
+ # The amount of time *not* spent executing the command (ie uploading/downloading
2412
+ # files).
2521
2413
  # Corresponds to the JSON property `overhead`
2522
2414
  # @return [String]
2523
2415
  attr_accessor :overhead
@@ -2545,46 +2437,44 @@ module Google
2545
2437
  # @return [String]
2546
2438
  attr_accessor :duration
2547
2439
 
2548
- # The exit code of the process. An exit code of "0" should only be trusted if
2549
- # `status` has a code of OK (otherwise it may simply be unset).
2440
+ # The exit code of the process. An exit code of "0" should only be trusted if `
2441
+ # status` has a code of OK (otherwise it may simply be unset).
2550
2442
  # Corresponds to the JSON property `exitCode`
2551
2443
  # @return [Fixnum]
2552
2444
  attr_accessor :exit_code
2553
2445
 
2554
- # Implementation-dependent metadata about the task. Both servers and bots
2555
- # may define messages which can be encoded here; bots are free to provide
2556
- # metadata in multiple formats, and servers are free to choose one or more
2557
- # of the values to process and ignore others. In particular, it is *not*
2558
- # considered an error for the bot to provide the server with a field that it
2559
- # doesn't know about.
2446
+ # Implementation-dependent metadata about the task. Both servers and bots may
2447
+ # define messages which can be encoded here; bots are free to provide metadata
2448
+ # in multiple formats, and servers are free to choose one or more of the values
2449
+ # to process and ignore others. In particular, it is *not* considered an error
2450
+ # for the bot to provide the server with a field that it doesn't know about.
2560
2451
  # Corresponds to the JSON property `metadata`
2561
2452
  # @return [Array<Hash<String,Object>>]
2562
2453
  attr_accessor :metadata
2563
2454
 
2564
2455
  # The CommandTask and CommandResult messages assume the existence of a service
2565
- # that can serve blobs of content, identified by a hash and size known as a
2566
- # "digest." The method by which these blobs may be retrieved is not specified
2567
- # here, but a model implementation is in the Remote Execution API's
2568
- # "ContentAddressibleStorage" interface.
2569
- # In the context of the RWAPI, a Digest will virtually always refer to the
2570
- # contents of a file or a directory. The latter is represented by the
2571
- # byte-encoded Directory message.
2456
+ # that can serve blobs of content, identified by a hash and size known as a "
2457
+ # digest." The method by which these blobs may be retrieved is not specified
2458
+ # here, but a model implementation is in the Remote Execution API's "
2459
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2460
+ # will virtually always refer to the contents of a file or a directory. The
2461
+ # latter is represented by the byte-encoded Directory message.
2572
2462
  # Corresponds to the JSON property `outputs`
2573
2463
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2574
2464
  attr_accessor :outputs
2575
2465
 
2576
- # The amount of time *not* spent executing the command (ie
2577
- # uploading/downloading files).
2466
+ # The amount of time *not* spent executing the command (ie uploading/downloading
2467
+ # files).
2578
2468
  # Corresponds to the JSON property `overhead`
2579
2469
  # @return [String]
2580
2470
  attr_accessor :overhead
2581
2471
 
2582
- # The `Status` type defines a logical error model that is suitable for
2583
- # different programming environments, including REST APIs and RPC APIs. It is
2584
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2585
- # three pieces of data: error code, error message, and error details.
2586
- # You can find out more about this error model and how to work with it in the
2587
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2472
+ # The `Status` type defines a logical error model that is suitable for different
2473
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2474
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2475
+ # data: error code, error message, and error details. You can find out more
2476
+ # about this error model and how to work with it in the [API Design Guide](https:
2477
+ # //cloud.google.com/apis/design/errors).
2588
2478
  # Corresponds to the JSON property `status`
2589
2479
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus]
2590
2480
  attr_accessor :status
@@ -2640,14 +2530,13 @@ module Google
2640
2530
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskInputs
2641
2531
  include Google::Apis::Core::Hashable
2642
2532
 
2643
- # The command itself to run (e.g., argv).
2644
- # This field should be passed directly to the underlying operating system,
2645
- # and so it must be sensible to that operating system. For example, on
2646
- # Windows, the first argument might be "C:\Windows\System32\ping.exe" -
2647
- # that is, using drive letters and backslashes. A command for a *nix
2648
- # system, on the other hand, would use forward slashes.
2649
- # All other fields in the RWAPI must consistently use forward slashes,
2650
- # since those fields may be interpretted by both the service and the bot.
2533
+ # The command itself to run (e.g., argv). This field should be passed directly
2534
+ # to the underlying operating system, and so it must be sensible to that
2535
+ # operating system. For example, on Windows, the first argument might be "C:\
2536
+ # Windows\System32\ping.exe" - that is, using drive letters and backslashes. A
2537
+ # command for a *nix system, on the other hand, would use forward slashes. All
2538
+ # other fields in the RWAPI must consistently use forward slashes, since those
2539
+ # fields may be interpretted by both the service and the bot.
2651
2540
  # Corresponds to the JSON property `arguments`
2652
2541
  # @return [Array<String>]
2653
2542
  attr_accessor :arguments
@@ -2657,31 +2546,29 @@ module Google
2657
2546
  # @return [Array<Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputsEnvironmentVariable>]
2658
2547
  attr_accessor :environment_variables
2659
2548
 
2660
- # The input filesystem to be set up prior to the task beginning. The
2661
- # contents should be a repeated set of FileMetadata messages though other
2662
- # formats are allowed if better for the implementation (eg, a LUCI-style
2663
- # .isolated file).
2664
- # This field is repeated since implementations might want to cache the
2665
- # metadata, in which case it may be useful to break up portions of the
2666
- # filesystem that change frequently (eg, specific input files) from those
2667
- # that don't (eg, standard header files).
2549
+ # The input filesystem to be set up prior to the task beginning. The contents
2550
+ # should be a repeated set of FileMetadata messages though other formats are
2551
+ # allowed if better for the implementation (eg, a LUCI-style .isolated file).
2552
+ # This field is repeated since implementations might want to cache the metadata,
2553
+ # in which case it may be useful to break up portions of the filesystem that
2554
+ # change frequently (eg, specific input files) from those that don't (eg,
2555
+ # standard header files).
2668
2556
  # Corresponds to the JSON property `files`
2669
2557
  # @return [Array<Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest>]
2670
2558
  attr_accessor :files
2671
2559
 
2672
- # Inline contents for blobs expected to be needed by the bot to execute the
2673
- # task. For example, contents of entries in `files` or blobs that are
2674
- # indirectly referenced by an entry there.
2675
- # The bot should check against this list before downloading required task
2676
- # inputs to reduce the number of communications between itself and the
2677
- # remote CAS server.
2560
+ # Inline contents for blobs expected to be needed by the bot to execute the task.
2561
+ # For example, contents of entries in `files` or blobs that are indirectly
2562
+ # referenced by an entry there. The bot should check against this list before
2563
+ # downloading required task inputs to reduce the number of communications
2564
+ # between itself and the remote CAS server.
2678
2565
  # Corresponds to the JSON property `inlineBlobs`
2679
2566
  # @return [Array<Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Blob>]
2680
2567
  attr_accessor :inline_blobs
2681
2568
 
2682
- # Directory from which a command is executed. It is a relative directory
2683
- # with respect to the bot's working directory (i.e., "./"). If it is
2684
- # non-empty, then it must exist under "./". Otherwise, "./" will be used.
2569
+ # Directory from which a command is executed. It is a relative directory with
2570
+ # respect to the bot's working directory (i.e., "./"). If it is non-empty, then
2571
+ # it must exist under "./". Otherwise, "./" will be used.
2685
2572
  # Corresponds to the JSON property `workingDirectory`
2686
2573
  # @return [String]
2687
2574
  attr_accessor :working_directory
@@ -2729,32 +2616,32 @@ module Google
2729
2616
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskOutputs
2730
2617
  include Google::Apis::Core::Hashable
2731
2618
 
2732
- # A list of expected directories, relative to the execution root. All paths
2733
- # MUST be delimited by forward slashes.
2619
+ # A list of expected directories, relative to the execution root. All paths MUST
2620
+ # be delimited by forward slashes.
2734
2621
  # Corresponds to the JSON property `directories`
2735
2622
  # @return [Array<String>]
2736
2623
  attr_accessor :directories
2737
2624
 
2738
- # A list of expected files, relative to the execution root. All paths
2739
- # MUST be delimited by forward slashes.
2625
+ # A list of expected files, relative to the execution root. All paths MUST be
2626
+ # delimited by forward slashes.
2740
2627
  # Corresponds to the JSON property `files`
2741
2628
  # @return [Array<String>]
2742
2629
  attr_accessor :files
2743
2630
 
2744
- # The destination to which any stderr should be sent. The method by which
2745
- # the bot should send the stream contents to that destination is not
2746
- # defined in this API. As examples, the destination could be a file
2747
- # referenced in the `files` field in this message, or it could be a URI
2748
- # that must be written via the ByteStream API.
2631
+ # The destination to which any stderr should be sent. The method by which the
2632
+ # bot should send the stream contents to that destination is not defined in this
2633
+ # API. As examples, the destination could be a file referenced in the `files`
2634
+ # field in this message, or it could be a URI that must be written via the
2635
+ # ByteStream API.
2749
2636
  # Corresponds to the JSON property `stderrDestination`
2750
2637
  # @return [String]
2751
2638
  attr_accessor :stderr_destination
2752
2639
 
2753
- # The destination to which any stdout should be sent. The method by which
2754
- # the bot should send the stream contents to that destination is not
2755
- # defined in this API. As examples, the destination could be a file
2756
- # referenced in the `files` field in this message, or it could be a URI
2757
- # that must be written via the ByteStream API.
2640
+ # The destination to which any stdout should be sent. The method by which the
2641
+ # bot should send the stream contents to that destination is not defined in this
2642
+ # API. As examples, the destination could be a file referenced in the `files`
2643
+ # field in this message, or it could be a URI that must be written via the
2644
+ # ByteStream API.
2758
2645
  # Corresponds to the JSON property `stdoutDestination`
2759
2646
  # @return [String]
2760
2647
  attr_accessor :stdout_destination
@@ -2776,27 +2663,26 @@ module Google
2776
2663
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskTimeouts
2777
2664
  include Google::Apis::Core::Hashable
2778
2665
 
2779
- # This specifies the maximum time that the task can run, excluding the
2780
- # time required to download inputs or upload outputs. That is, the worker
2781
- # will terminate the task if it runs longer than this.
2666
+ # This specifies the maximum time that the task can run, excluding the time
2667
+ # required to download inputs or upload outputs. That is, the worker will
2668
+ # terminate the task if it runs longer than this.
2782
2669
  # Corresponds to the JSON property `execution`
2783
2670
  # @return [String]
2784
2671
  attr_accessor :execution
2785
2672
 
2786
- # This specifies the maximum amount of time the task can be idle - that is,
2787
- # go without generating some output in either stdout or stderr. If the
2788
- # process is silent for more than the specified time, the worker will
2789
- # terminate the task.
2673
+ # This specifies the maximum amount of time the task can be idle - that is, go
2674
+ # without generating some output in either stdout or stderr. If the process is
2675
+ # silent for more than the specified time, the worker will terminate the task.
2790
2676
  # Corresponds to the JSON property `idle`
2791
2677
  # @return [String]
2792
2678
  attr_accessor :idle
2793
2679
 
2794
2680
  # If the execution or IO timeouts are exceeded, the worker will try to
2795
- # gracefully terminate the task and return any existing logs. However,
2796
- # tasks may be hard-frozen in which case this process will fail. This
2797
- # timeout specifies how long to wait for a terminated task to shut down
2798
- # gracefully (e.g. via SIGTERM) before we bring down the hammer (e.g.
2799
- # SIGKILL on *nix, CTRL_BREAK_EVENT on Windows).
2681
+ # gracefully terminate the task and return any existing logs. However, tasks may
2682
+ # be hard-frozen in which case this process will fail. This timeout specifies
2683
+ # how long to wait for a terminated task to shut down gracefully (e.g. via
2684
+ # SIGTERM) before we bring down the hammer (e.g. SIGKILL on *nix,
2685
+ # CTRL_BREAK_EVENT on Windows).
2800
2686
  # Corresponds to the JSON property `shutdown`
2801
2687
  # @return [String]
2802
2688
  attr_accessor :shutdown
@@ -2814,13 +2700,12 @@ module Google
2814
2700
  end
2815
2701
 
2816
2702
  # The CommandTask and CommandResult messages assume the existence of a service
2817
- # that can serve blobs of content, identified by a hash and size known as a
2818
- # "digest." The method by which these blobs may be retrieved is not specified
2819
- # here, but a model implementation is in the Remote Execution API's
2820
- # "ContentAddressibleStorage" interface.
2821
- # In the context of the RWAPI, a Digest will virtually always refer to the
2822
- # contents of a file or a directory. The latter is represented by the
2823
- # byte-encoded Directory message.
2703
+ # that can serve blobs of content, identified by a hash and size known as a "
2704
+ # digest." The method by which these blobs may be retrieved is not specified
2705
+ # here, but a model implementation is in the Remote Execution API's "
2706
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2707
+ # will virtually always refer to the contents of a file or a directory. The
2708
+ # latter is represented by the byte-encoded Directory message.
2824
2709
  class GoogleDevtoolsRemoteworkersV1test2Digest
2825
2710
  include Google::Apis::Core::Hashable
2826
2711
 
@@ -2831,9 +2716,9 @@ module Google
2831
2716
  attr_accessor :hash_prop
2832
2717
 
2833
2718
  # The size of the contents. While this is not strictly required as part of an
2834
- # identifier (after all, any given hash will have exactly one canonical
2835
- # size), it's useful in almost all cases when one might want to send or
2836
- # retrieve blobs of content and is included here for this reason.
2719
+ # identifier (after all, any given hash will have exactly one canonical size),
2720
+ # it's useful in almost all cases when one might want to send or retrieve blobs
2721
+ # of content and is included here for this reason.
2837
2722
  # Corresponds to the JSON property `sizeBytes`
2838
2723
  # @return [Fixnum]
2839
2724
  attr_accessor :size_bytes
@@ -2881,13 +2766,12 @@ module Google
2881
2766
  include Google::Apis::Core::Hashable
2882
2767
 
2883
2768
  # The CommandTask and CommandResult messages assume the existence of a service
2884
- # that can serve blobs of content, identified by a hash and size known as a
2885
- # "digest." The method by which these blobs may be retrieved is not specified
2886
- # here, but a model implementation is in the Remote Execution API's
2887
- # "ContentAddressibleStorage" interface.
2888
- # In the context of the RWAPI, a Digest will virtually always refer to the
2889
- # contents of a file or a directory. The latter is represented by the
2890
- # byte-encoded Directory message.
2769
+ # that can serve blobs of content, identified by a hash and size known as a "
2770
+ # digest." The method by which these blobs may be retrieved is not specified
2771
+ # here, but a model implementation is in the Remote Execution API's "
2772
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2773
+ # will virtually always refer to the contents of a file or a directory. The
2774
+ # latter is represented by the byte-encoded Directory message.
2891
2775
  # Corresponds to the JSON property `digest`
2892
2776
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2893
2777
  attr_accessor :digest
@@ -2913,21 +2797,20 @@ module Google
2913
2797
  class GoogleDevtoolsRemoteworkersV1test2FileMetadata
2914
2798
  include Google::Apis::Core::Hashable
2915
2799
 
2916
- # If the file is small enough, its contents may also or alternatively be
2917
- # listed here.
2800
+ # If the file is small enough, its contents may also or alternatively be listed
2801
+ # here.
2918
2802
  # Corresponds to the JSON property `contents`
2919
2803
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
2920
2804
  # @return [String]
2921
2805
  attr_accessor :contents
2922
2806
 
2923
2807
  # The CommandTask and CommandResult messages assume the existence of a service
2924
- # that can serve blobs of content, identified by a hash and size known as a
2925
- # "digest." The method by which these blobs may be retrieved is not specified
2926
- # here, but a model implementation is in the Remote Execution API's
2927
- # "ContentAddressibleStorage" interface.
2928
- # In the context of the RWAPI, a Digest will virtually always refer to the
2929
- # contents of a file or a directory. The latter is represented by the
2930
- # byte-encoded Directory message.
2808
+ # that can serve blobs of content, identified by a hash and size known as a "
2809
+ # digest." The method by which these blobs may be retrieved is not specified
2810
+ # here, but a model implementation is in the Remote Execution API's "
2811
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2812
+ # will virtually always refer to the contents of a file or a directory. The
2813
+ # latter is represented by the byte-encoded Directory message.
2931
2814
  # Corresponds to the JSON property `digest`
2932
2815
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2933
2816
  attr_accessor :digest
@@ -2938,11 +2821,11 @@ module Google
2938
2821
  attr_accessor :is_executable
2939
2822
  alias_method :is_executable?, :is_executable
2940
2823
 
2941
- # The path of this file. If this message is part of the
2942
- # CommandOutputs.outputs fields, the path is relative to the execution root
2943
- # and must correspond to an entry in CommandTask.outputs.files. If this
2944
- # message is part of a Directory message, then the path is relative to the
2945
- # root of that directory. All paths MUST be delimited by forward slashes.
2824
+ # The path of this file. If this message is part of the CommandOutputs.outputs
2825
+ # fields, the path is relative to the execution root and must correspond to an
2826
+ # entry in CommandTask.outputs.files. If this message is part of a Directory
2827
+ # message, then the path is relative to the root of that directory. All paths
2828
+ # MUST be delimited by forward slashes.
2946
2829
  # Corresponds to the JSON property `path`
2947
2830
  # @return [String]
2948
2831
  attr_accessor :path
@@ -3003,47 +2886,45 @@ module Google
3003
2886
  class GoogleLongrunningOperation
3004
2887
  include Google::Apis::Core::Hashable
3005
2888
 
3006
- # If the value is `false`, it means the operation is still in progress.
3007
- # If `true`, the operation is completed, and either `error` or `response` is
3008
- # available.
2889
+ # If the value is `false`, it means the operation is still in progress. If `true`
2890
+ # , the operation is completed, and either `error` or `response` is available.
3009
2891
  # Corresponds to the JSON property `done`
3010
2892
  # @return [Boolean]
3011
2893
  attr_accessor :done
3012
2894
  alias_method :done?, :done
3013
2895
 
3014
- # The `Status` type defines a logical error model that is suitable for
3015
- # different programming environments, including REST APIs and RPC APIs. It is
3016
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3017
- # three pieces of data: error code, error message, and error details.
3018
- # You can find out more about this error model and how to work with it in the
3019
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2896
+ # The `Status` type defines a logical error model that is suitable for different
2897
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2898
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2899
+ # data: error code, error message, and error details. You can find out more
2900
+ # about this error model and how to work with it in the [API Design Guide](https:
2901
+ # //cloud.google.com/apis/design/errors).
3020
2902
  # Corresponds to the JSON property `error`
3021
2903
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus]
3022
2904
  attr_accessor :error
3023
2905
 
3024
- # Service-specific metadata associated with the operation. It typically
3025
- # contains progress information and common metadata such as create time.
3026
- # Some services might not provide such metadata. Any method that returns a
3027
- # long-running operation should document the metadata type, if any.
2906
+ # Service-specific metadata associated with the operation. It typically contains
2907
+ # progress information and common metadata such as create time. Some services
2908
+ # might not provide such metadata. Any method that returns a long-running
2909
+ # operation should document the metadata type, if any.
3028
2910
  # Corresponds to the JSON property `metadata`
3029
2911
  # @return [Hash<String,Object>]
3030
2912
  attr_accessor :metadata
3031
2913
 
3032
2914
  # The server-assigned name, which is only unique within the same service that
3033
- # originally returns it. If you use the default HTTP mapping, the
3034
- # `name` should be a resource name ending with `operations/`unique_id``.
2915
+ # originally returns it. If you use the default HTTP mapping, the `name` should
2916
+ # be a resource name ending with `operations/`unique_id``.
3035
2917
  # Corresponds to the JSON property `name`
3036
2918
  # @return [String]
3037
2919
  attr_accessor :name
3038
2920
 
3039
- # The normal response of the operation in case of success. If the original
3040
- # method returns no data on success, such as `Delete`, the response is
3041
- # `google.protobuf.Empty`. If the original method is standard
3042
- # `Get`/`Create`/`Update`, the response should be the resource. For other
3043
- # methods, the response should have the type `XxxResponse`, where `Xxx`
3044
- # is the original method name. For example, if the original method name
3045
- # is `TakeSnapshot()`, the inferred response type is
3046
- # `TakeSnapshotResponse`.
2921
+ # The normal response of the operation in case of success. If the original
2922
+ # method returns no data on success, such as `Delete`, the response is `google.
2923
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
2924
+ # the response should be the resource. For other methods, the response should
2925
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
2926
+ # example, if the original method name is `TakeSnapshot()`, the inferred
2927
+ # response type is `TakeSnapshotResponse`.
3047
2928
  # Corresponds to the JSON property `response`
3048
2929
  # @return [Hash<String,Object>]
3049
2930
  attr_accessor :response
@@ -3062,13 +2943,11 @@ module Google
3062
2943
  end
3063
2944
  end
3064
2945
 
3065
- # A generic empty message that you can re-use to avoid defining duplicated
3066
- # empty messages in your APIs. A typical example is to use it as the request
3067
- # or the response type of an API method. For instance:
3068
- # service Foo `
3069
- # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
3070
- # `
3071
- # The JSON representation for `Empty` is empty JSON object ````.
2946
+ # A generic empty message that you can re-use to avoid defining duplicated empty
2947
+ # messages in your APIs. A typical example is to use it as the request or the
2948
+ # response type of an API method. For instance: service Foo ` rpc Bar(google.
2949
+ # protobuf.Empty) returns (google.protobuf.Empty); ` The JSON representation for
2950
+ # `Empty` is empty JSON object ````.
3072
2951
  class GoogleProtobufEmpty
3073
2952
  include Google::Apis::Core::Hashable
3074
2953
 
@@ -3081,12 +2960,12 @@ module Google
3081
2960
  end
3082
2961
  end
3083
2962
 
3084
- # The `Status` type defines a logical error model that is suitable for
3085
- # different programming environments, including REST APIs and RPC APIs. It is
3086
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3087
- # three pieces of data: error code, error message, and error details.
3088
- # You can find out more about this error model and how to work with it in the
3089
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2963
+ # The `Status` type defines a logical error model that is suitable for different
2964
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2965
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2966
+ # data: error code, error message, and error details. You can find out more
2967
+ # about this error model and how to work with it in the [API Design Guide](https:
2968
+ # //cloud.google.com/apis/design/errors).
3090
2969
  class GoogleRpcStatus
3091
2970
  include Google::Apis::Core::Hashable
3092
2971
 
@@ -3095,15 +2974,15 @@ module Google
3095
2974
  # @return [Fixnum]
3096
2975
  attr_accessor :code
3097
2976
 
3098
- # A list of messages that carry the error details. There is a common set of
2977
+ # A list of messages that carry the error details. There is a common set of
3099
2978
  # message types for APIs to use.
3100
2979
  # Corresponds to the JSON property `details`
3101
2980
  # @return [Array<Hash<String,Object>>]
3102
2981
  attr_accessor :details
3103
2982
 
3104
- # A developer-facing error message, which should be in English. Any
3105
- # user-facing error message should be localized and sent in the
3106
- # google.rpc.Status.details field, or localized by the client.
2983
+ # A developer-facing error message, which should be in English. Any user-facing
2984
+ # error message should be localized and sent in the google.rpc.Status.details
2985
+ # field, or localized by the client.
3107
2986
  # Corresponds to the JSON property `message`
3108
2987
  # @return [String]
3109
2988
  attr_accessor :message