google-api-client 0.43.0 → 0.45.1

Files changed (863)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +462 -0
  3. data/api_names.yaml +1 -0
  4. data/docs/oauth-server.md +4 -6
  5. data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
  6. data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
  7. data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
  8. data/generated/google/apis/accessapproval_v1.rb +1 -1
  9. data/generated/google/apis/accessapproval_v1/classes.rb +53 -86
  10. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  11. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  12. data/generated/google/apis/accesscontextmanager_v1/classes.rb +198 -236
  13. data/generated/google/apis/accesscontextmanager_v1/service.rb +128 -171
  14. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  15. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  16. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  17. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  18. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +17 -6
  19. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  20. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  21. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +47 -2
  22. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +18 -0
  23. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  24. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  25. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  26. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  27. data/generated/google/apis/admin_directory_v1.rb +6 -8
  28. data/generated/google/apis/admin_directory_v1/classes.rb +209 -242
  29. data/generated/google/apis/admin_directory_v1/representations.rb +0 -39
  30. data/generated/google/apis/admin_directory_v1/service.rb +535 -998
  31. data/generated/google/apis/admin_reports_v1.rb +6 -5
  32. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  33. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  34. data/generated/google/apis/admob_v1.rb +4 -1
  35. data/generated/google/apis/admob_v1/classes.rb +2 -2
  36. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  37. data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
  38. data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
  39. data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
  40. data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2135 -0
  41. data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
  42. data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1610 -0
  43. data/generated/google/apis/analyticsdata_v1alpha/representations.rb +789 -0
  44. data/generated/google/apis/analyticsdata_v1alpha/service.rb +220 -0
  45. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  46. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  47. data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
  48. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  49. data/generated/google/apis/androidmanagement_v1/classes.rb +98 -61
  50. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  51. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  52. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  53. data/generated/google/apis/apigateway_v1alpha1.rb +34 -0
  54. data/generated/google/apis/apigateway_v1alpha1/classes.rb +633 -0
  55. data/generated/google/apis/apigateway_v1alpha1/representations.rb +250 -0
  56. data/generated/google/apis/apigateway_v1alpha1/service.rb +623 -0
  57. data/generated/google/apis/apigateway_v1alpha2.rb +34 -0
  58. data/generated/google/apis/apigateway_v1alpha2/classes.rb +633 -0
  59. data/generated/google/apis/apigateway_v1alpha2/representations.rb +250 -0
  60. data/generated/google/apis/apigateway_v1alpha2/service.rb +623 -0
  61. data/generated/google/apis/apigateway_v1beta.rb +34 -0
  62. data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
  63. data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
  64. data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
  65. data/generated/google/apis/apigee_v1.rb +6 -7
  66. data/generated/google/apis/apigee_v1/classes.rb +432 -82
  67. data/generated/google/apis/apigee_v1/representations.rb +139 -1
  68. data/generated/google/apis/apigee_v1/service.rb +158 -41
  69. data/generated/google/apis/appengine_v1.rb +1 -1
  70. data/generated/google/apis/appengine_v1/classes.rb +96 -59
  71. data/generated/google/apis/appengine_v1/representations.rb +17 -0
  72. data/generated/google/apis/appengine_v1/service.rb +38 -47
  73. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  74. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  75. data/generated/google/apis/appengine_v1beta.rb +1 -1
  76. data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
  77. data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
  78. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  79. data/generated/google/apis/appsmarket_v2.rb +1 -1
  80. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  81. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  82. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +242 -337
  83. data/generated/google/apis/artifactregistry_v1beta1/representations.rb +1 -0
  84. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  85. data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
  86. data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +458 -0
  87. data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +230 -0
  88. data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
  89. data/generated/google/apis/bigquery_v2.rb +1 -1
  90. data/generated/google/apis/bigquery_v2/classes.rb +403 -553
  91. data/generated/google/apis/bigquery_v2/representations.rb +17 -0
  92. data/generated/google/apis/bigquery_v2/service.rb +32 -40
  93. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  94. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  95. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  96. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  97. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  98. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  99. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  100. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  101. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  102. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  103. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  104. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  105. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  106. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  107. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  108. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  109. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  110. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  111. data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
  112. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  113. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  114. data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
  115. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  116. data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
  117. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  118. data/generated/google/apis/billingbudgets_v1beta1/classes.rb +15 -5
  119. data/generated/google/apis/billingbudgets_v1beta1/representations.rb +1 -0
  120. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  121. data/generated/google/apis/binaryauthorization_v1/classes.rb +433 -354
  122. data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
  123. data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
  124. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  125. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +433 -354
  126. data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
  127. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
  128. data/generated/google/apis/calendar_v3.rb +1 -1
  129. data/generated/google/apis/calendar_v3/classes.rb +13 -10
  130. data/generated/google/apis/chat_v1.rb +1 -1
  131. data/generated/google/apis/chat_v1/classes.rb +165 -116
  132. data/generated/google/apis/chat_v1/representations.rb +35 -0
  133. data/generated/google/apis/chat_v1/service.rb +30 -42
  134. data/generated/google/apis/civicinfo_v2.rb +1 -1
  135. data/generated/google/apis/civicinfo_v2/classes.rb +18 -8
  136. data/generated/google/apis/civicinfo_v2/representations.rb +2 -0
  137. data/generated/google/apis/classroom_v1.rb +7 -1
  138. data/generated/google/apis/classroom_v1/classes.rb +132 -0
  139. data/generated/google/apis/classroom_v1/representations.rb +43 -0
  140. data/generated/google/apis/classroom_v1/service.rb +240 -0
  141. data/generated/google/apis/cloudasset_v1.rb +1 -1
  142. data/generated/google/apis/cloudasset_v1/classes.rb +764 -1039
  143. data/generated/google/apis/cloudasset_v1/representations.rb +16 -0
  144. data/generated/google/apis/cloudasset_v1/service.rb +125 -167
  145. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  146. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  147. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  148. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  149. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  150. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  151. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  152. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
  153. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  154. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  155. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  156. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  157. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  158. data/generated/google/apis/cloudbilling_v1/classes.rb +284 -445
  159. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  160. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  161. data/generated/google/apis/cloudbuild_v1/classes.rb +337 -343
  162. data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
  163. data/generated/google/apis/cloudbuild_v1/service.rb +268 -66
  164. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  165. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
  166. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
  167. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  168. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  169. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
  170. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
  171. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  172. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  173. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  174. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  175. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  176. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  177. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  178. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  179. data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
  180. data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
  181. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  182. data/generated/google/apis/cloudidentity_v1.rb +4 -1
  183. data/generated/google/apis/cloudidentity_v1/classes.rb +943 -75
  184. data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
  185. data/generated/google/apis/cloudidentity_v1/service.rb +841 -62
  186. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  187. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1233 -307
  188. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
  189. data/generated/google/apis/cloudidentity_v1beta1/service.rb +906 -96
  190. data/generated/google/apis/cloudiot_v1.rb +1 -1
  191. data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
  192. data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
  193. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  194. data/generated/google/apis/cloudkms_v1.rb +1 -1
  195. data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
  196. data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
  197. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  198. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  199. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  200. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  201. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  202. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
  203. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
  204. data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
  205. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  206. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
  207. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
  208. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
  209. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  210. data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
  211. data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
  212. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  213. data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
  214. data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
  215. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  216. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  217. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  218. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  219. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  220. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  221. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  222. data/generated/google/apis/cloudsearch_v1/classes.rb +650 -781
  223. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  224. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  225. data/generated/google/apis/cloudshell_v1.rb +1 -1
  226. data/generated/google/apis/cloudshell_v1/classes.rb +36 -227
  227. data/generated/google/apis/cloudshell_v1/representations.rb +0 -67
  228. data/generated/google/apis/cloudshell_v1/service.rb +21 -25
  229. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  230. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  231. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  232. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  233. data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
  234. data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
  235. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  236. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  237. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
  238. data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
  239. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  240. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  241. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
  242. data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
  243. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  244. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  245. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  246. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  247. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  248. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  249. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  250. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  251. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  252. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  253. data/generated/google/apis/composer_v1.rb +1 -1
  254. data/generated/google/apis/composer_v1/classes.rb +190 -242
  255. data/generated/google/apis/composer_v1/service.rb +79 -150
  256. data/generated/google/apis/composer_v1beta1.rb +1 -1
  257. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  258. data/generated/google/apis/composer_v1beta1/service.rb +92 -179
  259. data/generated/google/apis/compute_alpha.rb +1 -1
  260. data/generated/google/apis/compute_alpha/classes.rb +681 -127
  261. data/generated/google/apis/compute_alpha/representations.rb +110 -6
  262. data/generated/google/apis/compute_alpha/service.rb +695 -692
  263. data/generated/google/apis/compute_beta.rb +1 -1
  264. data/generated/google/apis/compute_beta/classes.rb +570 -70
  265. data/generated/google/apis/compute_beta/representations.rb +112 -1
  266. data/generated/google/apis/compute_beta/service.rb +608 -605
  267. data/generated/google/apis/compute_v1.rb +1 -1
  268. data/generated/google/apis/compute_v1/classes.rb +977 -85
  269. data/generated/google/apis/compute_v1/representations.rb +372 -0
  270. data/generated/google/apis/compute_v1/service.rb +747 -15
  271. data/generated/google/apis/container_v1.rb +1 -1
  272. data/generated/google/apis/container_v1/classes.rb +970 -965
  273. data/generated/google/apis/container_v1/representations.rb +60 -0
  274. data/generated/google/apis/container_v1/service.rb +435 -502
  275. data/generated/google/apis/container_v1beta1.rb +1 -1
  276. data/generated/google/apis/container_v1beta1/classes.rb +1094 -1044
  277. data/generated/google/apis/container_v1beta1/representations.rb +91 -0
  278. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  279. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  280. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
  281. data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
  282. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  283. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  284. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
  285. data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
  286. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  287. data/generated/google/apis/content_v2.rb +1 -1
  288. data/generated/google/apis/content_v2/classes.rb +5 -2
  289. data/generated/google/apis/content_v2_1.rb +1 -1
  290. data/generated/google/apis/content_v2_1/classes.rb +107 -5
  291. data/generated/google/apis/content_v2_1/representations.rb +35 -0
  292. data/generated/google/apis/content_v2_1/service.rb +54 -3
  293. data/generated/google/apis/customsearch_v1.rb +1 -1
  294. data/generated/google/apis/customsearch_v1/service.rb +2 -2
  295. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  296. data/generated/google/apis/datacatalog_v1beta1/classes.rb +389 -573
  297. data/generated/google/apis/datacatalog_v1beta1/representations.rb +1 -0
  298. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  299. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  300. data/generated/google/apis/dataflow_v1b3/classes.rb +1162 -973
  301. data/generated/google/apis/dataflow_v1b3/representations.rb +145 -0
  302. data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
  303. data/generated/google/apis/datafusion_v1.rb +5 -8
  304. data/generated/google/apis/datafusion_v1/classes.rb +283 -397
  305. data/generated/google/apis/datafusion_v1/representations.rb +5 -0
  306. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  307. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  308. data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
  309. data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
  310. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  311. data/generated/google/apis/dataproc_v1.rb +1 -1
  312. data/generated/google/apis/dataproc_v1/classes.rb +60 -14
  313. data/generated/google/apis/dataproc_v1/representations.rb +18 -0
  314. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  315. data/generated/google/apis/dataproc_v1beta2/classes.rb +80 -10
  316. data/generated/google/apis/dataproc_v1beta2/representations.rb +33 -0
  317. data/generated/google/apis/datastore_v1.rb +1 -1
  318. data/generated/google/apis/datastore_v1/classes.rb +330 -472
  319. data/generated/google/apis/datastore_v1/service.rb +52 -63
  320. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  321. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  322. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  323. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  324. data/generated/google/apis/datastore_v1beta3/classes.rb +255 -371
  325. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  326. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  327. data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
  328. data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
  329. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  330. data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
  331. data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
  332. data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
  333. data/generated/google/apis/dialogflow_v2.rb +1 -1
  334. data/generated/google/apis/dialogflow_v2/classes.rb +199 -70
  335. data/generated/google/apis/dialogflow_v2/representations.rb +104 -15
  336. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  337. data/generated/google/apis/dialogflow_v2beta1/classes.rb +210 -78
  338. data/generated/google/apis/dialogflow_v2beta1/representations.rb +104 -15
  339. data/generated/google/apis/dialogflow_v2beta1/service.rb +500 -325
  340. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → dialogflow_v3beta1.rb} +13 -10
  341. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8352 -0
  342. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3544 -0
  343. data/generated/google/apis/dialogflow_v3beta1/service.rb +2812 -0
  344. data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
  345. data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
  346. data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
  347. data/generated/google/apis/displayvideo_v1.rb +1 -1
  348. data/generated/google/apis/displayvideo_v1/classes.rb +63 -8
  349. data/generated/google/apis/displayvideo_v1/representations.rb +6 -0
  350. data/generated/google/apis/displayvideo_v1/service.rb +47 -35
  351. data/generated/google/apis/displayvideo_v1beta.rb +38 -0
  352. data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
  353. data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
  354. data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
  355. data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
  356. data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
  357. data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
  358. data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
  359. data/generated/google/apis/displayvideo_v1dev.rb +38 -0
  360. data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
  361. data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
  362. data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
  363. data/generated/google/apis/dlp_v2.rb +1 -1
  364. data/generated/google/apis/dlp_v2/classes.rb +1102 -1302
  365. data/generated/google/apis/dlp_v2/representations.rb +16 -0
  366. data/generated/google/apis/dlp_v2/service.rb +962 -905
  367. data/generated/google/apis/dns_v1.rb +1 -1
  368. data/generated/google/apis/dns_v1/classes.rb +175 -198
  369. data/generated/google/apis/dns_v1/service.rb +82 -97
  370. data/generated/google/apis/dns_v1beta2.rb +1 -1
  371. data/generated/google/apis/dns_v1beta2/classes.rb +180 -205
  372. data/generated/google/apis/dns_v1beta2/service.rb +82 -97
  373. data/generated/google/apis/docs_v1.rb +1 -1
  374. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  375. data/generated/google/apis/docs_v1/service.rb +17 -22
  376. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  377. data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
  378. data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
  379. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  380. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  381. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +7 -14
  382. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  383. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  384. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +12 -20
  385. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  386. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  387. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  388. data/generated/google/apis/drive_v2.rb +1 -1
  389. data/generated/google/apis/drive_v2/classes.rb +14 -6
  390. data/generated/google/apis/drive_v2/representations.rb +1 -0
  391. data/generated/google/apis/drive_v2/service.rb +79 -15
  392. data/generated/google/apis/drive_v3.rb +1 -1
  393. data/generated/google/apis/drive_v3/classes.rb +14 -6
  394. data/generated/google/apis/drive_v3/representations.rb +1 -0
  395. data/generated/google/apis/drive_v3/service.rb +59 -11
  396. data/generated/google/apis/file_v1.rb +1 -1
  397. data/generated/google/apis/file_v1/classes.rb +154 -173
  398. data/generated/google/apis/file_v1/service.rb +43 -52
  399. data/generated/google/apis/file_v1beta1.rb +1 -1
  400. data/generated/google/apis/file_v1beta1/classes.rb +334 -193
  401. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  402. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  403. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  404. data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
  405. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  406. data/generated/google/apis/firebase_v1beta1/service.rb +8 -1
  407. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  408. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  409. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +186 -0
  410. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
  411. data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
  412. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  413. data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
  414. data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
  415. data/generated/google/apis/firebaserules_v1.rb +1 -1
  416. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  417. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  418. data/generated/google/apis/firestore_v1.rb +1 -1
  419. data/generated/google/apis/firestore_v1/classes.rb +402 -498
  420. data/generated/google/apis/firestore_v1/service.rb +165 -201
  421. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  422. data/generated/google/apis/firestore_v1beta1/classes.rb +334 -409
  423. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  424. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  425. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  426. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  427. data/generated/google/apis/fitness_v1.rb +85 -0
  428. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  429. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  430. data/generated/google/apis/fitness_v1/service.rb +626 -0
  431. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  432. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  433. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  434. data/generated/google/apis/games_management_v1management.rb +2 -3
  435. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  436. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  437. data/generated/google/apis/games_v1.rb +2 -3
  438. data/generated/google/apis/games_v1/classes.rb +76 -83
  439. data/generated/google/apis/games_v1/representations.rb +2 -0
  440. data/generated/google/apis/games_v1/service.rb +84 -90
  441. data/generated/google/apis/gameservices_v1.rb +1 -1
  442. data/generated/google/apis/gameservices_v1/classes.rb +7 -0
  443. data/generated/google/apis/gameservices_v1/representations.rb +1 -0
  444. data/generated/google/apis/gameservices_v1beta.rb +1 -1
  445. data/generated/google/apis/gameservices_v1beta/classes.rb +7 -0
  446. data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
  447. data/generated/google/apis/genomics_v1.rb +1 -1
  448. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  449. data/generated/google/apis/genomics_v1/service.rb +28 -43
  450. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  451. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  452. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  453. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  454. data/generated/google/apis/genomics_v2alpha1/classes.rb +252 -275
  455. data/generated/google/apis/genomics_v2alpha1/representations.rb +1 -0
  456. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  457. data/generated/google/apis/gmail_v1.rb +1 -1
  458. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  459. data/generated/google/apis/gmail_v1/service.rb +5 -4
  460. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  461. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +1 -1
  462. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  463. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  464. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  465. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  466. data/generated/google/apis/healthcare_v1.rb +1 -1
  467. data/generated/google/apis/healthcare_v1/classes.rb +637 -826
  468. data/generated/google/apis/healthcare_v1/representations.rb +32 -0
  469. data/generated/google/apis/healthcare_v1/service.rb +840 -854
  470. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  471. data/generated/google/apis/healthcare_v1beta1/classes.rb +1833 -1102
  472. data/generated/google/apis/healthcare_v1beta1/representations.rb +474 -0
  473. data/generated/google/apis/healthcare_v1beta1/service.rb +2476 -1281
  474. data/generated/google/apis/homegraph_v1.rb +4 -1
  475. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  476. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  477. data/generated/google/apis/iam_v1.rb +5 -2
  478. data/generated/google/apis/iam_v1/classes.rb +395 -592
  479. data/generated/google/apis/iam_v1/representations.rb +1 -0
  480. data/generated/google/apis/iam_v1/service.rb +431 -556
  481. data/generated/google/apis/iamcredentials_v1.rb +4 -2
  482. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  483. data/generated/google/apis/iamcredentials_v1/service.rb +15 -13
  484. data/generated/google/apis/iap_v1.rb +1 -1
  485. data/generated/google/apis/iap_v1/classes.rb +253 -355
  486. data/generated/google/apis/iap_v1/representations.rb +1 -0
  487. data/generated/google/apis/iap_v1/service.rb +61 -71
  488. data/generated/google/apis/iap_v1beta1.rb +1 -1
  489. data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
  490. data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
  491. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  492. data/generated/google/apis/indexing_v3.rb +1 -1
  493. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  494. data/generated/google/apis/jobs_v2.rb +1 -1
  495. data/generated/google/apis/jobs_v2/classes.rb +786 -1086
  496. data/generated/google/apis/jobs_v2/service.rb +85 -126
  497. data/generated/google/apis/jobs_v3.rb +1 -1
  498. data/generated/google/apis/jobs_v3/classes.rb +637 -856
  499. data/generated/google/apis/jobs_v3/service.rb +101 -139
  500. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  501. data/generated/google/apis/jobs_v3p1beta1/classes.rb +762 -1023
  502. data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
  503. data/generated/google/apis/kgsearch_v1.rb +1 -1
  504. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  505. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  506. data/generated/google/apis/licensing_v1.rb +4 -3
  507. data/generated/google/apis/licensing_v1/classes.rb +1 -1
  508. data/generated/google/apis/licensing_v1/service.rb +55 -85
  509. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  510. data/generated/google/apis/lifesciences_v2beta/classes.rb +262 -290
  511. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  512. data/generated/google/apis/localservices_v1.rb +31 -0
  513. data/generated/google/apis/localservices_v1/classes.rb +419 -0
  514. data/generated/google/apis/localservices_v1/representations.rb +172 -0
  515. data/generated/google/apis/localservices_v1/service.rb +199 -0
  516. data/generated/google/apis/logging_v2.rb +1 -1
  517. data/generated/google/apis/logging_v2/classes.rb +230 -227
  518. data/generated/google/apis/logging_v2/representations.rb +47 -0
  519. data/generated/google/apis/logging_v2/service.rb +2056 -673
  520. data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
  521. data/generated/google/apis/managedidentities_v1alpha1/classes.rb +8 -0
  522. data/generated/google/apis/managedidentities_v1alpha1/representations.rb +1 -0
  523. data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
  524. data/generated/google/apis/managedidentities_v1beta1/classes.rb +8 -0
  525. data/generated/google/apis/managedidentities_v1beta1/representations.rb +1 -0
  526. data/generated/google/apis/manufacturers_v1.rb +1 -1
  527. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  528. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  529. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  530. data/generated/google/apis/memcache_v1beta2/classes.rb +170 -249
  531. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  532. data/generated/google/apis/memcache_v1beta2/service.rb +58 -71
  533. data/generated/google/apis/ml_v1.rb +1 -1
  534. data/generated/google/apis/ml_v1/classes.rb +956 -1144
  535. data/generated/google/apis/ml_v1/representations.rb +65 -0
  536. data/generated/google/apis/ml_v1/service.rb +194 -253
  537. data/generated/google/apis/monitoring_v1.rb +1 -1
  538. data/generated/google/apis/monitoring_v1/classes.rb +107 -26
  539. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  540. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  541. data/generated/google/apis/monitoring_v3.rb +1 -1
  542. data/generated/google/apis/monitoring_v3/classes.rb +232 -328
  543. data/generated/google/apis/monitoring_v3/service.rb +121 -140
  544. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  545. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  546. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  547. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  548. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
  549. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
  550. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  551. data/generated/google/apis/osconfig_v1.rb +1 -1
  552. data/generated/google/apis/osconfig_v1/classes.rb +154 -902
  553. data/generated/google/apis/osconfig_v1/representations.rb +0 -337
  554. data/generated/google/apis/osconfig_v1/service.rb +22 -27
  555. data/generated/google/apis/osconfig_v1beta.rb +1 -1
  556. data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
  557. data/generated/google/apis/osconfig_v1beta/service.rb +39 -52
  558. data/generated/google/apis/oslogin_v1.rb +1 -1
  559. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  560. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  561. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  562. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  563. data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
  564. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  565. data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
  566. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  567. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  568. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  569. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  570. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  571. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  572. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  573. data/generated/google/apis/people_v1.rb +1 -1
  574. data/generated/google/apis/people_v1/classes.rb +121 -12
  575. data/generated/google/apis/people_v1/representations.rb +41 -0
  576. data/generated/google/apis/people_v1/service.rb +47 -45
  577. data/generated/google/apis/playablelocations_v3.rb +1 -1
  578. data/generated/google/apis/playablelocations_v3/classes.rb +108 -155
  579. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  580. data/generated/google/apis/playcustomapp_v1.rb +1 -1
  581. data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
  582. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  583. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
  584. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
  585. data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +479 -0
  586. data/generated/google/apis/pubsub_v1.rb +1 -1
  587. data/generated/google/apis/pubsub_v1/classes.rb +399 -518
  588. data/generated/google/apis/pubsub_v1/representations.rb +2 -0
  589. data/generated/google/apis/pubsub_v1/service.rb +220 -246
  590. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  591. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  592. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  593. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  594. data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
  595. data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
  596. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  597. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  598. data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
  599. data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
  600. data/generated/google/apis/{memcache_v1 → pubsublite_v1}/service.rb +228 -228
  601. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  602. data/generated/google/apis/realtimebidding_v1/classes.rb +4 -4
  603. data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
  604. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  605. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +335 -456
  606. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +0 -16
  607. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  608. data/generated/google/apis/redis_v1.rb +1 -1
  609. data/generated/google/apis/redis_v1/classes.rb +91 -513
  610. data/generated/google/apis/redis_v1/representations.rb +0 -139
  611. data/generated/google/apis/redis_v1/service.rb +93 -110
  612. data/generated/google/apis/redis_v1beta1.rb +1 -1
  613. data/generated/google/apis/redis_v1beta1/classes.rb +95 -517
  614. data/generated/google/apis/redis_v1beta1/representations.rb +0 -139
  615. data/generated/google/apis/redis_v1beta1/service.rb +93 -110
  616. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  617. data/generated/google/apis/remotebuildexecution_v1/classes.rb +951 -1078
  618. data/generated/google/apis/remotebuildexecution_v1/representations.rb +61 -0
  619. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  620. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  621. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +946 -1071
  622. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +61 -0
  623. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  624. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  625. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1099 -1250
  626. data/generated/google/apis/remotebuildexecution_v2/representations.rb +61 -0
  627. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  628. data/generated/google/apis/reseller_v1.rb +2 -2
  629. data/generated/google/apis/reseller_v1/classes.rb +151 -219
  630. data/generated/google/apis/reseller_v1/service.rb +122 -173
  631. data/generated/google/apis/run_v1.rb +1 -1
  632. data/generated/google/apis/run_v1/classes.rb +12 -135
  633. data/generated/google/apis/run_v1/representations.rb +1 -62
  634. data/generated/google/apis/run_v1/service.rb +0 -342
  635. data/generated/google/apis/run_v1alpha1.rb +1 -1
  636. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  637. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  638. data/generated/google/apis/run_v1beta1.rb +1 -1
  639. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  640. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  641. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +302 -412
  642. data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
  643. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  644. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  645. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  646. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  647. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  648. data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
  649. data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
  650. data/generated/google/apis/sasportal_v1alpha1/service.rb +479 -0
  651. data/generated/google/apis/script_v1.rb +1 -1
  652. data/generated/google/apis/script_v1/classes.rb +88 -111
  653. data/generated/google/apis/script_v1/service.rb +63 -69
  654. data/generated/google/apis/searchconsole_v1.rb +7 -1
  655. data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
  656. data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
  657. data/generated/google/apis/searchconsole_v1/service.rb +287 -0
  658. data/generated/google/apis/secretmanager_v1.rb +1 -1
  659. data/generated/google/apis/secretmanager_v1/classes.rb +379 -365
  660. data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
  661. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  662. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  663. data/generated/google/apis/secretmanager_v1beta1/classes.rb +218 -363
  664. data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
  665. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  666. data/generated/google/apis/securitycenter_v1.rb +1 -1
  667. data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
  668. data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
  669. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  670. data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
  671. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
  672. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  673. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
  674. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
  675. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  676. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  677. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +30 -60
  678. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +31 -29
  679. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  680. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +19 -49
  681. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  682. data/generated/google/apis/servicecontrol_v1/classes.rb +525 -641
  683. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  684. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  685. data/generated/google/apis/servicecontrol_v2/classes.rb +281 -325
  686. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  687. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  688. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
  689. data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
  690. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  691. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  692. data/generated/google/apis/servicemanagement_v1/classes.rb +1263 -2135
  693. data/generated/google/apis/servicemanagement_v1/representations.rb +0 -13
  694. data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
  695. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  696. data/generated/google/apis/servicenetworking_v1/classes.rb +93 -57
  697. data/generated/google/apis/servicenetworking_v1/representations.rb +52 -1
  698. data/generated/google/apis/servicenetworking_v1/service.rb +116 -0
  699. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  700. data/generated/google/apis/servicenetworking_v1beta/classes.rb +74 -48
  701. data/generated/google/apis/servicenetworking_v1beta/representations.rb +38 -0
  702. data/generated/google/apis/serviceusage_v1.rb +1 -1
  703. data/generated/google/apis/serviceusage_v1/classes.rb +52 -48
  704. data/generated/google/apis/serviceusage_v1/representations.rb +4 -0
  705. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  706. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  707. data/generated/google/apis/serviceusage_v1beta1/classes.rb +87 -49
  708. data/generated/google/apis/serviceusage_v1beta1/representations.rb +8 -0
  709. data/generated/google/apis/sheets_v4.rb +1 -1
  710. data/generated/google/apis/sheets_v4/classes.rb +3933 -5008
  711. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  712. data/generated/google/apis/sheets_v4/service.rb +113 -149
  713. data/generated/google/apis/site_verification_v1.rb +1 -1
  714. data/generated/google/apis/slides_v1.rb +1 -1
  715. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  716. data/generated/google/apis/slides_v1/service.rb +23 -30
  717. data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
  718. data/generated/google/apis/smartdevicemanagement_v1/classes.rb +313 -0
  719. data/generated/google/apis/{accessapproval_v1beta1 → smartdevicemanagement_v1}/representations.rb +44 -73
  720. data/generated/google/apis/smartdevicemanagement_v1/service.rb +312 -0
  721. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  722. data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
  723. data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
  724. data/generated/google/apis/spanner_v1.rb +1 -1
  725. data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
  726. data/generated/google/apis/spanner_v1/representations.rb +1 -0
  727. data/generated/google/apis/spanner_v1/service.rb +443 -618
  728. data/generated/google/apis/speech_v1.rb +1 -1
  729. data/generated/google/apis/speech_v1/classes.rb +174 -220
  730. data/generated/google/apis/speech_v1/service.rb +27 -32
  731. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  732. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  733. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  734. data/generated/google/apis/speech_v2beta1.rb +1 -1
  735. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  736. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  737. data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
  738. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +469 -452
  739. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +122 -87
  740. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
  741. data/generated/google/apis/storage_v1.rb +1 -1
  742. data/generated/google/apis/storage_v1/classes.rb +8 -7
  743. data/generated/google/apis/storage_v1/representations.rb +2 -2
  744. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  745. data/generated/google/apis/storagetransfer_v1/classes.rb +261 -339
  746. data/generated/google/apis/storagetransfer_v1/service.rb +43 -64
  747. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  748. data/generated/google/apis/streetviewpublish_v1/classes.rb +106 -148
  749. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  750. data/generated/google/apis/sts_v1.rb +32 -0
  751. data/generated/google/apis/sts_v1/classes.rb +120 -0
  752. data/generated/google/apis/sts_v1/representations.rb +59 -0
  753. data/generated/google/apis/sts_v1/service.rb +90 -0
  754. data/generated/google/apis/sts_v1beta.rb +32 -0
  755. data/generated/google/apis/sts_v1beta/classes.rb +194 -0
  756. data/generated/google/apis/{oauth2_v2 → sts_v1beta}/representations.rb +14 -21
  757. data/generated/google/apis/sts_v1beta/service.rb +92 -0
  758. data/generated/google/apis/tagmanager_v1.rb +1 -1
  759. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  760. data/generated/google/apis/tagmanager_v2.rb +1 -1
  761. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  762. data/generated/google/apis/tasks_v1.rb +1 -1
  763. data/generated/google/apis/tasks_v1/classes.rb +21 -22
  764. data/generated/google/apis/tasks_v1/service.rb +19 -19
  765. data/generated/google/apis/testing_v1.rb +1 -1
  766. data/generated/google/apis/testing_v1/classes.rb +317 -382
  767. data/generated/google/apis/testing_v1/representations.rb +2 -0
  768. data/generated/google/apis/testing_v1/service.rb +22 -28
  769. data/generated/google/apis/texttospeech_v1.rb +1 -1
  770. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  771. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  772. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  773. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  774. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  775. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  776. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  777. data/generated/google/apis/toolresults_v1beta3/classes.rb +7 -0
  778. data/generated/google/apis/toolresults_v1beta3/representations.rb +1 -0
  779. data/generated/google/apis/tpu_v1.rb +1 -1
  780. data/generated/google/apis/tpu_v1/classes.rb +54 -0
  781. data/generated/google/apis/tpu_v1/representations.rb +19 -0
  782. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  783. data/generated/google/apis/tpu_v1alpha1/classes.rb +54 -0
  784. data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
  785. data/generated/google/apis/trafficdirector_v2.rb +34 -0
  786. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  787. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  788. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  789. data/generated/google/apis/translate_v3.rb +1 -1
  790. data/generated/google/apis/translate_v3/classes.rb +148 -175
  791. data/generated/google/apis/translate_v3/service.rb +122 -151
  792. data/generated/google/apis/translate_v3beta1.rb +1 -1
  793. data/generated/google/apis/translate_v3beta1/classes.rb +149 -170
  794. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  795. data/generated/google/apis/vault_v1.rb +1 -1
  796. data/generated/google/apis/vault_v1/classes.rb +80 -103
  797. data/generated/google/apis/vault_v1/service.rb +31 -37
  798. data/generated/google/apis/vectortile_v1.rb +1 -1
  799. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  800. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  801. data/generated/google/apis/verifiedaccess_v1.rb +1 -1
  802. data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
  803. data/generated/google/apis/videointelligence_v1.rb +1 -1
  804. data/generated/google/apis/videointelligence_v1/classes.rb +753 -918
  805. data/generated/google/apis/videointelligence_v1/service.rb +71 -48
  806. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  807. data/generated/google/apis/videointelligence_v1beta2/classes.rb +748 -911
  808. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  809. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  810. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +748 -911
  811. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  812. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  813. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +748 -911
  814. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  815. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  816. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +754 -920
  817. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  818. data/generated/google/apis/vision_v1.rb +1 -1
  819. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  820. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  821. data/generated/google/apis/webfonts_v1.rb +2 -3
  822. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  823. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  824. data/generated/google/apis/websecurityscanner_v1.rb +1 -1
  825. data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
  826. data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
  827. data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
  828. data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
  829. data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
  830. data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
  831. data/generated/google/apis/workflows_v1beta.rb +34 -0
  832. data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
  833. data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
  834. data/generated/google/apis/workflows_v1beta/service.rb +437 -0
  835. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  836. data/generated/google/apis/youtube_v3.rb +1 -1
  837. data/generated/google/apis/youtube_v3/classes.rb +0 -586
  838. data/generated/google/apis/youtube_v3/representations.rb +0 -269
  839. data/generated/google/apis/youtube_v3/service.rb +0 -117
  840. data/google-api-client.gemspec +2 -1
  841. data/lib/google/apis/version.rb +1 -1
  842. metadata +106 -40
  843. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  844. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  845. data/generated/google/apis/dns_v2beta1.rb +0 -43
  846. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  847. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  848. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  849. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  850. data/generated/google/apis/oauth2_v2.rb +0 -40
  851. data/generated/google/apis/oauth2_v2/classes.rb +0 -165
  852. data/generated/google/apis/oauth2_v2/service.rb +0 -158
  853. data/generated/google/apis/plus_v1.rb +0 -43
  854. data/generated/google/apis/plus_v1/classes.rb +0 -2094
  855. data/generated/google/apis/plus_v1/representations.rb +0 -907
  856. data/generated/google/apis/plus_v1/service.rb +0 -451
  857. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  858. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  859. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  860. data/generated/google/apis/storage_v1beta2.rb +0 -40
  861. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  862. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  863. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
@@ -220,6 +220,18 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -262,6 +274,12 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -735,6 +753,37 @@ module Google
  end
  end
 
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :container_image_sources, as: 'containerImageSources', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_add_capabilities, as: 'dockerAddCapabilities', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_chroot_path, as: 'dockerChrootPath', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_network, as: 'dockerNetwork', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_privileged, as: 'dockerPrivileged', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_run_as_root, as: 'dockerRunAsRoot', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_runtime, as: 'dockerRuntime', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :docker_sibling_containers, as: 'dockerSiblingContainers', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature::Representation
+
+ property :linux_isolation, as: 'linuxIsolation'
+ end
+ end
+
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ collection :allowed_values, as: 'allowedValues'
+ property :policy, as: 'policy'
+ end
+ end
+
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -752,6 +801,8 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
+ property :feature_policy, as: 'featurePolicy', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy::Representation
+
  property :location, as: 'location'
  property :logging_enabled, as: 'loggingEnabled'
  property :name, as: 'name'
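The new `FeaturePolicy` surface is plain data: each Docker-related field holds a `FeaturePolicyFeature` with a `policy` string and, for restricted features, a list of `allowed_values`. A minimal sketch of wiring it into an instance; the `'FORBIDDEN'`/`'RESTRICTED'` policy strings and the registry prefix are illustrative assumptions, not values taken from this diff:

```ruby
require 'google/apis/remotebuildexecution_v1alpha'

Admin = Google::Apis::RemotebuildexecutionV1alpha

# Forbid privileged Docker runs and restrict container image sources.
# Policy enum strings and the registry prefix are assumed, not from the diff.
feature_policy = Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy.new(
  docker_privileged: Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature.new(
    policy: 'FORBIDDEN'
  ),
  container_image_sources: Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature.new(
    policy: 'RESTRICTED',
    allowed_values: ['gcr.io/my-project'] # hypothetical registry prefix
  )
)

# Attach the policy via the new `feature_policy` property on Instance.
instance = Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance.new(
  feature_policy: feature_policy,
  logging_enabled: true
)
```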
@@ -790,6 +841,14 @@ module Google
  end
  end
 
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :node_type, as: 'nodeType'
+ property :nodes_zone, as: 'nodesZone'
+ end
+ end
+
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -823,6 +882,8 @@ module Google
  property :min_cpu_platform, as: 'minCpuPlatform'
  property :network_access, as: 'networkAccess'
  property :reserved, as: 'reserved'
+ property :sole_tenancy, as: 'soleTenancy', class: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig, decorator: Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig::Representation
+
  property :vm_image, as: 'vmImage'
  end
  end
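Likewise for the new `sole_tenancy` property: the hunk shows it landing next to `min_cpu_platform` and `vm_image`, which places it on the worker configuration. A short sketch; the `...WorkerConfig` class name is inferred from those neighbouring properties, and the node type and zone are placeholder values:

```ruby
require 'google/apis/remotebuildexecution_v1alpha'

Admin = Google::Apis::RemotebuildexecutionV1alpha

# Pin workers to sole-tenant nodes; nodeType/nodesZone values are examples.
sole_tenancy = Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig.new(
  node_type: 'n1-node-96-624',
  nodes_zone: 'us-central1-a'
)

# WorkerConfig class name inferred from the surrounding properties in the hunk.
worker_config = Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig.new(
  machine_type: 'n1-standard-8',
  reserved: true,
  sole_tenancy: sole_tenancy
)
```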
@@ -47,13 +47,13 @@ module Google
  @batch_path = 'batch'
  end
 
- # Creates a new instance in the specified region.
- # Returns a long running operation which contains an instance on completion.
- # While the long running operation is in progress, any call to `GetInstance`
- # returns an instance in state `CREATING`.
+ # Creates a new instance in the specified region. Returns a long running
+ # operation which contains an instance on completion. While the long running
+ # operation is in progress, any call to `GetInstance` returns an instance in
+ # state `CREATING`.
  # @param [String] parent
- # Resource name of the project containing the instance.
- # Format: `projects/[PROJECT_ID]`.
+ # Resource name of the project containing the instance. Format: `projects/[
+ # PROJECT_ID]`.
  # @param [Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateInstanceRequest] google_devtools_remotebuildexecution_admin_v1alpha_create_instance_request_object
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
@@ -84,14 +84,12 @@ module Google
  execute_or_queue_command(command, &block)
  end
 
- # Deletes the specified instance.
- # Returns a long running operation which contains a `google.protobuf.Empty`
- # response on completion.
- # Deleting an instance with worker pools in it will delete these worker
- # pools.
+ # Deletes the specified instance. Returns a long running operation which
+ # contains a `google.protobuf.Empty` response on completion. Deleting an
+ # instance with worker pools in it will delete these worker pools.
  # @param [String] name
- # Name of the instance to delete.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Name of the instance to delete. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]`.
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
  # @param [String] quota_user
@@ -121,8 +119,8 @@ module Google
 
  # Returns the specified instance.
  # @param [String] name
- # Name of the instance to retrieve.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Name of the instance to retrieve. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]`.
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
  # @param [String] quota_user
@@ -152,8 +150,7 @@ module Google
 
  # Lists instances in a project.
  # @param [String] parent
- # Resource name of the project.
- # Format: `projects/[PROJECT_ID]`.
+ # Resource name of the project. Format: `projects/[PROJECT_ID]`.
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
  # @param [String] quota_user
@@ -181,13 +178,65 @@ module Google
  execute_or_queue_command(command, &block)
  end
 
- # Creates a new worker pool with a specified size and configuration.
- # Returns a long running operation which contains a worker pool on
- # completion. While the long running operation is in progress, any call to
- # `GetWorkerPool` returns a worker pool in state `CREATING`.
+ # Updates the specified instance. Returns a long running operation which
+ # contains the updated instance in the response on completion.
+ # @param [String] name
+ # Output only. Instance resource name formatted as: `projects/[PROJECT_ID]/
+ # instances/[INSTANCE_ID]`. Name should not be populated when creating an
+ # instance since it is provided in the `instance_id` field.
+ # @param [Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance] google_devtools_remotebuildexecution_admin_v1alpha_instance_object
+ # @param [Boolean] logging_enabled
+ # Deprecated, use instance.logging_enabled instead. Whether to enable
+ # Stackdriver logging for this instance.
+ # @param [String] name1
+ # Deprecated, use instance.Name instead. Name of the instance to update. Format:
+ # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # @param [String] update_mask
+ # The update mask applies to instance. For the `FieldMask` definition, see https:
+ # //developers.google.com/protocol-buffers/docs/reference/google.protobuf#
+ # fieldmask If an empty update_mask is provided, only the non-default valued
+ # field in the worker pool field will be updated. Note that in order to update a
+ # field to the default value (zero, false, empty string) an explicit update_mask
+ # must be provided.
+ # @param [String] fields
+ # Selector specifying which fields to include in a partial response.
+ # @param [String] quota_user
+ # Available to use for quota purposes for server-side applications. Can be any
+ # arbitrary string assigned to a user, but should not exceed 40 characters.
+ # @param [Google::Apis::RequestOptions] options
+ # Request-specific options
+ #
+ # @yield [result, err] Result & error if block supplied
+ # @yieldparam result [Google::Apis::RemotebuildexecutionV1alpha::GoogleLongrunningOperation] parsed result object
+ # @yieldparam err [StandardError] error object if request failed
+ #
+ # @return [Google::Apis::RemotebuildexecutionV1alpha::GoogleLongrunningOperation]
+ #
+ # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried
+ # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification
+ # @raise [Google::Apis::AuthorizationError] Authorization is required
+ def patch_project_instance(name, google_devtools_remotebuildexecution_admin_v1alpha_instance_object = nil, logging_enabled: nil, name1: nil, update_mask: nil, fields: nil, quota_user: nil, options: nil, &block)
+ command = make_simple_command(:patch, 'v1alpha/{+name}', options)
+ command.request_representation = Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance::Representation
+ command.request_object = google_devtools_remotebuildexecution_admin_v1alpha_instance_object
+ command.response_representation = Google::Apis::RemotebuildexecutionV1alpha::GoogleLongrunningOperation::Representation
+ command.response_class = Google::Apis::RemotebuildexecutionV1alpha::GoogleLongrunningOperation
+ command.params['name'] = name unless name.nil?
+ command.query['loggingEnabled'] = logging_enabled unless logging_enabled.nil?
+ command.query['name1'] = name1 unless name1.nil?
+ command.query['updateMask'] = update_mask unless update_mask.nil?
+ command.query['fields'] = fields unless fields.nil?
+ command.query['quotaUser'] = quota_user unless quota_user.nil?
+ execute_or_queue_command(command, &block)
+ end
+
+ # Creates a new worker pool with a specified size and configuration. Returns a
+ # long running operation which contains a worker pool on completion. While the
+ # long running operation is in progress, any call to `GetWorkerPool` returns a
+ # worker pool in state `CREATING`.
  # @param [String] parent
- # Resource name of the instance in which to create the new worker pool.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Resource name of the instance in which to create the new worker pool. Format: `
+ # projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
  # @param [Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateWorkerPoolRequest] google_devtools_remotebuildexecution_admin_v1alpha_create_worker_pool_request_object
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
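The added `patch_project_instance` method is the interesting part of this hunk. A usage sketch under the usual google-api-client conventions; the `RemoteBuildExecutionService` class name is an assumption, and note the doc's warning that clearing a field to its default requires an explicit `update_mask`:

```ruby
require 'google/apis/remotebuildexecution_v1alpha'
require 'googleauth'

Admin = Google::Apis::RemotebuildexecutionV1alpha

service = Admin::RemoteBuildExecutionService.new # service class name assumed
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

# Disable logging on an existing instance. Because `false` is the default
# value, an explicit update_mask is required per the comment above.
instance = Admin::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance.new(
  logging_enabled: false
)
operation = service.patch_project_instance(
  'projects/my-project/instances/my-instance', # hypothetical resource name
  instance,
  update_mask: 'logging_enabled'
)
puts operation.name # a long-running operation; poll it until done
```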
@@ -218,15 +267,13 @@ module Google
  execute_or_queue_command(command, &block)
  end
 
- # Deletes the specified worker pool.
- # Returns a long running operation, which contains a `google.protobuf.Empty`
- # response on completion.
- # While the long running operation is in progress, any call to
- # `GetWorkerPool` returns a worker pool in state `DELETING`.
+ # Deletes the specified worker pool. Returns a long running operation, which
+ # contains a `google.protobuf.Empty` response on completion. While the long
+ # running operation is in progress, any call to `GetWorkerPool` returns a worker
+ # pool in state `DELETING`.
  # @param [String] name
- # Name of the worker pool to delete.
- # Format:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
+ # Name of the worker pool to delete. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
  # @param [String] quota_user
@@ -256,9 +303,8 @@ module Google
 
  # Returns the specified worker pool.
  # @param [String] name
- # Name of the worker pool to retrieve.
- # Format:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
+ # Name of the worker pool to retrieve. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
  # @param [String] quota_user
@@ -288,28 +334,22 @@ module Google
 
  # Lists worker pools in an instance.
  # @param [String] parent
- # Resource name of the instance.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Resource name of the instance. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]`.
  # @param [String] filter
- # Optional. A filter expression that filters resources listed in
- # the response. The expression must specify the field name, a comparison
- # operator, and the value that you want to use for filtering. The value
- # must be a string, a number, or a boolean. String values are
- # case-insensitive.
- # The comparison operator must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or
- # `<`.
- # The `:` operator can be used with string fields to match substrings.
- # For non-string fields it is equivalent to the `=` operator.
- # The `:*` comparison can be used to test whether a key has been defined.
- # You can also filter on nested fields.
- # To filter on multiple expressions, you can separate expression using
- # `AND` and `OR` operators, using parentheses to specify precedence. If
- # neither operator is specified, `AND` is assumed.
- # Examples:
- # Include only pools with more than 100 reserved workers:
- # `(worker_count > 100) (worker_config.reserved = true)`
- # Include only pools with a certain label or machines of the n1-standard
- # family:
+ # Optional. A filter expression that filters resources listed in the response.
+ # The expression must specify the field name, a comparison operator, and the
+ # value that you want to use for filtering. The value must be a string, a number,
+ # or a boolean. String values are case-insensitive. The comparison operator
+ # must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or `<`. The `:` operator can be
+ # used with string fields to match substrings. For non-string fields it is
+ # equivalent to the `=` operator. The `:*` comparison can be used to test
+ # whether a key has been defined. You can also filter on nested fields. To
+ # filter on multiple expressions, you can separate expression using `AND` and `
+ # OR` operators, using parentheses to specify precedence. If neither operator is
+ # specified, `AND` is assumed. Examples: Include only pools with more than 100
+ # reserved workers: `(worker_count > 100) (worker_config.reserved = true)`
+ # Include only pools with a certain label or machines of the n1-standard family:
  # `worker_config.labels.key1 : * OR worker_config.machine_type: n1-standard`
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
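The reflowed filter grammar above is easiest to read next to a concrete call. A sketch using the documented examples; the `list_project_instance_worker_pools` method name and the `worker_pools` response field follow the generator's usual naming and are assumptions:

```ruby
require 'google/apis/remotebuildexecution_v1alpha'
require 'googleauth'

Admin = Google::Apis::RemotebuildexecutionV1alpha

service = Admin::RemoteBuildExecutionService.new # service class name assumed
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

# The filter string comes straight from the documentation above; with no
# AND/OR between the two expressions, AND is assumed.
response = service.list_project_instance_worker_pools(
  'projects/my-project/instances/my-instance', # hypothetical parent
  filter: '(worker_count > 100) (worker_config.reserved = true)'
)
(response.worker_pools || []).each { |pool| puts pool.name }
```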
@@ -340,14 +380,13 @@ module Google
  end
 
  # Updates an existing worker pool with a specified size and/or configuration.
- # Returns a long running operation, which contains a worker pool on
- # completion. While the long running operation is in progress, any call to
- # `GetWorkerPool` returns a worker pool in state `UPDATING`.
+ # Returns a long running operation, which contains a worker pool on completion.
+ # While the long running operation is in progress, any call to `GetWorkerPool`
+ # returns a worker pool in state `UPDATING`.
  # @param [String] name
- # WorkerPool resource name formatted as:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
- # name should not be populated when creating a worker pool since it is
- # provided in the `poolId` field.
+ # WorkerPool resource name formatted as: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]/workerpools/[POOL_ID]`. name should not be populated when
+ # creating a worker pool since it is provided in the `poolId` field.
  # @param [Google::Apis::RemotebuildexecutionV1alpha::GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateWorkerPoolRequest] google_devtools_remotebuildexecution_admin_v1alpha_update_worker_pool_request_object
  # @param [String] fields
  # Selector specifying which fields to include in a partial response.
@@ -378,9 +417,8 @@ module Google
  execute_or_queue_command(command, &block)
  end
 
- # Gets the latest state of a long-running operation. Clients can use this
- # method to poll the operation result at intervals as recommended by the API
- # service.
+ # Gets the latest state of a long-running operation. Clients can use this method
+ # to poll the operation result at intervals as recommended by the API service.
  # @param [String] name
  # The name of the operation resource.
  # @param [String] fields
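Since every create/patch/delete above hands back a long-running operation, the polling loop this comment describes might look like the following; `get_project_operation` follows the generator's usual naming for this surface and is an assumption:

```ruby
require 'google/apis/remotebuildexecution_v1alpha'
require 'googleauth'

Admin = Google::Apis::RemotebuildexecutionV1alpha

service = Admin::RemoteBuildExecutionService.new # service class name assumed
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

# Poll until the operation completes; the operation name would normally come
# from a prior create/patch/delete call rather than a literal.
op = service.get_project_operation('projects/my-project/operations/op-123')
until op.done
  sleep 5 # poll "at intervals as recommended by the API service"
  op = service.get_project_operation(op.name)
end
raise "operation failed: #{op.error.message}" if op.error
```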
@@ -25,7 +25,7 @@ module Google
  # @see https://cloud.google.com/remote-build-execution/docs/
  module RemotebuildexecutionV2
  VERSION = 'V2'
- REVISION = '20200721'
+ REVISION = '20200901'
 
  # View and manage your data across Google Cloud Platform services
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -23,120 +23,107 @@ module Google
  module RemotebuildexecutionV2
 
  # An `Action` captures all the information about an execution which is required
- # to reproduce it.
- # `Action`s are the core component of the [Execution] service. A single
- # `Action` represents a repeatable action that can be performed by the
+ # to reproduce it. `Action`s are the core component of the [Execution] service.
+ # A single `Action` represents a repeatable action that can be performed by the
  # execution service. `Action`s can be succinctly identified by the digest of
  # their wire format encoding and, once an `Action` has been executed, will be
  # cached in the action cache. Future requests can then use the cached result
- # rather than needing to run afresh.
- # When a server completes execution of an
- # Action, it MAY choose to
- # cache the result in
- # the ActionCache unless
- # `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
- # default, future calls to
- # Execute the same
- # `Action` will also serve their results from the cache. Clients must take care
- # to understand the caching behaviour. Ideally, all `Action`s will be
- # reproducible so that serving a result from cache is always desirable and
- # correct.
+ # rather than needing to run afresh. When a server completes execution of an
+ # Action, it MAY choose to cache the result in the ActionCache unless `
+ # do_not_cache` is `true`. Clients SHOULD expect the server to do so. By default,
+ # future calls to Execute the same `Action` will also serve their results from
+ # the cache. Clients must take care to understand the caching behaviour. Ideally,
+ # all `Action`s will be reproducible so that serving a result from cache is
+ # always desirable and correct.
  class BuildBazelRemoteExecutionV2Action
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `commandDigest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :command_digest
 
- # If true, then the `Action`'s result cannot be cached, and in-flight
- # requests for the same `Action` may not be merged.
+ # If true, then the `Action`'s result cannot be cached, and in-flight requests
+ # for the same `Action` may not be merged.
  # Corresponds to the JSON property `doNotCache`
  # @return [Boolean]
  attr_accessor :do_not_cache
  alias_method :do_not_cache?, :do_not_cache
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `inputRootDigest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :input_root_digest
 
- # List of required supported NodeProperty
- # keys. In order to ensure that equivalent `Action`s always hash to the same
- # value, the supported node properties MUST be lexicographically sorted by name.
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
- # The interpretation of these properties is server-dependent. If a property is
- # not recognized by the server, the server will return an `INVALID_ARGUMENT`
- # error.
+ # List of required supported NodeProperty keys. In order to ensure that
+ # equivalent `Action`s always hash to the same value, the supported node
+ # properties MUST be lexicographically sorted by name. Sorting of strings is
+ # done by code point, equivalently, by the UTF-8 bytes. The interpretation of
+ # these properties is server-dependent. If a property is not recognized by the
+ # server, the server will return an `INVALID_ARGUMENT` error.
  # Corresponds to the JSON property `outputNodeProperties`
  # @return [Array<String>]
  attr_accessor :output_node_properties
 
- # A timeout after which the execution should be killed. If the timeout is
- # absent, then the client is specifying that the execution should continue
- # as long as the server will let it. The server SHOULD impose a timeout if
- # the client does not specify one, however, if the client does specify a
- # timeout that is longer than the server's maximum timeout, the server MUST
- # reject the request.
- # The timeout is a part of the
- # Action message, and
- # therefore two `Actions` with different timeouts are different, even if they
- # are otherwise identical. This is because, if they were not, running an
- # `Action` with a lower timeout than is required might result in a cache hit
- # from an execution run with a longer timeout, hiding the fact that the
- # timeout is too short. By encoding it directly in the `Action`, a lower
- # timeout will result in a cache miss and the execution timeout will fail
- # immediately, rather than whenever the cache entry gets evicted.
+ # A timeout after which the execution should be killed. If the timeout is absent,
+ # then the client is specifying that the execution should continue as long as
+ # the server will let it. The server SHOULD impose a timeout if the client does
+ # not specify one, however, if the client does specify a timeout that is longer
+ # than the server's maximum timeout, the server MUST reject the request. The
+ # timeout is a part of the Action message, and therefore two `Actions` with
+ # different timeouts are different, even if they are otherwise identical. This
+ # is because, if they were not, running an `Action` with a lower timeout than is
+ # required might result in a cache hit from an execution run with a longer
+ # timeout, hiding the fact that the timeout is too short. By encoding it
+ # directly in the `Action`, a lower timeout will result in a cache miss and the
+ # execution timeout will fail immediately, rather than whenever the cache entry
+ # gets evicted.
  # Corresponds to the JSON property `timeout`
  # @return [String]
  attr_accessor :timeout
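The `Digest` contract repeated throughout these comments (hash plus exact byte size, both mandatory) is mechanical on the client side. A minimal sketch, assuming the server's digest function is SHA-256, which is server-defined per the comment and merely the common choice; the `hash_prop` accessor name reflects the generator's reserved-word handling for the JSON `hash` field and is an assumption:

```ruby
require 'digest'
require 'google/apis/remotebuildexecution_v2'

# Both halves of the digest must describe the same bytes: a correct hash with
# a wrong size_bytes MUST be rejected by the server.
blob = File.binread('command.bin') # hypothetical serialized Command message
digest = Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest.new(
  hash_prop: Digest::SHA256.hexdigest(blob), # SHA-256 assumed; `hash` is exposed as `hash_prop`
  size_bytes: blob.bytesize
)
```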
@@ -175,8 +162,7 @@ module Google
  end
  end
 
- # An ActionResult represents the result of an
- # Action being run.
+ # An ActionResult represents the result of an Action being run.
  class BuildBazelRemoteExecutionV2ActionResult
  include Google::Apis::Core::Hashable
 
@@ -190,84 +176,41 @@ module Google
  # @return [Fixnum]
  attr_accessor :exit_code
 
- # The output directories of the action. For each output directory requested
- # in the `output_directories` or `output_paths` field of the Action, if the
+ # The output directories of the action. For each output directory requested in
+ # the `output_directories` or `output_paths` field of the Action, if the
  # corresponding directory existed after the action completed, a single entry
- # will be present in the output list, which will contain the digest of a
- # Tree message containing the
- # directory tree, and the path equal exactly to the corresponding Action
- # output_directories member.
- # As an example, suppose the Action had an output directory `a/b/dir` and the
- # execution produced the following contents in `a/b/dir`: a file named `bar`
- # and a directory named `foo` with an executable file named `baz`. Then,
- # output_directory will contain (hashes shortened for readability):
- # ```json
- # // OutputDirectory proto:
- # `
- # path: "a/b/dir"
- # tree_digest: `
- # hash: "4a73bc9d03...",
- # size: 55
- # `
- # `
- # // Tree proto with hash "4a73bc9d03..." and size 55:
- # `
- # root: `
- # files: [
- # `
- # name: "bar",
- # digest: `
- # hash: "4a73bc9d03...",
- # size: 65534
- # `
- # `
- # ],
- # directories: [
- # `
- # name: "foo",
- # digest: `
- # hash: "4cf2eda940...",
- # size: 43
- # `
- # `
- # ]
- # `
- # children : `
- # // (Directory proto with hash "4cf2eda940..." and size 43)
- # files: [
- # `
- # name: "baz",
- # digest: `
- # hash: "b2c941073e...",
- # size: 1294,
- # `,
- # is_executable: true
- # `
- # ]
- # `
- # `
- # ```
- # If an output of the same name as listed in `output_files` of
- # the Command was found in `output_directories`, but was not a directory, the
- # server will return a FAILED_PRECONDITION.
+ # will be present in the output list, which will contain the digest of a Tree
+ # message containing the directory tree, and the path equal exactly to the
+ # corresponding Action output_directories member. As an example, suppose the
+ # Action had an output directory `a/b/dir` and the execution produced the
+ # following contents in `a/b/dir`: a file named `bar` and a directory named `foo`
+ # with an executable file named `baz`. Then, output_directory will contain (
+ # hashes shortened for readability): ```json // OutputDirectory proto: ` path: "
+ # a/b/dir" tree_digest: ` hash: "4a73bc9d03...", size: 55 ` ` // Tree proto with
+ # hash "4a73bc9d03..." and size 55: ` root: ` files: [ ` name: "bar", digest: `
+ # hash: "4a73bc9d03...", size: 65534 ` ` ], directories: [ ` name: "foo", digest:
+ # ` hash: "4cf2eda940...", size: 43 ` ` ] ` children : ` // (Directory proto
+ # with hash "4cf2eda940..." and size 43) files: [ ` name: "baz", digest: ` hash:
+ # "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ` ``` If an output
+ # of the same name as listed in `output_files` of the Command was found in `
+ # output_directories`, but was not a directory, the server will return a
+ # FAILED_PRECONDITION.
  # Corresponds to the JSON property `outputDirectories`
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputDirectory>]
  attr_accessor :output_directories
 
  # The output directories of the action that are symbolic links to other
  # directories. Those may be links to other output directories, or input
- # directories, or even absolute paths outside of the working directory,
- # if the server supports
- # SymlinkAbsolutePathStrategy.ALLOWED.
- # For each output directory requested in the `output_directories` field of
- # the Action, if the directory existed after the action completed, a
- # single entry will be present either in this field, or in the
- # `output_directories` field, if the directory was not a symbolic link.
- # If an output of the same name was found, but was a symbolic link to a file
- # instead of a directory, the server will return a FAILED_PRECONDITION.
- # If the action does not produce the requested output, then that output
- # will be omitted from the list. The server is free to arrange the output
- # list as desired; clients MUST NOT assume that the output list is sorted.
+ # directories, or even absolute paths outside of the working directory, if the
+ # server supports SymlinkAbsolutePathStrategy.ALLOWED. For each output directory
+ # requested in the `output_directories` field of the Action, if the directory
+ # existed after the action completed, a single entry will be present either in
+ # this field, or in the `output_directories` field, if the directory was not a
+ # symbolic link. If an output of the same name was found, but was a symbolic
+ # link to a file instead of a directory, the server will return a
+ # FAILED_PRECONDITION. If the action does not produce the requested output, then
+ # that output will be omitted from the list. The server is free to arrange the
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
  # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
  # should still populate this field in addition to `output_symlinks`.
  # Corresponds to the JSON property `outputDirectorySymlinks`
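In the generated comments, braces are escaped as backticks, which makes the reflowed `output_directories` example above hard to scan. Restored to its original layout (structure and values exactly as in the comment, hashes still shortened):

```json
// OutputDirectory proto:
{
  path: "a/b/dir"
  tree_digest: {
    hash: "4a73bc9d03...",
    size: 55
  }
}
// Tree proto with hash "4a73bc9d03..." and size 55:
{
  root: {
    files: [
      {
        name: "bar",
        digest: {
          hash: "4a73bc9d03...",
          size: 65534
        }
      }
    ],
    directories: [
      {
        name: "foo",
        digest: {
          hash: "4cf2eda940...",
          size: 43
        }
      }
    ]
  }
  children : {
    // (Directory proto with hash "4cf2eda940..." and size 43)
    files: [
      {
        name: "baz",
        digest: {
          hash: "b2c941073e...",
          size: 1294,
        },
        is_executable: true
      }
    ]
  }
}
```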
@@ -277,131 +220,119 @@ module Google
277
220
  # The output files of the action that are symbolic links to other files. Those
278
221
  # may be links to other output files, or input files, or even absolute paths
279
222
  # outside of the working directory, if the server supports
280
- # SymlinkAbsolutePathStrategy.ALLOWED.
281
- # For each output file requested in the `output_files` or `output_paths`
282
- # field of the Action, if the corresponding file existed after
283
- # the action completed, a single entry will be present either in this field,
284
- # or in the `output_files` field, if the file was not a symbolic link.
285
- # If an output symbolic link of the same name as listed in `output_files` of
286
- # the Command was found, but its target type was not a regular file, the
287
- # server will return a FAILED_PRECONDITION.
288
- # If the action does not produce the requested output, then that output
289
- # will be omitted from the list. The server is free to arrange the output
290
- # list as desired; clients MUST NOT assume that the output list is sorted.
291
- # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
292
- # should still populate this field in addition to `output_symlinks`.
223
+ # SymlinkAbsolutePathStrategy.ALLOWED. For each output file requested in the `
224
+ # output_files` or `output_paths` field of the Action, if the corresponding file
225
+ # existed after the action completed, a single entry will be present either in
226
+ # this field, or in the `output_files` field, if the file was not a symbolic
227
+ # link. If an output symbolic link of the same name as listed in `output_files`
228
+ # of the Command was found, but its target type was not a regular file, the
229
+ # server will return a FAILED_PRECONDITION. If the action does not produce the
230
+ # requested output, then that output will be omitted from the list. The server
231
+ # is free to arrange the output list as desired; clients MUST NOT assume that
232
+ # the output list is sorted. DEPRECATED as of v2.1. Servers that wish to be
233
+ # compatible with v2.0 API should still populate this field in addition to `
234
+ # output_symlinks`.
293
235
  # Corresponds to the JSON property `outputFileSymlinks`
294
236
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputSymlink>]
295
237
  attr_accessor :output_file_symlinks
296
238
 
297
- # The output files of the action. For each output file requested in the
298
- # `output_files` or `output_paths` field of the Action, if the corresponding
299
- # file existed after the action completed, a single entry will be present
300
- # either in this field, or the `output_file_symlinks` field if the file was
301
- # a symbolic link to another file (`output_symlinks` field after v2.1).
302
- # If an output listed in `output_files` was found, but was a directory rather
303
- # than a regular file, the server will return a FAILED_PRECONDITION.
304
- # If the action does not produce the requested output, then that output
305
- # will be omitted from the list. The server is free to arrange the output
306
- # list as desired; clients MUST NOT assume that the output list is sorted.
239
+ # The output files of the action. For each output file requested in the `
240
+ # output_files` or `output_paths` field of the Action, if the corresponding file
241
+ # existed after the action completed, a single entry will be present either in
242
+ # this field, or the `output_file_symlinks` field if the file was a symbolic
243
+ # link to another file (`output_symlinks` field after v2.1). If an output listed
244
+ # in `output_files` was found, but was a directory rather than a regular file,
245
+ # the server will return a FAILED_PRECONDITION. If the action does not produce
246
+ # the requested output, then that output will be omitted from the list. The
247
+ # server is free to arrange the output list as desired; clients MUST NOT assume
248
+ # that the output list is sorted.
307
249
  # Corresponds to the JSON property `outputFiles`
308
250
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputFile>]
309
251
  attr_accessor :output_files
310
252
 
311
- # New in v2.1: this field will only be populated if the command
312
- # `output_paths` field was used, and not the pre v2.1 `output_files` or
313
- # `output_directories` fields.
314
- # The output paths of the action that are symbolic links to other paths. Those
315
- # may be links to other outputs, or inputs, or even absolute paths
316
- # outside of the working directory, if the server supports
317
- # SymlinkAbsolutePathStrategy.ALLOWED.
318
- # A single entry for each output requested in `output_paths`
319
- # field of the Action, if the corresponding path existed after
320
- # the action completed and was a symbolic link.
321
- # If the action does not produce a requested output, then that output
322
- # will be omitted from the list. The server is free to arrange the output
323
- # list as desired; clients MUST NOT assume that the output list is sorted.
253
+ # New in v2.1: this field will only be populated if the command `output_paths`
254
+ # field was used, and not the pre v2.1 `output_files` or `output_directories`
255
+ # fields. The output paths of the action that are symbolic links to other paths.
256
+ # Those may be links to other outputs, or inputs, or even absolute paths outside
257
+ # of the working directory, if the server supports SymlinkAbsolutePathStrategy.
258
+ # ALLOWED. A single entry for each output requested in `output_paths` field of
259
+ # the Action, if the corresponding path existed after the action completed and
260
+ # was a symbolic link. If the action does not produce a requested output, then
261
+ # that output will be omitted from the list. The server is free to arrange the
262
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
324
263
  # Corresponds to the JSON property `outputSymlinks`
325
264
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputSymlink>]
326
265
  attr_accessor :output_symlinks
327
266
 
328
267
  # A content digest. A digest for a given blob consists of the size of the blob
329
- # and its hash. The hash algorithm to use is defined by the server.
330
- # The size is considered to be an integral part of the digest and cannot be
331
- # separated. That is, even if the `hash` field is correctly specified but
332
- # `size_bytes` is not, the server MUST reject the request.
333
- # The reason for including the size in the digest is as follows: in a great
334
- # many cases, the server needs to know the size of the blob it is about to work
335
- # with prior to starting an operation with it, such as flattening Merkle tree
336
- # structures or streaming it to a worker. Technically, the server could
337
- # implement a separate metadata store, but this results in a significantly more
338
- # complicated implementation as opposed to having the client specify the size
339
- # up-front (or storing the size along with the digest in every message where
340
- # digests are embedded). This does mean that the API leaks some implementation
341
- # details of (what we consider to be) a reasonable server implementation, but
342
- # we consider this to be a worthwhile tradeoff.
343
- # When a `Digest` is used to refer to a proto message, it always refers to the
344
- # message in binary encoded form. To ensure consistent hashing, clients and
345
- # servers MUST ensure that they serialize messages according to the following
346
- # rules, even if there are alternate valid encodings for the same message:
347
- # * Fields are serialized in tag order.
348
- # * There are no unknown fields.
349
- # * There are no duplicate fields.
350
- # * Fields are serialized according to the default semantics for their type.
351
- # Most protocol buffer implementations will always follow these rules when
352
- # serializing, but care should be taken to avoid shortcuts. For instance,
353
- # concatenating two messages to merge them may produce duplicate fields.
268
+ # and its hash. The hash algorithm to use is defined by the server. The size is
269
+ # considered to be an integral part of the digest and cannot be separated. That
270
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
271
+ # the server MUST reject the request. The reason for including the size in the
272
+ # digest is as follows: in a great many cases, the server needs to know the size
273
+ # of the blob it is about to work with prior to starting an operation with it,
274
+ # such as flattening Merkle tree structures or streaming it to a worker.
275
+ # Technically, the server could implement a separate metadata store, but this
276
+ # results in a significantly more complicated implementation as opposed to
277
+ # having the client specify the size up-front (or storing the size along with
278
+ # the digest in every message where digests are embedded). This does mean that
279
+ # the API leaks some implementation details of (what we consider to be) a
280
+ # reasonable server implementation, but we consider this to be a worthwhile
281
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
282
+ # refers to the message in binary encoded form. To ensure consistent hashing,
283
+ # clients and servers MUST ensure that they serialize messages according to the
284
+ # following rules, even if there are alternate valid encodings for the same
285
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
286
+ # There are no duplicate fields. * Fields are serialized according to the
287
+ # default semantics for their type. Most protocol buffer implementations will
288
+ # always follow these rules when serializing, but care should be taken to avoid
289
+ # shortcuts. For instance, concatenating two messages to merge them may produce
290
+ # duplicate fields.
354
291
  # Corresponds to the JSON property `stderrDigest`
355
292
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
356
293
  attr_accessor :stderr_digest
357
294
 
358
- # The standard error buffer of the action. The server SHOULD NOT inline
359
- # stderr unless requested by the client in the
360
- # GetActionResultRequest
361
- # message. The server MAY omit inlining, even if requested, and MUST do so if
362
- # inlining
363
- # would cause the response to exceed message size limits.
295
+ # The standard error buffer of the action. The server SHOULD NOT inline stderr
296
+ # unless requested by the client in the GetActionResultRequest message. The
297
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
298
+ # cause the response to exceed message size limits.
364
299
  # Corresponds to the JSON property `stderrRaw`
365
300
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
366
301
  # @return [String]
367
302
  attr_accessor :stderr_raw
368
303
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `stdoutDigest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :stdout_digest
 
- # The standard output buffer of the action. The server SHOULD NOT inline
- # stdout unless requested by the client in the
- # GetActionResultRequest
- # message. The server MAY omit inlining, even if requested, and MUST do so if
- # inlining
- # would cause the response to exceed message size limits.
+ # The standard output buffer of the action. The server SHOULD NOT inline stdout
+ # unless requested by the client in the GetActionResultRequest message. The
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
+ # cause the response to exceed message size limits.
  # Corresponds to the JSON property `stdoutRaw`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
@@ -427,8 +358,7 @@ module Google
  end
  end

- # A request message for
- # ContentAddressableStorage.BatchReadBlobs.
+ # A request message for ContentAddressableStorage.BatchReadBlobs.
  class BuildBazelRemoteExecutionV2BatchReadBlobsRequest
  include Google::Apis::Core::Hashable
 
@@ -447,8 +377,7 @@ module Google
  end
  end

- # A response message for
- # ContentAddressableStorage.BatchReadBlobs.
+ # A response message for ContentAddressableStorage.BatchReadBlobs.
  class BuildBazelRemoteExecutionV2BatchReadBlobsResponse
  include Google::Apis::Core::Hashable
 
@@ -478,41 +407,39 @@ module Google
  attr_accessor :data

  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by
+ # [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `status`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
  attr_accessor :status
@@ -529,8 +456,7 @@ module Google
  end
  end

- # A request message for
- # ContentAddressableStorage.BatchUpdateBlobs.
+ # A request message for ContentAddressableStorage.BatchUpdateBlobs.
  class BuildBazelRemoteExecutionV2BatchUpdateBlobsRequest
  include Google::Apis::Core::Hashable
 
@@ -560,31 +486,29 @@ module Google
  attr_accessor :data

  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest
@@ -600,8 +524,7 @@ module Google
  end
  end

- # A response message for
- # ContentAddressableStorage.BatchUpdateBlobs.
+ # A response message for ContentAddressableStorage.BatchUpdateBlobs.
  class BuildBazelRemoteExecutionV2BatchUpdateBlobsResponse
  include Google::Apis::Core::Hashable
 
@@ -625,41 +548,39 @@ module Google
  include Google::Apis::Core::Hashable

  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by
+ # [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `status`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
  attr_accessor :status
@@ -684,23 +605,21 @@ module Google
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ActionCacheUpdateCapabilities]
  attr_accessor :action_cache_update_capabilities

- # Allowed values for priority in
- # ResultsCachePolicy
- # Used for querying both cache and execution valid priority ranges.
+ # Allowed values for priority in ResultsCachePolicy. Used for querying both
+ # cache and execution valid priority ranges.
  # Corresponds to the JSON property `cachePriorityCapabilities`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2PriorityCapabilities]
  attr_accessor :cache_priority_capabilities

- # All the digest functions supported by the remote cache.
- # Remote cache may support multiple digest functions simultaneously.
+ # All the digest functions supported by the remote cache. Remote cache may
+ # support multiple digest functions simultaneously.
  # Corresponds to the JSON property `digestFunction`
  # @return [Array<String>]
  attr_accessor :digest_function

- # Maximum total size of blobs to be uploaded/downloaded using
- # batch methods. A value of 0 means no limit is set, although
- # in practice there will always be a message size limitation
- # of the protocol in use, e.g. GRPC.
+ # Maximum total size of blobs to be uploaded/downloaded using batch methods. A
+ # value of 0 means no limit is set, although in practice there will always be a
+ # message size limitation of the protocol in use, e.g. gRPC.
  # Corresponds to the JSON property `maxBatchTotalSizeBytes`
  # @return [Fixnum]
  attr_accessor :max_batch_total_size_bytes
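A practical consequence of `max_batch_total_size_bytes`: a client should pack its BatchUpdateBlobs calls so each batch stays under the advertised budget. A rough stdlib-only sketch; the greedy packing strategy and the 4 MiB fallback for "no limit" are illustrative assumptions, not part of the API:

```ruby
# Greedily pack blobs (plain strings here) into batches whose total size
# stays under the server-advertised byte budget.
def pack_batches(blobs, max_batch_total_size_bytes)
  # 0 means "no limit"; fall back to an assumed 4 MiB, a common gRPC message cap.
  limit = max_batch_total_size_bytes.zero? ? 4 * 1024 * 1024 : max_batch_total_size_bytes
  batches = [[]]
  used = 0
  blobs.each do |blob|
    if used + blob.bytesize > limit && !batches.last.empty?
      batches << []  # start a new batch once the current one is full
      used = 0
    end
    # A single blob larger than the limit would need the ByteStream API instead.
    batches.last << blob
    used += blob.bytesize
  end
  batches
end

pack_batches(["a" * 10, "b" * 20, "c" * 5], 25)
# => [["aaaaaaaaaa"], ["bbbbbbbbbbbbbbbbbbbb", "ccccc"]]
```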
@@ -724,12 +643,11 @@ module Google
  end
  end

- # A `Command` is the actual command executed by a worker running an
- # Action and specifications of its
- # environment.
- # Except as otherwise required, the environment (such as which system
- # libraries or binaries are available, and what filesystems are mounted where)
- # is defined by and specific to the implementation of the remote execution API.
+ # A `Command` is the actual command executed by a worker running an Action and
+ # specifications of its environment. Except as otherwise required, the
+ # environment (such as which system libraries or binaries are available, and
+ # what filesystems are mounted where) is defined by and specific to the
+ # implementation of the remote execution API.
  class BuildBazelRemoteExecutionV2Command
  include Google::Apis::Core::Hashable
 
@@ -742,105 +660,90 @@ module Google

  # The environment variables to set when running the program. The worker may
  # provide its own default environment variables; these defaults can be
- # overridden using this field. Additional variables can also be specified.
- # In order to ensure that equivalent
- # Commands always hash to the same
- # value, the environment variables MUST be lexicographically sorted by name.
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
+ # overridden using this field. Additional variables can also be specified. In
+ # order to ensure that equivalent Commands always hash to the same value, the
+ # environment variables MUST be lexicographically sorted by name. Sorting of
+ # strings is done by code point, equivalently, by the UTF-8 bytes.
  # Corresponds to the JSON property `environmentVariables`
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2CommandEnvironmentVariable>]
  attr_accessor :environment_variables
 
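The sort-by-code-point requirement above is easy to get wrong with locale-aware comparisons. Ruby's default `String#<=>` already compares byte-wise, so a plain sort is sufficient; a small sketch (the variable values are made up, and real code would build `CommandEnvironmentVariable` objects rather than pairs):

```ruby
# Sort environment variables by name, byte-wise (code point order), as the
# Command hashing rules require.
env = { "PATH" => "/usr/bin", "HOME" => "/home/build", "LANG" => "C" }
sorted = env.sort_by { |name, _value| name } # String comparison is byte-wise
sorted.each { |name, value| puts "#{name}=#{value}" }
# => HOME=/home/build
#    LANG=C
#    PATH=/usr/bin
```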
- # A list of the output directories that the client expects to retrieve from
- # the action. Only the listed directories will be returned (an entire
- # directory structure will be returned as a
- # Tree message digest, see
- # OutputDirectory), as
- # well as files listed in `output_files`. Other files or directories that
- # may be created during command execution are discarded.
- # The paths are relative to the working directory of the action execution.
- # The paths are specified using a single forward slash (`/`) as a path
- # separator, even if the execution platform natively uses a different
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
- # being a relative path. The special value of empty string is allowed,
- # although not recommended, and can be used to capture the entire working
- # directory tree, including inputs.
- # In order to ensure consistent hashing of the same Action, the output paths
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
- # bytes).
- # An output directory cannot be duplicated or have the same path as any of
- # the listed output files. An output directory is allowed to be a parent of
- # another output directory.
+ # A list of the output directories that the client expects to retrieve from the
+ # action. Only the listed directories will be returned (an entire directory
+ # structure will be returned as a Tree message digest, see OutputDirectory), as
+ # well as files listed in `output_files`. Other files or directories that may be
+ # created during command execution are discarded. The paths are relative to the
+ # working directory of the action execution. The paths are specified using a
+ # single forward slash (`/`) as a path separator, even if the execution platform
+ # natively uses a different separator. The path MUST NOT include a trailing
+ # slash, nor a leading slash, being a relative path. The special value of empty
+ # string is allowed, although not recommended, and can be used to capture the
+ # entire working directory tree, including inputs. In order to ensure consistent
+ # hashing of the same Action, the output paths MUST be sorted lexicographically
+ # by code point (or, equivalently, by UTF-8 bytes). An output directory cannot
+ # be duplicated or have the same path as any of the listed output files. An
+ # output directory is allowed to be a parent of another output directory.
  # Directories leading up to the output directories (but not the output
- # directories themselves) are created by the worker prior to execution, even
- # if they are not explicitly part of the input root.
- # DEPRECATED since 2.1: Use `output_paths` instead.
+ # directories themselves) are created by the worker prior to execution, even if
+ # they are not explicitly part of the input root. DEPRECATED since 2.1: Use
+ # `output_paths` instead.
  # Corresponds to the JSON property `outputDirectories`
  # @return [Array<String>]
  attr_accessor :output_directories
 
- # A list of the output files that the client expects to retrieve from the
- # action. Only the listed files, as well as directories listed in
- # `output_directories`, will be returned to the client as output.
- # Other files or directories that may be created during command execution
- # are discarded.
- # The paths are relative to the working directory of the action execution.
- # The paths are specified using a single forward slash (`/`) as a path
- # separator, even if the execution platform natively uses a different
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
- # being a relative path.
- # In order to ensure consistent hashing of the same Action, the output paths
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
- # bytes).
- # An output file cannot be duplicated, be a parent of another output file, or
- # have the same path as any of the listed output directories.
- # Directories leading up to the output files are created by the worker prior
- # to execution, even if they are not explicitly part of the input root.
- # DEPRECATED since v2.1: Use `output_paths` instead.
+ # A list of the output files that the client expects to retrieve from the action.
+ # Only the listed files, as well as directories listed in `output_directories`,
+ # will be returned to the client as output. Other files or directories that may
+ # be created during command execution are discarded. The paths are relative to
+ # the working directory of the action execution. The paths are specified using a
+ # single forward slash (`/`) as a path separator, even if the execution platform
+ # natively uses a different separator. The path MUST NOT include a trailing
+ # slash, nor a leading slash, being a relative path. In order to ensure
+ # consistent hashing of the same Action, the output paths MUST be sorted
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes). An output
+ # file cannot be duplicated, be a parent of another output file, or have the
+ # same path as any of the listed output directories. Directories leading up to
+ # the output files are created by the worker prior to execution, even if they
+ # are not explicitly part of the input root. DEPRECATED since v2.1: Use
+ # `output_paths` instead.
  # Corresponds to the JSON property `outputFiles`
  # @return [Array<String>]
  attr_accessor :output_files
 
- # A list of the output paths that the client expects to retrieve from the
- # action. Only the listed paths will be returned to the client as output.
- # The type of the output (file or directory) is not specified, and will be
- # determined by the server after action execution. If the resulting path is
- # a file, it will be returned in an
- # OutputFile) typed field.
- # If the path is a directory, the entire directory structure will be returned
- # as a Tree message digest, see
- # OutputDirectory)
- # Other files or directories that may be created during command execution
- # are discarded.
- # The paths are relative to the working directory of the action execution.
- # The paths are specified using a single forward slash (`/`) as a path
- # separator, even if the execution platform natively uses a different
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
- # being a relative path.
- # In order to ensure consistent hashing of the same Action, the output paths
- # MUST be deduplicated and sorted lexicographically by code point (or,
- # equivalently, by UTF-8 bytes).
- # Directories leading up to the output paths are created by the worker prior
- # to execution, even if they are not explicitly part of the input root.
- # New in v2.1: this field supersedes the DEPRECATED `output_files` and
- # `output_directories` fields. If `output_paths` is used, `output_files` and
- # `output_directories` will be ignored!
+ # A list of the output paths that the client expects to retrieve from the action.
+ # Only the listed paths will be returned to the client as output. The type of
+ # the output (file or directory) is not specified, and will be determined by the
+ # server after action execution. If the resulting path is a file, it will be
+ # returned in an OutputFile typed field. If the path is a directory, the entire
+ # directory structure will be returned as a Tree message digest, see
+ # OutputDirectory. Other files or directories that may be created during command
+ # execution are discarded. The paths are relative to the working directory of
+ # the action execution. The paths are specified using a single forward slash
+ # (`/`) as a path separator, even if the execution platform natively uses a
+ # different separator. The path MUST NOT include a trailing slash, nor a leading
+ # slash, being a relative path. In order to ensure consistent hashing of the
+ # same Action, the output paths MUST be deduplicated and sorted
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes).
+ # Directories leading up to the output paths are created by the worker prior to
+ # execution, even if they are not explicitly part of the input root. New in
+ # v2.1: this field supersedes the DEPRECATED `output_files` and
+ # `output_directories` fields. If `output_paths` is used, `output_files` and
+ # `output_directories` will be ignored!
  # Corresponds to the JSON property `outputPaths`
  # @return [Array<String>]
  attr_accessor :output_paths
 
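Since `output_paths` must be deduplicated and sorted by code point before the Action is hashed, normalizing the list is a one-liner; a sketch with made-up paths:

```ruby
# Normalize output_paths as the spec requires: relative, '/'-separated,
# deduplicated, and sorted byte-wise.
requested = ["bin/app", "logs", "bin/app", "Testdata/out.txt"]
output_paths = requested.uniq.sort
# => ["Testdata/out.txt", "bin/app", "logs"]  (capitals sort before lowercase)
```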
  # A `Platform` is a set of requirements, such as hardware, operating system, or
- # compiler toolchain, for an
- # Action's execution
- # environment. A `Platform` is represented as a series of key-value pairs
- # representing the properties that are required of the platform.
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
+ # represented as a series of key-value pairs representing the properties that
+ # are required of the platform.
  # Corresponds to the JSON property `platform`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Platform]
  attr_accessor :platform

- # The working directory, relative to the input root, for the command to run
- # in. It must be a directory which exists in the input tree. If it is left
- # empty, then the action is run in the input root.
+ # The working directory, relative to the input root, for the command to run in.
+ # It must be a directory which exists in the input tree. If it is left empty,
+ # then the action is run in the input root.
  # Corresponds to the JSON property `workingDirectory`
  # @return [String]
  attr_accessor :working_directory
@@ -888,31 +791,29 @@ module Google
  end

  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  class BuildBazelRemoteExecutionV2Digest
  include Google::Apis::Core::Hashable
 
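The `hash`/`size_bytes` pairing described above can be reproduced with the Ruby standard library. A sketch, assuming the server's digest function is SHA-256 (a common choice, but servers advertise the actual functions via `digest_function`):

```ruby
require "digest"

# Compute a Digest-style (hash, size_bytes) pair for a blob. SHA-256 is an
# assumption here; the server defines the digest function actually in use.
def digest_for(blob)
  { hash: Digest::SHA256.hexdigest(blob), size_bytes: blob.bytesize }
end

d = digest_for("hello remote execution\n")
# Both fields matter: per the comment above, a request with a correct hash
# but a missing size_bytes MUST be rejected by the server.
puts "#{d[:hash]}/#{d[:size_bytes]}"
```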
@@ -939,75 +840,31 @@ module Google
  end
 
  # A `Directory` represents a directory node in a file tree, containing zero or
- # more children FileNodes,
- # DirectoryNodes and
- # SymlinkNodes.
- # Each `Node` contains its name in the directory, either the digest of its
- # content (either a file blob or a `Directory` proto) or a symlink target, as
- # well as possibly some metadata about the file or directory.
- # In order to ensure that two equivalent directory trees hash to the same
- # value, the following restrictions MUST be obeyed when constructing a
- # a `Directory`:
- # * Every child in the directory must have a path of exactly one segment.
- # Multiple levels of directory hierarchy may not be collapsed.
- # * Each child in the directory must have a unique path segment (file name).
- # Note that while the API itself is case-sensitive, the environment where
- # the Action is executed may or may not be case-sensitive. That is, it is
- # legal to call the API with a Directory that has both "Foo" and "foo" as
- # children, but the Action may be rejected by the remote system upon
- # execution.
- # * The files, directories and symlinks in the directory must each be sorted
- # in lexicographical order by path. The path strings must be sorted by code
- # point, equivalently, by UTF-8 bytes.
- # * The NodeProperties of files,
- # directories, and symlinks must be sorted in lexicographical order by
- # property name.
- # A `Directory` that obeys the restrictions is said to be in canonical form.
- # As an example, the following could be used for a file named `bar` and a
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
+ # its name in the directory, either the digest of its content (either a file
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
+ # metadata about the file or directory. In order to ensure that two equivalent
+ # directory trees hash to the same value, the following restrictions MUST be
+ # obeyed when constructing a `Directory`: * Every child in the directory must
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
+ # not be collapsed. * Each child in the directory must have a unique path
+ # segment (file name). Note that while the API itself is case-sensitive, the
+ # environment where the Action is executed may or may not be case-sensitive.
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
+ # foo" as children, but the Action may be rejected by the remote system upon
+ # execution. * The files, directories and symlinks in the directory must each be
+ # sorted in lexicographical order by path. The path strings must be sorted by
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
+ # directories, and symlinks must be sorted in lexicographical order by property
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
+ # form. As an example, the following could be used for a file named `bar` and a
  # directory named `foo` with an executable file named `baz` (hashes shortened
- # for readability):
- # ```json
- # // (Directory proto)
- # `
- # files: [
- # `
- # name: "bar",
- # digest: `
- # hash: "4a73bc9d03...",
- # size: 65534
- # `,
- # node_properties: [
- # `
- # "name": "MTime",
- # "value": "2017-01-15T01:30:15.01Z"
- # `
- # ]
- # `
- # ],
- # directories: [
- # `
- # name: "foo",
- # digest: `
- # hash: "4cf2eda940...",
- # size: 43
- # `
- # `
- # ]
- # `
- # // (Directory proto with hash "4cf2eda940..." and size 43)
- # `
- # files: [
- # `
- # name: "baz",
- # digest: `
- # hash: "b2c941073e...",
- # size: 1294,
- # `,
- # is_executable: true
- # `
- # ]
- # `
- # ```
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
  class BuildBazelRemoteExecutionV2Directory
  include Google::Apis::Core::Hashable
 
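The canonical-form rules above (single-segment names, unique and sorted children) determine whether two equivalent trees hash identically. A stdlib-only sketch of the checks, using plain hashes in place of the generated FileNode/DirectoryNode classes:

```ruby
# Minimal canonical-form check for the children of a Directory-like structure.
# Children are plain hashes with a :name key, standing in for FileNode,
# DirectoryNode, and SymlinkNode objects.
def canonical_children?(children)
  names = children.map { |c| c[:name] }
  names.none? { |n| n.empty? || n.include?("/") } && # exactly one path segment
    names.uniq == names &&                           # unique names
    names.sort == names                              # sorted byte-wise by path
end

files = [{ name: "bar" }]
dirs  = [{ name: "foo" }]
# Files, directories, and symlinks are each sorted as separate lists.
puts [files, dirs].all? { |list| canonical_children?(list) }  # => true
```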
@@ -1044,38 +901,35 @@ module Google
  end
  end

- # A `DirectoryNode` represents a child of a
- # Directory which is itself
- # a `Directory` and its associated metadata.
+ # A `DirectoryNode` represents a child of a Directory which is itself a
+ # `Directory` and its associated metadata.
  class BuildBazelRemoteExecutionV2DirectoryNode
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest
@@ -1096,40 +950,35 @@ module Google
  end
  end

- # Metadata about an ongoing
- # execution, which
- # will be contained in the metadata
- # field of the
- # Operation.
+ # Metadata about an ongoing execution, which will be contained in the metadata
+ # field of the Operation.
  class BuildBazelRemoteExecutionV2ExecuteOperationMetadata
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `actionDigest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :action_digest
@@ -1139,15 +988,13 @@ module Google
  # @return [String]
  attr_accessor :stage

- # If set, the client can use this name with
- # ByteStream.Read to stream the
+ # If set, the client can use this name with ByteStream.Read to stream the
  # standard error.
  # Corresponds to the JSON property `stderrStreamName`
  # @return [String]
  attr_accessor :stderr_stream_name

- # If set, the client can use this name with
- # ByteStream.Read to stream the
+ # If set, the client can use this name with ByteStream.Read to stream the
  # standard output.
  # Corresponds to the JSON property `stdoutStreamName`
  # @return [String]
@@ -1166,37 +1013,34 @@ module Google
  end
  end

- # A request message for
- # Execution.Execute.
+ # A request message for Execution.Execute.
  class BuildBazelRemoteExecutionV2ExecuteRequest
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `actionDigest`
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :action_digest
@@ -1212,19 +1056,17 @@ module Google
1212
1056
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ResultsCachePolicy]
1213
1057
  attr_accessor :results_cache_policy
1214
1058
 
1215
- # If true, the action will be executed even if its result is already
1216
- # present in the ActionCache.
1217
- # The execution is still allowed to be merged with other in-flight executions
1218
- # of the same action, however - semantically, the service MUST only guarantee
1219
- # that the results of an execution with this field set were not visible
1220
- # before the corresponding execution request was sent.
1221
- # Note that actions from execution requests setting this field set are still
1222
- # eligible to be entered into the action cache upon completion, and services
1223
- # SHOULD overwrite any existing entries that may exist. This allows
1224
- # skip_cache_lookup requests to be used as a mechanism for replacing action
1225
- # cache entries that reference outputs no longer available or that are
1226
- # poisoned in any way.
1227
- # If false, the result may be served from the action cache.
1059
+ # If true, the action will be executed even if its result is already present in
1060
+ # the ActionCache. The execution is still allowed to be merged with other in-
1061
+ # flight executions of the same action, however - semantically, the service MUST
1062
+ # only guarantee that the results of an execution with this field set were not
1063
+ # visible before the corresponding execution request was sent. Note that actions
1064
+ # from execution requests that set this field are still eligible to be
1065
+ # entered into the action cache upon completion, and services SHOULD overwrite
1066
+ # any existing entries that may exist. This allows skip_cache_lookup requests to
1067
+ # be used as a mechanism for replacing action cache entries that reference
1068
+ # outputs no longer available or that are poisoned in any way. If false, the
1069
+ # result may be served from the action cache.
1228
1070
  # Corresponds to the JSON property `skipCacheLookup`
1229
1071
  # @return [Boolean]
1230
1072
  attr_accessor :skip_cache_lookup
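One concrete use of this flag is repairing a cache entry whose referenced outputs have been evicted: re-run the action and let the fresh result overwrite the entry. A sketch using this gem's generated request class (`action_digest` is assumed to have been built beforehand):

```ruby
rbe = Google::Apis::RemotebuildexecutionV2

request = rbe::BuildBazelRemoteExecutionV2ExecuteRequest.new(
  action_digest: action_digest, # Digest of the Action proto in the CAS
  skip_cache_lookup: true       # bypass the ActionCache read, not the write
)
```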
@@ -1243,11 +1085,8 @@ module Google
1243
1085
  end
1244
1086
  end
1245
1087
 
1246
- # The response message for
1247
- # Execution.Execute,
1248
- # which will be contained in the response
1249
- # field of the
1250
- # Operation.
1088
+ # The response message for Execution.Execute, which will be contained in the
1089
+ # response field of the Operation.
1251
1090
  class BuildBazelRemoteExecutionV2ExecuteResponse
1252
1091
  include Google::Apis::Core::Hashable
1253
1092
 
@@ -1263,29 +1102,27 @@ module Google
1263
1102
  # @return [String]
1264
1103
  attr_accessor :message
1265
1104
 
1266
- # An ActionResult represents the result of an
1267
- # Action being run.
1105
+ # An ActionResult represents the result of an Action being run.
1268
1106
  # Corresponds to the JSON property `result`
1269
1107
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ActionResult]
1270
1108
  attr_accessor :result
1271
1109
 
1272
1110
  # An optional list of additional log outputs the server wishes to provide. A
1273
- # server can use this to return execution-specific logs however it wishes.
1274
- # This is intended primarily to make it easier for users to debug issues that
1275
- # may be outside of the actual job execution, such as by identifying the
1276
- # worker executing the action or by providing logs from the worker's setup
1277
- # phase. The keys SHOULD be human readable so that a client can display them
1278
- # to a user.
1111
+ # server can use this to return execution-specific logs however it wishes. This
1112
+ # is intended primarily to make it easier for users to debug issues that may be
1113
+ # outside of the actual job execution, such as by identifying the worker
1114
+ # executing the action or by providing logs from the worker's setup phase. The
1115
+ # keys SHOULD be human readable so that a client can display them to a user.
1279
1116
  # Corresponds to the JSON property `serverLogs`
1280
1117
  # @return [Hash<String,Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2LogFile>]
1281
1118
  attr_accessor :server_logs
1282
1119
 
1283
- # The `Status` type defines a logical error model that is suitable for
1284
- # different programming environments, including REST APIs and RPC APIs. It is
1285
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1286
- # three pieces of data: error code, error message, and error details.
1287
- # You can find out more about this error model and how to work with it in the
1288
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
1120
+ # The `Status` type defines a logical error model that is suitable for different
1121
+ # programming environments, including REST APIs and RPC APIs. It is used by [
1122
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
1123
+ # data: error code, error message, and error details. You can find out more
1124
+ # about this error model and how to work with it in the
1125
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
1289
1126
  # Corresponds to the JSON property `status`
1290
1127
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
1291
1128
  attr_accessor :status
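Once the Operation completes, a client should inspect `status` before trusting `result`, and can use the `human_readable` hint on each server log to decide what is safe to print. A sketch of that handling (`response` is assumed to be a decoded `ExecuteResponse`):

```ruby
if response.status && response.status.code && response.status.code != 0
  warn "execution failed: #{response.status.message}"
  (response.server_logs || {}).each do |name, log_file|
    # Only point users at logs the server marked as human-readable text.
    warn "see log #{name} (digest #{log_file.digest.hash_prop})" if log_file.human_readable
  end
elsif response.result
  puts "exit code: #{response.result.exit_code}"
  puts "(served from cache)" if response.cached_result
end
```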
@@ -1392,9 +1229,8 @@ module Google
1392
1229
  attr_accessor :exec_enabled
1393
1230
  alias_method :exec_enabled?, :exec_enabled
1394
1231
 
1395
- # Allowed values for priority in
1396
- # ResultsCachePolicy
1397
- # Used for querying both cache and execution valid priority ranges.
1232
+ # Allowed values for priority in ResultsCachePolicy Used for querying both cache
1233
+ # and execution valid priority ranges.
1398
1234
  # Corresponds to the JSON property `executionPriorityCapabilities`
1399
1235
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2PriorityCapabilities]
1400
1236
  attr_accessor :execution_priority_capabilities
@@ -1423,13 +1259,12 @@ module Google
1423
1259
 
1424
1260
  # The priority (relative importance) of this action. Generally, a lower value
1425
1261
  # means that the action should be run sooner than actions having a greater
1426
- # priority value, but the interpretation of a given value is server-
1427
- # dependent. A priority of 0 means the *default* priority. Priorities may be
1428
- # positive or negative, and such actions should run later or sooner than
1429
- # actions having the default priority, respectively. The particular semantics
1430
- # of this field is up to the server. In particular, every server will have
1431
- # their own supported range of priorities, and will decide how these map into
1432
- # scheduling policy.
1262
+ # priority value, but the interpretation of a given value is server-dependent.
1263
+ # A priority of 0 means the *default* priority. Priorities may be positive or
1264
+ # negative, and such actions should run later or sooner than actions having the
1265
+ # default priority, respectively. The particular semantics of this field is up
1266
+ # to the server. In particular, every server will have their own supported range
1267
+ # of priorities, and will decide how these map into scheduling policy.
1433
1268
  # Corresponds to the JSON property `priority`
1434
1269
  # @return [Fixnum]
1435
1270
  attr_accessor :priority
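Because the valid range and its meaning are server-defined, a well-behaved client discovers the advertised range via the Capabilities API and clamps its request into it. A hedged sketch (`caps` is assumed to be a previously fetched `ServerCapabilities`; field names follow this gem's classes):

```ruby
rbe = Google::Apis::RemotebuildexecutionV2

range = caps.execution_capabilities
            .execution_priority_capabilities
            .priorities
            .first                        # a PriorityRange with min/max
wanted = -10                              # negative: sooner than the default 0
policy = rbe::BuildBazelRemoteExecutionV2ExecutionPolicy.new(
  priority: wanted.clamp(range.min_priority, range.max_priority)
)
```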
@@ -1449,31 +1284,29 @@ module Google
1449
1284
  include Google::Apis::Core::Hashable
1450
1285
 
1451
1286
  # A content digest. A digest for a given blob consists of the size of the blob
1452
- # and its hash. The hash algorithm to use is defined by the server.
1453
- # The size is considered to be an integral part of the digest and cannot be
1454
- # separated. That is, even if the `hash` field is correctly specified but
1455
- # `size_bytes` is not, the server MUST reject the request.
1456
- # The reason for including the size in the digest is as follows: in a great
1457
- # many cases, the server needs to know the size of the blob it is about to work
1458
- # with prior to starting an operation with it, such as flattening Merkle tree
1459
- # structures or streaming it to a worker. Technically, the server could
1460
- # implement a separate metadata store, but this results in a significantly more
1461
- # complicated implementation as opposed to having the client specify the size
1462
- # up-front (or storing the size along with the digest in every message where
1463
- # digests are embedded). This does mean that the API leaks some implementation
1464
- # details of (what we consider to be) a reasonable server implementation, but
1465
- # we consider this to be a worthwhile tradeoff.
1466
- # When a `Digest` is used to refer to a proto message, it always refers to the
1467
- # message in binary encoded form. To ensure consistent hashing, clients and
1468
- # servers MUST ensure that they serialize messages according to the following
1469
- # rules, even if there are alternate valid encodings for the same message:
1470
- # * Fields are serialized in tag order.
1471
- # * There are no unknown fields.
1472
- # * There are no duplicate fields.
1473
- # * Fields are serialized according to the default semantics for their type.
1474
- # Most protocol buffer implementations will always follow these rules when
1475
- # serializing, but care should be taken to avoid shortcuts. For instance,
1476
- # concatenating two messages to merge them may produce duplicate fields.
1287
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1288
+ # considered to be an integral part of the digest and cannot be separated. That
1289
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1290
+ # the server MUST reject the request. The reason for including the size in the
1291
+ # digest is as follows: in a great many cases, the server needs to know the size
1292
+ # of the blob it is about to work with prior to starting an operation with it,
1293
+ # such as flattening Merkle tree structures or streaming it to a worker.
1294
+ # Technically, the server could implement a separate metadata store, but this
1295
+ # results in a significantly more complicated implementation as opposed to
1296
+ # having the client specify the size up-front (or storing the size along with
1297
+ # the digest in every message where digests are embedded). This does mean that
1298
+ # the API leaks some implementation details of (what we consider to be) a
1299
+ # reasonable server implementation, but we consider this to be a worthwhile
1300
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1301
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1302
+ # clients and servers MUST ensure that they serialize messages according to the
1303
+ # following rules, even if there are alternate valid encodings for the same
1304
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1305
+ # There are no duplicate fields. * Fields are serialized according to the
1306
+ # default semantics for their type. Most protocol buffer implementations will
1307
+ # always follow these rules when serializing, but care should be taken to avoid
1308
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1309
+ # duplicate fields.
1477
1310
  # Corresponds to the JSON property `digest`
1478
1311
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1479
1312
  attr_accessor :digest
@@ -1507,8 +1340,7 @@ module Google
1507
1340
  end
1508
1341
  end
1509
1342
 
1510
- # A request message for
1511
- # ContentAddressableStorage.FindMissingBlobs.
1343
+ # A request message for ContentAddressableStorage.FindMissingBlobs.
1512
1344
  class BuildBazelRemoteExecutionV2FindMissingBlobsRequest
1513
1345
  include Google::Apis::Core::Hashable
1514
1346
 
@@ -1527,8 +1359,7 @@ module Google
1527
1359
  end
1528
1360
  end
1529
1361
 
1530
- # A response message for
1531
- # ContentAddressableStorage.FindMissingBlobs.
1362
+ # A response message for ContentAddressableStorage.FindMissingBlobs.
1532
1363
  class BuildBazelRemoteExecutionV2FindMissingBlobsResponse
1533
1364
  include Google::Apis::Core::Hashable
1534
1365
 
@@ -1547,8 +1378,7 @@ module Google
1547
1378
  end
1548
1379
  end
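Together these two messages implement the usual CAS upload negotiation: offer every digest you could upload, then upload only what the server reports missing. A sketch; the service method name and the `upload_blob` helper are assumptions (uploads typically go through the ByteStream API or the batch blob-update call):

```ruby
rbe = Google::Apis::RemotebuildexecutionV2

# `blobs` is assumed to map Digest objects to their raw bytes.
request = rbe::BuildBazelRemoteExecutionV2FindMissingBlobsRequest.new(
  blob_digests: blobs.keys
)
response = service.find_blob_missing_blobs(instance_name, request)
(response.missing_blob_digests || []).each do |digest|
  upload_blob(digest, blobs[digest]) # hypothetical upload helper
end
```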
1549
1380
 
1550
- # A response message for
1551
- # ContentAddressableStorage.GetTree.
1381
+ # A response message for ContentAddressableStorage.GetTree.
1552
1382
  class BuildBazelRemoteExecutionV2GetTreeResponse
1553
1383
  include Google::Apis::Core::Hashable
1554
1384
 
@@ -1558,9 +1388,8 @@ module Google
1558
1388
  attr_accessor :directories
1559
1389
 
1560
1390
  # If present, signifies that there are more results which the client can
1561
- # retrieve by passing this as the page_token in a subsequent
1562
- # request.
1563
- # If empty, signifies that this is the last page of results.
1391
+ # retrieve by passing this as the page_token in a subsequent request. If empty,
1392
+ # signifies that this is the last page of results.
1564
1393
  # Corresponds to the JSON property `nextPageToken`
1565
1394
  # @return [String]
1566
1395
  attr_accessor :next_page_token
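`next_page_token` follows the standard list-pagination contract, so fetching a large tree is a loop that feeds each token back until it comes back empty. A sketch (the `get_blob_tree` method name and its parameters are assumptions about this gem's generated service):

```ruby
directories = []
page_token = nil
loop do
  response = service.get_blob_tree(instance_name,
                                   root_digest.hash_prop,
                                   root_digest.size_bytes,
                                   page_token: page_token)
  directories.concat(response.directories || [])
  page_token = response.next_page_token
  break if page_token.nil? || page_token.empty?
end
```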
@@ -1581,40 +1410,38 @@ module Google
1581
1410
  include Google::Apis::Core::Hashable
1582
1411
 
1583
1412
  # A content digest. A digest for a given blob consists of the size of the blob
1584
- # and its hash. The hash algorithm to use is defined by the server.
1585
- # The size is considered to be an integral part of the digest and cannot be
1586
- # separated. That is, even if the `hash` field is correctly specified but
1587
- # `size_bytes` is not, the server MUST reject the request.
1588
- # The reason for including the size in the digest is as follows: in a great
1589
- # many cases, the server needs to know the size of the blob it is about to work
1590
- # with prior to starting an operation with it, such as flattening Merkle tree
1591
- # structures or streaming it to a worker. Technically, the server could
1592
- # implement a separate metadata store, but this results in a significantly more
1593
- # complicated implementation as opposed to having the client specify the size
1594
- # up-front (or storing the size along with the digest in every message where
1595
- # digests are embedded). This does mean that the API leaks some implementation
1596
- # details of (what we consider to be) a reasonable server implementation, but
1597
- # we consider this to be a worthwhile tradeoff.
1598
- # When a `Digest` is used to refer to a proto message, it always refers to the
1599
- # message in binary encoded form. To ensure consistent hashing, clients and
1600
- # servers MUST ensure that they serialize messages according to the following
1601
- # rules, even if there are alternate valid encodings for the same message:
1602
- # * Fields are serialized in tag order.
1603
- # * There are no unknown fields.
1604
- # * There are no duplicate fields.
1605
- # * Fields are serialized according to the default semantics for their type.
1606
- # Most protocol buffer implementations will always follow these rules when
1607
- # serializing, but care should be taken to avoid shortcuts. For instance,
1608
- # concatenating two messages to merge them may produce duplicate fields.
1413
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1414
+ # considered to be an integral part of the digest and cannot be separated. That
1415
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1416
+ # the server MUST reject the request. The reason for including the size in the
1417
+ # digest is as follows: in a great many cases, the server needs to know the size
1418
+ # of the blob it is about to work with prior to starting an operation with it,
1419
+ # such as flattening Merkle tree structures or streaming it to a worker.
1420
+ # Technically, the server could implement a separate metadata store, but this
1421
+ # results in a significantly more complicated implementation as opposed to
1422
+ # having the client specify the size up-front (or storing the size along with
1423
+ # the digest in every message where digests are embedded). This does mean that
1424
+ # the API leaks some implementation details of (what we consider to be) a
1425
+ # reasonable server implementation, but we consider this to be a worthwhile
1426
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1427
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1428
+ # clients and servers MUST ensure that they serialize messages according to the
1429
+ # following rules, even if there are alternate valid encodings for the same
1430
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1431
+ # There are no duplicate fields. * Fields are serialized according to the
1432
+ # default semantics for their type. Most protocol buffer implementations will
1433
+ # always follow these rules when serializing, but care should be taken to avoid
1434
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1435
+ # duplicate fields.
1609
1436
  # Corresponds to the JSON property `digest`
1610
1437
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1611
1438
  attr_accessor :digest
1612
1439
 
1613
- # This is a hint as to the purpose of the log, and is set to true if the log
1614
- # is human-readable text that can be usefully displayed to a user, and false
1615
- # otherwise. For instance, if a command-line client wishes to print the
1616
- # server logs to the terminal for a failed action, this allows it to avoid
1617
- # displaying a binary file.
1440
+ # This is a hint as to the purpose of the log, and is set to true if the log is
1441
+ # human-readable text that can be usefully displayed to a user, and false
1442
+ # otherwise. For instance, if a command-line client wishes to print the server
1443
+ # logs to the terminal for a failed action, this allows it to avoid displaying a
1444
+ # binary file.
1618
1445
  # Corresponds to the JSON property `humanReadable`
1619
1446
  # @return [Boolean]
1620
1447
  attr_accessor :human_readable
@@ -1631,10 +1458,8 @@ module Google
1631
1458
  end
1632
1459
  end
1633
1460
 
1634
- # A single property for FileNodes,
1635
- # DirectoryNodes, and
1636
- # SymlinkNodes. The server is
1637
- # responsible for specifying the property `name`s that it accepts. If
1461
+ # A single property for FileNodes, DirectoryNodes, and SymlinkNodes. The server
1462
+ # is responsible for specifying the property `name`s that it accepts. If
1638
1463
  # permitted by the server, the same `name` may occur multiple times.
1639
1464
  class BuildBazelRemoteExecutionV2NodeProperty
1640
1465
  include Google::Apis::Core::Hashable
@@ -1666,39 +1491,37 @@ module Google
1666
1491
  include Google::Apis::Core::Hashable
1667
1492
 
1668
1493
  # The full path of the directory relative to the working directory. The path
1669
- # separator is a forward slash `/`. Since this is a relative path, it MUST
1670
- # NOT begin with a leading forward slash. The empty string value is allowed,
1671
- # and it denotes the entire working directory.
1494
+ # separator is a forward slash `/`. Since this is a relative path, it MUST NOT
1495
+ # begin with a leading forward slash. The empty string value is allowed, and it
1496
+ # denotes the entire working directory.
1672
1497
  # Corresponds to the JSON property `path`
1673
1498
  # @return [String]
1674
1499
  attr_accessor :path
1675
1500
 
1676
1501
  # A content digest. A digest for a given blob consists of the size of the blob
1677
- # and its hash. The hash algorithm to use is defined by the server.
1678
- # The size is considered to be an integral part of the digest and cannot be
1679
- # separated. That is, even if the `hash` field is correctly specified but
1680
- # `size_bytes` is not, the server MUST reject the request.
1681
- # The reason for including the size in the digest is as follows: in a great
1682
- # many cases, the server needs to know the size of the blob it is about to work
1683
- # with prior to starting an operation with it, such as flattening Merkle tree
1684
- # structures or streaming it to a worker. Technically, the server could
1685
- # implement a separate metadata store, but this results in a significantly more
1686
- # complicated implementation as opposed to having the client specify the size
1687
- # up-front (or storing the size along with the digest in every message where
1688
- # digests are embedded). This does mean that the API leaks some implementation
1689
- # details of (what we consider to be) a reasonable server implementation, but
1690
- # we consider this to be a worthwhile tradeoff.
1691
- # When a `Digest` is used to refer to a proto message, it always refers to the
1692
- # message in binary encoded form. To ensure consistent hashing, clients and
1693
- # servers MUST ensure that they serialize messages according to the following
1694
- # rules, even if there are alternate valid encodings for the same message:
1695
- # * Fields are serialized in tag order.
1696
- # * There are no unknown fields.
1697
- # * There are no duplicate fields.
1698
- # * Fields are serialized according to the default semantics for their type.
1699
- # Most protocol buffer implementations will always follow these rules when
1700
- # serializing, but care should be taken to avoid shortcuts. For instance,
1701
- # concatenating two messages to merge them may produce duplicate fields.
1502
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1503
+ # considered to be an integral part of the digest and cannot be separated. That
1504
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1505
+ # the server MUST reject the request. The reason for including the size in the
1506
+ # digest is as follows: in a great many cases, the server needs to know the size
1507
+ # of the blob it is about to work with prior to starting an operation with it,
1508
+ # such as flattening Merkle tree structures or streaming it to a worker.
1509
+ # Technically, the server could implement a separate metadata store, but this
1510
+ # results in a significantly more complicated implementation as opposed to
1511
+ # having the client specify the size up-front (or storing the size along with
1512
+ # the digest in every message where digests are embedded). This does mean that
1513
+ # the API leaks some implementation details of (what we consider to be) a
1514
+ # reasonable server implementation, but we consider this to be a worthwhile
1515
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1516
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1517
+ # clients and servers MUST ensure that they serialize messages according to the
1518
+ # following rules, even if there are alternate valid encodings for the same
1519
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1520
+ # There are no duplicate fields. * Fields are serialized according to the
1521
+ # default semantics for their type. Most protocol buffer implementations will
1522
+ # always follow these rules when serializing, but care should be taken to avoid
1523
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1524
+ # duplicate fields.
1702
1525
  # Corresponds to the JSON property `treeDigest`
1703
1526
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1704
1527
  attr_accessor :tree_digest
@@ -1714,51 +1537,45 @@ module Google
1714
1537
  end
1715
1538
  end
1716
1539
 
1717
- # An `OutputFile` is similar to a
1718
- # FileNode, but it is used as an
1719
- # output in an `ActionResult`. It allows a full file path rather than
1720
- # only a name.
1540
+ # An `OutputFile` is similar to a FileNode, but it is used as an output in an `
1541
+ # ActionResult`. It allows a full file path rather than only a name.
1721
1542
  class BuildBazelRemoteExecutionV2OutputFile
1722
1543
  include Google::Apis::Core::Hashable
1723
1544
 
1724
1545
  # The contents of the file if inlining was requested. The server SHOULD NOT
1725
- # inline
1726
- # file contents unless requested by the client in the
1727
- # GetActionResultRequest
1728
- # message. The server MAY omit inlining, even if requested, and MUST do so if
1729
- # inlining
1730
- # would cause the response to exceed message size limits.
1546
+ # inline file contents unless requested by the client in the
1547
+ # GetActionResultRequest message. The server MAY omit inlining, even if
1548
+ # requested, and MUST do so if inlining would cause the response to exceed
1549
+ # message size limits.
1731
1550
  # Corresponds to the JSON property `contents`
1732
1551
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
1733
1552
  # @return [String]
1734
1553
  attr_accessor :contents
1735
1554
 
1736
1555
  # A content digest. A digest for a given blob consists of the size of the blob
1737
- # and its hash. The hash algorithm to use is defined by the server.
1738
- # The size is considered to be an integral part of the digest and cannot be
1739
- # separated. That is, even if the `hash` field is correctly specified but
1740
- # `size_bytes` is not, the server MUST reject the request.
1741
- # The reason for including the size in the digest is as follows: in a great
1742
- # many cases, the server needs to know the size of the blob it is about to work
1743
- # with prior to starting an operation with it, such as flattening Merkle tree
1744
- # structures or streaming it to a worker. Technically, the server could
1745
- # implement a separate metadata store, but this results in a significantly more
1746
- # complicated implementation as opposed to having the client specify the size
1747
- # up-front (or storing the size along with the digest in every message where
1748
- # digests are embedded). This does mean that the API leaks some implementation
1749
- # details of (what we consider to be) a reasonable server implementation, but
1750
- # we consider this to be a worthwhile tradeoff.
1751
- # When a `Digest` is used to refer to a proto message, it always refers to the
1752
- # message in binary encoded form. To ensure consistent hashing, clients and
1753
- # servers MUST ensure that they serialize messages according to the following
1754
- # rules, even if there are alternate valid encodings for the same message:
1755
- # * Fields are serialized in tag order.
1756
- # * There are no unknown fields.
1757
- # * There are no duplicate fields.
1758
- # * Fields are serialized according to the default semantics for their type.
1759
- # Most protocol buffer implementations will always follow these rules when
1760
- # serializing, but care should be taken to avoid shortcuts. For instance,
1761
- # concatenating two messages to merge them may produce duplicate fields.
1556
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1557
+ # considered to be an integral part of the digest and cannot be separated. That
1558
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1559
+ # the server MUST reject the request. The reason for including the size in the
1560
+ # digest is as follows: in a great many cases, the server needs to know the size
1561
+ # of the blob it is about to work with prior to starting an operation with it,
1562
+ # such as flattening Merkle tree structures or streaming it to a worker.
1563
+ # Technically, the server could implement a separate metadata store, but this
1564
+ # results in a significantly more complicated implementation as opposed to
1565
+ # having the client specify the size up-front (or storing the size along with
1566
+ # the digest in every message where digests are embedded). This does mean that
1567
+ # the API leaks some implementation details of (what we consider to be) a
1568
+ # reasonable server implementation, but we consider this to be a worthwhile
1569
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1570
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1571
+ # clients and servers MUST ensure that they serialize messages according to the
1572
+ # following rules, even if there are alternate valid encodings for the same
1573
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1574
+ # There are no duplicate fields. * Fields are serialized according to the
1575
+ # default semantics for their type. Most protocol buffer implementations will
1576
+ # always follow these rules when serializing, but care should be taken to avoid
1577
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1578
+ # duplicate fields.
1762
1579
  # Corresponds to the JSON property `digest`
1763
1580
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1764
1581
  attr_accessor :digest
@@ -1775,8 +1592,8 @@ module Google
1775
1592
  attr_accessor :node_properties
1776
1593
 
1777
1594
  # The full path of the file relative to the working directory, including the
1778
- # filename. The path separator is a forward slash `/`. Since this is a
1779
- # relative path, it MUST NOT begin with a leading forward slash.
1595
+ # filename. The path separator is a forward slash `/`. Since this is a relative
1596
+ # path, it MUST NOT begin with a leading forward slash.
1780
1597
  # Corresponds to the JSON property `path`
1781
1598
  # @return [String]
1782
1599
  attr_accessor :path
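Since the client library base64-decodes `contents` automatically, restoring an inlined output is a plain binary write; a file that was not inlined has to be fetched from the CAS by its digest. A sketch (`output_root`, `output_file`, and the `fetch_blob` CAS download helper are assumed):

```ruby
require "fileutils"

local = File.join(output_root, output_file.path) # `path` is always relative
FileUtils.mkdir_p(File.dirname(local))
if output_file.contents
  File.binwrite(local, output_file.contents)     # already decoded by the library
else
  fetch_blob(output_file.digest, local)          # not inlined: pull from the CAS
end
File.chmod(0o755, local) if output_file.is_executable
```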
@@ -1795,32 +1612,29 @@ module Google
1795
1612
  end
1796
1613
  end
1797
1614
 
1798
- # An `OutputSymlink` is similar to a
1799
- # Symlink, but it is used as an
1800
- # output in an `ActionResult`.
1801
- # `OutputSymlink` is binary-compatible with `SymlinkNode`.
1615
+ # An `OutputSymlink` is similar to a Symlink, but it is used as an output in an `
1616
+ # ActionResult`. `OutputSymlink` is binary-compatible with `SymlinkNode`.
1802
1617
  class BuildBazelRemoteExecutionV2OutputSymlink
1803
1618
  include Google::Apis::Core::Hashable
1804
1619
 
1805
- # The supported node properties of the OutputSymlink, if requested by the
1806
- # Action.
1620
+ # The supported node properties of the OutputSymlink, if requested by the Action.
1807
1621
  # Corresponds to the JSON property `nodeProperties`
1808
1622
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2NodeProperty>]
1809
1623
  attr_accessor :node_properties
1810
1624
 
1811
1625
  # The full path of the symlink relative to the working directory, including the
1812
- # filename. The path separator is a forward slash `/`. Since this is a
1813
- # relative path, it MUST NOT begin with a leading forward slash.
1626
+ # filename. The path separator is a forward slash `/`. Since this is a relative
1627
+ # path, it MUST NOT begin with a leading forward slash.
1814
1628
  # Corresponds to the JSON property `path`
1815
1629
  # @return [String]
1816
1630
  attr_accessor :path
1817
1631
 
1818
- # The target path of the symlink. The path separator is a forward slash `/`.
1819
- # The target path can be relative to the parent directory of the symlink or
1820
- # it can be an absolute path starting with `/`. Support for absolute paths
1821
- # can be checked using the Capabilities
1822
- # API. The canonical form forbids the substrings `/./` and `//` in the target
1823
- # path. `..` components are allowed anywhere in the target path.
1632
+ # The target path of the symlink. The path separator is a forward slash `/`. The
1633
+ # target path can be relative to the parent directory of the symlink or it can
1634
+ # be an absolute path starting with `/`. Support for absolute paths can be
1635
+ # checked using the Capabilities API. The canonical form forbids the substrings `
1636
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
1637
+ # target path.
1824
1638
  # Corresponds to the JSON property `target`
1825
1639
  # @return [String]
1826
1640
  attr_accessor :target
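The canonical-form rules are mechanical, so a client can check a target before uploading it. A minimal sketch of exactly the checks described above:

```ruby
# Canonical form forbids the substrings `/./` and `//`; absolute targets
# (leading `/`) are legal only if the server's Capabilities advertise support.
# `..` components are allowed anywhere in the target path.
def canonical_symlink_target?(target, absolute_allowed: false)
  return false if target.include?("//") || target.include?("/./")
  return false if target.start_with?("/") && !absolute_allowed
  true
end

canonical_symlink_target?("../lib/libfoo.so") # => true
canonical_symlink_target?("a//b")             # => false
```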
@@ -1838,17 +1652,16 @@ module Google
1838
1652
  end
1839
1653
 
1840
1654
  # A `Platform` is a set of requirements, such as hardware, operating system, or
1841
- # compiler toolchain, for an
1842
- # Action's execution
1843
- # environment. A `Platform` is represented as a series of key-value pairs
1844
- # representing the properties that are required of the platform.
1655
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
1656
+ # represented as a series of key-value pairs representing the properties that
1657
+ # are required of the platform.
1845
1658
  class BuildBazelRemoteExecutionV2Platform
1846
1659
  include Google::Apis::Core::Hashable
1847
1660
 
1848
- # The properties that make up this platform. In order to ensure that
1849
- # equivalent `Platform`s always hash to the same value, the properties MUST
1850
- # be lexicographically sorted by name, and then by value. Sorting of strings
1851
- # is done by code point, equivalently, by the UTF-8 bytes.
1661
+ # The properties that make up this platform. In order to ensure that equivalent `
1662
+ # Platform`s always hash to the same value, the properties MUST be
1663
+ # lexicographically sorted by name, and then by value. Sorting of strings is
1664
+ # done by code point, equivalently, by the UTF-8 bytes.
1852
1665
  # Corresponds to the JSON property `properties`
1853
1666
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2PlatformProperty>]
1854
1667
  attr_accessor :properties
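The sort requirement exists so that equivalent `Platform`s serialize to identical bytes and therefore hash identically. Ruby's default string comparison is bytewise, which matches the code-point rule, so sorting name/value pairs with `Array#sort` is enough. A sketch with illustrative property values:

```ruby
rbe = Google::Apis::RemotebuildexecutionV2

pairs = [
  ["container-image", "docker://gcr.io/example/builder"], # illustrative values
  ["OSFamily", "Linux"],
]
platform = rbe::BuildBazelRemoteExecutionV2Platform.new(
  properties: pairs.sort.map do |name, value| # sorts by name, then value, bytewise
    rbe::BuildBazelRemoteExecutionV2PlatformProperty.new(name: name, value: value)
  end
)
```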
@@ -1865,19 +1678,16 @@ module Google
1865
1678
 
1866
1679
  # A single property for the environment. The server is responsible for
1867
1680
  # specifying the property `name`s that it accepts. If an unknown `name` is
1868
- # provided in the requirements for an
1869
- # Action, the server SHOULD
1870
- # reject the execution request. If permitted by the server, the same `name`
1871
- # may occur multiple times.
1872
- # The server is also responsible for specifying the interpretation of
1873
- # property `value`s. For instance, a property describing how much RAM must be
1874
- # available may be interpreted as allowing a worker with 16GB to fulfill a
1875
- # request for 8GB, while a property describing the OS environment on which
1876
- # the action must be performed may require an exact match with the worker's
1877
- # OS.
1878
- # The server MAY use the `value` of one or more properties to determine how
1879
- # it sets up the execution environment, such as by making specific system
1880
- # files available to the worker.
1681
+ # provided in the requirements for an Action, the server SHOULD reject the
1682
+ # execution request. If permitted by the server, the same `name` may occur
1683
+ # multiple times. The server is also responsible for specifying the
1684
+ # interpretation of property `value`s. For instance, a property describing how
1685
+ # much RAM must be available may be interpreted as allowing a worker with 16GB
1686
+ # to fulfill a request for 8GB, while a property describing the OS environment
1687
+ # on which the action must be performed may require an exact match with the
1688
+ # worker's OS. The server MAY use the `value` of one or more properties to
1689
+ # determine how it sets up the execution environment, such as by making specific
1690
+ # system files available to the worker.
1881
1691
  class BuildBazelRemoteExecutionV2PlatformProperty
1882
1692
  include Google::Apis::Core::Hashable
1883
1693
 
@@ -1902,9 +1712,8 @@ module Google
1902
1712
  end
1903
1713
  end
1904
1714
 
1905
- # Allowed values for priority in
1906
- # ResultsCachePolicy
1907
- # Used for querying both cache and execution valid priority ranges.
1715
+ # Allowed values for priority in ResultsCachePolicy Used for querying both cache
1716
+ # and execution valid priority ranges.
1908
1717
  class BuildBazelRemoteExecutionV2PriorityCapabilities
1909
1718
  include Google::Apis::Core::Hashable
1910
1719
 
@@ -1951,27 +1760,25 @@ module Google
1951
1760
  # An optional Metadata to attach to any RPC request to tell the server about an
1952
1761
  # external context of the request. The server may use this for logging or other
1953
1762
  # purposes. To use it, the client attaches the header to the call using the
1954
- # canonical proto serialization:
1955
- # * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
1956
- # * contents: the base64 encoded binary `RequestMetadata` message.
1957
- # Note: the gRPC library serializes binary headers encoded in base 64 by
1958
- # default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
1959
- # requests).
1960
- # Therefore, if the gRPC library is used to pass/retrieve this
1763
+ # canonical proto serialization: * name: `build.bazel.remote.execution.v2.
1764
+ # requestmetadata-bin` * contents: the base64 encoded binary `RequestMetadata`
1765
+ # message. Note: the gRPC library serializes binary headers encoded in base 64
1766
+ # by default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
1767
+ # requests). Therefore, if the gRPC library is used to pass/retrieve this
1961
1768
  # metadata, the user may ignore the base64 encoding and assume it is simply
1962
1769
  # serialized as a binary message.
1963
1770
  class BuildBazelRemoteExecutionV2RequestMetadata
1964
1771
  include Google::Apis::Core::Hashable
1965
1772
 
1966
- # An identifier that ties multiple requests to the same action.
1967
- # For example, multiple requests to the CAS, Action Cache, and Execution
1968
- # API are used in order to compile foo.cc.
1773
+ # An identifier that ties multiple requests to the same action. For example,
1774
+ # multiple requests to the CAS, Action Cache, and Execution API are used in
1775
+ # order to compile foo.cc.
1969
1776
  # Corresponds to the JSON property `actionId`
1970
1777
  # @return [String]
1971
1778
  attr_accessor :action_id
1972
1779
 
1973
- # An identifier to tie multiple tool invocations together. For example,
1974
- # runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
1780
+ # An identifier to tie multiple tool invocations together. For example, runs of
1781
+ # foo_test, bar_test and baz_test on a post-submit of a given patch.
1975
1782
  # Corresponds to the JSON property `correlatedInvocationsId`
1976
1783
  # @return [String]
1977
1784
  attr_accessor :correlated_invocations_id
@@ -1981,8 +1788,8 @@ module Google
1981
1788
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ToolDetails]
1982
1789
  attr_accessor :tool_details
1983
1790
 
1984
- # An identifier that ties multiple actions together to a final result.
1985
- # For example, multiple actions are required to build and run foo_test.
1791
+ # An identifier that ties multiple actions together to a final result. For
1792
+ # example, multiple actions are required to build and run foo_test.
1986
1793
  # Corresponds to the JSON property `toolInvocationId`
1987
1794
  # @return [String]
1988
1795
  attr_accessor :tool_invocation_id
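Attaching this over gRPC means serializing the message and sending it under the `-bin` header key; the gRPC library performs the base64 leg itself. A sketch assuming protobuf-generated Ruby bindings for the remote-execution protos (this REST-based gem does not ship them):

```ruby
require "securerandom"

# `RequestMetadata` / `ToolDetails` are assumed classes generated from
# build/bazel/remote/execution/v2/remote_execution.proto; the two IDs are
# assumed computed elsewhere.
md = RequestMetadata.new(
  tool_details: ToolDetails.new(tool_name: "example-builder", tool_version: "1.0"),
  action_id: current_action_id,        # same ID across CAS/cache/execute calls
  tool_invocation_id: SecureRandom.uuid,
  correlated_invocations_id: run_id    # e.g. one ID per post-submit run
)
grpc_headers = {
  # gRPC base64-encodes `-bin` headers on the wire automatically.
  "build.bazel.remote.execution.v2.requestmetadata-bin" => RequestMetadata.encode(md)
}
```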
@@ -2006,12 +1813,12 @@ module Google
2006
1813
  include Google::Apis::Core::Hashable
2007
1814
 
2008
1815
  # The priority (relative importance) of this content in the overall cache.
2009
- # Generally, a lower value means a longer retention time or other advantage,
2010
- # but the interpretation of a given value is server-dependent. A priority of
2011
- # 0 means a *default* value, decided by the server.
2012
- # The particular semantics of this field is up to the server. In particular,
2013
- # every server will have their own supported range of priorities, and will
2014
- # decide how these map into retention/eviction policy.
1816
+ # Generally, a lower value means a longer retention time or other advantage, but
1817
+ # the interpretation of a given value is server-dependent. A priority of 0 means
1818
+ # a *default* value, decided by the server. The particular semantics of this
1819
+ # field is up to the server. In particular, every server will have their own
1820
+ # supported range of priorities, and will decide how these map into retention/
1821
+ # eviction policy.
2015
1822
  # Corresponds to the JSON property `priority`
2016
1823
  # @return [Fixnum]
2017
1824
  attr_accessor :priority
@@ -2026,8 +1833,7 @@ module Google
2026
1833
  end
2027
1834
  end
2028
1835
 
2029
- # A response message for
2030
- # Capabilities.GetCapabilities.
1836
+ # A response message for Capabilities.GetCapabilities.
2031
1837
  class BuildBazelRemoteExecutionV2ServerCapabilities
2032
1838
  include Google::Apis::Core::Hashable
2033
1839
 
@@ -2084,12 +1890,12 @@ module Google
2084
1890
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2NodeProperty>]
2085
1891
  attr_accessor :node_properties
2086
1892
 
2087
- # The target path of the symlink. The path separator is a forward slash `/`.
2088
- # The target path can be relative to the parent directory of the symlink or
2089
- # it can be an absolute path starting with `/`. Support for absolute paths
2090
- # can be checked using the Capabilities
2091
- # API. The canonical form forbids the substrings `/./` and `//` in the target
2092
- # path. `..` components are allowed anywhere in the target path.
1893
+ # The target path of the symlink. The path separator is a forward slash `/`. The
1894
+ # target path can be relative to the parent directory of the symlink or it can
1895
+ # be an absolute path starting with `/`. Support for absolute paths can be
1896
+ # checked using the Capabilities API. The canonical form forbids the substrings `
1897
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
1898
+ # target path.
2093
1899
  # Corresponds to the JSON property `target`
2094
1900
  # @return [String]
2095
1901
  attr_accessor :target
@@ -2131,90 +1937,45 @@ module Google
2131
1937
  end
2132
1938
  end
2133
1939
 
2134
- # A `Tree` contains all the
2135
- # Directory protos in a
2136
- # single directory Merkle tree, compressed into one message.
1940
+ # A `Tree` contains all the Directory protos in a single directory Merkle tree,
1941
+ # compressed into one message.
2137
1942
  class BuildBazelRemoteExecutionV2Tree
2138
1943
  include Google::Apis::Core::Hashable
2139
1944
 
2140
1945
  # All the child directories: the directories referred to by the root and,
2141
- # recursively, all its children. In order to reconstruct the directory tree,
2142
- # the client must take the digests of each of the child directories and then
2143
- # build up a tree starting from the `root`.
1946
+ # recursively, all its children. In order to reconstruct the directory tree, the
1947
+ # client must take the digests of each of the child directories and then build
1948
+ # up a tree starting from the `root`.
2144
1949
  # Corresponds to the JSON property `children`
2145
1950
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Directory>]
2146
1951
  attr_accessor :children
2147
1952
 
2148
1953
  # A `Directory` represents a directory node in a file tree, containing zero or
2149
- # more children FileNodes,
2150
- # DirectoryNodes and
2151
- # SymlinkNodes.
2152
- # Each `Node` contains its name in the directory, either the digest of its
2153
- # content (either a file blob or a `Directory` proto) or a symlink target, as
2154
- # well as possibly some metadata about the file or directory.
2155
- # In order to ensure that two equivalent directory trees hash to the same
2156
- # value, the following restrictions MUST be obeyed when constructing a
2157
- # a `Directory`:
2158
- # * Every child in the directory must have a path of exactly one segment.
2159
- # Multiple levels of directory hierarchy may not be collapsed.
2160
- # * Each child in the directory must have a unique path segment (file name).
2161
- # Note that while the API itself is case-sensitive, the environment where
2162
- # the Action is executed may or may not be case-sensitive. That is, it is
2163
- # legal to call the API with a Directory that has both "Foo" and "foo" as
2164
- # children, but the Action may be rejected by the remote system upon
2165
- # execution.
2166
- # * The files, directories and symlinks in the directory must each be sorted
2167
- # in lexicographical order by path. The path strings must be sorted by code
2168
- # point, equivalently, by UTF-8 bytes.
2169
- # * The NodeProperties of files,
2170
- # directories, and symlinks must be sorted in lexicographical order by
2171
- # property name.
2172
- # A `Directory` that obeys the restrictions is said to be in canonical form.
2173
- # As an example, the following could be used for a file named `bar` and a
1954
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
1955
+ # its name in the directory, either the digest of its content (either a file
1956
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
1957
+ # metadata about the file or directory. In order to ensure that two equivalent
1958
+ # directory trees hash to the same value, the following restrictions MUST be
1959
+ # obeyed when constructing a `Directory`: * Every child in the directory must
1960
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
1961
+ # not be collapsed. * Each child in the directory must have a unique path
1962
+ # segment (file name). Note that while the API itself is case-sensitive, the
1963
+ # environment where the Action is executed may or may not be case-sensitive.
1964
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
1965
+ # foo" as children, but the Action may be rejected by the remote system upon
1966
+ # execution. * The files, directories and symlinks in the directory must each be
1967
+ # sorted in lexicographical order by path. The path strings must be sorted by
1968
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
1969
+ # directories, and symlinks must be sorted in lexicographical order by property
1970
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
1971
+ # form. As an example, the following could be used for a file named `bar` and a
2174
1972
  # directory named `foo` with an executable file named `baz` (hashes shortened
2175
- # for readability):
2176
- # ```json
2177
- # // (Directory proto)
2178
- # `
2179
- # files: [
2180
- # `
2181
- # name: "bar",
2182
- # digest: `
2183
- # hash: "4a73bc9d03...",
2184
- # size: 65534
2185
- # `,
2186
- # node_properties: [
2187
- # `
2188
- # "name": "MTime",
2189
- # "value": "2017-01-15T01:30:15.01Z"
2190
- # `
2191
- # ]
2192
- # `
2193
- # ],
2194
- # directories: [
2195
- # `
2196
- # name: "foo",
2197
- # digest: `
2198
- # hash: "4cf2eda940...",
2199
- # size: 43
2200
- # `
2201
- # `
2202
- # ]
2203
- # `
2204
- # // (Directory proto with hash "4cf2eda940..." and size 43)
2205
- # `
2206
- # files: [
2207
- # `
2208
- # name: "baz",
2209
- # digest: `
2210
- # hash: "b2c941073e...",
2211
- # size: 1294,
2212
- # `,
2213
- # is_executable: true
2214
- # `
2215
- # ]
2216
- # `
2217
- # ```
1973
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
1974
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
1975
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
1976
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
1977
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
1978
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
2218
1979
  # Corresponds to the JSON property `root`
2219
1980
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Directory]
2220
1981
  attr_accessor :root
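Because `children` is a flat list addressed only by content digest, reconstruction means indexing each child by the digest of its canonical encoding and walking down from `root`. A sketch of the shape of that walk; digesting a `Directory` requires its canonical binary proto encoding, so the `proto_digest_of` helper here is a stated assumption:

```ruby
# Index every child Directory by its digest, then recurse from the root.
by_digest = tree.children.to_h do |dir|
  [proto_digest_of(dir).hash_prop, dir]  # hypothetical canonical-encoding digest
end

walk = lambda do |dir, prefix|
  (dir.files || []).each { |f| puts File.join(prefix, f.name) }
  (dir.directories || []).each do |node|
    walk.call(by_digest.fetch(node.digest.hash_prop), File.join(prefix, node.name))
  end
end
walk.call(tree.root, ".")
```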
@@ -2230,8 +1991,7 @@ module Google
2230
1991
  end
2231
1992
  end
2232
1993
 
2233
- # A request message for
2234
- # WaitExecution.
1994
+ # A request message for WaitExecution.
2235
1995
  class BuildBazelRemoteExecutionV2WaitExecutionRequest
2236
1996
  include Google::Apis::Core::Hashable
2237
1997
 
@@ -2263,9 +2023,9 @@ module Google
2263
2023
  # @return [Fixnum]
2264
2024
  attr_accessor :patch
2265
2025
 
2266
- # The pre-release version. Either this field or major/minor/patch fields
2267
- # must be filled. They are mutually exclusive. Pre-release versions are
2268
- # assumed to be earlier than any released versions.
2026
+ # The pre-release version. Either this field or major/minor/patch fields must be
2027
+ # filled. They are mutually exclusive. Pre-release versions are assumed to be
2028
+ # earlier than any released versions.
2269
2029
  # Corresponds to the JSON property `prerelease`
2270
2030
  # @return [String]
2271
2031
  attr_accessor :prerelease
@@ -2288,8 +2048,8 @@ module Google
2288
2048
  class GoogleDevtoolsRemotebuildbotCommandDurations
2289
2049
  include Google::Apis::Core::Hashable
2290
2050
 
2291
- # The time spent preparing the command to be run in a Docker container
2292
- # (includes pulling the Docker image, if necessary).
2051
+ # The time spent preparing the command to be run in a Docker container (includes
2052
+ # pulling the Docker image, if necessary).
2293
2053
  # Corresponds to the JSON property `dockerPrep`
2294
2054
  # @return [String]
2295
2055
  attr_accessor :docker_prep
@@ -2365,13 +2125,13 @@ module Google
2365
2125
  end
2366
2126
  end
2367
2127
 
2368
- # CommandEvents contains counters for the number of warnings and errors
2369
- # that occurred during the execution of a command.
2128
+ # CommandEvents contains counters for the number of warnings and errors that
2129
+ # occurred during the execution of a command.
2370
2130
  class GoogleDevtoolsRemotebuildbotCommandEvents
2371
2131
  include Google::Apis::Core::Hashable
2372
2132
 
2373
- # Indicates whether we are using a cached Docker image (true) or had to pull
2374
- # the Docker image (false) for this command.
2133
+ # Indicates whether we are using a cached Docker image (true) or had to pull the
2134
+ # Docker image (false) for this command.
2375
2135
  # Corresponds to the JSON property `dockerCacheHit`
2376
2136
  # @return [Boolean]
2377
2137
  attr_accessor :docker_cache_hit
@@ -2541,28 +2301,24 @@ module Google
2541
2301
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateInstanceRequest
2542
2302
  include Google::Apis::Core::Hashable
2543
2303
 
2544
- # Instance conceptually encapsulates all Remote Build Execution resources
2545
- # for remote builds.
2546
- # An instance consists of storage and compute resources (for example,
2547
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2548
- # running remote builds.
2549
- # All Remote Build Execution API calls are scoped to an instance.
2304
+ # Instance conceptually encapsulates all Remote Build Execution resources for
2305
+ # remote builds. An instance consists of storage and compute resources (for
2306
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2307
+ # running remote builds. All Remote Build Execution API calls are scoped to an
2308
+ # instance.
2550
2309
  # Corresponds to the JSON property `instance`
2551
2310
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
2552
2311
  attr_accessor :instance
2553
2312
 
2554
- # ID of the created instance.
2555
- # A valid `instance_id` must:
2556
- # be 6-50 characters long,
2557
- # contain only lowercase letters, digits, hyphens and underscores,
2558
- # start with a lowercase letter, and
2559
- # end with a lowercase letter or a digit.
2313
+ # ID of the created instance. A valid `instance_id` must: be 6-50 characters
2314
+ # long, contain only lowercase letters, digits, hyphens and underscores, start
2315
+ # with a lowercase letter, and end with a lowercase letter or a digit.
2560
2316
  # Corresponds to the JSON property `instanceId`
2561
2317
  # @return [String]
2562
2318
  attr_accessor :instance_id
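These ID rules translate directly into a regular expression a caller can check before issuing the request (the server remains the source of truth). A sketch:

```ruby
# 6-50 characters; lowercase letters, digits, hyphens, underscores; must start
# with a lowercase letter and end with a lowercase letter or digit.
VALID_INSTANCE_ID = /\A[a-z][a-z0-9_-]{4,48}[a-z0-9]\z/

VALID_INSTANCE_ID.match?("default_instance") # => true
VALID_INSTANCE_ID.match?("My-Instance")      # => false (uppercase letters)
VALID_INSTANCE_ID.match?("ab")               # => false (shorter than 6)
```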
2563
2319
 
2564
- # Resource name of the project containing the instance.
2565
- # Format: `projects/[PROJECT_ID]`.
2320
+ # Resource name of the project containing the instance. Format: `projects/[
2321
+ # PROJECT_ID]`.
2566
2322
  # Corresponds to the JSON property `parent`
2567
2323
  # @return [String]
2568
2324
  attr_accessor :parent
@@ -2583,18 +2339,15 @@ module Google
2583
2339
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateWorkerPoolRequest
2584
2340
  include Google::Apis::Core::Hashable
2585
2341
 
2586
- # Resource name of the instance in which to create the new worker pool.
2587
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2342
+ # Resource name of the instance in which to create the new worker pool. Format: `
2343
+ # projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2588
2344
  # Corresponds to the JSON property `parent`
2589
2345
  # @return [String]
2590
2346
  attr_accessor :parent
2591
2347
 
2592
- # ID of the created worker pool.
2593
- # A valid pool ID must:
2594
- # be 6-50 characters long,
2595
- # contain only lowercase letters, digits, hyphens and underscores,
2596
- # start with a lowercase letter, and
2597
- # end with a lowercase letter or a digit.
2348
+ # ID of the created worker pool. A valid pool ID must: be 6-50 characters long,
2349
+ # contain only lowercase letters, digits, hyphens and underscores, start with a
2350
+ # lowercase letter, and end with a lowercase letter or a digit.
2598
2351
  # Corresponds to the JSON property `poolId`
  # @return [String]
  attr_accessor :pool_id
@@ -2620,8 +2373,8 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteInstanceRequest
  include Google::Apis::Core::Hashable

- # Name of the instance to delete.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Name of the instance to delete. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]`.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
@@ -2640,9 +2393,8 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteWorkerPoolRequest
  include Google::Apis::Core::Hashable

- # Name of the worker pool to delete.
- # Format:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
+ # Name of the worker pool to delete. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
@@ -2657,12 +2409,107 @@ module Google
  end
  end

+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
+ # usage time.
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy
+ include Google::Apis::Core::Hashable
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `containerImageSources`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :container_image_sources
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerAddCapabilities`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_add_capabilities
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerChrootPath`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_chroot_path
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerNetwork`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_network
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerPrivileged`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_privileged
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerRunAsRoot`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_run_as_root
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerRuntime`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_runtime
+
+ # Defines whether a feature can be used or what values are accepted.
+ # Corresponds to the JSON property `dockerSiblingContainers`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
+ attr_accessor :docker_sibling_containers
+
+ # linux_isolation allows overriding the docker runtime used for containers
+ # started on Linux.
+ # Corresponds to the JSON property `linuxIsolation`
+ # @return [String]
+ attr_accessor :linux_isolation
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @container_image_sources = args[:container_image_sources] if args.key?(:container_image_sources)
+ @docker_add_capabilities = args[:docker_add_capabilities] if args.key?(:docker_add_capabilities)
+ @docker_chroot_path = args[:docker_chroot_path] if args.key?(:docker_chroot_path)
+ @docker_network = args[:docker_network] if args.key?(:docker_network)
+ @docker_privileged = args[:docker_privileged] if args.key?(:docker_privileged)
+ @docker_run_as_root = args[:docker_run_as_root] if args.key?(:docker_run_as_root)
+ @docker_runtime = args[:docker_runtime] if args.key?(:docker_runtime)
+ @docker_sibling_containers = args[:docker_sibling_containers] if args.key?(:docker_sibling_containers)
+ @linux_isolation = args[:linux_isolation] if args.key?(:linux_isolation)
+ end
+ end
+
+ # Defines whether a feature can be used or what values are accepted.
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature
+ include Google::Apis::Core::Hashable
+
+ # A list of acceptable values. Only effective when the policy is `RESTRICTED`.
+ # Corresponds to the JSON property `allowedValues`
+ # @return [Array<String>]
+ attr_accessor :allowed_values
+
+ # The policy of the feature.
+ # Corresponds to the JSON property `policy`
+ # @return [String]
+ attr_accessor :policy
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @allowed_values = args[:allowed_values] if args.key?(:allowed_values)
+ @policy = args[:policy] if args.key?(:policy)
+ end
+ end
+
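For orientation, a minimal sketch of the two classes added above, restricting where container images may come from. `RESTRICTED` is grounded in the `allowedValues` comment; the registry prefix and the overall shape are illustrative, not defaults.

    require 'google/apis/remotebuildexecution_v2'

    Rbe = Google::Apis::RemotebuildexecutionV2

    # Hypothetical policy: container images may only come from one registry path.
    # Other policy enum values are not shown in this diff.
    feature_policy = Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy.new(
      container_image_sources: Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature.new(
        policy: 'RESTRICTED',
        allowed_values: ['gcr.io/my-project']  # hypothetical registry prefix
      )
    )

The keyword arguments work because `initialize(**args)` forwards to `update!`, as the generated code above shows; the policy then attaches to an instance through the `featurePolicy` field added below.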
  # The request used for `GetInstance`.
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest
  include Google::Apis::Core::Hashable

- # Name of the instance to retrieve.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Name of the instance to retrieve. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]`.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
@@ -2681,9 +2528,8 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetWorkerPoolRequest
  include Google::Apis::Core::Hashable

- # Name of the worker pool to retrieve.
- # Format:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
+ # Name of the worker pool to retrieve. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
@@ -2698,15 +2544,21 @@ module Google
  end
  end

- # Instance conceptually encapsulates all Remote Build Execution resources
- # for remote builds.
- # An instance consists of storage and compute resources (for example,
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
- # running remote builds.
- # All Remote Build Execution API calls are scoped to an instance.
+ # Instance conceptually encapsulates all Remote Build Execution resources for
+ # remote builds. An instance consists of storage and compute resources (for
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
+ # running remote builds. All Remote Build Execution API calls are scoped to an
+ # instance.
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance
  include Google::Apis::Core::Hashable

+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
+ # usage time.
+ # Corresponds to the JSON property `featurePolicy`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy]
+ attr_accessor :feature_policy
+
  # The location is a GCP region. Currently only `us-central1` is supported.
  # Corresponds to the JSON property `location`
  # @return [String]
@@ -2718,10 +2570,9 @@ module Google
  attr_accessor :logging_enabled
  alias_method :logging_enabled?, :logging_enabled

- # Output only. Instance resource name formatted as:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
- # Name should not be populated when creating an instance since it is provided
- # in the `instance_id` field.
+ # Output only. Instance resource name formatted as: `projects/[PROJECT_ID]/
+ # instances/[INSTANCE_ID]`. Name should not be populated when creating an
+ # instance since it is provided in the `instance_id` field.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
@@ -2737,6 +2588,7 @@ module Google

  # Update properties of this object
  def update!(**args)
+ @feature_policy = args[:feature_policy] if args.key?(:feature_policy)
  @location = args[:location] if args.key?(:location)
  @logging_enabled = args[:logging_enabled] if args.key?(:logging_enabled)
  @name = args[:name] if args.key?(:name)
@@ -2748,8 +2600,7 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListInstancesRequest
  include Google::Apis::Core::Hashable

- # Resource name of the project.
- # Format: `projects/[PROJECT_ID]`.
+ # Resource name of the project. Format: `projects/[PROJECT_ID]`.
  # Corresponds to the JSON property `parent`
  # @return [String]
  attr_accessor :parent
@@ -2787,32 +2638,26 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest
  include Google::Apis::Core::Hashable

- # Optional. A filter expression that filters resources listed in
- # the response. The expression must specify the field name, a comparison
- # operator, and the value that you want to use for filtering. The value
- # must be a string, a number, or a boolean. String values are
- # case-insensitive.
- # The comparison operator must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or
- # `<`.
- # The `:` operator can be used with string fields to match substrings.
- # For non-string fields it is equivalent to the `=` operator.
- # The `:*` comparison can be used to test whether a key has been defined.
- # You can also filter on nested fields.
- # To filter on multiple expressions, you can separate expression using
- # `AND` and `OR` operators, using parentheses to specify precedence. If
- # neither operator is specified, `AND` is assumed.
- # Examples:
- # Include only pools with more than 100 reserved workers:
- # `(worker_count > 100) (worker_config.reserved = true)`
- # Include only pools with a certain label or machines of the n1-standard
- # family:
+ # Optional. A filter expression that filters resources listed in the response.
+ # The expression must specify the field name, a comparison operator, and the
+ # value that you want to use for filtering. The value must be a string, a number,
+ # or a boolean. String values are case-insensitive. The comparison operator
+ # must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or `<`. The `:` operator can be
+ # used with string fields to match substrings. For non-string fields it is
+ # equivalent to the `=` operator. The `:*` comparison can be used to test
+ # whether a key has been defined. You can also filter on nested fields. To
+ # filter on multiple expressions, you can separate expressions using `AND` and `
+ # OR` operators, using parentheses to specify precedence. If neither operator is
+ # specified, `AND` is assumed. Examples: Include only pools with more than 100
+ # reserved workers: `(worker_count > 100) (worker_config.reserved = true)`
+ # Include only pools with a certain label or machines of the n1-standard family:
  # `worker_config.labels.key1 : * OR worker_config.machine_type: n1-standard`
  # Corresponds to the JSON property `filter`
  # @return [String]
  attr_accessor :filter

- # Resource name of the instance.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Resource name of the instance. Format: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]`.
  # Corresponds to the JSON property `parent`
  # @return [String]
  attr_accessor :parent
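The filter grammar above composes as follows; this sketch reuses the two documented example expressions, with the bracketed resource IDs left as placeholders.

    Rbe = Google::Apis::RemotebuildexecutionV2

    # Implicit AND between the two parenthesised terms, per the comment above.
    list_req = Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest.new(
      parent: 'projects/[PROJECT_ID]/instances/[INSTANCE_ID]',
      filter: '(worker_count > 100) (worker_config.reserved = true)'
    )

    # `:*` tests key existence; an explicit OR joins the two alternatives.
    list_req.filter = 'worker_config.labels.key1 : * OR worker_config.machine_type: n1-standard'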
@@ -2847,40 +2692,62 @@ module Google
  end
  end

+ # SoleTenancyConfig specifies information required to host a pool on STNs.
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig
+ include Google::Apis::Core::Hashable
+
+ # The sole-tenant node type to host the pool's workers on.
+ # Corresponds to the JSON property `nodeType`
+ # @return [String]
+ attr_accessor :node_type
+
+ # Zone in which STNs are reserved.
+ # Corresponds to the JSON property `nodesZone`
+ # @return [String]
+ attr_accessor :nodes_zone
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @node_type = args[:node_type] if args.key?(:node_type)
+ @nodes_zone = args[:nodes_zone] if args.key?(:nodes_zone)
+ end
+ end
+
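A sketch of the new sole-tenancy message; the node type and zone are assumed values for illustration only (the message reappears on WorkerConfig as the `soleTenancy` field further down in this diff).

    Rbe = Google::Apis::RemotebuildexecutionV2

    sole_tenancy = Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig.new(
      node_type: 'n1-node-96-624',  # assumed Compute Engine sole-tenant node type
      nodes_zone: 'us-central1-a'   # zone where the STNs are reserved
    )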
  # The request used for `UpdateInstance`.
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest
  include Google::Apis::Core::Hashable

- # Instance conceptually encapsulates all Remote Build Execution resources
- # for remote builds.
- # An instance consists of storage and compute resources (for example,
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
- # running remote builds.
- # All Remote Build Execution API calls are scoped to an instance.
+ # Instance conceptually encapsulates all Remote Build Execution resources for
+ # remote builds. An instance consists of storage and compute resources (for
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
+ # running remote builds. All Remote Build Execution API calls are scoped to an
+ # instance.
  # Corresponds to the JSON property `instance`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
  attr_accessor :instance

- # Deprecated, use instance.logging_enabled instead.
- # Whether to enable Stackdriver logging for this instance.
+ # Deprecated, use instance.logging_enabled instead. Whether to enable
+ # Stackdriver logging for this instance.
  # Corresponds to the JSON property `loggingEnabled`
  # @return [Boolean]
  attr_accessor :logging_enabled
  alias_method :logging_enabled?, :logging_enabled

- # Deprecated, use instance.Name instead.
- # Name of the instance to update.
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
+ # Deprecated, use instance.Name instead. Name of the instance to update. Format:
+ # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

- # The update mask applies to instance. For the `FieldMask` definition, see
- # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
- # fieldmask
- # If an empty update_mask is provided, only the non-default valued field in
- # the worker pool field will be updated. Note that in order to update a field
- # to the default value (zero, false, empty string) an explicit update_mask
+ # The update mask applies to instance. For the `FieldMask` definition, see https:
+ # //developers.google.com/protocol-buffers/docs/reference/google.protobuf#
+ # fieldmask If an empty update_mask is provided, only the non-default valued
+ # field in the worker pool field will be updated. Note that in order to update a
+ # field to the default value (zero, false, empty string) an explicit update_mask
  # must be provided.
  # Corresponds to the JSON property `updateMask`
  # @return [String]
@@ -2903,13 +2770,11 @@ module Google
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateWorkerPoolRequest
  include Google::Apis::Core::Hashable

- # The update mask applies to worker_pool. For the `FieldMask` definition,
- # see
+ # The update mask applies to worker_pool. For the `FieldMask` definition, see
  # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
- # fieldmask
- # If an empty update_mask is provided, only the non-default valued field in
- # the worker pool field will be updated. Note that in order to update a field
- # to the default value (zero, false, empty string) an explicit update_mask
+ # fieldmask If an empty update_mask is provided, only the non-default valued
+ # field in the worker pool field will be updated. Note that in order to update a
+ # field to the default value (zero, false, empty string) an explicit update_mask
  # must be provided.
  # Corresponds to the JSON property `updateMask`
  # @return [String]
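Both update requests carry the same FieldMask caveat: a field set to its default value is only written when the mask names it. A minimal sketch using `UpdateInstance` and the fields defined earlier in this file:

    Rbe = Google::Apis::RemotebuildexecutionV2

    update_req = Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest.new(
      instance: Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance.new(
        name: 'projects/[PROJECT_ID]/instances/[INSTANCE_ID]',
        logging_enabled: false
      ),
      # false is a default value, so it is only applied because the mask
      # names the field explicitly.
      update_mask: 'logging_enabled'
    )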
@@ -2931,8 +2796,7 @@ module Google
  end
  end

- # Defines the configuration to be used for a creating workers in
- # the worker pool.
+ # Defines the configuration to be used for creating workers in the worker pool.
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig
  include Google::Apis::Core::Hashable

@@ -2941,34 +2805,31 @@ module Google
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfig]
  attr_accessor :accelerator

- # Required. Size of the disk attached to the worker, in GB.
- # See https://cloud.google.com/compute/docs/disks/
+ # Required. Size of the disk attached to the worker, in GB. See https://cloud.
+ # google.com/compute/docs/disks/
  # Corresponds to the JSON property `diskSizeGb`
  # @return [Fixnum]
  attr_accessor :disk_size_gb

- # Required. Disk Type to use for the worker.
- # See [Storage
- # options](https://cloud.google.com/compute/docs/disks/#introduction).
- # Currently only `pd-standard` and `pd-ssd` are supported.
+ # Required. Disk Type to use for the worker. See [Storage options](https://cloud.
+ # google.com/compute/docs/disks/#introduction). Currently only `pd-standard` and
+ # `pd-ssd` are supported.
  # Corresponds to the JSON property `diskType`
  # @return [String]
  attr_accessor :disk_type

- # Labels associated with the workers.
- # Label keys and values can be no longer than 63 characters, can only contain
- # lowercase letters, numeric characters, underscores and dashes.
- # International letters are permitted. Label keys must start with a letter.
- # Label values are optional.
- # There can not be more than 64 labels per resource.
+ # Labels associated with the workers. Label keys and values can be no longer
+ # than 63 characters, can only contain lowercase letters, numeric characters,
+ # underscores and dashes. International letters are permitted. Label keys must
+ # start with a letter. Label values are optional. There can not be more than 64
+ # labels per resource.
  # Corresponds to the JSON property `labels`
  # @return [Hash<String,String>]
  attr_accessor :labels

- # Required. Machine type of the worker, such as `n1-standard-2`.
- # See https://cloud.google.com/compute/docs/machine-types for a list of
- # supported machine types. Note that `f1-micro` and `g1-small` are not yet
- # supported.
+ # Required. Machine type of the worker, such as `n1-standard-2`. See https://
+ # cloud.google.com/compute/docs/machine-types for a list of supported machine
+ # types. Note that `f1-micro` and `g1-small` are not yet supported.
  # Corresponds to the JSON property `machineType`
  # @return [String]
  attr_accessor :machine_type
@@ -2978,30 +2839,34 @@ module Google
  # @return [Fixnum]
  attr_accessor :max_concurrent_actions

- # Minimum CPU platform to use when creating the worker.
- # See [CPU Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
+ # Minimum CPU platform to use when creating the worker. See [CPU Platforms](
+ # https://cloud.google.com/compute/docs/cpu-platforms).
  # Corresponds to the JSON property `minCpuPlatform`
  # @return [String]
  attr_accessor :min_cpu_platform

- # Determines the type of network access granted to workers. Possible values:
- # - "public": Workers can connect to the public internet.
- # - "private": Workers can only connect to Google APIs and services.
- # - "restricted-private": Workers can only connect to Google APIs that are
- # reachable through `restricted.googleapis.com` (`199.36.153.4/30`).
+ # Determines the type of network access granted to workers. Possible values: - "
+ # public": Workers can connect to the public internet. - "private": Workers can
+ # only connect to Google APIs and services. - "restricted-private": Workers can
+ # only connect to Google APIs that are reachable through `restricted.googleapis.
+ # com` (`199.36.153.4/30`).
  # Corresponds to the JSON property `networkAccess`
  # @return [String]
  attr_accessor :network_access

- # Determines whether the worker is reserved (equivalent to a Compute Engine
- # on-demand VM and therefore won't be preempted).
- # See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more
- # details.
+ # Determines whether the worker is reserved (equivalent to a Compute Engine on-
+ # demand VM and therefore won't be preempted). See [Preemptible VMs](https://
+ # cloud.google.com/preemptible-vms/) for more details.
  # Corresponds to the JSON property `reserved`
  # @return [Boolean]
  attr_accessor :reserved
  alias_method :reserved?, :reserved

+ # SoleTenancyConfig specifies information required to host a pool on STNs.
+ # Corresponds to the JSON property `soleTenancy`
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig]
+ attr_accessor :sole_tenancy
+
  # The name of the image used by each VM.
  # Corresponds to the JSON property `vmImage`
  # @return [String]
@@ -3022,6 +2887,7 @@ module Google
  @min_cpu_platform = args[:min_cpu_platform] if args.key?(:min_cpu_platform)
  @network_access = args[:network_access] if args.key?(:network_access)
  @reserved = args[:reserved] if args.key?(:reserved)
+ @sole_tenancy = args[:sole_tenancy] if args.key?(:sole_tenancy)
  @vm_image = args[:vm_image] if args.key?(:vm_image)
  end
  end
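Pulling the required fields above together, a plausible reserved-worker configuration; the values are examples taken from the field comments, not recommendations.

    Rbe = Google::Apis::RemotebuildexecutionV2

    worker_config = Rbe::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig.new(
      machine_type: 'n1-standard-2',  # required; f1-micro/g1-small unsupported
      disk_type: 'pd-ssd',            # required; pd-standard or pd-ssd only
      disk_size_gb: 100,              # required
      reserved: true,                 # on-demand VM, never preempted
      network_access: 'public',
      labels: { 'team' => 'ci' }      # lowercase, <= 63 chars, key starts with a letter
    )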
@@ -3040,10 +2906,9 @@ module Google
  # @return [String]
  attr_accessor :channel

- # WorkerPool resource name formatted as:
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
- # name should not be populated when creating a worker pool since it is
- # provided in the `poolId` field.
+ # WorkerPool resource name formatted as: `projects/[PROJECT_ID]/instances/[
+ # INSTANCE_ID]/workerpools/[POOL_ID]`. name should not be populated when
+ # creating a worker pool since it is provided in the `poolId` field.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
@@ -3053,14 +2918,13 @@ module Google
  # @return [String]
  attr_accessor :state

- # Defines the configuration to be used for a creating workers in
- # the worker pool.
+ # Defines the configuration to be used for creating workers in the worker pool.
  # Corresponds to the JSON property `workerConfig`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig]
  attr_accessor :worker_config

- # The desired number of workers in the worker pool. Must be a value between
- # 0 and 15000.
+ # The desired number of workers in the worker pool. Must be a value between 0
+ # and 15000.
  # Corresponds to the JSON property `workerCount`
  # @return [Fixnum]
  attr_accessor :worker_count
@@ -3082,14 +2946,13 @@ module Google

  # AdminTemp is a preliminary set of administration tasks. It's called "Temp"
  # because we do not yet know the best way to represent admin tasks; it's
- # possible that this will be entirely replaced in later versions of this API.
- # If this message proves to be sufficient, it will be renamed in the alpha or
- # beta release of this API.
- # This message (suitably marshalled into a protobuf.Any) can be used as the
- # inline_assignment field in a lease; the lease assignment field should simply
- # be `"admin"` in these cases.
- # This message is heavily based on Swarming administration tasks from the LUCI
- # project (http://github.com/luci/luci-py/appengine/swarming).
+ # possible that this will be entirely replaced in later versions of this API. If
+ # this message proves to be sufficient, it will be renamed in the alpha or beta
+ # release of this API. This message (suitably marshalled into a protobuf.Any)
+ # can be used as the inline_assignment field in a lease; the lease assignment
+ # field should simply be `"admin"` in these cases. This message is heavily based
+ # on Swarming administration tasks from the LUCI project (http://github.com/luci/
+ # luci-py/appengine/swarming).
  class GoogleDevtoolsRemoteworkersV1test2AdminTemp
  include Google::Apis::Core::Hashable

@@ -3125,13 +2988,12 @@ module Google
  attr_accessor :contents

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :digest
@@ -3147,27 +3009,26 @@ module Google
  end
  end

- # DEPRECATED - use CommandResult instead.
- # Describes the actual outputs from the task.
+ # DEPRECATED - use CommandResult instead. Describes the actual outputs from the
+ # task.
  class GoogleDevtoolsRemoteworkersV1test2CommandOutputs
  include Google::Apis::Core::Hashable

  # exit_code is only fully reliable if the status' code is OK. If the task
- # exceeded its deadline or was cancelled, the process may still produce an
- # exit code as it is cancelled, and this will be populated, but a successful
- # (zero) is unlikely to be correct unless the status code is OK.
+ # exceeded its deadline or was cancelled, the process may still produce an exit
+ # code as it is cancelled, and this will be populated, but a successful (zero)
+ # is unlikely to be correct unless the status code is OK.
  # Corresponds to the JSON property `exitCode`
  # @return [Fixnum]
  attr_accessor :exit_code

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `outputs`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :outputs
@@ -3183,9 +3044,8 @@ module Google
  end
  end

- # DEPRECATED - use CommandResult instead.
- # Can be used as part of CompleteRequest.metadata, or are part of a more
- # sophisticated message.
+ # DEPRECATED - use CommandResult instead. Can be used as part of CompleteRequest.
+ # metadata, or are part of a more sophisticated message.
  class GoogleDevtoolsRemoteworkersV1test2CommandOverhead
  include Google::Apis::Core::Hashable

@@ -3196,8 +3056,8 @@ module Google
  # @return [String]
  attr_accessor :duration

- # The amount of time *not* spent executing the command (ie
- # uploading/downloading files).
+ # The amount of time *not* spent executing the command (ie uploading/downloading
+ # files).
  # Corresponds to the JSON property `overhead`
  # @return [String]
  attr_accessor :overhead
@@ -3225,46 +3085,44 @@ module Google
  # @return [String]
  attr_accessor :duration

- # The exit code of the process. An exit code of "0" should only be trusted if
- # `status` has a code of OK (otherwise it may simply be unset).
+ # The exit code of the process. An exit code of "0" should only be trusted if `
+ # status` has a code of OK (otherwise it may simply be unset).
  # Corresponds to the JSON property `exitCode`
  # @return [Fixnum]
  attr_accessor :exit_code

- # Implementation-dependent metadata about the task. Both servers and bots
- # may define messages which can be encoded here; bots are free to provide
- # metadata in multiple formats, and servers are free to choose one or more
- # of the values to process and ignore others. In particular, it is *not*
- # considered an error for the bot to provide the server with a field that it
- # doesn't know about.
+ # Implementation-dependent metadata about the task. Both servers and bots may
+ # define messages which can be encoded here; bots are free to provide metadata
+ # in multiple formats, and servers are free to choose one or more of the values
+ # to process and ignore others. In particular, it is *not* considered an error
+ # for the bot to provide the server with a field that it doesn't know about.
  # Corresponds to the JSON property `metadata`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :metadata

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `outputs`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :outputs

- # The amount of time *not* spent executing the command (ie
- # uploading/downloading files).
+ # The amount of time *not* spent executing the command (ie uploading/downloading
+ # files).
  # Corresponds to the JSON property `overhead`
  # @return [String]
  attr_accessor :overhead

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `status`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
  attr_accessor :status
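Given the caveats on `exitCode` above, a consumer should gate on the status code before trusting the exit code; a sketch, treating an absent status as OK per proto3 default semantics:

    # `result` is a GoogleDevtoolsRemoteworkersV1test2CommandResult.
    if result.status.nil? || result.status.code.to_i.zero?
      puts "task exited with code #{result.exit_code}"
    else
      warn "task failed (#{result.status.code}): #{result.status.message}"
    end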
@@ -3320,14 +3178,13 @@ module Google
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskInputs
  include Google::Apis::Core::Hashable

- # The command itself to run (e.g., argv).
- # This field should be passed directly to the underlying operating system,
- # and so it must be sensible to that operating system. For example, on
- # Windows, the first argument might be "C:\Windows\System32\ping.exe" -
- # that is, using drive letters and backslashes. A command for a *nix
- # system, on the other hand, would use forward slashes.
- # All other fields in the RWAPI must consistently use forward slashes,
- # since those fields may be interpretted by both the service and the bot.
+ # The command itself to run (e.g., argv). This field should be passed directly
+ # to the underlying operating system, and so it must be sensible to that
+ # operating system. For example, on Windows, the first argument might be "C:\
+ # Windows\System32\ping.exe" - that is, using drive letters and backslashes. A
+ # command for a *nix system, on the other hand, would use forward slashes. All
+ # other fields in the RWAPI must consistently use forward slashes, since those
+ # fields may be interpreted by both the service and the bot.
  # Corresponds to the JSON property `arguments`
  # @return [Array<String>]
  attr_accessor :arguments
@@ -3337,31 +3194,29 @@ module Google
  # @return [Array<Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputsEnvironmentVariable>]
  attr_accessor :environment_variables

- # The input filesystem to be set up prior to the task beginning. The
- # contents should be a repeated set of FileMetadata messages though other
- # formats are allowed if better for the implementation (eg, a LUCI-style
- # .isolated file).
- # This field is repeated since implementations might want to cache the
- # metadata, in which case it may be useful to break up portions of the
- # filesystem that change frequently (eg, specific input files) from those
- # that don't (eg, standard header files).
+ # The input filesystem to be set up prior to the task beginning. The contents
+ # should be a repeated set of FileMetadata messages though other formats are
+ # allowed if better for the implementation (eg, a LUCI-style .isolated file).
+ # This field is repeated since implementations might want to cache the metadata,
+ # in which case it may be useful to break up portions of the filesystem that
+ # change frequently (eg, specific input files) from those that don't (eg,
+ # standard header files).
  # Corresponds to the JSON property `files`
  # @return [Array<Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest>]
  attr_accessor :files

- # Inline contents for blobs expected to be needed by the bot to execute the
- # task. For example, contents of entries in `files` or blobs that are
- # indirectly referenced by an entry there.
- # The bot should check against this list before downloading required task
- # inputs to reduce the number of communications between itself and the
- # remote CAS server.
+ # Inline contents for blobs expected to be needed by the bot to execute the task.
+ # For example, contents of entries in `files` or blobs that are indirectly
+ # referenced by an entry there. The bot should check against this list before
+ # downloading required task inputs to reduce the number of communications
+ # between itself and the remote CAS server.
  # Corresponds to the JSON property `inlineBlobs`
  # @return [Array<Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Blob>]
  attr_accessor :inline_blobs

- # Directory from which a command is executed. It is a relative directory
- # with respect to the bot's working directory (i.e., "./"). If it is
- # non-empty, then it must exist under "./". Otherwise, "./" will be used.
+ # Directory from which a command is executed. It is a relative directory with
+ # respect to the bot's working directory (i.e., "./"). If it is non-empty, then
+ # it must exist under "./". Otherwise, "./" will be used.
  # Corresponds to the JSON property `workingDirectory`
  # @return [String]
  attr_accessor :working_directory
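A sketch of the OS-specific rule above: only `arguments` uses native path separators, while every other field sticks to forward slashes. The concrete paths are invented for illustration.

    Rbe = Google::Apis::RemotebuildexecutionV2

    inputs = Rbe::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputs.new(
      # argv is passed to the OS verbatim, so backslashes are fine on Windows.
      arguments: ['C:\\Windows\\System32\\ping.exe', '-n', '1', 'localhost'],
      # All other RWAPI fields must use forward slashes.
      working_directory: 'src/build'
    )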
@@ -3409,32 +3264,32 @@ module Google
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskOutputs
  include Google::Apis::Core::Hashable

- # A list of expected directories, relative to the execution root. All paths
- # MUST be delimited by forward slashes.
+ # A list of expected directories, relative to the execution root. All paths MUST
+ # be delimited by forward slashes.
  # Corresponds to the JSON property `directories`
  # @return [Array<String>]
  attr_accessor :directories

- # A list of expected files, relative to the execution root. All paths
- # MUST be delimited by forward slashes.
+ # A list of expected files, relative to the execution root. All paths MUST be
+ # delimited by forward slashes.
  # Corresponds to the JSON property `files`
  # @return [Array<String>]
  attr_accessor :files

- # The destination to which any stderr should be sent. The method by which
- # the bot should send the stream contents to that destination is not
- # defined in this API. As examples, the destination could be a file
- # referenced in the `files` field in this message, or it could be a URI
- # that must be written via the ByteStream API.
+ # The destination to which any stderr should be sent. The method by which the
+ # bot should send the stream contents to that destination is not defined in this
+ # API. As examples, the destination could be a file referenced in the `files`
+ # field in this message, or it could be a URI that must be written via the
+ # ByteStream API.
  # Corresponds to the JSON property `stderrDestination`
  # @return [String]
  attr_accessor :stderr_destination

- # The destination to which any stdout should be sent. The method by which
- # the bot should send the stream contents to that destination is not
- # defined in this API. As examples, the destination could be a file
- # referenced in the `files` field in this message, or it could be a URI
- # that must be written via the ByteStream API.
+ # The destination to which any stdout should be sent. The method by which the
+ # bot should send the stream contents to that destination is not defined in this
+ # API. As examples, the destination could be a file referenced in the `files`
+ # field in this message, or it could be a URI that must be written via the
+ # ByteStream API.
  # Corresponds to the JSON property `stdoutDestination`
  # @return [String]
  attr_accessor :stdout_destination
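A sketch combining the four output fields; routing stdout to a file that is itself listed in `files` is one of the documented options, and all paths are illustrative.

    Rbe = Google::Apis::RemotebuildexecutionV2

    outputs = Rbe::GoogleDevtoolsRemoteworkersV1test2CommandTaskOutputs.new(
      directories: ['out/logs'],                  # forward slashes, relative to root
      files: ['out/result.bin', 'out/stdout.txt'],
      stdout_destination: 'out/stdout.txt',       # a file referenced in `files`
      stderr_destination: 'out/logs/stderr.txt'   # destination syntax is implementation-defined
    )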
@@ -3456,27 +3311,26 @@ module Google
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskTimeouts
  include Google::Apis::Core::Hashable

- # This specifies the maximum time that the task can run, excluding the
- # time required to download inputs or upload outputs. That is, the worker
- # will terminate the task if it runs longer than this.
+ # This specifies the maximum time that the task can run, excluding the time
+ # required to download inputs or upload outputs. That is, the worker will
+ # terminate the task if it runs longer than this.
  # Corresponds to the JSON property `execution`
  # @return [String]
  attr_accessor :execution

- # This specifies the maximum amount of time the task can be idle - that is,
- # go without generating some output in either stdout or stderr. If the
- # process is silent for more than the specified time, the worker will
- # terminate the task.
+ # This specifies the maximum amount of time the task can be idle - that is, go
+ # without generating some output in either stdout or stderr. If the process is
+ # silent for more than the specified time, the worker will terminate the task.
  # Corresponds to the JSON property `idle`
  # @return [String]
  attr_accessor :idle

  # If the execution or IO timeouts are exceeded, the worker will try to
- # gracefully terminate the task and return any existing logs. However,
- # tasks may be hard-frozen in which case this process will fail. This
- # timeout specifies how long to wait for a terminated task to shut down
- # gracefully (e.g. via SIGTERM) before we bring down the hammer (e.g.
- # SIGKILL on *nix, CTRL_BREAK_EVENT on Windows).
+ # gracefully terminate the task and return any existing logs. However, tasks may
+ # be hard-frozen in which case this process will fail. This timeout specifies
+ # how long to wait for a terminated task to shut down gracefully (e.g. via
+ # SIGTERM) before we bring down the hammer (e.g. SIGKILL on *nix,
+ # CTRL_BREAK_EVENT on Windows).
  # Corresponds to the JSON property `shutdown`
  # @return [String]
  attr_accessor :shutdown
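The three timeouts compose like this; the values are illustrative, and the string form assumes the proto3 JSON `Duration` encoding (seconds with an `s` suffix) that these String-typed fields carry over the wire.

    Rbe = Google::Apis::RemotebuildexecutionV2

    timeouts = Rbe::GoogleDevtoolsRemoteworkersV1test2CommandTaskTimeouts.new(
      execution: '3600s',  # wall clock for the command itself, excluding I/O
      idle: '600s',        # max silence on stdout/stderr before termination
      shutdown: '60s'      # grace period between SIGTERM and SIGKILL
    )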
@@ -3494,13 +3348,12 @@ module Google
  end

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  class GoogleDevtoolsRemoteworkersV1test2Digest
  include Google::Apis::Core::Hashable

@@ -3511,9 +3364,9 @@ module Google
  attr_accessor :hash_prop

  # The size of the contents. While this is not strictly required as part of an
- # identifier (after all, any given hash will have exactly one canonical
- # size), it's useful in almost all cases when one might want to send or
- # retrieve blobs of content and is included here for this reason.
+ # identifier (after all, any given hash will have exactly one canonical size),
+ # it's useful in almost all cases when one might want to send or retrieve blobs
+ # of content and is included here for this reason.
  # Corresponds to the JSON property `sizeBytes`
  # @return [Fixnum]
  attr_accessor :size_bytes
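Since a digest is just a hash plus a size, producing one for a local blob looks like this; SHA-256 is an assumption here (the concrete hash function is fixed by the serving instance, not by this message), and the path is a placeholder.

    require 'digest'

    contents = File.binread('out/result.bin')  # placeholder path
    blob_digest = Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest.new(
      hash_prop: Digest::SHA256.hexdigest(contents),  # the generated accessor is hash_prop
      size_bytes: contents.bytesize
    )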
@@ -3561,13 +3414,12 @@ module Google
  include Google::Apis::Core::Hashable

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :digest
@@ -3593,21 +3445,20 @@ module Google
  class GoogleDevtoolsRemoteworkersV1test2FileMetadata
  include Google::Apis::Core::Hashable

- # If the file is small enough, its contents may also or alternatively be
- # listed here.
+ # If the file is small enough, its contents may also or alternatively be listed
+ # here.
  # Corresponds to the JSON property `contents`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
  attr_accessor :contents

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :digest
@@ -3618,11 +3469,11 @@ module Google
  attr_accessor :is_executable
  alias_method :is_executable?, :is_executable

- # The path of this file. If this message is part of the
- # CommandOutputs.outputs fields, the path is relative to the execution root
- # and must correspond to an entry in CommandTask.outputs.files. If this
- # message is part of a Directory message, then the path is relative to the
- # root of that directory. All paths MUST be delimited by forward slashes.
+ # The path of this file. If this message is part of the CommandOutputs.outputs
+ # fields, the path is relative to the execution root and must correspond to an
+ # entry in CommandTask.outputs.files. If this message is part of a Directory
+ # message, then the path is relative to the root of that directory. All paths
+ # MUST be delimited by forward slashes.
  # Corresponds to the JSON property `path`
  # @return [String]
  attr_accessor :path
@@ -3645,47 +3496,45 @@ module Google
  class GoogleLongrunningOperation
  include Google::Apis::Core::Hashable

- # If the value is `false`, it means the operation is still in progress.
- # If `true`, the operation is completed, and either `error` or `response` is
- # available.
+ # If the value is `false`, it means the operation is still in progress. If `true`
+ # , the operation is completed, and either `error` or `response` is available.
  # Corresponds to the JSON property `done`
  # @return [Boolean]
  attr_accessor :done
  alias_method :done?, :done

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
  attr_accessor :error

- # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
  # Corresponds to the JSON property `metadata`
  # @return [Hash<String,Object>]
  attr_accessor :metadata

  # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- # `name` should be a resource name ending with `operations/`unique_id``.
+ # originally returns it. If you use the default HTTP mapping, the `name` should
+ # be a resource name ending with `operations/`unique_id``.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

- # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is `google.
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+ # the response should be the resource. For other methods, the response should
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
+ # example, if the original method name is `TakeSnapshot()`, the inferred
+ # response type is `TakeSnapshotResponse`.
  # Corresponds to the JSON property `response`
  # @return [Hash<String,Object>]
  attr_accessor :response
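The `done`/`error`/`response` contract above reads as: nothing is trustworthy until `done`, and then exactly one of the other two matters. A sketch (the polling transport itself is out of scope here):

    # `op` is a GoogleLongrunningOperation returned by an admin call.
    if op.done?
      if op.error
        warn "operation failed (#{op.error.code}): #{op.error.message}"
      else
        result = op.response  # plain Hash; the type depends on the originating method
      end
    end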
@@ -3704,12 +3553,12 @@ module Google
  end
  end

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  class GoogleRpcStatus
  include Google::Apis::Core::Hashable

@@ -3718,15 +3567,15 @@ module Google
  # @return [Fixnum]
  attr_accessor :code

- # A list of messages that carry the error details. There is a common set of
+ # A list of messages that carry the error details. There is a common set of
  # message types for APIs to use.
  # Corresponds to the JSON property `details`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :details

- # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message
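Tying the three `Status` pieces together, a minimal error-reporting sketch (code 0 is OK in google.rpc.Code):

    # `status` is a GoogleRpcStatus, as returned in Operation#error or
    # CommandResult#status above.
    unless status.nil? || status.code.to_i.zero?
      warn "RPC failed (code #{status.code}): #{status.message}"
      Array(status.details).each { |detail| warn detail.inspect }
    end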