google-api-client 0.43.0 → 0.48.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (964)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/autoapprove.yml +49 -0
  3. data/.github/workflows/release-please.yml +77 -0
  4. data/.gitignore +2 -0
  5. data/.kokoro/trampoline.sh +0 -0
  6. data/CHANGELOG.md +1066 -184
  7. data/Gemfile +1 -0
  8. data/Rakefile +31 -3
  9. data/api_list_config.yaml +8 -0
  10. data/api_names.yaml +1 -0
  11. data/bin/generate-api +77 -15
  12. data/docs/oauth-server.md +4 -6
  13. data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
  14. data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
  15. data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
  16. data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
  17. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  18. data/generated/google/apis/accessapproval_v1.rb +1 -1
  19. data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
  20. data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
  21. data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
  22. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  23. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  24. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  25. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  26. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
  27. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  28. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  29. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
  30. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
  31. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  32. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  33. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  34. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  35. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  36. data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
  37. data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
  38. data/generated/google/apis/admin_directory_v1/service.rb +607 -998
  39. data/generated/google/apis/admin_directory_v1.rb +6 -8
  40. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  41. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  42. data/generated/google/apis/admin_reports_v1.rb +6 -5
  43. data/generated/google/apis/admob_v1/classes.rb +31 -31
  44. data/generated/google/apis/admob_v1/service.rb +2 -1
  45. data/generated/google/apis/admob_v1.rb +6 -2
  46. data/generated/google/apis/adsense_v1_4/service.rb +4 -1
  47. data/generated/google/apis/adsense_v1_4.rb +1 -1
  48. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  49. data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
  50. data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
  51. data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
  52. data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
  53. data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
  54. data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
  55. data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
  56. data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
  57. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  58. data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
  59. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  60. data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
  61. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  62. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  63. data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
  64. data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
  65. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  66. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  67. data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
  68. data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
  69. data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
  70. data/generated/google/apis/apigateway_v1beta.rb +34 -0
  71. data/generated/google/apis/apigee_v1/classes.rb +630 -88
  72. data/generated/google/apis/apigee_v1/representations.rb +209 -1
  73. data/generated/google/apis/apigee_v1/service.rb +401 -74
  74. data/generated/google/apis/apigee_v1.rb +6 -7
  75. data/generated/google/apis/appengine_v1/classes.rb +96 -59
  76. data/generated/google/apis/appengine_v1/representations.rb +17 -0
  77. data/generated/google/apis/appengine_v1/service.rb +38 -47
  78. data/generated/google/apis/appengine_v1.rb +1 -1
  79. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  80. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  81. data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
  82. data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
  83. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  84. data/generated/google/apis/appengine_v1beta.rb +1 -1
  85. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  86. data/generated/google/apis/appsmarket_v2.rb +1 -1
  87. data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
  88. data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
  89. data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
  90. data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
  91. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
  92. data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
  93. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  94. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  95. data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
  96. data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
  97. data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
  98. data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
  99. data/generated/google/apis/bigquery_v2/classes.rb +593 -576
  100. data/generated/google/apis/bigquery_v2/representations.rb +85 -0
  101. data/generated/google/apis/bigquery_v2/service.rb +79 -41
  102. data/generated/google/apis/bigquery_v2.rb +1 -1
  103. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  104. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  105. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  106. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  107. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  108. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  109. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  110. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  111. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  112. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  113. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  114. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  115. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  116. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  117. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  118. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  119. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  120. data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
  121. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  122. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  123. data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
  124. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  125. data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
  126. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  127. data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
  128. data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
  129. data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
  130. data/generated/google/apis/billingbudgets_v1.rb +38 -0
  131. data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
  132. data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
  133. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  134. data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
  135. data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
  136. data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
  137. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  138. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
  139. data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
  140. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
  141. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  142. data/generated/google/apis/books_v1/service.rb +54 -54
  143. data/generated/google/apis/books_v1.rb +1 -1
  144. data/generated/google/apis/calendar_v3/classes.rb +13 -10
  145. data/generated/google/apis/calendar_v3.rb +1 -1
  146. data/generated/google/apis/chat_v1/classes.rb +173 -116
  147. data/generated/google/apis/chat_v1/representations.rb +36 -0
  148. data/generated/google/apis/chat_v1/service.rb +30 -42
  149. data/generated/google/apis/chat_v1.rb +1 -1
  150. data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
  151. data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
  152. data/generated/google/apis/civicinfo_v2.rb +1 -1
  153. data/generated/google/apis/classroom_v1/classes.rb +153 -21
  154. data/generated/google/apis/classroom_v1/representations.rb +43 -0
  155. data/generated/google/apis/classroom_v1/service.rb +240 -0
  156. data/generated/google/apis/classroom_v1.rb +7 -1
  157. data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
  158. data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
  159. data/generated/google/apis/cloudasset_v1/service.rb +296 -167
  160. data/generated/google/apis/cloudasset_v1.rb +1 -1
  161. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  162. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  163. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  164. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  165. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  166. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  167. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
  168. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  169. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  170. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  171. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  172. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  173. data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
  174. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  175. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  176. data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
  177. data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
  178. data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
  179. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  180. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
  181. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
  182. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  183. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  184. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
  185. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
  186. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  187. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  188. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  189. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  190. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  191. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  192. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  193. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  194. data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
  195. data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
  196. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  197. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  198. data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
  199. data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
  200. data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
  201. data/generated/google/apis/cloudidentity_v1.rb +4 -1
  202. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
  203. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
  204. data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
  205. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  206. data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
  207. data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
  208. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  209. data/generated/google/apis/cloudiot_v1.rb +1 -1
  210. data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
  211. data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
  212. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  213. data/generated/google/apis/cloudkms_v1.rb +1 -1
  214. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  215. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  216. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  217. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
  218. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
  219. data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
  220. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  221. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
  222. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
  223. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
  224. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  225. data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
  226. data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
  227. data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
  228. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  229. data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
  230. data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
  231. data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
  232. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  233. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  234. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  235. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  236. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  237. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  238. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  239. data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
  240. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  241. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  242. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  243. data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
  244. data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
  245. data/generated/google/apis/cloudshell_v1/service.rb +198 -25
  246. data/generated/google/apis/cloudshell_v1.rb +1 -1
  247. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  248. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  249. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  250. data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
  251. data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
  252. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  253. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  254. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
  255. data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
  256. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  257. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  258. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
  259. data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
  260. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  261. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  262. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  263. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  264. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  265. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  266. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  267. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  268. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  269. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  270. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  271. data/generated/google/apis/composer_v1/classes.rb +189 -242
  272. data/generated/google/apis/composer_v1/service.rb +79 -150
  273. data/generated/google/apis/composer_v1.rb +1 -1
  274. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  275. data/generated/google/apis/composer_v1beta1/service.rb +94 -179
  276. data/generated/google/apis/composer_v1beta1.rb +1 -1
  277. data/generated/google/apis/compute_alpha/classes.rb +1227 -186
  278. data/generated/google/apis/compute_alpha/representations.rb +235 -8
  279. data/generated/google/apis/compute_alpha/service.rb +2009 -1024
  280. data/generated/google/apis/compute_alpha.rb +1 -1
  281. data/generated/google/apis/compute_beta/classes.rb +1080 -108
  282. data/generated/google/apis/compute_beta/representations.rb +212 -2
  283. data/generated/google/apis/compute_beta/service.rb +1413 -741
  284. data/generated/google/apis/compute_beta.rb +1 -1
  285. data/generated/google/apis/compute_v1/classes.rb +1512 -106
  286. data/generated/google/apis/compute_v1/representations.rb +470 -1
  287. data/generated/google/apis/compute_v1/service.rb +1625 -285
  288. data/generated/google/apis/compute_v1.rb +1 -1
  289. data/generated/google/apis/container_v1/classes.rb +982 -965
  290. data/generated/google/apis/container_v1/representations.rb +60 -0
  291. data/generated/google/apis/container_v1/service.rb +435 -502
  292. data/generated/google/apis/container_v1.rb +1 -1
  293. data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
  294. data/generated/google/apis/container_v1beta1/representations.rb +91 -0
  295. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  296. data/generated/google/apis/container_v1beta1.rb +1 -1
  297. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
  298. data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
  299. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  300. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  301. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
  302. data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
  303. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  304. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  305. data/generated/google/apis/content_v2/classes.rb +515 -1219
  306. data/generated/google/apis/content_v2/service.rb +377 -650
  307. data/generated/google/apis/content_v2.rb +3 -4
  308. data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
  309. data/generated/google/apis/content_v2_1/representations.rb +288 -0
  310. data/generated/google/apis/content_v2_1/service.rb +987 -795
  311. data/generated/google/apis/content_v2_1.rb +3 -4
  312. data/generated/google/apis/customsearch_v1/service.rb +2 -2
  313. data/generated/google/apis/customsearch_v1.rb +1 -1
  314. data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
  315. data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
  316. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  317. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  318. data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
  319. data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
  320. data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
  321. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  322. data/generated/google/apis/datafusion_v1/classes.rb +283 -397
  323. data/generated/google/apis/datafusion_v1/representations.rb +5 -0
  324. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  325. data/generated/google/apis/datafusion_v1.rb +5 -8
  326. data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
  327. data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
  328. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  329. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  330. data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
  331. data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
  332. data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
  333. data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
  334. data/generated/google/apis/dataproc_v1/classes.rb +97 -13
  335. data/generated/google/apis/dataproc_v1/representations.rb +34 -0
  336. data/generated/google/apis/dataproc_v1.rb +1 -1
  337. data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
  338. data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
  339. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  340. data/generated/google/apis/datastore_v1/classes.rb +334 -476
  341. data/generated/google/apis/datastore_v1/service.rb +52 -63
  342. data/generated/google/apis/datastore_v1.rb +1 -1
  343. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  344. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  345. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  346. data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
  347. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  348. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  349. data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
  350. data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
  351. data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
  352. data/generated/google/apis/deploymentmanager_v2.rb +6 -4
  353. data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
  354. data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
  355. data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
  356. data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
  357. data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
  358. data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
  359. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  360. data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
  361. data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
  362. data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
  363. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  364. data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
  365. data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
  366. data/generated/google/apis/dialogflow_v2.rb +1 -1
  367. data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
  368. data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
  369. data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
  370. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  371. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
  372. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
  373. data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
  374. data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
  375. data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
  376. data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
  377. data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
  378. data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
  379. data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
  380. data/generated/google/apis/displayvideo_v1/service.rb +287 -32
  381. data/generated/google/apis/displayvideo_v1.rb +1 -1
  382. data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
  383. data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
  384. data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
  385. data/generated/google/apis/displayvideo_v1beta.rb +38 -0
  386. data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
  387. data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
  388. data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
  389. data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
  390. data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
  391. data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
  392. data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
  393. data/generated/google/apis/displayvideo_v1dev.rb +38 -0
  394. data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
  395. data/generated/google/apis/dlp_v2/representations.rb +16 -0
  396. data/generated/google/apis/dlp_v2/service.rb +962 -905
  397. data/generated/google/apis/dlp_v2.rb +1 -1
  398. data/generated/google/apis/dns_v1/classes.rb +356 -198
  399. data/generated/google/apis/dns_v1/representations.rb +83 -0
  400. data/generated/google/apis/dns_v1/service.rb +83 -98
  401. data/generated/google/apis/dns_v1.rb +2 -2
  402. data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
  403. data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
  404. data/generated/google/apis/dns_v1beta2/service.rb +83 -98
  405. data/generated/google/apis/dns_v1beta2.rb +2 -2
  406. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  407. data/generated/google/apis/docs_v1/service.rb +17 -22
  408. data/generated/google/apis/docs_v1.rb +1 -1
  409. data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
  410. data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
  411. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  412. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  413. data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
  414. data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
  415. data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
  416. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
  417. data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
  418. data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
  419. data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
  420. data/generated/google/apis/domains_v1alpha2.rb +34 -0
  421. data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
  422. data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
  423. data/generated/google/apis/domains_v1beta1/service.rb +805 -0
  424. data/generated/google/apis/domains_v1beta1.rb +34 -0
  425. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  426. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  427. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  428. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
  429. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  430. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  431. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  432. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  433. data/generated/google/apis/drive_v2/classes.rb +18 -7
  434. data/generated/google/apis/drive_v2/representations.rb +1 -0
  435. data/generated/google/apis/drive_v2/service.rb +79 -15
  436. data/generated/google/apis/drive_v2.rb +1 -1
  437. data/generated/google/apis/drive_v3/classes.rb +18 -8
  438. data/generated/google/apis/drive_v3/representations.rb +1 -0
  439. data/generated/google/apis/drive_v3/service.rb +59 -11
  440. data/generated/google/apis/drive_v3.rb +1 -1
  441. data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
  442. data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
  443. data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
  444. data/generated/google/apis/eventarc_v1beta1.rb +34 -0
  445. data/generated/google/apis/file_v1/classes.rb +155 -174
  446. data/generated/google/apis/file_v1/service.rb +43 -52
  447. data/generated/google/apis/file_v1.rb +1 -1
  448. data/generated/google/apis/file_v1beta1/classes.rb +335 -194
  449. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  450. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  451. data/generated/google/apis/file_v1beta1.rb +1 -1
  452. data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
  453. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  454. data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
  455. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  456. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  457. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
  458. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
  459. data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
  460. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  461. data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
  462. data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
  463. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  464. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  465. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  466. data/generated/google/apis/firebaserules_v1.rb +1 -1
  467. data/generated/google/apis/firestore_v1/classes.rb +406 -502
  468. data/generated/google/apis/firestore_v1/service.rb +165 -201
  469. data/generated/google/apis/firestore_v1.rb +1 -1
  470. data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
  471. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  472. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  473. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  474. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  475. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  476. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  477. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  478. data/generated/google/apis/fitness_v1/service.rb +628 -0
  479. data/generated/google/apis/fitness_v1.rb +97 -0
  480. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  481. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  482. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  483. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  484. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  485. data/generated/google/apis/games_management_v1management.rb +2 -3
  486. data/generated/google/apis/games_v1/classes.rb +376 -83
  487. data/generated/google/apis/games_v1/representations.rb +118 -0
  488. data/generated/google/apis/games_v1/service.rb +118 -90
  489. data/generated/google/apis/games_v1.rb +2 -3
  490. data/generated/google/apis/gameservices_v1/classes.rb +22 -14
  491. data/generated/google/apis/gameservices_v1/representations.rb +1 -0
  492. data/generated/google/apis/gameservices_v1/service.rb +54 -51
  493. data/generated/google/apis/gameservices_v1.rb +1 -1
  494. data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
  495. data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
  496. data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
  497. data/generated/google/apis/gameservices_v1beta.rb +1 -1
  498. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  499. data/generated/google/apis/genomics_v1/service.rb +28 -43
  500. data/generated/google/apis/genomics_v1.rb +1 -1
  501. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  502. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  503. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  504. data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
  505. data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
  506. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  507. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  508. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  509. data/generated/google/apis/gmail_v1/service.rb +5 -4
  510. data/generated/google/apis/gmail_v1.rb +1 -1
  511. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
  512. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  513. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  514. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  515. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  516. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  517. data/generated/google/apis/healthcare_v1/classes.rb +637 -826
  518. data/generated/google/apis/healthcare_v1/representations.rb +32 -0
  519. data/generated/google/apis/healthcare_v1/service.rb +842 -855
  520. data/generated/google/apis/healthcare_v1.rb +1 -1
  521. data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
  522. data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
  523. data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
  524. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  525. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  526. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  527. data/generated/google/apis/homegraph_v1.rb +4 -1
  528. data/generated/google/apis/iam_v1/classes.rb +395 -592
  529. data/generated/google/apis/iam_v1/representations.rb +1 -0
  530. data/generated/google/apis/iam_v1/service.rb +427 -555
  531. data/generated/google/apis/iam_v1.rb +1 -1
  532. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  533. data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
  534. data/generated/google/apis/iamcredentials_v1.rb +3 -2
  535. data/generated/google/apis/iap_v1/classes.rb +253 -355
  536. data/generated/google/apis/iap_v1/representations.rb +1 -0
  537. data/generated/google/apis/iap_v1/service.rb +61 -71
  538. data/generated/google/apis/iap_v1.rb +1 -1
  539. data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
  540. data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
  541. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  542. data/generated/google/apis/iap_v1beta1.rb +1 -1
  543. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  544. data/generated/google/apis/indexing_v3.rb +1 -1
  545. data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
  546. data/generated/google/apis/jobs_v2/representations.rb +272 -0
  547. data/generated/google/apis/jobs_v2/service.rb +85 -126
  548. data/generated/google/apis/jobs_v2.rb +1 -1
  549. data/generated/google/apis/jobs_v3/classes.rb +1559 -980
  550. data/generated/google/apis/jobs_v3/representations.rb +272 -0
  551. data/generated/google/apis/jobs_v3/service.rb +101 -139
  552. data/generated/google/apis/jobs_v3.rb +1 -1
  553. data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
  554. data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
  555. data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
  556. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  557. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  558. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  559. data/generated/google/apis/kgsearch_v1.rb +1 -1
  560. data/generated/google/apis/licensing_v1/classes.rb +1 -1
  561. data/generated/google/apis/licensing_v1/service.rb +56 -86
  562. data/generated/google/apis/licensing_v1.rb +4 -3
  563. data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
  564. data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
  565. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  566. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  567. data/generated/google/apis/localservices_v1/classes.rb +426 -0
  568. data/generated/google/apis/localservices_v1/representations.rb +174 -0
  569. data/generated/google/apis/localservices_v1/service.rb +199 -0
  570. data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
  571. data/generated/google/apis/logging_v2/classes.rb +306 -232
  572. data/generated/google/apis/logging_v2/representations.rb +79 -0
  573. data/generated/google/apis/logging_v2/service.rb +3307 -1579
  574. data/generated/google/apis/logging_v2.rb +1 -1
  575. data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
  576. data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
  577. data/generated/google/apis/managedidentities_v1/service.rb +1 -4
  578. data/generated/google/apis/managedidentities_v1.rb +1 -1
  579. data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
  580. data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
  581. data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
  582. data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
  583. data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
  584. data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
  585. data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
  586. data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
  587. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  588. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  589. data/generated/google/apis/manufacturers_v1.rb +1 -1
  590. data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
  591. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  592. data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
  593. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  594. data/generated/google/apis/ml_v1/classes.rb +1122 -1149
  595. data/generated/google/apis/ml_v1/representations.rb +82 -0
  596. data/generated/google/apis/ml_v1/service.rb +194 -253
  597. data/generated/google/apis/ml_v1.rb +1 -1
  598. data/generated/google/apis/monitoring_v1/classes.rb +107 -26
  599. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  600. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  601. data/generated/google/apis/monitoring_v1.rb +1 -1
  602. data/generated/google/apis/monitoring_v3/classes.rb +303 -345
  603. data/generated/google/apis/monitoring_v3/representations.rb +18 -0
  604. data/generated/google/apis/monitoring_v3/service.rb +176 -146
  605. data/generated/google/apis/monitoring_v3.rb +1 -1
  606. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  607. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  608. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  609. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
  610. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
  611. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  612. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  613. data/generated/google/apis/osconfig_v1/classes.rb +154 -902
  614. data/generated/google/apis/osconfig_v1/representations.rb +0 -337
  615. data/generated/google/apis/osconfig_v1/service.rb +26 -31
  616. data/generated/google/apis/osconfig_v1.rb +3 -3
  617. data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
  618. data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
  619. data/generated/google/apis/osconfig_v1beta.rb +3 -3
  620. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  621. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  622. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  623. data/generated/google/apis/oslogin_v1.rb +1 -1
  624. data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
  625. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  626. data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
  627. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  628. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  629. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  630. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  631. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  632. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  633. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  634. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  635. data/generated/google/apis/people_v1/classes.rb +173 -63
  636. data/generated/google/apis/people_v1/representations.rb +41 -0
  637. data/generated/google/apis/people_v1/service.rb +63 -61
  638. data/generated/google/apis/people_v1.rb +1 -1
  639. data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
  640. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  641. data/generated/google/apis/playablelocations_v3.rb +1 -1
  642. data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
  643. data/generated/google/apis/playcustomapp_v1.rb +1 -1
  644. data/generated/google/apis/poly_v1/classes.rb +65 -79
  645. data/generated/google/apis/poly_v1/service.rb +50 -63
  646. data/generated/google/apis/poly_v1.rb +3 -4
  647. data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
  648. data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
  649. data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
  650. data/generated/google/apis/privateca_v1beta1.rb +34 -0
  651. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
  652. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
  653. data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
  654. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  655. data/generated/google/apis/pubsub_v1/classes.rb +399 -518
  656. data/generated/google/apis/pubsub_v1/representations.rb +2 -0
  657. data/generated/google/apis/pubsub_v1/service.rb +221 -247
  658. data/generated/google/apis/pubsub_v1.rb +1 -1
  659. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  660. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  661. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  662. data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
  663. data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
  664. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  665. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  666. data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
  667. data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
  668. data/generated/google/apis/pubsublite_v1/service.rb +558 -0
  669. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  670. data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
  671. data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
  672. data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
  673. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  674. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
  675. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
  676. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  677. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  678. data/generated/google/apis/recommender_v1/classes.rb +1 -1
  679. data/generated/google/apis/recommender_v1/service.rb +4 -2
  680. data/generated/google/apis/recommender_v1.rb +1 -1
  681. data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
  682. data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
  683. data/generated/google/apis/recommender_v1beta1.rb +1 -1
  684. data/generated/google/apis/redis_v1/classes.rb +91 -513
  685. data/generated/google/apis/redis_v1/representations.rb +0 -139
  686. data/generated/google/apis/redis_v1/service.rb +92 -109
  687. data/generated/google/apis/redis_v1.rb +1 -1
  688. data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
  689. data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
  690. data/generated/google/apis/redis_v1beta1/service.rb +126 -109
  691. data/generated/google/apis/redis_v1beta1.rb +1 -1
  692. data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
  693. data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
  694. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  695. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  696. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
  697. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
  698. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  699. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  700. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
  701. data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
  702. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  703. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  704. data/generated/google/apis/reseller_v1/classes.rb +151 -219
  705. data/generated/google/apis/reseller_v1/service.rb +122 -173
  706. data/generated/google/apis/reseller_v1.rb +2 -2
  707. data/generated/google/apis/run_v1/classes.rb +19 -138
  708. data/generated/google/apis/run_v1/representations.rb +1 -62
  709. data/generated/google/apis/run_v1/service.rb +0 -342
  710. data/generated/google/apis/run_v1.rb +1 -1
  711. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  712. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  713. data/generated/google/apis/run_v1alpha1.rb +1 -1
  714. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  715. data/generated/google/apis/run_v1beta1.rb +1 -1
  716. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
  717. data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
  718. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  719. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  720. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  721. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  722. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  723. data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
  724. data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
  725. data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
  726. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  727. data/generated/google/apis/script_v1/classes.rb +88 -111
  728. data/generated/google/apis/script_v1/service.rb +63 -69
  729. data/generated/google/apis/script_v1.rb +1 -1
  730. data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
  731. data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
  732. data/generated/google/apis/searchconsole_v1/service.rb +287 -0
  733. data/generated/google/apis/searchconsole_v1.rb +7 -1
  734. data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
  735. data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
  736. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  737. data/generated/google/apis/secretmanager_v1.rb +1 -1
  738. data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
  739. data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
  740. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  741. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  742. data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
  743. data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
  744. data/generated/google/apis/securitycenter_v1.rb +1 -1
  745. data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
  746. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
  747. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  748. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
  749. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
  750. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  751. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  752. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
  753. data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
  754. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
  755. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  756. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
  757. data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
  758. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  759. data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
  760. data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
  761. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  762. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  763. data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
  764. data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
  765. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  766. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  767. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
  768. data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
  769. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  770. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  771. data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
  772. data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
  773. data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
  774. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  775. data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
  776. data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
  777. data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
  778. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  779. data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
  780. data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
  781. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  782. data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
  783. data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
  784. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  785. data/generated/google/apis/serviceusage_v1.rb +1 -1
  786. data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
  787. data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
  788. data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
  789. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  790. data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
  791. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  792. data/generated/google/apis/sheets_v4/service.rb +113 -149
  793. data/generated/google/apis/sheets_v4.rb +1 -1
  794. data/generated/google/apis/site_verification_v1.rb +1 -1
  795. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  796. data/generated/google/apis/slides_v1/service.rb +23 -30
  797. data/generated/google/apis/slides_v1.rb +1 -1
  798. data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
  799. data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
  800. data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
  801. data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
  802. data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
  803. data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
  804. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  805. data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
  806. data/generated/google/apis/spanner_v1/representations.rb +1 -0
  807. data/generated/google/apis/spanner_v1/service.rb +443 -618
  808. data/generated/google/apis/spanner_v1.rb +1 -1
  809. data/generated/google/apis/speech_v1/classes.rb +174 -220
  810. data/generated/google/apis/speech_v1/service.rb +27 -32
  811. data/generated/google/apis/speech_v1.rb +1 -1
  812. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  813. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  814. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  815. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  816. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  817. data/generated/google/apis/speech_v2beta1.rb +1 -1
  818. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
  819. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
  820. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
  821. data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
  822. data/generated/google/apis/storage_v1/classes.rb +10 -17
  823. data/generated/google/apis/storage_v1/representations.rb +2 -3
  824. data/generated/google/apis/storage_v1/service.rb +3 -2
  825. data/generated/google/apis/storage_v1.rb +1 -1
  826. data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
  827. data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
  828. data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
  829. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  830. data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
  831. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  832. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  833. data/generated/google/apis/sts_v1/classes.rb +121 -0
  834. data/generated/google/apis/sts_v1/representations.rb +59 -0
  835. data/generated/google/apis/sts_v1/service.rb +90 -0
  836. data/generated/google/apis/sts_v1.rb +32 -0
  837. data/generated/google/apis/sts_v1beta/classes.rb +191 -0
  838. data/generated/google/apis/sts_v1beta/representations.rb +61 -0
  839. data/generated/google/apis/sts_v1beta/service.rb +92 -0
  840. data/generated/google/apis/sts_v1beta.rb +32 -0
  841. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  842. data/generated/google/apis/tagmanager_v1.rb +1 -1
  843. data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
  844. data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
  845. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  846. data/generated/google/apis/tagmanager_v2.rb +1 -1
  847. data/generated/google/apis/tasks_v1/classes.rb +21 -22
  848. data/generated/google/apis/tasks_v1/service.rb +19 -19
  849. data/generated/google/apis/tasks_v1.rb +1 -1
  850. data/generated/google/apis/testing_v1/classes.rb +384 -390
  851. data/generated/google/apis/testing_v1/representations.rb +23 -0
  852. data/generated/google/apis/testing_v1/service.rb +22 -28
  853. data/generated/google/apis/testing_v1.rb +1 -1
  854. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  855. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  856. data/generated/google/apis/texttospeech_v1.rb +1 -1
  857. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  858. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  859. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  860. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  861. data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
  862. data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
  863. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  864. data/generated/google/apis/tpu_v1/classes.rb +57 -3
  865. data/generated/google/apis/tpu_v1/representations.rb +19 -0
  866. data/generated/google/apis/tpu_v1/service.rb +8 -8
  867. data/generated/google/apis/tpu_v1.rb +1 -1
  868. data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
  869. data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
  870. data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
  871. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  872. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  873. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  874. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  875. data/generated/google/apis/trafficdirector_v2.rb +34 -0
  876. data/generated/google/apis/translate_v3/classes.rb +151 -177
  877. data/generated/google/apis/translate_v3/service.rb +122 -151
  878. data/generated/google/apis/translate_v3.rb +1 -1
  879. data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
  880. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  881. data/generated/google/apis/translate_v3beta1.rb +1 -1
  882. data/generated/google/apis/vault_v1/classes.rb +413 -103
  883. data/generated/google/apis/vault_v1/representations.rb +162 -0
  884. data/generated/google/apis/vault_v1/service.rb +182 -37
  885. data/generated/google/apis/vault_v1.rb +1 -1
  886. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  887. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  888. data/generated/google/apis/vectortile_v1.rb +1 -1
  889. data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
  890. data/generated/google/apis/verifiedaccess_v1.rb +1 -1
  891. data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
  892. data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
  893. data/generated/google/apis/videointelligence_v1/service.rb +38 -77
  894. data/generated/google/apis/videointelligence_v1.rb +1 -1
  895. data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
  896. data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
  897. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  898. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  899. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
  900. data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
  901. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  902. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  903. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
  904. data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
  905. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  906. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  907. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
  908. data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
  909. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  910. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  911. data/generated/google/apis/vision_v1/classes.rb +16 -16
  912. data/generated/google/apis/vision_v1.rb +1 -1
  913. data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
  914. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  915. data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
  916. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  917. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  918. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  919. data/generated/google/apis/webfonts_v1.rb +2 -3
  920. data/generated/google/apis/websecurityscanner_v1.rb +1 -1
  921. data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
  922. data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
  923. data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
  924. data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
  925. data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
  926. data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
  927. data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
  928. data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
  929. data/generated/google/apis/workflows_v1beta/service.rb +438 -0
  930. data/generated/google/apis/workflows_v1beta.rb +35 -0
  931. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  932. data/generated/google/apis/youtube_v3/classes.rb +0 -586
  933. data/generated/google/apis/youtube_v3/representations.rb +0 -269
  934. data/generated/google/apis/youtube_v3/service.rb +3 -120
  935. data/generated/google/apis/youtube_v3.rb +1 -1
  936. data/google-api-client.gemspec +25 -24
  937. data/lib/google/apis/core/api_command.rb +1 -0
  938. data/lib/google/apis/core/http_command.rb +2 -1
  939. data/lib/google/apis/options.rb +8 -5
  940. data/lib/google/apis/version.rb +1 -1
  941. data/synth.py +40 -0
  942. metadata +134 -41
  943. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  944. data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
  945. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  946. data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
  947. data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
  948. data/generated/google/apis/appsactivity_v1/service.rb +0 -126
  949. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  950. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  951. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  952. data/generated/google/apis/dns_v2beta1.rb +0 -43
  953. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  954. data/generated/google/apis/plus_v1/classes.rb +0 -2094
  955. data/generated/google/apis/plus_v1/representations.rb +0 -907
  956. data/generated/google/apis/plus_v1/service.rb +0 -451
  957. data/generated/google/apis/plus_v1.rb +0 -43
  958. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  959. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  960. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  961. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  962. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  963. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
  964. data/generated/google/apis/storage_v1beta2.rb +0 -40
@@ -23,120 +23,107 @@ module Google
23
23
  module RemotebuildexecutionV2
24
24
 
25
25
  # An `Action` captures all the information about an execution which is required
26
- # to reproduce it.
27
- # `Action`s are the core component of the [Execution] service. A single
28
- # `Action` represents a repeatable action that can be performed by the
26
+ # to reproduce it. `Action`s are the core component of the [Execution] service.
27
+ # A single `Action` represents a repeatable action that can be performed by the
29
28
  # execution service. `Action`s can be succinctly identified by the digest of
30
29
  # their wire format encoding and, once an `Action` has been executed, will be
31
30
  # cached in the action cache. Future requests can then use the cached result
32
- # rather than needing to run afresh.
33
- # When a server completes execution of an
34
- # Action, it MAY choose to
35
- # cache the result in
36
- # the ActionCache unless
37
- # `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
38
- # default, future calls to
39
- # Execute the same
40
- # `Action` will also serve their results from the cache. Clients must take care
41
- # to understand the caching behaviour. Ideally, all `Action`s will be
42
- # reproducible so that serving a result from cache is always desirable and
43
- # correct.
31
+ # rather than needing to run afresh. When a server completes execution of an
32
+ # Action, it MAY choose to cache the result in the ActionCache unless `
33
+ # do_not_cache` is `true`. Clients SHOULD expect the server to do so. By default,
34
+ # future calls to Execute the same `Action` will also serve their results from
35
+ # the cache. Clients must take care to understand the caching behaviour. Ideally,
36
+ # all `Action`s will be reproducible so that serving a result from cache is
37
+ # always desirable and correct.
44
38
  class BuildBazelRemoteExecutionV2Action
45
39
  include Google::Apis::Core::Hashable
46
40
 
47
41
  # A content digest. A digest for a given blob consists of the size of the blob
48
- # and its hash. The hash algorithm to use is defined by the server.
49
- # The size is considered to be an integral part of the digest and cannot be
50
- # separated. That is, even if the `hash` field is correctly specified but
51
- # `size_bytes` is not, the server MUST reject the request.
52
- # The reason for including the size in the digest is as follows: in a great
53
- # many cases, the server needs to know the size of the blob it is about to work
54
- # with prior to starting an operation with it, such as flattening Merkle tree
55
- # structures or streaming it to a worker. Technically, the server could
56
- # implement a separate metadata store, but this results in a significantly more
57
- # complicated implementation as opposed to having the client specify the size
58
- # up-front (or storing the size along with the digest in every message where
59
- # digests are embedded). This does mean that the API leaks some implementation
60
- # details of (what we consider to be) a reasonable server implementation, but
61
- # we consider this to be a worthwhile tradeoff.
62
- # When a `Digest` is used to refer to a proto message, it always refers to the
63
- # message in binary encoded form. To ensure consistent hashing, clients and
64
- # servers MUST ensure that they serialize messages according to the following
65
- # rules, even if there are alternate valid encodings for the same message:
66
- # * Fields are serialized in tag order.
67
- # * There are no unknown fields.
68
- # * There are no duplicate fields.
69
- # * Fields are serialized according to the default semantics for their type.
70
- # Most protocol buffer implementations will always follow these rules when
71
- # serializing, but care should be taken to avoid shortcuts. For instance,
72
- # concatenating two messages to merge them may produce duplicate fields.
42
+ # and its hash. The hash algorithm to use is defined by the server. The size is
43
+ # considered to be an integral part of the digest and cannot be separated. That
44
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
45
+ # the server MUST reject the request. The reason for including the size in the
46
+ # digest is as follows: in a great many cases, the server needs to know the size
47
+ # of the blob it is about to work with prior to starting an operation with it,
48
+ # such as flattening Merkle tree structures or streaming it to a worker.
49
+ # Technically, the server could implement a separate metadata store, but this
50
+ # results in a significantly more complicated implementation as opposed to
51
+ # having the client specify the size up-front (or storing the size along with
52
+ # the digest in every message where digests are embedded). This does mean that
53
+ # the API leaks some implementation details of (what we consider to be) a
54
+ # reasonable server implementation, but we consider this to be a worthwhile
55
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
56
+ # refers to the message in binary encoded form. To ensure consistent hashing,
57
+ # clients and servers MUST ensure that they serialize messages according to the
58
+ # following rules, even if there are alternate valid encodings for the same
59
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
60
+ # There are no duplicate fields. * Fields are serialized according to the
61
+ # default semantics for their type. Most protocol buffer implementations will
62
+ # always follow these rules when serializing, but care should be taken to avoid
63
+ # shortcuts. For instance, concatenating two messages to merge them may produce
64
+ # duplicate fields.
73
65
  # Corresponds to the JSON property `commandDigest`
74
66
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
75
67
  attr_accessor :command_digest
76
68
 
77
- # If true, then the `Action`'s result cannot be cached, and in-flight
78
- # requests for the same `Action` may not be merged.
69
+ # If true, then the `Action`'s result cannot be cached, and in-flight requests
70
+ # for the same `Action` may not be merged.
79
71
  # Corresponds to the JSON property `doNotCache`
80
72
  # @return [Boolean]
81
73
  attr_accessor :do_not_cache
82
74
  alias_method :do_not_cache?, :do_not_cache
83
75
 
84
76
  # A content digest. A digest for a given blob consists of the size of the blob
85
- # and its hash. The hash algorithm to use is defined by the server.
86
- # The size is considered to be an integral part of the digest and cannot be
87
- # separated. That is, even if the `hash` field is correctly specified but
88
- # `size_bytes` is not, the server MUST reject the request.
89
- # The reason for including the size in the digest is as follows: in a great
90
- # many cases, the server needs to know the size of the blob it is about to work
91
- # with prior to starting an operation with it, such as flattening Merkle tree
92
- # structures or streaming it to a worker. Technically, the server could
93
- # implement a separate metadata store, but this results in a significantly more
94
- # complicated implementation as opposed to having the client specify the size
95
- # up-front (or storing the size along with the digest in every message where
96
- # digests are embedded). This does mean that the API leaks some implementation
97
- # details of (what we consider to be) a reasonable server implementation, but
98
- # we consider this to be a worthwhile tradeoff.
99
- # When a `Digest` is used to refer to a proto message, it always refers to the
100
- # message in binary encoded form. To ensure consistent hashing, clients and
101
- # servers MUST ensure that they serialize messages according to the following
102
- # rules, even if there are alternate valid encodings for the same message:
103
- # * Fields are serialized in tag order.
104
- # * There are no unknown fields.
105
- # * There are no duplicate fields.
106
- # * Fields are serialized according to the default semantics for their type.
107
- # Most protocol buffer implementations will always follow these rules when
108
- # serializing, but care should be taken to avoid shortcuts. For instance,
109
- # concatenating two messages to merge them may produce duplicate fields.
77
+ # and its hash. The hash algorithm to use is defined by the server. The size is
78
+ # considered to be an integral part of the digest and cannot be separated. That
79
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
80
+ # the server MUST reject the request. The reason for including the size in the
81
+ # digest is as follows: in a great many cases, the server needs to know the size
82
+ # of the blob it is about to work with prior to starting an operation with it,
83
+ # such as flattening Merkle tree structures or streaming it to a worker.
84
+ # Technically, the server could implement a separate metadata store, but this
85
+ # results in a significantly more complicated implementation as opposed to
86
+ # having the client specify the size up-front (or storing the size along with
87
+ # the digest in every message where digests are embedded). This does mean that
88
+ # the API leaks some implementation details of (what we consider to be) a
89
+ # reasonable server implementation, but we consider this to be a worthwhile
90
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
91
+ # refers to the message in binary encoded form. To ensure consistent hashing,
92
+ # clients and servers MUST ensure that they serialize messages according to the
93
+ # following rules, even if there are alternate valid encodings for the same
94
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
95
+ # There are no duplicate fields. * Fields are serialized according to the
96
+ # default semantics for their type. Most protocol buffer implementations will
97
+ # always follow these rules when serializing, but care should be taken to avoid
98
+ # shortcuts. For instance, concatenating two messages to merge them may produce
99
+ # duplicate fields.
110
100
  # Corresponds to the JSON property `inputRootDigest`
111
101
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
112
102
  attr_accessor :input_root_digest
113
103
 
114
- # List of required supported NodeProperty
115
- # keys. In order to ensure that equivalent `Action`s always hash to the same
116
- # value, the supported node properties MUST be lexicographically sorted by name.
117
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
118
- # The interpretation of these properties is server-dependent. If a property is
119
- # not recognized by the server, the server will return an `INVALID_ARGUMENT`
120
- # error.
104
+ # List of required supported NodeProperty keys. In order to ensure that
105
+ # equivalent `Action`s always hash to the same value, the supported node
106
+ # properties MUST be lexicographically sorted by name. Sorting of strings is
107
+ # done by code point, equivalently, by the UTF-8 bytes. The interpretation of
108
+ # these properties is server-dependent. If a property is not recognized by the
109
+ # server, the server will return an `INVALID_ARGUMENT` error.
121
110
  # Corresponds to the JSON property `outputNodeProperties`
122
111
  # @return [Array<String>]
123
112
  attr_accessor :output_node_properties
124
113
 
125
- # A timeout after which the execution should be killed. If the timeout is
126
- # absent, then the client is specifying that the execution should continue
127
- # as long as the server will let it. The server SHOULD impose a timeout if
128
- # the client does not specify one, however, if the client does specify a
129
- # timeout that is longer than the server's maximum timeout, the server MUST
130
- # reject the request.
131
- # The timeout is a part of the
132
- # Action message, and
133
- # therefore two `Actions` with different timeouts are different, even if they
134
- # are otherwise identical. This is because, if they were not, running an
135
- # `Action` with a lower timeout than is required might result in a cache hit
136
- # from an execution run with a longer timeout, hiding the fact that the
137
- # timeout is too short. By encoding it directly in the `Action`, a lower
138
- # timeout will result in a cache miss and the execution timeout will fail
139
- # immediately, rather than whenever the cache entry gets evicted.
114
+ # A timeout after which the execution should be killed. If the timeout is absent,
115
+ # then the client is specifying that the execution should continue as long as
116
+ # the server will let it. The server SHOULD impose a timeout if the client does
117
+ # not specify one, however, if the client does specify a timeout that is longer
118
+ # than the server's maximum timeout, the server MUST reject the request. The
119
+ # timeout is a part of the Action message, and therefore two `Actions` with
120
+ # different timeouts are different, even if they are otherwise identical. This
121
+ # is because, if they were not, running an `Action` with a lower timeout than is
122
+ # required might result in a cache hit from an execution run with a longer
123
+ # timeout, hiding the fact that the timeout is too short. By encoding it
124
+ # directly in the `Action`, a lower timeout will result in a cache miss and the
125
+ # execution timeout will fail immediately, rather than whenever the cache entry
126
+ # gets evicted.
140
127
  # Corresponds to the JSON property `timeout`
141
128
  # @return [String]
142
129
  attr_accessor :timeout
@@ -175,8 +162,7 @@ module Google
175
162
  end
176
163
  end
177
164
 
178
- # An ActionResult represents the result of an
179
- # Action being run.
165
+ # An ActionResult represents the result of an Action being run.
180
166
  class BuildBazelRemoteExecutionV2ActionResult
181
167
  include Google::Apis::Core::Hashable
182
168
 
@@ -190,84 +176,41 @@ module Google
190
176
  # @return [Fixnum]
191
177
  attr_accessor :exit_code
192
178
 
193
- # The output directories of the action. For each output directory requested
194
- # in the `output_directories` or `output_paths` field of the Action, if the
179
+ # The output directories of the action. For each output directory requested in
180
+ # the `output_directories` or `output_paths` field of the Action, if the
195
181
  # corresponding directory existed after the action completed, a single entry
196
- # will be present in the output list, which will contain the digest of a
197
- # Tree message containing the
198
- # directory tree, and the path equal exactly to the corresponding Action
199
- # output_directories member.
200
- # As an example, suppose the Action had an output directory `a/b/dir` and the
201
- # execution produced the following contents in `a/b/dir`: a file named `bar`
202
- # and a directory named `foo` with an executable file named `baz`. Then,
203
- # output_directory will contain (hashes shortened for readability):
204
- # ```json
205
- # // OutputDirectory proto:
206
- # `
207
- # path: "a/b/dir"
208
- # tree_digest: `
209
- # hash: "4a73bc9d03...",
210
- # size: 55
211
- # `
212
- # `
213
- # // Tree proto with hash "4a73bc9d03..." and size 55:
214
- # `
215
- # root: `
216
- # files: [
217
- # `
218
- # name: "bar",
219
- # digest: `
220
- # hash: "4a73bc9d03...",
221
- # size: 65534
222
- # `
223
- # `
224
- # ],
225
- # directories: [
226
- # `
227
- # name: "foo",
228
- # digest: `
229
- # hash: "4cf2eda940...",
230
- # size: 43
231
- # `
232
- # `
233
- # ]
234
- # `
235
- # children : `
236
- # // (Directory proto with hash "4cf2eda940..." and size 43)
237
- # files: [
238
- # `
239
- # name: "baz",
240
- # digest: `
241
- # hash: "b2c941073e...",
242
- # size: 1294,
243
- # `,
244
- # is_executable: true
245
- # `
246
- # ]
247
- # `
248
- # `
249
- # ```
250
- # If an output of the same name as listed in `output_files` of
251
- # the Command was found in `output_directories`, but was not a directory, the
252
- # server will return a FAILED_PRECONDITION.
182
+ # will be present in the output list, which will contain the digest of a Tree
183
+ # message containing the directory tree, and the path equal exactly to the
184
+ # corresponding Action output_directories member. As an example, suppose the
185
+ # Action had an output directory `a/b/dir` and the execution produced the
186
+ # following contents in `a/b/dir`: a file named `bar` and a directory named `foo`
187
+ # with an executable file named `baz`. Then, output_directory will contain (
188
+ # hashes shortened for readability): ```json // OutputDirectory proto: ` path: "
189
+ # a/b/dir" tree_digest: ` hash: "4a73bc9d03...", size: 55 ` ` // Tree proto with
190
+ # hash "4a73bc9d03..." and size 55: ` root: ` files: [ ` name: "bar", digest: `
191
+ # hash: "4a73bc9d03...", size: 65534 ` ` ], directories: [ ` name: "foo", digest:
192
+ # ` hash: "4cf2eda940...", size: 43 ` ` ] ` children : ` // (Directory proto
193
+ # with hash "4cf2eda940..." and size 43) files: [ ` name: "baz", digest: ` hash:
194
+ # "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ` ``` If an output
195
+ # of the same name as listed in `output_files` of the Command was found in `
196
+ # output_directories`, but was not a directory, the server will return a
197
+ # FAILED_PRECONDITION.
253
198
  # Corresponds to the JSON property `outputDirectories`
254
199
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputDirectory>]
255
200
  attr_accessor :output_directories
256
201
 
257
202
  # The output directories of the action that are symbolic links to other
258
203
  # directories. Those may be links to other output directories, or input
259
- # directories, or even absolute paths outside of the working directory,
260
- # if the server supports
261
- # SymlinkAbsolutePathStrategy.ALLOWED.
262
- # For each output directory requested in the `output_directories` field of
263
- # the Action, if the directory existed after the action completed, a
264
- # single entry will be present either in this field, or in the
265
- # `output_directories` field, if the directory was not a symbolic link.
266
- # If an output of the same name was found, but was a symbolic link to a file
267
- # instead of a directory, the server will return a FAILED_PRECONDITION.
268
- # If the action does not produce the requested output, then that output
269
- # will be omitted from the list. The server is free to arrange the output
270
- # list as desired; clients MUST NOT assume that the output list is sorted.
204
+ # directories, or even absolute paths outside of the working directory, if the
205
+ # server supports SymlinkAbsolutePathStrategy.ALLOWED. For each output directory
206
+ # requested in the `output_directories` field of the Action, if the directory
207
+ # existed after the action completed, a single entry will be present either in
208
+ # this field, or in the `output_directories` field, if the directory was not a
209
+ # symbolic link. If an output of the same name was found, but was a symbolic
210
+ # link to a file instead of a directory, the server will return a
211
+ # FAILED_PRECONDITION. If the action does not produce the requested output, then
212
+ # that output will be omitted from the list. The server is free to arrange the
213
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
271
214
  # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
272
215
  # should still populate this field in addition to `output_symlinks`.
273
216
  # Corresponds to the JSON property `outputDirectorySymlinks`
@@ -277,131 +220,119 @@ module Google
277
220
  # The output files of the action that are symbolic links to other files. Those
278
221
  # may be links to other output files, or input files, or even absolute paths
279
222
  # outside of the working directory, if the server supports
280
- # SymlinkAbsolutePathStrategy.ALLOWED.
281
- # For each output file requested in the `output_files` or `output_paths`
282
- # field of the Action, if the corresponding file existed after
283
- # the action completed, a single entry will be present either in this field,
284
- # or in the `output_files` field, if the file was not a symbolic link.
285
- # If an output symbolic link of the same name as listed in `output_files` of
286
- # the Command was found, but its target type was not a regular file, the
287
- # server will return a FAILED_PRECONDITION.
288
- # If the action does not produce the requested output, then that output
289
- # will be omitted from the list. The server is free to arrange the output
290
- # list as desired; clients MUST NOT assume that the output list is sorted.
291
- # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
292
- # should still populate this field in addition to `output_symlinks`.
223
+ # SymlinkAbsolutePathStrategy.ALLOWED. For each output file requested in the `
224
+ # output_files` or `output_paths` field of the Action, if the corresponding file
225
+ # existed after the action completed, a single entry will be present either in
226
+ # this field, or in the `output_files` field, if the file was not a symbolic
227
+ # link. If an output symbolic link of the same name as listed in `output_files`
228
+ # of the Command was found, but its target type was not a regular file, the
229
+ # server will return a FAILED_PRECONDITION. If the action does not produce the
230
+ # requested output, then that output will be omitted from the list. The server
231
+ # is free to arrange the output list as desired; clients MUST NOT assume that
232
+ # the output list is sorted. DEPRECATED as of v2.1. Servers that wish to be
233
+ # compatible with v2.0 API should still populate this field in addition to `
234
+ # output_symlinks`.
293
235
  # Corresponds to the JSON property `outputFileSymlinks`
294
236
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputSymlink>]
295
237
  attr_accessor :output_file_symlinks
296
238
 
297
- # The output files of the action. For each output file requested in the
298
- # `output_files` or `output_paths` field of the Action, if the corresponding
299
- # file existed after the action completed, a single entry will be present
300
- # either in this field, or the `output_file_symlinks` field if the file was
301
- # a symbolic link to another file (`output_symlinks` field after v2.1).
302
- # If an output listed in `output_files` was found, but was a directory rather
303
- # than a regular file, the server will return a FAILED_PRECONDITION.
304
- # If the action does not produce the requested output, then that output
305
- # will be omitted from the list. The server is free to arrange the output
306
- # list as desired; clients MUST NOT assume that the output list is sorted.
239
+ # The output files of the action. For each output file requested in the `
240
+ # output_files` or `output_paths` field of the Action, if the corresponding file
241
+ # existed after the action completed, a single entry will be present either in
242
+ # this field, or the `output_file_symlinks` field if the file was a symbolic
243
+ # link to another file (`output_symlinks` field after v2.1). If an output listed
244
+ # in `output_files` was found, but was a directory rather than a regular file,
245
+ # the server will return a FAILED_PRECONDITION. If the action does not produce
246
+ # the requested output, then that output will be omitted from the list. The
247
+ # server is free to arrange the output list as desired; clients MUST NOT assume
248
+ # that the output list is sorted.
307
249
  # Corresponds to the JSON property `outputFiles`
308
250
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputFile>]
309
251
  attr_accessor :output_files
310
252
 
311
- # New in v2.1: this field will only be populated if the command
312
- # `output_paths` field was used, and not the pre v2.1 `output_files` or
313
- # `output_directories` fields.
314
- # The output paths of the action that are symbolic links to other paths. Those
315
- # may be links to other outputs, or inputs, or even absolute paths
316
- # outside of the working directory, if the server supports
317
- # SymlinkAbsolutePathStrategy.ALLOWED.
318
- # A single entry for each output requested in `output_paths`
319
- # field of the Action, if the corresponding path existed after
320
- # the action completed and was a symbolic link.
321
- # If the action does not produce a requested output, then that output
322
- # will be omitted from the list. The server is free to arrange the output
323
- # list as desired; clients MUST NOT assume that the output list is sorted.
253
+ # New in v2.1: this field will only be populated if the command `output_paths`
254
+ # field was used, and not the pre v2.1 `output_files` or `output_directories`
255
+ # fields. The output paths of the action that are symbolic links to other paths.
256
+ # Those may be links to other outputs, or inputs, or even absolute paths outside
257
+ # of the working directory, if the server supports SymlinkAbsolutePathStrategy.
258
+ # ALLOWED. A single entry for each output requested in `output_paths` field of
259
+ # the Action, if the corresponding path existed after the action completed and
260
+ # was a symbolic link. If the action does not produce a requested output, then
261
+ # that output will be omitted from the list. The server is free to arrange the
262
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
324
263
  # Corresponds to the JSON property `outputSymlinks`
325
264
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2OutputSymlink>]
326
265
  attr_accessor :output_symlinks
327
266
 
328
267
  # A content digest. A digest for a given blob consists of the size of the blob
329
- # and its hash. The hash algorithm to use is defined by the server.
330
- # The size is considered to be an integral part of the digest and cannot be
331
- # separated. That is, even if the `hash` field is correctly specified but
332
- # `size_bytes` is not, the server MUST reject the request.
333
- # The reason for including the size in the digest is as follows: in a great
334
- # many cases, the server needs to know the size of the blob it is about to work
335
- # with prior to starting an operation with it, such as flattening Merkle tree
336
- # structures or streaming it to a worker. Technically, the server could
337
- # implement a separate metadata store, but this results in a significantly more
338
- # complicated implementation as opposed to having the client specify the size
339
- # up-front (or storing the size along with the digest in every message where
340
- # digests are embedded). This does mean that the API leaks some implementation
341
- # details of (what we consider to be) a reasonable server implementation, but
342
- # we consider this to be a worthwhile tradeoff.
343
- # When a `Digest` is used to refer to a proto message, it always refers to the
344
- # message in binary encoded form. To ensure consistent hashing, clients and
345
- # servers MUST ensure that they serialize messages according to the following
346
- # rules, even if there are alternate valid encodings for the same message:
347
- # * Fields are serialized in tag order.
348
- # * There are no unknown fields.
349
- # * There are no duplicate fields.
350
- # * Fields are serialized according to the default semantics for their type.
351
- # Most protocol buffer implementations will always follow these rules when
352
- # serializing, but care should be taken to avoid shortcuts. For instance,
353
- # concatenating two messages to merge them may produce duplicate fields.
268
+ # and its hash. The hash algorithm to use is defined by the server. The size is
269
+ # considered to be an integral part of the digest and cannot be separated. That
270
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
271
+ # the server MUST reject the request. The reason for including the size in the
272
+ # digest is as follows: in a great many cases, the server needs to know the size
273
+ # of the blob it is about to work with prior to starting an operation with it,
274
+ # such as flattening Merkle tree structures or streaming it to a worker.
275
+ # Technically, the server could implement a separate metadata store, but this
276
+ # results in a significantly more complicated implementation as opposed to
277
+ # having the client specify the size up-front (or storing the size along with
278
+ # the digest in every message where digests are embedded). This does mean that
279
+ # the API leaks some implementation details of (what we consider to be) a
280
+ # reasonable server implementation, but we consider this to be a worthwhile
281
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
282
+ # refers to the message in binary encoded form. To ensure consistent hashing,
283
+ # clients and servers MUST ensure that they serialize messages according to the
284
+ # following rules, even if there are alternate valid encodings for the same
285
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
286
+ # There are no duplicate fields. * Fields are serialized according to the
287
+ # default semantics for their type. Most protocol buffer implementations will
288
+ # always follow these rules when serializing, but care should be taken to avoid
289
+ # shortcuts. For instance, concatenating two messages to merge them may produce
290
+ # duplicate fields.
354
291
  # Corresponds to the JSON property `stderrDigest`
355
292
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
356
293
  attr_accessor :stderr_digest
357
294
 
358
- # The standard error buffer of the action. The server SHOULD NOT inline
359
- # stderr unless requested by the client in the
360
- # GetActionResultRequest
361
- # message. The server MAY omit inlining, even if requested, and MUST do so if
362
- # inlining
363
- # would cause the response to exceed message size limits.
295
+ # The standard error buffer of the action. The server SHOULD NOT inline stderr
296
+ # unless requested by the client in the GetActionResultRequest message. The
297
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
298
+ # cause the response to exceed message size limits.
364
299
  # Corresponds to the JSON property `stderrRaw`
365
300
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
366
301
  # @return [String]
367
302
  attr_accessor :stderr_raw
368
303
 
369
304
  # A content digest. A digest for a given blob consists of the size of the blob
370
- # and its hash. The hash algorithm to use is defined by the server.
371
- # The size is considered to be an integral part of the digest and cannot be
372
- # separated. That is, even if the `hash` field is correctly specified but
373
- # `size_bytes` is not, the server MUST reject the request.
374
- # The reason for including the size in the digest is as follows: in a great
375
- # many cases, the server needs to know the size of the blob it is about to work
376
- # with prior to starting an operation with it, such as flattening Merkle tree
377
- # structures or streaming it to a worker. Technically, the server could
378
- # implement a separate metadata store, but this results in a significantly more
379
- # complicated implementation as opposed to having the client specify the size
380
- # up-front (or storing the size along with the digest in every message where
381
- # digests are embedded). This does mean that the API leaks some implementation
382
- # details of (what we consider to be) a reasonable server implementation, but
383
- # we consider this to be a worthwhile tradeoff.
384
- # When a `Digest` is used to refer to a proto message, it always refers to the
385
- # message in binary encoded form. To ensure consistent hashing, clients and
386
- # servers MUST ensure that they serialize messages according to the following
387
- # rules, even if there are alternate valid encodings for the same message:
388
- # * Fields are serialized in tag order.
389
- # * There are no unknown fields.
390
- # * There are no duplicate fields.
391
- # * Fields are serialized according to the default semantics for their type.
392
- # Most protocol buffer implementations will always follow these rules when
393
- # serializing, but care should be taken to avoid shortcuts. For instance,
394
- # concatenating two messages to merge them may produce duplicate fields.
305
+ # and its hash. The hash algorithm to use is defined by the server. The size is
306
+ # considered to be an integral part of the digest and cannot be separated. That
307
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
308
+ # the server MUST reject the request. The reason for including the size in the
309
+ # digest is as follows: in a great many cases, the server needs to know the size
310
+ # of the blob it is about to work with prior to starting an operation with it,
311
+ # such as flattening Merkle tree structures or streaming it to a worker.
312
+ # Technically, the server could implement a separate metadata store, but this
313
+ # results in a significantly more complicated implementation as opposed to
314
+ # having the client specify the size up-front (or storing the size along with
315
+ # the digest in every message where digests are embedded). This does mean that
316
+ # the API leaks some implementation details of (what we consider to be) a
317
+ # reasonable server implementation, but we consider this to be a worthwhile
318
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
319
+ # refers to the message in binary encoded form. To ensure consistent hashing,
320
+ # clients and servers MUST ensure that they serialize messages according to the
321
+ # following rules, even if there are alternate valid encodings for the same
322
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
323
+ # There are no duplicate fields. * Fields are serialized according to the
324
+ # default semantics for their type. Most protocol buffer implementations will
325
+ # always follow these rules when serializing, but care should be taken to avoid
326
+ # shortcuts. For instance, concatenating two messages to merge them may produce
327
+ # duplicate fields.
395
328
  # Corresponds to the JSON property `stdoutDigest`
396
329
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
397
330
  attr_accessor :stdout_digest
398
331
 
399
- # The standard output buffer of the action. The server SHOULD NOT inline
400
- # stdout unless requested by the client in the
401
- # GetActionResultRequest
402
- # message. The server MAY omit inlining, even if requested, and MUST do so if
403
- # inlining
404
- # would cause the response to exceed message size limits.
332
+ # The standard output buffer of the action. The server SHOULD NOT inline stdout
333
+ # unless requested by the client in the GetActionResultRequest message. The
334
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
335
+ # cause the response to exceed message size limits.
405
336
  # Corresponds to the JSON property `stdoutRaw`
406
337
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
407
338
  # @return [String]
@@ -427,8 +358,7 @@ module Google
427
358
  end
428
359
  end
429
360
 
430
- # A request message for
431
- # ContentAddressableStorage.BatchReadBlobs.
361
+ # A request message for ContentAddressableStorage.BatchReadBlobs.
432
362
  class BuildBazelRemoteExecutionV2BatchReadBlobsRequest
433
363
  include Google::Apis::Core::Hashable
434
364
 
@@ -447,8 +377,7 @@ module Google
447
377
  end
448
378
  end
449
379
 
450
- # A response message for
451
- # ContentAddressableStorage.BatchReadBlobs.
380
+ # A response message for ContentAddressableStorage.BatchReadBlobs.
452
381
  class BuildBazelRemoteExecutionV2BatchReadBlobsResponse
453
382
  include Google::Apis::Core::Hashable
454
383
 
@@ -478,41 +407,39 @@ module Google
478
407
  attr_accessor :data
479
408
 
480
409
  # A content digest. A digest for a given blob consists of the size of the blob
481
- # and its hash. The hash algorithm to use is defined by the server.
482
- # The size is considered to be an integral part of the digest and cannot be
483
- # separated. That is, even if the `hash` field is correctly specified but
484
- # `size_bytes` is not, the server MUST reject the request.
485
- # The reason for including the size in the digest is as follows: in a great
486
- # many cases, the server needs to know the size of the blob it is about to work
487
- # with prior to starting an operation with it, such as flattening Merkle tree
488
- # structures or streaming it to a worker. Technically, the server could
489
- # implement a separate metadata store, but this results in a significantly more
490
- # complicated implementation as opposed to having the client specify the size
491
- # up-front (or storing the size along with the digest in every message where
492
- # digests are embedded). This does mean that the API leaks some implementation
493
- # details of (what we consider to be) a reasonable server implementation, but
494
- # we consider this to be a worthwhile tradeoff.
495
- # When a `Digest` is used to refer to a proto message, it always refers to the
496
- # message in binary encoded form. To ensure consistent hashing, clients and
497
- # servers MUST ensure that they serialize messages according to the following
498
- # rules, even if there are alternate valid encodings for the same message:
499
- # * Fields are serialized in tag order.
500
- # * There are no unknown fields.
501
- # * There are no duplicate fields.
502
- # * Fields are serialized according to the default semantics for their type.
503
- # Most protocol buffer implementations will always follow these rules when
504
- # serializing, but care should be taken to avoid shortcuts. For instance,
505
- # concatenating two messages to merge them may produce duplicate fields.
410
+ # and its hash. The hash algorithm to use is defined by the server. The size is
411
+ # considered to be an integral part of the digest and cannot be separated. That
412
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
413
+ # the server MUST reject the request. The reason for including the size in the
414
+ # digest is as follows: in a great many cases, the server needs to know the size
415
+ # of the blob it is about to work with prior to starting an operation with it,
416
+ # such as flattening Merkle tree structures or streaming it to a worker.
417
+ # Technically, the server could implement a separate metadata store, but this
418
+ # results in a significantly more complicated implementation as opposed to
419
+ # having the client specify the size up-front (or storing the size along with
420
+ # the digest in every message where digests are embedded). This does mean that
421
+ # the API leaks some implementation details of (what we consider to be) a
422
+ # reasonable server implementation, but we consider this to be a worthwhile
423
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
424
+ # refers to the message in binary encoded form. To ensure consistent hashing,
425
+ # clients and servers MUST ensure that they serialize messages according to the
426
+ # following rules, even if there are alternate valid encodings for the same
427
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
428
+ # There are no duplicate fields. * Fields are serialized according to the
429
+ # default semantics for their type. Most protocol buffer implementations will
430
+ # always follow these rules when serializing, but care should be taken to avoid
431
+ # shortcuts. For instance, concatenating two messages to merge them may produce
432
+ # duplicate fields.
506
433
  # Corresponds to the JSON property `digest`
507
434
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
508
435
  attr_accessor :digest
509
436
 
510
- # The `Status` type defines a logical error model that is suitable for
511
- # different programming environments, including REST APIs and RPC APIs. It is
512
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
513
- # three pieces of data: error code, error message, and error details.
514
- # You can find out more about this error model and how to work with it in the
515
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
437
+ # The `Status` type defines a logical error model that is suitable for different
438
+ # programming environments, including REST APIs and RPC APIs. It is used by [
439
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
440
+ # data: error code, error message, and error details. You can find out more
441
+ # about this error model and how to work with it in the [API Design Guide](https:
442
+ # //cloud.google.com/apis/design/errors).
516
443
  # Corresponds to the JSON property `status`
517
444
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
518
445
  attr_accessor :status
@@ -529,8 +456,7 @@ module Google
529
456
  end
530
457
  end
531
458
 
532
- # A request message for
533
- # ContentAddressableStorage.BatchUpdateBlobs.
459
+ # A request message for ContentAddressableStorage.BatchUpdateBlobs.
534
460
  class BuildBazelRemoteExecutionV2BatchUpdateBlobsRequest
535
461
  include Google::Apis::Core::Hashable
536
462
 
@@ -560,31 +486,29 @@ module Google
560
486
  attr_accessor :data
561
487
 
562
488
  # A content digest. A digest for a given blob consists of the size of the blob
563
- # and its hash. The hash algorithm to use is defined by the server.
564
- # The size is considered to be an integral part of the digest and cannot be
565
- # separated. That is, even if the `hash` field is correctly specified but
566
- # `size_bytes` is not, the server MUST reject the request.
567
- # The reason for including the size in the digest is as follows: in a great
568
- # many cases, the server needs to know the size of the blob it is about to work
569
- # with prior to starting an operation with it, such as flattening Merkle tree
570
- # structures or streaming it to a worker. Technically, the server could
571
- # implement a separate metadata store, but this results in a significantly more
572
- # complicated implementation as opposed to having the client specify the size
573
- # up-front (or storing the size along with the digest in every message where
574
- # digests are embedded). This does mean that the API leaks some implementation
575
- # details of (what we consider to be) a reasonable server implementation, but
576
- # we consider this to be a worthwhile tradeoff.
577
- # When a `Digest` is used to refer to a proto message, it always refers to the
578
- # message in binary encoded form. To ensure consistent hashing, clients and
579
- # servers MUST ensure that they serialize messages according to the following
580
- # rules, even if there are alternate valid encodings for the same message:
581
- # * Fields are serialized in tag order.
582
- # * There are no unknown fields.
583
- # * There are no duplicate fields.
584
- # * Fields are serialized according to the default semantics for their type.
585
- # Most protocol buffer implementations will always follow these rules when
586
- # serializing, but care should be taken to avoid shortcuts. For instance,
587
- # concatenating two messages to merge them may produce duplicate fields.
489
+ # and its hash. The hash algorithm to use is defined by the server. The size is
490
+ # considered to be an integral part of the digest and cannot be separated. That
491
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
492
+ # the server MUST reject the request. The reason for including the size in the
493
+ # digest is as follows: in a great many cases, the server needs to know the size
494
+ # of the blob it is about to work with prior to starting an operation with it,
495
+ # such as flattening Merkle tree structures or streaming it to a worker.
496
+ # Technically, the server could implement a separate metadata store, but this
497
+ # results in a significantly more complicated implementation as opposed to
498
+ # having the client specify the size up-front (or storing the size along with
499
+ # the digest in every message where digests are embedded). This does mean that
500
+ # the API leaks some implementation details of (what we consider to be) a
501
+ # reasonable server implementation, but we consider this to be a worthwhile
502
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
503
+ # refers to the message in binary encoded form. To ensure consistent hashing,
504
+ # clients and servers MUST ensure that they serialize messages according to the
505
+ # following rules, even if there are alternate valid encodings for the same
506
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
507
+ # There are no duplicate fields. * Fields are serialized according to the
508
+ # default semantics for their type. Most protocol buffer implementations will
509
+ # always follow these rules when serializing, but care should be taken to avoid
510
+ # shortcuts. For instance, concatenating two messages to merge them may produce
511
+ # duplicate fields.
588
512
  # Corresponds to the JSON property `digest`
589
513
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
590
514
  attr_accessor :digest
@@ -600,8 +524,7 @@ module Google
600
524
  end
601
525
  end
602
526
 
603
- # A response message for
604
- # ContentAddressableStorage.BatchUpdateBlobs.
527
+ # A response message for ContentAddressableStorage.BatchUpdateBlobs.
605
528
  class BuildBazelRemoteExecutionV2BatchUpdateBlobsResponse
606
529
  include Google::Apis::Core::Hashable
607
530
 
@@ -625,41 +548,39 @@ module Google
625
548
  include Google::Apis::Core::Hashable
626
549
 
627
550
  # A content digest. A digest for a given blob consists of the size of the blob
628
- # and its hash. The hash algorithm to use is defined by the server.
629
- # The size is considered to be an integral part of the digest and cannot be
630
- # separated. That is, even if the `hash` field is correctly specified but
631
- # `size_bytes` is not, the server MUST reject the request.
632
- # The reason for including the size in the digest is as follows: in a great
633
- # many cases, the server needs to know the size of the blob it is about to work
634
- # with prior to starting an operation with it, such as flattening Merkle tree
635
- # structures or streaming it to a worker. Technically, the server could
636
- # implement a separate metadata store, but this results in a significantly more
637
- # complicated implementation as opposed to having the client specify the size
638
- # up-front (or storing the size along with the digest in every message where
639
- # digests are embedded). This does mean that the API leaks some implementation
640
- # details of (what we consider to be) a reasonable server implementation, but
641
- # we consider this to be a worthwhile tradeoff.
642
- # When a `Digest` is used to refer to a proto message, it always refers to the
643
- # message in binary encoded form. To ensure consistent hashing, clients and
644
- # servers MUST ensure that they serialize messages according to the following
645
- # rules, even if there are alternate valid encodings for the same message:
646
- # * Fields are serialized in tag order.
647
- # * There are no unknown fields.
648
- # * There are no duplicate fields.
649
- # * Fields are serialized according to the default semantics for their type.
650
- # Most protocol buffer implementations will always follow these rules when
651
- # serializing, but care should be taken to avoid shortcuts. For instance,
652
- # concatenating two messages to merge them may produce duplicate fields.
551
+ # and its hash. The hash algorithm to use is defined by the server. The size is
552
+ # considered to be an integral part of the digest and cannot be separated. That
553
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
554
+ # the server MUST reject the request. The reason for including the size in the
555
+ # digest is as follows: in a great many cases, the server needs to know the size
556
+ # of the blob it is about to work with prior to starting an operation with it,
557
+ # such as flattening Merkle tree structures or streaming it to a worker.
558
+ # Technically, the server could implement a separate metadata store, but this
559
+ # results in a significantly more complicated implementation as opposed to
560
+ # having the client specify the size up-front (or storing the size along with
561
+ # the digest in every message where digests are embedded). This does mean that
562
+ # the API leaks some implementation details of (what we consider to be) a
563
+ # reasonable server implementation, but we consider this to be a worthwhile
564
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
565
+ # refers to the message in binary encoded form. To ensure consistent hashing,
566
+ # clients and servers MUST ensure that they serialize messages according to the
567
+ # following rules, even if there are alternate valid encodings for the same
568
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
569
+ # There are no duplicate fields. * Fields are serialized according to the
570
+ # default semantics for their type. Most protocol buffer implementations will
571
+ # always follow these rules when serializing, but care should be taken to avoid
572
+ # shortcuts. For instance, concatenating two messages to merge them may produce
573
+ # duplicate fields.
653
574
  # Corresponds to the JSON property `digest`
654
575
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
655
576
  attr_accessor :digest
656
577
 
657
- # The `Status` type defines a logical error model that is suitable for
658
- # different programming environments, including REST APIs and RPC APIs. It is
659
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
660
- # three pieces of data: error code, error message, and error details.
661
- # You can find out more about this error model and how to work with it in the
662
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
578
+ # The `Status` type defines a logical error model that is suitable for different
579
+ # programming environments, including REST APIs and RPC APIs. It is used by [
580
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
581
+ # data: error code, error message, and error details. You can find out more
582
+ # about this error model and how to work with it in the [API Design Guide](https:
583
+ # //cloud.google.com/apis/design/errors).
663
584
  # Corresponds to the JSON property `status`
664
585
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
665
586
  attr_accessor :status
@@ -684,23 +605,21 @@ module Google
684
605
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ActionCacheUpdateCapabilities]
685
606
  attr_accessor :action_cache_update_capabilities
686
607
 
687
- # Allowed values for priority in
688
- # ResultsCachePolicy
689
- # Used for querying both cache and execution valid priority ranges.
608
+ # Allowed values for priority in ResultsCachePolicy Used for querying both cache
609
+ # and execution valid priority ranges.
690
610
  # Corresponds to the JSON property `cachePriorityCapabilities`
691
611
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2PriorityCapabilities]
692
612
  attr_accessor :cache_priority_capabilities
693
613
 
694
- # All the digest functions supported by the remote cache.
695
- # Remote cache may support multiple digest functions simultaneously.
614
+ # All the digest functions supported by the remote cache. Remote cache may
615
+ # support multiple digest functions simultaneously.
696
616
  # Corresponds to the JSON property `digestFunction`
697
617
  # @return [Array<String>]
698
618
  attr_accessor :digest_function
699
619
 
700
- # Maximum total size of blobs to be uploaded/downloaded using
701
- # batch methods. A value of 0 means no limit is set, although
702
- # in practice there will always be a message size limitation
703
- # of the protocol in use, e.g. GRPC.
620
+ # Maximum total size of blobs to be uploaded/downloaded using batch methods. A
621
+ # value of 0 means no limit is set, although in practice there will always be a
622
+ # message size limitation of the protocol in use, e.g. GRPC.
704
623
  # Corresponds to the JSON property `maxBatchTotalSizeBytes`
705
624
  # @return [Fixnum]
706
625
  attr_accessor :max_batch_total_size_bytes
@@ -724,12 +643,11 @@ module Google
724
643
  end
725
644
  end
726
645
 
727
- # A `Command` is the actual command executed by a worker running an
728
- # Action and specifications of its
729
- # environment.
730
- # Except as otherwise required, the environment (such as which system
731
- # libraries or binaries are available, and what filesystems are mounted where)
732
- # is defined by and specific to the implementation of the remote execution API.
646
+ # A `Command` is the actual command executed by a worker running an Action and
647
+ # specifications of its environment. Except as otherwise required, the
648
+ # environment (such as which system libraries or binaries are available, and
649
+ # what filesystems are mounted where) is defined by and specific to the
650
+ # implementation of the remote execution API.
733
651
  class BuildBazelRemoteExecutionV2Command
734
652
  include Google::Apis::Core::Hashable
735
653
 
@@ -742,105 +660,90 @@ module Google
742
660
 
743
661
  # The environment variables to set when running the program. The worker may
744
662
  # provide its own default environment variables; these defaults can be
745
- # overridden using this field. Additional variables can also be specified.
746
- # In order to ensure that equivalent
747
- # Commands always hash to the same
748
- # value, the environment variables MUST be lexicographically sorted by name.
749
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
663
+ # overridden using this field. Additional variables can also be specified. In
664
+ # order to ensure that equivalent Commands always hash to the same value, the
665
+ # environment variables MUST be lexicographically sorted by name. Sorting of
666
+ # strings is done by code point, equivalently, by the UTF-8 bytes.
750
667
  # Corresponds to the JSON property `environmentVariables`
751
668
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2CommandEnvironmentVariable>]
752
669
  attr_accessor :environment_variables
753
670
 
754
- # A list of the output directories that the client expects to retrieve from
755
- # the action. Only the listed directories will be returned (an entire
756
- # directory structure will be returned as a
757
- # Tree message digest, see
758
- # OutputDirectory), as
759
- # well as files listed in `output_files`. Other files or directories that
760
- # may be created during command execution are discarded.
761
- # The paths are relative to the working directory of the action execution.
762
- # The paths are specified using a single forward slash (`/`) as a path
763
- # separator, even if the execution platform natively uses a different
764
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
765
- # being a relative path. The special value of empty string is allowed,
766
- # although not recommended, and can be used to capture the entire working
767
- # directory tree, including inputs.
768
- # In order to ensure consistent hashing of the same Action, the output paths
769
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
770
- # bytes).
771
- # An output directory cannot be duplicated or have the same path as any of
772
- # the listed output files. An output directory is allowed to be a parent of
773
- # another output directory.
671
+ # A list of the output directories that the client expects to retrieve from the
672
+ # action. Only the listed directories will be returned (an entire directory
673
+ # structure will be returned as a Tree message digest, see OutputDirectory), as
674
+ # well as files listed in `output_files`. Other files or directories that may be
675
+ # created during command execution are discarded. The paths are relative to the
676
+ # working directory of the action execution. The paths are specified using a
677
+ # single forward slash (`/`) as a path separator, even if the execution platform
678
+ # natively uses a different separator. The path MUST NOT include a trailing
679
+ # slash, nor a leading slash, being a relative path. The special value of empty
680
+ # string is allowed, although not recommended, and can be used to capture the
681
+ # entire working directory tree, including inputs. In order to ensure consistent
682
+ # hashing of the same Action, the output paths MUST be sorted lexicographically
683
+ # by code point (or, equivalently, by UTF-8 bytes). An output directory cannot
684
+ # be duplicated or have the same path as any of the listed output files. An
685
+ # output directory is allowed to be a parent of another output directory.
774
686
  # Directories leading up to the output directories (but not the output
775
- # directories themselves) are created by the worker prior to execution, even
776
- # if they are not explicitly part of the input root.
777
- # DEPRECATED since 2.1: Use `output_paths` instead.
687
+ # directories themselves) are created by the worker prior to execution, even if
688
+ # they are not explicitly part of the input root. DEPRECATED since 2.1: Use `
689
+ # output_paths` instead.
778
690
  # Corresponds to the JSON property `outputDirectories`
779
691
  # @return [Array<String>]
780
692
  attr_accessor :output_directories
781
693
 
782
- # A list of the output files that the client expects to retrieve from the
783
- # action. Only the listed files, as well as directories listed in
784
- # `output_directories`, will be returned to the client as output.
785
- # Other files or directories that may be created during command execution
786
- # are discarded.
787
- # The paths are relative to the working directory of the action execution.
788
- # The paths are specified using a single forward slash (`/`) as a path
789
- # separator, even if the execution platform natively uses a different
790
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
791
- # being a relative path.
792
- # In order to ensure consistent hashing of the same Action, the output paths
793
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
794
- # bytes).
795
- # An output file cannot be duplicated, be a parent of another output file, or
796
- # have the same path as any of the listed output directories.
797
- # Directories leading up to the output files are created by the worker prior
798
- # to execution, even if they are not explicitly part of the input root.
799
- # DEPRECATED since v2.1: Use `output_paths` instead.
694
+ # A list of the output files that the client expects to retrieve from the action.
695
+ # Only the listed files, as well as directories listed in `output_directories`,
696
+ # will be returned to the client as output. Other files or directories that may
697
+ # be created during command execution are discarded. The paths are relative to
698
+ # the working directory of the action execution. The paths are specified using a
699
+ # single forward slash (`/`) as a path separator, even if the execution platform
700
+ # natively uses a different separator. The path MUST NOT include a trailing
701
+ # slash, nor a leading slash, being a relative path. In order to ensure
702
+ # consistent hashing of the same Action, the output paths MUST be sorted
703
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes). An output
704
+ # file cannot be duplicated, be a parent of another output file, or have the
705
+ # same path as any of the listed output directories. Directories leading up to
706
+ # the output files are created by the worker prior to execution, even if they
707
+ # are not explicitly part of the input root. DEPRECATED since v2.1: Use `
708
+ # output_paths` instead.
800
709
  # Corresponds to the JSON property `outputFiles`
801
710
  # @return [Array<String>]
802
711
  attr_accessor :output_files
803
712
 
804
- # A list of the output paths that the client expects to retrieve from the
805
- # action. Only the listed paths will be returned to the client as output.
806
- # The type of the output (file or directory) is not specified, and will be
807
- # determined by the server after action execution. If the resulting path is
808
- # a file, it will be returned in an
809
- # OutputFile) typed field.
810
- # If the path is a directory, the entire directory structure will be returned
811
- # as a Tree message digest, see
812
- # OutputDirectory)
813
- # Other files or directories that may be created during command execution
814
- # are discarded.
815
- # The paths are relative to the working directory of the action execution.
816
- # The paths are specified using a single forward slash (`/`) as a path
817
- # separator, even if the execution platform natively uses a different
818
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
819
- # being a relative path.
820
- # In order to ensure consistent hashing of the same Action, the output paths
821
- # MUST be deduplicated and sorted lexicographically by code point (or,
822
- # equivalently, by UTF-8 bytes).
823
- # Directories leading up to the output paths are created by the worker prior
824
- # to execution, even if they are not explicitly part of the input root.
825
- # New in v2.1: this field supersedes the DEPRECATED `output_files` and
826
- # `output_directories` fields. If `output_paths` is used, `output_files` and
827
- # `output_directories` will be ignored!
713
+ # A list of the output paths that the client expects to retrieve from the action.
714
+ # Only the listed paths will be returned to the client as output. The type of
715
+ # the output (file or directory) is not specified, and will be determined by the
716
+ # server after action execution. If the resulting path is a file, it will be
717
+ # returned in an OutputFile) typed field. If the path is a directory, the entire
718
+ # directory structure will be returned as a Tree message digest, see
719
+ # OutputDirectory) Other files or directories that may be created during command
720
+ # execution are discarded. The paths are relative to the working directory of
721
+ # the action execution. The paths are specified using a single forward slash (`/`
722
+ # ) as a path separator, even if the execution platform natively uses a
723
+ # different separator. The path MUST NOT include a trailing slash, nor a leading
724
+ # slash, being a relative path. In order to ensure consistent hashing of the
725
+ # same Action, the output paths MUST be deduplicated and sorted
726
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes).
727
+ # Directories leading up to the output paths are created by the worker prior to
728
+ # execution, even if they are not explicitly part of the input root. New in v2.1:
729
+ # this field supersedes the DEPRECATED `output_files` and `output_directories`
730
+ # fields. If `output_paths` is used, `output_files` and `output_directories`
731
+ # will be ignored!
828
732
  # Corresponds to the JSON property `outputPaths`
829
733
  # @return [Array<String>]
830
734
  attr_accessor :output_paths
831
735
 
832
736
  # A `Platform` is a set of requirements, such as hardware, operating system, or
833
- # compiler toolchain, for an
834
- # Action's execution
835
- # environment. A `Platform` is represented as a series of key-value pairs
836
- # representing the properties that are required of the platform.
737
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
738
+ # represented as a series of key-value pairs representing the properties that
739
+ # are required of the platform.
837
740
  # Corresponds to the JSON property `platform`
838
741
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Platform]
839
742
  attr_accessor :platform
840
743
 
841
- # The working directory, relative to the input root, for the command to run
842
- # in. It must be a directory which exists in the input tree. If it is left
843
- # empty, then the action is run in the input root.
744
+ # The working directory, relative to the input root, for the command to run in.
745
+ # It must be a directory which exists in the input tree. If it is left empty,
746
+ # then the action is run in the input root.
844
747
  # Corresponds to the JSON property `workingDirectory`
845
748
  # @return [String]
846
749
  attr_accessor :working_directory
@@ -888,31 +791,29 @@ module Google
888
791
  end
889
792
 
890
793
  # A content digest. A digest for a given blob consists of the size of the blob
891
- # and its hash. The hash algorithm to use is defined by the server.
892
- # The size is considered to be an integral part of the digest and cannot be
893
- # separated. That is, even if the `hash` field is correctly specified but
894
- # `size_bytes` is not, the server MUST reject the request.
895
- # The reason for including the size in the digest is as follows: in a great
896
- # many cases, the server needs to know the size of the blob it is about to work
897
- # with prior to starting an operation with it, such as flattening Merkle tree
898
- # structures or streaming it to a worker. Technically, the server could
899
- # implement a separate metadata store, but this results in a significantly more
900
- # complicated implementation as opposed to having the client specify the size
901
- # up-front (or storing the size along with the digest in every message where
902
- # digests are embedded). This does mean that the API leaks some implementation
903
- # details of (what we consider to be) a reasonable server implementation, but
904
- # we consider this to be a worthwhile tradeoff.
905
- # When a `Digest` is used to refer to a proto message, it always refers to the
906
- # message in binary encoded form. To ensure consistent hashing, clients and
907
- # servers MUST ensure that they serialize messages according to the following
908
- # rules, even if there are alternate valid encodings for the same message:
909
- # * Fields are serialized in tag order.
910
- # * There are no unknown fields.
911
- # * There are no duplicate fields.
912
- # * Fields are serialized according to the default semantics for their type.
913
- # Most protocol buffer implementations will always follow these rules when
914
- # serializing, but care should be taken to avoid shortcuts. For instance,
915
- # concatenating two messages to merge them may produce duplicate fields.
794
+ # and its hash. The hash algorithm to use is defined by the server. The size is
795
+ # considered to be an integral part of the digest and cannot be separated. That
796
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
797
+ # the server MUST reject the request. The reason for including the size in the
798
+ # digest is as follows: in a great many cases, the server needs to know the size
799
+ # of the blob it is about to work with prior to starting an operation with it,
800
+ # such as flattening Merkle tree structures or streaming it to a worker.
801
+ # Technically, the server could implement a separate metadata store, but this
802
+ # results in a significantly more complicated implementation as opposed to
803
+ # having the client specify the size up-front (or storing the size along with
804
+ # the digest in every message where digests are embedded). This does mean that
805
+ # the API leaks some implementation details of (what we consider to be) a
806
+ # reasonable server implementation, but we consider this to be a worthwhile
807
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
808
+ # refers to the message in binary encoded form. To ensure consistent hashing,
809
+ # clients and servers MUST ensure that they serialize messages according to the
810
+ # following rules, even if there are alternate valid encodings for the same
811
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
812
+ # There are no duplicate fields. * Fields are serialized according to the
813
+ # default semantics for their type. Most protocol buffer implementations will
814
+ # always follow these rules when serializing, but care should be taken to avoid
815
+ # shortcuts. For instance, concatenating two messages to merge them may produce
816
+ # duplicate fields.
916
817
  class BuildBazelRemoteExecutionV2Digest
917
818
  include Google::Apis::Core::Hashable
918
819
 
@@ -939,75 +840,31 @@ module Google
939
840
  end
940
841
 
941
842
  # A `Directory` represents a directory node in a file tree, containing zero or
942
- # more children FileNodes,
943
- # DirectoryNodes and
944
- # SymlinkNodes.
945
- # Each `Node` contains its name in the directory, either the digest of its
946
- # content (either a file blob or a `Directory` proto) or a symlink target, as
947
- # well as possibly some metadata about the file or directory.
948
- # In order to ensure that two equivalent directory trees hash to the same
949
- # value, the following restrictions MUST be obeyed when constructing a
950
- # a `Directory`:
951
- # * Every child in the directory must have a path of exactly one segment.
952
- # Multiple levels of directory hierarchy may not be collapsed.
953
- # * Each child in the directory must have a unique path segment (file name).
954
- # Note that while the API itself is case-sensitive, the environment where
955
- # the Action is executed may or may not be case-sensitive. That is, it is
956
- # legal to call the API with a Directory that has both "Foo" and "foo" as
957
- # children, but the Action may be rejected by the remote system upon
958
- # execution.
959
- # * The files, directories and symlinks in the directory must each be sorted
960
- # in lexicographical order by path. The path strings must be sorted by code
961
- # point, equivalently, by UTF-8 bytes.
962
- # * The NodeProperties of files,
963
- # directories, and symlinks must be sorted in lexicographical order by
964
- # property name.
965
- # A `Directory` that obeys the restrictions is said to be in canonical form.
966
- # As an example, the following could be used for a file named `bar` and a
843
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
844
+ # its name in the directory, either the digest of its content (either a file
845
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
846
+ # metadata about the file or directory. In order to ensure that two equivalent
847
+ # directory trees hash to the same value, the following restrictions MUST be
848
+ # obeyed when constructing a a `Directory`: * Every child in the directory must
849
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
850
+ # not be collapsed. * Each child in the directory must have a unique path
851
+ # segment (file name). Note that while the API itself is case-sensitive, the
852
+ # environment where the Action is executed may or may not be case-sensitive.
853
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
854
+ # foo" as children, but the Action may be rejected by the remote system upon
855
+ # execution. * The files, directories and symlinks in the directory must each be
856
+ # sorted in lexicographical order by path. The path strings must be sorted by
857
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
858
+ # directories, and symlinks must be sorted in lexicographical order by property
859
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
860
+ # form. As an example, the following could be used for a file named `bar` and a
967
861
  # directory named `foo` with an executable file named `baz` (hashes shortened
968
- # for readability):
969
- # ```json
970
- # // (Directory proto)
971
- # `
972
- # files: [
973
- # `
974
- # name: "bar",
975
- # digest: `
976
- # hash: "4a73bc9d03...",
977
- # size: 65534
978
- # `,
979
- # node_properties: [
980
- # `
981
- # "name": "MTime",
982
- # "value": "2017-01-15T01:30:15.01Z"
983
- # `
984
- # ]
985
- # `
986
- # ],
987
- # directories: [
988
- # `
989
- # name: "foo",
990
- # digest: `
991
- # hash: "4cf2eda940...",
992
- # size: 43
993
- # `
994
- # `
995
- # ]
996
- # `
997
- # // (Directory proto with hash "4cf2eda940..." and size 43)
998
- # `
999
- # files: [
1000
- # `
1001
- # name: "baz",
1002
- # digest: `
1003
- # hash: "b2c941073e...",
1004
- # size: 1294,
1005
- # `,
1006
- # is_executable: true
1007
- # `
1008
- # ]
1009
- # `
1010
- # ```
862
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
863
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
864
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
865
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
866
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
867
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
1011
868
  class BuildBazelRemoteExecutionV2Directory
1012
869
  include Google::Apis::Core::Hashable
1013
870
 
@@ -1044,38 +901,35 @@ module Google
1044
901
  end
1045
902
  end
1046
903
 
1047
- # A `DirectoryNode` represents a child of a
1048
- # Directory which is itself
1049
- # a `Directory` and its associated metadata.
904
+ # A `DirectoryNode` represents a child of a Directory which is itself a `
905
+ # Directory` and its associated metadata.
1050
906
  class BuildBazelRemoteExecutionV2DirectoryNode
1051
907
  include Google::Apis::Core::Hashable
1052
908
 
1053
909
  # A content digest. A digest for a given blob consists of the size of the blob
1054
- # and its hash. The hash algorithm to use is defined by the server.
1055
- # The size is considered to be an integral part of the digest and cannot be
1056
- # separated. That is, even if the `hash` field is correctly specified but
1057
- # `size_bytes` is not, the server MUST reject the request.
1058
- # The reason for including the size in the digest is as follows: in a great
1059
- # many cases, the server needs to know the size of the blob it is about to work
1060
- # with prior to starting an operation with it, such as flattening Merkle tree
1061
- # structures or streaming it to a worker. Technically, the server could
1062
- # implement a separate metadata store, but this results in a significantly more
1063
- # complicated implementation as opposed to having the client specify the size
1064
- # up-front (or storing the size along with the digest in every message where
1065
- # digests are embedded). This does mean that the API leaks some implementation
1066
- # details of (what we consider to be) a reasonable server implementation, but
1067
- # we consider this to be a worthwhile tradeoff.
1068
- # When a `Digest` is used to refer to a proto message, it always refers to the
1069
- # message in binary encoded form. To ensure consistent hashing, clients and
1070
- # servers MUST ensure that they serialize messages according to the following
1071
- # rules, even if there are alternate valid encodings for the same message:
1072
- # * Fields are serialized in tag order.
1073
- # * There are no unknown fields.
1074
- # * There are no duplicate fields.
1075
- # * Fields are serialized according to the default semantics for their type.
1076
- # Most protocol buffer implementations will always follow these rules when
1077
- # serializing, but care should be taken to avoid shortcuts. For instance,
1078
- # concatenating two messages to merge them may produce duplicate fields.
910
+ # and its hash. The hash algorithm to use is defined by the server. The size is
911
+ # considered to be an integral part of the digest and cannot be separated. That
912
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
913
+ # the server MUST reject the request. The reason for including the size in the
914
+ # digest is as follows: in a great many cases, the server needs to know the size
915
+ # of the blob it is about to work with prior to starting an operation with it,
916
+ # such as flattening Merkle tree structures or streaming it to a worker.
917
+ # Technically, the server could implement a separate metadata store, but this
918
+ # results in a significantly more complicated implementation as opposed to
919
+ # having the client specify the size up-front (or storing the size along with
920
+ # the digest in every message where digests are embedded). This does mean that
921
+ # the API leaks some implementation details of (what we consider to be) a
922
+ # reasonable server implementation, but we consider this to be a worthwhile
923
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
924
+ # refers to the message in binary encoded form. To ensure consistent hashing,
925
+ # clients and servers MUST ensure that they serialize messages according to the
926
+ # following rules, even if there are alternate valid encodings for the same
927
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
928
+ # There are no duplicate fields. * Fields are serialized according to the
929
+ # default semantics for their type. Most protocol buffer implementations will
930
+ # always follow these rules when serializing, but care should be taken to avoid
931
+ # shortcuts. For instance, concatenating two messages to merge them may produce
932
+ # duplicate fields.
1079
933
  # Corresponds to the JSON property `digest`
1080
934
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1081
935
  attr_accessor :digest
@@ -1096,40 +950,35 @@ module Google
1096
950
  end
1097
951
  end
1098
952
 
1099
- # Metadata about an ongoing
1100
- # execution, which
1101
- # will be contained in the metadata
1102
- # field of the
1103
- # Operation.
953
+ # Metadata about an ongoing execution, which will be contained in the metadata
954
+ # field of the Operation.
1104
955
  class BuildBazelRemoteExecutionV2ExecuteOperationMetadata
1105
956
  include Google::Apis::Core::Hashable
1106
957
 
1107
958
  # A content digest. A digest for a given blob consists of the size of the blob
1108
- # and its hash. The hash algorithm to use is defined by the server.
1109
- # The size is considered to be an integral part of the digest and cannot be
1110
- # separated. That is, even if the `hash` field is correctly specified but
1111
- # `size_bytes` is not, the server MUST reject the request.
1112
- # The reason for including the size in the digest is as follows: in a great
1113
- # many cases, the server needs to know the size of the blob it is about to work
1114
- # with prior to starting an operation with it, such as flattening Merkle tree
1115
- # structures or streaming it to a worker. Technically, the server could
1116
- # implement a separate metadata store, but this results in a significantly more
1117
- # complicated implementation as opposed to having the client specify the size
1118
- # up-front (or storing the size along with the digest in every message where
1119
- # digests are embedded). This does mean that the API leaks some implementation
1120
- # details of (what we consider to be) a reasonable server implementation, but
1121
- # we consider this to be a worthwhile tradeoff.
1122
- # When a `Digest` is used to refer to a proto message, it always refers to the
1123
- # message in binary encoded form. To ensure consistent hashing, clients and
1124
- # servers MUST ensure that they serialize messages according to the following
1125
- # rules, even if there are alternate valid encodings for the same message:
1126
- # * Fields are serialized in tag order.
1127
- # * There are no unknown fields.
1128
- # * There are no duplicate fields.
1129
- # * Fields are serialized according to the default semantics for their type.
1130
- # Most protocol buffer implementations will always follow these rules when
1131
- # serializing, but care should be taken to avoid shortcuts. For instance,
1132
- # concatenating two messages to merge them may produce duplicate fields.
959
+ # and its hash. The hash algorithm to use is defined by the server. The size is
960
+ # considered to be an integral part of the digest and cannot be separated. That
961
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
962
+ # the server MUST reject the request. The reason for including the size in the
963
+ # digest is as follows: in a great many cases, the server needs to know the size
964
+ # of the blob it is about to work with prior to starting an operation with it,
965
+ # such as flattening Merkle tree structures or streaming it to a worker.
966
+ # Technically, the server could implement a separate metadata store, but this
967
+ # results in a significantly more complicated implementation as opposed to
968
+ # having the client specify the size up-front (or storing the size along with
969
+ # the digest in every message where digests are embedded). This does mean that
970
+ # the API leaks some implementation details of (what we consider to be) a
971
+ # reasonable server implementation, but we consider this to be a worthwhile
972
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
973
+ # refers to the message in binary encoded form. To ensure consistent hashing,
974
+ # clients and servers MUST ensure that they serialize messages according to the
975
+ # following rules, even if there are alternate valid encodings for the same
976
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
977
+ # There are no duplicate fields. * Fields are serialized according to the
978
+ # default semantics for their type. Most protocol buffer implementations will
979
+ # always follow these rules when serializing, but care should be taken to avoid
980
+ # shortcuts. For instance, concatenating two messages to merge them may produce
981
+ # duplicate fields.
1133
982
  # Corresponds to the JSON property `actionDigest`
1134
983
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1135
984
  attr_accessor :action_digest
@@ -1139,15 +988,13 @@ module Google
1139
988
  # @return [String]
1140
989
  attr_accessor :stage
1141
990
 
1142
- # If set, the client can use this name with
1143
- # ByteStream.Read to stream the
991
+ # If set, the client can use this name with ByteStream.Read to stream the
1144
992
  # standard error.
1145
993
  # Corresponds to the JSON property `stderrStreamName`
1146
994
  # @return [String]
1147
995
  attr_accessor :stderr_stream_name
1148
996
 
1149
- # If set, the client can use this name with
1150
- # ByteStream.Read to stream the
997
+ # If set, the client can use this name with ByteStream.Read to stream the
1151
998
  # standard output.
1152
999
  # Corresponds to the JSON property `stdoutStreamName`
1153
1000
  # @return [String]
@@ -1166,37 +1013,34 @@ module Google
1166
1013
  end
1167
1014
  end
1168
1015
 
1169
- # A request message for
1170
- # Execution.Execute.
1016
+ # A request message for Execution.Execute.
1171
1017
  class BuildBazelRemoteExecutionV2ExecuteRequest
1172
1018
  include Google::Apis::Core::Hashable
1173
1019
 
1174
1020
  # A content digest. A digest for a given blob consists of the size of the blob
1175
- # and its hash. The hash algorithm to use is defined by the server.
1176
- # The size is considered to be an integral part of the digest and cannot be
1177
- # separated. That is, even if the `hash` field is correctly specified but
1178
- # `size_bytes` is not, the server MUST reject the request.
1179
- # The reason for including the size in the digest is as follows: in a great
1180
- # many cases, the server needs to know the size of the blob it is about to work
1181
- # with prior to starting an operation with it, such as flattening Merkle tree
1182
- # structures or streaming it to a worker. Technically, the server could
1183
- # implement a separate metadata store, but this results in a significantly more
1184
- # complicated implementation as opposed to having the client specify the size
1185
- # up-front (or storing the size along with the digest in every message where
1186
- # digests are embedded). This does mean that the API leaks some implementation
1187
- # details of (what we consider to be) a reasonable server implementation, but
1188
- # we consider this to be a worthwhile tradeoff.
1189
- # When a `Digest` is used to refer to a proto message, it always refers to the
1190
- # message in binary encoded form. To ensure consistent hashing, clients and
1191
- # servers MUST ensure that they serialize messages according to the following
1192
- # rules, even if there are alternate valid encodings for the same message:
1193
- # * Fields are serialized in tag order.
1194
- # * There are no unknown fields.
1195
- # * There are no duplicate fields.
1196
- # * Fields are serialized according to the default semantics for their type.
1197
- # Most protocol buffer implementations will always follow these rules when
1198
- # serializing, but care should be taken to avoid shortcuts. For instance,
1199
- # concatenating two messages to merge them may produce duplicate fields.
1021
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1022
+ # considered to be an integral part of the digest and cannot be separated. That
1023
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1024
+ # the server MUST reject the request. The reason for including the size in the
1025
+ # digest is as follows: in a great many cases, the server needs to know the size
1026
+ # of the blob it is about to work with prior to starting an operation with it,
1027
+ # such as flattening Merkle tree structures or streaming it to a worker.
1028
+ # Technically, the server could implement a separate metadata store, but this
1029
+ # results in a significantly more complicated implementation as opposed to
1030
+ # having the client specify the size up-front (or storing the size along with
1031
+ # the digest in every message where digests are embedded). This does mean that
1032
+ # the API leaks some implementation details of (what we consider to be) a
1033
+ # reasonable server implementation, but we consider this to be a worthwhile
1034
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1035
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1036
+ # clients and servers MUST ensure that they serialize messages according to the
1037
+ # following rules, even if there are alternate valid encodings for the same
1038
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1039
+ # There are no duplicate fields. * Fields are serialized according to the
1040
+ # default semantics for their type. Most protocol buffer implementations will
1041
+ # always follow these rules when serializing, but care should be taken to avoid
1042
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1043
+ # duplicate fields.
1200
1044
  # Corresponds to the JSON property `actionDigest`
1201
1045
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1202
1046
  attr_accessor :action_digest
@@ -1212,19 +1056,17 @@ module Google
1212
1056
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ResultsCachePolicy]
1213
1057
  attr_accessor :results_cache_policy
1214
1058
 
1215
- # If true, the action will be executed even if its result is already
1216
- # present in the ActionCache.
1217
- # The execution is still allowed to be merged with other in-flight executions
1218
- # of the same action, however - semantically, the service MUST only guarantee
1219
- # that the results of an execution with this field set were not visible
1220
- # before the corresponding execution request was sent.
1221
- # Note that actions from execution requests setting this field set are still
1222
- # eligible to be entered into the action cache upon completion, and services
1223
- # SHOULD overwrite any existing entries that may exist. This allows
1224
- # skip_cache_lookup requests to be used as a mechanism for replacing action
1225
- # cache entries that reference outputs no longer available or that are
1226
- # poisoned in any way.
1227
- # If false, the result may be served from the action cache.
1059
+ # If true, the action will be executed even if its result is already present in
1060
+ # the ActionCache. The execution is still allowed to be merged with other in-
1061
+ # flight executions of the same action, however - semantically, the service MUST
1062
+ # only guarantee that the results of an execution with this field set were not
1063
+ # visible before the corresponding execution request was sent. Note that actions
1064
+ # from execution requests setting this field set are still eligible to be
1065
+ # entered into the action cache upon completion, and services SHOULD overwrite
1066
+ # any existing entries that may exist. This allows skip_cache_lookup requests to
1067
+ # be used as a mechanism for replacing action cache entries that reference
1068
+ # outputs no longer available or that are poisoned in any way. If false, the
1069
+ # result may be served from the action cache.
1228
1070
  # Corresponds to the JSON property `skipCacheLookup`
1229
1071
  # @return [Boolean]
1230
1072
  attr_accessor :skip_cache_lookup
@@ -1243,11 +1085,8 @@ module Google
1243
1085
  end
1244
1086
  end
1245
1087
 
1246
- # The response message for
1247
- # Execution.Execute,
1248
- # which will be contained in the response
1249
- # field of the
1250
- # Operation.
1088
+ # The response message for Execution.Execute, which will be contained in the
1089
+ # response field of the Operation.
1251
1090
  class BuildBazelRemoteExecutionV2ExecuteResponse
1252
1091
  include Google::Apis::Core::Hashable
1253
1092
 
@@ -1263,29 +1102,27 @@ module Google
1263
1102
  # @return [String]
1264
1103
  attr_accessor :message
1265
1104
 
1266
- # An ActionResult represents the result of an
1267
- # Action being run.
1105
+ # An ActionResult represents the result of an Action being run.
1268
1106
  # Corresponds to the JSON property `result`
1269
1107
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ActionResult]
1270
1108
  attr_accessor :result
1271
1109
 
1272
1110
  # An optional list of additional log outputs the server wishes to provide. A
1273
- # server can use this to return execution-specific logs however it wishes.
1274
- # This is intended primarily to make it easier for users to debug issues that
1275
- # may be outside of the actual job execution, such as by identifying the
1276
- # worker executing the action or by providing logs from the worker's setup
1277
- # phase. The keys SHOULD be human readable so that a client can display them
1278
- # to a user.
1111
+ # server can use this to return execution-specific logs however it wishes. This
1112
+ # is intended primarily to make it easier for users to debug issues that may be
1113
+ # outside of the actual job execution, such as by identifying the worker
1114
+ # executing the action or by providing logs from the worker's setup phase. The
1115
+ # keys SHOULD be human readable so that a client can display them to a user.
1279
1116
  # Corresponds to the JSON property `serverLogs`
1280
1117
  # @return [Hash<String,Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2LogFile>]
1281
1118
  attr_accessor :server_logs
1282
1119
 
1283
- # The `Status` type defines a logical error model that is suitable for
1284
- # different programming environments, including REST APIs and RPC APIs. It is
1285
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1286
- # three pieces of data: error code, error message, and error details.
1287
- # You can find out more about this error model and how to work with it in the
1288
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
1120
+ # The `Status` type defines a logical error model that is suitable for different
1121
+ # programming environments, including REST APIs and RPC APIs. It is used by [
1122
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
1123
+ # data: error code, error message, and error details. You can find out more
1124
+ # about this error model and how to work with it in the [API Design Guide](https:
1125
+ # //cloud.google.com/apis/design/errors).
1289
1126
  # Corresponds to the JSON property `status`
1290
1127
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
1291
1128
  attr_accessor :status
@@ -1392,9 +1229,8 @@ module Google
1392
1229
  attr_accessor :exec_enabled
1393
1230
  alias_method :exec_enabled?, :exec_enabled
1394
1231
 
1395
- # Allowed values for priority in
1396
- # ResultsCachePolicy
1397
- # Used for querying both cache and execution valid priority ranges.
1232
+ # Allowed values for priority in ResultsCachePolicy Used for querying both cache
1233
+ # and execution valid priority ranges.
1398
1234
  # Corresponds to the JSON property `executionPriorityCapabilities`
1399
1235
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2PriorityCapabilities]
1400
1236
  attr_accessor :execution_priority_capabilities
@@ -1423,13 +1259,12 @@ module Google
1423
1259
 
1424
1260
  # The priority (relative importance) of this action. Generally, a lower value
1425
1261
  # means that the action should be run sooner than actions having a greater
1426
- # priority value, but the interpretation of a given value is server-
1427
- # dependent. A priority of 0 means the *default* priority. Priorities may be
1428
- # positive or negative, and such actions should run later or sooner than
1429
- # actions having the default priority, respectively. The particular semantics
1430
- # of this field is up to the server. In particular, every server will have
1431
- # their own supported range of priorities, and will decide how these map into
1432
- # scheduling policy.
1262
+ # priority value, but the interpretation of a given value is server- dependent.
1263
+ # A priority of 0 means the *default* priority. Priorities may be positive or
1264
+ # negative, and such actions should run later or sooner than actions having the
1265
+ # default priority, respectively. The particular semantics of this field is up
1266
+ # to the server. In particular, every server will have their own supported range
1267
+ # of priorities, and will decide how these map into scheduling policy.
1433
1268
  # Corresponds to the JSON property `priority`
1434
1269
  # @return [Fixnum]
1435
1270
  attr_accessor :priority
@@ -1449,31 +1284,29 @@ module Google
1449
1284
  include Google::Apis::Core::Hashable
1450
1285
 
1451
1286
  # A content digest. A digest for a given blob consists of the size of the blob
1452
- # and its hash. The hash algorithm to use is defined by the server.
1453
- # The size is considered to be an integral part of the digest and cannot be
1454
- # separated. That is, even if the `hash` field is correctly specified but
1455
- # `size_bytes` is not, the server MUST reject the request.
1456
- # The reason for including the size in the digest is as follows: in a great
1457
- # many cases, the server needs to know the size of the blob it is about to work
1458
- # with prior to starting an operation with it, such as flattening Merkle tree
1459
- # structures or streaming it to a worker. Technically, the server could
1460
- # implement a separate metadata store, but this results in a significantly more
1461
- # complicated implementation as opposed to having the client specify the size
1462
- # up-front (or storing the size along with the digest in every message where
1463
- # digests are embedded). This does mean that the API leaks some implementation
1464
- # details of (what we consider to be) a reasonable server implementation, but
1465
- # we consider this to be a worthwhile tradeoff.
1466
- # When a `Digest` is used to refer to a proto message, it always refers to the
1467
- # message in binary encoded form. To ensure consistent hashing, clients and
1468
- # servers MUST ensure that they serialize messages according to the following
1469
- # rules, even if there are alternate valid encodings for the same message:
1470
- # * Fields are serialized in tag order.
1471
- # * There are no unknown fields.
1472
- # * There are no duplicate fields.
1473
- # * Fields are serialized according to the default semantics for their type.
1474
- # Most protocol buffer implementations will always follow these rules when
1475
- # serializing, but care should be taken to avoid shortcuts. For instance,
1476
- # concatenating two messages to merge them may produce duplicate fields.
1287
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1288
+ # considered to be an integral part of the digest and cannot be separated. That
1289
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1290
+ # the server MUST reject the request. The reason for including the size in the
1291
+ # digest is as follows: in a great many cases, the server needs to know the size
1292
+ # of the blob it is about to work with prior to starting an operation with it,
1293
+ # such as flattening Merkle tree structures or streaming it to a worker.
1294
+ # Technically, the server could implement a separate metadata store, but this
1295
+ # results in a significantly more complicated implementation as opposed to
1296
+ # having the client specify the size up-front (or storing the size along with
1297
+ # the digest in every message where digests are embedded). This does mean that
1298
+ # the API leaks some implementation details of (what we consider to be) a
1299
+ # reasonable server implementation, but we consider this to be a worthwhile
1300
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1301
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1302
+ # clients and servers MUST ensure that they serialize messages according to the
1303
+ # following rules, even if there are alternate valid encodings for the same
1304
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1305
+ # There are no duplicate fields. * Fields are serialized according to the
1306
+ # default semantics for their type. Most protocol buffer implementations will
1307
+ # always follow these rules when serializing, but care should be taken to avoid
1308
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1309
+ # duplicate fields.
1477
1310
  # Corresponds to the JSON property `digest`
1478
1311
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1479
1312
  attr_accessor :digest
@@ -1507,8 +1340,7 @@ module Google
1507
1340
  end
1508
1341
  end
1509
1342
 
1510
- # A request message for
1511
- # ContentAddressableStorage.FindMissingBlobs.
1343
+ # A request message for ContentAddressableStorage.FindMissingBlobs.
1512
1344
  class BuildBazelRemoteExecutionV2FindMissingBlobsRequest
1513
1345
  include Google::Apis::Core::Hashable
1514
1346
 
@@ -1527,8 +1359,7 @@ module Google
1527
1359
  end
1528
1360
  end
1529
1361
 
1530
- # A response message for
1531
- # ContentAddressableStorage.FindMissingBlobs.
1362
+ # A response message for ContentAddressableStorage.FindMissingBlobs.
1532
1363
  class BuildBazelRemoteExecutionV2FindMissingBlobsResponse
1533
1364
  include Google::Apis::Core::Hashable
1534
1365
 
@@ -1547,8 +1378,7 @@ module Google
1547
1378
  end
1548
1379
  end
1549
1380
 
1550
- # A response message for
1551
- # ContentAddressableStorage.GetTree.
1381
+ # A response message for ContentAddressableStorage.GetTree.
1552
1382
  class BuildBazelRemoteExecutionV2GetTreeResponse
1553
1383
  include Google::Apis::Core::Hashable
1554
1384
 
@@ -1558,9 +1388,8 @@ module Google
1558
1388
  attr_accessor :directories
1559
1389
 
1560
1390
  # If present, signifies that there are more results which the client can
1561
- # retrieve by passing this as the page_token in a subsequent
1562
- # request.
1563
- # If empty, signifies that this is the last page of results.
1391
+ # retrieve by passing this as the page_token in a subsequent request. If empty,
1392
+ # signifies that this is the last page of results.
1564
1393
  # Corresponds to the JSON property `nextPageToken`
1565
1394
  # @return [String]
1566
1395
  attr_accessor :next_page_token
@@ -1581,40 +1410,38 @@ module Google
1581
1410
  include Google::Apis::Core::Hashable
1582
1411
 
1583
1412
  # A content digest. A digest for a given blob consists of the size of the blob
1584
- # and its hash. The hash algorithm to use is defined by the server.
1585
- # The size is considered to be an integral part of the digest and cannot be
1586
- # separated. That is, even if the `hash` field is correctly specified but
1587
- # `size_bytes` is not, the server MUST reject the request.
1588
- # The reason for including the size in the digest is as follows: in a great
1589
- # many cases, the server needs to know the size of the blob it is about to work
1590
- # with prior to starting an operation with it, such as flattening Merkle tree
1591
- # structures or streaming it to a worker. Technically, the server could
1592
- # implement a separate metadata store, but this results in a significantly more
1593
- # complicated implementation as opposed to having the client specify the size
1594
- # up-front (or storing the size along with the digest in every message where
1595
- # digests are embedded). This does mean that the API leaks some implementation
1596
- # details of (what we consider to be) a reasonable server implementation, but
1597
- # we consider this to be a worthwhile tradeoff.
1598
- # When a `Digest` is used to refer to a proto message, it always refers to the
1599
- # message in binary encoded form. To ensure consistent hashing, clients and
1600
- # servers MUST ensure that they serialize messages according to the following
1601
- # rules, even if there are alternate valid encodings for the same message:
1602
- # * Fields are serialized in tag order.
1603
- # * There are no unknown fields.
1604
- # * There are no duplicate fields.
1605
- # * Fields are serialized according to the default semantics for their type.
1606
- # Most protocol buffer implementations will always follow these rules when
1607
- # serializing, but care should be taken to avoid shortcuts. For instance,
1608
- # concatenating two messages to merge them may produce duplicate fields.
1413
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1414
+ # considered to be an integral part of the digest and cannot be separated. That
1415
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1416
+ # the server MUST reject the request. The reason for including the size in the
1417
+ # digest is as follows: in a great many cases, the server needs to know the size
1418
+ # of the blob it is about to work with prior to starting an operation with it,
1419
+ # such as flattening Merkle tree structures or streaming it to a worker.
1420
+ # Technically, the server could implement a separate metadata store, but this
1421
+ # results in a significantly more complicated implementation as opposed to
1422
+ # having the client specify the size up-front (or storing the size along with
1423
+ # the digest in every message where digests are embedded). This does mean that
1424
+ # the API leaks some implementation details of (what we consider to be) a
1425
+ # reasonable server implementation, but we consider this to be a worthwhile
1426
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1427
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1428
+ # clients and servers MUST ensure that they serialize messages according to the
1429
+ # following rules, even if there are alternate valid encodings for the same
1430
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1431
+ # There are no duplicate fields. * Fields are serialized according to the
1432
+ # default semantics for their type. Most protocol buffer implementations will
1433
+ # always follow these rules when serializing, but care should be taken to avoid
1434
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1435
+ # duplicate fields.
1609
1436
  # Corresponds to the JSON property `digest`
1610
1437
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1611
1438
  attr_accessor :digest
1612
1439
 
1613
- # This is a hint as to the purpose of the log, and is set to true if the log
1614
- # is human-readable text that can be usefully displayed to a user, and false
1615
- # otherwise. For instance, if a command-line client wishes to print the
1616
- # server logs to the terminal for a failed action, this allows it to avoid
1617
- # displaying a binary file.
1440
+ # This is a hint as to the purpose of the log, and is set to true if the log is
1441
+ # human-readable text that can be usefully displayed to a user, and false
1442
+ # otherwise. For instance, if a command-line client wishes to print the server
1443
+ # logs to the terminal for a failed action, this allows it to avoid displaying a
1444
+ # binary file.
1618
1445
  # Corresponds to the JSON property `humanReadable`
1619
1446
  # @return [Boolean]
1620
1447
  attr_accessor :human_readable
@@ -1631,10 +1458,8 @@ module Google
1631
1458
  end
1632
1459
  end
1633
1460
 
1634
- # A single property for FileNodes,
1635
- # DirectoryNodes, and
1636
- # SymlinkNodes. The server is
1637
- # responsible for specifying the property `name`s that it accepts. If
1461
+ # A single property for FileNodes, DirectoryNodes, and SymlinkNodes. The server
1462
+ # is responsible for specifying the property `name`s that it accepts. If
1638
1463
  # permitted by the server, the same `name` may occur multiple times.
1639
1464
  class BuildBazelRemoteExecutionV2NodeProperty
1640
1465
  include Google::Apis::Core::Hashable
@@ -1666,39 +1491,37 @@ module Google
1666
1491
  include Google::Apis::Core::Hashable
1667
1492
 
1668
1493
  # The full path of the directory relative to the working directory. The path
1669
- # separator is a forward slash `/`. Since this is a relative path, it MUST
1670
- # NOT begin with a leading forward slash. The empty string value is allowed,
1671
- # and it denotes the entire working directory.
1494
+ # separator is a forward slash `/`. Since this is a relative path, it MUST NOT
1495
+ # begin with a leading forward slash. The empty string value is allowed, and it
1496
+ # denotes the entire working directory.
1672
1497
  # Corresponds to the JSON property `path`
1673
1498
  # @return [String]
1674
1499
  attr_accessor :path
1675
1500
 
1676
1501
  # A content digest. A digest for a given blob consists of the size of the blob
1677
- # and its hash. The hash algorithm to use is defined by the server.
1678
- # The size is considered to be an integral part of the digest and cannot be
1679
- # separated. That is, even if the `hash` field is correctly specified but
1680
- # `size_bytes` is not, the server MUST reject the request.
1681
- # The reason for including the size in the digest is as follows: in a great
1682
- # many cases, the server needs to know the size of the blob it is about to work
1683
- # with prior to starting an operation with it, such as flattening Merkle tree
1684
- # structures or streaming it to a worker. Technically, the server could
1685
- # implement a separate metadata store, but this results in a significantly more
1686
- # complicated implementation as opposed to having the client specify the size
1687
- # up-front (or storing the size along with the digest in every message where
1688
- # digests are embedded). This does mean that the API leaks some implementation
1689
- # details of (what we consider to be) a reasonable server implementation, but
1690
- # we consider this to be a worthwhile tradeoff.
1691
- # When a `Digest` is used to refer to a proto message, it always refers to the
1692
- # message in binary encoded form. To ensure consistent hashing, clients and
1693
- # servers MUST ensure that they serialize messages according to the following
1694
- # rules, even if there are alternate valid encodings for the same message:
1695
- # * Fields are serialized in tag order.
1696
- # * There are no unknown fields.
1697
- # * There are no duplicate fields.
1698
- # * Fields are serialized according to the default semantics for their type.
1699
- # Most protocol buffer implementations will always follow these rules when
1700
- # serializing, but care should be taken to avoid shortcuts. For instance,
1701
- # concatenating two messages to merge them may produce duplicate fields.
1502
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1503
+ # considered to be an integral part of the digest and cannot be separated. That
1504
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1505
+ # the server MUST reject the request. The reason for including the size in the
1506
+ # digest is as follows: in a great many cases, the server needs to know the size
1507
+ # of the blob it is about to work with prior to starting an operation with it,
1508
+ # such as flattening Merkle tree structures or streaming it to a worker.
1509
+ # Technically, the server could implement a separate metadata store, but this
1510
+ # results in a significantly more complicated implementation as opposed to
1511
+ # having the client specify the size up-front (or storing the size along with
1512
+ # the digest in every message where digests are embedded). This does mean that
1513
+ # the API leaks some implementation details of (what we consider to be) a
1514
+ # reasonable server implementation, but we consider this to be a worthwhile
1515
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1516
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1517
+ # clients and servers MUST ensure that they serialize messages according to the
1518
+ # following rules, even if there are alternate valid encodings for the same
1519
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1520
+ # There are no duplicate fields. * Fields are serialized according to the
1521
+ # default semantics for their type. Most protocol buffer implementations will
1522
+ # always follow these rules when serializing, but care should be taken to avoid
1523
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1524
+ # duplicate fields.
1702
1525
  # Corresponds to the JSON property `treeDigest`
1703
1526
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1704
1527
  attr_accessor :tree_digest
@@ -1714,51 +1537,45 @@ module Google
1714
1537
  end
1715
1538
  end
1716
1539
 
1717
- # An `OutputFile` is similar to a
1718
- # FileNode, but it is used as an
1719
- # output in an `ActionResult`. It allows a full file path rather than
1720
- # only a name.
1540
+ # An `OutputFile` is similar to a FileNode, but it is used as an output in an `
1541
+ # ActionResult`. It allows a full file path rather than only a name.
1721
1542
  class BuildBazelRemoteExecutionV2OutputFile
1722
1543
  include Google::Apis::Core::Hashable
1723
1544
 
1724
1545
  # The contents of the file if inlining was requested. The server SHOULD NOT
1725
- # inline
1726
- # file contents unless requested by the client in the
1727
- # GetActionResultRequest
1728
- # message. The server MAY omit inlining, even if requested, and MUST do so if
1729
- # inlining
1730
- # would cause the response to exceed message size limits.
1546
+ # inline file contents unless requested by the client in the
1547
+ # GetActionResultRequest message. The server MAY omit inlining, even if
1548
+ # requested, and MUST do so if inlining would cause the response to exceed
1549
+ # message size limits.
1731
1550
  # Corresponds to the JSON property `contents`
1732
1551
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
1733
1552
  # @return [String]
1734
1553
  attr_accessor :contents
1735
1554
 
1736
1555
  # A content digest. A digest for a given blob consists of the size of the blob
1737
- # and its hash. The hash algorithm to use is defined by the server.
1738
- # The size is considered to be an integral part of the digest and cannot be
1739
- # separated. That is, even if the `hash` field is correctly specified but
1740
- # `size_bytes` is not, the server MUST reject the request.
1741
- # The reason for including the size in the digest is as follows: in a great
1742
- # many cases, the server needs to know the size of the blob it is about to work
1743
- # with prior to starting an operation with it, such as flattening Merkle tree
1744
- # structures or streaming it to a worker. Technically, the server could
1745
- # implement a separate metadata store, but this results in a significantly more
1746
- # complicated implementation as opposed to having the client specify the size
1747
- # up-front (or storing the size along with the digest in every message where
1748
- # digests are embedded). This does mean that the API leaks some implementation
1749
- # details of (what we consider to be) a reasonable server implementation, but
1750
- # we consider this to be a worthwhile tradeoff.
1751
- # When a `Digest` is used to refer to a proto message, it always refers to the
1752
- # message in binary encoded form. To ensure consistent hashing, clients and
1753
- # servers MUST ensure that they serialize messages according to the following
1754
- # rules, even if there are alternate valid encodings for the same message:
1755
- # * Fields are serialized in tag order.
1756
- # * There are no unknown fields.
1757
- # * There are no duplicate fields.
1758
- # * Fields are serialized according to the default semantics for their type.
1759
- # Most protocol buffer implementations will always follow these rules when
1760
- # serializing, but care should be taken to avoid shortcuts. For instance,
1761
- # concatenating two messages to merge them may produce duplicate fields.
1556
+ # and its hash. The hash algorithm to use is defined by the server. The size is
1557
+ # considered to be an integral part of the digest and cannot be separated. That
1558
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
1559
+ # the server MUST reject the request. The reason for including the size in the
1560
+ # digest is as follows: in a great many cases, the server needs to know the size
1561
+ # of the blob it is about to work with prior to starting an operation with it,
1562
+ # such as flattening Merkle tree structures or streaming it to a worker.
1563
+ # Technically, the server could implement a separate metadata store, but this
1564
+ # results in a significantly more complicated implementation as opposed to
1565
+ # having the client specify the size up-front (or storing the size along with
1566
+ # the digest in every message where digests are embedded). This does mean that
1567
+ # the API leaks some implementation details of (what we consider to be) a
1568
+ # reasonable server implementation, but we consider this to be a worthwhile
1569
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
1570
+ # refers to the message in binary encoded form. To ensure consistent hashing,
1571
+ # clients and servers MUST ensure that they serialize messages according to the
1572
+ # following rules, even if there are alternate valid encodings for the same
1573
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
1574
+ # There are no duplicate fields. * Fields are serialized according to the
1575
+ # default semantics for their type. Most protocol buffer implementations will
1576
+ # always follow these rules when serializing, but care should be taken to avoid
1577
+ # shortcuts. For instance, concatenating two messages to merge them may produce
1578
+ # duplicate fields.
1762
1579
  # Corresponds to the JSON property `digest`
1763
1580
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Digest]
1764
1581
  attr_accessor :digest
@@ -1775,8 +1592,8 @@ module Google
1775
1592
  attr_accessor :node_properties
1776
1593
 
1777
1594
  # The full path of the file relative to the working directory, including the
1778
- # filename. The path separator is a forward slash `/`. Since this is a
1779
- # relative path, it MUST NOT begin with a leading forward slash.
1595
+ # filename. The path separator is a forward slash `/`. Since this is a relative
1596
+ # path, it MUST NOT begin with a leading forward slash.
1780
1597
  # Corresponds to the JSON property `path`
1781
1598
  # @return [String]
1782
1599
  attr_accessor :path
@@ -1795,32 +1612,29 @@ module Google
1795
1612
  end
1796
1613
  end
1797
1614
 
1798
- # An `OutputSymlink` is similar to a
1799
- # Symlink, but it is used as an
1800
- # output in an `ActionResult`.
1801
- # `OutputSymlink` is binary-compatible with `SymlinkNode`.
1615
+ # An `OutputSymlink` is similar to a Symlink, but it is used as an output in an `
1616
+ # ActionResult`. `OutputSymlink` is binary-compatible with `SymlinkNode`.
1802
1617
  class BuildBazelRemoteExecutionV2OutputSymlink
1803
1618
  include Google::Apis::Core::Hashable
1804
1619
 
1805
- # The supported node properties of the OutputSymlink, if requested by the
1806
- # Action.
1620
+ # The supported node properties of the OutputSymlink, if requested by the Action.
1807
1621
  # Corresponds to the JSON property `nodeProperties`
1808
1622
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2NodeProperty>]
1809
1623
  attr_accessor :node_properties
1810
1624
 
1811
1625
  # The full path of the symlink relative to the working directory, including the
1812
- # filename. The path separator is a forward slash `/`. Since this is a
1813
- # relative path, it MUST NOT begin with a leading forward slash.
1626
+ # filename. The path separator is a forward slash `/`. Since this is a relative
1627
+ # path, it MUST NOT begin with a leading forward slash.
1814
1628
  # Corresponds to the JSON property `path`
1815
1629
  # @return [String]
1816
1630
  attr_accessor :path
1817
1631
 
1818
- # The target path of the symlink. The path separator is a forward slash `/`.
1819
- # The target path can be relative to the parent directory of the symlink or
1820
- # it can be an absolute path starting with `/`. Support for absolute paths
1821
- # can be checked using the Capabilities
1822
- # API. The canonical form forbids the substrings `/./` and `//` in the target
1823
- # path. `..` components are allowed anywhere in the target path.
1632
+ # The target path of the symlink. The path separator is a forward slash `/`. The
1633
+ # target path can be relative to the parent directory of the symlink or it can
1634
+ # be an absolute path starting with `/`. Support for absolute paths can be
1635
+ # checked using the Capabilities API. The canonical form forbids the substrings `
1636
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
1637
+ # target path.
1824
1638
  # Corresponds to the JSON property `target`
1825
1639
  # @return [String]
1826
1640
  attr_accessor :target
@@ -1838,17 +1652,16 @@ module Google
1838
1652
  end
1839
1653
 
1840
1654
  # A `Platform` is a set of requirements, such as hardware, operating system, or
1841
- # compiler toolchain, for an
1842
- # Action's execution
1843
- # environment. A `Platform` is represented as a series of key-value pairs
1844
- # representing the properties that are required of the platform.
1655
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
1656
+ # represented as a series of key-value pairs representing the properties that
1657
+ # are required of the platform.
1845
1658
  class BuildBazelRemoteExecutionV2Platform
1846
1659
  include Google::Apis::Core::Hashable
1847
1660
 
1848
- # The properties that make up this platform. In order to ensure that
1849
- # equivalent `Platform`s always hash to the same value, the properties MUST
1850
- # be lexicographically sorted by name, and then by value. Sorting of strings
1851
- # is done by code point, equivalently, by the UTF-8 bytes.
1661
+ # The properties that make up this platform. In order to ensure that equivalent `
1662
+ # Platform`s always hash to the same value, the properties MUST be
1663
+ # lexicographically sorted by name, and then by value. Sorting of strings is
1664
+ # done by code point, equivalently, by the UTF-8 bytes.
1852
1665
  # Corresponds to the JSON property `properties`
1853
1666
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2PlatformProperty>]
1854
1667
  attr_accessor :properties
@@ -1865,19 +1678,16 @@ module Google
1865
1678
 
1866
1679
  # A single property for the environment. The server is responsible for
1867
1680
  # specifying the property `name`s that it accepts. If an unknown `name` is
1868
- # provided in the requirements for an
1869
- # Action, the server SHOULD
1870
- # reject the execution request. If permitted by the server, the same `name`
1871
- # may occur multiple times.
1872
- # The server is also responsible for specifying the interpretation of
1873
- # property `value`s. For instance, a property describing how much RAM must be
1874
- # available may be interpreted as allowing a worker with 16GB to fulfill a
1875
- # request for 8GB, while a property describing the OS environment on which
1876
- # the action must be performed may require an exact match with the worker's
1877
- # OS.
1878
- # The server MAY use the `value` of one or more properties to determine how
1879
- # it sets up the execution environment, such as by making specific system
1880
- # files available to the worker.
1681
+ # provided in the requirements for an Action, the server SHOULD reject the
1682
+ # execution request. If permitted by the server, the same `name` may occur
1683
+ # multiple times. The server is also responsible for specifying the
1684
+ # interpretation of property `value`s. For instance, a property describing how
1685
+ # much RAM must be available may be interpreted as allowing a worker with 16GB
1686
+ # to fulfill a request for 8GB, while a property describing the OS environment
1687
+ # on which the action must be performed may require an exact match with the
1688
+ # worker's OS. The server MAY use the `value` of one or more properties to
1689
+ # determine how it sets up the execution environment, such as by making specific
1690
+ # system files available to the worker.
1881
1691
  class BuildBazelRemoteExecutionV2PlatformProperty
1882
1692
  include Google::Apis::Core::Hashable
1883
1693
 
@@ -1902,9 +1712,8 @@ module Google
1902
1712
  end
1903
1713
  end
1904
1714
 
1905
- # Allowed values for priority in
1906
- # ResultsCachePolicy
1907
- # Used for querying both cache and execution valid priority ranges.
1715
+ # Allowed values for priority in ResultsCachePolicy Used for querying both cache
1716
+ # and execution valid priority ranges.
1908
1717
  class BuildBazelRemoteExecutionV2PriorityCapabilities
1909
1718
  include Google::Apis::Core::Hashable
1910
1719
 
@@ -1951,27 +1760,25 @@ module Google
1951
1760
  # An optional Metadata to attach to any RPC request to tell the server about an
1952
1761
  # external context of the request. The server may use this for logging or other
1953
1762
  # purposes. To use it, the client attaches the header to the call using the
1954
- # canonical proto serialization:
1955
- # * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
1956
- # * contents: the base64 encoded binary `RequestMetadata` message.
1957
- # Note: the gRPC library serializes binary headers encoded in base 64 by
1958
- # default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
1959
- # requests).
1960
- # Therefore, if the gRPC library is used to pass/retrieve this
1763
+ # canonical proto serialization: * name: `build.bazel.remote.execution.v2.
1764
+ # requestmetadata-bin` * contents: the base64 encoded binary `RequestMetadata`
1765
+ # message. Note: the gRPC library serializes binary headers encoded in base 64
1766
+ # by default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
1767
+ # requests). Therefore, if the gRPC library is used to pass/retrieve this
1961
1768
  # metadata, the user may ignore the base64 encoding and assume it is simply
1962
1769
  # serialized as a binary message.
1963
1770
  class BuildBazelRemoteExecutionV2RequestMetadata
1964
1771
  include Google::Apis::Core::Hashable
1965
1772
 
1966
- # An identifier that ties multiple requests to the same action.
1967
- # For example, multiple requests to the CAS, Action Cache, and Execution
1968
- # API are used in order to compile foo.cc.
1773
+ # An identifier that ties multiple requests to the same action. For example,
1774
+ # multiple requests to the CAS, Action Cache, and Execution API are used in
1775
+ # order to compile foo.cc.
1969
1776
  # Corresponds to the JSON property `actionId`
1970
1777
  # @return [String]
1971
1778
  attr_accessor :action_id
1972
1779
 
1973
- # An identifier to tie multiple tool invocations together. For example,
1974
- # runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
1780
+ # An identifier to tie multiple tool invocations together. For example, runs of
1781
+ # foo_test, bar_test and baz_test on a post-submit of a given patch.
1975
1782
  # Corresponds to the JSON property `correlatedInvocationsId`
1976
1783
  # @return [String]
1977
1784
  attr_accessor :correlated_invocations_id
@@ -1981,8 +1788,8 @@ module Google
1981
1788
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2ToolDetails]
1982
1789
  attr_accessor :tool_details
1983
1790
 
1984
- # An identifier that ties multiple actions together to a final result.
1985
- # For example, multiple actions are required to build and run foo_test.
1791
+ # An identifier that ties multiple actions together to a final result. For
1792
+ # example, multiple actions are required to build and run foo_test.
1986
1793
  # Corresponds to the JSON property `toolInvocationId`
1987
1794
  # @return [String]
1988
1795
  attr_accessor :tool_invocation_id
@@ -2006,12 +1813,12 @@ module Google
2006
1813
  include Google::Apis::Core::Hashable
2007
1814
 
2008
1815
  # The priority (relative importance) of this content in the overall cache.
2009
- # Generally, a lower value means a longer retention time or other advantage,
2010
- # but the interpretation of a given value is server-dependent. A priority of
2011
- # 0 means a *default* value, decided by the server.
2012
- # The particular semantics of this field is up to the server. In particular,
2013
- # every server will have their own supported range of priorities, and will
2014
- # decide how these map into retention/eviction policy.
1816
+ # Generally, a lower value means a longer retention time or other advantage, but
1817
+ # the interpretation of a given value is server-dependent. A priority of 0 means
1818
+ # a *default* value, decided by the server. The particular semantics of this
1819
+ # field is up to the server. In particular, every server will have their own
1820
+ # supported range of priorities, and will decide how these map into retention/
1821
+ # eviction policy.
2015
1822
  # Corresponds to the JSON property `priority`
2016
1823
  # @return [Fixnum]
2017
1824
  attr_accessor :priority
@@ -2026,8 +1833,7 @@ module Google
2026
1833
  end
2027
1834
  end
2028
1835
 
2029
- # A response message for
2030
- # Capabilities.GetCapabilities.
1836
+ # A response message for Capabilities.GetCapabilities.
2031
1837
  class BuildBazelRemoteExecutionV2ServerCapabilities
2032
1838
  include Google::Apis::Core::Hashable
2033
1839
 
@@ -2084,12 +1890,12 @@ module Google
2084
1890
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2NodeProperty>]
2085
1891
  attr_accessor :node_properties
2086
1892
 
2087
- # The target path of the symlink. The path separator is a forward slash `/`.
2088
- # The target path can be relative to the parent directory of the symlink or
2089
- # it can be an absolute path starting with `/`. Support for absolute paths
2090
- # can be checked using the Capabilities
2091
- # API. The canonical form forbids the substrings `/./` and `//` in the target
2092
- # path. `..` components are allowed anywhere in the target path.
1893
+ # The target path of the symlink. The path separator is a forward slash `/`. The
1894
+ # target path can be relative to the parent directory of the symlink or it can
1895
+ # be an absolute path starting with `/`. Support for absolute paths can be
1896
+ # checked using the Capabilities API. The canonical form forbids the substrings `
1897
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
1898
+ # target path.
2093
1899
  # Corresponds to the JSON property `target`
2094
1900
  # @return [String]
2095
1901
  attr_accessor :target
@@ -2131,90 +1937,45 @@ module Google
2131
1937
  end
2132
1938
  end
2133
1939
 
2134
- # A `Tree` contains all the
2135
- # Directory protos in a
2136
- # single directory Merkle tree, compressed into one message.
1940
+ # A `Tree` contains all the Directory protos in a single directory Merkle tree,
1941
+ # compressed into one message.
2137
1942
  class BuildBazelRemoteExecutionV2Tree
2138
1943
  include Google::Apis::Core::Hashable
2139
1944
 
2140
1945
  # All the child directories: the directories referred to by the root and,
2141
- # recursively, all its children. In order to reconstruct the directory tree,
2142
- # the client must take the digests of each of the child directories and then
2143
- # build up a tree starting from the `root`.
1946
+ # recursively, all its children. In order to reconstruct the directory tree, the
1947
+ # client must take the digests of each of the child directories and then build
1948
+ # up a tree starting from the `root`.
2144
1949
  # Corresponds to the JSON property `children`
2145
1950
  # @return [Array<Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Directory>]
2146
1951
  attr_accessor :children
2147
1952
 
2148
1953
  # A `Directory` represents a directory node in a file tree, containing zero or
2149
- # more children FileNodes,
2150
- # DirectoryNodes and
2151
- # SymlinkNodes.
2152
- # Each `Node` contains its name in the directory, either the digest of its
2153
- # content (either a file blob or a `Directory` proto) or a symlink target, as
2154
- # well as possibly some metadata about the file or directory.
2155
- # In order to ensure that two equivalent directory trees hash to the same
2156
- # value, the following restrictions MUST be obeyed when constructing a
2157
- # a `Directory`:
2158
- # * Every child in the directory must have a path of exactly one segment.
2159
- # Multiple levels of directory hierarchy may not be collapsed.
2160
- # * Each child in the directory must have a unique path segment (file name).
2161
- # Note that while the API itself is case-sensitive, the environment where
2162
- # the Action is executed may or may not be case-sensitive. That is, it is
2163
- # legal to call the API with a Directory that has both "Foo" and "foo" as
2164
- # children, but the Action may be rejected by the remote system upon
2165
- # execution.
2166
- # * The files, directories and symlinks in the directory must each be sorted
2167
- # in lexicographical order by path. The path strings must be sorted by code
2168
- # point, equivalently, by UTF-8 bytes.
2169
- # * The NodeProperties of files,
2170
- # directories, and symlinks must be sorted in lexicographical order by
2171
- # property name.
2172
- # A `Directory` that obeys the restrictions is said to be in canonical form.
2173
- # As an example, the following could be used for a file named `bar` and a
1954
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
1955
+ # its name in the directory, either the digest of its content (either a file
1956
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
1957
+ # metadata about the file or directory. In order to ensure that two equivalent
1958
+ # directory trees hash to the same value, the following restrictions MUST be
1959
+ # obeyed when constructing a a `Directory`: * Every child in the directory must
1960
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
1961
+ # not be collapsed. * Each child in the directory must have a unique path
1962
+ # segment (file name). Note that while the API itself is case-sensitive, the
1963
+ # environment where the Action is executed may or may not be case-sensitive.
1964
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
1965
+ # foo" as children, but the Action may be rejected by the remote system upon
1966
+ # execution. * The files, directories and symlinks in the directory must each be
1967
+ # sorted in lexicographical order by path. The path strings must be sorted by
1968
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
1969
+ # directories, and symlinks must be sorted in lexicographical order by property
1970
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
1971
+ # form. As an example, the following could be used for a file named `bar` and a
2174
1972
  # directory named `foo` with an executable file named `baz` (hashes shortened
2175
- # for readability):
2176
- # ```json
2177
- # // (Directory proto)
2178
- # `
2179
- # files: [
2180
- # `
2181
- # name: "bar",
2182
- # digest: `
2183
- # hash: "4a73bc9d03...",
2184
- # size: 65534
2185
- # `,
2186
- # node_properties: [
2187
- # `
2188
- # "name": "MTime",
2189
- # "value": "2017-01-15T01:30:15.01Z"
2190
- # `
2191
- # ]
2192
- # `
2193
- # ],
2194
- # directories: [
2195
- # `
2196
- # name: "foo",
2197
- # digest: `
2198
- # hash: "4cf2eda940...",
2199
- # size: 43
2200
- # `
2201
- # `
2202
- # ]
2203
- # `
2204
- # // (Directory proto with hash "4cf2eda940..." and size 43)
2205
- # `
2206
- # files: [
2207
- # `
2208
- # name: "baz",
2209
- # digest: `
2210
- # hash: "b2c941073e...",
2211
- # size: 1294,
2212
- # `,
2213
- # is_executable: true
2214
- # `
2215
- # ]
2216
- # `
2217
- # ```
1973
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
1974
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
1975
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
1976
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
1977
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
1978
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
2218
1979
  # Corresponds to the JSON property `root`
2219
1980
  # @return [Google::Apis::RemotebuildexecutionV2::BuildBazelRemoteExecutionV2Directory]
2220
1981
  attr_accessor :root
@@ -2230,8 +1991,7 @@ module Google
2230
1991
  end
2231
1992
  end
2232
1993
 
2233
- # A request message for
2234
- # WaitExecution.
1994
+ # A request message for WaitExecution.
2235
1995
  class BuildBazelRemoteExecutionV2WaitExecutionRequest
2236
1996
  include Google::Apis::Core::Hashable
2237
1997
 
@@ -2263,9 +2023,9 @@ module Google
2263
2023
  # @return [Fixnum]
2264
2024
  attr_accessor :patch
2265
2025
 
2266
- # The pre-release version. Either this field or major/minor/patch fields
2267
- # must be filled. They are mutually exclusive. Pre-release versions are
2268
- # assumed to be earlier than any released versions.
2026
+ # The pre-release version. Either this field or major/minor/patch fields must be
2027
+ # filled. They are mutually exclusive. Pre-release versions are assumed to be
2028
+ # earlier than any released versions.
2269
2029
  # Corresponds to the JSON property `prerelease`
2270
2030
  # @return [String]
2271
2031
  attr_accessor :prerelease
@@ -2288,8 +2048,8 @@ module Google
2288
2048
  class GoogleDevtoolsRemotebuildbotCommandDurations
2289
2049
  include Google::Apis::Core::Hashable
2290
2050
 
2291
- # The time spent preparing the command to be run in a Docker container
2292
- # (includes pulling the Docker image, if necessary).
2051
+ # The time spent preparing the command to be run in a Docker container (includes
2052
+ # pulling the Docker image, if necessary).
2293
2053
  # Corresponds to the JSON property `dockerPrep`
2294
2054
  # @return [String]
2295
2055
  attr_accessor :docker_prep
@@ -2365,18 +2125,23 @@ module Google
2365
2125
  end
2366
2126
  end
2367
2127
 
2368
- # CommandEvents contains counters for the number of warnings and errors
2369
- # that occurred during the execution of a command.
2128
+ # CommandEvents contains counters for the number of warnings and errors that
2129
+ # occurred during the execution of a command.
2370
2130
  class GoogleDevtoolsRemotebuildbotCommandEvents
2371
2131
  include Google::Apis::Core::Hashable
2372
2132
 
2373
- # Indicates whether we are using a cached Docker image (true) or had to pull
2374
- # the Docker image (false) for this command.
2133
+ # Indicates whether we are using a cached Docker image (true) or had to pull the
2134
+ # Docker image (false) for this command.
2375
2135
  # Corresponds to the JSON property `dockerCacheHit`
2376
2136
  # @return [Boolean]
2377
2137
  attr_accessor :docker_cache_hit
2378
2138
  alias_method :docker_cache_hit?, :docker_cache_hit
2379
2139
 
2140
+ # Docker Image name.
2141
+ # Corresponds to the JSON property `dockerImageName`
2142
+ # @return [String]
2143
+ attr_accessor :docker_image_name
2144
+
2380
2145
  # The input cache miss ratio.
2381
2146
  # Corresponds to the JSON property `inputCacheMiss`
2382
2147
  # @return [Float]
@@ -2399,6 +2164,7 @@ module Google
2399
2164
  # Update properties of this object
2400
2165
  def update!(**args)
2401
2166
  @docker_cache_hit = args[:docker_cache_hit] if args.key?(:docker_cache_hit)
2167
+ @docker_image_name = args[:docker_image_name] if args.key?(:docker_image_name)
2402
2168
  @input_cache_miss = args[:input_cache_miss] if args.key?(:input_cache_miss)
2403
2169
  @num_errors = args[:num_errors] if args.key?(:num_errors)
2404
2170
  @num_warnings = args[:num_warnings] if args.key?(:num_warnings)
@@ -2541,28 +2307,24 @@ module Google
2541
2307
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateInstanceRequest
2542
2308
  include Google::Apis::Core::Hashable
2543
2309
 
2544
- # Instance conceptually encapsulates all Remote Build Execution resources
2545
- # for remote builds.
2546
- # An instance consists of storage and compute resources (for example,
2547
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2548
- # running remote builds.
2549
- # All Remote Build Execution API calls are scoped to an instance.
2310
+ # Instance conceptually encapsulates all Remote Build Execution resources for
2311
+ # remote builds. An instance consists of storage and compute resources (for
2312
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2313
+ # running remote builds. All Remote Build Execution API calls are scoped to an
2314
+ # instance.
2550
2315
  # Corresponds to the JSON property `instance`
2551
2316
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
2552
2317
  attr_accessor :instance
2553
2318
 
2554
- # ID of the created instance.
2555
- # A valid `instance_id` must:
2556
- # be 6-50 characters long,
2557
- # contain only lowercase letters, digits, hyphens and underscores,
2558
- # start with a lowercase letter, and
2559
- # end with a lowercase letter or a digit.
2319
+ # ID of the created instance. A valid `instance_id` must: be 6-50 characters
2320
+ # long, contain only lowercase letters, digits, hyphens and underscores, start
2321
+ # with a lowercase letter, and end with a lowercase letter or a digit.
2560
2322
  # Corresponds to the JSON property `instanceId`
2561
2323
  # @return [String]
2562
2324
  attr_accessor :instance_id
2563
2325
 
2564
- # Resource name of the project containing the instance.
2565
- # Format: `projects/[PROJECT_ID]`.
2326
+ # Resource name of the project containing the instance. Format: `projects/[
2327
+ # PROJECT_ID]`.
2566
2328
  # Corresponds to the JSON property `parent`
2567
2329
  # @return [String]
2568
2330
  attr_accessor :parent
@@ -2583,18 +2345,15 @@ module Google
2583
2345
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateWorkerPoolRequest
2584
2346
  include Google::Apis::Core::Hashable
2585
2347
 
2586
- # Resource name of the instance in which to create the new worker pool.
2587
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2348
+ # Resource name of the instance in which to create the new worker pool. Format: `
2349
+ # projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2588
2350
  # Corresponds to the JSON property `parent`
2589
2351
  # @return [String]
2590
2352
  attr_accessor :parent
2591
2353
 
2592
- # ID of the created worker pool.
2593
- # A valid pool ID must:
2594
- # be 6-50 characters long,
2595
- # contain only lowercase letters, digits, hyphens and underscores,
2596
- # start with a lowercase letter, and
2597
- # end with a lowercase letter or a digit.
2354
+ # ID of the created worker pool. A valid pool ID must: be 6-50 characters long,
2355
+ # contain only lowercase letters, digits, hyphens and underscores, start with a
2356
+ # lowercase letter, and end with a lowercase letter or a digit.
2598
2357
  # Corresponds to the JSON property `poolId`
2599
2358
  # @return [String]
2600
2359
  attr_accessor :pool_id
@@ -2620,8 +2379,8 @@ module Google
2620
2379
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteInstanceRequest
2621
2380
  include Google::Apis::Core::Hashable
2622
2381
 
2623
- # Name of the instance to delete.
2624
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2382
+ # Name of the instance to delete. Format: `projects/[PROJECT_ID]/instances/[
2383
+ # INSTANCE_ID]`.
2625
2384
  # Corresponds to the JSON property `name`
2626
2385
  # @return [String]
2627
2386
  attr_accessor :name
@@ -2640,9 +2399,8 @@ module Google
2640
2399
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteWorkerPoolRequest
2641
2400
  include Google::Apis::Core::Hashable
2642
2401
 
2643
- # Name of the worker pool to delete.
2644
- # Format:
2645
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
2402
+ # Name of the worker pool to delete. Format: `projects/[PROJECT_ID]/instances/[
2403
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
2646
2404
  # Corresponds to the JSON property `name`
2647
2405
  # @return [String]
2648
2406
  attr_accessor :name
@@ -2657,12 +2415,107 @@ module Google
2657
2415
  end
2658
2416
  end
2659
2417
 
2418
+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
2419
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
2420
+ # usage time.
2421
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy
2422
+ include Google::Apis::Core::Hashable
2423
+
2424
+ # Defines whether a feature can be used or what values are accepted.
2425
+ # Corresponds to the JSON property `containerImageSources`
2426
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2427
+ attr_accessor :container_image_sources
2428
+
2429
+ # Defines whether a feature can be used or what values are accepted.
2430
+ # Corresponds to the JSON property `dockerAddCapabilities`
2431
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2432
+ attr_accessor :docker_add_capabilities
2433
+
2434
+ # Defines whether a feature can be used or what values are accepted.
2435
+ # Corresponds to the JSON property `dockerChrootPath`
2436
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2437
+ attr_accessor :docker_chroot_path
2438
+
2439
+ # Defines whether a feature can be used or what values are accepted.
2440
+ # Corresponds to the JSON property `dockerNetwork`
2441
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2442
+ attr_accessor :docker_network
2443
+
2444
+ # Defines whether a feature can be used or what values are accepted.
2445
+ # Corresponds to the JSON property `dockerPrivileged`
2446
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2447
+ attr_accessor :docker_privileged
2448
+
2449
+ # Defines whether a feature can be used or what values are accepted.
2450
+ # Corresponds to the JSON property `dockerRunAsRoot`
2451
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2452
+ attr_accessor :docker_run_as_root
2453
+
2454
+ # Defines whether a feature can be used or what values are accepted.
2455
+ # Corresponds to the JSON property `dockerRuntime`
2456
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2457
+ attr_accessor :docker_runtime
2458
+
2459
+ # Defines whether a feature can be used or what values are accepted.
2460
+ # Corresponds to the JSON property `dockerSiblingContainers`
2461
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
2462
+ attr_accessor :docker_sibling_containers
2463
+
2464
+ # linux_isolation allows overriding the docker runtime used for containers
2465
+ # started on Linux.
2466
+ # Corresponds to the JSON property `linuxIsolation`
2467
+ # @return [String]
2468
+ attr_accessor :linux_isolation
2469
+
2470
+ def initialize(**args)
2471
+ update!(**args)
2472
+ end
2473
+
2474
+ # Update properties of this object
2475
+ def update!(**args)
2476
+ @container_image_sources = args[:container_image_sources] if args.key?(:container_image_sources)
2477
+ @docker_add_capabilities = args[:docker_add_capabilities] if args.key?(:docker_add_capabilities)
2478
+ @docker_chroot_path = args[:docker_chroot_path] if args.key?(:docker_chroot_path)
2479
+ @docker_network = args[:docker_network] if args.key?(:docker_network)
2480
+ @docker_privileged = args[:docker_privileged] if args.key?(:docker_privileged)
2481
+ @docker_run_as_root = args[:docker_run_as_root] if args.key?(:docker_run_as_root)
2482
+ @docker_runtime = args[:docker_runtime] if args.key?(:docker_runtime)
2483
+ @docker_sibling_containers = args[:docker_sibling_containers] if args.key?(:docker_sibling_containers)
2484
+ @linux_isolation = args[:linux_isolation] if args.key?(:linux_isolation)
2485
+ end
2486
+ end
2487
+
2488
+ # Defines whether a feature can be used or what values are accepted.
2489
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature
2490
+ include Google::Apis::Core::Hashable
2491
+
2492
+ # A list of acceptable values. Only effective when the policy is `RESTRICTED`.
2493
+ # Corresponds to the JSON property `allowedValues`
2494
+ # @return [Array<String>]
2495
+ attr_accessor :allowed_values
2496
+
2497
+ # The policy of the feature.
2498
+ # Corresponds to the JSON property `policy`
2499
+ # @return [String]
2500
+ attr_accessor :policy
2501
+
2502
+ def initialize(**args)
2503
+ update!(**args)
2504
+ end
2505
+
2506
+ # Update properties of this object
2507
+ def update!(**args)
2508
+ @allowed_values = args[:allowed_values] if args.key?(:allowed_values)
2509
+ @policy = args[:policy] if args.key?(:policy)
2510
+ end
2511
+ end
2512
+
2660
2513
  # The request used for `GetInstance`.
2661
2514
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest
2662
2515
  include Google::Apis::Core::Hashable
2663
2516
 
2664
- # Name of the instance to retrieve.
2665
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2517
+ # Name of the instance to retrieve. Format: `projects/[PROJECT_ID]/instances/[
2518
+ # INSTANCE_ID]`.
2666
2519
  # Corresponds to the JSON property `name`
2667
2520
  # @return [String]
2668
2521
  attr_accessor :name
@@ -2681,9 +2534,8 @@ module Google
2681
2534
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetWorkerPoolRequest
2682
2535
  include Google::Apis::Core::Hashable
2683
2536
 
2684
- # Name of the worker pool to retrieve.
2685
- # Format:
2686
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
2537
+ # Name of the worker pool to retrieve. Format: `projects/[PROJECT_ID]/instances/[
2538
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
2687
2539
  # Corresponds to the JSON property `name`
2688
2540
  # @return [String]
2689
2541
  attr_accessor :name
@@ -2698,15 +2550,21 @@ module Google
2698
2550
  end
2699
2551
  end
2700
2552
 
2701
- # Instance conceptually encapsulates all Remote Build Execution resources
2702
- # for remote builds.
2703
- # An instance consists of storage and compute resources (for example,
2704
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2705
- # running remote builds.
2706
- # All Remote Build Execution API calls are scoped to an instance.
2553
+ # Instance conceptually encapsulates all Remote Build Execution resources for
2554
+ # remote builds. An instance consists of storage and compute resources (for
2555
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2556
+ # running remote builds. All Remote Build Execution API calls are scoped to an
2557
+ # instance.
2707
2558
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance
2708
2559
  include Google::Apis::Core::Hashable
2709
2560
 
2561
+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
2562
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
2563
+ # usage time.
2564
+ # Corresponds to the JSON property `featurePolicy`
2565
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy]
2566
+ attr_accessor :feature_policy
2567
+
2710
2568
  # The location is a GCP region. Currently only `us-central1` is supported.
2711
2569
  # Corresponds to the JSON property `location`
2712
2570
  # @return [String]
@@ -2718,10 +2576,9 @@ module Google
2718
2576
  attr_accessor :logging_enabled
2719
2577
  alias_method :logging_enabled?, :logging_enabled
2720
2578
 
2721
- # Output only. Instance resource name formatted as:
2722
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2723
- # Name should not be populated when creating an instance since it is provided
2724
- # in the `instance_id` field.
2579
+ # Output only. Instance resource name formatted as: `projects/[PROJECT_ID]/
2580
+ # instances/[INSTANCE_ID]`. Name should not be populated when creating an
2581
+ # instance since it is provided in the `instance_id` field.
2725
2582
  # Corresponds to the JSON property `name`
2726
2583
  # @return [String]
2727
2584
  attr_accessor :name
@@ -2737,6 +2594,7 @@ module Google
2737
2594
 
2738
2595
  # Update properties of this object
2739
2596
  def update!(**args)
2597
+ @feature_policy = args[:feature_policy] if args.key?(:feature_policy)
2740
2598
  @location = args[:location] if args.key?(:location)
2741
2599
  @logging_enabled = args[:logging_enabled] if args.key?(:logging_enabled)
2742
2600
  @name = args[:name] if args.key?(:name)
@@ -2748,8 +2606,7 @@ module Google
2748
2606
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListInstancesRequest
2749
2607
  include Google::Apis::Core::Hashable
2750
2608
 
2751
- # Resource name of the project.
2752
- # Format: `projects/[PROJECT_ID]`.
2609
+ # Resource name of the project. Format: `projects/[PROJECT_ID]`.
2753
2610
  # Corresponds to the JSON property `parent`
2754
2611
  # @return [String]
2755
2612
  attr_accessor :parent
@@ -2787,32 +2644,26 @@ module Google
2787
2644
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest
2788
2645
  include Google::Apis::Core::Hashable
2789
2646
 
2790
- # Optional. A filter expression that filters resources listed in
2791
- # the response. The expression must specify the field name, a comparison
2792
- # operator, and the value that you want to use for filtering. The value
2793
- # must be a string, a number, or a boolean. String values are
2794
- # case-insensitive.
2795
- # The comparison operator must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or
2796
- # `<`.
2797
- # The `:` operator can be used with string fields to match substrings.
2798
- # For non-string fields it is equivalent to the `=` operator.
2799
- # The `:*` comparison can be used to test whether a key has been defined.
2800
- # You can also filter on nested fields.
2801
- # To filter on multiple expressions, you can separate expression using
2802
- # `AND` and `OR` operators, using parentheses to specify precedence. If
2803
- # neither operator is specified, `AND` is assumed.
2804
- # Examples:
2805
- # Include only pools with more than 100 reserved workers:
2806
- # `(worker_count > 100) (worker_config.reserved = true)`
2807
- # Include only pools with a certain label or machines of the n1-standard
2808
- # family:
2647
+ # Optional. A filter expression that filters resources listed in the response.
2648
+ # The expression must specify the field name, a comparison operator, and the
2649
+ # value that you want to use for filtering. The value must be a string, a number,
2650
+ # or a boolean. String values are case-insensitive. The comparison operator
2651
+ # must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or `<`. The `:` operator can be
2652
+ # used with string fields to match substrings. For non-string fields it is
2653
+ # equivalent to the `=` operator. The `:*` comparison can be used to test
2654
+ # whether a key has been defined. You can also filter on nested fields. To
2655
+ # filter on multiple expressions, you can separate expression using `AND` and `
2656
+ # OR` operators, using parentheses to specify precedence. If neither operator is
2657
+ # specified, `AND` is assumed. Examples: Include only pools with more than 100
2658
+ # reserved workers: `(worker_count > 100) (worker_config.reserved = true)`
2659
+ # Include only pools with a certain label or machines of the n1-standard family:
2809
2660
  # `worker_config.labels.key1 : * OR worker_config.machine_type: n1-standard`
2810
2661
  # Corresponds to the JSON property `filter`
2811
2662
  # @return [String]
2812
2663
  attr_accessor :filter
2813
2664
 
2814
- # Resource name of the instance.
2815
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2665
+ # Resource name of the instance. Format: `projects/[PROJECT_ID]/instances/[
2666
+ # INSTANCE_ID]`.
2816
2667
  # Corresponds to the JSON property `parent`
2817
2668
  # @return [String]
2818
2669
  attr_accessor :parent
@@ -2847,40 +2698,62 @@ module Google
2847
2698
  end
2848
2699
  end
2849
2700
 
2701
+ # SoleTenancyConfig specifies information required to host a pool on STNs.
2702
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig
2703
+ include Google::Apis::Core::Hashable
2704
+
2705
+ # The sole-tenant node type to host the pool's workers on.
2706
+ # Corresponds to the JSON property `nodeType`
2707
+ # @return [String]
2708
+ attr_accessor :node_type
2709
+
2710
+ # Zone in which STNs are reserved.
2711
+ # Corresponds to the JSON property `nodesZone`
2712
+ # @return [String]
2713
+ attr_accessor :nodes_zone
2714
+
2715
+ def initialize(**args)
2716
+ update!(**args)
2717
+ end
2718
+
2719
+ # Update properties of this object
2720
+ def update!(**args)
2721
+ @node_type = args[:node_type] if args.key?(:node_type)
2722
+ @nodes_zone = args[:nodes_zone] if args.key?(:nodes_zone)
2723
+ end
2724
+ end
2725
+
2850
2726
  # The request used for `UpdateInstance`.
2851
2727
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest
2852
2728
  include Google::Apis::Core::Hashable
2853
2729
 
2854
- # Instance conceptually encapsulates all Remote Build Execution resources
2855
- # for remote builds.
2856
- # An instance consists of storage and compute resources (for example,
2857
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2858
- # running remote builds.
2859
- # All Remote Build Execution API calls are scoped to an instance.
2730
+ # Instance conceptually encapsulates all Remote Build Execution resources for
2731
+ # remote builds. An instance consists of storage and compute resources (for
2732
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2733
+ # running remote builds. All Remote Build Execution API calls are scoped to an
2734
+ # instance.
2860
2735
  # Corresponds to the JSON property `instance`
2861
2736
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
2862
2737
  attr_accessor :instance
2863
2738
 
2864
- # Deprecated, use instance.logging_enabled instead.
2865
- # Whether to enable Stackdriver logging for this instance.
2739
+ # Deprecated, use instance.logging_enabled instead. Whether to enable
2740
+ # Stackdriver logging for this instance.
2866
2741
  # Corresponds to the JSON property `loggingEnabled`
2867
2742
  # @return [Boolean]
2868
2743
  attr_accessor :logging_enabled
2869
2744
  alias_method :logging_enabled?, :logging_enabled
2870
2745
 
2871
- # Deprecated, use instance.Name instead.
2872
- # Name of the instance to update.
2873
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2746
+ # Deprecated, use instance.Name instead. Name of the instance to update. Format:
2747
+ # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2874
2748
  # Corresponds to the JSON property `name`
2875
2749
  # @return [String]
2876
2750
  attr_accessor :name
2877
2751
 
2878
- # The update mask applies to instance. For the `FieldMask` definition, see
2879
- # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2880
- # fieldmask
2881
- # If an empty update_mask is provided, only the non-default valued field in
2882
- # the worker pool field will be updated. Note that in order to update a field
2883
- # to the default value (zero, false, empty string) an explicit update_mask
2752
+ # The update mask applies to instance. For the `FieldMask` definition, see https:
2753
+ # //developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2754
+ # fieldmask If an empty update_mask is provided, only the non-default valued
2755
+ # field in the worker pool field will be updated. Note that in order to update a
2756
+ # field to the default value (zero, false, empty string) an explicit update_mask
2884
2757
  # must be provided.
2885
2758
  # Corresponds to the JSON property `updateMask`
2886
2759
  # @return [String]
@@ -2903,13 +2776,11 @@ module Google
2903
2776
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateWorkerPoolRequest
2904
2777
  include Google::Apis::Core::Hashable
2905
2778
 
2906
- # The update mask applies to worker_pool. For the `FieldMask` definition,
2907
- # see
2779
+ # The update mask applies to worker_pool. For the `FieldMask` definition, see
2908
2780
  # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2909
- # fieldmask
2910
- # If an empty update_mask is provided, only the non-default valued field in
2911
- # the worker pool field will be updated. Note that in order to update a field
2912
- # to the default value (zero, false, empty string) an explicit update_mask
2781
+ # fieldmask If an empty update_mask is provided, only the non-default valued
2782
+ # field in the worker pool field will be updated. Note that in order to update a
2783
+ # field to the default value (zero, false, empty string) an explicit update_mask
2913
2784
  # must be provided.
2914
2785
  # Corresponds to the JSON property `updateMask`
2915
2786
  # @return [String]
@@ -2931,8 +2802,7 @@ module Google
2931
2802
  end
2932
2803
  end
2933
2804
 
2934
- # Defines the configuration to be used for a creating workers in
2935
- # the worker pool.
2805
+ # Defines the configuration to be used for creating workers in the worker pool.
2936
2806
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig
2937
2807
  include Google::Apis::Core::Hashable
2938
2808
 
@@ -2941,34 +2811,31 @@ module Google
2941
2811
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfig]
2942
2812
  attr_accessor :accelerator
2943
2813
 
2944
- # Required. Size of the disk attached to the worker, in GB.
2945
- # See https://cloud.google.com/compute/docs/disks/
2814
+ # Required. Size of the disk attached to the worker, in GB. See https://cloud.
2815
+ # google.com/compute/docs/disks/
2946
2816
  # Corresponds to the JSON property `diskSizeGb`
2947
2817
  # @return [Fixnum]
2948
2818
  attr_accessor :disk_size_gb
2949
2819
 
2950
- # Required. Disk Type to use for the worker.
2951
- # See [Storage
2952
- # options](https://cloud.google.com/compute/docs/disks/#introduction).
2953
- # Currently only `pd-standard` and `pd-ssd` are supported.
2820
+ # Required. Disk Type to use for the worker. See [Storage options](https://cloud.
2821
+ # google.com/compute/docs/disks/#introduction). Currently only `pd-standard` and
2822
+ # `pd-ssd` are supported.
2954
2823
  # Corresponds to the JSON property `diskType`
2955
2824
  # @return [String]
2956
2825
  attr_accessor :disk_type
2957
2826
 
2958
- # Labels associated with the workers.
2959
- # Label keys and values can be no longer than 63 characters, can only contain
2960
- # lowercase letters, numeric characters, underscores and dashes.
2961
- # International letters are permitted. Label keys must start with a letter.
2962
- # Label values are optional.
2963
- # There can not be more than 64 labels per resource.
2827
+ # Labels associated with the workers. Label keys and values can be no longer
2828
+ # than 63 characters, can only contain lowercase letters, numeric characters,
2829
+ # underscores and dashes. International letters are permitted. Label keys must
2830
+ # start with a letter. Label values are optional. There can not be more than 64
2831
+ # labels per resource.
2964
2832
  # Corresponds to the JSON property `labels`
2965
2833
  # @return [Hash<String,String>]
2966
2834
  attr_accessor :labels
2967
2835
 
2968
- # Required. Machine type of the worker, such as `n1-standard-2`.
2969
- # See https://cloud.google.com/compute/docs/machine-types for a list of
2970
- # supported machine types. Note that `f1-micro` and `g1-small` are not yet
2971
- # supported.
2836
+ # Required. Machine type of the worker, such as `n1-standard-2`. See https://
2837
+ # cloud.google.com/compute/docs/machine-types for a list of supported machine
2838
+ # types. Note that `f1-micro` and `g1-small` are not yet supported.
2972
2839
  # Corresponds to the JSON property `machineType`
2973
2840
  # @return [String]
2974
2841
  attr_accessor :machine_type
@@ -2978,30 +2845,34 @@ module Google
2978
2845
  # @return [Fixnum]
2979
2846
  attr_accessor :max_concurrent_actions
2980
2847
 
2981
- # Minimum CPU platform to use when creating the worker.
2982
- # See [CPU Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
2848
+ # Minimum CPU platform to use when creating the worker. See [CPU Platforms](
2849
+ # https://cloud.google.com/compute/docs/cpu-platforms).
2983
2850
  # Corresponds to the JSON property `minCpuPlatform`
2984
2851
  # @return [String]
2985
2852
  attr_accessor :min_cpu_platform
2986
2853
 
2987
- # Determines the type of network access granted to workers. Possible values:
2988
- # - "public": Workers can connect to the public internet.
2989
- # - "private": Workers can only connect to Google APIs and services.
2990
- # - "restricted-private": Workers can only connect to Google APIs that are
2991
- # reachable through `restricted.googleapis.com` (`199.36.153.4/30`).
2854
+ # Determines the type of network access granted to workers. Possible values: - "
2855
+ # public": Workers can connect to the public internet. - "private": Workers can
2856
+ # only connect to Google APIs and services. - "restricted-private": Workers can
2857
+ # only connect to Google APIs that are reachable through `restricted.googleapis.
2858
+ # com` (`199.36.153.4/30`).
2992
2859
  # Corresponds to the JSON property `networkAccess`
2993
2860
  # @return [String]
2994
2861
  attr_accessor :network_access
2995
2862
 
2996
- # Determines whether the worker is reserved (equivalent to a Compute Engine
2997
- # on-demand VM and therefore won't be preempted).
2998
- # See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more
2999
- # details.
2863
+ # Determines whether the worker is reserved (equivalent to a Compute Engine on-
2864
+ # demand VM and therefore won't be preempted). See [Preemptible VMs](https://
2865
+ # cloud.google.com/preemptible-vms/) for more details.
3000
2866
  # Corresponds to the JSON property `reserved`
3001
2867
  # @return [Boolean]
3002
2868
  attr_accessor :reserved
3003
2869
  alias_method :reserved?, :reserved
3004
2870
 
2871
+ # SoleTenancyConfig specifies information required to host a pool on STNs.
2872
+ # Corresponds to the JSON property `soleTenancy`
2873
+ # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig]
2874
+ attr_accessor :sole_tenancy
2875
+
3005
2876
  # The name of the image used by each VM.
3006
2877
  # Corresponds to the JSON property `vmImage`
3007
2878
  # @return [String]
@@ -3022,6 +2893,7 @@ module Google
3022
2893
  @min_cpu_platform = args[:min_cpu_platform] if args.key?(:min_cpu_platform)
3023
2894
  @network_access = args[:network_access] if args.key?(:network_access)
3024
2895
  @reserved = args[:reserved] if args.key?(:reserved)
2896
+ @sole_tenancy = args[:sole_tenancy] if args.key?(:sole_tenancy)
3025
2897
  @vm_image = args[:vm_image] if args.key?(:vm_image)
3026
2898
  end
3027
2899
  end
@@ -3040,10 +2912,9 @@ module Google
3040
2912
  # @return [String]
3041
2913
  attr_accessor :channel
3042
2914
 
3043
- # WorkerPool resource name formatted as:
3044
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
3045
- # name should not be populated when creating a worker pool since it is
3046
- # provided in the `poolId` field.
2915
+ # WorkerPool resource name formatted as: `projects/[PROJECT_ID]/instances/[
2916
+ # INSTANCE_ID]/workerpools/[POOL_ID]`. name should not be populated when
2917
+ # creating a worker pool since it is provided in the `poolId` field.
3047
2918
  # Corresponds to the JSON property `name`
3048
2919
  # @return [String]
3049
2920
  attr_accessor :name
@@ -3053,14 +2924,13 @@ module Google
3053
2924
  # @return [String]
3054
2925
  attr_accessor :state
3055
2926
 
3056
- # Defines the configuration to be used for a creating workers in
3057
- # the worker pool.
2927
+ # Defines the configuration to be used for creating workers in the worker pool.
3058
2928
  # Corresponds to the JSON property `workerConfig`
3059
2929
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig]
3060
2930
  attr_accessor :worker_config
3061
2931
 
3062
- # The desired number of workers in the worker pool. Must be a value between
3063
- # 0 and 15000.
2932
+ # The desired number of workers in the worker pool. Must be a value between 0
2933
+ # and 15000.
3064
2934
  # Corresponds to the JSON property `workerCount`
3065
2935
  # @return [Fixnum]
3066
2936
  attr_accessor :worker_count
@@ -3082,14 +2952,13 @@ module Google
3082
2952
 
3083
2953
  # AdminTemp is a prelimiary set of administration tasks. It's called "Temp"
3084
2954
  # because we do not yet know the best way to represent admin tasks; it's
3085
- # possible that this will be entirely replaced in later versions of this API.
3086
- # If this message proves to be sufficient, it will be renamed in the alpha or
3087
- # beta release of this API.
3088
- # This message (suitably marshalled into a protobuf.Any) can be used as the
3089
- # inline_assignment field in a lease; the lease assignment field should simply
3090
- # be `"admin"` in these cases.
3091
- # This message is heavily based on Swarming administration tasks from the LUCI
3092
- # project (http://github.com/luci/luci-py/appengine/swarming).
2955
+ # possible that this will be entirely replaced in later versions of this API. If
2956
+ # this message proves to be sufficient, it will be renamed in the alpha or beta
2957
+ # release of this API. This message (suitably marshalled into a protobuf.Any)
2958
+ # can be used as the inline_assignment field in a lease; the lease assignment
2959
+ # field should simply be `"admin"` in these cases. This message is heavily based
2960
+ # on Swarming administration tasks from the LUCI project (http://github.com/luci/
2961
+ # luci-py/appengine/swarming).
3093
2962
  class GoogleDevtoolsRemoteworkersV1test2AdminTemp
3094
2963
  include Google::Apis::Core::Hashable
3095
2964
 
@@ -3125,13 +2994,12 @@ module Google
3125
2994
  attr_accessor :contents
3126
2995
 
3127
2996
  # The CommandTask and CommandResult messages assume the existence of a service
3128
- # that can serve blobs of content, identified by a hash and size known as a
3129
- # "digest." The method by which these blobs may be retrieved is not specified
3130
- # here, but a model implementation is in the Remote Execution API's
3131
- # "ContentAddressibleStorage" interface.
3132
- # In the context of the RWAPI, a Digest will virtually always refer to the
3133
- # contents of a file or a directory. The latter is represented by the
3134
- # byte-encoded Directory message.
2997
+ # that can serve blobs of content, identified by a hash and size known as a "
2998
+ # digest." The method by which these blobs may be retrieved is not specified
2999
+ # here, but a model implementation is in the Remote Execution API's "
3000
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
3001
+ # will virtually always refer to the contents of a file or a directory. The
3002
+ # latter is represented by the byte-encoded Directory message.
3135
3003
  # Corresponds to the JSON property `digest`
3136
3004
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
3137
3005
  attr_accessor :digest
@@ -3147,27 +3015,26 @@ module Google
3147
3015
  end
3148
3016
  end
3149
3017
 
3150
- # DEPRECATED - use CommandResult instead.
3151
- # Describes the actual outputs from the task.
3018
+ # DEPRECATED - use CommandResult instead. Describes the actual outputs from the
3019
+ # task.
3152
3020
  class GoogleDevtoolsRemoteworkersV1test2CommandOutputs
3153
3021
  include Google::Apis::Core::Hashable
3154
3022
 
3155
3023
  # exit_code is only fully reliable if the status' code is OK. If the task
3156
- # exceeded its deadline or was cancelled, the process may still produce an
3157
- # exit code as it is cancelled, and this will be populated, but a successful
3158
- # (zero) is unlikely to be correct unless the status code is OK.
3024
+ # exceeded its deadline or was cancelled, the process may still produce an exit
3025
+ # code as it is cancelled, and this will be populated, but a successful (zero)
3026
+ # is unlikely to be correct unless the status code is OK.
3159
3027
  # Corresponds to the JSON property `exitCode`
3160
3028
  # @return [Fixnum]
3161
3029
  attr_accessor :exit_code
3162
3030
 
3163
3031
  # The CommandTask and CommandResult messages assume the existence of a service
3164
- # that can serve blobs of content, identified by a hash and size known as a
3165
- # "digest." The method by which these blobs may be retrieved is not specified
3166
- # here, but a model implementation is in the Remote Execution API's
3167
- # "ContentAddressibleStorage" interface.
3168
- # In the context of the RWAPI, a Digest will virtually always refer to the
3169
- # contents of a file or a directory. The latter is represented by the
3170
- # byte-encoded Directory message.
3032
+ # that can serve blobs of content, identified by a hash and size known as a "
3033
+ # digest." The method by which these blobs may be retrieved is not specified
3034
+ # here, but a model implementation is in the Remote Execution API's "
3035
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
3036
+ # will virtually always refer to the contents of a file or a directory. The
3037
+ # latter is represented by the byte-encoded Directory message.
3171
3038
  # Corresponds to the JSON property `outputs`
3172
3039
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
3173
3040
  attr_accessor :outputs
@@ -3183,9 +3050,8 @@ module Google
3183
3050
  end
3184
3051
  end
3185
3052
 
3186
- # DEPRECATED - use CommandResult instead.
3187
- # Can be used as part of CompleteRequest.metadata, or are part of a more
3188
- # sophisticated message.
3053
+ # DEPRECATED - use CommandResult instead. Can be used as part of CompleteRequest.
3054
+ # metadata, or are part of a more sophisticated message.
3189
3055
  class GoogleDevtoolsRemoteworkersV1test2CommandOverhead
3190
3056
  include Google::Apis::Core::Hashable
3191
3057
 
@@ -3196,8 +3062,8 @@ module Google
3196
3062
  # @return [String]
3197
3063
  attr_accessor :duration
3198
3064
 
3199
- # The amount of time *not* spent executing the command (ie
3200
- # uploading/downloading files).
3065
+ # The amount of time *not* spent executing the command (ie uploading/downloading
3066
+ # files).
3201
3067
  # Corresponds to the JSON property `overhead`
3202
3068
  # @return [String]
3203
3069
  attr_accessor :overhead
@@ -3225,46 +3091,44 @@ module Google
3225
3091
  # @return [String]
3226
3092
  attr_accessor :duration
3227
3093
 
3228
- # The exit code of the process. An exit code of "0" should only be trusted if
3229
- # `status` has a code of OK (otherwise it may simply be unset).
3094
+ # The exit code of the process. An exit code of "0" should only be trusted if `
3095
+ # status` has a code of OK (otherwise it may simply be unset).
3230
3096
  # Corresponds to the JSON property `exitCode`
3231
3097
  # @return [Fixnum]
3232
3098
  attr_accessor :exit_code
3233
3099
 
3234
- # Implementation-dependent metadata about the task. Both servers and bots
3235
- # may define messages which can be encoded here; bots are free to provide
3236
- # metadata in multiple formats, and servers are free to choose one or more
3237
- # of the values to process and ignore others. In particular, it is *not*
3238
- # considered an error for the bot to provide the server with a field that it
3239
- # doesn't know about.
3100
+ # Implementation-dependent metadata about the task. Both servers and bots may
3101
+ # define messages which can be encoded here; bots are free to provide metadata
3102
+ # in multiple formats, and servers are free to choose one or more of the values
3103
+ # to process and ignore others. In particular, it is *not* considered an error
3104
+ # for the bot to provide the server with a field that it doesn't know about.
3240
3105
  # Corresponds to the JSON property `metadata`
3241
3106
  # @return [Array<Hash<String,Object>>]
3242
3107
  attr_accessor :metadata
3243
3108
 
3244
3109
  # The CommandTask and CommandResult messages assume the existence of a service
3245
- # that can serve blobs of content, identified by a hash and size known as a
3246
- # "digest." The method by which these blobs may be retrieved is not specified
3247
- # here, but a model implementation is in the Remote Execution API's
3248
- # "ContentAddressibleStorage" interface.
3249
- # In the context of the RWAPI, a Digest will virtually always refer to the
3250
- # contents of a file or a directory. The latter is represented by the
3251
- # byte-encoded Directory message.
3110
+ # that can serve blobs of content, identified by a hash and size known as a "
3111
+ # digest." The method by which these blobs may be retrieved is not specified
3112
+ # here, but a model implementation is in the Remote Execution API's "
3113
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
3114
+ # will virtually always refer to the contents of a file or a directory. The
3115
+ # latter is represented by the byte-encoded Directory message.
3252
3116
  # Corresponds to the JSON property `outputs`
3253
3117
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
3254
3118
  attr_accessor :outputs
3255
3119
 
3256
- # The amount of time *not* spent executing the command (ie
3257
- # uploading/downloading files).
3120
+ # The amount of time *not* spent executing the command (ie uploading/downloading
3121
+ # files).
3258
3122
  # Corresponds to the JSON property `overhead`
3259
3123
  # @return [String]
3260
3124
  attr_accessor :overhead
3261
3125
 
3262
- # The `Status` type defines a logical error model that is suitable for
3263
- # different programming environments, including REST APIs and RPC APIs. It is
3264
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3265
- # three pieces of data: error code, error message, and error details.
3266
- # You can find out more about this error model and how to work with it in the
3267
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3126
+ # The `Status` type defines a logical error model that is suitable for different
3127
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3128
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3129
+ # data: error code, error message, and error details. You can find out more
3130
+ # about this error model and how to work with it in the [API Design Guide](https:
3131
+ # //cloud.google.com/apis/design/errors).
3268
3132
  # Corresponds to the JSON property `status`
3269
3133
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
3270
3134
  attr_accessor :status
@@ -3320,14 +3184,13 @@ module Google
3320
3184
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskInputs
3321
3185
  include Google::Apis::Core::Hashable
3322
3186
 
3323
- # The command itself to run (e.g., argv).
3324
- # This field should be passed directly to the underlying operating system,
3325
- # and so it must be sensible to that operating system. For example, on
3326
- # Windows, the first argument might be "C:\Windows\System32\ping.exe" -
3327
- # that is, using drive letters and backslashes. A command for a *nix
3328
- # system, on the other hand, would use forward slashes.
3329
- # All other fields in the RWAPI must consistently use forward slashes,
3330
- # since those fields may be interpretted by both the service and the bot.
3187
+ # The command itself to run (e.g., argv). This field should be passed directly
3188
+ # to the underlying operating system, and so it must be sensible to that
3189
+ # operating system. For example, on Windows, the first argument might be "C:\
3190
+ # Windows\System32\ping.exe" - that is, using drive letters and backslashes. A
3191
+ # command for a *nix system, on the other hand, would use forward slashes. All
3192
+ # other fields in the RWAPI must consistently use forward slashes, since those
3193
+ # fields may be interpretted by both the service and the bot.
3331
3194
  # Corresponds to the JSON property `arguments`
3332
3195
  # @return [Array<String>]
3333
3196
  attr_accessor :arguments
@@ -3337,31 +3200,29 @@ module Google
3337
3200
  # @return [Array<Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputsEnvironmentVariable>]
3338
3201
  attr_accessor :environment_variables
3339
3202
 
3340
- # The input filesystem to be set up prior to the task beginning. The
3341
- # contents should be a repeated set of FileMetadata messages though other
3342
- # formats are allowed if better for the implementation (eg, a LUCI-style
3343
- # .isolated file).
3344
- # This field is repeated since implementations might want to cache the
3345
- # metadata, in which case it may be useful to break up portions of the
3346
- # filesystem that change frequently (eg, specific input files) from those
3347
- # that don't (eg, standard header files).
3203
+ # The input filesystem to be set up prior to the task beginning. The contents
3204
+ # should be a repeated set of FileMetadata messages though other formats are
3205
+ # allowed if better for the implementation (eg, a LUCI-style .isolated file).
3206
+ # This field is repeated since implementations might want to cache the metadata,
3207
+ # in which case it may be useful to break up portions of the filesystem that
3208
+ # change frequently (eg, specific input files) from those that don't (eg,
3209
+ # standard header files).
3348
3210
  # Corresponds to the JSON property `files`
3349
3211
  # @return [Array<Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest>]
3350
3212
  attr_accessor :files
3351
3213
 
3352
- # Inline contents for blobs expected to be needed by the bot to execute the
3353
- # task. For example, contents of entries in `files` or blobs that are
3354
- # indirectly referenced by an entry there.
3355
- # The bot should check against this list before downloading required task
3356
- # inputs to reduce the number of communications between itself and the
3357
- # remote CAS server.
3214
+ # Inline contents for blobs expected to be needed by the bot to execute the task.
3215
+ # For example, contents of entries in `files` or blobs that are indirectly
3216
+ # referenced by an entry there. The bot should check against this list before
3217
+ # downloading required task inputs to reduce the number of communications
3218
+ # between itself and the remote CAS server.
3358
3219
  # Corresponds to the JSON property `inlineBlobs`
3359
3220
  # @return [Array<Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Blob>]
3360
3221
  attr_accessor :inline_blobs
3361
3222
 
3362
- # Directory from which a command is executed. It is a relative directory
3363
- # with respect to the bot's working directory (i.e., "./"). If it is
3364
- # non-empty, then it must exist under "./". Otherwise, "./" will be used.
3223
+ # Directory from which a command is executed. It is a relative directory with
3224
+ # respect to the bot's working directory (i.e., "./"). If it is non-empty, then
3225
+ # it must exist under "./". Otherwise, "./" will be used.
3365
3226
  # Corresponds to the JSON property `workingDirectory`
3366
3227
  # @return [String]
3367
3228
  attr_accessor :working_directory
@@ -3409,32 +3270,32 @@ module Google
3409
3270
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskOutputs
3410
3271
  include Google::Apis::Core::Hashable
3411
3272
 
3412
- # A list of expected directories, relative to the execution root. All paths
3413
- # MUST be delimited by forward slashes.
3273
+ # A list of expected directories, relative to the execution root. All paths MUST
3274
+ # be delimited by forward slashes.
3414
3275
  # Corresponds to the JSON property `directories`
3415
3276
  # @return [Array<String>]
3416
3277
  attr_accessor :directories
3417
3278
 
3418
- # A list of expected files, relative to the execution root. All paths
3419
- # MUST be delimited by forward slashes.
3279
+ # A list of expected files, relative to the execution root. All paths MUST be
3280
+ # delimited by forward slashes.
3420
3281
  # Corresponds to the JSON property `files`
3421
3282
  # @return [Array<String>]
3422
3283
  attr_accessor :files
3423
3284
 
3424
- # The destination to which any stderr should be sent. The method by which
3425
- # the bot should send the stream contents to that destination is not
3426
- # defined in this API. As examples, the destination could be a file
3427
- # referenced in the `files` field in this message, or it could be a URI
3428
- # that must be written via the ByteStream API.
3285
+ # The destination to which any stderr should be sent. The method by which the
3286
+ # bot should send the stream contents to that destination is not defined in this
3287
+ # API. As examples, the destination could be a file referenced in the `files`
3288
+ # field in this message, or it could be a URI that must be written via the
3289
+ # ByteStream API.
3429
3290
  # Corresponds to the JSON property `stderrDestination`
3430
3291
  # @return [String]
3431
3292
  attr_accessor :stderr_destination
3432
3293
 
3433
- # The destination to which any stdout should be sent. The method by which
3434
- # the bot should send the stream contents to that destination is not
3435
- # defined in this API. As examples, the destination could be a file
3436
- # referenced in the `files` field in this message, or it could be a URI
3437
- # that must be written via the ByteStream API.
3294
+ # The destination to which any stdout should be sent. The method by which the
3295
+ # bot should send the stream contents to that destination is not defined in this
3296
+ # API. As examples, the destination could be a file referenced in the `files`
3297
+ # field in this message, or it could be a URI that must be written via the
3298
+ # ByteStream API.
3438
3299
  # Corresponds to the JSON property `stdoutDestination`
3439
3300
  # @return [String]
3440
3301
  attr_accessor :stdout_destination
@@ -3456,27 +3317,26 @@ module Google
3456
3317
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskTimeouts
3457
3318
  include Google::Apis::Core::Hashable
3458
3319
 
3459
- # This specifies the maximum time that the task can run, excluding the
3460
- # time required to download inputs or upload outputs. That is, the worker
3461
- # will terminate the task if it runs longer than this.
3320
+ # This specifies the maximum time that the task can run, excluding the time
3321
+ # required to download inputs or upload outputs. That is, the worker will
3322
+ # terminate the task if it runs longer than this.
3462
3323
  # Corresponds to the JSON property `execution`
3463
3324
  # @return [String]
3464
3325
  attr_accessor :execution
3465
3326
 
3466
- # This specifies the maximum amount of time the task can be idle - that is,
3467
- # go without generating some output in either stdout or stderr. If the
3468
- # process is silent for more than the specified time, the worker will
3469
- # terminate the task.
3327
+ # This specifies the maximum amount of time the task can be idle - that is, go
3328
+ # without generating some output in either stdout or stderr. If the process is
3329
+ # silent for more than the specified time, the worker will terminate the task.
3470
3330
  # Corresponds to the JSON property `idle`
3471
3331
  # @return [String]
3472
3332
  attr_accessor :idle
3473
3333
 
3474
3334
  # If the execution or IO timeouts are exceeded, the worker will try to
3475
- # gracefully terminate the task and return any existing logs. However,
3476
- # tasks may be hard-frozen in which case this process will fail. This
3477
- # timeout specifies how long to wait for a terminated task to shut down
3478
- # gracefully (e.g. via SIGTERM) before we bring down the hammer (e.g.
3479
- # SIGKILL on *nix, CTRL_BREAK_EVENT on Windows).
3335
+ # gracefully terminate the task and return any existing logs. However, tasks may
3336
+ # be hard-frozen in which case this process will fail. This timeout specifies
3337
+ # how long to wait for a terminated task to shut down gracefully (e.g. via
3338
+ # SIGTERM) before we bring down the hammer (e.g. SIGKILL on *nix,
3339
+ # CTRL_BREAK_EVENT on Windows).
3480
3340
  # Corresponds to the JSON property `shutdown`
3481
3341
  # @return [String]
3482
3342
  attr_accessor :shutdown
@@ -3494,13 +3354,12 @@ module Google
3494
3354
  end
3495
3355
 
3496
3356
  # The CommandTask and CommandResult messages assume the existence of a service
3497
- # that can serve blobs of content, identified by a hash and size known as a
3498
- # "digest." The method by which these blobs may be retrieved is not specified
3499
- # here, but a model implementation is in the Remote Execution API's
3500
- # "ContentAddressibleStorage" interface.
3501
- # In the context of the RWAPI, a Digest will virtually always refer to the
3502
- # contents of a file or a directory. The latter is represented by the
3503
- # byte-encoded Directory message.
3357
+ # that can serve blobs of content, identified by a hash and size known as a "
3358
+ # digest." The method by which these blobs may be retrieved is not specified
3359
+ # here, but a model implementation is in the Remote Execution API's "
3360
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
3361
+ # will virtually always refer to the contents of a file or a directory. The
3362
+ # latter is represented by the byte-encoded Directory message.
3504
3363
  class GoogleDevtoolsRemoteworkersV1test2Digest
3505
3364
  include Google::Apis::Core::Hashable
3506
3365
 
@@ -3511,9 +3370,9 @@ module Google
3511
3370
  attr_accessor :hash_prop
3512
3371
 
3513
3372
  # The size of the contents. While this is not strictly required as part of an
3514
- # identifier (after all, any given hash will have exactly one canonical
3515
- # size), it's useful in almost all cases when one might want to send or
3516
- # retrieve blobs of content and is included here for this reason.
3373
+ # identifier (after all, any given hash will have exactly one canonical size),
3374
+ # it's useful in almost all cases when one might want to send or retrieve blobs
3375
+ # of content and is included here for this reason.
3517
3376
  # Corresponds to the JSON property `sizeBytes`
3518
3377
  # @return [Fixnum]
3519
3378
  attr_accessor :size_bytes
@@ -3561,13 +3420,12 @@ module Google
3561
3420
  include Google::Apis::Core::Hashable
3562
3421
 
3563
3422
  # The CommandTask and CommandResult messages assume the existence of a service
3564
- # that can serve blobs of content, identified by a hash and size known as a
3565
- # "digest." The method by which these blobs may be retrieved is not specified
3566
- # here, but a model implementation is in the Remote Execution API's
3567
- # "ContentAddressibleStorage" interface.
3568
- # In the context of the RWAPI, a Digest will virtually always refer to the
3569
- # contents of a file or a directory. The latter is represented by the
3570
- # byte-encoded Directory message.
3423
+ # that can serve blobs of content, identified by a hash and size known as a "
3424
+ # digest." The method by which these blobs may be retrieved is not specified
3425
+ # here, but a model implementation is in the Remote Execution API's "
3426
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
3427
+ # will virtually always refer to the contents of a file or a directory. The
3428
+ # latter is represented by the byte-encoded Directory message.
3571
3429
  # Corresponds to the JSON property `digest`
3572
3430
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
3573
3431
  attr_accessor :digest
@@ -3593,21 +3451,20 @@ module Google
3593
3451
  class GoogleDevtoolsRemoteworkersV1test2FileMetadata
3594
3452
  include Google::Apis::Core::Hashable
3595
3453
 
3596
- # If the file is small enough, its contents may also or alternatively be
3597
- # listed here.
3454
+ # If the file is small enough, its contents may also or alternatively be listed
3455
+ # here.
3598
3456
  # Corresponds to the JSON property `contents`
3599
3457
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
3600
3458
  # @return [String]
3601
3459
  attr_accessor :contents
3602
3460
 
3603
3461
  # The CommandTask and CommandResult messages assume the existence of a service
3604
- # that can serve blobs of content, identified by a hash and size known as a
3605
- # "digest." The method by which these blobs may be retrieved is not specified
3606
- # here, but a model implementation is in the Remote Execution API's
3607
- # "ContentAddressibleStorage" interface.
3608
- # In the context of the RWAPI, a Digest will virtually always refer to the
3609
- # contents of a file or a directory. The latter is represented by the
3610
- # byte-encoded Directory message.
3462
+ # that can serve blobs of content, identified by a hash and size known as a "
3463
+ # digest." The method by which these blobs may be retrieved is not specified
3464
+ # here, but a model implementation is in the Remote Execution API's "
3465
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
3466
+ # will virtually always refer to the contents of a file or a directory. The
3467
+ # latter is represented by the byte-encoded Directory message.
3611
3468
  # Corresponds to the JSON property `digest`
3612
3469
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleDevtoolsRemoteworkersV1test2Digest]
3613
3470
  attr_accessor :digest
@@ -3618,11 +3475,11 @@ module Google
3618
3475
  attr_accessor :is_executable
3619
3476
  alias_method :is_executable?, :is_executable
3620
3477
 
3621
- # The path of this file. If this message is part of the
3622
- # CommandOutputs.outputs fields, the path is relative to the execution root
3623
- # and must correspond to an entry in CommandTask.outputs.files. If this
3624
- # message is part of a Directory message, then the path is relative to the
3625
- # root of that directory. All paths MUST be delimited by forward slashes.
3478
+ # The path of this file. If this message is part of the CommandOutputs.outputs
3479
+ # fields, the path is relative to the execution root and must correspond to an
3480
+ # entry in CommandTask.outputs.files. If this message is part of a Directory
3481
+ # message, then the path is relative to the root of that directory. All paths
3482
+ # MUST be delimited by forward slashes.
3626
3483
  # Corresponds to the JSON property `path`
3627
3484
  # @return [String]
3628
3485
  attr_accessor :path
@@ -3645,47 +3502,45 @@ module Google
3645
3502
  class GoogleLongrunningOperation
3646
3503
  include Google::Apis::Core::Hashable
3647
3504
 
3648
- # If the value is `false`, it means the operation is still in progress.
3649
- # If `true`, the operation is completed, and either `error` or `response` is
3650
- # available.
3505
+ # If the value is `false`, it means the operation is still in progress. If `true`
3506
+ # , the operation is completed, and either `error` or `response` is available.
3651
3507
  # Corresponds to the JSON property `done`
3652
3508
  # @return [Boolean]
3653
3509
  attr_accessor :done
3654
3510
  alias_method :done?, :done
3655
3511
 
3656
- # The `Status` type defines a logical error model that is suitable for
3657
- # different programming environments, including REST APIs and RPC APIs. It is
3658
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3659
- # three pieces of data: error code, error message, and error details.
3660
- # You can find out more about this error model and how to work with it in the
3661
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3512
+ # The `Status` type defines a logical error model that is suitable for different
3513
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3514
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3515
+ # data: error code, error message, and error details. You can find out more
3516
+ # about this error model and how to work with it in the [API Design Guide](https:
3517
+ # //cloud.google.com/apis/design/errors).
3662
3518
  # Corresponds to the JSON property `error`
3663
3519
  # @return [Google::Apis::RemotebuildexecutionV2::GoogleRpcStatus]
3664
3520
  attr_accessor :error
3665
3521
 
3666
- # Service-specific metadata associated with the operation. It typically
3667
- # contains progress information and common metadata such as create time.
3668
- # Some services might not provide such metadata. Any method that returns a
3669
- # long-running operation should document the metadata type, if any.
3522
+ # Service-specific metadata associated with the operation. It typically contains
3523
+ # progress information and common metadata such as create time. Some services
3524
+ # might not provide such metadata. Any method that returns a long-running
3525
+ # operation should document the metadata type, if any.
3670
3526
  # Corresponds to the JSON property `metadata`
3671
3527
  # @return [Hash<String,Object>]
3672
3528
  attr_accessor :metadata
3673
3529
 
3674
3530
  # The server-assigned name, which is only unique within the same service that
3675
- # originally returns it. If you use the default HTTP mapping, the
3676
- # `name` should be a resource name ending with `operations/`unique_id``.
3531
+ # originally returns it. If you use the default HTTP mapping, the `name` should
3532
+ # be a resource name ending with `operations/`unique_id``.
3677
3533
  # Corresponds to the JSON property `name`
3678
3534
  # @return [String]
3679
3535
  attr_accessor :name
3680
3536
 
3681
- # The normal response of the operation in case of success. If the original
3682
- # method returns no data on success, such as `Delete`, the response is
3683
- # `google.protobuf.Empty`. If the original method is standard
3684
- # `Get`/`Create`/`Update`, the response should be the resource. For other
3685
- # methods, the response should have the type `XxxResponse`, where `Xxx`
3686
- # is the original method name. For example, if the original method name
3687
- # is `TakeSnapshot()`, the inferred response type is
3688
- # `TakeSnapshotResponse`.
3537
+ # The normal response of the operation in case of success. If the original
3538
+ # method returns no data on success, such as `Delete`, the response is `google.
3539
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
3540
+ # the response should be the resource. For other methods, the response should
3541
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
3542
+ # example, if the original method name is `TakeSnapshot()`, the inferred
3543
+ # response type is `TakeSnapshotResponse`.
3689
3544
  # Corresponds to the JSON property `response`
3690
3545
  # @return [Hash<String,Object>]
3691
3546
  attr_accessor :response
@@ -3704,12 +3559,12 @@ module Google
3704
3559
  end
3705
3560
  end
3706
3561
 
3707
- # The `Status` type defines a logical error model that is suitable for
3708
- # different programming environments, including REST APIs and RPC APIs. It is
3709
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3710
- # three pieces of data: error code, error message, and error details.
3711
- # You can find out more about this error model and how to work with it in the
3712
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3562
+ # The `Status` type defines a logical error model that is suitable for different
3563
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3564
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3565
+ # data: error code, error message, and error details. You can find out more
3566
+ # about this error model and how to work with it in the [API Design Guide](https:
3567
+ # //cloud.google.com/apis/design/errors).
3713
3568
  class GoogleRpcStatus
3714
3569
  include Google::Apis::Core::Hashable
3715
3570
 
@@ -3718,15 +3573,15 @@ module Google
3718
3573
  # @return [Fixnum]
3719
3574
  attr_accessor :code
3720
3575
 
3721
- # A list of messages that carry the error details. There is a common set of
3576
+ # A list of messages that carry the error details. There is a common set of
3722
3577
  # message types for APIs to use.
3723
3578
  # Corresponds to the JSON property `details`
3724
3579
  # @return [Array<Hash<String,Object>>]
3725
3580
  attr_accessor :details
3726
3581
 
3727
- # A developer-facing error message, which should be in English. Any
3728
- # user-facing error message should be localized and sent in the
3729
- # google.rpc.Status.details field, or localized by the client.
3582
+ # A developer-facing error message, which should be in English. Any user-facing
3583
+ # error message should be localized and sent in the google.rpc.Status.details
3584
+ # field, or localized by the client.
3730
3585
  # Corresponds to the JSON property `message`
3731
3586
  # @return [String]
3732
3587
  attr_accessor :message