google-api-client 0.43.0 → 0.44.0

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (696)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +218 -0
  3. data/docs/oauth-server.md +4 -6
  4. data/generated/google/apis/accessapproval_v1.rb +1 -1
  5. data/generated/google/apis/accessapproval_v1/classes.rb +51 -86
  6. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  7. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  8. data/generated/google/apis/accesscontextmanager_v1/classes.rb +198 -236
  9. data/generated/google/apis/accesscontextmanager_v1/service.rb +128 -171
  10. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  11. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  12. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  13. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  14. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +17 -6
  15. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  16. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  17. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +47 -2
  18. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +18 -0
  19. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  20. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  21. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  22. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  23. data/generated/google/apis/admin_directory_v1.rb +6 -8
  24. data/generated/google/apis/admin_directory_v1/classes.rb +224 -243
  25. data/generated/google/apis/admin_directory_v1/representations.rb +14 -40
  26. data/generated/google/apis/admin_directory_v1/service.rb +475 -1026
  27. data/generated/google/apis/admin_reports_v1.rb +6 -5
  28. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  29. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  30. data/generated/google/apis/admob_v1.rb +1 -1
  31. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  32. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  33. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  34. data/generated/google/apis/androidmanagement_v1/classes.rb +95 -59
  35. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  36. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  37. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  38. data/generated/google/apis/apigee_v1.rb +6 -7
  39. data/generated/google/apis/apigee_v1/classes.rb +205 -75
  40. data/generated/google/apis/apigee_v1/representations.rb +51 -0
  41. data/generated/google/apis/apigee_v1/service.rb +133 -34
  42. data/generated/google/apis/appengine_v1.rb +1 -1
  43. data/generated/google/apis/appengine_v1/classes.rb +45 -35
  44. data/generated/google/apis/appengine_v1/representations.rb +2 -0
  45. data/generated/google/apis/appengine_v1/service.rb +38 -47
  46. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  47. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  48. data/generated/google/apis/appengine_v1beta.rb +1 -1
  49. data/generated/google/apis/appengine_v1beta/classes.rb +45 -35
  50. data/generated/google/apis/appengine_v1beta/representations.rb +2 -0
  51. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  52. data/generated/google/apis/appsmarket_v2.rb +1 -1
  53. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  54. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  55. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +235 -337
  56. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  57. data/generated/google/apis/bigquery_v2.rb +1 -1
  58. data/generated/google/apis/bigquery_v2/classes.rb +355 -553
  59. data/generated/google/apis/bigquery_v2/representations.rb +1 -0
  60. data/generated/google/apis/bigquery_v2/service.rb +32 -40
  61. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  62. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  63. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  64. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  65. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  66. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  67. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  68. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  69. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  70. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  71. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  72. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  73. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  74. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  75. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  76. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  77. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  78. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  79. data/generated/google/apis/bigtableadmin_v1/classes.rb +50 -0
  80. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  81. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  82. data/generated/google/apis/bigtableadmin_v2/classes.rb +50 -0
  83. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  84. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  85. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  86. data/generated/google/apis/binaryauthorization_v1/classes.rb +239 -354
  87. data/generated/google/apis/binaryauthorization_v1/service.rb +74 -89
  88. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  89. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +239 -354
  90. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +74 -89
  91. data/generated/google/apis/calendar_v3.rb +1 -1
  92. data/generated/google/apis/chat_v1.rb +1 -1
  93. data/generated/google/apis/chat_v1/classes.rb +90 -115
  94. data/generated/google/apis/chat_v1/service.rb +30 -42
  95. data/generated/google/apis/civicinfo_v2.rb +1 -1
  96. data/generated/google/apis/cloudasset_v1.rb +1 -1
  97. data/generated/google/apis/cloudasset_v1/classes.rb +712 -1039
  98. data/generated/google/apis/cloudasset_v1/service.rb +125 -167
  99. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  100. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  101. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  102. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  103. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  104. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  105. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  106. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +220 -276
  107. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  108. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  109. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  110. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  111. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  112. data/generated/google/apis/cloudbilling_v1/classes.rb +284 -445
  113. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  114. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  115. data/generated/google/apis/cloudbuild_v1/classes.rb +291 -343
  116. data/generated/google/apis/cloudbuild_v1/representations.rb +1 -0
  117. data/generated/google/apis/cloudbuild_v1/service.rb +48 -63
  118. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  119. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +283 -329
  120. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +1 -0
  121. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  122. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  123. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +269 -313
  124. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +1 -0
  125. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  126. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  127. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  128. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  129. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  130. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  131. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  132. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  133. data/generated/google/apis/cloudfunctions_v1/classes.rb +323 -493
  134. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  135. data/generated/google/apis/cloudidentity_v1.rb +1 -1
  136. data/generated/google/apis/cloudidentity_v1/classes.rb +625 -75
  137. data/generated/google/apis/cloudidentity_v1/representations.rb +203 -0
  138. data/generated/google/apis/cloudidentity_v1/service.rb +43 -61
  139. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  140. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1045 -317
  141. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +331 -22
  142. data/generated/google/apis/cloudidentity_v1beta1/service.rb +742 -96
  143. data/generated/google/apis/cloudiot_v1.rb +1 -1
  144. data/generated/google/apis/cloudiot_v1/classes.rb +263 -373
  145. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  146. data/generated/google/apis/cloudkms_v1.rb +1 -1
  147. data/generated/google/apis/cloudkms_v1/classes.rb +502 -692
  148. data/generated/google/apis/cloudkms_v1/representations.rb +17 -0
  149. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  150. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  151. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  152. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  153. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  154. data/generated/google/apis/cloudresourcemanager_v1/service.rb +1 -1
  155. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  156. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +1 -1
  157. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  158. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  159. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  160. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  161. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  162. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  163. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  164. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  165. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  166. data/generated/google/apis/cloudsearch_v1/classes.rb +650 -781
  167. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  168. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  169. data/generated/google/apis/cloudshell_v1.rb +1 -1
  170. data/generated/google/apis/cloudshell_v1/classes.rb +36 -227
  171. data/generated/google/apis/cloudshell_v1/representations.rb +0 -67
  172. data/generated/google/apis/cloudshell_v1/service.rb +21 -25
  173. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  174. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  175. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  176. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  177. data/generated/google/apis/cloudtasks_v2/classes.rb +605 -933
  178. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  179. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  180. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +602 -964
  181. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  182. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  183. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +609 -938
  184. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  185. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  186. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  187. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  188. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  189. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  190. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  191. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  192. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  193. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  194. data/generated/google/apis/composer_v1.rb +1 -1
  195. data/generated/google/apis/composer_v1/classes.rb +190 -242
  196. data/generated/google/apis/composer_v1/service.rb +79 -150
  197. data/generated/google/apis/composer_v1beta1.rb +1 -1
  198. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  199. data/generated/google/apis/composer_v1beta1/service.rb +92 -179
  200. data/generated/google/apis/compute_alpha.rb +1 -1
  201. data/generated/google/apis/compute_alpha/classes.rb +681 -127
  202. data/generated/google/apis/compute_alpha/representations.rb +110 -6
  203. data/generated/google/apis/compute_alpha/service.rb +695 -692
  204. data/generated/google/apis/compute_beta.rb +1 -1
  205. data/generated/google/apis/compute_beta/classes.rb +570 -70
  206. data/generated/google/apis/compute_beta/representations.rb +112 -1
  207. data/generated/google/apis/compute_beta/service.rb +608 -605
  208. data/generated/google/apis/compute_v1.rb +1 -1
  209. data/generated/google/apis/compute_v1/classes.rb +977 -85
  210. data/generated/google/apis/compute_v1/representations.rb +372 -0
  211. data/generated/google/apis/compute_v1/service.rb +747 -15
  212. data/generated/google/apis/container_v1.rb +1 -1
  213. data/generated/google/apis/container_v1/classes.rb +915 -965
  214. data/generated/google/apis/container_v1/representations.rb +53 -0
  215. data/generated/google/apis/container_v1/service.rb +435 -502
  216. data/generated/google/apis/container_v1beta1.rb +1 -1
  217. data/generated/google/apis/container_v1beta1/classes.rb +1021 -1043
  218. data/generated/google/apis/container_v1beta1/representations.rb +70 -0
  219. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  220. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  221. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +456 -596
  222. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  223. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  224. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +454 -613
  225. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  226. data/generated/google/apis/content_v2.rb +1 -1
  227. data/generated/google/apis/content_v2/classes.rb +3 -1
  228. data/generated/google/apis/content_v2_1.rb +1 -1
  229. data/generated/google/apis/content_v2_1/classes.rb +93 -2
  230. data/generated/google/apis/content_v2_1/representations.rb +34 -0
  231. data/generated/google/apis/content_v2_1/service.rb +53 -2
  232. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  233. data/generated/google/apis/datacatalog_v1beta1/classes.rb +382 -573
  234. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  235. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  236. data/generated/google/apis/dataflow_v1b3/classes.rb +1015 -973
  237. data/generated/google/apis/dataflow_v1b3/representations.rb +115 -0
  238. data/generated/google/apis/dataflow_v1b3/service.rb +299 -257
  239. data/generated/google/apis/datafusion_v1.rb +5 -8
  240. data/generated/google/apis/datafusion_v1/classes.rb +268 -397
  241. data/generated/google/apis/datafusion_v1/representations.rb +3 -0
  242. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  243. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  244. data/generated/google/apis/datafusion_v1beta1/classes.rb +268 -397
  245. data/generated/google/apis/datafusion_v1beta1/representations.rb +3 -0
  246. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  247. data/generated/google/apis/dataproc_v1.rb +1 -1
  248. data/generated/google/apis/dataproc_v1/classes.rb +37 -4
  249. data/generated/google/apis/dataproc_v1/representations.rb +16 -0
  250. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  251. data/generated/google/apis/dataproc_v1beta2/classes.rb +56 -0
  252. data/generated/google/apis/dataproc_v1beta2/representations.rb +31 -0
  253. data/generated/google/apis/datastore_v1.rb +1 -1
  254. data/generated/google/apis/datastore_v1/classes.rb +330 -472
  255. data/generated/google/apis/datastore_v1/service.rb +52 -63
  256. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  257. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  258. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  259. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  260. data/generated/google/apis/datastore_v1beta3/classes.rb +255 -371
  261. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  262. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  263. data/generated/google/apis/dfareporting_v3_3/classes.rb +326 -339
  264. data/generated/google/apis/dfareporting_v3_3/representations.rb +42 -0
  265. data/generated/google/apis/dfareporting_v3_3/service.rb +673 -1286
  266. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  267. data/generated/google/apis/dfareporting_v3_4/classes.rb +348 -350
  268. data/generated/google/apis/dfareporting_v3_4/representations.rb +43 -0
  269. data/generated/google/apis/dfareporting_v3_4/service.rb +708 -1285
  270. data/generated/google/apis/dialogflow_v2.rb +1 -1
  271. data/generated/google/apis/dialogflow_v2/classes.rb +84 -44
  272. data/generated/google/apis/dialogflow_v2/representations.rb +52 -15
  273. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  274. data/generated/google/apis/dialogflow_v2beta1/classes.rb +84 -44
  275. data/generated/google/apis/dialogflow_v2beta1/representations.rb +52 -15
  276. data/generated/google/apis/dialogflow_v2beta1/service.rb +37 -0
  277. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → dialogflow_v3beta1.rb} +13 -10
  278. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8183 -0
  279. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3459 -0
  280. data/generated/google/apis/dialogflow_v3beta1/service.rb +2812 -0
  281. data/generated/google/apis/displayvideo_v1.rb +1 -1
  282. data/generated/google/apis/displayvideo_v1/classes.rb +55 -8
  283. data/generated/google/apis/displayvideo_v1/representations.rb +5 -0
  284. data/generated/google/apis/displayvideo_v1/service.rb +48 -36
  285. data/generated/google/apis/dlp_v2.rb +1 -1
  286. data/generated/google/apis/dlp_v2/classes.rb +1076 -1302
  287. data/generated/google/apis/dlp_v2/service.rb +962 -905
  288. data/generated/google/apis/dns_v1.rb +1 -1
  289. data/generated/google/apis/dns_v1/classes.rb +175 -198
  290. data/generated/google/apis/dns_v1/service.rb +82 -97
  291. data/generated/google/apis/dns_v1beta2.rb +1 -1
  292. data/generated/google/apis/dns_v1beta2/classes.rb +180 -205
  293. data/generated/google/apis/dns_v1beta2/service.rb +82 -97
  294. data/generated/google/apis/docs_v1.rb +1 -1
  295. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  296. data/generated/google/apis/docs_v1/service.rb +17 -22
  297. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  298. data/generated/google/apis/documentai_v1beta2/classes.rb +1186 -810
  299. data/generated/google/apis/documentai_v1beta2/representations.rb +303 -0
  300. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  301. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  302. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  303. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  304. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  305. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +11 -18
  306. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  307. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  308. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  309. data/generated/google/apis/drive_v2.rb +1 -1
  310. data/generated/google/apis/drive_v2/classes.rb +14 -6
  311. data/generated/google/apis/drive_v2/representations.rb +1 -0
  312. data/generated/google/apis/drive_v2/service.rb +79 -15
  313. data/generated/google/apis/drive_v3.rb +1 -1
  314. data/generated/google/apis/drive_v3/classes.rb +14 -6
  315. data/generated/google/apis/drive_v3/representations.rb +1 -0
  316. data/generated/google/apis/drive_v3/service.rb +59 -11
  317. data/generated/google/apis/file_v1.rb +1 -1
  318. data/generated/google/apis/file_v1/classes.rb +154 -173
  319. data/generated/google/apis/file_v1/service.rb +43 -52
  320. data/generated/google/apis/file_v1beta1.rb +1 -1
  321. data/generated/google/apis/file_v1beta1/classes.rb +334 -193
  322. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  323. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  324. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  325. data/generated/google/apis/firebase_v1beta1/classes.rb +25 -47
  326. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  327. data/generated/google/apis/firebase_v1beta1/service.rb +8 -1
  328. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  329. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +26 -0
  330. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +15 -0
  331. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  332. data/generated/google/apis/firebaseml_v1beta2/classes.rb +0 -8
  333. data/generated/google/apis/firebaseml_v1beta2/representations.rb +0 -1
  334. data/generated/google/apis/firebaserules_v1.rb +1 -1
  335. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  336. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  337. data/generated/google/apis/firestore_v1.rb +1 -1
  338. data/generated/google/apis/firestore_v1/classes.rb +402 -498
  339. data/generated/google/apis/firestore_v1/service.rb +165 -201
  340. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  341. data/generated/google/apis/firestore_v1beta1/classes.rb +334 -409
  342. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  343. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  344. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  345. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  346. data/generated/google/apis/fitness_v1.rb +85 -0
  347. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  348. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  349. data/generated/google/apis/fitness_v1/service.rb +626 -0
  350. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  351. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  352. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  353. data/generated/google/apis/games_management_v1management.rb +2 -3
  354. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  355. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  356. data/generated/google/apis/games_v1.rb +2 -3
  357. data/generated/google/apis/games_v1/classes.rb +76 -83
  358. data/generated/google/apis/games_v1/representations.rb +2 -0
  359. data/generated/google/apis/games_v1/service.rb +84 -90
  360. data/generated/google/apis/genomics_v1.rb +1 -1
  361. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  362. data/generated/google/apis/genomics_v1/service.rb +28 -43
  363. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  364. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  365. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  366. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  367. data/generated/google/apis/genomics_v2alpha1/classes.rb +252 -275
  368. data/generated/google/apis/genomics_v2alpha1/representations.rb +1 -0
  369. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  370. data/generated/google/apis/gmail_v1.rb +1 -1
  371. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  372. data/generated/google/apis/gmail_v1/service.rb +4 -3
  373. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  374. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +1 -1
  375. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  376. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  377. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  378. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  379. data/generated/google/apis/healthcare_v1.rb +1 -1
  380. data/generated/google/apis/healthcare_v1/classes.rb +563 -826
  381. data/generated/google/apis/healthcare_v1/service.rb +675 -853
  382. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  383. data/generated/google/apis/healthcare_v1beta1/classes.rb +828 -1102
  384. data/generated/google/apis/healthcare_v1beta1/representations.rb +20 -0
  385. data/generated/google/apis/healthcare_v1beta1/service.rb +895 -1139
  386. data/generated/google/apis/homegraph_v1.rb +1 -1
  387. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  388. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  389. data/generated/google/apis/iam_v1.rb +5 -2
  390. data/generated/google/apis/iam_v1/classes.rb +388 -592
  391. data/generated/google/apis/iam_v1/service.rb +429 -555
  392. data/generated/google/apis/iamcredentials_v1.rb +4 -2
  393. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  394. data/generated/google/apis/iamcredentials_v1/service.rb +15 -13
  395. data/generated/google/apis/iap_v1.rb +1 -1
  396. data/generated/google/apis/iap_v1/classes.rb +246 -355
  397. data/generated/google/apis/iap_v1/service.rb +61 -71
  398. data/generated/google/apis/iap_v1beta1.rb +1 -1
  399. data/generated/google/apis/iap_v1beta1/classes.rb +157 -254
  400. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  401. data/generated/google/apis/indexing_v3.rb +1 -1
  402. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  403. data/generated/google/apis/kgsearch_v1.rb +1 -1
  404. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  405. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  406. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  407. data/generated/google/apis/lifesciences_v2beta/classes.rb +262 -290
  408. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  409. data/generated/google/apis/localservices_v1.rb +31 -0
  410. data/generated/google/apis/localservices_v1/classes.rb +419 -0
  411. data/generated/google/apis/localservices_v1/representations.rb +172 -0
  412. data/generated/google/apis/localservices_v1/service.rb +199 -0
  413. data/generated/google/apis/logging_v2.rb +1 -1
  414. data/generated/google/apis/logging_v2/classes.rb +174 -214
  415. data/generated/google/apis/logging_v2/representations.rb +15 -0
  416. data/generated/google/apis/logging_v2/service.rb +1017 -584
  417. data/generated/google/apis/manufacturers_v1.rb +1 -1
  418. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  419. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  420. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  421. data/generated/google/apis/memcache_v1beta2/classes.rb +170 -249
  422. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  423. data/generated/google/apis/memcache_v1beta2/service.rb +58 -71
  424. data/generated/google/apis/ml_v1.rb +1 -1
  425. data/generated/google/apis/ml_v1/classes.rb +949 -1144
  426. data/generated/google/apis/ml_v1/representations.rb +64 -0
  427. data/generated/google/apis/ml_v1/service.rb +194 -253
  428. data/generated/google/apis/monitoring_v1.rb +1 -1
  429. data/generated/google/apis/monitoring_v1/classes.rb +103 -26
  430. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  431. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  432. data/generated/google/apis/monitoring_v3.rb +1 -1
  433. data/generated/google/apis/monitoring_v3/classes.rb +220 -322
  434. data/generated/google/apis/monitoring_v3/service.rb +121 -140
  435. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  436. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  437. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  438. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  439. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +388 -429
  440. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +40 -0
  441. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  442. data/generated/google/apis/osconfig_v1.rb +1 -1
  443. data/generated/google/apis/osconfig_v1/classes.rb +226 -270
  444. data/generated/google/apis/osconfig_v1/service.rb +22 -27
  445. data/generated/google/apis/osconfig_v1beta.rb +1 -1
  446. data/generated/google/apis/osconfig_v1beta/classes.rb +1031 -411
  447. data/generated/google/apis/osconfig_v1beta/representations.rb +337 -0
  448. data/generated/google/apis/osconfig_v1beta/service.rb +39 -52
  449. data/generated/google/apis/oslogin_v1.rb +1 -1
  450. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  451. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  452. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  453. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  454. data/generated/google/apis/oslogin_v1alpha/classes.rb +14 -12
  455. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  456. data/generated/google/apis/oslogin_v1alpha/service.rb +14 -14
  457. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  458. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  459. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  460. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  461. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  462. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  463. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  464. data/generated/google/apis/people_v1.rb +1 -1
  465. data/generated/google/apis/people_v1/classes.rb +121 -12
  466. data/generated/google/apis/people_v1/representations.rb +41 -0
  467. data/generated/google/apis/people_v1/service.rb +39 -39
  468. data/generated/google/apis/playablelocations_v3.rb +1 -1
  469. data/generated/google/apis/playablelocations_v3/classes.rb +108 -155
  470. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  471. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  472. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +6 -0
  473. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +1 -0
  474. data/generated/google/apis/pubsub_v1.rb +1 -1
  475. data/generated/google/apis/pubsub_v1/classes.rb +392 -518
  476. data/generated/google/apis/pubsub_v1/representations.rb +1 -0
  477. data/generated/google/apis/pubsub_v1/service.rb +220 -246
  478. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  479. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  480. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  481. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  482. data/generated/google/apis/pubsub_v1beta2/classes.rb +244 -354
  483. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  484. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  485. data/generated/google/apis/pubsublite_v1/classes.rb +389 -0
  486. data/generated/google/apis/{accessapproval_v1beta1 → pubsublite_v1}/representations.rb +78 -53
  487. data/generated/google/apis/{memcache_v1 → pubsublite_v1}/service.rb +195 -228
  488. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  489. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  490. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +335 -456
  491. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +0 -16
  492. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  493. data/generated/google/apis/redis_v1.rb +1 -1
  494. data/generated/google/apis/redis_v1/classes.rb +172 -208
  495. data/generated/google/apis/redis_v1/service.rb +93 -110
  496. data/generated/google/apis/redis_v1beta1.rb +1 -1
  497. data/generated/google/apis/redis_v1beta1/classes.rb +176 -212
  498. data/generated/google/apis/redis_v1beta1/service.rb +93 -110
  499. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  500. data/generated/google/apis/remotebuildexecution_v1/classes.rb +951 -1078
  501. data/generated/google/apis/remotebuildexecution_v1/representations.rb +61 -0
  502. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  503. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  504. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +946 -1071
  505. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +61 -0
  506. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  507. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  508. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1099 -1250
  509. data/generated/google/apis/remotebuildexecution_v2/representations.rb +61 -0
  510. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  511. data/generated/google/apis/run_v1.rb +1 -1
  512. data/generated/google/apis/run_v1/classes.rb +4 -3
  513. data/generated/google/apis/run_v1/representations.rb +1 -1
  514. data/generated/google/apis/run_v1alpha1.rb +1 -1
  515. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  516. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  517. data/generated/google/apis/run_v1beta1.rb +1 -1
  518. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  519. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  520. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +295 -412
  521. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  522. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  523. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  524. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  525. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  526. data/generated/google/apis/sasportal_v1alpha1/classes.rb +6 -0
  527. data/generated/google/apis/sasportal_v1alpha1/representations.rb +1 -0
  528. data/generated/google/apis/script_v1.rb +1 -1
  529. data/generated/google/apis/script_v1/classes.rb +88 -111
  530. data/generated/google/apis/script_v1/service.rb +63 -69
  531. data/generated/google/apis/secretmanager_v1.rb +1 -1
  532. data/generated/google/apis/secretmanager_v1/classes.rb +211 -363
  533. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  534. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  535. data/generated/google/apis/secretmanager_v1beta1/classes.rb +211 -363
  536. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  537. data/generated/google/apis/securitycenter_v1.rb +1 -1
  538. data/generated/google/apis/securitycenter_v1/classes.rb +16 -6
  539. data/generated/google/apis/securitycenter_v1/representations.rb +1 -0
  540. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  541. data/generated/google/apis/securitycenter_v1beta1/classes.rb +21 -9
  542. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -0
  543. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  544. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +281 -103
  545. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +101 -30
  546. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  547. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  548. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +18 -48
  549. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  550. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +19 -49
  551. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  552. data/generated/google/apis/servicecontrol_v1/classes.rb +523 -641
  553. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  554. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  555. data/generated/google/apis/servicecontrol_v2/classes.rb +279 -325
  556. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  557. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  558. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +214 -333
  559. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  560. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  561. data/generated/google/apis/servicemanagement_v1/classes.rb +1266 -2116
  562. data/generated/google/apis/servicemanagement_v1/service.rb +144 -195
  563. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  564. data/generated/google/apis/servicenetworking_v1/classes.rb +93 -48
  565. data/generated/google/apis/servicenetworking_v1/representations.rb +52 -0
  566. data/generated/google/apis/servicenetworking_v1/service.rb +116 -0
  567. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  568. data/generated/google/apis/servicenetworking_v1beta/classes.rb +74 -48
  569. data/generated/google/apis/servicenetworking_v1beta/representations.rb +38 -0
  570. data/generated/google/apis/serviceusage_v1.rb +1 -1
  571. data/generated/google/apis/serviceusage_v1/classes.rb +52 -48
  572. data/generated/google/apis/serviceusage_v1/representations.rb +4 -0
  573. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  574. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  575. data/generated/google/apis/serviceusage_v1beta1/classes.rb +87 -49
  576. data/generated/google/apis/serviceusage_v1beta1/representations.rb +8 -0
  577. data/generated/google/apis/sheets_v4.rb +1 -1
  578. data/generated/google/apis/sheets_v4/classes.rb +3932 -5007
  579. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  580. data/generated/google/apis/sheets_v4/service.rb +113 -149
  581. data/generated/google/apis/site_verification_v1.rb +1 -1
  582. data/generated/google/apis/slides_v1.rb +1 -1
  583. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  584. data/generated/google/apis/slides_v1/service.rb +23 -30
  585. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  586. data/generated/google/apis/sourcerepo_v1/classes.rb +6 -6
  587. data/generated/google/apis/spanner_v1.rb +1 -1
  588. data/generated/google/apis/spanner_v1/classes.rb +1546 -2157
  589. data/generated/google/apis/spanner_v1/service.rb +443 -618
  590. data/generated/google/apis/speech_v1.rb +1 -1
  591. data/generated/google/apis/speech_v1/classes.rb +174 -220
  592. data/generated/google/apis/speech_v1/service.rb +27 -32
  593. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  594. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  595. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  596. data/generated/google/apis/speech_v2beta1.rb +1 -1
  597. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  598. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  599. data/generated/google/apis/sql_v1beta4.rb +1 -1
  600. data/generated/google/apis/sql_v1beta4/classes.rb +311 -370
  601. data/generated/google/apis/sql_v1beta4/representations.rb +2 -0
  602. data/generated/google/apis/sql_v1beta4/service.rb +51 -56
  603. data/generated/google/apis/storage_v1.rb +1 -1
  604. data/generated/google/apis/storage_v1/classes.rb +8 -7
  605. data/generated/google/apis/storage_v1/representations.rb +2 -2
  606. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  607. data/generated/google/apis/storagetransfer_v1/classes.rb +261 -339
  608. data/generated/google/apis/storagetransfer_v1/service.rb +43 -64
  609. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  610. data/generated/google/apis/streetviewpublish_v1/classes.rb +106 -148
  611. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  612. data/generated/google/apis/tagmanager_v1.rb +1 -1
  613. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  614. data/generated/google/apis/tagmanager_v2.rb +1 -1
  615. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  616. data/generated/google/apis/tasks_v1.rb +1 -1
  617. data/generated/google/apis/tasks_v1/classes.rb +20 -21
  618. data/generated/google/apis/tasks_v1/service.rb +16 -17
  619. data/generated/google/apis/testing_v1.rb +1 -1
  620. data/generated/google/apis/testing_v1/classes.rb +317 -382
  621. data/generated/google/apis/testing_v1/representations.rb +2 -0
  622. data/generated/google/apis/testing_v1/service.rb +22 -28
  623. data/generated/google/apis/texttospeech_v1.rb +1 -1
  624. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  625. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  626. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  627. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  628. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  629. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  630. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  631. data/generated/google/apis/toolresults_v1beta3/classes.rb +7 -0
  632. data/generated/google/apis/toolresults_v1beta3/representations.rb +1 -0
  633. data/generated/google/apis/tpu_v1.rb +1 -1
  634. data/generated/google/apis/tpu_v1/classes.rb +11 -0
  635. data/generated/google/apis/tpu_v1/representations.rb +1 -0
  636. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  637. data/generated/google/apis/tpu_v1alpha1/classes.rb +11 -0
  638. data/generated/google/apis/tpu_v1alpha1/representations.rb +1 -0
  639. data/generated/google/apis/{accessapproval_v1beta1.rb → trafficdirector_v2.rb} +9 -9
  640. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  641. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  642. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  643. data/generated/google/apis/translate_v3.rb +1 -1
  644. data/generated/google/apis/translate_v3/classes.rb +148 -175
  645. data/generated/google/apis/translate_v3/service.rb +122 -151
  646. data/generated/google/apis/translate_v3beta1.rb +1 -1
  647. data/generated/google/apis/translate_v3beta1/classes.rb +149 -170
  648. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  649. data/generated/google/apis/vectortile_v1.rb +1 -1
  650. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  651. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  652. data/generated/google/apis/videointelligence_v1.rb +1 -1
  653. data/generated/google/apis/videointelligence_v1/classes.rb +753 -918
  654. data/generated/google/apis/videointelligence_v1/service.rb +40 -48
  655. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  656. data/generated/google/apis/videointelligence_v1beta2/classes.rb +748 -911
  657. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  658. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  659. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +748 -911
  660. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  661. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  662. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +748 -911
  663. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  664. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  665. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +754 -920
  666. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  667. data/generated/google/apis/webfonts_v1.rb +2 -3
  668. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  669. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  670. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  671. data/generated/google/apis/youtube_v3.rb +1 -1
  672. data/generated/google/apis/youtube_v3/classes.rb +347 -0
  673. data/generated/google/apis/youtube_v3/representations.rb +176 -0
  674. data/generated/google/apis/youtube_v3/service.rb +78 -0
  675. data/lib/google/apis/version.rb +1 -1
  676. metadata +31 -31
  677. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  678. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  679. data/generated/google/apis/dns_v2beta1.rb +0 -43
  680. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  681. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  682. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  683. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  684. data/generated/google/apis/memcache_v1/representations.rb +0 -471
  685. data/generated/google/apis/oauth2_v2.rb +0 -40
  686. data/generated/google/apis/oauth2_v2/classes.rb +0 -165
  687. data/generated/google/apis/oauth2_v2/representations.rb +0 -68
  688. data/generated/google/apis/oauth2_v2/service.rb +0 -158
  689. data/generated/google/apis/securitycenter_v1p1alpha1/service.rb +0 -207
  690. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  691. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  692. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  693. data/generated/google/apis/storage_v1beta2.rb +0 -40
  694. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  695. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  696. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
@@ -116,28 +116,25 @@ module Google
116
116
  execute_or_queue_command(command, &block)
117
117
  end
118
118
 
119
- # Creates a Redis instance based on the specified tier and memory size.
120
- # By default, the instance is accessible from the project's
121
- # [default network](https://cloud.google.com/vpc/docs/vpc).
122
- # The creation is executed asynchronously and callers may check the returned
123
- # operation to track its progress. Once the operation is completed the Redis
124
- # instance will be fully functional. Completed longrunning.Operation will
125
- # contain the new instance object in the response field.
126
- # The returned operation is automatically deleted after a few hours, so there
127
- # is no need to call DeleteOperation.
119
+ # Creates a Redis instance based on the specified tier and memory size. By
120
+ # default, the instance is accessible from the project's [default network](https:
121
+ # //cloud.google.com/vpc/docs/vpc). The creation is executed asynchronously and
122
+ # callers may check the returned operation to track its progress. Once the
123
+ # operation is completed the Redis instance will be fully functional. Completed
124
+ # longrunning.Operation will contain the new instance object in the response
125
+ # field. The returned operation is automatically deleted after a few hours, so
126
+ # there is no need to call DeleteOperation.
128
127
  # @param [String] parent
129
- # Required. The resource name of the instance location using the form:
130
- # `projects/`project_id`/locations/`location_id``
131
- # where `location_id` refers to a GCP region.
128
+ # Required. The resource name of the instance location using the form: `projects/
129
+ # `project_id`/locations/`location_id`` where `location_id` refers to a GCP
130
+ # region.
132
131
  # @param [Google::Apis::RedisV1beta1::Instance] instance_object
133
132
  # @param [String] instance_id
134
- # Required. The logical name of the Redis instance in the customer project
135
- # with the following restrictions:
136
- # * Must contain only lowercase letters, numbers, and hyphens.
137
- # * Must start with a letter.
138
- # * Must be between 1-40 characters.
139
- # * Must end with a number or a letter.
140
- # * Must be unique within the customer project / location
133
+ # Required. The logical name of the Redis instance in the customer project with
134
+ # the following restrictions: * Must contain only lowercase letters, numbers,
135
+ # and hyphens. * Must start with a letter. * Must be between 1-40 characters. *
136
+ # Must end with a number or a letter. * Must be unique within the customer
137
+ # project / location
141
138
  # @param [String] fields
142
139
  # Selector specifying which fields to include in a partial response.
143
140
  # @param [String] quota_user
@@ -168,12 +165,11 @@ module Google
168
165
  execute_or_queue_command(command, &block)
169
166
  end
170
167
 
171
- # Deletes a specific Redis instance. Instance stops serving and data is
172
- # deleted.
168
+ # Deletes a specific Redis instance. Instance stops serving and data is deleted.
173
169
  # @param [String] name
174
- # Required. Redis instance resource name using the form:
175
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
176
- # where `location_id` refers to a GCP region.
170
+ # Required. Redis instance resource name using the form: `projects/`project_id`/
171
+ # locations/`location_id`/instances/`instance_id`` where `location_id` refers to
172
+ # a GCP region.
177
173
  # @param [String] fields
178
174
  # Selector specifying which fields to include in a partial response.
179
175
  # @param [String] quota_user
@@ -202,13 +198,13 @@ module Google
202
198
  end
203
199
 
204
200
  # Export Redis instance data into a Redis RDB format file in Cloud Storage.
205
- # Redis will continue serving during this operation.
206
- # The returned operation is automatically deleted after a few hours, so
207
- # there is no need to call DeleteOperation.
201
+ # Redis will continue serving during this operation. The returned operation is
202
+ # automatically deleted after a few hours, so there is no need to call
203
+ # DeleteOperation.
208
204
  # @param [String] name
209
- # Required. Redis instance resource name using the form:
210
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
211
- # where `location_id` refers to a GCP region.
205
+ # Required. Redis instance resource name using the form: `projects/`project_id`/
206
+ # locations/`location_id`/instances/`instance_id`` where `location_id` refers to
207
+ # a GCP region.
212
208
  # @param [Google::Apis::RedisV1beta1::ExportInstanceRequest] export_instance_request_object
213
209
  # @param [String] fields
214
210
  # Selector specifying which fields to include in a partial response.
@@ -239,12 +235,12 @@ module Google
239
235
  execute_or_queue_command(command, &block)
240
236
  end
241
237
 
242
- # Initiates a failover of the master node to current replica node for a
243
- # specific STANDARD tier Cloud Memorystore for Redis instance.
238
+ # Initiates a failover of the master node to current replica node for a specific
239
+ # STANDARD tier Cloud Memorystore for Redis instance.
244
240
  # @param [String] name
245
- # Required. Redis instance resource name using the form:
246
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
247
- # where `location_id` refers to a GCP region.
241
+ # Required. Redis instance resource name using the form: `projects/`project_id`/
242
+ # locations/`location_id`/instances/`instance_id`` where `location_id` refers to
243
+ # a GCP region.
248
244
  # @param [Google::Apis::RedisV1beta1::FailoverInstanceRequest] failover_instance_request_object
249
245
  # @param [String] fields
250
246
  # Selector specifying which fields to include in a partial response.
@@ -277,9 +273,9 @@ module Google
277
273
 
278
274
  # Gets the details of a specific Redis instance.
279
275
  # @param [String] name
280
- # Required. Redis instance resource name using the form:
281
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
282
- # where `location_id` refers to a GCP region.
276
+ # Required. Redis instance resource name using the form: `projects/`project_id`/
277
+ # locations/`location_id`/instances/`instance_id`` where `location_id` refers to
278
+ # a GCP region.
283
279
  # @param [String] fields
284
280
  # Selector specifying which fields to include in a partial response.
285
281
  # @param [String] quota_user
@@ -308,15 +304,14 @@ module Google
308
304
  end
309
305
 
310
306
  # Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
311
- # Redis may stop serving during this operation. Instance state will be
312
- # IMPORTING for entire operation. When complete, the instance will contain
313
- # only data from the imported file.
314
- # The returned operation is automatically deleted after a few hours, so
315
- # there is no need to call DeleteOperation.
307
+ # Redis may stop serving during this operation. Instance state will be IMPORTING
308
+ # for entire operation. When complete, the instance will contain only data from
309
+ # the imported file. The returned operation is automatically deleted after a few
310
+ # hours, so there is no need to call DeleteOperation.
316
311
  # @param [String] name
317
- # Required. Redis instance resource name using the form:
318
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
319
- # where `location_id` refers to a GCP region.
312
+ # Required. Redis instance resource name using the form: `projects/`project_id`/
313
+ # locations/`location_id`/instances/`instance_id`` where `location_id` refers to
314
+ # a GCP region.
320
315
  # @param [Google::Apis::RedisV1beta1::ImportInstanceRequest] import_instance_request_object
321
316
  # @param [String] fields
322
317
  # Selector specifying which fields to include in a partial response.
@@ -347,26 +342,24 @@ module Google
347
342
  execute_or_queue_command(command, &block)
348
343
  end
349
344
 
350
- # Lists all Redis instances owned by a project in either the specified
351
- # location (region) or all locations.
352
- # The location should have the following format:
353
- # * `projects/`project_id`/locations/`location_id``
354
- # If `location_id` is specified as `-` (wildcard), then all regions
355
- # available to the project are queried, and the results are aggregated.
345
+ # Lists all Redis instances owned by a project in either the specified location (
346
+ # region) or all locations. The location should have the following format: * `
347
+ # projects/`project_id`/locations/`location_id`` If `location_id` is specified
348
+ # as `-` (wildcard), then all regions available to the project are queried, and
349
+ # the results are aggregated.
356
350
  # @param [String] parent
357
- # Required. The resource name of the instance location using the form:
358
- # `projects/`project_id`/locations/`location_id``
359
- # where `location_id` refers to a GCP region.
351
+ # Required. The resource name of the instance location using the form: `projects/
352
+ # `project_id`/locations/`location_id`` where `location_id` refers to a GCP
353
+ # region.
360
354
  # @param [Fixnum] page_size
361
- # The maximum number of items to return.
362
- # If not specified, a default value of 1000 will be used by the service.
363
- # Regardless of the page_size value, the response may include a partial list
364
- # and a caller should only rely on response's
365
- # `next_page_token`
366
- # to determine if there are more instances left to be queried.
355
+ # The maximum number of items to return. If not specified, a default value of
356
+ # 1000 will be used by the service. Regardless of the page_size value, the
357
+ # response may include a partial list and a caller should only rely on response'
358
+ # s `next_page_token` to determine if there are more instances left to be
359
+ # queried.
367
360
  # @param [String] page_token
368
- # The `next_page_token` value returned from a previous
369
- # ListInstances request, if any.
361
+ # The `next_page_token` value returned from a previous ListInstances request, if
362
+ # any.
370
363
  # @param [String] fields
371
364
  # Selector specifying which fields to include in a partial response.
372
365
  # @param [String] quota_user
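The pagination contract above (rely only on `next_page_token`, never on page size) is easy to get wrong, so a sketch of the loop; the `list_project_location_instances` helper name is an assumption from the generator's conventions:

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

# `-` as the location wildcard aggregates results across all regions.
parent = 'projects/my-project/locations/-'
page_token = nil
loop do
  response = service.list_project_location_instances(
    parent, page_size: 100, page_token: page_token
  )
  (response.instances || []).each { |instance| puts instance.name }
  page_token = response.next_page_token
  # Stop only when next_page_token is absent, per the comment above.
  break if page_token.nil? || page_token.empty?
end
```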
@@ -396,28 +389,23 @@ module Google
396
389
  execute_or_queue_command(command, &block)
397
390
  end
398
391
 
399
- # Updates the metadata and configuration of a specific Redis instance.
400
- # Completed longrunning.Operation will contain the new instance object
401
- # in the response field. The returned operation is automatically deleted
402
- # after a few hours, so there is no need to call DeleteOperation.
392
+ # Updates the metadata and configuration of a specific Redis instance. Completed
393
+ # longrunning.Operation will contain the new instance object in the response
394
+ # field. The returned operation is automatically deleted after a few hours, so
395
+ # there is no need to call DeleteOperation.
403
396
  # @param [String] name
404
397
  # Required. Unique name of the resource in this scope including project and
405
- # location using the form:
406
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
407
- # Note: Redis instances are managed and addressed at regional level so
408
- # location_id here refers to a GCP region; however, users may choose which
409
- # specific zone (or collection of zones for cross-zone instances) an instance
410
- # should be provisioned in. Refer to location_id and
398
+ # location using the form: `projects/`project_id`/locations/`location_id`/
399
+ # instances/`instance_id`` Note: Redis instances are managed and addressed at
400
+ # regional level so location_id here refers to a GCP region; however, users may
401
+ # choose which specific zone (or collection of zones for cross-zone instances)
402
+ # an instance should be provisioned in. Refer to location_id and
411
403
  # alternative_location_id fields for more details.
412
404
  # @param [Google::Apis::RedisV1beta1::Instance] instance_object
413
405
  # @param [String] update_mask
414
- # Required. Mask of fields to update. At least one path must be supplied in
415
- # this field. The elements of the repeated paths field may only include these
416
- # fields from Instance:
417
- # * `displayName`
418
- # * `labels`
419
- # * `memorySizeGb`
420
- # * `redisConfig`
406
+ # Required. Mask of fields to update. At least one path must be supplied in this
407
+ # field. The elements of the repeated paths field may only include these fields
408
+ # from Instance: * `displayName` * `labels` * `memorySizeGb` * `redisConfig`
421
409
  # @param [String] fields
422
410
  # Selector specifying which fields to include in a partial response.
423
411
  # @param [String] quota_user
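A sketch of a partial update using the updatable paths listed above; the `patch_project_location_instance` helper name is an assumption:

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

name = 'projects/my-project/locations/us-central1/instances/my-instance'
# Only fields named in update_mask are applied; displayName and
# memorySizeGb are two of the four allowed paths.
instance = Google::Apis::RedisV1beta1::Instance.new(
  display_name: 'Renamed cache',
  memory_size_gb: 4
)
operation = service.patch_project_location_instance(
  name, instance, update_mask: 'displayName,memorySizeGb'
)
```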
@@ -448,12 +436,11 @@ module Google
448
436
  execute_or_queue_command(command, &block)
449
437
  end
450
438
 
451
- # Upgrades Redis instance to the newer Redis version specified in the
452
- # request.
439
+ # Upgrades Redis instance to the newer Redis version specified in the request.
453
440
  # @param [String] name
454
- # Required. Redis instance resource name using the form:
455
- # `projects/`project_id`/locations/`location_id`/instances/`instance_id``
456
- # where `location_id` refers to a GCP region.
441
+ # Required. Redis instance resource name using the form: `projects/`project_id`/
442
+ # locations/`location_id`/instances/`instance_id`` where `location_id` refers to
443
+ # a GCP region.
457
444
  # @param [Google::Apis::RedisV1beta1::UpgradeInstanceRequest] upgrade_instance_request_object
458
445
  # @param [String] fields
459
446
  # Selector specifying which fields to include in a partial response.
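For the upgrade call above, a minimal sketch; the `upgrade_instance` helper and the `redis_version` field of `UpgradeInstanceRequest` are assumptions:

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

name = 'projects/my-project/locations/us-central1/instances/my-instance'
request = Google::Apis::RedisV1beta1::UpgradeInstanceRequest.new(
  redis_version: 'REDIS_5_0' # assumed enum-style version string
)
operation = service.upgrade_instance(name, request)
```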
@@ -484,15 +471,13 @@ module Google
484
471
  execute_or_queue_command(command, &block)
485
472
  end
486
473
 
487
- # Starts asynchronous cancellation on a long-running operation. The server
488
- # makes a best effort to cancel the operation, but success is not
489
- # guaranteed. If the server doesn't support this method, it returns
490
- # `google.rpc.Code.UNIMPLEMENTED`. Clients can use
491
- # Operations.GetOperation or
492
- # other methods to check whether the cancellation succeeded or whether the
493
- # operation completed despite cancellation. On successful cancellation,
494
- # the operation is not deleted; instead, it becomes an operation with
495
- # an Operation.error value with a google.rpc.Status.code of 1,
474
+ # Starts asynchronous cancellation on a long-running operation. The server makes
475
+ # a best effort to cancel the operation, but success is not guaranteed. If the
476
+ # server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
477
+ # Clients can use Operations.GetOperation or other methods to check whether the
478
+ # cancellation succeeded or whether the operation completed despite cancellation.
479
+ # On successful cancellation, the operation is not deleted; instead, it becomes
480
+ # an operation with an Operation.error value with a google.rpc.Status.code of 1,
496
481
  # corresponding to `Code.CANCELLED`.
497
482
  # @param [String] name
498
483
  # The name of the operation resource to be cancelled.
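Because cancellation is best-effort, the comment above tells clients to confirm the outcome rather than assume it. A sketch, assuming `cancel_operation` and `get_project_location_operation` as the generated helper names:

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

op_name = 'projects/my-project/locations/us-central1/operations/operation-123'
service.cancel_operation(op_name) # best effort; success is not guaranteed

op = service.get_project_location_operation(op_name)
if op.done? && op.error && op.error.code == 1
  puts 'cancelled (google.rpc.Status.code 1, Code.CANCELLED)'
end
```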
@@ -523,10 +508,10 @@ module Google
523
508
  execute_or_queue_command(command, &block)
524
509
  end
525
510
 
526
- # Deletes a long-running operation. This method indicates that the client is
527
- # no longer interested in the operation result. It does not cancel the
528
- # operation. If the server doesn't support this method, it returns
529
- # `google.rpc.Code.UNIMPLEMENTED`.
511
+ # Deletes a long-running operation. This method indicates that the client is no
512
+ # longer interested in the operation result. It does not cancel the operation.
513
+ # If the server doesn't support this method, it returns `google.rpc.Code.
514
+ # UNIMPLEMENTED`.
530
515
  # @param [String] name
531
516
  # The name of the operation resource to be deleted.
532
517
  # @param [String] fields
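A one-line sketch for the delete call above, assuming `delete_project_location_operation` as the generated helper name; note it only discards the operation record and does not stop the work:

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

op_name = 'projects/my-project/locations/us-central1/operations/operation-123'
service.delete_project_location_operation(op_name) # record only, not the work
```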
@@ -556,9 +541,8 @@ module Google
556
541
  execute_or_queue_command(command, &block)
557
542
  end
558
543
 
559
- # Gets the latest state of a long-running operation. Clients can use this
560
- # method to poll the operation result at intervals as recommended by the API
561
- # service.
544
+ # Gets the latest state of a long-running operation. Clients can use this method
545
+ # to poll the operation result at intervals as recommended by the API service.
562
546
  # @param [String] name
563
547
  # The name of the operation resource.
564
548
  # @param [String] fields
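A polling sketch for the get call above, assuming `get_project_location_operation` as the generated helper name; the 10-second interval is a placeholder for whatever cadence the API service recommends:

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

op_name = 'projects/my-project/locations/us-central1/operations/operation-123'
op = service.get_project_location_operation(op_name)
until op.done?
  sleep 10 # placeholder interval; back off per service guidance
  op = service.get_project_location_operation(op_name)
end
raise "operation failed: #{op.error.message}" if op.error
```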
@@ -588,15 +572,14 @@ module Google
588
572
  execute_or_queue_command(command, &block)
589
573
  end
590
574
 
591
- # Lists operations that match the specified filter in the request. If the
592
- # server doesn't support this method, it returns `UNIMPLEMENTED`.
593
- # NOTE: the `name` binding allows API services to override the binding
594
- # to use different resource name schemes, such as `users/*/operations`. To
595
- # override the binding, API services can add a binding such as
596
- # `"/v1/`name=users/*`/operations"` to their service configuration.
597
- # For backwards compatibility, the default name includes the operations
598
- # collection id, however overriding users must ensure the name binding
599
- # is the parent resource, without the operations collection id.
575
+ # Lists operations that match the specified filter in the request. If the server
576
+ # doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name`
577
+ # binding allows API services to override the binding to use different resource
578
+ # name schemes, such as `users/*/operations`. To override the binding, API
579
+ # services can add a binding such as `"/v1/`name=users/*`/operations"` to their
580
+ # service configuration. For backwards compatibility, the default name includes
581
+ # the operations collection id, however overriding users must ensure the name
582
+ # binding is the parent resource, without the operations collection id.
600
583
  # @param [String] name
601
584
  # The name of the operation's parent resource.
602
585
  # @param [String] filter
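A sketch of listing operations under their parent resource, assuming `list_project_location_operations` as the generated helper name (it takes the parent as its `name` argument, per the parameter documented above):

```ruby
require 'googleauth'
require 'google/apis/redis_v1beta1'

service = Google::Apis::RedisV1beta1::CloudRedisService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

parent = 'projects/my-project/locations/us-central1'
response = service.list_project_location_operations(parent)
(response.operations || []).each do |op|
  puts "#{op.name} done=#{op.done}"
end
```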
data/generated/google/apis/remotebuildexecution_v1.rb
@@ -25,7 +25,7 @@ module Google
25
25
  # @see https://cloud.google.com/remote-build-execution/docs/
26
26
  module RemotebuildexecutionV1
27
27
  VERSION = 'V1'
28
- REVISION = '20200721'
28
+ REVISION = '20200819'
29
29
 
30
30
  # View and manage your data across Google Cloud Platform services
31
31
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
data/generated/google/apis/remotebuildexecution_v1/classes.rb
@@ -23,120 +23,107 @@ module Google
23
23
  module RemotebuildexecutionV1
24
24
 
25
25
  # An `Action` captures all the information about an execution which is required
26
- # to reproduce it.
27
- # `Action`s are the core component of the [Execution] service. A single
28
- # `Action` represents a repeatable action that can be performed by the
26
+ # to reproduce it. `Action`s are the core component of the [Execution] service.
27
+ # A single `Action` represents a repeatable action that can be performed by the
29
28
  # execution service. `Action`s can be succinctly identified by the digest of
30
29
  # their wire format encoding and, once an `Action` has been executed, will be
31
30
  # cached in the action cache. Future requests can then use the cached result
32
- # rather than needing to run afresh.
33
- # When a server completes execution of an
34
- # Action, it MAY choose to
35
- # cache the result in
36
- # the ActionCache unless
37
- # `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
38
- # default, future calls to
39
- # Execute the same
40
- # `Action` will also serve their results from the cache. Clients must take care
41
- # to understand the caching behaviour. Ideally, all `Action`s will be
42
- # reproducible so that serving a result from cache is always desirable and
43
- # correct.
31
+ # rather than needing to run afresh. When a server completes execution of an
32
+ # Action, it MAY choose to cache the result in the ActionCache unless `
33
+ # do_not_cache` is `true`. Clients SHOULD expect the server to do so. By default,
34
+ # future calls to Execute the same `Action` will also serve their results from
35
+ # the cache. Clients must take care to understand the caching behaviour. Ideally,
36
+ # all `Action`s will be reproducible so that serving a result from cache is
37
+ # always desirable and correct.
44
38
  class BuildBazelRemoteExecutionV2Action
45
39
  include Google::Apis::Core::Hashable
46
40
 
47
41
  # A content digest. A digest for a given blob consists of the size of the blob
48
- # and its hash. The hash algorithm to use is defined by the server.
49
- # The size is considered to be an integral part of the digest and cannot be
50
- # separated. That is, even if the `hash` field is correctly specified but
51
- # `size_bytes` is not, the server MUST reject the request.
52
- # The reason for including the size in the digest is as follows: in a great
53
- # many cases, the server needs to know the size of the blob it is about to work
54
- # with prior to starting an operation with it, such as flattening Merkle tree
55
- # structures or streaming it to a worker. Technically, the server could
56
- # implement a separate metadata store, but this results in a significantly more
57
- # complicated implementation as opposed to having the client specify the size
58
- # up-front (or storing the size along with the digest in every message where
59
- # digests are embedded). This does mean that the API leaks some implementation
60
- # details of (what we consider to be) a reasonable server implementation, but
61
- # we consider this to be a worthwhile tradeoff.
62
- # When a `Digest` is used to refer to a proto message, it always refers to the
63
- # message in binary encoded form. To ensure consistent hashing, clients and
64
- # servers MUST ensure that they serialize messages according to the following
65
- # rules, even if there are alternate valid encodings for the same message:
66
- # * Fields are serialized in tag order.
67
- # * There are no unknown fields.
68
- # * There are no duplicate fields.
69
- # * Fields are serialized according to the default semantics for their type.
70
- # Most protocol buffer implementations will always follow these rules when
71
- # serializing, but care should be taken to avoid shortcuts. For instance,
72
- # concatenating two messages to merge them may produce duplicate fields.
42
+ # and its hash. The hash algorithm to use is defined by the server. The size is
43
+ # considered to be an integral part of the digest and cannot be separated. That
44
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
45
+ # the server MUST reject the request. The reason for including the size in the
46
+ # digest is as follows: in a great many cases, the server needs to know the size
47
+ # of the blob it is about to work with prior to starting an operation with it,
48
+ # such as flattening Merkle tree structures or streaming it to a worker.
49
+ # Technically, the server could implement a separate metadata store, but this
50
+ # results in a significantly more complicated implementation as opposed to
51
+ # having the client specify the size up-front (or storing the size along with
52
+ # the digest in every message where digests are embedded). This does mean that
53
+ # the API leaks some implementation details of (what we consider to be) a
54
+ # reasonable server implementation, but we consider this to be a worthwhile
55
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
56
+ # refers to the message in binary encoded form. To ensure consistent hashing,
57
+ # clients and servers MUST ensure that they serialize messages according to the
58
+ # following rules, even if there are alternate valid encodings for the same
59
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
60
+ # There are no duplicate fields. * Fields are serialized according to the
61
+ # default semantics for their type. Most protocol buffer implementations will
62
+ # always follow these rules when serializing, but care should be taken to avoid
63
+ # shortcuts. For instance, concatenating two messages to merge them may produce
64
+ # duplicate fields.
73
65
  # Corresponds to the JSON property `commandDigest`
74
66
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
75
67
  attr_accessor :command_digest
76
68
 
77
- # If true, then the `Action`'s result cannot be cached, and in-flight
78
- # requests for the same `Action` may not be merged.
69
+ # If true, then the `Action`'s result cannot be cached, and in-flight requests
70
+ # for the same `Action` may not be merged.
79
71
  # Corresponds to the JSON property `doNotCache`
80
72
  # @return [Boolean]
81
73
  attr_accessor :do_not_cache
82
74
  alias_method :do_not_cache?, :do_not_cache
83
75
 
84
76
  # A content digest. A digest for a given blob consists of the size of the blob
85
- # and its hash. The hash algorithm to use is defined by the server.
86
- # The size is considered to be an integral part of the digest and cannot be
87
- # separated. That is, even if the `hash` field is correctly specified but
88
- # `size_bytes` is not, the server MUST reject the request.
89
- # The reason for including the size in the digest is as follows: in a great
90
- # many cases, the server needs to know the size of the blob it is about to work
91
- # with prior to starting an operation with it, such as flattening Merkle tree
92
- # structures or streaming it to a worker. Technically, the server could
93
- # implement a separate metadata store, but this results in a significantly more
94
- # complicated implementation as opposed to having the client specify the size
95
- # up-front (or storing the size along with the digest in every message where
96
- # digests are embedded). This does mean that the API leaks some implementation
97
- # details of (what we consider to be) a reasonable server implementation, but
98
- # we consider this to be a worthwhile tradeoff.
99
- # When a `Digest` is used to refer to a proto message, it always refers to the
100
- # message in binary encoded form. To ensure consistent hashing, clients and
101
- # servers MUST ensure that they serialize messages according to the following
102
- # rules, even if there are alternate valid encodings for the same message:
103
- # * Fields are serialized in tag order.
104
- # * There are no unknown fields.
105
- # * There are no duplicate fields.
106
- # * Fields are serialized according to the default semantics for their type.
107
- # Most protocol buffer implementations will always follow these rules when
108
- # serializing, but care should be taken to avoid shortcuts. For instance,
109
- # concatenating two messages to merge them may produce duplicate fields.
77
+ # and its hash. The hash algorithm to use is defined by the server. The size is
78
+ # considered to be an integral part of the digest and cannot be separated. That
79
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
80
+ # the server MUST reject the request. The reason for including the size in the
81
+ # digest is as follows: in a great many cases, the server needs to know the size
82
+ # of the blob it is about to work with prior to starting an operation with it,
83
+ # such as flattening Merkle tree structures or streaming it to a worker.
84
+ # Technically, the server could implement a separate metadata store, but this
85
+ # results in a significantly more complicated implementation as opposed to
86
+ # having the client specify the size up-front (or storing the size along with
87
+ # the digest in every message where digests are embedded). This does mean that
88
+ # the API leaks some implementation details of (what we consider to be) a
89
+ # reasonable server implementation, but we consider this to be a worthwhile
90
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
91
+ # refers to the message in binary encoded form. To ensure consistent hashing,
92
+ # clients and servers MUST ensure that they serialize messages according to the
93
+ # following rules, even if there are alternate valid encodings for the same
94
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
95
+ # There are no duplicate fields. * Fields are serialized according to the
96
+ # default semantics for their type. Most protocol buffer implementations will
97
+ # always follow these rules when serializing, but care should be taken to avoid
98
+ # shortcuts. For instance, concatenating two messages to merge them may produce
99
+ # duplicate fields.
110
100
  # Corresponds to the JSON property `inputRootDigest`
111
101
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
112
102
  attr_accessor :input_root_digest
113
103
 
114
- # List of required supported NodeProperty
115
- # keys. In order to ensure that equivalent `Action`s always hash to the same
116
- # value, the supported node properties MUST be lexicographically sorted by name.
117
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
118
- # The interpretation of these properties is server-dependent. If a property is
119
- # not recognized by the server, the server will return an `INVALID_ARGUMENT`
120
- # error.
104
+ # List of required supported NodeProperty keys. In order to ensure that
105
+ # equivalent `Action`s always hash to the same value, the supported node
106
+ # properties MUST be lexicographically sorted by name. Sorting of strings is
107
+ # done by code point, equivalently, by the UTF-8 bytes. The interpretation of
108
+ # these properties is server-dependent. If a property is not recognized by the
109
+ # server, the server will return an `INVALID_ARGUMENT` error.
121
110
  # Corresponds to the JSON property `outputNodeProperties`
122
111
  # @return [Array<String>]
123
112
  attr_accessor :output_node_properties
124
113
 
125
- # A timeout after which the execution should be killed. If the timeout is
126
- # absent, then the client is specifying that the execution should continue
127
- # as long as the server will let it. The server SHOULD impose a timeout if
128
- # the client does not specify one, however, if the client does specify a
129
- # timeout that is longer than the server's maximum timeout, the server MUST
130
- # reject the request.
131
- # The timeout is a part of the
132
- # Action message, and
133
- # therefore two `Actions` with different timeouts are different, even if they
134
- # are otherwise identical. This is because, if they were not, running an
135
- # `Action` with a lower timeout than is required might result in a cache hit
136
- # from an execution run with a longer timeout, hiding the fact that the
137
- # timeout is too short. By encoding it directly in the `Action`, a lower
138
- # timeout will result in a cache miss and the execution timeout will fail
139
- # immediately, rather than whenever the cache entry gets evicted.
114
+ # A timeout after which the execution should be killed. If the timeout is absent,
115
+ # then the client is specifying that the execution should continue as long as
116
+ # the server will let it. The server SHOULD impose a timeout if the client does
117
+ # not specify one, however, if the client does specify a timeout that is longer
118
+ # than the server's maximum timeout, the server MUST reject the request. The
119
+ # timeout is a part of the Action message, and therefore two `Actions` with
120
+ # different timeouts are different, even if they are otherwise identical. This
121
+ # is because, if they were not, running an `Action` with a lower timeout than is
122
+ # required might result in a cache hit from an execution run with a longer
123
+ # timeout, hiding the fact that the timeout is too short. By encoding it
124
+ # directly in the `Action`, a lower timeout will result in a cache miss and the
125
+ # execution timeout will fail immediately, rather than whenever the cache entry
126
+ # gets evicted.
140
127
  # Corresponds to the JSON property `timeout`
141
128
  # @return [String]
142
129
  attr_accessor :timeout
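Since the `Action` and `Digest` comments above carry the caching rules, a construction sketch with the generated data classes may help; the digest values are shortened placeholders like the examples in these comments, and the `hash`/`size_bytes` attribute names (mirroring the JSON properties) are assumptions. A real client must supply the server-defined hash and exact byte size of the serialized `Command` and input-root `Directory`:

```ruby
require 'google/apis/remotebuildexecution_v1'

RBE = Google::Apis::RemotebuildexecutionV1

# Placeholder digests; real values identify actual serialized messages.
command_digest = RBE::BuildBazelRemoteExecutionV2Digest.new(
  hash: '4a73bc9d03...', size_bytes: 140
)
input_root_digest = RBE::BuildBazelRemoteExecutionV2Digest.new(
  hash: '4cf2eda940...', size_bytes: 43
)

action = RBE::BuildBazelRemoteExecutionV2Action.new(
  command_digest: command_digest,
  input_root_digest: input_root_digest,
  do_not_cache: false, # allow serving the result from the ActionCache
  timeout: '600s'      # part of the Action, so it affects cache identity
)
```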
@@ -155,8 +142,7 @@ module Google
155
142
  end
156
143
  end
157
144
 
158
- # An ActionResult represents the result of an
159
- # Action being run.
145
+ # An ActionResult represents the result of an Action being run.
160
146
  class BuildBazelRemoteExecutionV2ActionResult
161
147
  include Google::Apis::Core::Hashable
162
148
 
@@ -170,84 +156,41 @@ module Google
170
156
  # @return [Fixnum]
171
157
  attr_accessor :exit_code
172
158
 
173
- # The output directories of the action. For each output directory requested
174
- # in the `output_directories` or `output_paths` field of the Action, if the
159
+ # The output directories of the action. For each output directory requested in
160
+ # the `output_directories` or `output_paths` field of the Action, if the
175
161
  # corresponding directory existed after the action completed, a single entry
176
- # will be present in the output list, which will contain the digest of a
177
- # Tree message containing the
178
- # directory tree, and the path equal exactly to the corresponding Action
179
- # output_directories member.
180
- # As an example, suppose the Action had an output directory `a/b/dir` and the
181
- # execution produced the following contents in `a/b/dir`: a file named `bar`
182
- # and a directory named `foo` with an executable file named `baz`. Then,
183
- # output_directory will contain (hashes shortened for readability):
184
- # ```json
185
- # // OutputDirectory proto:
186
- # `
187
- # path: "a/b/dir"
188
- # tree_digest: `
189
- # hash: "4a73bc9d03...",
190
- # size: 55
191
- # `
192
- # `
193
- # // Tree proto with hash "4a73bc9d03..." and size 55:
194
- # `
195
- # root: `
196
- # files: [
197
- # `
198
- # name: "bar",
199
- # digest: `
200
- # hash: "4a73bc9d03...",
201
- # size: 65534
202
- # `
203
- # `
204
- # ],
205
- # directories: [
206
- # `
207
- # name: "foo",
208
- # digest: `
209
- # hash: "4cf2eda940...",
210
- # size: 43
211
- # `
212
- # `
213
- # ]
214
- # `
215
- # children : `
216
- # // (Directory proto with hash "4cf2eda940..." and size 43)
217
- # files: [
218
- # `
219
- # name: "baz",
220
- # digest: `
221
- # hash: "b2c941073e...",
222
- # size: 1294,
223
- # `,
224
- # is_executable: true
225
- # `
226
- # ]
227
- # `
228
- # `
229
- # ```
230
- # If an output of the same name as listed in `output_files` of
231
- # the Command was found in `output_directories`, but was not a directory, the
232
- # server will return a FAILED_PRECONDITION.
162
+ # will be present in the output list, which will contain the digest of a Tree
163
+ # message containing the directory tree, and the path equal exactly to the
164
+ # corresponding Action output_directories member. As an example, suppose the
165
+ # Action had an output directory `a/b/dir` and the execution produced the
166
+ # following contents in `a/b/dir`: a file named `bar` and a directory named `foo`
167
+ # with an executable file named `baz`. Then, output_directory will contain (
168
+ # hashes shortened for readability): ```json // OutputDirectory proto: ` path: "
169
+ # a/b/dir" tree_digest: ` hash: "4a73bc9d03...", size: 55 ` ` // Tree proto with
170
+ # hash "4a73bc9d03..." and size 55: ` root: ` files: [ ` name: "bar", digest: `
171
+ # hash: "4a73bc9d03...", size: 65534 ` ` ], directories: [ ` name: "foo", digest:
172
+ # ` hash: "4cf2eda940...", size: 43 ` ` ] ` children : ` // (Directory proto
173
+ # with hash "4cf2eda940..." and size 43) files: [ ` name: "baz", digest: ` hash:
174
+ # "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ` ``` If an output
175
+ # of the same name as listed in `output_files` of the Command was found in `
176
+ # output_directories`, but was not a directory, the server will return a
177
+ # FAILED_PRECONDITION.
233
178
  # Corresponds to the JSON property `outputDirectories`
234
179
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputDirectory>]
235
180
  attr_accessor :output_directories
236
181
 
237
182
  # The output directories of the action that are symbolic links to other
238
183
  # directories. Those may be links to other output directories, or input
239
- # directories, or even absolute paths outside of the working directory,
240
- # if the server supports
241
- # SymlinkAbsolutePathStrategy.ALLOWED.
242
- # For each output directory requested in the `output_directories` field of
243
- # the Action, if the directory existed after the action completed, a
244
- # single entry will be present either in this field, or in the
245
- # `output_directories` field, if the directory was not a symbolic link.
246
- # If an output of the same name was found, but was a symbolic link to a file
247
- # instead of a directory, the server will return a FAILED_PRECONDITION.
248
- # If the action does not produce the requested output, then that output
249
- # will be omitted from the list. The server is free to arrange the output
250
- # list as desired; clients MUST NOT assume that the output list is sorted.
184
+ # directories, or even absolute paths outside of the working directory, if the
185
+ # server supports SymlinkAbsolutePathStrategy.ALLOWED. For each output directory
186
+ # requested in the `output_directories` field of the Action, if the directory
187
+ # existed after the action completed, a single entry will be present either in
188
+ # this field, or in the `output_directories` field, if the directory was not a
189
+ # symbolic link. If an output of the same name was found, but was a symbolic
190
+ # link to a file instead of a directory, the server will return a
191
+ # FAILED_PRECONDITION. If the action does not produce the requested output, then
192
+ # that output will be omitted from the list. The server is free to arrange the
193
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
251
194
  # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
252
195
  # should still populate this field in addition to `output_symlinks`.
253
196
  # Corresponds to the JSON property `outputDirectorySymlinks`
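A hedged sketch of consuming these output lists from an `ActionResult`, assuming a `result` object obtained elsewhere and the snake_case attribute names generated from the JSON properties:

```ruby
require 'google/apis/remotebuildexecution_v1'

# result is assumed to be a BuildBazelRemoteExecutionV2ActionResult.
def report(result)
  puts "exit code: #{result.exit_code}"
  (result.output_files || []).each do |f|
    puts "file #{f.path} -> #{f.digest.hash} (#{f.digest.size_bytes} bytes)"
  end
  (result.output_directories || []).each do |d|
    puts "dir  #{d.path} -> Tree digest #{d.tree_digest.hash}"
  end
  # Clients MUST NOT assume either list is sorted, per the comments above.
end
```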
@@ -257,131 +200,119 @@ module Google
257
200
  # The output files of the action that are symbolic links to other files. Those
258
201
  # may be links to other output files, or input files, or even absolute paths
259
202
  # outside of the working directory, if the server supports
260
- # SymlinkAbsolutePathStrategy.ALLOWED.
261
- # For each output file requested in the `output_files` or `output_paths`
262
- # field of the Action, if the corresponding file existed after
263
- # the action completed, a single entry will be present either in this field,
264
- # or in the `output_files` field, if the file was not a symbolic link.
265
- # If an output symbolic link of the same name as listed in `output_files` of
266
- # the Command was found, but its target type was not a regular file, the
267
- # server will return a FAILED_PRECONDITION.
268
- # If the action does not produce the requested output, then that output
269
- # will be omitted from the list. The server is free to arrange the output
270
- # list as desired; clients MUST NOT assume that the output list is sorted.
271
- # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
272
- # should still populate this field in addition to `output_symlinks`.
203
+ # SymlinkAbsolutePathStrategy.ALLOWED. For each output file requested in the `
204
+ # output_files` or `output_paths` field of the Action, if the corresponding file
205
+ # existed after the action completed, a single entry will be present either in
206
+ # this field, or in the `output_files` field, if the file was not a symbolic
207
+ # link. If an output symbolic link of the same name as listed in `output_files`
208
+ # of the Command was found, but its target type was not a regular file, the
209
+ # server will return a FAILED_PRECONDITION. If the action does not produce the
210
+ # requested output, then that output will be omitted from the list. The server
211
+ # is free to arrange the output list as desired; clients MUST NOT assume that
212
+ # the output list is sorted. DEPRECATED as of v2.1. Servers that wish to be
213
+ # compatible with v2.0 API should still populate this field in addition to `
214
+ # output_symlinks`.
273
215
  # Corresponds to the JSON property `outputFileSymlinks`
274
216
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
275
217
  attr_accessor :output_file_symlinks
276
218
 
277
- # The output files of the action. For each output file requested in the
278
- # `output_files` or `output_paths` field of the Action, if the corresponding
279
- # file existed after the action completed, a single entry will be present
280
- # either in this field, or the `output_file_symlinks` field if the file was
281
- # a symbolic link to another file (`output_symlinks` field after v2.1).
282
- # If an output listed in `output_files` was found, but was a directory rather
283
- # than a regular file, the server will return a FAILED_PRECONDITION.
284
- # If the action does not produce the requested output, then that output
285
- # will be omitted from the list. The server is free to arrange the output
286
- # list as desired; clients MUST NOT assume that the output list is sorted.
219
+ # The output files of the action. For each output file requested in the `
220
+ # output_files` or `output_paths` field of the Action, if the corresponding file
221
+ # existed after the action completed, a single entry will be present either in
222
+ # this field, or the `output_file_symlinks` field if the file was a symbolic
223
+ # link to another file (`output_symlinks` field after v2.1). If an output listed
224
+ # in `output_files` was found, but was a directory rather than a regular file,
225
+ # the server will return a FAILED_PRECONDITION. If the action does not produce
226
+ # the requested output, then that output will be omitted from the list. The
227
+ # server is free to arrange the output list as desired; clients MUST NOT assume
228
+ # that the output list is sorted.
287
229
  # Corresponds to the JSON property `outputFiles`
288
230
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputFile>]
289
231
  attr_accessor :output_files
290
232
 
291
- # New in v2.1: this field will only be populated if the command
292
- # `output_paths` field was used, and not the pre v2.1 `output_files` or
293
- # `output_directories` fields.
294
- # The output paths of the action that are symbolic links to other paths. Those
295
- # may be links to other outputs, or inputs, or even absolute paths
296
- # outside of the working directory, if the server supports
297
- # SymlinkAbsolutePathStrategy.ALLOWED.
298
- # A single entry for each output requested in `output_paths`
299
- # field of the Action, if the corresponding path existed after
300
- # the action completed and was a symbolic link.
301
- # If the action does not produce a requested output, then that output
302
- # will be omitted from the list. The server is free to arrange the output
303
- # list as desired; clients MUST NOT assume that the output list is sorted.
233
+ # New in v2.1: this field will only be populated if the command `output_paths`
234
+ # field was used, and not the pre v2.1 `output_files` or `output_directories`
235
+ # fields. The output paths of the action that are symbolic links to other paths.
236
+ # Those may be links to other outputs, or inputs, or even absolute paths outside
237
+ # of the working directory, if the server supports SymlinkAbsolutePathStrategy.
238
+ # ALLOWED. A single entry for each output requested in `output_paths` field of
239
+ # the Action, if the corresponding path existed after the action completed and
240
+ # was a symbolic link. If the action does not produce a requested output, then
241
+ # that output will be omitted from the list. The server is free to arrange the
242
+ # output list as desired; clients MUST NOT assume that the output list is sorted.
304
243
  # Corresponds to the JSON property `outputSymlinks`
305
244
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
306
245
  attr_accessor :output_symlinks
307
246
 
308
247
  # A content digest. A digest for a given blob consists of the size of the blob
309
- # and its hash. The hash algorithm to use is defined by the server.
310
- # The size is considered to be an integral part of the digest and cannot be
311
- # separated. That is, even if the `hash` field is correctly specified but
312
- # `size_bytes` is not, the server MUST reject the request.
313
- # The reason for including the size in the digest is as follows: in a great
314
- # many cases, the server needs to know the size of the blob it is about to work
315
- # with prior to starting an operation with it, such as flattening Merkle tree
316
- # structures or streaming it to a worker. Technically, the server could
317
- # implement a separate metadata store, but this results in a significantly more
318
- # complicated implementation as opposed to having the client specify the size
319
- # up-front (or storing the size along with the digest in every message where
320
- # digests are embedded). This does mean that the API leaks some implementation
321
- # details of (what we consider to be) a reasonable server implementation, but
322
- # we consider this to be a worthwhile tradeoff.
323
- # When a `Digest` is used to refer to a proto message, it always refers to the
324
- # message in binary encoded form. To ensure consistent hashing, clients and
325
- # servers MUST ensure that they serialize messages according to the following
326
- # rules, even if there are alternate valid encodings for the same message:
327
- # * Fields are serialized in tag order.
328
- # * There are no unknown fields.
329
- # * There are no duplicate fields.
330
- # * Fields are serialized according to the default semantics for their type.
331
- # Most protocol buffer implementations will always follow these rules when
332
- # serializing, but care should be taken to avoid shortcuts. For instance,
333
- # concatenating two messages to merge them may produce duplicate fields.
248
+ # and its hash. The hash algorithm to use is defined by the server. The size is
249
+ # considered to be an integral part of the digest and cannot be separated. That
250
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
251
+ # the server MUST reject the request. The reason for including the size in the
252
+ # digest is as follows: in a great many cases, the server needs to know the size
253
+ # of the blob it is about to work with prior to starting an operation with it,
254
+ # such as flattening Merkle tree structures or streaming it to a worker.
255
+ # Technically, the server could implement a separate metadata store, but this
256
+ # results in a significantly more complicated implementation as opposed to
257
+ # having the client specify the size up-front (or storing the size along with
258
+ # the digest in every message where digests are embedded). This does mean that
259
+ # the API leaks some implementation details of (what we consider to be) a
260
+ # reasonable server implementation, but we consider this to be a worthwhile
261
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
262
+ # refers to the message in binary encoded form. To ensure consistent hashing,
263
+ # clients and servers MUST ensure that they serialize messages according to the
264
+ # following rules, even if there are alternate valid encodings for the same
265
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
266
+ # There are no duplicate fields. * Fields are serialized according to the
267
+ # default semantics for their type. Most protocol buffer implementations will
268
+ # always follow these rules when serializing, but care should be taken to avoid
269
+ # shortcuts. For instance, concatenating two messages to merge them may produce
270
+ # duplicate fields.
334
271
  # Corresponds to the JSON property `stderrDigest`
335
272
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
336
273
  attr_accessor :stderr_digest
337
274
 
338
- # The standard error buffer of the action. The server SHOULD NOT inline
339
- # stderr unless requested by the client in the
340
- # GetActionResultRequest
341
- # message. The server MAY omit inlining, even if requested, and MUST do so if
342
- # inlining
343
- # would cause the response to exceed message size limits.
275
+ # The standard error buffer of the action. The server SHOULD NOT inline stderr
276
+ # unless requested by the client in the GetActionResultRequest message. The
277
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
278
+ # cause the response to exceed message size limits.
344
279
  # Corresponds to the JSON property `stderrRaw`
345
280
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
346
281
  # @return [String]
347
282
  attr_accessor :stderr_raw
348
283
 
349
284
  # A content digest. A digest for a given blob consists of the size of the blob
350
- # and its hash. The hash algorithm to use is defined by the server.
351
- # The size is considered to be an integral part of the digest and cannot be
352
- # separated. That is, even if the `hash` field is correctly specified but
353
- # `size_bytes` is not, the server MUST reject the request.
354
- # The reason for including the size in the digest is as follows: in a great
355
- # many cases, the server needs to know the size of the blob it is about to work
356
- # with prior to starting an operation with it, such as flattening Merkle tree
357
- # structures or streaming it to a worker. Technically, the server could
358
- # implement a separate metadata store, but this results in a significantly more
359
- # complicated implementation as opposed to having the client specify the size
360
- # up-front (or storing the size along with the digest in every message where
361
- # digests are embedded). This does mean that the API leaks some implementation
362
- # details of (what we consider to be) a reasonable server implementation, but
363
- # we consider this to be a worthwhile tradeoff.
364
- # When a `Digest` is used to refer to a proto message, it always refers to the
365
- # message in binary encoded form. To ensure consistent hashing, clients and
366
- # servers MUST ensure that they serialize messages according to the following
367
- # rules, even if there are alternate valid encodings for the same message:
368
- # * Fields are serialized in tag order.
369
- # * There are no unknown fields.
370
- # * There are no duplicate fields.
371
- # * Fields are serialized according to the default semantics for their type.
372
- # Most protocol buffer implementations will always follow these rules when
373
- # serializing, but care should be taken to avoid shortcuts. For instance,
374
- # concatenating two messages to merge them may produce duplicate fields.
285
+ # and its hash. The hash algorithm to use is defined by the server. The size is
286
+ # considered to be an integral part of the digest and cannot be separated. That
287
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
288
+ # the server MUST reject the request. The reason for including the size in the
289
+ # digest is as follows: in a great many cases, the server needs to know the size
290
+ # of the blob it is about to work with prior to starting an operation with it,
291
+ # such as flattening Merkle tree structures or streaming it to a worker.
292
+ # Technically, the server could implement a separate metadata store, but this
293
+ # results in a significantly more complicated implementation as opposed to
294
+ # having the client specify the size up-front (or storing the size along with
295
+ # the digest in every message where digests are embedded). This does mean that
296
+ # the API leaks some implementation details of (what we consider to be) a
297
+ # reasonable server implementation, but we consider this to be a worthwhile
298
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
299
+ # refers to the message in binary encoded form. To ensure consistent hashing,
300
+ # clients and servers MUST ensure that they serialize messages according to the
301
+ # following rules, even if there are alternate valid encodings for the same
302
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
303
+ # There are no duplicate fields. * Fields are serialized according to the
304
+ # default semantics for their type. Most protocol buffer implementations will
305
+ # always follow these rules when serializing, but care should be taken to avoid
306
+ # shortcuts. For instance, concatenating two messages to merge them may produce
307
+ # duplicate fields.
375
308
  # Corresponds to the JSON property `stdoutDigest`
376
309
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
377
310
  attr_accessor :stdout_digest
378
311
 
379
- # The standard output buffer of the action. The server SHOULD NOT inline
380
- # stdout unless requested by the client in the
381
- # GetActionResultRequest
382
- # message. The server MAY omit inlining, even if requested, and MUST do so if
383
- # inlining
384
- # would cause the response to exceed message size limits.
312
+ # The standard output buffer of the action. The server SHOULD NOT inline stdout
313
+ # unless requested by the client in the GetActionResultRequest message. The
314
+ # server MAY omit inlining, even if requested, and MUST do so if inlining would
315
+ # cause the response to exceed message size limits.
385
316
  # Corresponds to the JSON property `stdoutRaw`
386
317
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
387
318
  # @return [String]
@@ -407,12 +338,11 @@ module Google
407
338
  end
408
339
  end
409
340
 
410
- # A `Command` is the actual command executed by a worker running an
411
- # Action and specifications of its
412
- # environment.
413
- # Except as otherwise required, the environment (such as which system
414
- # libraries or binaries are available, and what filesystems are mounted where)
415
- # is defined by and specific to the implementation of the remote execution API.
341
+ # A `Command` is the actual command executed by a worker running an Action and
342
+ # specifications of its environment. Except as otherwise required, the
343
+ # environment (such as which system libraries or binaries are available, and
344
+ # what filesystems are mounted where) is defined by and specific to the
345
+ # implementation of the remote execution API.
416
346
  class BuildBazelRemoteExecutionV2Command
417
347
  include Google::Apis::Core::Hashable
418
348
 
@@ -425,105 +355,90 @@ module Google
425
355
 
426
356
  # The environment variables to set when running the program. The worker may
427
357
  # provide its own default environment variables; these defaults can be
428
- # overridden using this field. Additional variables can also be specified.
429
- # In order to ensure that equivalent
430
- # Commands always hash to the same
431
- # value, the environment variables MUST be lexicographically sorted by name.
432
- # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
358
+ # overridden using this field. Additional variables can also be specified. In
359
+ # order to ensure that equivalent Commands always hash to the same value, the
360
+ # environment variables MUST be lexicographically sorted by name. Sorting of
361
+ # strings is done by code point, equivalently, by the UTF-8 bytes.
433
362
  # Corresponds to the JSON property `environmentVariables`
434
363
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2CommandEnvironmentVariable>]
435
364
  attr_accessor :environment_variables
436
365
 
437
- # A list of the output directories that the client expects to retrieve from
438
- # the action. Only the listed directories will be returned (an entire
439
- # directory structure will be returned as a
440
- # Tree message digest, see
441
- # OutputDirectory), as
442
- # well as files listed in `output_files`. Other files or directories that
443
- # may be created during command execution are discarded.
444
- # The paths are relative to the working directory of the action execution.
445
- # The paths are specified using a single forward slash (`/`) as a path
446
- # separator, even if the execution platform natively uses a different
447
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
448
- # being a relative path. The special value of empty string is allowed,
449
- # although not recommended, and can be used to capture the entire working
450
- # directory tree, including inputs.
451
- # In order to ensure consistent hashing of the same Action, the output paths
452
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
453
- # bytes).
454
- # An output directory cannot be duplicated or have the same path as any of
455
- # the listed output files. An output directory is allowed to be a parent of
456
- # another output directory.
366
+ # A list of the output directories that the client expects to retrieve from the
367
+ # action. Only the listed directories will be returned (an entire directory
368
+ # structure will be returned as a Tree message digest, see OutputDirectory), as
369
+ # well as files listed in `output_files`. Other files or directories that may be
370
+ # created during command execution are discarded. The paths are relative to the
371
+ # working directory of the action execution. The paths are specified using a
372
+ # single forward slash (`/`) as a path separator, even if the execution platform
373
+ # natively uses a different separator. The path MUST NOT include a trailing
374
+ # slash, nor a leading slash, being a relative path. The special value of empty
375
+ # string is allowed, although not recommended, and can be used to capture the
376
+ # entire working directory tree, including inputs. In order to ensure consistent
377
+ # hashing of the same Action, the output paths MUST be sorted lexicographically
378
+ # by code point (or, equivalently, by UTF-8 bytes). An output directory cannot
379
+ # be duplicated or have the same path as any of the listed output files. An
380
+ # output directory is allowed to be a parent of another output directory.
457
381
  # Directories leading up to the output directories (but not the output
458
- # directories themselves) are created by the worker prior to execution, even
459
- # if they are not explicitly part of the input root.
460
- # DEPRECATED since 2.1: Use `output_paths` instead.
382
+ # directories themselves) are created by the worker prior to execution, even if
383
+ # they are not explicitly part of the input root. DEPRECATED since 2.1: Use `
384
+ # output_paths` instead.
461
385
  # Corresponds to the JSON property `outputDirectories`
462
386
  # @return [Array<String>]
463
387
  attr_accessor :output_directories
464
388
 
465
- # A list of the output files that the client expects to retrieve from the
466
- # action. Only the listed files, as well as directories listed in
467
- # `output_directories`, will be returned to the client as output.
468
- # Other files or directories that may be created during command execution
469
- # are discarded.
470
- # The paths are relative to the working directory of the action execution.
471
- # The paths are specified using a single forward slash (`/`) as a path
472
- # separator, even if the execution platform natively uses a different
473
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
474
- # being a relative path.
475
- # In order to ensure consistent hashing of the same Action, the output paths
476
- # MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
477
- # bytes).
478
- # An output file cannot be duplicated, be a parent of another output file, or
479
- # have the same path as any of the listed output directories.
480
- # Directories leading up to the output files are created by the worker prior
481
- # to execution, even if they are not explicitly part of the input root.
482
- # DEPRECATED since v2.1: Use `output_paths` instead.
389
+ # A list of the output files that the client expects to retrieve from the action.
390
+ # Only the listed files, as well as directories listed in `output_directories`,
391
+ # will be returned to the client as output. Other files or directories that may
392
+ # be created during command execution are discarded. The paths are relative to
393
+ # the working directory of the action execution. The paths are specified using a
394
+ # single forward slash (`/`) as a path separator, even if the execution platform
395
+ # natively uses a different separator. The path MUST NOT include a trailing
396
+ # slash, nor a leading slash, being a relative path. In order to ensure
397
+ # consistent hashing of the same Action, the output paths MUST be sorted
398
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes). An output
399
+ # file cannot be duplicated, be a parent of another output file, or have the
400
+ # same path as any of the listed output directories. Directories leading up to
401
+ # the output files are created by the worker prior to execution, even if they
402
+ # are not explicitly part of the input root. DEPRECATED since v2.1: Use `
403
+ # output_paths` instead.
483
404
  # Corresponds to the JSON property `outputFiles`
484
405
  # @return [Array<String>]
485
406
  attr_accessor :output_files
486
407
 
487
- # A list of the output paths that the client expects to retrieve from the
488
- # action. Only the listed paths will be returned to the client as output.
489
- # The type of the output (file or directory) is not specified, and will be
490
- # determined by the server after action execution. If the resulting path is
491
- # a file, it will be returned in an
492
- # OutputFile) typed field.
493
- # If the path is a directory, the entire directory structure will be returned
494
- # as a Tree message digest, see
495
- # OutputDirectory)
496
- # Other files or directories that may be created during command execution
497
- # are discarded.
498
- # The paths are relative to the working directory of the action execution.
499
- # The paths are specified using a single forward slash (`/`) as a path
500
- # separator, even if the execution platform natively uses a different
501
- # separator. The path MUST NOT include a trailing slash, nor a leading slash,
502
- # being a relative path.
503
- # In order to ensure consistent hashing of the same Action, the output paths
504
- # MUST be deduplicated and sorted lexicographically by code point (or,
505
- # equivalently, by UTF-8 bytes).
506
- # Directories leading up to the output paths are created by the worker prior
507
- # to execution, even if they are not explicitly part of the input root.
508
- # New in v2.1: this field supersedes the DEPRECATED `output_files` and
509
- # `output_directories` fields. If `output_paths` is used, `output_files` and
510
- # `output_directories` will be ignored!
408
+ # A list of the output paths that the client expects to retrieve from the action.
409
+ # Only the listed paths will be returned to the client as output. The type of
410
+ # the output (file or directory) is not specified, and will be determined by the
411
+ # server after action execution. If the resulting path is a file, it will be
412
+ # returned in an OutputFile) typed field. If the path is a directory, the entire
413
+ # directory structure will be returned as a Tree message digest, see
414
+ # OutputDirectory) Other files or directories that may be created during command
415
+ # execution are discarded. The paths are relative to the working directory of
416
+ # the action execution. The paths are specified using a single forward slash (`/`
417
+ # ) as a path separator, even if the execution platform natively uses a
418
+ # different separator. The path MUST NOT include a trailing slash, nor a leading
419
+ # slash, being a relative path. In order to ensure consistent hashing of the
420
+ # same Action, the output paths MUST be deduplicated and sorted
421
+ # lexicographically by code point (or, equivalently, by UTF-8 bytes).
422
+ # Directories leading up to the output paths are created by the worker prior to
423
+ # execution, even if they are not explicitly part of the input root. New in v2.1:
424
+ # this field supersedes the DEPRECATED `output_files` and `output_directories`
425
+ # fields. If `output_paths` is used, `output_files` and `output_directories`
426
+ # will be ignored!
511
427
  # Corresponds to the JSON property `outputPaths`
512
428
  # @return [Array<String>]
513
429
  attr_accessor :output_paths
514
430
 
515
431
  # A `Platform` is a set of requirements, such as hardware, operating system, or
- # compiler toolchain, for an
- # Action's execution
- # environment. A `Platform` is represented as a series of key-value pairs
- # representing the properties that are required of the platform.
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
+ # represented as a series of key-value pairs representing the properties that
+ # are required of the platform.
  # Corresponds to the JSON property `platform`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Platform]
  attr_accessor :platform
 
- # The working directory, relative to the input root, for the command to run
- # in. It must be a directory which exists in the input tree. If it is left
- # empty, then the action is run in the input root.
+ # The working directory, relative to the input root, for the command to run in.
+ # It must be a directory which exists in the input tree. If it is left empty,
+ # then the action is run in the input root.
  # Corresponds to the JSON property `workingDirectory`
  # @return [String]
  attr_accessor :working_directory
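The deduplicate-and-sort requirement on `output_paths` above is easy to get wrong. A minimal Ruby sketch of the normalization, assuming raw path strings as input; `normalize_output_paths` is a hypothetical helper, not part of this gem:

```ruby
# Hypothetical helper (not gem API): put output paths into the canonical
# form required for consistent Action hashing -- '/'-separated, no leading
# or trailing slash, deduplicated, sorted by UTF-8 bytes.
def normalize_output_paths(paths)
  paths
    .map { |p| p.tr('\\', '/').gsub(%r{\A/+|/+\z}, '') }
    .uniq
    .sort # Ruby compares strings byte-wise, i.e. by code point for UTF-8
end

normalize_output_paths(['out/lib.a', 'out/bin/tool', '/out/lib.a'])
# => ["out/bin/tool", "out/lib.a"]
```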
@@ -571,31 +486,29 @@ module Google
  end
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  class BuildBazelRemoteExecutionV2Digest
  include Google::Apis::Core::Hashable
 
@@ -622,75 +535,31 @@ module Google
  end
 
  # A `Directory` represents a directory node in a file tree, containing zero or
- # more children FileNodes,
- # DirectoryNodes and
- # SymlinkNodes.
- # Each `Node` contains its name in the directory, either the digest of its
- # content (either a file blob or a `Directory` proto) or a symlink target, as
- # well as possibly some metadata about the file or directory.
- # In order to ensure that two equivalent directory trees hash to the same
- # value, the following restrictions MUST be obeyed when constructing a
- # a `Directory`:
- # * Every child in the directory must have a path of exactly one segment.
- # Multiple levels of directory hierarchy may not be collapsed.
- # * Each child in the directory must have a unique path segment (file name).
- # Note that while the API itself is case-sensitive, the environment where
- # the Action is executed may or may not be case-sensitive. That is, it is
- # legal to call the API with a Directory that has both "Foo" and "foo" as
- # children, but the Action may be rejected by the remote system upon
- # execution.
- # * The files, directories and symlinks in the directory must each be sorted
- # in lexicographical order by path. The path strings must be sorted by code
- # point, equivalently, by UTF-8 bytes.
- # * The NodeProperties of files,
- # directories, and symlinks must be sorted in lexicographical order by
- # property name.
- # A `Directory` that obeys the restrictions is said to be in canonical form.
- # As an example, the following could be used for a file named `bar` and a
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
+ # its name in the directory, either the digest of its content (either a file
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
+ # metadata about the file or directory. In order to ensure that two equivalent
+ # directory trees hash to the same value, the following restrictions MUST be
+ # obeyed when constructing a a `Directory`: * Every child in the directory must
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
+ # not be collapsed. * Each child in the directory must have a unique path
+ # segment (file name). Note that while the API itself is case-sensitive, the
+ # environment where the Action is executed may or may not be case-sensitive.
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
+ # foo" as children, but the Action may be rejected by the remote system upon
+ # execution. * The files, directories and symlinks in the directory must each be
+ # sorted in lexicographical order by path. The path strings must be sorted by
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
+ # directories, and symlinks must be sorted in lexicographical order by property
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
+ # form. As an example, the following could be used for a file named `bar` and a
  # directory named `foo` with an executable file named `baz` (hashes shortened
- # for readability):
- # ```json
- # // (Directory proto)
- # `
- # files: [
- # `
- # name: "bar",
- # digest: `
- # hash: "4a73bc9d03...",
- # size: 65534
- # `,
- # node_properties: [
- # `
- # "name": "MTime",
- # "value": "2017-01-15T01:30:15.01Z"
- # `
- # ]
- # `
- # ],
- # directories: [
- # `
- # name: "foo",
- # digest: `
- # hash: "4cf2eda940...",
- # size: 43
- # `
- # `
- # ]
- # `
- # // (Directory proto with hash "4cf2eda940..." and size 43)
- # `
- # files: [
- # `
- # name: "baz",
- # digest: `
- # hash: "b2c941073e...",
- # size: 1294,
- # `,
- # is_executable: true
- # `
- # ]
- # `
- # ```
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
  class BuildBazelRemoteExecutionV2Directory
  include Google::Apis::Core::Hashable
 
@@ -727,38 +596,35 @@ module Google
  end
  end
 
- # A `DirectoryNode` represents a child of a
- # Directory which is itself
- # a `Directory` and its associated metadata.
+ # A `DirectoryNode` represents a child of a Directory which is itself a `
+ # Directory` and its associated metadata.
  class BuildBazelRemoteExecutionV2DirectoryNode
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest
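Since the same `Digest` description recurs throughout this file, one sketch of the size-plus-hash pairing it describes may help. This assumes the server has chosen SHA-256 as its digest function, and `digest_of` is a hypothetical helper rather than gem API:

```ruby
require 'digest'

# Hypothetical helper: compute the hash/size pair a Digest message carries
# for a blob. The two fields are inseparable -- a request with only `hash`
# set MUST be rejected by the server.
def digest_of(blob)
  { hash: Digest::SHA256.hexdigest(blob), size_bytes: blob.bytesize }
end

digest_of('hello')
# => {:hash=>"2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
#     :size_bytes=>5}
```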
@@ -779,40 +645,35 @@ module Google
  end
  end
 
- # Metadata about an ongoing
- # execution, which
- # will be contained in the metadata
- # field of the
- # Operation.
+ # Metadata about an ongoing execution, which will be contained in the metadata
+ # field of the Operation.
  class BuildBazelRemoteExecutionV2ExecuteOperationMetadata
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `actionDigest`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :action_digest
@@ -822,15 +683,13 @@ module Google
  # @return [String]
  attr_accessor :stage
 
- # If set, the client can use this name with
- # ByteStream.Read to stream the
+ # If set, the client can use this name with ByteStream.Read to stream the
  # standard error.
  # Corresponds to the JSON property `stderrStreamName`
  # @return [String]
  attr_accessor :stderr_stream_name
 
- # If set, the client can use this name with
- # ByteStream.Read to stream the
+ # If set, the client can use this name with ByteStream.Read to stream the
  # standard output.
  # Corresponds to the JSON property `stdoutStreamName`
  # @return [String]
@@ -849,11 +708,8 @@ module Google
  end
  end
 
- # The response message for
- # Execution.Execute,
- # which will be contained in the response
- # field of the
- # Operation.
+ # The response message for Execution.Execute, which will be contained in the
+ # response field of the Operation.
  class BuildBazelRemoteExecutionV2ExecuteResponse
  include Google::Apis::Core::Hashable
 
@@ -869,29 +725,27 @@ module Google
  # @return [String]
  attr_accessor :message
 
- # An ActionResult represents the result of an
- # Action being run.
+ # An ActionResult represents the result of an Action being run.
  # Corresponds to the JSON property `result`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2ActionResult]
  attr_accessor :result
 
  # An optional list of additional log outputs the server wishes to provide. A
- # server can use this to return execution-specific logs however it wishes.
- # This is intended primarily to make it easier for users to debug issues that
- # may be outside of the actual job execution, such as by identifying the
- # worker executing the action or by providing logs from the worker's setup
- # phase. The keys SHOULD be human readable so that a client can display them
- # to a user.
+ # server can use this to return execution-specific logs however it wishes. This
+ # is intended primarily to make it easier for users to debug issues that may be
+ # outside of the actual job execution, such as by identifying the worker
+ # executing the action or by providing logs from the worker's setup phase. The
+ # keys SHOULD be human readable so that a client can display them to a user.
  # Corresponds to the JSON property `serverLogs`
  # @return [Hash<String,Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2LogFile>]
  attr_accessor :server_logs
 
- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `status`
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus]
  attr_accessor :status
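A sketch of how the `status` field is typically consumed, assuming `response` is an `ExecuteResponse` instance; the zero code meaning OK follows the gRPC code enumeration:

```ruby
# Sketch: treat execution as successful only when `status` is absent or
# carries the gRPC OK code (0); otherwise report why execution failed.
def execution_error(response)
  status = response.status
  return nil if status.nil? || status.code.to_i.zero?
  "execution failed (code #{status.code}): #{status.message}"
end
```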
@@ -988,31 +842,29 @@ module Google
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest
@@ -1051,40 +903,38 @@ module Google
  include Google::Apis::Core::Hashable
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest
 
- # This is a hint as to the purpose of the log, and is set to true if the log
- # is human-readable text that can be usefully displayed to a user, and false
- # otherwise. For instance, if a command-line client wishes to print the
- # server logs to the terminal for a failed action, this allows it to avoid
- # displaying a binary file.
+ # This is a hint as to the purpose of the log, and is set to true if the log is
+ # human-readable text that can be usefully displayed to a user, and false
+ # otherwise. For instance, if a command-line client wishes to print the server
+ # logs to the terminal for a failed action, this allows it to avoid displaying a
+ # binary file.
  # Corresponds to the JSON property `humanReadable`
  # @return [Boolean]
  attr_accessor :human_readable
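A sketch of the client-side use the `humanReadable` hint above anticipates; `response` is assumed to be an `ExecuteResponse`, and actually fetching log contents by digest (for example over ByteStream.Read, as the stream-name fields earlier suggest) is elided:

```ruby
# Sketch: pick out only the server logs flagged as printable text, mapping
# each log name to the digest a client would then fetch and display.
def printable_log_digests(response)
  (response.server_logs || {})
    .select { |_name, log_file| log_file.human_readable }
    .transform_values(&:digest)
end
```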
@@ -1101,10 +951,8 @@ module Google
  end
  end
 
- # A single property for FileNodes,
- # DirectoryNodes, and
- # SymlinkNodes. The server is
- # responsible for specifying the property `name`s that it accepts. If
+ # A single property for FileNodes, DirectoryNodes, and SymlinkNodes. The server
+ # is responsible for specifying the property `name`s that it accepts. If
  # permitted by the server, the same `name` may occur multiple times.
  class BuildBazelRemoteExecutionV2NodeProperty
  include Google::Apis::Core::Hashable
@@ -1136,39 +984,37 @@ module Google
  include Google::Apis::Core::Hashable
 
  # The full path of the directory relative to the working directory. The path
- # separator is a forward slash `/`. Since this is a relative path, it MUST
- # NOT begin with a leading forward slash. The empty string value is allowed,
- # and it denotes the entire working directory.
+ # separator is a forward slash `/`. Since this is a relative path, it MUST NOT
+ # begin with a leading forward slash. The empty string value is allowed, and it
+ # denotes the entire working directory.
  # Corresponds to the JSON property `path`
  # @return [String]
  attr_accessor :path
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `treeDigest`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :tree_digest
@@ -1184,51 +1030,45 @@ module Google
  end
  end
 
- # An `OutputFile` is similar to a
- # FileNode, but it is used as an
- # output in an `ActionResult`. It allows a full file path rather than
- # only a name.
+ # An `OutputFile` is similar to a FileNode, but it is used as an output in an `
+ # ActionResult`. It allows a full file path rather than only a name.
  class BuildBazelRemoteExecutionV2OutputFile
  include Google::Apis::Core::Hashable
 
  # The contents of the file if inlining was requested. The server SHOULD NOT
- # inline
- # file contents unless requested by the client in the
- # GetActionResultRequest
- # message. The server MAY omit inlining, even if requested, and MUST do so if
- # inlining
- # would cause the response to exceed message size limits.
+ # inline file contents unless requested by the client in the
+ # GetActionResultRequest message. The server MAY omit inlining, even if
+ # requested, and MUST do so if inlining would cause the response to exceed
+ # message size limits.
  # Corresponds to the JSON property `contents`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
  attr_accessor :contents
 
  # A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server.
- # The size is considered to be an integral part of the digest and cannot be
- # separated. That is, even if the `hash` field is correctly specified but
- # `size_bytes` is not, the server MUST reject the request.
- # The reason for including the size in the digest is as follows: in a great
- # many cases, the server needs to know the size of the blob it is about to work
- # with prior to starting an operation with it, such as flattening Merkle tree
- # structures or streaming it to a worker. Technically, the server could
- # implement a separate metadata store, but this results in a significantly more
- # complicated implementation as opposed to having the client specify the size
- # up-front (or storing the size along with the digest in every message where
- # digests are embedded). This does mean that the API leaks some implementation
- # details of (what we consider to be) a reasonable server implementation, but
- # we consider this to be a worthwhile tradeoff.
- # When a `Digest` is used to refer to a proto message, it always refers to the
- # message in binary encoded form. To ensure consistent hashing, clients and
- # servers MUST ensure that they serialize messages according to the following
- # rules, even if there are alternate valid encodings for the same message:
- # * Fields are serialized in tag order.
- # * There are no unknown fields.
- # * There are no duplicate fields.
- # * Fields are serialized according to the default semantics for their type.
- # Most protocol buffer implementations will always follow these rules when
- # serializing, but care should be taken to avoid shortcuts. For instance,
- # concatenating two messages to merge them may produce duplicate fields.
+ # and its hash. The hash algorithm to use is defined by the server. The size is
+ # considered to be an integral part of the digest and cannot be separated. That
+ # is, even if the `hash` field is correctly specified but `size_bytes` is not,
+ # the server MUST reject the request. The reason for including the size in the
+ # digest is as follows: in a great many cases, the server needs to know the size
+ # of the blob it is about to work with prior to starting an operation with it,
+ # such as flattening Merkle tree structures or streaming it to a worker.
+ # Technically, the server could implement a separate metadata store, but this
+ # results in a significantly more complicated implementation as opposed to
+ # having the client specify the size up-front (or storing the size along with
+ # the digest in every message where digests are embedded). This does mean that
+ # the API leaks some implementation details of (what we consider to be) a
+ # reasonable server implementation, but we consider this to be a worthwhile
+ # tradeoff. When a `Digest` is used to refer to a proto message, it always
+ # refers to the message in binary encoded form. To ensure consistent hashing,
+ # clients and servers MUST ensure that they serialize messages according to the
+ # following rules, even if there are alternate valid encodings for the same
+ # message: * Fields are serialized in tag order. * There are no unknown fields. *
+ # There are no duplicate fields. * Fields are serialized according to the
+ # default semantics for their type. Most protocol buffer implementations will
+ # always follow these rules when serializing, but care should be taken to avoid
+ # shortcuts. For instance, concatenating two messages to merge them may produce
+ # duplicate fields.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
  attr_accessor :digest
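The inlining rule on `contents` reads naturally as a server-side guard. A minimal sketch, where `MAX_INLINE_BYTES` is an assumed server-chosen budget rather than a documented constant:

```ruby
# Sketch of the SHOULD NOT / MAY / MUST rules above: inline only when the
# client asked for it, and never past the response size budget.
MAX_INLINE_BYTES = 1024 * 1024 # assumed limit, server-specific in practice

def inline_contents?(client_requested_inlining, file_bytes)
  client_requested_inlining && file_bytes.bytesize <= MAX_INLINE_BYTES
end
```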
@@ -1245,8 +1085,8 @@ module Google
  attr_accessor :node_properties
 
  # The full path of the file relative to the working directory, including the
- # filename. The path separator is a forward slash `/`. Since this is a
- # relative path, it MUST NOT begin with a leading forward slash.
+ # filename. The path separator is a forward slash `/`. Since this is a relative
+ # path, it MUST NOT begin with a leading forward slash.
  # Corresponds to the JSON property `path`
  # @return [String]
  attr_accessor :path
@@ -1265,32 +1105,29 @@ module Google
  end
  end
 
- # An `OutputSymlink` is similar to a
- # Symlink, but it is used as an
- # output in an `ActionResult`.
- # `OutputSymlink` is binary-compatible with `SymlinkNode`.
+ # An `OutputSymlink` is similar to a Symlink, but it is used as an output in an `
+ # ActionResult`. `OutputSymlink` is binary-compatible with `SymlinkNode`.
  class BuildBazelRemoteExecutionV2OutputSymlink
  include Google::Apis::Core::Hashable
 
- # The supported node properties of the OutputSymlink, if requested by the
- # Action.
+ # The supported node properties of the OutputSymlink, if requested by the Action.
  # Corresponds to the JSON property `nodeProperties`
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
  attr_accessor :node_properties
 
  # The full path of the symlink relative to the working directory, including the
- # filename. The path separator is a forward slash `/`. Since this is a
- # relative path, it MUST NOT begin with a leading forward slash.
+ # filename. The path separator is a forward slash `/`. Since this is a relative
+ # path, it MUST NOT begin with a leading forward slash.
  # Corresponds to the JSON property `path`
  # @return [String]
  attr_accessor :path
 
- # The target path of the symlink. The path separator is a forward slash `/`.
- # The target path can be relative to the parent directory of the symlink or
- # it can be an absolute path starting with `/`. Support for absolute paths
- # can be checked using the Capabilities
- # API. The canonical form forbids the substrings `/./` and `//` in the target
- # path. `..` components are allowed anywhere in the target path.
+ # The target path of the symlink. The path separator is a forward slash `/`. The
+ # target path can be relative to the parent directory of the symlink or it can
+ # be an absolute path starting with `/`. Support for absolute paths can be
+ # checked using the Capabilities API. The canonical form forbids the substrings `
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
+ # target path.
  # Corresponds to the JSON property `target`
  # @return [String]
  attr_accessor :target
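The canonical-form restriction on `target` is mechanical enough to check directly; a hypothetical validator (not gem API):

```ruby
# Sketch: canonical form forbids the substrings `//` and `/./` in a symlink
# target; `..` segments and absolute targets starting with `/` remain legal.
def canonical_symlink_target?(target)
  !target.include?('//') && !target.include?('/./')
end

canonical_symlink_target?('../lib/libfoo.so') # => true
canonical_symlink_target?('a//b')             # => false
```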
@@ -1308,17 +1145,16 @@ module Google
  end
 
  # A `Platform` is a set of requirements, such as hardware, operating system, or
- # compiler toolchain, for an
- # Action's execution
- # environment. A `Platform` is represented as a series of key-value pairs
- # representing the properties that are required of the platform.
+ # compiler toolchain, for an Action's execution environment. A `Platform` is
+ # represented as a series of key-value pairs representing the properties that
+ # are required of the platform.
  class BuildBazelRemoteExecutionV2Platform
  include Google::Apis::Core::Hashable
 
- # The properties that make up this platform. In order to ensure that
- # equivalent `Platform`s always hash to the same value, the properties MUST
- # be lexicographically sorted by name, and then by value. Sorting of strings
- # is done by code point, equivalently, by the UTF-8 bytes.
+ # The properties that make up this platform. In order to ensure that equivalent `
+ # Platform`s always hash to the same value, the properties MUST be
+ # lexicographically sorted by name, and then by value. Sorting of strings is
+ # done by code point, equivalently, by the UTF-8 bytes.
  # Corresponds to the JSON property `properties`
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2PlatformProperty>]
  attr_accessor :properties
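A sketch of the ordering rule on `properties`, using plain name/value hashes in place of the generated `PlatformProperty` objects; Ruby's string comparison is already byte-wise, matching the code-point requirement:

```ruby
# Sketch: sort platform properties by name, then value, so equivalent
# Platforms always hash identically.
def canonicalize_properties(properties)
  properties.sort_by { |prop| [prop[:name], prop[:value]] }
end

canonicalize_properties([
  { name: 'os',  value: 'linux'  },
  { name: 'ISA', value: 'x86-64' }
])
# => [{:name=>"ISA", :value=>"x86-64"}, {:name=>"os", :value=>"linux"}]
```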
@@ -1335,19 +1171,16 @@ module Google
 
  # A single property for the environment. The server is responsible for
  # specifying the property `name`s that it accepts. If an unknown `name` is
- # provided in the requirements for an
- # Action, the server SHOULD
- # reject the execution request. If permitted by the server, the same `name`
- # may occur multiple times.
- # The server is also responsible for specifying the interpretation of
- # property `value`s. For instance, a property describing how much RAM must be
- # available may be interpreted as allowing a worker with 16GB to fulfill a
- # request for 8GB, while a property describing the OS environment on which
- # the action must be performed may require an exact match with the worker's
- # OS.
- # The server MAY use the `value` of one or more properties to determine how
- # it sets up the execution environment, such as by making specific system
- # files available to the worker.
+ # provided in the requirements for an Action, the server SHOULD reject the
+ # execution request. If permitted by the server, the same `name` may occur
+ # multiple times. The server is also responsible for specifying the
+ # interpretation of property `value`s. For instance, a property describing how
+ # much RAM must be available may be interpreted as allowing a worker with 16GB
+ # to fulfill a request for 8GB, while a property describing the OS environment
+ # on which the action must be performed may require an exact match with the
+ # worker's OS. The server MAY use the `value` of one or more properties to
+ # determine how it sets up the execution environment, such as by making specific
+ # system files available to the worker.
  class BuildBazelRemoteExecutionV2PlatformProperty
  include Google::Apis::Core::Hashable
 
@@ -1375,27 +1208,25 @@ module Google
  # An optional Metadata to attach to any RPC request to tell the server about an
  # external context of the request. The server may use this for logging or other
  # purposes. To use it, the client attaches the header to the call using the
- # canonical proto serialization:
- # * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
- # * contents: the base64 encoded binary `RequestMetadata` message.
- # Note: the gRPC library serializes binary headers encoded in base 64 by
- # default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
- # requests).
- # Therefore, if the gRPC library is used to pass/retrieve this
+ # canonical proto serialization: * name: `build.bazel.remote.execution.v2.
+ # requestmetadata-bin` * contents: the base64 encoded binary `RequestMetadata`
+ # message. Note: the gRPC library serializes binary headers encoded in base 64
+ # by default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#
+ # requests). Therefore, if the gRPC library is used to pass/retrieve this
  # metadata, the user may ignore the base64 encoding and assume it is simply
  # serialized as a binary message.
  class BuildBazelRemoteExecutionV2RequestMetadata
  include Google::Apis::Core::Hashable
 
- # An identifier that ties multiple requests to the same action.
- # For example, multiple requests to the CAS, Action Cache, and Execution
- # API are used in order to compile foo.cc.
+ # An identifier that ties multiple requests to the same action. For example,
+ # multiple requests to the CAS, Action Cache, and Execution API are used in
+ # order to compile foo.cc.
  # Corresponds to the JSON property `actionId`
  # @return [String]
  attr_accessor :action_id
 
- # An identifier to tie multiple tool invocations together. For example,
- # runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
+ # An identifier to tie multiple tool invocations together. For example, runs of
+ # foo_test, bar_test and baz_test on a post-submit of a given patch.
  # Corresponds to the JSON property `correlatedInvocationsId`
  # @return [String]
  attr_accessor :correlated_invocations_id
@@ -1405,8 +1236,8 @@ module Google
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2ToolDetails]
  attr_accessor :tool_details
 
- # An identifier that ties multiple actions together to a final result.
- # For example, multiple actions are required to build and run foo_test.
+ # An identifier that ties multiple actions together to a final result. For
+ # example, multiple actions are required to build and run foo_test.
  # Corresponds to the JSON property `toolInvocationId`
  # @return [String]
  attr_accessor :tool_invocation_id
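A sketch of the header convention described above. The metadata bytes here are a placeholder, not a real serialized `RequestMetadata`; with gRPC's own Ruby library a `-bin` suffixed key is base64-handled automatically, so the explicit encoding below only applies outside that path:

```ruby
require 'base64'

metadata_bin = "\x0a\x09my-tool-1".b # placeholder bytes, not a real message
headers = {
  'build.bazel.remote.execution.v2.requestmetadata-bin' =>
    Base64.strict_encode64(metadata_bin)
}
```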
@@ -1438,12 +1269,12 @@ module Google
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
  attr_accessor :node_properties
 
- # The target path of the symlink. The path separator is a forward slash `/`.
- # The target path can be relative to the parent directory of the symlink or
- # it can be an absolute path starting with `/`. Support for absolute paths
- # can be checked using the Capabilities
- # API. The canonical form forbids the substrings `/./` and `//` in the target
- # path. `..` components are allowed anywhere in the target path.
+ # The target path of the symlink. The path separator is a forward slash `/`. The
+ # target path can be relative to the parent directory of the symlink or it can
+ # be an absolute path starting with `/`. Support for absolute paths can be
+ # checked using the Capabilities API. The canonical form forbids the substrings `
+ # /./` and `//` in the target path. `..` components are allowed anywhere in the
+ # target path.
  # Corresponds to the JSON property `target`
  # @return [String]
  attr_accessor :target
@@ -1485,90 +1316,45 @@ module Google
1485
1316
  end
1486
1317
  end
1487
1318
 
1488
- # A `Tree` contains all the
1489
- # Directory protos in a
1490
- # single directory Merkle tree, compressed into one message.
1319
+ # A `Tree` contains all the Directory protos in a single directory Merkle tree,
1320
+ # compressed into one message.
1491
1321
  class BuildBazelRemoteExecutionV2Tree
1492
1322
  include Google::Apis::Core::Hashable
1493
1323
 
1494
1324
  # All the child directories: the directories referred to by the root and,
1495
- # recursively, all its children. In order to reconstruct the directory tree,
1496
- # the client must take the digests of each of the child directories and then
1497
- # build up a tree starting from the `root`.
1325
+ # recursively, all its children. In order to reconstruct the directory tree, the
1326
+ # client must take the digests of each of the child directories and then build
1327
+ # up a tree starting from the `root`.
1498
1328
  # Corresponds to the JSON property `children`
1499
1329
  # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Directory>]
1500
1330
  attr_accessor :children
1501
1331
 
1502
1332
  # A `Directory` represents a directory node in a file tree, containing zero or
1503
- # more children FileNodes,
1504
- # DirectoryNodes and
1505
- # SymlinkNodes.
1506
- # Each `Node` contains its name in the directory, either the digest of its
1507
- # content (either a file blob or a `Directory` proto) or a symlink target, as
1508
- # well as possibly some metadata about the file or directory.
1509
- # In order to ensure that two equivalent directory trees hash to the same
1510
- # value, the following restrictions MUST be obeyed when constructing a
1511
- # a `Directory`:
1512
- # * Every child in the directory must have a path of exactly one segment.
1513
- # Multiple levels of directory hierarchy may not be collapsed.
1514
- # * Each child in the directory must have a unique path segment (file name).
1515
- # Note that while the API itself is case-sensitive, the environment where
1516
- # the Action is executed may or may not be case-sensitive. That is, it is
1517
- # legal to call the API with a Directory that has both "Foo" and "foo" as
1518
- # children, but the Action may be rejected by the remote system upon
1519
- # execution.
1520
- # * The files, directories and symlinks in the directory must each be sorted
1521
- # in lexicographical order by path. The path strings must be sorted by code
1522
- # point, equivalently, by UTF-8 bytes.
1523
- # * The NodeProperties of files,
1524
- # directories, and symlinks must be sorted in lexicographical order by
1525
- # property name.
1526
- # A `Directory` that obeys the restrictions is said to be in canonical form.
1527
- # As an example, the following could be used for a file named `bar` and a
1333
+ # more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains
1334
+ # its name in the directory, either the digest of its content (either a file
1335
+ # blob or a `Directory` proto) or a symlink target, as well as possibly some
1336
+ # metadata about the file or directory. In order to ensure that two equivalent
1337
+ # directory trees hash to the same value, the following restrictions MUST be
1338
+ # obeyed when constructing a `Directory`: * Every child in the directory must
1339
+ # have a path of exactly one segment. Multiple levels of directory hierarchy may
1340
+ # not be collapsed. * Each child in the directory must have a unique path
1341
+ # segment (file name). Note that while the API itself is case-sensitive, the
1342
+ # environment where the Action is executed may or may not be case-sensitive.
1343
+ # That is, it is legal to call the API with a Directory that has both "Foo" and "
1344
+ # foo" as children, but the Action may be rejected by the remote system upon
1345
+ # execution. * The files, directories and symlinks in the directory must each be
1346
+ # sorted in lexicographical order by path. The path strings must be sorted by
1347
+ # code point, equivalently, by UTF-8 bytes. * The NodeProperties of files,
1348
+ # directories, and symlinks must be sorted in lexicographical order by property
1349
+ # name. A `Directory` that obeys the restrictions is said to be in canonical
1350
+ # form. As an example, the following could be used for a file named `bar` and a
1528
1351
  # directory named `foo` with an executable file named `baz` (hashes shortened
1529
- # for readability):
1530
- # ```json
1531
- # // (Directory proto)
1532
- # `
1533
- # files: [
1534
- # `
1535
- # name: "bar",
1536
- # digest: `
1537
- # hash: "4a73bc9d03...",
1538
- # size: 65534
1539
- # `,
1540
- # node_properties: [
1541
- # `
1542
- # "name": "MTime",
1543
- # "value": "2017-01-15T01:30:15.01Z"
1544
- # `
1545
- # ]
1546
- # `
1547
- # ],
1548
- # directories: [
1549
- # `
1550
- # name: "foo",
1551
- # digest: `
1552
- # hash: "4cf2eda940...",
1553
- # size: 43
1554
- # `
1555
- # `
1556
- # ]
1557
- # `
1558
- # // (Directory proto with hash "4cf2eda940..." and size 43)
1559
- # `
1560
- # files: [
1561
- # `
1562
- # name: "baz",
1563
- # digest: `
1564
- # hash: "b2c941073e...",
1565
- # size: 1294,
1566
- # `,
1567
- # is_executable: true
1568
- # `
1569
- # ]
1570
- # `
1571
- # ```
1352
+ # for readability): ```json // (Directory proto) ` files: [ ` name: "bar",
1353
+ # digest: ` hash: "4a73bc9d03...", size: 65534 `, node_properties: [ ` "name": "
1354
+ # MTime", "value": "2017-01-15T01:30:15.01Z" ` ] ` ], directories: [ ` name: "
1355
+ # foo", digest: ` hash: "4cf2eda940...", size: 43 ` ` ] ` // (Directory proto
1356
+ # with hash "4cf2eda940..." and size 43) ` files: [ ` name: "baz", digest: `
1357
+ # hash: "b2c941073e...", size: 1294, `, is_executable: true ` ] ` ```
1572
1358
  # Corresponds to the JSON property `root`
1573
1359
  # @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Directory]
1574
1360
  attr_accessor :root
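To make the reconstruction concrete, a minimal Ruby sketch of walking a fetched `Tree`. The `digest_of` helper is hypothetical: real digests are computed over each child `Directory`'s canonical wire-format serialization, which this JSON-level client does not do for you, and the sketch assumes the usual `name` and `digest` accessors on the FileNode/DirectoryNode classes defined elsewhere in this file.

    # Print every file path in the tree, assuming `tree` is a
    # BuildBazelRemoteExecutionV2Tree and digest_of(dir) returns the hash of
    # a Directory's canonical serialization (hypothetical helper).
    def walk(dir, children_by_hash, prefix = '')
      (dir.files || []).each { |f| puts "#{prefix}#{f.name}" }
      (dir.directories || []).each do |node|
        child = children_by_hash.fetch(node.digest.hash_prop)
        walk(child, children_by_hash, "#{prefix}#{node.name}/")
      end
    end

    children_by_hash = (tree.children || []).map { |d| [digest_of(d), d] }.to_h
    walk(tree.root, children_by_hash)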
@@ -1608,8 +1394,8 @@ module Google
1608
1394
  class GoogleDevtoolsRemotebuildbotCommandDurations
1609
1395
  include Google::Apis::Core::Hashable
1610
1396
 
1611
- # The time spent preparing the command to be run in a Docker container
1612
- # (includes pulling the Docker image, if necessary).
1397
+ # The time spent preparing the command to be run in a Docker container (includes
1398
+ # pulling the Docker image, if necessary).
1613
1399
  # Corresponds to the JSON property `dockerPrep`
1614
1400
  # @return [String]
1615
1401
  attr_accessor :docker_prep
@@ -1685,13 +1471,13 @@ module Google
1685
1471
  end
1686
1472
  end
1687
1473
 
1688
- # CommandEvents contains counters for the number of warnings and errors
1689
- # that occurred during the execution of a command.
1474
+ # CommandEvents contains counters for the number of warnings and errors that
1475
+ # occurred during the execution of a command.
1690
1476
  class GoogleDevtoolsRemotebuildbotCommandEvents
1691
1477
  include Google::Apis::Core::Hashable
1692
1478
 
1693
- # Indicates whether we are using a cached Docker image (true) or had to pull
1694
- # the Docker image (false) for this command.
1479
+ # Indicates whether we are using a cached Docker image (true) or had to pull the
1480
+ # Docker image (false) for this command.
1695
1481
  # Corresponds to the JSON property `dockerCacheHit`
1696
1482
  # @return [Boolean]
1697
1483
  attr_accessor :docker_cache_hit
@@ -1861,28 +1647,24 @@ module Google
1861
1647
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateInstanceRequest
1862
1648
  include Google::Apis::Core::Hashable
1863
1649
 
1864
- # Instance conceptually encapsulates all Remote Build Execution resources
1865
- # for remote builds.
1866
- # An instance consists of storage and compute resources (for example,
1867
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
1868
- # running remote builds.
1869
- # All Remote Build Execution API calls are scoped to an instance.
1650
+ # Instance conceptually encapsulates all Remote Build Execution resources for
1651
+ # remote builds. An instance consists of storage and compute resources (for
1652
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
1653
+ # running remote builds. All Remote Build Execution API calls are scoped to an
1654
+ # instance.
1870
1655
  # Corresponds to the JSON property `instance`
1871
1656
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
1872
1657
  attr_accessor :instance
1873
1658
 
1874
- # ID of the created instance.
1875
- # A valid `instance_id` must:
1876
- # be 6-50 characters long,
1877
- # contain only lowercase letters, digits, hyphens and underscores,
1878
- # start with a lowercase letter, and
1879
- # end with a lowercase letter or a digit.
1659
+ # ID of the created instance. A valid `instance_id` must: be 6-50 characters
1660
+ # long, contain only lowercase letters, digits, hyphens and underscores, start
1661
+ # with a lowercase letter, and end with a lowercase letter or a digit.
1880
1662
  # Corresponds to the JSON property `instanceId`
1881
1663
  # @return [String]
1882
1664
  attr_accessor :instance_id
1883
1665
 
1884
- # Resource name of the project containing the instance.
1885
- # Format: `projects/[PROJECT_ID]`.
1666
+ # Resource name of the project containing the instance. Format: `projects/[
1667
+ # PROJECT_ID]`.
1886
1668
  # Corresponds to the JSON property `parent`
1887
1669
  # @return [String]
1888
1670
  attr_accessor :parent
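The `instance_id` rules above collapse into a single anchored regular expression; a quick sketch (the constant name is illustrative, not part of the gem):

    # A lowercase letter, then 4-48 of [a-z0-9_-], then a letter or digit:
    # 6-50 characters in total.
    VALID_INSTANCE_ID = /\A[a-z][a-z0-9_-]{4,48}[a-z0-9]\z/

    VALID_INSTANCE_ID.match?('default_instance')  # => true
    VALID_INSTANCE_ID.match?('1-starts-wrong')    # => false (must start with a letter)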
@@ -1903,18 +1685,15 @@ module Google
1903
1685
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateWorkerPoolRequest
1904
1686
  include Google::Apis::Core::Hashable
1905
1687
 
1906
- # Resource name of the instance in which to create the new worker pool.
1907
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1688
+ # Resource name of the instance in which to create the new worker pool. Format: `
1689
+ # projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1908
1690
  # Corresponds to the JSON property `parent`
1909
1691
  # @return [String]
1910
1692
  attr_accessor :parent
1911
1693
 
1912
- # ID of the created worker pool.
1913
- # A valid pool ID must:
1914
- # be 6-50 characters long,
1915
- # contain only lowercase letters, digits, hyphens and underscores,
1916
- # start with a lowercase letter, and
1917
- # end with a lowercase letter or a digit.
1694
+ # ID of the created worker pool. A valid pool ID must: be 6-50 characters long,
1695
+ # contain only lowercase letters, digits, hyphens and underscores, start with a
1696
+ # lowercase letter, and end with a lowercase letter or a digit.
1918
1697
  # Corresponds to the JSON property `poolId`
1919
1698
  # @return [String]
1920
1699
  attr_accessor :pool_id
@@ -1940,8 +1719,8 @@ module Google
1940
1719
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteInstanceRequest
1941
1720
  include Google::Apis::Core::Hashable
1942
1721
 
1943
- # Name of the instance to delete.
1944
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1722
+ # Name of the instance to delete. Format: `projects/[PROJECT_ID]/instances/[
1723
+ # INSTANCE_ID]`.
1945
1724
  # Corresponds to the JSON property `name`
1946
1725
  # @return [String]
1947
1726
  attr_accessor :name
@@ -1960,9 +1739,8 @@ module Google
1960
1739
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteWorkerPoolRequest
1961
1740
  include Google::Apis::Core::Hashable
1962
1741
 
1963
- # Name of the worker pool to delete.
1964
- # Format:
1965
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
1742
+ # Name of the worker pool to delete. Format: `projects/[PROJECT_ID]/instances/[
1743
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
1966
1744
  # Corresponds to the JSON property `name`
1967
1745
  # @return [String]
1968
1746
  attr_accessor :name
@@ -1977,12 +1755,107 @@ module Google
1977
1755
  end
1978
1756
  end
1979
1757
 
1758
+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
1759
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
1760
+ # usage time.
1761
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy
1762
+ include Google::Apis::Core::Hashable
1763
+
1764
+ # Defines whether a feature can be used or what values are accepted.
1765
+ # Corresponds to the JSON property `containerImageSources`
1766
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1767
+ attr_accessor :container_image_sources
1768
+
1769
+ # Defines whether a feature can be used or what values are accepted.
1770
+ # Corresponds to the JSON property `dockerAddCapabilities`
1771
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1772
+ attr_accessor :docker_add_capabilities
1773
+
1774
+ # Defines whether a feature can be used or what values are accepted.
1775
+ # Corresponds to the JSON property `dockerChrootPath`
1776
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1777
+ attr_accessor :docker_chroot_path
1778
+
1779
+ # Defines whether a feature can be used or what values are accepted.
1780
+ # Corresponds to the JSON property `dockerNetwork`
1781
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1782
+ attr_accessor :docker_network
1783
+
1784
+ # Defines whether a feature can be used or what values are accepted.
1785
+ # Corresponds to the JSON property `dockerPrivileged`
1786
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1787
+ attr_accessor :docker_privileged
1788
+
1789
+ # Defines whether a feature can be used or what values are accepted.
1790
+ # Corresponds to the JSON property `dockerRunAsRoot`
1791
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1792
+ attr_accessor :docker_run_as_root
1793
+
1794
+ # Defines whether a feature can be used or what values are accepted.
1795
+ # Corresponds to the JSON property `dockerRuntime`
1796
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1797
+ attr_accessor :docker_runtime
1798
+
1799
+ # Defines whether a feature can be used or what values are accepted.
1800
+ # Corresponds to the JSON property `dockerSiblingContainers`
1801
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature]
1802
+ attr_accessor :docker_sibling_containers
1803
+
1804
+ # linux_isolation allows overriding the docker runtime used for containers
1805
+ # started on Linux.
1806
+ # Corresponds to the JSON property `linuxIsolation`
1807
+ # @return [String]
1808
+ attr_accessor :linux_isolation
1809
+
1810
+ def initialize(**args)
1811
+ update!(**args)
1812
+ end
1813
+
1814
+ # Update properties of this object
1815
+ def update!(**args)
1816
+ @container_image_sources = args[:container_image_sources] if args.key?(:container_image_sources)
1817
+ @docker_add_capabilities = args[:docker_add_capabilities] if args.key?(:docker_add_capabilities)
1818
+ @docker_chroot_path = args[:docker_chroot_path] if args.key?(:docker_chroot_path)
1819
+ @docker_network = args[:docker_network] if args.key?(:docker_network)
1820
+ @docker_privileged = args[:docker_privileged] if args.key?(:docker_privileged)
1821
+ @docker_run_as_root = args[:docker_run_as_root] if args.key?(:docker_run_as_root)
1822
+ @docker_runtime = args[:docker_runtime] if args.key?(:docker_runtime)
1823
+ @docker_sibling_containers = args[:docker_sibling_containers] if args.key?(:docker_sibling_containers)
1824
+ @linux_isolation = args[:linux_isolation] if args.key?(:linux_isolation)
1825
+ end
1826
+ end
1827
+
1828
+ # Defines whether a feature can be used or what values are accepted.
1829
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature
1830
+ include Google::Apis::Core::Hashable
1831
+
1832
+ # A list of acceptable values. Only effective when the policy is `RESTRICTED`.
1833
+ # Corresponds to the JSON property `allowedValues`
1834
+ # @return [Array<String>]
1835
+ attr_accessor :allowed_values
1836
+
1837
+ # The policy of the feature.
1838
+ # Corresponds to the JSON property `policy`
1839
+ # @return [String]
1840
+ attr_accessor :policy
1841
+
1842
+ def initialize(**args)
1843
+ update!(**args)
1844
+ end
1845
+
1846
+ # Update properties of this object
1847
+ def update!(**args)
1848
+ @allowed_values = args[:allowed_values] if args.key?(:allowed_values)
1849
+ @policy = args[:policy] if args.key?(:policy)
1850
+ end
1851
+ end
1852
+
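A sketch of wiring the two classes above together. The `RESTRICTED` value comes from the `allowed_values` comment; the registry prefix is hypothetical.

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    # Restrict container images to a single (hypothetical) registry path.
    policy = RBE::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy.new(
      container_image_sources: RBE::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeature.new(
        policy: 'RESTRICTED',
        allowed_values: ['gcr.io/my-project']
      )
    )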
1980
1853
  # The request used for `GetInstance`.
1981
1854
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest
1982
1855
  include Google::Apis::Core::Hashable
1983
1856
 
1984
- # Name of the instance to retrieve.
1985
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
1857
+ # Name of the instance to retrieve. Format: `projects/[PROJECT_ID]/instances/[
1858
+ # INSTANCE_ID]`.
1986
1859
  # Corresponds to the JSON property `name`
1987
1860
  # @return [String]
1988
1861
  attr_accessor :name
@@ -2001,9 +1874,8 @@ module Google
2001
1874
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetWorkerPoolRequest
2002
1875
  include Google::Apis::Core::Hashable
2003
1876
 
2004
- # Name of the worker pool to retrieve.
2005
- # Format:
2006
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
1877
+ # Name of the worker pool to retrieve. Format: `projects/[PROJECT_ID]/instances/[
1878
+ # INSTANCE_ID]/workerpools/[POOL_ID]`.
2007
1879
  # Corresponds to the JSON property `name`
2008
1880
  # @return [String]
2009
1881
  attr_accessor :name
@@ -2018,15 +1890,21 @@ module Google
2018
1890
  end
2019
1891
  end
2020
1892
 
2021
- # Instance conceptually encapsulates all Remote Build Execution resources
2022
- # for remote builds.
2023
- # An instance consists of storage and compute resources (for example,
2024
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2025
- # running remote builds.
2026
- # All Remote Build Execution API calls are scoped to an instance.
1893
+ # Instance conceptually encapsulates all Remote Build Execution resources for
1894
+ # remote builds. An instance consists of storage and compute resources (for
1895
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
1896
+ # running remote builds. All Remote Build Execution API calls are scoped to an
1897
+ # instance.
2027
1898
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance
2028
1899
  include Google::Apis::Core::Hashable
2029
1900
 
1901
+ # FeaturePolicy defines features allowed to be used on RBE instances, as well as
1902
+ # instance-wide behavior changes that take effect without opt-in or opt-out at
1903
+ # usage time.
1904
+ # Corresponds to the JSON property `featurePolicy`
1905
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicy]
1906
+ attr_accessor :feature_policy
1907
+
2030
1908
  # The location is a GCP region. Currently only `us-central1` is supported.
2031
1909
  # Corresponds to the JSON property `location`
2032
1910
  # @return [String]
@@ -2038,10 +1916,9 @@ module Google
2038
1916
  attr_accessor :logging_enabled
2039
1917
  alias_method :logging_enabled?, :logging_enabled
2040
1918
 
2041
- # Output only. Instance resource name formatted as:
2042
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2043
- # Name should not be populated when creating an instance since it is provided
2044
- # in the `instance_id` field.
1919
+ # Output only. Instance resource name formatted as: `projects/[PROJECT_ID]/
1920
+ # instances/[INSTANCE_ID]`. Name should not be populated when creating an
1921
+ # instance since it is provided in the `instance_id` field.
2045
1922
  # Corresponds to the JSON property `name`
2046
1923
  # @return [String]
2047
1924
  attr_accessor :name
@@ -2057,6 +1934,7 @@ module Google
2057
1934
 
2058
1935
  # Update properties of this object
2059
1936
  def update!(**args)
1937
+ @feature_policy = args[:feature_policy] if args.key?(:feature_policy)
2060
1938
  @location = args[:location] if args.key?(:location)
2061
1939
  @logging_enabled = args[:logging_enabled] if args.key?(:logging_enabled)
2062
1940
  @name = args[:name] if args.key?(:name)
@@ -2068,8 +1946,7 @@ module Google
2068
1946
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListInstancesRequest
2069
1947
  include Google::Apis::Core::Hashable
2070
1948
 
2071
- # Resource name of the project.
2072
- # Format: `projects/[PROJECT_ID]`.
1949
+ # Resource name of the project. Format: `projects/[PROJECT_ID]`.
2073
1950
  # Corresponds to the JSON property `parent`
2074
1951
  # @return [String]
2075
1952
  attr_accessor :parent
@@ -2107,32 +1984,26 @@ module Google
2107
1984
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest
2108
1985
  include Google::Apis::Core::Hashable
2109
1986
 
2110
- # Optional. A filter expression that filters resources listed in
2111
- # the response. The expression must specify the field name, a comparison
2112
- # operator, and the value that you want to use for filtering. The value
2113
- # must be a string, a number, or a boolean. String values are
2114
- # case-insensitive.
2115
- # The comparison operator must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or
2116
- # `<`.
2117
- # The `:` operator can be used with string fields to match substrings.
2118
- # For non-string fields it is equivalent to the `=` operator.
2119
- # The `:*` comparison can be used to test whether a key has been defined.
2120
- # You can also filter on nested fields.
2121
- # To filter on multiple expressions, you can separate expression using
2122
- # `AND` and `OR` operators, using parentheses to specify precedence. If
2123
- # neither operator is specified, `AND` is assumed.
2124
- # Examples:
2125
- # Include only pools with more than 100 reserved workers:
2126
- # `(worker_count > 100) (worker_config.reserved = true)`
2127
- # Include only pools with a certain label or machines of the n1-standard
2128
- # family:
1987
+ # Optional. A filter expression that filters resources listed in the response.
1988
+ # The expression must specify the field name, a comparison operator, and the
1989
+ # value that you want to use for filtering. The value must be a string, a number,
1990
+ # or a boolean. String values are case-insensitive. The comparison operator
1991
+ # must be either `:`, `=`, `!=`, `>`, `>=`, `<=` or `<`. The `:` operator can be
1992
+ # used with string fields to match substrings. For non-string fields it is
1993
+ # equivalent to the `=` operator. The `:*` comparison can be used to test
1994
+ # whether a key has been defined. You can also filter on nested fields. To
1995
+ # filter on multiple expressions, you can separate expression using `AND` and `
1996
+ # OR` operators, using parentheses to specify precedence. If neither operator is
1997
+ # specified, `AND` is assumed. Examples: Include only pools with more than 100
1998
+ # reserved workers: `(worker_count > 100) (worker_config.reserved = true)`
1999
+ # Include only pools with a certain label or machines of the n1-standard family:
2129
2000
  # `worker_config.labels.key1 : * OR worker_config.machine_type: n1-standard`
2130
2001
  # Corresponds to the JSON property `filter`
2131
2002
  # @return [String]
2132
2003
  attr_accessor :filter
2133
2004
 
2134
- # Resource name of the instance.
2135
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2005
+ # Resource name of the instance. Format: `projects/[PROJECT_ID]/instances/[
2006
+ # INSTANCE_ID]`.
2136
2007
  # Corresponds to the JSON property `parent`
2137
2008
  # @return [String]
2138
2009
  attr_accessor :parent
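Combining the two documented example filters with an explicit `AND`, a sketch of a filled-in request (the project and instance names are hypothetical):

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    req = RBE::GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest.new(
      parent: 'projects/my-project/instances/default_instance',
      filter: '(worker_count > 100) AND (worker_config.reserved = true)'
    )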
@@ -2167,40 +2038,62 @@ module Google
2167
2038
  end
2168
2039
  end
2169
2040
 
2041
+ # SoleTenancyConfig specifies information required to host a pool on STNs.
2042
+ class GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig
2043
+ include Google::Apis::Core::Hashable
2044
+
2045
+ # The sole-tenant node type to host the pool's workers on.
2046
+ # Corresponds to the JSON property `nodeType`
2047
+ # @return [String]
2048
+ attr_accessor :node_type
2049
+
2050
+ # Zone in which STNs are reserved.
2051
+ # Corresponds to the JSON property `nodesZone`
2052
+ # @return [String]
2053
+ attr_accessor :nodes_zone
2054
+
2055
+ def initialize(**args)
2056
+ update!(**args)
2057
+ end
2058
+
2059
+ # Update properties of this object
2060
+ def update!(**args)
2061
+ @node_type = args[:node_type] if args.key?(:node_type)
2062
+ @nodes_zone = args[:nodes_zone] if args.key?(:nodes_zone)
2063
+ end
2064
+ end
2065
+
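A sketch with illustrative values; substitute whatever sole-tenant node type the project has actually reserved, and a zone that holds the reservation:

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    sole_tenancy = RBE::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig.new(
      node_type: 'n1-node-96-624',  # hypothetical sole-tenant node type
      nodes_zone: 'us-central1-a'   # zone where the STNs are reserved
    )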
2170
2066
  # The request used for `UpdateInstance`.
2171
2067
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateInstanceRequest
2172
2068
  include Google::Apis::Core::Hashable
2173
2069
 
2174
- # Instance conceptually encapsulates all Remote Build Execution resources
2175
- # for remote builds.
2176
- # An instance consists of storage and compute resources (for example,
2177
- # `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2178
- # running remote builds.
2179
- # All Remote Build Execution API calls are scoped to an instance.
2070
+ # Instance conceptually encapsulates all Remote Build Execution resources for
2071
+ # remote builds. An instance consists of storage and compute resources (for
2072
+ # example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for
2073
+ # running remote builds. All Remote Build Execution API calls are scoped to an
2074
+ # instance.
2180
2075
  # Corresponds to the JSON property `instance`
2181
2076
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance]
2182
2077
  attr_accessor :instance
2183
2078
 
2184
- # Deprecated, use instance.logging_enabled instead.
2185
- # Whether to enable Stackdriver logging for this instance.
2079
+ # Deprecated, use instance.logging_enabled instead. Whether to enable
2080
+ # Stackdriver logging for this instance.
2186
2081
  # Corresponds to the JSON property `loggingEnabled`
2187
2082
  # @return [Boolean]
2188
2083
  attr_accessor :logging_enabled
2189
2084
  alias_method :logging_enabled?, :logging_enabled
2190
2085
 
2191
- # Deprecated, use instance.Name instead.
2192
- # Name of the instance to update.
2193
- # Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2086
+ # Deprecated, use instance.Name instead. Name of the instance to update. Format:
2087
+ # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`.
2194
2088
  # Corresponds to the JSON property `name`
2195
2089
  # @return [String]
2196
2090
  attr_accessor :name
2197
2091
 
2198
- # The update mask applies to instance. For the `FieldMask` definition, see
2199
- # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2200
- # fieldmask
2201
- # If an empty update_mask is provided, only the non-default valued field in
2202
- # the worker pool field will be updated. Note that in order to update a field
2203
- # to the default value (zero, false, empty string) an explicit update_mask
2092
+ # The update mask applies to instance. For the `FieldMask` definition, see https:
2093
+ # //developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2094
+ # fieldmask If an empty update_mask is provided, only the non-default valued
2095
+ # field in the worker pool field will be updated. Note that in order to update a
2096
+ # field to the default value (zero, false, empty string) an explicit update_mask
2204
2097
  # must be provided.
2205
2098
  # Corresponds to the JSON property `updateMask`
2206
2099
  # @return [String]
@@ -2223,13 +2116,11 @@ module Google
2223
2116
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateWorkerPoolRequest
2224
2117
  include Google::Apis::Core::Hashable
2225
2118
 
2226
- # The update mask applies to worker_pool. For the `FieldMask` definition,
2227
- # see
2119
+ # The update mask applies to worker_pool. For the `FieldMask` definition, see
2228
2120
  # https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#
2229
- # fieldmask
2230
- # If an empty update_mask is provided, only the non-default valued field in
2231
- # the worker pool field will be updated. Note that in order to update a field
2232
- # to the default value (zero, false, empty string) an explicit update_mask
2121
+ # fieldmask If an empty update_mask is provided, only the non-default valued
2122
+ # field in the worker pool field will be updated. Note that in order to update a
2123
+ # field to the default value (zero, false, empty string) an explicit update_mask
2233
2124
  # must be provided.
2234
2125
  # Corresponds to the JSON property `updateMask`
2235
2126
  # @return [String]
@@ -2251,8 +2142,7 @@ module Google
2251
2142
  end
2252
2143
  end
2253
2144
 
2254
- # Defines the configuration to be used for a creating workers in
2255
- # the worker pool.
2145
+ # Defines the configuration to be used for creating workers in the worker pool.
2256
2146
  class GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig
2257
2147
  include Google::Apis::Core::Hashable
2258
2148
 
@@ -2261,34 +2151,31 @@ module Google
2261
2151
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfig]
2262
2152
  attr_accessor :accelerator
2263
2153
 
2264
- # Required. Size of the disk attached to the worker, in GB.
2265
- # See https://cloud.google.com/compute/docs/disks/
2154
+ # Required. Size of the disk attached to the worker, in GB. See https://cloud.
2155
+ # google.com/compute/docs/disks/
2266
2156
  # Corresponds to the JSON property `diskSizeGb`
2267
2157
  # @return [Fixnum]
2268
2158
  attr_accessor :disk_size_gb
2269
2159
 
2270
- # Required. Disk Type to use for the worker.
2271
- # See [Storage
2272
- # options](https://cloud.google.com/compute/docs/disks/#introduction).
2273
- # Currently only `pd-standard` and `pd-ssd` are supported.
2160
+ # Required. Disk Type to use for the worker. See [Storage options](https://cloud.
2161
+ # google.com/compute/docs/disks/#introduction). Currently only `pd-standard` and
2162
+ # `pd-ssd` are supported.
2274
2163
  # Corresponds to the JSON property `diskType`
2275
2164
  # @return [String]
2276
2165
  attr_accessor :disk_type
2277
2166
 
2278
- # Labels associated with the workers.
2279
- # Label keys and values can be no longer than 63 characters, can only contain
2280
- # lowercase letters, numeric characters, underscores and dashes.
2281
- # International letters are permitted. Label keys must start with a letter.
2282
- # Label values are optional.
2283
- # There can not be more than 64 labels per resource.
2167
+ # Labels associated with the workers. Label keys and values can be no longer
2168
+ # than 63 characters, can only contain lowercase letters, numeric characters,
2169
+ # underscores and dashes. International letters are permitted. Label keys must
2170
+ # start with a letter. Label values are optional. There can not be more than 64
2171
+ # labels per resource.
2284
2172
  # Corresponds to the JSON property `labels`
2285
2173
  # @return [Hash<String,String>]
2286
2174
  attr_accessor :labels
2287
2175
 
2288
- # Required. Machine type of the worker, such as `n1-standard-2`.
2289
- # See https://cloud.google.com/compute/docs/machine-types for a list of
2290
- # supported machine types. Note that `f1-micro` and `g1-small` are not yet
2291
- # supported.
2176
+ # Required. Machine type of the worker, such as `n1-standard-2`. See https://
2177
+ # cloud.google.com/compute/docs/machine-types for a list of supported machine
2178
+ # types. Note that `f1-micro` and `g1-small` are not yet supported.
2292
2179
  # Corresponds to the JSON property `machineType`
2293
2180
  # @return [String]
2294
2181
  attr_accessor :machine_type
@@ -2298,30 +2185,34 @@ module Google
2298
2185
  # @return [Fixnum]
2299
2186
  attr_accessor :max_concurrent_actions
2300
2187
 
2301
- # Minimum CPU platform to use when creating the worker.
2302
- # See [CPU Platforms](https://cloud.google.com/compute/docs/cpu-platforms).
2188
+ # Minimum CPU platform to use when creating the worker. See [CPU Platforms](
2189
+ # https://cloud.google.com/compute/docs/cpu-platforms).
2303
2190
  # Corresponds to the JSON property `minCpuPlatform`
2304
2191
  # @return [String]
2305
2192
  attr_accessor :min_cpu_platform
2306
2193
 
2307
- # Determines the type of network access granted to workers. Possible values:
2308
- # - "public": Workers can connect to the public internet.
2309
- # - "private": Workers can only connect to Google APIs and services.
2310
- # - "restricted-private": Workers can only connect to Google APIs that are
2311
- # reachable through `restricted.googleapis.com` (`199.36.153.4/30`).
2194
+ # Determines the type of network access granted to workers. Possible values: - "
2195
+ # public": Workers can connect to the public internet. - "private": Workers can
2196
+ # only connect to Google APIs and services. - "restricted-private": Workers can
2197
+ # only connect to Google APIs that are reachable through `restricted.googleapis.
2198
+ # com` (`199.36.153.4/30`).
2312
2199
  # Corresponds to the JSON property `networkAccess`
2313
2200
  # @return [String]
2314
2201
  attr_accessor :network_access
2315
2202
 
2316
- # Determines whether the worker is reserved (equivalent to a Compute Engine
2317
- # on-demand VM and therefore won't be preempted).
2318
- # See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more
2319
- # details.
2203
+ # Determines whether the worker is reserved (equivalent to a Compute Engine on-
2204
+ # demand VM and therefore won't be preempted). See [Preemptible VMs](https://
2205
+ # cloud.google.com/preemptible-vms/) for more details.
2320
2206
  # Corresponds to the JSON property `reserved`
2321
2207
  # @return [Boolean]
2322
2208
  attr_accessor :reserved
2323
2209
  alias_method :reserved?, :reserved
2324
2210
 
2211
+ # SoleTenancyConfig specifies information required to host a pool on STNs.
2212
+ # Corresponds to the JSON property `soleTenancy`
2213
+ # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaSoleTenancyConfig]
2214
+ attr_accessor :sole_tenancy
2215
+
2325
2216
  # The name of the image used by each VM.
2326
2217
  # Corresponds to the JSON property `vmImage`
2327
2218
  # @return [String]
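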
@@ -2342,6 +2233,7 @@ module Google
2342
2233
  @min_cpu_platform = args[:min_cpu_platform] if args.key?(:min_cpu_platform)
2343
2234
  @network_access = args[:network_access] if args.key?(:network_access)
2344
2235
  @reserved = args[:reserved] if args.key?(:reserved)
2236
+ @sole_tenancy = args[:sole_tenancy] if args.key?(:sole_tenancy)
2345
2237
  @vm_image = args[:vm_image] if args.key?(:vm_image)
2346
2238
  end
2347
2239
  end
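Pulling the constraints above into one sketch: the machine and disk types are values the comments name as supported, while the disk size and label are illustrative.

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    config = RBE::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig.new(
      machine_type: 'n1-standard-2', # f1-micro and g1-small are not supported
      disk_type: 'pd-ssd',           # only pd-standard and pd-ssd are accepted
      disk_size_gb: 200,             # illustrative size, in GB
      reserved: true,                # on-demand VM; will not be preempted
      labels: { 'team' => 'ci' }     # keys start with a letter; at most 64 labels
    )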
@@ -2360,10 +2252,9 @@ module Google
2360
2252
  # @return [String]
2361
2253
  attr_accessor :channel
2362
2254
 
2363
- # WorkerPool resource name formatted as:
2364
- # `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`.
2365
- # name should not be populated when creating a worker pool since it is
2366
- # provided in the `poolId` field.
2255
+ # WorkerPool resource name formatted as: `projects/[PROJECT_ID]/instances/[
2256
+ # INSTANCE_ID]/workerpools/[POOL_ID]`. name should not be populated when
2257
+ # creating a worker pool since it is provided in the `poolId` field.
2367
2258
  # Corresponds to the JSON property `name`
2368
2259
  # @return [String]
2369
2260
  attr_accessor :name
@@ -2373,14 +2264,13 @@ module Google
2373
2264
  # @return [String]
2374
2265
  attr_accessor :state
2375
2266
 
2376
- # Defines the configuration to be used for a creating workers in
2377
- # the worker pool.
2267
+ # Defines the configuration to be used for creating workers in the worker pool.
2378
2268
  # Corresponds to the JSON property `workerConfig`
2379
2269
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig]
2380
2270
  attr_accessor :worker_config
2381
2271
 
2382
- # The desired number of workers in the worker pool. Must be a value between
2383
- # 0 and 15000.
2272
+ # The desired number of workers in the worker pool. Must be a value between 0
2273
+ # and 15000.
2384
2274
  # Corresponds to the JSON property `workerCount`
2385
2275
  # @return [Fixnum]
2386
2276
  attr_accessor :worker_count
@@ -2402,14 +2292,13 @@ module Google
2402
2292
 
2403
2293
  # AdminTemp is a preliminary set of administration tasks. It's called "Temp"
2404
2294
  # because we do not yet know the best way to represent admin tasks; it's
2405
- # possible that this will be entirely replaced in later versions of this API.
2406
- # If this message proves to be sufficient, it will be renamed in the alpha or
2407
- # beta release of this API.
2408
- # This message (suitably marshalled into a protobuf.Any) can be used as the
2409
- # inline_assignment field in a lease; the lease assignment field should simply
2410
- # be `"admin"` in these cases.
2411
- # This message is heavily based on Swarming administration tasks from the LUCI
2412
- # project (http://github.com/luci/luci-py/appengine/swarming).
2295
+ # possible that this will be entirely replaced in later versions of this API. If
2296
+ # this message proves to be sufficient, it will be renamed in the alpha or beta
2297
+ # release of this API. This message (suitably marshalled into a protobuf.Any)
2298
+ # can be used as the inline_assignment field in a lease; the lease assignment
2299
+ # field should simply be `"admin"` in these cases. This message is heavily based
2300
+ # on Swarming administration tasks from the LUCI project (http://github.com/luci/
2301
+ # luci-py/appengine/swarming).
2413
2302
  class GoogleDevtoolsRemoteworkersV1test2AdminTemp
2414
2303
  include Google::Apis::Core::Hashable
2415
2304
 
@@ -2445,13 +2334,12 @@ module Google
2445
2334
  attr_accessor :contents
2446
2335
 
2447
2336
  # The CommandTask and CommandResult messages assume the existence of a service
2448
- # that can serve blobs of content, identified by a hash and size known as a
2449
- # "digest." The method by which these blobs may be retrieved is not specified
2450
- # here, but a model implementation is in the Remote Execution API's
2451
- # "ContentAddressibleStorage" interface.
2452
- # In the context of the RWAPI, a Digest will virtually always refer to the
2453
- # contents of a file or a directory. The latter is represented by the
2454
- # byte-encoded Directory message.
2337
+ # that can serve blobs of content, identified by a hash and size known as a "
2338
+ # digest." The method by which these blobs may be retrieved is not specified
2339
+ # here, but a model implementation is in the Remote Execution API's "
2340
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2341
+ # will virtually always refer to the contents of a file or a directory. The
2342
+ # latter is represented by the byte-encoded Directory message.
2455
2343
  # Corresponds to the JSON property `digest`
2456
2344
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2457
2345
  attr_accessor :digest
@@ -2467,27 +2355,26 @@ module Google
2467
2355
  end
2468
2356
  end
2469
2357
 
2470
- # DEPRECATED - use CommandResult instead.
2471
- # Describes the actual outputs from the task.
2358
+ # DEPRECATED - use CommandResult instead. Describes the actual outputs from the
2359
+ # task.
2472
2360
  class GoogleDevtoolsRemoteworkersV1test2CommandOutputs
2473
2361
  include Google::Apis::Core::Hashable
2474
2362
 
2475
2363
  # exit_code is only fully reliable if the status' code is OK. If the task
2476
- # exceeded its deadline or was cancelled, the process may still produce an
2477
- # exit code as it is cancelled, and this will be populated, but a successful
2478
- # (zero) is unlikely to be correct unless the status code is OK.
2364
+ # exceeded its deadline or was cancelled, the process may still produce an exit
2365
+ # code as it is cancelled, and this will be populated, but a successful (zero)
2366
+ # is unlikely to be correct unless the status code is OK.
2479
2367
  # Corresponds to the JSON property `exitCode`
2480
2368
  # @return [Fixnum]
2481
2369
  attr_accessor :exit_code
2482
2370
 
2483
2371
  # The CommandTask and CommandResult messages assume the existence of a service
2484
- # that can serve blobs of content, identified by a hash and size known as a
2485
- # "digest." The method by which these blobs may be retrieved is not specified
2486
- # here, but a model implementation is in the Remote Execution API's
2487
- # "ContentAddressibleStorage" interface.
2488
- # In the context of the RWAPI, a Digest will virtually always refer to the
2489
- # contents of a file or a directory. The latter is represented by the
2490
- # byte-encoded Directory message.
2372
+ # that can serve blobs of content, identified by a hash and size known as a "
2373
+ # digest." The method by which these blobs may be retrieved is not specified
2374
+ # here, but a model implementation is in the Remote Execution API's "
2375
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2376
+ # will virtually always refer to the contents of a file or a directory. The
2377
+ # latter is represented by the byte-encoded Directory message.
2491
2378
  # Corresponds to the JSON property `outputs`
2492
2379
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2493
2380
  attr_accessor :outputs
@@ -2503,9 +2390,8 @@ module Google
2503
2390
  end
2504
2391
  end
2505
2392
 
2506
- # DEPRECATED - use CommandResult instead.
2507
- # Can be used as part of CompleteRequest.metadata, or are part of a more
2508
- # sophisticated message.
2393
+ # DEPRECATED - use CommandResult instead. Can be used as part of CompleteRequest.
2394
+ # metadata, or are part of a more sophisticated message.
2509
2395
  class GoogleDevtoolsRemoteworkersV1test2CommandOverhead
2510
2396
  include Google::Apis::Core::Hashable
2511
2397
 
@@ -2516,8 +2402,8 @@ module Google
2516
2402
  # @return [String]
2517
2403
  attr_accessor :duration
2518
2404
 
2519
- # The amount of time *not* spent executing the command (ie
2520
- # uploading/downloading files).
2405
+ # The amount of time *not* spent executing the command (ie uploading/downloading
2406
+ # files).
2521
2407
  # Corresponds to the JSON property `overhead`
2522
2408
  # @return [String]
2523
2409
  attr_accessor :overhead
@@ -2545,46 +2431,44 @@ module Google
2545
2431
  # @return [String]
2546
2432
  attr_accessor :duration
2547
2433
 
2548
- # The exit code of the process. An exit code of "0" should only be trusted if
2549
- # `status` has a code of OK (otherwise it may simply be unset).
2434
+ # The exit code of the process. An exit code of "0" should only be trusted if `
2435
+ # status` has a code of OK (otherwise it may simply be unset).
2550
2436
  # Corresponds to the JSON property `exitCode`
2551
2437
  # @return [Fixnum]
2552
2438
  attr_accessor :exit_code
2553
2439
 
2554
- # Implementation-dependent metadata about the task. Both servers and bots
2555
- # may define messages which can be encoded here; bots are free to provide
2556
- # metadata in multiple formats, and servers are free to choose one or more
2557
- # of the values to process and ignore others. In particular, it is *not*
2558
- # considered an error for the bot to provide the server with a field that it
2559
- # doesn't know about.
2440
+ # Implementation-dependent metadata about the task. Both servers and bots may
2441
+ # define messages which can be encoded here; bots are free to provide metadata
2442
+ # in multiple formats, and servers are free to choose one or more of the values
2443
+ # to process and ignore others. In particular, it is *not* considered an error
2444
+ # for the bot to provide the server with a field that it doesn't know about.
2560
2445
  # Corresponds to the JSON property `metadata`
2561
2446
  # @return [Array<Hash<String,Object>>]
2562
2447
  attr_accessor :metadata
2563
2448
 
2564
2449
  # The CommandTask and CommandResult messages assume the existence of a service
2565
- # that can serve blobs of content, identified by a hash and size known as a
2566
- # "digest." The method by which these blobs may be retrieved is not specified
2567
- # here, but a model implementation is in the Remote Execution API's
2568
- # "ContentAddressibleStorage" interface.
2569
- # In the context of the RWAPI, a Digest will virtually always refer to the
2570
- # contents of a file or a directory. The latter is represented by the
2571
- # byte-encoded Directory message.
2450
+ # that can serve blobs of content, identified by a hash and size known as a "
2451
+ # digest." The method by which these blobs may be retrieved is not specified
2452
+ # here, but a model implementation is in the Remote Execution API's "
2453
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2454
+ # will virtually always refer to the contents of a file or a directory. The
2455
+ # latter is represented by the byte-encoded Directory message.
2572
2456
  # Corresponds to the JSON property `outputs`
2573
2457
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
2574
2458
  attr_accessor :outputs
2575
2459
 
2576
- # The amount of time *not* spent executing the command (ie
2577
- # uploading/downloading files).
2460
+ # The amount of time *not* spent executing the command (ie uploading/downloading
2461
+ # files).
2578
2462
  # Corresponds to the JSON property `overhead`
2579
2463
  # @return [String]
2580
2464
  attr_accessor :overhead
2581
2465
 
2582
- # The `Status` type defines a logical error model that is suitable for
2583
- # different programming environments, including REST APIs and RPC APIs. It is
2584
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2585
- # three pieces of data: error code, error message, and error details.
2586
- # You can find out more about this error model and how to work with it in the
2587
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2466
+ # The `Status` type defines a logical error model that is suitable for different
2467
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2468
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2469
+ # data: error code, error message, and error details. You can find out more
2470
+ # about this error model and how to work with it in the [API Design Guide](https:
2471
+ # //cloud.google.com/apis/design/errors).
2588
2472
  # Corresponds to the JSON property `status`
2589
2473
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus]
2590
2474
  attr_accessor :status
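The exit-code caveat above reduces to a small predicate. In `google.rpc.Code`, `OK` is `0`; treating an absent status as OK follows proto3 defaults, and the helper name is ours:

    # Trust a zero exit code only when the overall Status is OK.
    def task_succeeded?(result)
      status_ok = result.status.nil? || result.status.code.to_i.zero?
      status_ok && result.exit_code == 0
    end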
@@ -2640,14 +2524,13 @@ module Google
2640
2524
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskInputs
2641
2525
  include Google::Apis::Core::Hashable
2642
2526
 
2643
- # The command itself to run (e.g., argv).
2644
- # This field should be passed directly to the underlying operating system,
2645
- # and so it must be sensible to that operating system. For example, on
2646
- # Windows, the first argument might be "C:\Windows\System32\ping.exe" -
2647
- # that is, using drive letters and backslashes. A command for a *nix
2648
- # system, on the other hand, would use forward slashes.
2649
- # All other fields in the RWAPI must consistently use forward slashes,
2650
- # since those fields may be interpretted by both the service and the bot.
2527
+ # The command itself to run (e.g., argv). This field should be passed directly
2528
+ # to the underlying operating system, and so it must be sensible to that
2529
+ # operating system. For example, on Windows, the first argument might be "C:\
2530
+ # Windows\System32\ping.exe" - that is, using drive letters and backslashes. A
2531
+ # command for a *nix system, on the other hand, would use forward slashes. All
2532
+ # other fields in the RWAPI must consistently use forward slashes, since those
2533
+ # fields may be interpreted by both the service and the bot.
2651
2534
  # Corresponds to the JSON property `arguments`
2652
2535
  # @return [Array<String>]
2653
2536
  attr_accessor :arguments
@@ -2657,31 +2540,29 @@ module Google
2657
2540
  # @return [Array<Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputsEnvironmentVariable>]
2658
2541
  attr_accessor :environment_variables
2659
2542
 
2660
- # The input filesystem to be set up prior to the task beginning. The
2661
- # contents should be a repeated set of FileMetadata messages though other
2662
- # formats are allowed if better for the implementation (eg, a LUCI-style
2663
- # .isolated file).
2664
- # This field is repeated since implementations might want to cache the
2665
- # metadata, in which case it may be useful to break up portions of the
2666
- # filesystem that change frequently (eg, specific input files) from those
2667
- # that don't (eg, standard header files).
2543
+ # The input filesystem to be set up prior to the task beginning. The contents
2544
+ # should be a repeated set of FileMetadata messages though other formats are
2545
+ # allowed if better for the implementation (eg, a LUCI-style .isolated file).
2546
+ # This field is repeated since implementations might want to cache the metadata,
2547
+ # in which case it may be useful to break up portions of the filesystem that
2548
+ # change frequently (eg, specific input files) from those that don't (eg,
2549
+ # standard header files).
2668
2550
  # Corresponds to the JSON property `files`
2669
2551
  # @return [Array<Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest>]
2670
2552
  attr_accessor :files
2671
2553
 
2672
- # Inline contents for blobs expected to be needed by the bot to execute the
2673
- # task. For example, contents of entries in `files` or blobs that are
2674
- # indirectly referenced by an entry there.
2675
- # The bot should check against this list before downloading required task
2676
- # inputs to reduce the number of communications between itself and the
2677
- # remote CAS server.
2554
+ # Inline contents for blobs expected to be needed by the bot to execute the task.
2555
+ # For example, contents of entries in `files` or blobs that are indirectly
2556
+ # referenced by an entry there. The bot should check against this list before
2557
+ # downloading required task inputs to reduce the number of communications
2558
+ # between itself and the remote CAS server.
2678
2559
  # Corresponds to the JSON property `inlineBlobs`
2679
2560
  # @return [Array<Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Blob>]
2680
2561
  attr_accessor :inline_blobs
2681
2562
 
2682
- # Directory from which a command is executed. It is a relative directory
2683
- # with respect to the bot's working directory (i.e., "./"). If it is
2684
- # non-empty, then it must exist under "./". Otherwise, "./" will be used.
2563
+ # Directory from which a command is executed. It is a relative directory with
2564
+ # respect to the bot's working directory (i.e., "./"). If it is non-empty, then
2565
+ # it must exist under "./". Otherwise, "./" will be used.
2685
2566
  # Corresponds to the JSON property `workingDirectory`
2686
2567
  # @return [String]
2687
2568
  attr_accessor :working_directory
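A sketch of inputs for a small *nix task. `arguments` uses the target OS's own syntax, while `working_directory`, like every other path field, uses forward slashes; the variable is illustrative.

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    inputs = RBE::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputs.new(
      arguments: ['/bin/ls', '-l'],  # argv in the target OS's native form
      working_directory: 'src',      # relative to the bot's "./"
      environment_variables: [
        RBE::GoogleDevtoolsRemoteworkersV1test2CommandTaskInputsEnvironmentVariable.new(
          name: 'LANG', value: 'C.UTF-8'
        )
      ]
    )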
@@ -2729,32 +2610,32 @@ module Google
2729
2610
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskOutputs
2730
2611
  include Google::Apis::Core::Hashable
2731
2612
 
2732
- # A list of expected directories, relative to the execution root. All paths
2733
- # MUST be delimited by forward slashes.
2613
+ # A list of expected directories, relative to the execution root. All paths MUST
2614
+ # be delimited by forward slashes.
2734
2615
  # Corresponds to the JSON property `directories`
2735
2616
  # @return [Array<String>]
2736
2617
  attr_accessor :directories
2737
2618
 
2738
- # A list of expected files, relative to the execution root. All paths
2739
- # MUST be delimited by forward slashes.
2619
+ # A list of expected files, relative to the execution root. All paths MUST be
2620
+ # delimited by forward slashes.
2740
2621
  # Corresponds to the JSON property `files`
2741
2622
  # @return [Array<String>]
2742
2623
  attr_accessor :files
2743
2624
 
2744
- # The destination to which any stderr should be sent. The method by which
2745
- # the bot should send the stream contents to that destination is not
2746
- # defined in this API. As examples, the destination could be a file
2747
- # referenced in the `files` field in this message, or it could be a URI
2748
- # that must be written via the ByteStream API.
2625
+ # The destination to which any stderr should be sent. The method by which the
2626
+ # bot should send the stream contents to that destination is not defined in this
2627
+ # API. As examples, the destination could be a file referenced in the `files`
2628
+ # field in this message, or it could be a URI that must be written via the
2629
+ # ByteStream API.
2749
2630
  # Corresponds to the JSON property `stderrDestination`
2750
2631
  # @return [String]
2751
2632
  attr_accessor :stderr_destination
2752
2633
 
2753
- # The destination to which any stdout should be sent. The method by which
2754
- # the bot should send the stream contents to that destination is not
2755
- # defined in this API. As examples, the destination could be a file
2756
- # referenced in the `files` field in this message, or it could be a URI
2757
- # that must be written via the ByteStream API.
2634
+ # The destination to which any stdout should be sent. The method by which the
2635
+ # bot should send the stream contents to that destination is not defined in this
2636
+ # API. As examples, the destination could be a file referenced in the `files`
2637
+ # field in this message, or it could be a URI that must be written via the
2638
+ # ByteStream API.
2758
2639
  # Corresponds to the JSON property `stdoutDestination`
2759
2640
  # @return [String]
2760
2641
  attr_accessor :stdout_destination
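For example, with forward-slash paths throughout; here `stdout_destination` reuses a file that is also declared as an expected output, though it could instead be a ByteStream URI:

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    outputs = RBE::GoogleDevtoolsRemoteworkersV1test2CommandTaskOutputs.new(
      files: ['out/stdout.txt', 'out/result.bin'],
      directories: ['out/logs'],
      stdout_destination: 'out/stdout.txt'
    )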
@@ -2776,27 +2657,26 @@ module Google
2776
2657
  class GoogleDevtoolsRemoteworkersV1test2CommandTaskTimeouts
2777
2658
  include Google::Apis::Core::Hashable
2778
2659
 
2779
- # This specifies the maximum time that the task can run, excluding the
2780
- # time required to download inputs or upload outputs. That is, the worker
2781
- # will terminate the task if it runs longer than this.
2660
+ # This specifies the maximum time that the task can run, excluding the time
2661
+ # required to download inputs or upload outputs. That is, the worker will
2662
+ # terminate the task if it runs longer than this.
2782
2663
  # Corresponds to the JSON property `execution`
2783
2664
  # @return [String]
2784
2665
  attr_accessor :execution
2785
2666
 
2786
- # This specifies the maximum amount of time the task can be idle - that is,
2787
- # go without generating some output in either stdout or stderr. If the
2788
- # process is silent for more than the specified time, the worker will
2789
- # terminate the task.
2667
+ # This specifies the maximum amount of time the task can be idle - that is, go
2668
+ # without generating some output in either stdout or stderr. If the process is
2669
+ # silent for more than the specified time, the worker will terminate the task.
2790
2670
  # Corresponds to the JSON property `idle`
2791
2671
  # @return [String]
2792
2672
  attr_accessor :idle
2793
2673
 
2794
2674
  # If the execution or IO timeouts are exceeded, the worker will try to
2795
- # gracefully terminate the task and return any existing logs. However,
2796
- # tasks may be hard-frozen in which case this process will fail. This
2797
- # timeout specifies how long to wait for a terminated task to shut down
2798
- # gracefully (e.g. via SIGTERM) before we bring down the hammer (e.g.
2799
- # SIGKILL on *nix, CTRL_BREAK_EVENT on Windows).
2675
+ # gracefully terminate the task and return any existing logs. However, tasks may
2676
+ # be hard-frozen in which case this process will fail. This timeout specifies
2677
+ # how long to wait for a terminated task to shut down gracefully (e.g. via
2678
+ # SIGTERM) before we bring down the hammer (e.g. SIGKILL on *nix,
2679
+ # CTRL_BREAK_EVENT on Windows).
2800
2680
  # Corresponds to the JSON property `shutdown`
2801
2681
  # @return [String]
2802
2682
  attr_accessor :shutdown
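All three fields are protobuf `Duration`s, which this JSON client takes as strings such as "3600s". Illustrative values:

    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    timeouts = RBE::GoogleDevtoolsRemoteworkersV1test2CommandTaskTimeouts.new(
      execution: '3600s', # hard cap on run time, excluding input/output transfer
      idle: '300s',       # terminate if stdout/stderr stay silent this long
      shutdown: '15s'     # grace period between SIGTERM and the hard kill
    )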
@@ -2814,13 +2694,12 @@ module Google
2814
2694
  end
2815
2695
 
2816
2696
  # The CommandTask and CommandResult messages assume the existence of a service
2817
- # that can serve blobs of content, identified by a hash and size known as a
2818
- # "digest." The method by which these blobs may be retrieved is not specified
2819
- # here, but a model implementation is in the Remote Execution API's
2820
- # "ContentAddressibleStorage" interface.
2821
- # In the context of the RWAPI, a Digest will virtually always refer to the
2822
- # contents of a file or a directory. The latter is represented by the
2823
- # byte-encoded Directory message.
2697
+ # that can serve blobs of content, identified by a hash and size known as a "
2698
+ # digest." The method by which these blobs may be retrieved is not specified
2699
+ # here, but a model implementation is in the Remote Execution API's "
2700
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
2701
+ # will virtually always refer to the contents of a file or a directory. The
2702
+ # latter is represented by the byte-encoded Directory message.
2824
2703
  class GoogleDevtoolsRemoteworkersV1test2Digest
2825
2704
  include Google::Apis::Core::Hashable
2826
2705
 
@@ -2831,9 +2710,9 @@ module Google
2831
2710
  attr_accessor :hash_prop
2832
2711
 
2833
2712
  # The size of the contents. While this is not strictly required as part of an
2834
- # identifier (after all, any given hash will have exactly one canonical
2835
- # size), it's useful in almost all cases when one might want to send or
2836
- # retrieve blobs of content and is included here for this reason.
2713
+ # identifier (after all, any given hash will have exactly one canonical size),
2714
+ # it's useful in almost all cases when one might want to send or retrieve blobs
2715
+ # of content and is included here for this reason.
2837
2716
  # Corresponds to the JSON property `sizeBytes`
2838
2717
  # @return [Fixnum]
2839
2718
  attr_accessor :size_bytes
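A sketch of filling in a digest for a local blob. The hash function is whatever the service declares (SHA-256 is assumed here), and note the gem exposes the `hash` JSON property as `hash_prop` to avoid colliding with Ruby's `Object#hash`:

    require 'digest'
    require 'google/apis/remotebuildexecution_v1'
    RBE = Google::Apis::RemotebuildexecutionV1

    blob = File.binread('hello.txt')  # any local file
    digest = RBE::GoogleDevtoolsRemoteworkersV1test2Digest.new(
      hash_prop: Digest::SHA256.hexdigest(blob),
      size_bytes: blob.bytesize
    )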
@@ -2881,13 +2760,12 @@ module Google
  include Google::Apis::Core::Hashable

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :digest
@@ -2913,21 +2791,20 @@ module Google
  class GoogleDevtoolsRemoteworkersV1test2FileMetadata
  include Google::Apis::Core::Hashable

- # If the file is small enough, its contents may also or alternatively be
- # listed here.
+ # If the file is small enough, its contents may also or alternatively be listed
+ # here.
  # Corresponds to the JSON property `contents`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
  attr_accessor :contents

  # The CommandTask and CommandResult messages assume the existence of a service
- # that can serve blobs of content, identified by a hash and size known as a
- # "digest." The method by which these blobs may be retrieved is not specified
- # here, but a model implementation is in the Remote Execution API's
- # "ContentAddressibleStorage" interface.
- # In the context of the RWAPI, a Digest will virtually always refer to the
- # contents of a file or a directory. The latter is represented by the
- # byte-encoded Directory message.
+ # that can serve blobs of content, identified by a hash and size known as a "
+ # digest." The method by which these blobs may be retrieved is not specified
+ # here, but a model implementation is in the Remote Execution API's "
+ # ContentAddressibleStorage" interface. In the context of the RWAPI, a Digest
+ # will virtually always refer to the contents of a file or a directory. The
+ # latter is represented by the byte-encoded Directory message.
  # Corresponds to the JSON property `digest`
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemoteworkersV1test2Digest]
  attr_accessor :digest
@@ -2938,11 +2815,11 @@ module Google
  attr_accessor :is_executable
  alias_method :is_executable?, :is_executable

- # The path of this file. If this message is part of the
- # CommandOutputs.outputs fields, the path is relative to the execution root
- # and must correspond to an entry in CommandTask.outputs.files. If this
- # message is part of a Directory message, then the path is relative to the
- # root of that directory. All paths MUST be delimited by forward slashes.
+ # The path of this file. If this message is part of the CommandOutputs.outputs
+ # fields, the path is relative to the execution root and must correspond to an
+ # entry in CommandTask.outputs.files. If this message is part of a Directory
+ # message, then the path is relative to the root of that directory. All paths
+ # MUST be delimited by forward slashes.
  # Corresponds to the JSON property `path`
  # @return [String]
  attr_accessor :path
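Putting the three properties together, a hypothetical inline-file entry (the path and contents are placeholders). Note that `contents` takes raw bytes, since the client library base64-encodes the value on serialization per the NOTE above.

    meta = Google::Apis::RemotebuildexecutionV1::
      GoogleDevtoolsRemoteworkersV1test2FileMetadata.new(
        path: 'out/stdout.txt',         # forward slashes, per the doc above
        contents: "build succeeded\n",  # raw bytes; base64 handled by the library
        is_executable: false
      )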
@@ -3003,47 +2880,45 @@ module Google
  class GoogleLongrunningOperation
  include Google::Apis::Core::Hashable

- # If the value is `false`, it means the operation is still in progress.
- # If `true`, the operation is completed, and either `error` or `response` is
- # available.
+ # If the value is `false`, it means the operation is still in progress. If `true`
+ # , the operation is completed, and either `error` or `response` is available.
  # Corresponds to the JSON property `done`
  # @return [Boolean]
  attr_accessor :done
  alias_method :done?, :done

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus]
  attr_accessor :error

- # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
  # Corresponds to the JSON property `metadata`
  # @return [Hash<String,Object>]
  attr_accessor :metadata

  # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- # `name` should be a resource name ending with `operations/`unique_id``.
+ # originally returns it. If you use the default HTTP mapping, the `name` should
+ # be a resource name ending with `operations/`unique_id``.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

- # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is `google.
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+ # the response should be the resource. For other methods, the response should
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
+ # example, if the original method name is `TakeSnapshot()`, the inferred
+ # response type is `TakeSnapshotResponse`.
  # Corresponds to the JSON property `response`
  # @return [Hash<String,Object>]
  attr_accessor :response
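The done/error/response contract above implies the usual polling loop. A sketch under the assumption that `service` exposes the standard Operations `get_operation` call; the operation name is a placeholder.

    op = service.get_operation('operations/example-id') # hypothetical name
    until op.done?
      sleep 5
      op = service.get_operation(op.name)
    end
    # Once done, exactly one of `error` or `response` is populated.
    raise "operation failed: #{op.error.message}" if op.error
    result = op.response # Hash<String,Object>; type depends on the method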
@@ -3062,13 +2937,11 @@ module Google
  end
  end

- # A generic empty message that you can re-use to avoid defining duplicated
- # empty messages in your APIs. A typical example is to use it as the request
- # or the response type of an API method. For instance:
- # service Foo `
- # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
- # `
- # The JSON representation for `Empty` is empty JSON object ````.
+ # A generic empty message that you can re-use to avoid defining duplicated empty
+ # messages in your APIs. A typical example is to use it as the request or the
+ # response type of an API method. For instance: service Foo ` rpc Bar(google.
+ # protobuf.Empty) returns (google.protobuf.Empty); ` The JSON representation for
+ # `Empty` is empty JSON object ````.
  class GoogleProtobufEmpty
  include Google::Apis::Core::Hashable

@@ -3081,12 +2954,12 @@ module Google
  end
  end

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  class GoogleRpcStatus
  include Google::Apis::Core::Hashable

@@ -3095,15 +2968,15 @@ module Google
  # @return [Fixnum]
  attr_accessor :code

- # A list of messages that carry the error details. There is a common set of
+ # A list of messages that carry the error details. There is a common set of
  # message types for APIs to use.
  # Corresponds to the JSON property `details`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :details

- # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message
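Tying this back to GoogleLongrunningOperation, a small sketch of consuming a `Status` from a failed operation, using only the accessors defined above (`op` is the polled operation from the earlier sketch).

    if op.error
      status = op.error # Google::Apis::RemotebuildexecutionV1::GoogleRpcStatus
      warn "RPC failed (code #{status.code}): #{status.message}"
      (status.details || []).each { |detail| warn detail.inspect }
    end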