google-api-client 0.42.2 → 0.45.0

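To pick up this release, the Gemfile pin can be bumped. A minimal sketch, assuming Bundler manages the dependency; the version constraint shown is illustrative, not taken from the diff:

    # Gemfile -- allow 0.45.x of the monolithic google-api-client gem
    gem 'google-api-client', '~> 0.45.0'

    # then refresh the lockfile for this gem only:
    #   bundle update google-api-client
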
Files changed (959)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +535 -0
  3. data/api_names.yaml +1 -0
  4. data/docs/oauth-server.md +4 -6
  5. data/generated/google/apis/abusiveexperiencereport_v1.rb +1 -1
  6. data/generated/google/apis/abusiveexperiencereport_v1/classes.rb +8 -13
  7. data/generated/google/apis/abusiveexperiencereport_v1/service.rb +2 -3
  8. data/generated/google/apis/accessapproval_v1.rb +1 -1
  9. data/generated/google/apis/accessapproval_v1/classes.rb +59 -83
  10. data/generated/google/apis/accessapproval_v1/representations.rb +1 -0
  11. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  12. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  13. data/generated/google/apis/accesscontextmanager_v1/classes.rb +198 -236
  14. data/generated/google/apis/accesscontextmanager_v1/service.rb +128 -171
  15. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  16. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  17. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  18. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  19. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +532 -651
  20. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  21. data/generated/google/apis/adexchangebuyer2_v2beta1/service.rb +467 -631
  22. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  23. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +47 -2
  24. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +18 -0
  25. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  26. data/generated/google/apis/adexperiencereport_v1/classes.rb +11 -18
  27. data/generated/google/apis/adexperiencereport_v1/service.rb +2 -3
  28. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  29. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  30. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  31. data/generated/google/apis/admin_directory_v1.rb +6 -8
  32. data/generated/google/apis/admin_directory_v1/classes.rb +209 -242
  33. data/generated/google/apis/admin_directory_v1/representations.rb +0 -39
  34. data/generated/google/apis/admin_directory_v1/service.rb +535 -998
  35. data/generated/google/apis/admin_reports_v1.rb +6 -5
  36. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  37. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  38. data/generated/google/apis/admob_v1.rb +4 -1
  39. data/generated/google/apis/admob_v1/classes.rb +139 -270
  40. data/generated/google/apis/admob_v1/service.rb +11 -13
  41. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  42. data/generated/google/apis/alertcenter_v1beta1/classes.rb +107 -138
  43. data/generated/google/apis/alertcenter_v1beta1/service.rb +50 -55
  44. data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
  45. data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1610 -0
  46. data/generated/google/apis/analyticsdata_v1alpha/representations.rb +789 -0
  47. data/generated/google/apis/analyticsdata_v1alpha/service.rb +220 -0
  48. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  49. data/generated/google/apis/analyticsreporting_v4/classes.rb +315 -399
  50. data/generated/google/apis/androiddeviceprovisioning_v1.rb +1 -1
  51. data/generated/google/apis/androiddeviceprovisioning_v1/classes.rb +193 -220
  52. data/generated/google/apis/androiddeviceprovisioning_v1/service.rb +55 -61
  53. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  54. data/generated/google/apis/androidenterprise_v1/classes.rb +452 -557
  55. data/generated/google/apis/androidenterprise_v1/service.rb +183 -242
  56. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  57. data/generated/google/apis/androidmanagement_v1/classes.rb +98 -61
  58. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  59. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  60. data/generated/google/apis/androidpublisher_v3/classes.rb +281 -342
  61. data/generated/google/apis/androidpublisher_v3/service.rb +127 -155
  62. data/generated/google/apis/apigateway_v1alpha1.rb +34 -0
  63. data/generated/google/apis/apigateway_v1alpha1/classes.rb +633 -0
  64. data/generated/google/apis/apigateway_v1alpha1/representations.rb +250 -0
  65. data/generated/google/apis/apigateway_v1alpha1/service.rb +623 -0
  66. data/generated/google/apis/apigateway_v1beta.rb +34 -0
  67. data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
  68. data/generated/google/apis/apigateway_v1beta/representations.rb +470 -0
  69. data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
  70. data/generated/google/apis/apigee_v1.rb +6 -8
  71. data/generated/google/apis/apigee_v1/classes.rb +1123 -1214
  72. data/generated/google/apis/apigee_v1/representations.rb +139 -0
  73. data/generated/google/apis/apigee_v1/service.rb +1018 -1151
  74. data/generated/google/apis/appengine_v1.rb +1 -1
  75. data/generated/google/apis/appengine_v1/classes.rb +96 -59
  76. data/generated/google/apis/appengine_v1/representations.rb +17 -0
  77. data/generated/google/apis/appengine_v1/service.rb +38 -47
  78. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  79. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  80. data/generated/google/apis/appengine_v1beta.rb +1 -1
  81. data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
  82. data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
  83. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  84. data/generated/google/apis/appsmarket_v2.rb +1 -1
  85. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  86. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  87. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +242 -337
  88. data/generated/google/apis/artifactregistry_v1beta1/representations.rb +1 -0
  89. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  90. data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
  91. data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +458 -0
  92. data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +230 -0
  93. data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
  94. data/generated/google/apis/bigquery_v2.rb +1 -1
  95. data/generated/google/apis/bigquery_v2/classes.rb +381 -553
  96. data/generated/google/apis/bigquery_v2/representations.rb +16 -0
  97. data/generated/google/apis/bigquery_v2/service.rb +32 -40
  98. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  99. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  100. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  101. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  102. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  103. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  104. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  105. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  106. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  107. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  108. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  109. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  110. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  111. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  112. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  113. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  114. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  115. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  116. data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
  117. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  118. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  119. data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
  120. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  121. data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
  122. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  123. data/generated/google/apis/billingbudgets_v1beta1/classes.rb +107 -118
  124. data/generated/google/apis/billingbudgets_v1beta1/representations.rb +1 -0
  125. data/generated/google/apis/billingbudgets_v1beta1/service.rb +30 -33
  126. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  127. data/generated/google/apis/binaryauthorization_v1/classes.rb +246 -354
  128. data/generated/google/apis/binaryauthorization_v1/representations.rb +1 -0
  129. data/generated/google/apis/binaryauthorization_v1/service.rb +74 -89
  130. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  131. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +246 -354
  132. data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +1 -0
  133. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +74 -89
  134. data/generated/google/apis/blogger_v2.rb +2 -3
  135. data/generated/google/apis/blogger_v2/classes.rb +1 -2
  136. data/generated/google/apis/blogger_v2/service.rb +1 -2
  137. data/generated/google/apis/blogger_v3.rb +2 -3
  138. data/generated/google/apis/blogger_v3/classes.rb +1 -2
  139. data/generated/google/apis/blogger_v3/service.rb +1 -2
  140. data/generated/google/apis/calendar_v3.rb +1 -1
  141. data/generated/google/apis/calendar_v3/classes.rb +11 -9
  142. data/generated/google/apis/chat_v1.rb +1 -1
  143. data/generated/google/apis/chat_v1/classes.rb +91 -116
  144. data/generated/google/apis/chat_v1/service.rb +30 -42
  145. data/generated/google/apis/chromeuxreport_v1.rb +1 -1
  146. data/generated/google/apis/chromeuxreport_v1/classes.rb +55 -66
  147. data/generated/google/apis/civicinfo_v2.rb +1 -1
  148. data/generated/google/apis/civicinfo_v2/classes.rb +18 -8
  149. data/generated/google/apis/civicinfo_v2/representations.rb +2 -0
  150. data/generated/google/apis/classroom_v1.rb +7 -1
  151. data/generated/google/apis/classroom_v1/classes.rb +382 -365
  152. data/generated/google/apis/classroom_v1/representations.rb +43 -0
  153. data/generated/google/apis/classroom_v1/service.rb +883 -902
  154. data/generated/google/apis/cloudasset_v1.rb +1 -1
  155. data/generated/google/apis/cloudasset_v1/classes.rb +765 -1036
  156. data/generated/google/apis/cloudasset_v1/representations.rb +16 -0
  157. data/generated/google/apis/cloudasset_v1/service.rb +131 -156
  158. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  159. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -775
  160. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  161. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  162. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  163. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  164. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  165. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
  166. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  167. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  168. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -718
  169. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  170. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  171. data/generated/google/apis/cloudbilling_v1/classes.rb +284 -445
  172. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  173. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  174. data/generated/google/apis/cloudbuild_v1/classes.rb +371 -343
  175. data/generated/google/apis/cloudbuild_v1/representations.rb +23 -0
  176. data/generated/google/apis/cloudbuild_v1/service.rb +265 -66
  177. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  178. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
  179. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
  180. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  181. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  182. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
  183. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
  184. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  185. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  186. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  187. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  188. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  189. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +132 -154
  190. data/generated/google/apis/clouderrorreporting_v1beta1/representations.rb +1 -0
  191. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  192. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  193. data/generated/google/apis/cloudfunctions_v1/classes.rb +323 -493
  194. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  195. data/generated/google/apis/cloudidentity_v1.rb +4 -1
  196. data/generated/google/apis/cloudidentity_v1/classes.rb +977 -75
  197. data/generated/google/apis/cloudidentity_v1/representations.rb +388 -0
  198. data/generated/google/apis/cloudidentity_v1/service.rb +841 -62
  199. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  200. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1059 -318
  201. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +331 -22
  202. data/generated/google/apis/cloudidentity_v1beta1/service.rb +742 -96
  203. data/generated/google/apis/cloudiot_v1.rb +1 -1
  204. data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
  205. data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
  206. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  207. data/generated/google/apis/cloudkms_v1.rb +1 -1
  208. data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
  209. data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
  210. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  211. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  212. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  213. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  214. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  215. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
  216. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
  217. data/generated/google/apis/cloudresourcemanager_v1/service.rb +60 -58
  218. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  219. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
  220. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
  221. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +24 -23
  222. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  223. data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
  224. data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
  225. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  226. data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
  227. data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
  228. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  229. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  230. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  231. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  232. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  233. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  234. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  235. data/generated/google/apis/cloudsearch_v1/classes.rb +650 -782
  236. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  237. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  238. data/generated/google/apis/cloudshell_v1.rb +1 -1
  239. data/generated/google/apis/cloudshell_v1/classes.rb +36 -227
  240. data/generated/google/apis/cloudshell_v1/representations.rb +0 -67
  241. data/generated/google/apis/cloudshell_v1/service.rb +21 -25
  242. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  243. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  244. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  245. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  246. data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
  247. data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
  248. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  249. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  250. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
  251. data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
  252. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  253. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  254. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
  255. data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
  256. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  257. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  258. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  259. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  260. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  261. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  262. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  263. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  264. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  265. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  266. data/generated/google/apis/composer_v1.rb +1 -1
  267. data/generated/google/apis/composer_v1/classes.rb +190 -242
  268. data/generated/google/apis/composer_v1/service.rb +79 -150
  269. data/generated/google/apis/composer_v1beta1.rb +1 -1
  270. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  271. data/generated/google/apis/composer_v1beta1/service.rb +92 -179
  272. data/generated/google/apis/compute_alpha.rb +1 -1
  273. data/generated/google/apis/compute_alpha/classes.rb +1021 -137
  274. data/generated/google/apis/compute_alpha/representations.rb +208 -4
  275. data/generated/google/apis/compute_alpha/service.rb +710 -699
  276. data/generated/google/apis/compute_beta.rb +1 -1
  277. data/generated/google/apis/compute_beta/classes.rb +607 -82
  278. data/generated/google/apis/compute_beta/representations.rb +114 -1
  279. data/generated/google/apis/compute_beta/service.rb +623 -612
  280. data/generated/google/apis/compute_v1.rb +1 -1
  281. data/generated/google/apis/compute_v1/classes.rb +1014 -99
  282. data/generated/google/apis/compute_v1/representations.rb +390 -15
  283. data/generated/google/apis/compute_v1/service.rb +809 -33
  284. data/generated/google/apis/container_v1.rb +1 -1
  285. data/generated/google/apis/container_v1/classes.rb +996 -965
  286. data/generated/google/apis/container_v1/representations.rb +75 -0
  287. data/generated/google/apis/container_v1/service.rb +435 -502
  288. data/generated/google/apis/container_v1beta1.rb +1 -1
  289. data/generated/google/apis/container_v1beta1/classes.rb +1094 -1044
  290. data/generated/google/apis/container_v1beta1/representations.rb +91 -0
  291. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  292. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  293. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
  294. data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
  295. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  296. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  297. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
  298. data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
  299. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  300. data/generated/google/apis/content_v2.rb +1 -1
  301. data/generated/google/apis/content_v2/classes.rb +14 -2
  302. data/generated/google/apis/content_v2_1.rb +1 -1
  303. data/generated/google/apis/content_v2_1/classes.rb +116 -5
  304. data/generated/google/apis/content_v2_1/representations.rb +35 -0
  305. data/generated/google/apis/content_v2_1/service.rb +53 -2
  306. data/generated/google/apis/customsearch_v1.rb +1 -1
  307. data/generated/google/apis/customsearch_v1/classes.rb +303 -492
  308. data/generated/google/apis/customsearch_v1/service.rb +186 -343
  309. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  310. data/generated/google/apis/datacatalog_v1beta1/classes.rb +9 -1
  311. data/generated/google/apis/datacatalog_v1beta1/representations.rb +1 -0
  312. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  313. data/generated/google/apis/dataflow_v1b3/classes.rb +1150 -973
  314. data/generated/google/apis/dataflow_v1b3/representations.rb +145 -0
  315. data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
  316. data/generated/google/apis/datafusion_v1.rb +40 -0
  317. data/generated/google/apis/datafusion_v1/classes.rb +1040 -0
  318. data/generated/google/apis/datafusion_v1/representations.rb +395 -0
  319. data/generated/google/apis/datafusion_v1/service.rb +667 -0
  320. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  321. data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
  322. data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
  323. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  324. data/generated/google/apis/dataproc_v1.rb +1 -1
  325. data/generated/google/apis/dataproc_v1/classes.rb +250 -328
  326. data/generated/google/apis/dataproc_v1/representations.rb +18 -0
  327. data/generated/google/apis/dataproc_v1/service.rb +134 -168
  328. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  329. data/generated/google/apis/dataproc_v1beta2/classes.rb +274 -323
  330. data/generated/google/apis/dataproc_v1beta2/representations.rb +33 -0
  331. data/generated/google/apis/dataproc_v1beta2/service.rb +137 -192
  332. data/generated/google/apis/datastore_v1.rb +1 -1
  333. data/generated/google/apis/datastore_v1/classes.rb +330 -472
  334. data/generated/google/apis/datastore_v1/service.rb +52 -63
  335. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  336. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  337. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  338. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  339. data/generated/google/apis/datastore_v1beta3/classes.rb +255 -371
  340. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  341. data/generated/google/apis/deploymentmanager_alpha.rb +1 -1
  342. data/generated/google/apis/deploymentmanager_v2beta.rb +1 -1
  343. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  344. data/generated/google/apis/dfareporting_v3_3/classes.rb +326 -339
  345. data/generated/google/apis/dfareporting_v3_3/representations.rb +42 -0
  346. data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
  347. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  348. data/generated/google/apis/dfareporting_v3_4/classes.rb +348 -350
  349. data/generated/google/apis/dfareporting_v3_4/representations.rb +43 -0
  350. data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
  351. data/generated/google/apis/dialogflow_v2.rb +1 -1
  352. data/generated/google/apis/dialogflow_v2/classes.rb +1318 -1613
  353. data/generated/google/apis/dialogflow_v2/representations.rb +53 -15
  354. data/generated/google/apis/dialogflow_v2/service.rb +324 -448
  355. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  356. data/generated/google/apis/dialogflow_v2beta1/classes.rb +1396 -1705
  357. data/generated/google/apis/dialogflow_v2beta1/representations.rb +53 -15
  358. data/generated/google/apis/dialogflow_v2beta1/service.rb +915 -996
  359. data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
  360. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8210 -0
  361. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3461 -0
  362. data/generated/google/apis/dialogflow_v3beta1/service.rb +2812 -0
  363. data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
  364. data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
  365. data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
  366. data/generated/google/apis/displayvideo_v1.rb +1 -1
  367. data/generated/google/apis/displayvideo_v1/classes.rb +158 -7
  368. data/generated/google/apis/displayvideo_v1/representations.rb +41 -0
  369. data/generated/google/apis/displayvideo_v1/service.rb +147 -31
  370. data/generated/google/apis/displayvideo_v1beta.rb +38 -0
  371. data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
  372. data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
  373. data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
  374. data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
  375. data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
  376. data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
  377. data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
  378. data/generated/google/apis/displayvideo_v1dev.rb +38 -0
  379. data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
  380. data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
  381. data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
  382. data/generated/google/apis/dlp_v2.rb +1 -1
  383. data/generated/google/apis/dlp_v2/classes.rb +1076 -1301
  384. data/generated/google/apis/dlp_v2/service.rb +962 -905
  385. data/generated/google/apis/dns_v1.rb +1 -1
  386. data/generated/google/apis/dns_v1/classes.rb +175 -198
  387. data/generated/google/apis/dns_v1/service.rb +82 -97
  388. data/generated/google/apis/dns_v1beta2.rb +1 -1
  389. data/generated/google/apis/dns_v1beta2/classes.rb +180 -205
  390. data/generated/google/apis/dns_v1beta2/service.rb +82 -97
  391. data/generated/google/apis/docs_v1.rb +1 -1
  392. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  393. data/generated/google/apis/docs_v1/service.rb +17 -22
  394. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  395. data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
  396. data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
  397. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  398. data/generated/google/apis/domainsrdap_v1.rb +1 -1
  399. data/generated/google/apis/domainsrdap_v1/classes.rb +42 -69
  400. data/generated/google/apis/domainsrdap_v1/service.rb +16 -16
  401. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  402. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +8 -15
  403. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +37 -66
  404. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  405. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +202 -15
  406. data/generated/google/apis/doubleclickbidmanager_v1_1/representations.rb +107 -0
  407. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +37 -66
  408. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  409. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  410. data/generated/google/apis/drive_v2.rb +1 -1
  411. data/generated/google/apis/drive_v2/classes.rb +17 -7
  412. data/generated/google/apis/drive_v2/representations.rb +1 -0
  413. data/generated/google/apis/drive_v2/service.rb +79 -15
  414. data/generated/google/apis/drive_v3.rb +1 -1
  415. data/generated/google/apis/drive_v3/classes.rb +23 -7
  416. data/generated/google/apis/drive_v3/representations.rb +2 -0
  417. data/generated/google/apis/drive_v3/service.rb +59 -11
  418. data/generated/google/apis/driveactivity_v2.rb +1 -1
  419. data/generated/google/apis/driveactivity_v2/classes.rb +55 -68
  420. data/generated/google/apis/factchecktools_v1alpha1.rb +1 -1
  421. data/generated/google/apis/factchecktools_v1alpha1/classes.rb +46 -56
  422. data/generated/google/apis/factchecktools_v1alpha1/service.rb +30 -33
  423. data/generated/google/apis/file_v1.rb +1 -1
  424. data/generated/google/apis/file_v1/classes.rb +154 -173
  425. data/generated/google/apis/file_v1/service.rb +43 -52
  426. data/generated/google/apis/file_v1beta1.rb +1 -1
  427. data/generated/google/apis/file_v1beta1/classes.rb +334 -193
  428. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  429. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  430. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  431. data/generated/google/apis/firebase_v1beta1/classes.rb +325 -375
  432. data/generated/google/apis/firebase_v1beta1/representations.rb +3 -16
  433. data/generated/google/apis/firebase_v1beta1/service.rb +364 -421
  434. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  435. data/generated/google/apis/firebasedynamiclinks_v1/classes.rb +89 -112
  436. data/generated/google/apis/firebasedynamiclinks_v1/service.rb +18 -21
  437. data/generated/google/apis/firebasehosting_v1.rb +1 -1
  438. data/generated/google/apis/firebasehosting_v1/classes.rb +36 -40
  439. data/generated/google/apis/firebasehosting_v1/service.rb +19 -22
  440. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  441. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +334 -177
  442. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
  443. data/generated/google/apis/firebasehosting_v1beta1/service.rb +530 -149
  444. data/generated/google/apis/firebaseml_v1.rb +1 -1
  445. data/generated/google/apis/firebaseml_v1/classes.rb +39 -44
  446. data/generated/google/apis/firebaseml_v1/service.rb +19 -22
  447. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  448. data/generated/google/apis/firebaseml_v1beta2/classes.rb +67 -77
  449. data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
  450. data/generated/google/apis/firebaseml_v1beta2/service.rb +16 -18
  451. data/generated/google/apis/firebaserules_v1.rb +1 -1
  452. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  453. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  454. data/generated/google/apis/firestore_v1.rb +1 -1
  455. data/generated/google/apis/firestore_v1/classes.rb +402 -498
  456. data/generated/google/apis/firestore_v1/service.rb +165 -201
  457. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  458. data/generated/google/apis/firestore_v1beta1/classes.rb +334 -409
  459. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  460. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  461. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  462. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  463. data/generated/google/apis/fitness_v1.rb +3 -3
  464. data/generated/google/apis/fitness_v1/classes.rb +172 -210
  465. data/generated/google/apis/fitness_v1/service.rb +125 -146
  466. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  467. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  468. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  469. data/generated/google/apis/games_management_v1management.rb +2 -3
  470. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  471. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  472. data/generated/google/apis/games_v1.rb +2 -3
  473. data/generated/google/apis/games_v1/classes.rb +76 -83
  474. data/generated/google/apis/games_v1/representations.rb +2 -0
  475. data/generated/google/apis/games_v1/service.rb +84 -90
  476. data/generated/google/apis/gameservices_v1.rb +1 -1
  477. data/generated/google/apis/gameservices_v1/classes.rb +351 -523
  478. data/generated/google/apis/gameservices_v1/representations.rb +1 -0
  479. data/generated/google/apis/gameservices_v1/service.rb +167 -207
  480. data/generated/google/apis/gameservices_v1beta.rb +1 -1
  481. data/generated/google/apis/gameservices_v1beta/classes.rb +351 -523
  482. data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
  483. data/generated/google/apis/gameservices_v1beta/service.rb +167 -207
  484. data/generated/google/apis/genomics_v1.rb +1 -1
  485. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  486. data/generated/google/apis/genomics_v1/service.rb +28 -43
  487. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  488. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  489. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  490. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  491. data/generated/google/apis/genomics_v2alpha1/classes.rb +252 -347
  492. data/generated/google/apis/genomics_v2alpha1/representations.rb +1 -27
  493. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -100
  494. data/generated/google/apis/gmail_v1.rb +3 -3
  495. data/generated/google/apis/gmail_v1/classes.rb +207 -266
  496. data/generated/google/apis/gmail_v1/service.rb +260 -288
  497. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  498. data/generated/google/apis/gmailpostmastertools_v1beta1/classes.rb +37 -42
  499. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +35 -41
  500. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  501. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  502. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  503. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  504. data/generated/google/apis/healthcare_v1.rb +1 -1
  505. data/generated/google/apis/healthcare_v1/classes.rb +635 -826
  506. data/generated/google/apis/healthcare_v1/representations.rb +32 -0
  507. data/generated/google/apis/healthcare_v1/service.rb +842 -844
  508. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  509. data/generated/google/apis/healthcare_v1beta1/classes.rb +887 -1086
  510. data/generated/google/apis/healthcare_v1beta1/representations.rb +60 -0
  511. data/generated/google/apis/healthcare_v1beta1/service.rb +1149 -1109
  512. data/generated/google/apis/homegraph_v1.rb +4 -1
  513. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  514. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  515. data/generated/google/apis/iam_v1.rb +5 -2
  516. data/generated/google/apis/iam_v1/classes.rb +395 -592
  517. data/generated/google/apis/iam_v1/representations.rb +1 -0
  518. data/generated/google/apis/iam_v1/service.rb +431 -556
  519. data/generated/google/apis/iamcredentials_v1.rb +4 -2
  520. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  521. data/generated/google/apis/iamcredentials_v1/service.rb +15 -13
  522. data/generated/google/apis/iap_v1.rb +1 -1
  523. data/generated/google/apis/iap_v1/classes.rb +253 -355
  524. data/generated/google/apis/iap_v1/representations.rb +1 -0
  525. data/generated/google/apis/iap_v1/service.rb +61 -71
  526. data/generated/google/apis/iap_v1beta1.rb +1 -1
  527. data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
  528. data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
  529. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  530. data/generated/google/apis/indexing_v3.rb +1 -1
  531. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  532. data/generated/google/apis/jobs_v2.rb +1 -1
  533. data/generated/google/apis/jobs_v2/classes.rb +786 -1086
  534. data/generated/google/apis/jobs_v2/service.rb +85 -126
  535. data/generated/google/apis/jobs_v3.rb +1 -1
  536. data/generated/google/apis/jobs_v3/classes.rb +637 -856
  537. data/generated/google/apis/jobs_v3/service.rb +101 -139
  538. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  539. data/generated/google/apis/jobs_v3p1beta1/classes.rb +762 -1023
  540. data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
  541. data/generated/google/apis/kgsearch_v1.rb +1 -1
  542. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  543. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  544. data/generated/google/apis/language_v1.rb +1 -1
  545. data/generated/google/apis/language_v1/classes.rb +93 -111
  546. data/generated/google/apis/language_v1/service.rb +4 -4
  547. data/generated/google/apis/language_v1beta1.rb +1 -1
  548. data/generated/google/apis/language_v1beta1/classes.rb +78 -90
  549. data/generated/google/apis/language_v1beta1/service.rb +2 -2
  550. data/generated/google/apis/language_v1beta2.rb +1 -1
  551. data/generated/google/apis/language_v1beta2/classes.rb +95 -112
  552. data/generated/google/apis/language_v1beta2/service.rb +4 -4
  553. data/generated/google/apis/libraryagent_v1.rb +1 -1
  554. data/generated/google/apis/libraryagent_v1/classes.rb +10 -16
  555. data/generated/google/apis/libraryagent_v1/service.rb +13 -16
  556. data/generated/google/apis/licensing_v1.rb +4 -3
  557. data/generated/google/apis/licensing_v1/classes.rb +1 -1
  558. data/generated/google/apis/licensing_v1/service.rb +55 -85
  559. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  560. data/generated/google/apis/lifesciences_v2beta/classes.rb +262 -290
  561. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  562. data/generated/google/apis/localservices_v1.rb +31 -0
  563. data/generated/google/apis/localservices_v1/classes.rb +419 -0
  564. data/generated/google/apis/localservices_v1/representations.rb +172 -0
  565. data/generated/google/apis/localservices_v1/service.rb +199 -0
  566. data/generated/google/apis/logging_v2.rb +1 -1
  567. data/generated/google/apis/logging_v2/classes.rb +174 -214
  568. data/generated/google/apis/logging_v2/representations.rb +15 -0
  569. data/generated/google/apis/logging_v2/service.rb +1017 -584
  570. data/generated/google/apis/managedidentities_v1.rb +1 -1
  571. data/generated/google/apis/managedidentities_v1/classes.rb +326 -452
  572. data/generated/google/apis/managedidentities_v1/service.rb +78 -99
  573. data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
  574. data/generated/google/apis/managedidentities_v1alpha1/classes.rb +344 -457
  575. data/generated/google/apis/managedidentities_v1alpha1/representations.rb +1 -0
  576. data/generated/google/apis/managedidentities_v1alpha1/service.rb +88 -112
  577. data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
  578. data/generated/google/apis/managedidentities_v1beta1/classes.rb +340 -456
  579. data/generated/google/apis/managedidentities_v1beta1/representations.rb +1 -0
  580. data/generated/google/apis/managedidentities_v1beta1/service.rb +76 -96
  581. data/generated/google/apis/manufacturers_v1.rb +1 -1
  582. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  583. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  584. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  585. data/generated/google/apis/memcache_v1beta2/classes.rb +170 -710
  586. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -120
  587. data/generated/google/apis/memcache_v1beta2/service.rb +58 -190
  588. data/generated/google/apis/ml_v1.rb +1 -1
  589. data/generated/google/apis/ml_v1/classes.rb +956 -1144
  590. data/generated/google/apis/ml_v1/representations.rb +65 -0
  591. data/generated/google/apis/ml_v1/service.rb +194 -253
  592. data/generated/google/apis/monitoring_v1.rb +1 -1
  593. data/generated/google/apis/monitoring_v1/classes.rb +119 -37
  594. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  595. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  596. data/generated/google/apis/monitoring_v3.rb +1 -1
  597. data/generated/google/apis/monitoring_v3/classes.rb +260 -339
  598. data/generated/google/apis/monitoring_v3/representations.rb +2 -0
  599. data/generated/google/apis/monitoring_v3/service.rb +121 -140
  600. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  601. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  602. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  603. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  604. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
  605. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
  606. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  607. data/generated/google/apis/osconfig_v1.rb +1 -1
  608. data/generated/google/apis/osconfig_v1/classes.rb +154 -181
  609. data/generated/google/apis/osconfig_v1/service.rb +22 -27
  610. data/generated/google/apis/osconfig_v1beta.rb +1 -1
  611. data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
  612. data/generated/google/apis/osconfig_v1beta/service.rb +39 -52
  613. data/generated/google/apis/oslogin_v1.rb +1 -1
  614. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  615. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  616. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  617. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  618. data/generated/google/apis/oslogin_v1alpha/classes.rb +14 -12
  619. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  620. data/generated/google/apis/oslogin_v1alpha/service.rb +14 -14
  621. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  622. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  623. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  624. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  625. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  626. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  627. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  628. data/generated/google/apis/people_v1.rb +1 -1
  629. data/generated/google/apis/people_v1/classes.rb +258 -14
  630. data/generated/google/apis/people_v1/representations.rb +98 -0
  631. data/generated/google/apis/people_v1/service.rb +59 -47
  632. data/generated/google/apis/playablelocations_v3.rb +1 -1
  633. data/generated/google/apis/playablelocations_v3/classes.rb +108 -155
  634. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  635. data/generated/google/apis/playcustomapp_v1.rb +1 -1
  636. data/generated/google/apis/playcustomapp_v1/classes.rb +2 -2
  637. data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
  638. data/generated/google/apis/policytroubleshooter_v1.rb +1 -1
  639. data/generated/google/apis/policytroubleshooter_v1/classes.rb +232 -394
  640. data/generated/google/apis/policytroubleshooter_v1/service.rb +2 -2
  641. data/generated/google/apis/policytroubleshooter_v1beta.rb +1 -1
  642. data/generated/google/apis/policytroubleshooter_v1beta/classes.rb +232 -393
  643. data/generated/google/apis/policytroubleshooter_v1beta/service.rb +2 -2
  644. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  645. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +148 -146
  646. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
  647. data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +517 -49
  648. data/generated/google/apis/pubsub_v1.rb +1 -1
  649. data/generated/google/apis/pubsub_v1/classes.rb +399 -518
  650. data/generated/google/apis/pubsub_v1/representations.rb +2 -0
  651. data/generated/google/apis/pubsub_v1/service.rb +220 -246
  652. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  653. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  654. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  655. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  656. data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
  657. data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
  658. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  659. data/generated/google/apis/pubsublite_v1.rb +34 -0
  660. data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
  661. data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
  662. data/generated/google/apis/pubsublite_v1/service.rb +558 -0
  663. data/generated/google/apis/realtimebidding_v1.rb +1 -4
  664. data/generated/google/apis/realtimebidding_v1/classes.rb +198 -291
  665. data/generated/google/apis/realtimebidding_v1/service.rb +98 -135
  666. data/generated/google/apis/recommendationengine_v1beta1.rb +2 -2
  667. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +337 -456
  668. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +0 -16
  669. data/generated/google/apis/recommendationengine_v1beta1/service.rb +141 -207
  670. data/generated/google/apis/recommender_v1.rb +1 -1
  671. data/generated/google/apis/recommender_v1/classes.rb +76 -100
  672. data/generated/google/apis/recommender_v1/service.rb +43 -58
  673. data/generated/google/apis/recommender_v1beta1.rb +1 -1
  674. data/generated/google/apis/recommender_v1beta1/classes.rb +75 -99
  675. data/generated/google/apis/recommender_v1beta1/service.rb +43 -58
  676. data/generated/google/apis/redis_v1.rb +1 -1
  677. data/generated/google/apis/redis_v1/classes.rb +91 -513
  678. data/generated/google/apis/redis_v1/representations.rb +0 -139
  679. data/generated/google/apis/redis_v1/service.rb +93 -110
  680. data/generated/google/apis/redis_v1beta1.rb +1 -1
  681. data/generated/google/apis/redis_v1beta1/classes.rb +95 -517
  682. data/generated/google/apis/redis_v1beta1/representations.rb +0 -139
  683. data/generated/google/apis/redis_v1beta1/service.rb +93 -110
  684. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  685. data/generated/google/apis/remotebuildexecution_v1/classes.rb +951 -1078
  686. data/generated/google/apis/remotebuildexecution_v1/representations.rb +61 -0
  687. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  688. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  689. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +946 -1071
  690. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +61 -0
  691. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  692. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  693. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1099 -1250
  694. data/generated/google/apis/remotebuildexecution_v2/representations.rb +61 -0
  695. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  696. data/generated/google/apis/reseller_v1.rb +2 -2
  697. data/generated/google/apis/reseller_v1/classes.rb +151 -219
  698. data/generated/google/apis/reseller_v1/service.rb +122 -173
  699. data/generated/google/apis/run_v1.rb +1 -1
  700. data/generated/google/apis/run_v1/classes.rb +14 -13
  701. data/generated/google/apis/run_v1/representations.rb +1 -1
  702. data/generated/google/apis/run_v1/service.rb +38 -2
  703. data/generated/google/apis/run_v1alpha1.rb +1 -1
  704. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  705. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  706. data/generated/google/apis/run_v1beta1.rb +1 -1
  707. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  708. data/generated/google/apis/runtimeconfig_v1.rb +1 -1
  709. data/generated/google/apis/runtimeconfig_v1/classes.rb +36 -40
  710. data/generated/google/apis/runtimeconfig_v1/service.rb +19 -22
  711. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  712. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +302 -412
  713. data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
  714. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  715. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  716. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  717. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  718. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  719. data/generated/google/apis/sasportal_v1alpha1/classes.rb +148 -146
  720. data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
  721. data/generated/google/apis/sasportal_v1alpha1/service.rb +517 -49
  722. data/generated/google/apis/script_v1.rb +1 -1
  723. data/generated/google/apis/script_v1/classes.rb +88 -111
  724. data/generated/google/apis/script_v1/service.rb +63 -69
  725. data/generated/google/apis/searchconsole_v1.rb +1 -1
  726. data/generated/google/apis/searchconsole_v1/classes.rb +2 -2
  727. data/generated/google/apis/secretmanager_v1.rb +1 -1
  728. data/generated/google/apis/secretmanager_v1/classes.rb +379 -365
  729. data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
  730. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  731. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  732. data/generated/google/apis/secretmanager_v1beta1/classes.rb +218 -363
  733. data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
  734. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  735. data/generated/google/apis/securitycenter_v1.rb +1 -1
  736. data/generated/google/apis/securitycenter_v1/classes.rb +584 -826
  737. data/generated/google/apis/securitycenter_v1/representations.rb +1 -0
  738. data/generated/google/apis/securitycenter_v1/service.rb +250 -332
  739. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  740. data/generated/google/apis/securitycenter_v1beta1/classes.rb +533 -746
  741. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -0
  742. data/generated/google/apis/securitycenter_v1beta1/service.rb +160 -202
  743. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  744. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +415 -269
  745. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +101 -30
  746. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  747. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  748. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +1124 -1788
  749. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +116 -141
  750. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  751. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +1107 -1774
  752. data/generated/google/apis/serviceconsumermanagement_v1beta1/service.rb +52 -66
  753. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  754. data/generated/google/apis/servicecontrol_v1/classes.rb +526 -641
  755. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  756. data/generated/google/apis/servicecontrol_v2.rb +38 -0
  757. data/generated/google/apis/servicecontrol_v2/classes.rb +1078 -0
  758. data/generated/google/apis/servicecontrol_v2/representations.rb +405 -0
  759. data/generated/google/apis/servicecontrol_v2/service.rb +155 -0
  760. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  761. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
  762. data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
  763. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  764. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  765. data/generated/google/apis/servicemanagement_v1/classes.rb +1288 -2090
  766. data/generated/google/apis/servicemanagement_v1/representations.rb +14 -0
  767. data/generated/google/apis/servicemanagement_v1/service.rb +144 -195
  768. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  769. data/generated/google/apis/servicenetworking_v1/classes.rb +1160 -1763
  770. data/generated/google/apis/servicenetworking_v1/representations.rb +52 -1
  771. data/generated/google/apis/servicenetworking_v1/service.rb +210 -114
  772. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  773. data/generated/google/apis/servicenetworking_v1beta/classes.rb +1091 -1684
  774. data/generated/google/apis/servicenetworking_v1beta/representations.rb +38 -0
  775. data/generated/google/apis/servicenetworking_v1beta/service.rb +52 -63
  776. data/generated/google/apis/serviceusage_v1.rb +1 -1
  777. data/generated/google/apis/serviceusage_v1/classes.rb +1144 -1823
  778. data/generated/google/apis/serviceusage_v1/representations.rb +4 -0
  779. data/generated/google/apis/serviceusage_v1/service.rb +67 -80
  780. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  781. data/generated/google/apis/serviceusage_v1beta1/classes.rb +1273 -1986
  782. data/generated/google/apis/serviceusage_v1beta1/representations.rb +8 -0
  783. data/generated/google/apis/serviceusage_v1beta1/service.rb +130 -162
  784. data/generated/google/apis/sheets_v4.rb +1 -1
  785. data/generated/google/apis/sheets_v4/classes.rb +3933 -5008
  786. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  787. data/generated/google/apis/sheets_v4/service.rb +113 -149
  788. data/generated/google/apis/site_verification_v1.rb +1 -1
  789. data/generated/google/apis/slides_v1.rb +1 -1
  790. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  791. data/generated/google/apis/slides_v1/service.rb +23 -30
  792. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  793. data/generated/google/apis/sourcerepo_v1/classes.rb +6 -6
  794. data/generated/google/apis/spanner_v1.rb +1 -1
  795. data/generated/google/apis/spanner_v1/classes.rb +1553 -2156
  796. data/generated/google/apis/spanner_v1/representations.rb +1 -0
  797. data/generated/google/apis/spanner_v1/service.rb +443 -618
  798. data/generated/google/apis/speech_v1.rb +1 -1
  799. data/generated/google/apis/speech_v1/classes.rb +174 -220
  800. data/generated/google/apis/speech_v1/service.rb +27 -32
  801. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  802. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  803. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  804. data/generated/google/apis/speech_v2beta1.rb +1 -1
  805. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  806. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  807. data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
  808. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +438 -452
  809. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +106 -87
  810. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
  811. data/generated/google/apis/storage_v1.rb +1 -1
  812. data/generated/google/apis/storage_v1/classes.rb +8 -7
  813. data/generated/google/apis/storage_v1/representations.rb +2 -2
  814. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  815. data/generated/google/apis/storagetransfer_v1/classes.rb +263 -343
  816. data/generated/google/apis/storagetransfer_v1/service.rb +43 -40
  817. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  818. data/generated/google/apis/streetviewpublish_v1/classes.rb +106 -148
  819. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  820. data/generated/google/apis/sts_v1.rb +32 -0
  821. data/generated/google/apis/sts_v1/classes.rb +120 -0
  822. data/generated/google/apis/sts_v1/representations.rb +59 -0
  823. data/generated/google/apis/sts_v1/service.rb +90 -0
  824. data/generated/google/apis/sts_v1beta.rb +32 -0
  825. data/generated/google/apis/sts_v1beta/classes.rb +191 -0
  826. data/generated/google/apis/{oauth2_v2 → sts_v1beta}/representations.rb +14 -21
  827. data/generated/google/apis/sts_v1beta/service.rb +92 -0
  828. data/generated/google/apis/tagmanager_v1.rb +2 -3
  829. data/generated/google/apis/tagmanager_v1/classes.rb +225 -288
  830. data/generated/google/apis/tagmanager_v1/service.rb +22 -23
  831. data/generated/google/apis/tagmanager_v2.rb +2 -3
  832. data/generated/google/apis/tagmanager_v2/classes.rb +240 -280
  833. data/generated/google/apis/tagmanager_v2/representations.rb +1 -0
  834. data/generated/google/apis/tagmanager_v2/service.rb +189 -285
  835. data/generated/google/apis/tasks_v1.rb +1 -1
  836. data/generated/google/apis/tasks_v1/classes.rb +21 -22
  837. data/generated/google/apis/tasks_v1/service.rb +19 -19
  838. data/generated/google/apis/testing_v1.rb +1 -1
  839. data/generated/google/apis/testing_v1/classes.rb +317 -382
  840. data/generated/google/apis/testing_v1/representations.rb +2 -0
  841. data/generated/google/apis/testing_v1/service.rb +22 -28
  842. data/generated/google/apis/texttospeech_v1.rb +1 -1
  843. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  844. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  845. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  846. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  847. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  848. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  849. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  850. data/generated/google/apis/toolresults_v1beta3/classes.rb +672 -931
  851. data/generated/google/apis/toolresults_v1beta3/representations.rb +1 -0
  852. data/generated/google/apis/toolresults_v1beta3/service.rb +231 -349
  853. data/generated/google/apis/tpu_v1.rb +1 -1
  854. data/generated/google/apis/tpu_v1/classes.rb +122 -78
  855. data/generated/google/apis/tpu_v1/representations.rb +19 -0
  856. data/generated/google/apis/tpu_v1/service.rb +21 -25
  857. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  858. data/generated/google/apis/tpu_v1alpha1/classes.rb +122 -78
  859. data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
  860. data/generated/google/apis/tpu_v1alpha1/service.rb +21 -25
  861. data/generated/google/apis/trafficdirector_v2.rb +34 -0
  862. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  863. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  864. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  865. data/generated/google/apis/translate_v3.rb +1 -1
  866. data/generated/google/apis/translate_v3/classes.rb +148 -175
  867. data/generated/google/apis/translate_v3/service.rb +122 -138
  868. data/generated/google/apis/translate_v3beta1.rb +1 -1
  869. data/generated/google/apis/translate_v3beta1/classes.rb +149 -170
  870. data/generated/google/apis/translate_v3beta1/service.rb +122 -138
  871. data/generated/google/apis/vault_v1.rb +1 -1
  872. data/generated/google/apis/vault_v1/classes.rb +80 -103
  873. data/generated/google/apis/vault_v1/service.rb +31 -37
  874. data/generated/google/apis/vectortile_v1.rb +1 -1
  875. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  876. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  877. data/generated/google/apis/verifiedaccess_v1.rb +1 -1
  878. data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
  879. data/generated/google/apis/videointelligence_v1.rb +1 -1
  880. data/generated/google/apis/videointelligence_v1/classes.rb +753 -918
  881. data/generated/google/apis/videointelligence_v1/service.rb +71 -48
  882. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  883. data/generated/google/apis/videointelligence_v1beta2/classes.rb +748 -911
  884. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  885. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  886. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +748 -911
  887. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  888. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  889. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +748 -911
  890. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  891. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  892. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +754 -920
  893. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  894. data/generated/google/apis/vision_v1.rb +1 -1
  895. data/generated/google/apis/vision_v1/classes.rb +1304 -1870
  896. data/generated/google/apis/vision_v1/service.rb +254 -340
  897. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  898. data/generated/google/apis/vision_v1p1beta1/classes.rb +1246 -1790
  899. data/generated/google/apis/vision_v1p1beta1/service.rb +91 -121
  900. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  901. data/generated/google/apis/vision_v1p2beta1/classes.rb +1246 -1790
  902. data/generated/google/apis/vision_v1p2beta1/service.rb +91 -121
  903. data/generated/google/apis/webfonts_v1.rb +2 -3
  904. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  905. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  906. data/generated/google/apis/websecurityscanner_v1.rb +1 -1
  907. data/generated/google/apis/websecurityscanner_v1/classes.rb +71 -95
  908. data/generated/google/apis/websecurityscanner_v1/service.rb +46 -65
  909. data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
  910. data/generated/google/apis/websecurityscanner_v1alpha/classes.rb +55 -63
  911. data/generated/google/apis/websecurityscanner_v1alpha/service.rb +46 -65
  912. data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
  913. data/generated/google/apis/websecurityscanner_v1beta/classes.rb +77 -92
  914. data/generated/google/apis/websecurityscanner_v1beta/service.rb +46 -65
  915. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → workflowexecutions_v1beta.rb} +9 -10
  916. data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
  917. data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
  918. data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -73
  919. data/generated/google/apis/workflows_v1beta.rb +34 -0
  920. data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
  921. data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
  922. data/generated/google/apis/workflows_v1beta/service.rb +437 -0
  923. data/generated/google/apis/youtube_analytics_v2.rb +1 -1
  924. data/generated/google/apis/youtube_analytics_v2/classes.rb +77 -104
  925. data/generated/google/apis/youtube_analytics_v2/service.rb +106 -126
  926. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  927. data/generated/google/apis/youtube_partner_v1/service.rb +19 -19
  928. data/generated/google/apis/youtube_v3.rb +1 -1
  929. data/generated/google/apis/youtube_v3/classes.rb +904 -1594
  930. data/generated/google/apis/youtube_v3/representations.rb +0 -214
  931. data/generated/google/apis/youtube_v3/service.rb +971 -1397
  932. data/generated/google/apis/youtubereporting_v1.rb +1 -1
  933. data/generated/google/apis/youtubereporting_v1/classes.rb +20 -29
  934. data/generated/google/apis/youtubereporting_v1/service.rb +40 -43
  935. data/google-api-client.gemspec +2 -1
  936. data/lib/google/apis/core/base_service.rb +7 -1
  937. data/lib/google/apis/version.rb +1 -1
  938. metadata +98 -36
  939. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  940. data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
  941. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  942. data/generated/google/apis/dns_v2beta1.rb +0 -43
  943. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  944. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  945. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  946. data/generated/google/apis/oauth2_v2.rb +0 -40
  947. data/generated/google/apis/oauth2_v2/classes.rb +0 -165
  948. data/generated/google/apis/oauth2_v2/service.rb +0 -158
  949. data/generated/google/apis/plus_v1.rb +0 -43
  950. data/generated/google/apis/plus_v1/classes.rb +0 -2094
  951. data/generated/google/apis/plus_v1/representations.rb +0 -907
  952. data/generated/google/apis/plus_v1/service.rb +0 -451
  953. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2305
  954. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  955. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1326
  956. data/generated/google/apis/storage_v1beta2.rb +0 -40
  957. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  958. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  959. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
data/generated/google/apis/videointelligence_v1p2beta1/service.rb
@@ -49,10 +49,10 @@ module Google
49
49
  @batch_path = 'batch'
50
50
  end
51
51
 
52
- # Performs asynchronous video annotation. Progress and results can be
53
- # retrieved through the `google.longrunning.Operations` interface.
54
- # `Operation.metadata` contains `AnnotateVideoProgress` (progress).
55
- # `Operation.response` contains `AnnotateVideoResponse` (results).
52
+ # Performs asynchronous video annotation. Progress and results can be retrieved
53
+ # through the `google.longrunning.Operations` interface. `Operation.metadata`
54
+ # contains `AnnotateVideoProgress` (progress). `Operation.response` contains `
55
+ # AnnotateVideoResponse` (results).
56
56
  # @param [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1AnnotateVideoRequest] google_cloud_videointelligence_v1p2beta1_annotate_video_request_object
57
57
  # @param [String] fields
58
58
  # Selector specifying which fields to include in a partial response.
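
For orientation, here is a minimal, hedged sketch of how this asynchronous call might be driven from the generated client. Only the request class named above is confirmed by this diff; the service class name (CloudVideoIntelligenceService), the annotate_video method name, the input_uri/features values and the use of application-default credentials are assumptions.

    require 'googleauth'
    require 'google/apis/videointelligence_v1p2beta1'

    videointelligence = Google::Apis::VideointelligenceV1p2beta1::CloudVideoIntelligenceService.new
    videointelligence.authorization = Google::Auth.get_application_default(
      ['https://www.googleapis.com/auth/cloud-platform']
    )

    request = Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1AnnotateVideoRequest.new(
      input_uri: 'gs://my-bucket/my-video.mp4', # hypothetical Cloud Storage URI
      features: ['LABEL_DETECTION']             # hypothetical feature selection
    )

    # Returns a long-running operation; progress is reported via Operation.metadata
    # (AnnotateVideoProgress) and the final result via Operation.response
    # (AnnotateVideoResponse), as the comment above describes.
    operation = videointelligence.annotate_video(request)
    puts operation.name
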
data/generated/google/apis/videointelligence_v1p3beta1.rb
@@ -27,7 +27,7 @@ module Google
27
27
  # @see https://cloud.google.com/video-intelligence/docs/
28
28
  module VideointelligenceV1p3beta1
29
29
  VERSION = 'V1p3beta1'
30
- REVISION = '20200602'
30
+ REVISION = '20200824'
31
31
 
32
32
  # View and manage your data across Google Cloud Platform services
33
33
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
data/generated/google/apis/videointelligence_v1p3beta1/classes.rb
@@ -22,9 +22,9 @@ module Google
22
22
  module Apis
23
23
  module VideointelligenceV1p3beta1
24
24
 
25
- # Video annotation progress. Included in the `metadata`
26
- # field of the `Operation` returned by the `GetOperation`
27
- # call of the `google::longrunning::Operations` service.
25
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
26
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
27
+ # service.
28
28
  class GoogleCloudVideointelligenceV1AnnotateVideoProgress
29
29
  include Google::Apis::Core::Hashable
30
30
 
@@ -43,9 +43,9 @@ module Google
43
43
  end
44
44
  end
45
45
 
46
- # Video annotation response. Included in the `response`
47
- # field of the `Operation` returned by the `GetOperation`
48
- # call of the `google::longrunning::Operations` service.
46
+ # Video annotation response. Included in the `response` field of the `Operation`
47
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
48
+ # service.
49
49
  class GoogleCloudVideointelligenceV1AnnotateVideoResponse
50
50
  include Google::Apis::Core::Hashable
51
51
 
@@ -73,14 +73,14 @@ module Google
73
73
  # @return [Float]
74
74
  attr_accessor :confidence
75
75
 
76
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
77
- # A full list of supported type names will be provided in the document.
76
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
77
+ # full list of supported type names will be provided in the document.
78
78
  # Corresponds to the JSON property `name`
79
79
  # @return [String]
80
80
  attr_accessor :name
81
81
 
82
- # Text value of the detection result. For example, the value for "HairColor"
83
- # can be "black", "blonde", etc.
82
+ # Text value of the detection result. For example, the value for "HairColor" can
83
+ # be "black", "blonde", etc.
84
84
  # Corresponds to the JSON property `value`
85
85
  # @return [String]
86
86
  attr_accessor :value
@@ -112,9 +112,8 @@ module Google
112
112
  # @return [String]
113
113
  attr_accessor :name
114
114
 
115
- # A vertex represents a 2D point in the image.
116
- # NOTE: the normalized vertex coordinates are relative to the original image
117
- # and range from 0 to 1.
115
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
116
+ # coordinates are relative to the original image and range from 0 to 1.
118
117
  # Corresponds to the JSON property `point`
119
118
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedVertex]
120
119
  attr_accessor :point
@@ -140,8 +139,7 @@ module Google
140
139
  # @return [String]
141
140
  attr_accessor :description
142
141
 
143
- # Opaque entity ID. Some IDs may be available in
144
- # [Google Knowledge Graph Search
142
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
145
143
  # API](https://developers.google.com/knowledge-graph/).
146
144
  # Corresponds to the JSON property `entityId`
147
145
  # @return [String]
@@ -164,9 +162,9 @@ module Google
164
162
  end
165
163
  end
166
164
 
167
- # Explicit content annotation (based on per-frame visual signals only).
168
- # If no explicit content has been detected in a frame, no annotations are
169
- # present for that frame.
165
+ # Explicit content annotation (based on per-frame visual signals only). If no
166
+ # explicit content has been detected in a frame, no annotations are present for
167
+ # that frame.
170
168
  class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
171
169
  include Google::Apis::Core::Hashable
172
170
 
@@ -221,10 +219,9 @@ module Google
221
219
  class GoogleCloudVideointelligenceV1LabelAnnotation
222
220
  include Google::Apis::Core::Hashable
223
221
 
224
- # Common categories for the detected entity.
225
- # For example, when the label is `Terrier`, the category is likely `dog`. And
226
- # in some cases there might be more than one categories e.g., `Terrier` could
227
- # also be a `pet`.
222
+ # Common categories for the detected entity. For example, when the label is `
223
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
224
+ # than one categories e.g., `Terrier` could also be a `pet`.
228
225
  # Corresponds to the JSON property `categoryEntities`
229
226
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Entity>]
230
227
  attr_accessor :category_entities
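
To make the `Terrier`/`dog` example above concrete, a hedged sketch of walking label annotations on a results object; `segment_label_annotations`, `category_entities` and `description` are shown elsewhere in this changeset, while the `results` variable and the `entity` field on each label are assumptions.

    # `results` is assumed to be one GoogleCloudVideointelligenceV1VideoAnnotationResults element.
    results.segment_label_annotations.to_a.each do |label|
      # Broader categories for the detected entity, e.g. "dog" or "pet" for "Terrier".
      categories = label.category_entities.to_a.map(&:description)
      puts "#{label.entity&.description}: #{categories.join(', ')}"
    end
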
@@ -323,14 +320,14 @@ module Google
323
320
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Entity]
324
321
  attr_accessor :entity
325
322
 
326
- # All video segments where the recognized logo appears. There might be
327
- # multiple instances of the same logo class appearing in one VideoSegment.
323
+ # All video segments where the recognized logo appears. There might be multiple
324
+ # instances of the same logo class appearing in one VideoSegment.
328
325
  # Corresponds to the JSON property `segments`
329
326
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment>]
330
327
  attr_accessor :segments
331
328
 
332
- # All logo tracks where the recognized logo appears. Each track corresponds
333
- # to one logo instance appearing in consecutive frames.
329
+ # All logo tracks where the recognized logo appears. Each track corresponds to
330
+ # one logo instance appearing in consecutive frames.
334
331
  # Corresponds to the JSON property `tracks`
335
332
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Track>]
336
333
  attr_accessor :tracks
@@ -347,9 +344,8 @@ module Google
347
344
  end
348
345
  end
349
346
 
350
- # Normalized bounding box.
351
- # The normalized vertex coordinates are relative to the original image.
352
- # Range: [0, 1].
347
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
348
+ # original image. Range: [0, 1].
353
349
  class GoogleCloudVideointelligenceV1NormalizedBoundingBox
354
350
  include Google::Apis::Core::Hashable
355
351
 
@@ -387,20 +383,12 @@ module Google
387
383
  end
388
384
 
389
385
  # Normalized bounding polygon for text (that might not be aligned with axis).
390
- # Contains list of the corner points in clockwise order starting from
391
- # top-left corner. For example, for a rectangular bounding box:
392
- # When the text is horizontal it might look like:
393
- # 0----1
394
- # | |
395
- # 3----2
396
- # When it's clockwise rotated 180 degrees around the top-left corner it
397
- # becomes:
398
- # 2----3
399
- # | |
400
- # 1----0
401
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
402
- # than 0, or greater than 1 due to trignometric calculations for location of
403
- # the box.
386
+ # Contains list of the corner points in clockwise order starting from top-left
387
+ # corner. For example, for a rectangular bounding box: When the text is
388
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
389
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
390
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
391
+ # or greater than 1 due to trignometric calculations for location of the box.
404
392
  class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
405
393
  include Google::Apis::Core::Hashable
406
394
 
@@ -419,9 +407,8 @@ module Google
419
407
  end
420
408
  end
421
409
 
422
- # A vertex represents a 2D point in the image.
423
- # NOTE: the normalized vertex coordinates are relative to the original image
424
- # and range from 0 to 1.
410
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
411
+ # coordinates are relative to the original image and range from 0 to 1.
425
412
  class GoogleCloudVideointelligenceV1NormalizedVertex
426
413
  include Google::Apis::Core::Hashable
427
414
 
@@ -460,10 +447,10 @@ module Google
460
447
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Entity]
461
448
  attr_accessor :entity
462
449
 
463
- # Information corresponding to all frames where this object track appears.
464
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
465
- # messages in frames.
466
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
450
+ # Information corresponding to all frames where this object track appears. Non-
451
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
452
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
453
+ # frames.
467
454
  # Corresponds to the JSON property `frames`
468
455
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
469
456
  attr_accessor :frames
@@ -473,12 +460,11 @@ module Google
473
460
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment]
474
461
  attr_accessor :segment
475
462
 
476
- # Streaming mode ONLY.
477
- # In streaming mode, we do not know the end time of a tracked object
478
- # before it is completed. Hence, there is no VideoSegment info returned.
479
- # Instead, we provide a unique identifiable integer track_id so that
480
- # the customers can correlate the results of the ongoing
481
- # ObjectTrackAnnotation of the same track_id over time.
463
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
464
+ # tracked object before it is completed. Hence, there is no VideoSegment info
465
+ # returned. Instead, we provide a unique identifiable integer track_id so that
466
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
467
+ # of the same track_id over time.
482
468
  # Corresponds to the JSON property `trackId`
483
469
  # @return [Fixnum]
484
470
  attr_accessor :track_id
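
The streaming-versus-batch behaviour described above can be illustrated with a hedged sketch; `frames`, `segment` and `track_id` are the fields shown in the hunks above, while `annotation` (an object tracking annotation instance, whose class name is not visible here) and the presence check used to tell the two modes apart are assumptions.

    if annotation.track_id
      # Streaming mode: no VideoSegment yet; correlate partial results by track_id.
      puts "streaming track ##{annotation.track_id}: #{annotation.frames.to_a.size} frame(s) so far"
    else
      # Non-streaming batch mode: the segment and all of its frames arrive together.
      puts "batch track #{annotation.segment&.start_time_offset}..#{annotation.segment&.end_time_offset}"
    end
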
@@ -508,9 +494,8 @@ module Google
508
494
  class GoogleCloudVideointelligenceV1ObjectTrackingFrame
509
495
  include Google::Apis::Core::Hashable
510
496
 
511
- # Normalized bounding box.
512
- # The normalized vertex coordinates are relative to the original image.
513
- # Range: [0, 1].
497
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
498
+ # original image. Range: [0, 1].
514
499
  # Corresponds to the JSON property `normalizedBoundingBox`
515
500
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
516
501
  attr_accessor :normalized_bounding_box
@@ -537,10 +522,10 @@ module Google
537
522
 
538
523
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
539
524
  # indicates an estimated greater likelihood that the recognized words are
540
- # correct. This field is set only for the top alternative.
541
- # This field is not guaranteed to be accurate and users should not rely on it
542
- # to be always provided.
543
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
525
+ # correct. This field is set only for the top alternative. This field is not
526
+ # guaranteed to be accurate and users should not rely on it to be always
527
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
528
+ # not set.
544
529
  # Corresponds to the JSON property `confidence`
545
530
  # @return [Float]
546
531
  attr_accessor :confidence
@@ -551,8 +536,8 @@ module Google
551
536
  attr_accessor :transcript
552
537
 
553
538
  # Output only. A list of word-specific information for each recognized word.
554
- # Note: When `enable_speaker_diarization` is set to true, you will see all
555
- # the words from the beginning of the audio.
539
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
540
+ # words from the beginning of the audio.
556
541
  # Corresponds to the JSON property `words`
557
542
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1WordInfo>]
558
543
  attr_accessor :words
@@ -573,18 +558,17 @@ module Google
573
558
  class GoogleCloudVideointelligenceV1SpeechTranscription
574
559
  include Google::Apis::Core::Hashable
575
560
 
576
- # May contain one or more recognition hypotheses (up to the maximum specified
577
- # in `max_alternatives`). These alternatives are ordered in terms of
578
- # accuracy, with the top (first) alternative being the most probable, as
579
- # ranked by the recognizer.
561
+ # May contain one or more recognition hypotheses (up to the maximum specified in
562
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
563
+ # the top (first) alternative being the most probable, as ranked by the
564
+ # recognizer.
580
565
  # Corresponds to the JSON property `alternatives`
581
566
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
582
567
  attr_accessor :alternatives
583
568
 
584
569
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
585
- # language tag of
586
- # the language in this result. This language code was detected to have the
587
- # most likelihood of being spoken in the audio.
570
+ # language tag of the language in this result. This language code was detected
571
+ # to have the most likelihood of being spoken in the audio.
588
572
  # Corresponds to the JSON property `languageCode`
589
573
  # @return [String]
590
574
  attr_accessor :language_code
@@ -633,27 +617,19 @@ module Google
633
617
  end
634
618
  end
635
619
 
636
- # Video frame level annotation results for text annotation (OCR).
637
- # Contains information regarding timestamp and bounding box locations for the
638
- # frames containing detected OCR text snippets.
620
+ # Video frame level annotation results for text annotation (OCR). Contains
621
+ # information regarding timestamp and bounding box locations for the frames
622
+ # containing detected OCR text snippets.
639
623
  class GoogleCloudVideointelligenceV1TextFrame
640
624
  include Google::Apis::Core::Hashable
641
625
 
642
626
  # Normalized bounding polygon for text (that might not be aligned with axis).
643
- # Contains list of the corner points in clockwise order starting from
644
- # top-left corner. For example, for a rectangular bounding box:
645
- # When the text is horizontal it might look like:
646
- # 0----1
647
- # | |
648
- # 3----2
649
- # When it's clockwise rotated 180 degrees around the top-left corner it
650
- # becomes:
651
- # 2----3
652
- # | |
653
- # 1----0
654
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
655
- # than 0, or greater than 1 due to trignometric calculations for location of
656
- # the box.
627
+ # Contains list of the corner points in clockwise order starting from top-left
628
+ # corner. For example, for a rectangular bounding box: When the text is
629
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
630
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
631
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
632
+ # or greater than 1 due to trignometric calculations for location of the box.
657
633
  # Corresponds to the JSON property `rotatedBoundingBox`
658
634
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
659
635
  attr_accessor :rotated_bounding_box
@@ -706,9 +682,8 @@ module Google
706
682
  end
707
683
  end
708
684
 
709
- # For tracking related features.
710
- # An object at time_offset with attributes, and located with
711
- # normalized_bounding_box.
685
+ # For tracking related features. An object at time_offset with attributes, and
686
+ # located with normalized_bounding_box.
712
687
  class GoogleCloudVideointelligenceV1TimestampedObject
713
688
  include Google::Apis::Core::Hashable
714
689
 
@@ -722,15 +697,14 @@ module Google
722
697
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1DetectedLandmark>]
723
698
  attr_accessor :landmarks
724
699
 
725
- # Normalized bounding box.
726
- # The normalized vertex coordinates are relative to the original image.
727
- # Range: [0, 1].
700
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
701
+ # original image. Range: [0, 1].
728
702
  # Corresponds to the JSON property `normalizedBoundingBox`
729
703
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
730
704
  attr_accessor :normalized_bounding_box
731
705
 
732
- # Time-offset, relative to the beginning of the video,
733
- # corresponding to the video frame for this object.
706
+ # Time-offset, relative to the beginning of the video, corresponding to the
707
+ # video frame for this object.
734
708
  # Corresponds to the JSON property `timeOffset`
735
709
  # @return [String]
736
710
  attr_accessor :time_offset
@@ -789,20 +763,19 @@ module Google
789
763
  class GoogleCloudVideointelligenceV1VideoAnnotationProgress
790
764
  include Google::Apis::Core::Hashable
791
765
 
792
- # Specifies which feature is being tracked if the request contains more than
793
- # one feature.
766
+ # Specifies which feature is being tracked if the request contains more than one
767
+ # feature.
794
768
  # Corresponds to the JSON property `feature`
795
769
  # @return [String]
796
770
  attr_accessor :feature
797
771
 
798
- # Video file location in
799
- # [Cloud Storage](https://cloud.google.com/storage/).
772
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
800
773
  # Corresponds to the JSON property `inputUri`
801
774
  # @return [String]
802
775
  attr_accessor :input_uri
803
776
 
804
- # Approximate percentage processed thus far. Guaranteed to be
805
- # 100 when fully processed.
777
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
778
+ # processed.
806
779
  # Corresponds to the JSON property `progressPercent`
807
780
  # @return [Fixnum]
808
781
  attr_accessor :progress_percent
@@ -841,31 +814,30 @@ module Google
841
814
  class GoogleCloudVideointelligenceV1VideoAnnotationResults
842
815
  include Google::Apis::Core::Hashable
843
816
 
844
- # The `Status` type defines a logical error model that is suitable for
845
- # different programming environments, including REST APIs and RPC APIs. It is
846
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
847
- # three pieces of data: error code, error message, and error details.
848
- # You can find out more about this error model and how to work with it in the
849
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
817
+ # The `Status` type defines a logical error model that is suitable for different
818
+ # programming environments, including REST APIs and RPC APIs. It is used by [
819
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
820
+ # data: error code, error message, and error details. You can find out more
821
+ # about this error model and how to work with it in the [API Design Guide](https:
822
+ # //cloud.google.com/apis/design/errors).
850
823
  # Corresponds to the JSON property `error`
851
824
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
852
825
  attr_accessor :error
853
826
 
854
- # Explicit content annotation (based on per-frame visual signals only).
855
- # If no explicit content has been detected in a frame, no annotations are
856
- # present for that frame.
827
+ # Explicit content annotation (based on per-frame visual signals only). If no
828
+ # explicit content has been detected in a frame, no annotations are present for
829
+ # that frame.
857
830
  # Corresponds to the JSON property `explicitAnnotation`
858
831
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
859
832
  attr_accessor :explicit_annotation
860
833
 
861
- # Label annotations on frame level.
862
- # There is exactly one element for each unique label.
834
+ # Label annotations on frame level. There is exactly one element for each unique
835
+ # label.
863
836
  # Corresponds to the JSON property `frameLabelAnnotations`
864
837
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
865
838
  attr_accessor :frame_label_annotations
866
839
 
867
- # Video file location in
868
- # [Cloud Storage](https://cloud.google.com/storage/).
840
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
869
841
  # Corresponds to the JSON property `inputUri`
870
842
  # @return [String]
871
843
  attr_accessor :input_uri
@@ -892,11 +864,11 @@ module Google
892
864
  attr_accessor :segment_label_annotations
893
865
 
894
866
  # Presence label annotations on video level or user-specified segment level.
895
- # There is exactly one element for each unique label. Compared to the
896
- # existing topical `segment_label_annotations`, this field presents more
897
- # fine-grained, segment-level labels detected in video content and is made
898
- # available only when the client sets `LabelDetectionConfig.model` to
899
- # "builtin/latest" in the request.
867
+ # There is exactly one element for each unique label. Compared to the existing
868
+ # topical `segment_label_annotations`, this field presents more fine-grained,
869
+ # segment-level labels detected in video content and is made available only when
870
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
871
+ # request.
900
872
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
901
873
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
902
874
  attr_accessor :segment_presence_label_annotations
@@ -906,17 +878,17 @@ module Google
906
878
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment>]
907
879
  attr_accessor :shot_annotations
908
880
 
909
- # Topical label annotations on shot level.
910
- # There is exactly one element for each unique label.
881
+ # Topical label annotations on shot level. There is exactly one element for each
882
+ # unique label.
911
883
  # Corresponds to the JSON property `shotLabelAnnotations`
912
884
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
913
885
  attr_accessor :shot_label_annotations
914
886
 
915
887
  # Presence label annotations on shot level. There is exactly one element for
916
- # each unique label. Compared to the existing topical
917
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
918
- # labels detected in video content and is made available only when the client
919
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
888
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
889
+ # this field presents more fine-grained, shot-level labels detected in video
890
+ # content and is made available only when the client sets `LabelDetectionConfig.
891
+ # model` to "builtin/latest" in the request.
920
892
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
921
893
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
922
894
  attr_accessor :shot_presence_label_annotations
@@ -926,9 +898,8 @@ module Google
926
898
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1SpeechTranscription>]
927
899
  attr_accessor :speech_transcriptions
928
900
 
929
- # OCR text detection and tracking.
930
- # Annotations for list of detected text snippets. Each will have list of
931
- # frame information associated with it.
901
+ # OCR text detection and tracking. Annotations for list of detected text
902
+ # snippets. Each will have list of frame information associated with it.
932
903
  # Corresponds to the JSON property `textAnnotations`
933
904
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1TextAnnotation>]
934
905
  attr_accessor :text_annotations
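
Because per-video failures are reported through the `error` field rather than by failing the whole operation, a hedged sketch of consuming one results object might look like this; `error`, `input_uri`, `text_annotations` and `speech_transcriptions` are shown above, while `results` itself and the `code`/`message` accessors on the Status object are assumptions based on the description of `Status`.

    if results.error
      # The Status message carries an error code and an error message, as described above.
      warn "annotation of #{results.input_uri} failed: #{results.error.code} #{results.error.message}"
    else
      puts "#{results.text_annotations.to_a.size} OCR snippet(s), " \
           "#{results.speech_transcriptions.to_a.size} transcription(s)"
    end
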
@@ -960,14 +931,14 @@ module Google
960
931
  class GoogleCloudVideointelligenceV1VideoSegment
961
932
  include Google::Apis::Core::Hashable
962
933
 
963
- # Time-offset, relative to the beginning of the video,
964
- # corresponding to the end of the segment (inclusive).
934
+ # Time-offset, relative to the beginning of the video, corresponding to the end
935
+ # of the segment (inclusive).
965
936
  # Corresponds to the JSON property `endTimeOffset`
966
937
  # @return [String]
967
938
  attr_accessor :end_time_offset
968
939
 
969
- # Time-offset, relative to the beginning of the video,
970
- # corresponding to the start of the segment (inclusive).
940
+ # Time-offset, relative to the beginning of the video, corresponding to the
941
+ # start of the segment (inclusive).
971
942
  # Corresponds to the JSON property `startTimeOffset`
972
943
  # @return [String]
973
944
  attr_accessor :start_time_offset
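
Both offsets are duration strings measured from the start of the video; a minimal hedged sketch of building a segment follows (the field names come from this hunk, while the literal '30s'/'60s' values and keyword-argument construction via Google::Apis::Core::Hashable are assumptions).

    segment = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment.new(
      start_time_offset: '30s', # inclusive start, relative to the beginning of the video
      end_time_offset: '60s'    # inclusive end
    )
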
@@ -984,41 +955,41 @@ module Google
984
955
  end
985
956
 
986
957
  # Word-specific information for recognized words. Word information is only
987
- # included in the response when certain request parameters are set, such
988
- # as `enable_word_time_offsets`.
958
+ # included in the response when certain request parameters are set, such as `
959
+ # enable_word_time_offsets`.
989
960
  class GoogleCloudVideointelligenceV1WordInfo
990
961
  include Google::Apis::Core::Hashable
991
962
 
992
963
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
993
964
  # indicates an estimated greater likelihood that the recognized words are
994
- # correct. This field is set only for the top alternative.
995
- # This field is not guaranteed to be accurate and users should not rely on it
996
- # to be always provided.
997
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
965
+ # correct. This field is set only for the top alternative. This field is not
966
+ # guaranteed to be accurate and users should not rely on it to be always
967
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
968
+ # not set.
998
969
  # Corresponds to the JSON property `confidence`
999
970
  # @return [Float]
1000
971
  attr_accessor :confidence
1001
972
 
1002
- # Time offset relative to the beginning of the audio, and
1003
- # corresponding to the end of the spoken word. This field is only set if
1004
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1005
- # experimental feature and the accuracy of the time offset can vary.
973
+ # Time offset relative to the beginning of the audio, and corresponding to the
974
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
975
+ # true` and only in the top hypothesis. This is an experimental feature and the
976
+ # accuracy of the time offset can vary.
1006
977
  # Corresponds to the JSON property `endTime`
1007
978
  # @return [String]
1008
979
  attr_accessor :end_time
1009
980
 
1010
- # Output only. A distinct integer value is assigned for every speaker within
1011
- # the audio. This field specifies which one of those speakers was detected to
1012
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
1013
- # and is only set if speaker diarization is enabled.
981
+ # Output only. A distinct integer value is assigned for every speaker within the
982
+ # audio. This field specifies which one of those speakers was detected to have
983
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
984
+ # only set if speaker diarization is enabled.
1014
985
  # Corresponds to the JSON property `speakerTag`
1015
986
  # @return [Fixnum]
1016
987
  attr_accessor :speaker_tag
1017
988
 
1018
- # Time offset relative to the beginning of the audio, and
1019
- # corresponding to the start of the spoken word. This field is only set if
1020
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1021
- # experimental feature and the accuracy of the time offset can vary.
989
+ # Time offset relative to the beginning of the audio, and corresponding to the
990
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
991
+ # true` and only in the top hypothesis. This is an experimental feature and the
992
+ # accuracy of the time offset can vary.
1022
993
  # Corresponds to the JSON property `startTime`
1023
994
  # @return [String]
1024
995
  attr_accessor :start_time
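
Putting the transcription pieces together, a hedged sketch of printing the top hypothesis with word timings; `alternatives`, `transcript`, `confidence`, `words`, `start_time`, `end_time` and `speaker_tag` all appear in this diff, while `transcription` (one `speech_transcriptions` element) and the `word` accessor holding the token itself are assumptions.

    best = transcription.alternatives.to_a.first
    if best
      puts "#{best.transcript.inspect} (confidence #{best.confidence})"
      best.words.to_a.each do |word|
        # start_time/end_time are only set when enable_word_time_offsets is true, and
        # speaker_tag only when speaker diarization is enabled, as noted above.
        puts "  #{word.start_time}-#{word.end_time} speaker=#{word.speaker_tag} #{word.word}"
      end
    end
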
@@ -1042,9 +1013,9 @@ module Google
1042
1013
  end
1043
1014
  end
1044
1015
 
1045
- # Video annotation progress. Included in the `metadata`
1046
- # field of the `Operation` returned by the `GetOperation`
1047
- # call of the `google::longrunning::Operations` service.
1016
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
1017
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1018
+ # service.
1048
1019
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
1049
1020
  include Google::Apis::Core::Hashable
1050
1021
 
@@ -1063,9 +1034,9 @@ module Google
1063
1034
  end
1064
1035
  end
1065
1036
 
1066
- # Video annotation response. Included in the `response`
1067
- # field of the `Operation` returned by the `GetOperation`
1068
- # call of the `google::longrunning::Operations` service.
1037
+ # Video annotation response. Included in the `response` field of the `Operation`
1038
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1039
+ # service.
1069
1040
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
1070
1041
  include Google::Apis::Core::Hashable
1071
1042
 
@@ -1093,14 +1064,14 @@ module Google
1093
1064
  # @return [Float]
1094
1065
  attr_accessor :confidence
1095
1066
 
1096
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
1097
- # A full list of supported type names will be provided in the document.
1067
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
1068
+ # full list of supported type names will be provided in the document.
1098
1069
  # Corresponds to the JSON property `name`
1099
1070
  # @return [String]
1100
1071
  attr_accessor :name
1101
1072
 
1102
- # Text value of the detection result. For example, the value for "HairColor"
1103
- # can be "black", "blonde", etc.
1073
+ # Text value of the detection result. For example, the value for "HairColor" can
1074
+ # be "black", "blonde", etc.
1104
1075
  # Corresponds to the JSON property `value`
1105
1076
  # @return [String]
1106
1077
  attr_accessor :value
@@ -1132,9 +1103,8 @@ module Google
1132
1103
  # @return [String]
1133
1104
  attr_accessor :name
1134
1105
 
1135
- # A vertex represents a 2D point in the image.
1136
- # NOTE: the normalized vertex coordinates are relative to the original image
1137
- # and range from 0 to 1.
1106
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1107
+ # coordinates are relative to the original image and range from 0 to 1.
1138
1108
  # Corresponds to the JSON property `point`
1139
1109
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
1140
1110
  attr_accessor :point
@@ -1160,8 +1130,7 @@ module Google
1160
1130
  # @return [String]
1161
1131
  attr_accessor :description
1162
1132
 
1163
- # Opaque entity ID. Some IDs may be available in
1164
- # [Google Knowledge Graph Search
1133
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
1165
1134
  # API](https://developers.google.com/knowledge-graph/).
1166
1135
  # Corresponds to the JSON property `entityId`
1167
1136
  # @return [String]
@@ -1184,9 +1153,9 @@ module Google
1184
1153
  end
1185
1154
  end
1186
1155
 
1187
- # Explicit content annotation (based on per-frame visual signals only).
1188
- # If no explicit content has been detected in a frame, no annotations are
1189
- # present for that frame.
1156
+ # Explicit content annotation (based on per-frame visual signals only). If no
1157
+ # explicit content has been detected in a frame, no annotations are present for
1158
+ # that frame.
1190
1159
  class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
1191
1160
  include Google::Apis::Core::Hashable
1192
1161
 
@@ -1241,10 +1210,9 @@ module Google
1241
1210
  class GoogleCloudVideointelligenceV1beta2LabelAnnotation
1242
1211
  include Google::Apis::Core::Hashable
1243
1212
 
1244
- # Common categories for the detected entity.
1245
- # For example, when the label is `Terrier`, the category is likely `dog`. And
1246
- # in some cases there might be more than one categories e.g., `Terrier` could
1247
- # also be a `pet`.
1213
+ # Common categories for the detected entity. For example, when the label is `
1214
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
1215
+ # than one categories e.g., `Terrier` could also be a `pet`.
1248
1216
  # Corresponds to the JSON property `categoryEntities`
1249
1217
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Entity>]
1250
1218
  attr_accessor :category_entities
@@ -1343,14 +1311,14 @@ module Google
1343
1311
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Entity]
1344
1312
  attr_accessor :entity
1345
1313
 
1346
- # All video segments where the recognized logo appears. There might be
1347
- # multiple instances of the same logo class appearing in one VideoSegment.
1314
+ # All video segments where the recognized logo appears. There might be multiple
1315
+ # instances of the same logo class appearing in one VideoSegment.
1348
1316
  # Corresponds to the JSON property `segments`
1349
1317
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
1350
1318
  attr_accessor :segments
1351
1319
 
1352
- # All logo tracks where the recognized logo appears. Each track corresponds
1353
- # to one logo instance appearing in consecutive frames.
1320
+ # All logo tracks where the recognized logo appears. Each track corresponds to
1321
+ # one logo instance appearing in consecutive frames.
1354
1322
  # Corresponds to the JSON property `tracks`
1355
1323
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Track>]
1356
1324
  attr_accessor :tracks
@@ -1367,9 +1335,8 @@ module Google
1367
1335
  end
1368
1336
  end
1369
1337
 
1370
- # Normalized bounding box.
1371
- # The normalized vertex coordinates are relative to the original image.
1372
- # Range: [0, 1].
1338
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1339
+ # original image. Range: [0, 1].
1373
1340
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
1374
1341
  include Google::Apis::Core::Hashable
1375
1342
 
@@ -1407,20 +1374,12 @@ module Google
1407
1374
  end
1408
1375
 
1409
1376
  # Normalized bounding polygon for text (that might not be aligned with axis).
1410
- # Contains list of the corner points in clockwise order starting from
1411
- # top-left corner. For example, for a rectangular bounding box:
1412
- # When the text is horizontal it might look like:
1413
- # 0----1
1414
- # | |
1415
- # 3----2
1416
- # When it's clockwise rotated 180 degrees around the top-left corner it
1417
- # becomes:
1418
- # 2----3
1419
- # | |
1420
- # 1----0
1421
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1422
- # than 0, or greater than 1 due to trignometric calculations for location of
1423
- # the box.
1377
+ # Contains list of the corner points in clockwise order starting from top-left
1378
+ # corner. For example, for a rectangular bounding box: When the text is
1379
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1380
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1381
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1382
+ # or greater than 1 due to trignometric calculations for location of the box.
1424
1383
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
1425
1384
  include Google::Apis::Core::Hashable
1426
1385
 
@@ -1439,9 +1398,8 @@ module Google
1439
1398
  end
1440
1399
  end
1441
1400
 
1442
- # A vertex represents a 2D point in the image.
1443
- # NOTE: the normalized vertex coordinates are relative to the original image
1444
- # and range from 0 to 1.
1401
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1402
+ # coordinates are relative to the original image and range from 0 to 1.
1445
1403
  class GoogleCloudVideointelligenceV1beta2NormalizedVertex
1446
1404
  include Google::Apis::Core::Hashable
1447
1405
 
@@ -1480,10 +1438,10 @@ module Google
1480
1438
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Entity]
1481
1439
  attr_accessor :entity
1482
1440
 
1483
- # Information corresponding to all frames where this object track appears.
1484
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
1485
- # messages in frames.
1486
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
1441
+ # Information corresponding to all frames where this object track appears. Non-
1442
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
1443
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
1444
+ # frames.
1487
1445
  # Corresponds to the JSON property `frames`
1488
1446
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
1489
1447
  attr_accessor :frames
@@ -1493,12 +1451,11 @@ module Google
1493
1451
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
1494
1452
  attr_accessor :segment
1495
1453
 
1496
- # Streaming mode ONLY.
1497
- # In streaming mode, we do not know the end time of a tracked object
1498
- # before it is completed. Hence, there is no VideoSegment info returned.
1499
- # Instead, we provide a unique identifiable integer track_id so that
1500
- # the customers can correlate the results of the ongoing
1501
- # ObjectTrackAnnotation of the same track_id over time.
1454
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
1455
+ # tracked object before it is completed. Hence, there is no VideoSegment info
1456
+ # returned. Instead, we provide a unique identifiable integer track_id so that
1457
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
1458
+ # of the same track_id over time.
1502
1459
  # Corresponds to the JSON property `trackId`
1503
1460
  # @return [Fixnum]
1504
1461
  attr_accessor :track_id
@@ -1528,9 +1485,8 @@ module Google
1528
1485
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
1529
1486
  include Google::Apis::Core::Hashable
1530
1487
 
1531
- # Normalized bounding box.
1532
- # The normalized vertex coordinates are relative to the original image.
1533
- # Range: [0, 1].
1488
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1489
+ # original image. Range: [0, 1].
1534
1490
  # Corresponds to the JSON property `normalizedBoundingBox`
1535
1491
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1536
1492
  attr_accessor :normalized_bounding_box
@@ -1557,10 +1513,10 @@ module Google
1557
1513
 
1558
1514
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
1559
1515
  # indicates an estimated greater likelihood that the recognized words are
1560
- # correct. This field is set only for the top alternative.
1561
- # This field is not guaranteed to be accurate and users should not rely on it
1562
- # to be always provided.
1563
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1516
+ # correct. This field is set only for the top alternative. This field is not
1517
+ # guaranteed to be accurate and users should not rely on it to be always
1518
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1519
+ # not set.
1564
1520
  # Corresponds to the JSON property `confidence`
1565
1521
  # @return [Float]
1566
1522
  attr_accessor :confidence
@@ -1571,8 +1527,8 @@ module Google
1571
1527
  attr_accessor :transcript
1572
1528
 
1573
1529
  # Output only. A list of word-specific information for each recognized word.
1574
- # Note: When `enable_speaker_diarization` is set to true, you will see all
1575
- # the words from the beginning of the audio.
1530
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
1531
+ # words from the beginning of the audio.
1576
1532
  # Corresponds to the JSON property `words`
1577
1533
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2WordInfo>]
1578
1534
  attr_accessor :words
@@ -1593,18 +1549,17 @@ module Google
1593
1549
  class GoogleCloudVideointelligenceV1beta2SpeechTranscription
1594
1550
  include Google::Apis::Core::Hashable
1595
1551
 
1596
- # May contain one or more recognition hypotheses (up to the maximum specified
1597
- # in `max_alternatives`). These alternatives are ordered in terms of
1598
- # accuracy, with the top (first) alternative being the most probable, as
1599
- # ranked by the recognizer.
1552
+ # May contain one or more recognition hypotheses (up to the maximum specified in
1553
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
1554
+ # the top (first) alternative being the most probable, as ranked by the
1555
+ # recognizer.
1600
1556
  # Corresponds to the JSON property `alternatives`
1601
1557
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
1602
1558
  attr_accessor :alternatives
1603
1559
 
1604
1560
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
1605
- # language tag of
1606
- # the language in this result. This language code was detected to have the
1607
- # most likelihood of being spoken in the audio.
1561
+ # language tag of the language in this result. This language code was detected
1562
+ # to have the most likelihood of being spoken in the audio.
1608
1563
  # Corresponds to the JSON property `languageCode`
1609
1564
  # @return [String]
1610
1565
  attr_accessor :language_code
@@ -1653,27 +1608,19 @@ module Google
1653
1608
  end
1654
1609
  end
1655
1610
 
1656
- # Video frame level annotation results for text annotation (OCR).
1657
- # Contains information regarding timestamp and bounding box locations for the
1658
- # frames containing detected OCR text snippets.
1611
+ # Video frame level annotation results for text annotation (OCR). Contains
1612
+ # information regarding timestamp and bounding box locations for the frames
1613
+ # containing detected OCR text snippets.
1659
1614
  class GoogleCloudVideointelligenceV1beta2TextFrame
1660
1615
  include Google::Apis::Core::Hashable
1661
1616
 
1662
1617
  # Normalized bounding polygon for text (that might not be aligned with axis).
1663
- # Contains list of the corner points in clockwise order starting from
1664
- # top-left corner. For example, for a rectangular bounding box:
1665
- # When the text is horizontal it might look like:
1666
- # 0----1
1667
- # | |
1668
- # 3----2
1669
- # When it's clockwise rotated 180 degrees around the top-left corner it
1670
- # becomes:
1671
- # 2----3
1672
- # | |
1673
- # 1----0
1674
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1675
- # than 0, or greater than 1 due to trignometric calculations for location of
1676
- # the box.
1618
+ # Contains list of the corner points in clockwise order starting from top-left
1619
+ # corner. For example, for a rectangular bounding box: When the text is
1620
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1621
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1622
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1623
+ # or greater than 1 due to trignometric calculations for location of the box.
1677
1624
  # Corresponds to the JSON property `rotatedBoundingBox`
1678
1625
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
1679
1626
  attr_accessor :rotated_bounding_box
@@ -1726,9 +1673,8 @@ module Google
1726
1673
  end
1727
1674
  end
1728
1675
 
1729
- # For tracking related features.
1730
- # An object at time_offset with attributes, and located with
1731
- # normalized_bounding_box.
1676
+ # For tracking related features. An object at time_offset with attributes, and
1677
+ # located with normalized_bounding_box.
1732
1678
  class GoogleCloudVideointelligenceV1beta2TimestampedObject
1733
1679
  include Google::Apis::Core::Hashable
1734
1680
 
@@ -1742,15 +1688,14 @@ module Google
1742
1688
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
1743
1689
  attr_accessor :landmarks
1744
1690
 
1745
- # Normalized bounding box.
1746
- # The normalized vertex coordinates are relative to the original image.
1747
- # Range: [0, 1].
1691
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1692
+ # original image. Range: [0, 1].
1748
1693
  # Corresponds to the JSON property `normalizedBoundingBox`
1749
1694
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1750
1695
  attr_accessor :normalized_bounding_box
1751
1696
 
1752
- # Time-offset, relative to the beginning of the video,
1753
- # corresponding to the video frame for this object.
1697
+ # Time-offset, relative to the beginning of the video, corresponding to the
1698
+ # video frame for this object.
1754
1699
  # Corresponds to the JSON property `timeOffset`
1755
1700
  # @return [String]
1756
1701
  attr_accessor :time_offset
@@ -1809,20 +1754,19 @@ module Google
1809
1754
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
1810
1755
  include Google::Apis::Core::Hashable
1811
1756
 
1812
- # Specifies which feature is being tracked if the request contains more than
1813
- # one feature.
1757
+ # Specifies which feature is being tracked if the request contains more than one
1758
+ # feature.
1814
1759
  # Corresponds to the JSON property `feature`
1815
1760
  # @return [String]
1816
1761
  attr_accessor :feature
1817
1762
 
1818
- # Video file location in
1819
- # [Cloud Storage](https://cloud.google.com/storage/).
1763
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
1820
1764
  # Corresponds to the JSON property `inputUri`
1821
1765
  # @return [String]
1822
1766
  attr_accessor :input_uri
1823
1767
 
1824
- # Approximate percentage processed thus far. Guaranteed to be
1825
- # 100 when fully processed.
1768
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
1769
+ # processed.
1826
1770
  # Corresponds to the JSON property `progressPercent`
1827
1771
  # @return [Fixnum]
1828
1772
  attr_accessor :progress_percent
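A per-video progress record, as documented above, carries only the tracked feature, the Cloud Storage input URI, and an integer percentage. A small illustrative sketch of turning one into a log line; in practice the object arrives inside the long-running operation's metadata rather than being built by hand, and the bucket path is just an example:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  # Built by hand only to keep the sketch self-contained.
  progress = V::GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress.new(
    feature: 'LABEL_DETECTION',
    input_uri: 'gs://example-bucket/example-video.mp4',   # example URI
    progress_percent: 42
  )

  # progress_percent is guaranteed to reach 100 once the video is fully processed.
  done = progress.progress_percent.to_i >= 100
  puts format('%s on %s: %d%% %s', progress.feature, progress.input_uri,
              progress.progress_percent, done ? '(done)' : '(still running)')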
@@ -1861,31 +1805,30 @@ module Google
1861
1805
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
1862
1806
  include Google::Apis::Core::Hashable
1863
1807
 
1864
- # The `Status` type defines a logical error model that is suitable for
1865
- # different programming environments, including REST APIs and RPC APIs. It is
1866
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1867
- # three pieces of data: error code, error message, and error details.
1868
- # You can find out more about this error model and how to work with it in the
1869
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
1808
+ # The `Status` type defines a logical error model that is suitable for different
1809
+ # programming environments, including REST APIs and RPC APIs. It is used by [
1810
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
1811
+ # data: error code, error message, and error details. You can find out more
1812
+ # about this error model and how to work with it in the [API Design Guide](https:
1813
+ # //cloud.google.com/apis/design/errors).
1870
1814
  # Corresponds to the JSON property `error`
1871
1815
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
1872
1816
  attr_accessor :error
1873
1817
 
1874
- # Explicit content annotation (based on per-frame visual signals only).
1875
- # If no explicit content has been detected in a frame, no annotations are
1876
- # present for that frame.
1818
+ # Explicit content annotation (based on per-frame visual signals only). If no
1819
+ # explicit content has been detected in a frame, no annotations are present for
1820
+ # that frame.
1877
1821
  # Corresponds to the JSON property `explicitAnnotation`
1878
1822
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
1879
1823
  attr_accessor :explicit_annotation
1880
1824
 
1881
- # Label annotations on frame level.
1882
- # There is exactly one element for each unique label.
1825
+ # Label annotations on frame level. There is exactly one element for each unique
1826
+ # label.
1883
1827
  # Corresponds to the JSON property `frameLabelAnnotations`
1884
1828
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1885
1829
  attr_accessor :frame_label_annotations
1886
1830
 
1887
- # Video file location in
1888
- # [Cloud Storage](https://cloud.google.com/storage/).
1831
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
1889
1832
  # Corresponds to the JSON property `inputUri`
1890
1833
  # @return [String]
1891
1834
  attr_accessor :input_uri
@@ -1912,11 +1855,11 @@ module Google
1912
1855
  attr_accessor :segment_label_annotations
1913
1856
 
1914
1857
  # Presence label annotations on video level or user-specified segment level.
1915
- # There is exactly one element for each unique label. Compared to the
1916
- # existing topical `segment_label_annotations`, this field presents more
1917
- # fine-grained, segment-level labels detected in video content and is made
1918
- # available only when the client sets `LabelDetectionConfig.model` to
1919
- # "builtin/latest" in the request.
1858
+ # There is exactly one element for each unique label. Compared to the existing
1859
+ # topical `segment_label_annotations`, this field presents more fine-grained,
1860
+ # segment-level labels detected in video content and is made available only when
1861
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
1862
+ # request.
1920
1863
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
1921
1864
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1922
1865
  attr_accessor :segment_presence_label_annotations
@@ -1926,17 +1869,17 @@ module Google
1926
1869
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
1927
1870
  attr_accessor :shot_annotations
1928
1871
 
1929
- # Topical label annotations on shot level.
1930
- # There is exactly one element for each unique label.
1872
+ # Topical label annotations on shot level. There is exactly one element for each
1873
+ # unique label.
1931
1874
  # Corresponds to the JSON property `shotLabelAnnotations`
1932
1875
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1933
1876
  attr_accessor :shot_label_annotations
1934
1877
 
1935
1878
  # Presence label annotations on shot level. There is exactly one element for
1936
- # each unique label. Compared to the existing topical
1937
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
1938
- # labels detected in video content and is made available only when the client
1939
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
1879
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
1880
+ # this field presents more fine-grained, shot-level labels detected in video
1881
+ # content and is made available only when the client sets `LabelDetectionConfig.
1882
+ # model` to "builtin/latest" in the request.
1940
1883
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
1941
1884
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1942
1885
  attr_accessor :shot_presence_label_annotations
@@ -1946,9 +1889,8 @@ module Google
1946
1889
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
1947
1890
  attr_accessor :speech_transcriptions
1948
1891
 
1949
- # OCR text detection and tracking.
1950
- # Annotations for list of detected text snippets. Each will have list of
1951
- # frame information associated with it.
1892
+ # OCR text detection and tracking. Annotations for list of detected text
1893
+ # snippets. Each will have list of frame information associated with it.
1952
1894
  # Corresponds to the JSON property `textAnnotations`
1953
1895
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
1954
1896
  attr_accessor :text_annotations
@@ -1980,14 +1922,14 @@ module Google
1980
1922
  class GoogleCloudVideointelligenceV1beta2VideoSegment
1981
1923
  include Google::Apis::Core::Hashable
1982
1924
 
1983
- # Time-offset, relative to the beginning of the video,
1984
- # corresponding to the end of the segment (inclusive).
1925
+ # Time-offset, relative to the beginning of the video, corresponding to the end
1926
+ # of the segment (inclusive).
1985
1927
  # Corresponds to the JSON property `endTimeOffset`
1986
1928
  # @return [String]
1987
1929
  attr_accessor :end_time_offset
1988
1930
 
1989
- # Time-offset, relative to the beginning of the video,
1990
- # corresponding to the start of the segment (inclusive).
1931
+ # Time-offset, relative to the beginning of the video, corresponding to the
1932
+ # start of the segment (inclusive).
1991
1933
  # Corresponds to the JSON property `startTimeOffset`
1992
1934
  # @return [String]
1993
1935
  attr_accessor :start_time_offset
@@ -2004,41 +1946,41 @@ module Google
2004
1946
  end
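A VideoSegment, as shown above, is just a pair of inclusive duration strings. A short sketch of computing a segment's length; the `seconds` helper is hypothetical and assumes the usual JSON duration encoding of seconds with an `s` suffix (for example "12.5s"):

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  # Hypothetical helper: duration strings look like "12.5s".
  def seconds(duration)
    duration.to_s.delete_suffix('s').to_f
  end

  segment = V::GoogleCloudVideointelligenceV1beta2VideoSegment.new(
    start_time_offset: '4s',
    end_time_offset: '12.5s'
  )

  # Both offsets are relative to the start of the video and inclusive.
  puts "segment runs #{seconds(segment.end_time_offset) - seconds(segment.start_time_offset)}s"
  # => segment runs 8.5s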
2005
1947
 
2006
1948
  # Word-specific information for recognized words. Word information is only
2007
- # included in the response when certain request parameters are set, such
2008
- # as `enable_word_time_offsets`.
1949
+ # included in the response when certain request parameters are set, such as `
1950
+ # enable_word_time_offsets`.
2009
1951
  class GoogleCloudVideointelligenceV1beta2WordInfo
2010
1952
  include Google::Apis::Core::Hashable
2011
1953
 
2012
1954
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2013
1955
  # indicates an estimated greater likelihood that the recognized words are
2014
- # correct. This field is set only for the top alternative.
2015
- # This field is not guaranteed to be accurate and users should not rely on it
2016
- # to be always provided.
2017
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1956
+ # correct. This field is set only for the top alternative. This field is not
1957
+ # guaranteed to be accurate and users should not rely on it to be always
1958
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1959
+ # not set.
2018
1960
  # Corresponds to the JSON property `confidence`
2019
1961
  # @return [Float]
2020
1962
  attr_accessor :confidence
2021
1963
 
2022
- # Time offset relative to the beginning of the audio, and
2023
- # corresponding to the end of the spoken word. This field is only set if
2024
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2025
- # experimental feature and the accuracy of the time offset can vary.
1964
+ # Time offset relative to the beginning of the audio, and corresponding to the
1965
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
1966
+ # true` and only in the top hypothesis. This is an experimental feature and the
1967
+ # accuracy of the time offset can vary.
2026
1968
  # Corresponds to the JSON property `endTime`
2027
1969
  # @return [String]
2028
1970
  attr_accessor :end_time
2029
1971
 
2030
- # Output only. A distinct integer value is assigned for every speaker within
2031
- # the audio. This field specifies which one of those speakers was detected to
2032
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
2033
- # and is only set if speaker diarization is enabled.
1972
+ # Output only. A distinct integer value is assigned for every speaker within the
1973
+ # audio. This field specifies which one of those speakers was detected to have
1974
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
1975
+ # only set if speaker diarization is enabled.
2034
1976
  # Corresponds to the JSON property `speakerTag`
2035
1977
  # @return [Fixnum]
2036
1978
  attr_accessor :speaker_tag
2037
1979
 
2038
- # Time offset relative to the beginning of the audio, and
2039
- # corresponding to the start of the spoken word. This field is only set if
2040
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2041
- # experimental feature and the accuracy of the time offset can vary.
1980
+ # Time offset relative to the beginning of the audio, and corresponding to the
1981
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
1982
+ # true` and only in the top hypothesis. This is an experimental feature and the
1983
+ # accuracy of the time offset can vary.
2042
1984
  # Corresponds to the JSON property `startTime`
2043
1985
  # @return [String]
2044
1986
  attr_accessor :start_time
@@ -2062,9 +2004,9 @@ module Google
2062
2004
  end
2063
2005
  end
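The WordInfo fields above only appear under specific request settings: timings require `enable_word_time_offsets`, and `speaker_tag` requires speaker diarization. An illustrative sketch of walking the words of one recognition alternative; the alternative's `transcript`/`words` accessors and WordInfo's `word` accessor are not visible in these v1beta2 hunks (the first two appear in the v1p1beta1 hunks further down) and are assumed here:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  # Hand-built stand-in for data that would normally come back from the API.
  words = [
    V::GoogleCloudVideointelligenceV1beta2WordInfo.new(
      word: 'hello', start_time: '1.1s', end_time: '1.4s', speaker_tag: 1
    ),
    V::GoogleCloudVideointelligenceV1beta2WordInfo.new(
      word: 'there', start_time: '1.5s', end_time: '1.9s', speaker_tag: 2
    )
  ]
  alternative = V::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative.new(
    transcript: 'hello there', words: words
  )

  # start_time/end_time are only populated when enable_word_time_offsets was
  # requested; speaker_tag only when speaker diarization was enabled.
  alternative.words.each do |w|
    speaker = w.speaker_tag ? "speaker #{w.speaker_tag}" : 'unknown speaker'
    puts "#{w.word.inspect} #{w.start_time}-#{w.end_time} (#{speaker})"
  end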
2064
2006
 
2065
- # Video annotation progress. Included in the `metadata`
2066
- # field of the `Operation` returned by the `GetOperation`
2067
- # call of the `google::longrunning::Operations` service.
2007
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
2008
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2009
+ # service.
2068
2010
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
2069
2011
  include Google::Apis::Core::Hashable
2070
2012
 
@@ -2083,9 +2025,9 @@ module Google
2083
2025
  end
2084
2026
  end
2085
2027
 
2086
- # Video annotation response. Included in the `response`
2087
- # field of the `Operation` returned by the `GetOperation`
2088
- # call of the `google::longrunning::Operations` service.
2028
+ # Video annotation response. Included in the `response` field of the `Operation`
2029
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2030
+ # service.
2089
2031
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
2090
2032
  include Google::Apis::Core::Hashable
2091
2033
 
@@ -2113,14 +2055,14 @@ module Google
2113
2055
  # @return [Float]
2114
2056
  attr_accessor :confidence
2115
2057
 
2116
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
2117
- # A full list of supported type names will be provided in the document.
2058
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
2059
+ # full list of supported type names will be provided in the document.
2118
2060
  # Corresponds to the JSON property `name`
2119
2061
  # @return [String]
2120
2062
  attr_accessor :name
2121
2063
 
2122
- # Text value of the detection result. For example, the value for "HairColor"
2123
- # can be "black", "blonde", etc.
2064
+ # Text value of the detection result. For example, the value for "HairColor" can
2065
+ # be "black", "blonde", etc.
2124
2066
  # Corresponds to the JSON property `value`
2125
2067
  # @return [String]
2126
2068
  attr_accessor :value
@@ -2152,9 +2094,8 @@ module Google
2152
2094
  # @return [String]
2153
2095
  attr_accessor :name
2154
2096
 
2155
- # A vertex represents a 2D point in the image.
2156
- # NOTE: the normalized vertex coordinates are relative to the original image
2157
- # and range from 0 to 1.
2097
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2098
+ # coordinates are relative to the original image and range from 0 to 1.
2158
2099
  # Corresponds to the JSON property `point`
2159
2100
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
2160
2101
  attr_accessor :point
@@ -2180,8 +2121,7 @@ module Google
2180
2121
  # @return [String]
2181
2122
  attr_accessor :description
2182
2123
 
2183
- # Opaque entity ID. Some IDs may be available in
2184
- # [Google Knowledge Graph Search
2124
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
2185
2125
  # API](https://developers.google.com/knowledge-graph/).
2186
2126
  # Corresponds to the JSON property `entityId`
2187
2127
  # @return [String]
@@ -2204,9 +2144,9 @@ module Google
2204
2144
  end
2205
2145
  end
2206
2146
 
2207
- # Explicit content annotation (based on per-frame visual signals only).
2208
- # If no explicit content has been detected in a frame, no annotations are
2209
- # present for that frame.
2147
+ # Explicit content annotation (based on per-frame visual signals only). If no
2148
+ # explicit content has been detected in a frame, no annotations are present for
2149
+ # that frame.
2210
2150
  class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
2211
2151
  include Google::Apis::Core::Hashable
2212
2152
 
@@ -2261,10 +2201,9 @@ module Google
2261
2201
  class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
2262
2202
  include Google::Apis::Core::Hashable
2263
2203
 
2264
- # Common categories for the detected entity.
2265
- # For example, when the label is `Terrier`, the category is likely `dog`. And
2266
- # in some cases there might be more than one categories e.g., `Terrier` could
2267
- # also be a `pet`.
2204
+ # Common categories for the detected entity. For example, when the label is `
2205
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
2206
+ # than one categories e.g., `Terrier` could also be a `pet`.
2268
2207
  # Corresponds to the JSON property `categoryEntities`
2269
2208
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Entity>]
2270
2209
  attr_accessor :category_entities
@@ -2363,14 +2302,14 @@ module Google
2363
2302
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
2364
2303
  attr_accessor :entity
2365
2304
 
2366
- # All video segments where the recognized logo appears. There might be
2367
- # multiple instances of the same logo class appearing in one VideoSegment.
2305
+ # All video segments where the recognized logo appears. There might be multiple
2306
+ # instances of the same logo class appearing in one VideoSegment.
2368
2307
  # Corresponds to the JSON property `segments`
2369
2308
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
2370
2309
  attr_accessor :segments
2371
2310
 
2372
- # All logo tracks where the recognized logo appears. Each track corresponds
2373
- # to one logo instance appearing in consecutive frames.
2311
+ # All logo tracks where the recognized logo appears. Each track corresponds to
2312
+ # one logo instance appearing in consecutive frames.
2374
2313
  # Corresponds to the JSON property `tracks`
2375
2314
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
2376
2315
  attr_accessor :tracks
@@ -2387,9 +2326,8 @@ module Google
2387
2326
  end
2388
2327
  end
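The logo recognition fields above separate `segments` (every appearance of the logo class) from `tracks` (one entry per contiguous logo instance). A sketch of summarizing one annotation; the enclosing class name, assumed here to be GoogleCloudVideointelligenceV1p1beta1LogoRecognitionAnnotation, is not shown in these hunks:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  logo = V::GoogleCloudVideointelligenceV1p1beta1LogoRecognitionAnnotation.new(   # class name assumed
    entity: V::GoogleCloudVideointelligenceV1p1beta1Entity.new(description: 'Example Logo'),
    segments: [
      V::GoogleCloudVideointelligenceV1p1beta1VideoSegment.new(
        start_time_offset: '0s', end_time_offset: '3s'
      )
    ],
    tracks: []   # one Track per contiguous logo instance across frames
  )

  # segments list every appearance of the logo class; tracks group the
  # consecutive-frame instances.
  puts "#{logo.entity.description}: #{Array(logo.segments).size} segment(s), " \
       "#{Array(logo.tracks).size} track(s)"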
2389
2328
 
2390
- # Normalized bounding box.
2391
- # The normalized vertex coordinates are relative to the original image.
2392
- # Range: [0, 1].
2329
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2330
+ # original image. Range: [0, 1].
2393
2331
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
2394
2332
  include Google::Apis::Core::Hashable
2395
2333
 
@@ -2427,20 +2365,12 @@ module Google
2427
2365
  end
2428
2366
 
2429
2367
  # Normalized bounding polygon for text (that might not be aligned with axis).
2430
- # Contains list of the corner points in clockwise order starting from
2431
- # top-left corner. For example, for a rectangular bounding box:
2432
- # When the text is horizontal it might look like:
2433
- # 0----1
2434
- # | |
2435
- # 3----2
2436
- # When it's clockwise rotated 180 degrees around the top-left corner it
2437
- # becomes:
2438
- # 2----3
2439
- # | |
2440
- # 1----0
2441
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2442
- # than 0, or greater than 1 due to trignometric calculations for location of
2443
- # the box.
2368
+ # Contains list of the corner points in clockwise order starting from top-left
2369
+ # corner. For example, for a rectangular bounding box: When the text is
2370
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2371
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2372
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2373
+ # or greater than 1 due to trignometric calculations for location of the box.
2444
2374
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
2445
2375
  include Google::Apis::Core::Hashable
2446
2376
 
@@ -2459,9 +2389,8 @@ module Google
2459
2389
  end
2460
2390
  end
2461
2391
 
2462
- # A vertex represents a 2D point in the image.
2463
- # NOTE: the normalized vertex coordinates are relative to the original image
2464
- # and range from 0 to 1.
2392
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2393
+ # coordinates are relative to the original image and range from 0 to 1.
2465
2394
  class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
2466
2395
  include Google::Apis::Core::Hashable
2467
2396
 
@@ -2500,10 +2429,10 @@ module Google
2500
2429
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
2501
2430
  attr_accessor :entity
2502
2431
 
2503
- # Information corresponding to all frames where this object track appears.
2504
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
2505
- # messages in frames.
2506
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
2432
+ # Information corresponding to all frames where this object track appears. Non-
2433
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
2434
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
2435
+ # frames.
2507
2436
  # Corresponds to the JSON property `frames`
2508
2437
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
2509
2438
  attr_accessor :frames
@@ -2513,12 +2442,11 @@ module Google
2513
2442
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
2514
2443
  attr_accessor :segment
2515
2444
 
2516
- # Streaming mode ONLY.
2517
- # In streaming mode, we do not know the end time of a tracked object
2518
- # before it is completed. Hence, there is no VideoSegment info returned.
2519
- # Instead, we provide a unique identifiable integer track_id so that
2520
- # the customers can correlate the results of the ongoing
2521
- # ObjectTrackAnnotation of the same track_id over time.
2445
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
2446
+ # tracked object before it is completed. Hence, there is no VideoSegment info
2447
+ # returned. Instead, we provide a unique identifiable integer track_id so that
2448
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
2449
+ # of the same track_id over time.
2522
2450
  # Corresponds to the JSON property `trackId`
2523
2451
  # @return [Fixnum]
2524
2452
  attr_accessor :track_id
@@ -2548,9 +2476,8 @@ module Google
2548
2476
  class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
2549
2477
  include Google::Apis::Core::Hashable
2550
2478
 
2551
- # Normalized bounding box.
2552
- # The normalized vertex coordinates are relative to the original image.
2553
- # Range: [0, 1].
2479
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2480
+ # original image. Range: [0, 1].
2554
2481
  # Corresponds to the JSON property `normalizedBoundingBox`
2555
2482
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
2556
2483
  attr_accessor :normalized_bounding_box
@@ -2577,10 +2504,10 @@ module Google
2577
2504
 
2578
2505
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2579
2506
  # indicates an estimated greater likelihood that the recognized words are
2580
- # correct. This field is set only for the top alternative.
2581
- # This field is not guaranteed to be accurate and users should not rely on it
2582
- # to be always provided.
2583
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2507
+ # correct. This field is set only for the top alternative. This field is not
2508
+ # guaranteed to be accurate and users should not rely on it to be always
2509
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2510
+ # not set.
2584
2511
  # Corresponds to the JSON property `confidence`
2585
2512
  # @return [Float]
2586
2513
  attr_accessor :confidence
@@ -2591,8 +2518,8 @@ module Google
2591
2518
  attr_accessor :transcript
2592
2519
 
2593
2520
  # Output only. A list of word-specific information for each recognized word.
2594
- # Note: When `enable_speaker_diarization` is set to true, you will see all
2595
- # the words from the beginning of the audio.
2521
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
2522
+ # words from the beginning of the audio.
2596
2523
  # Corresponds to the JSON property `words`
2597
2524
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
2598
2525
  attr_accessor :words
@@ -2613,18 +2540,17 @@ module Google
2613
2540
  class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
2614
2541
  include Google::Apis::Core::Hashable
2615
2542
 
2616
- # May contain one or more recognition hypotheses (up to the maximum specified
2617
- # in `max_alternatives`). These alternatives are ordered in terms of
2618
- # accuracy, with the top (first) alternative being the most probable, as
2619
- # ranked by the recognizer.
2543
+ # May contain one or more recognition hypotheses (up to the maximum specified in
2544
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
2545
+ # the top (first) alternative being the most probable, as ranked by the
2546
+ # recognizer.
2620
2547
  # Corresponds to the JSON property `alternatives`
2621
2548
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
2622
2549
  attr_accessor :alternatives
2623
2550
 
2624
2551
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
2625
- # language tag of
2626
- # the language in this result. This language code was detected to have the
2627
- # most likelihood of being spoken in the audio.
2552
+ # language tag of the language in this result. This language code was detected
2553
+ # to have the most likelihood of being spoken in the audio.
2628
2554
  # Corresponds to the JSON property `languageCode`
2629
2555
  # @return [String]
2630
2556
  attr_accessor :language_code
@@ -2673,27 +2599,19 @@ module Google
2673
2599
  end
2674
2600
  end
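Per the comments above, recognition alternatives arrive already ranked by the recognizer, so the top hypothesis is simply the first element. A minimal, illustrative sketch using only accessors shown in these hunks:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  transcription = V::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription.new(
    language_code: 'en-US',
    alternatives: [
      V::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative.new(
        transcript: 'welcome to the demo', confidence: 0.92
      ),
      V::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative.new(
        transcript: 'welcome to the memo', confidence: 0.41
      )
    ]
  )

  # Alternatives are ordered most probable first, so the top hypothesis is the
  # first element; confidence is not always provided (0.0 is the sentinel).
  best = Array(transcription.alternatives).first
  puts "[#{transcription.language_code}] #{best.transcript} (confidence #{best.confidence})"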
2675
2601
 
2676
- # Video frame level annotation results for text annotation (OCR).
2677
- # Contains information regarding timestamp and bounding box locations for the
2678
- # frames containing detected OCR text snippets.
2602
+ # Video frame level annotation results for text annotation (OCR). Contains
2603
+ # information regarding timestamp and bounding box locations for the frames
2604
+ # containing detected OCR text snippets.
2679
2605
  class GoogleCloudVideointelligenceV1p1beta1TextFrame
2680
2606
  include Google::Apis::Core::Hashable
2681
2607
 
2682
2608
  # Normalized bounding polygon for text (that might not be aligned with axis).
2683
- # Contains list of the corner points in clockwise order starting from
2684
- # top-left corner. For example, for a rectangular bounding box:
2685
- # When the text is horizontal it might look like:
2686
- # 0----1
2687
- # | |
2688
- # 3----2
2689
- # When it's clockwise rotated 180 degrees around the top-left corner it
2690
- # becomes:
2691
- # 2----3
2692
- # | |
2693
- # 1----0
2694
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2695
- # than 0, or greater than 1 due to trignometric calculations for location of
2696
- # the box.
2609
+ # Contains list of the corner points in clockwise order starting from top-left
2610
+ # corner. For example, for a rectangular bounding box: When the text is
2611
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2612
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2613
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2614
+ # or greater than 1 due to trignometric calculations for location of the box.
2697
2615
  # Corresponds to the JSON property `rotatedBoundingBox`
2698
2616
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
2699
2617
  attr_accessor :rotated_bounding_box
@@ -2746,9 +2664,8 @@ module Google
2746
2664
  end
2747
2665
  end
2748
2666
 
2749
- # For tracking related features.
2750
- # An object at time_offset with attributes, and located with
2751
- # normalized_bounding_box.
2667
+ # For tracking related features. An object at time_offset with attributes, and
2668
+ # located with normalized_bounding_box.
2752
2669
  class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
2753
2670
  include Google::Apis::Core::Hashable
2754
2671
 
@@ -2762,15 +2679,14 @@ module Google
2762
2679
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
2763
2680
  attr_accessor :landmarks
2764
2681
 
2765
- # Normalized bounding box.
2766
- # The normalized vertex coordinates are relative to the original image.
2767
- # Range: [0, 1].
2682
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2683
+ # original image. Range: [0, 1].
2768
2684
  # Corresponds to the JSON property `normalizedBoundingBox`
2769
2685
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
2770
2686
  attr_accessor :normalized_bounding_box
2771
2687
 
2772
- # Time-offset, relative to the beginning of the video,
2773
- # corresponding to the video frame for this object.
2688
+ # Time-offset, relative to the beginning of the video, corresponding to the
2689
+ # video frame for this object.
2774
2690
  # Corresponds to the JSON property `timeOffset`
2775
2691
  # @return [String]
2776
2692
  attr_accessor :time_offset
@@ -2829,20 +2745,19 @@ module Google
2829
2745
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
2830
2746
  include Google::Apis::Core::Hashable
2831
2747
 
2832
- # Specifies which feature is being tracked if the request contains more than
2833
- # one feature.
2748
+ # Specifies which feature is being tracked if the request contains more than one
2749
+ # feature.
2834
2750
  # Corresponds to the JSON property `feature`
2835
2751
  # @return [String]
2836
2752
  attr_accessor :feature
2837
2753
 
2838
- # Video file location in
2839
- # [Cloud Storage](https://cloud.google.com/storage/).
2754
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2840
2755
  # Corresponds to the JSON property `inputUri`
2841
2756
  # @return [String]
2842
2757
  attr_accessor :input_uri
2843
2758
 
2844
- # Approximate percentage processed thus far. Guaranteed to be
2845
- # 100 when fully processed.
2759
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
2760
+ # processed.
2846
2761
  # Corresponds to the JSON property `progressPercent`
2847
2762
  # @return [Fixnum]
2848
2763
  attr_accessor :progress_percent
@@ -2881,31 +2796,30 @@ module Google
2881
2796
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
2882
2797
  include Google::Apis::Core::Hashable
2883
2798
 
2884
- # The `Status` type defines a logical error model that is suitable for
2885
- # different programming environments, including REST APIs and RPC APIs. It is
2886
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2887
- # three pieces of data: error code, error message, and error details.
2888
- # You can find out more about this error model and how to work with it in the
2889
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2799
+ # The `Status` type defines a logical error model that is suitable for different
2800
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2801
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2802
+ # data: error code, error message, and error details. You can find out more
2803
+ # about this error model and how to work with it in the [API Design Guide](https:
2804
+ # //cloud.google.com/apis/design/errors).
2890
2805
  # Corresponds to the JSON property `error`
2891
2806
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
2892
2807
  attr_accessor :error
2893
2808
 
2894
- # Explicit content annotation (based on per-frame visual signals only).
2895
- # If no explicit content has been detected in a frame, no annotations are
2896
- # present for that frame.
2809
+ # Explicit content annotation (based on per-frame visual signals only). If no
2810
+ # explicit content has been detected in a frame, no annotations are present for
2811
+ # that frame.
2897
2812
  # Corresponds to the JSON property `explicitAnnotation`
2898
2813
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
2899
2814
  attr_accessor :explicit_annotation
2900
2815
 
2901
- # Label annotations on frame level.
2902
- # There is exactly one element for each unique label.
2816
+ # Label annotations on frame level. There is exactly one element for each unique
2817
+ # label.
2903
2818
  # Corresponds to the JSON property `frameLabelAnnotations`
2904
2819
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
2905
2820
  attr_accessor :frame_label_annotations
2906
2821
 
2907
- # Video file location in
2908
- # [Cloud Storage](https://cloud.google.com/storage/).
2822
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2909
2823
  # Corresponds to the JSON property `inputUri`
2910
2824
  # @return [String]
2911
2825
  attr_accessor :input_uri
@@ -2932,11 +2846,11 @@ module Google
2932
2846
  attr_accessor :segment_label_annotations
2933
2847
 
2934
2848
  # Presence label annotations on video level or user-specified segment level.
2935
- # There is exactly one element for each unique label. Compared to the
2936
- # existing topical `segment_label_annotations`, this field presents more
2937
- # fine-grained, segment-level labels detected in video content and is made
2938
- # available only when the client sets `LabelDetectionConfig.model` to
2939
- # "builtin/latest" in the request.
2849
+ # There is exactly one element for each unique label. Compared to the existing
2850
+ # topical `segment_label_annotations`, this field presents more fine-grained,
2851
+ # segment-level labels detected in video content and is made available only when
2852
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
2853
+ # request.
2940
2854
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
2941
2855
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
2942
2856
  attr_accessor :segment_presence_label_annotations
@@ -2946,17 +2860,17 @@ module Google
2946
2860
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
2947
2861
  attr_accessor :shot_annotations
2948
2862
 
2949
- # Topical label annotations on shot level.
2950
- # There is exactly one element for each unique label.
2863
+ # Topical label annotations on shot level. There is exactly one element for each
2864
+ # unique label.
2951
2865
  # Corresponds to the JSON property `shotLabelAnnotations`
2952
2866
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
2953
2867
  attr_accessor :shot_label_annotations
2954
2868
 
2955
2869
  # Presence label annotations on shot level. There is exactly one element for
2956
- # each unique label. Compared to the existing topical
2957
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
2958
- # labels detected in video content and is made available only when the client
2959
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
2870
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
2871
+ # this field presents more fine-grained, shot-level labels detected in video
2872
+ # content and is made available only when the client sets `LabelDetectionConfig.
2873
+ # model` to "builtin/latest" in the request.
2960
2874
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
2961
2875
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
2962
2876
  attr_accessor :shot_presence_label_annotations
@@ -2966,9 +2880,8 @@ module Google
2966
2880
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
2967
2881
  attr_accessor :speech_transcriptions
2968
2882
 
2969
- # OCR text detection and tracking.
2970
- # Annotations for list of detected text snippets. Each will have list of
2971
- # frame information associated with it.
2883
+ # OCR text detection and tracking. Annotations for list of detected text
2884
+ # snippets. Each will have list of frame information associated with it.
2972
2885
  # Corresponds to the JSON property `textAnnotations`
2973
2886
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
2974
2887
  attr_accessor :text_annotations
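The VideoAnnotationResults hunks above carry a per-video `error` Status alongside the annotation arrays. A sketch of checking it before touching the results; the `code`/`message` accessors on GoogleRpcStatus are assumed (this diff only shows the Status summary comment), and the bucket path is an example:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  results = V::GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults.new(
    input_uri: 'gs://example-bucket/example-video.mp4',   # example URI
    error: V::GoogleRpcStatus.new(code: 3, message: 'Unsupported input format.')
  )

  # A populated error means this particular video failed; the other annotation
  # arrays should not be relied on in that case.
  if results.error
    warn "#{results.input_uri} failed: #{results.error.message} (code #{results.error.code})"
  else
    puts "#{Array(results.text_annotations).size} text annotation(s), " \
         "#{Array(results.speech_transcriptions).size} speech transcription(s)"
  end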
@@ -3000,14 +2913,14 @@ module Google
3000
2913
  class GoogleCloudVideointelligenceV1p1beta1VideoSegment
3001
2914
  include Google::Apis::Core::Hashable
3002
2915
 
3003
- # Time-offset, relative to the beginning of the video,
3004
- # corresponding to the end of the segment (inclusive).
2916
+ # Time-offset, relative to the beginning of the video, corresponding to the end
2917
+ # of the segment (inclusive).
3005
2918
  # Corresponds to the JSON property `endTimeOffset`
3006
2919
  # @return [String]
3007
2920
  attr_accessor :end_time_offset
3008
2921
 
3009
- # Time-offset, relative to the beginning of the video,
3010
- # corresponding to the start of the segment (inclusive).
2922
+ # Time-offset, relative to the beginning of the video, corresponding to the
2923
+ # start of the segment (inclusive).
3011
2924
  # Corresponds to the JSON property `startTimeOffset`
3012
2925
  # @return [String]
3013
2926
  attr_accessor :start_time_offset
@@ -3024,41 +2937,41 @@ module Google
3024
2937
  end
3025
2938
 
3026
2939
  # Word-specific information for recognized words. Word information is only
3027
- # included in the response when certain request parameters are set, such
3028
- # as `enable_word_time_offsets`.
2940
+ # included in the response when certain request parameters are set, such as `
2941
+ # enable_word_time_offsets`.
3029
2942
  class GoogleCloudVideointelligenceV1p1beta1WordInfo
3030
2943
  include Google::Apis::Core::Hashable
3031
2944
 
3032
2945
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
3033
2946
  # indicates an estimated greater likelihood that the recognized words are
3034
- # correct. This field is set only for the top alternative.
3035
- # This field is not guaranteed to be accurate and users should not rely on it
3036
- # to be always provided.
3037
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2947
+ # correct. This field is set only for the top alternative. This field is not
2948
+ # guaranteed to be accurate and users should not rely on it to be always
2949
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2950
+ # not set.
3038
2951
  # Corresponds to the JSON property `confidence`
3039
2952
  # @return [Float]
3040
2953
  attr_accessor :confidence
3041
2954
 
3042
- # Time offset relative to the beginning of the audio, and
3043
- # corresponding to the end of the spoken word. This field is only set if
3044
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3045
- # experimental feature and the accuracy of the time offset can vary.
2955
+ # Time offset relative to the beginning of the audio, and corresponding to the
2956
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
2957
+ # true` and only in the top hypothesis. This is an experimental feature and the
2958
+ # accuracy of the time offset can vary.
3046
2959
  # Corresponds to the JSON property `endTime`
3047
2960
  # @return [String]
3048
2961
  attr_accessor :end_time
3049
2962
 
3050
- # Output only. A distinct integer value is assigned for every speaker within
3051
- # the audio. This field specifies which one of those speakers was detected to
3052
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
3053
- # and is only set if speaker diarization is enabled.
2963
+ # Output only. A distinct integer value is assigned for every speaker within the
2964
+ # audio. This field specifies which one of those speakers was detected to have
2965
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
2966
+ # only set if speaker diarization is enabled.
3054
2967
  # Corresponds to the JSON property `speakerTag`
3055
2968
  # @return [Fixnum]
3056
2969
  attr_accessor :speaker_tag
3057
2970
 
3058
- # Time offset relative to the beginning of the audio, and
3059
- # corresponding to the start of the spoken word. This field is only set if
3060
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3061
- # experimental feature and the accuracy of the time offset can vary.
2971
+ # Time offset relative to the beginning of the audio, and corresponding to the
2972
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
2973
+ # true` and only in the top hypothesis. This is an experimental feature and the
2974
+ # accuracy of the time offset can vary.
3062
2975
  # Corresponds to the JSON property `startTime`
3063
2976
  # @return [String]
3064
2977
  attr_accessor :start_time
@@ -3082,9 +2995,9 @@ module Google
3082
2995
  end
3083
2996
  end
3084
2997
 
3085
- # Video annotation progress. Included in the `metadata`
3086
- # field of the `Operation` returned by the `GetOperation`
3087
- # call of the `google::longrunning::Operations` service.
2998
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
2999
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3000
+ # service.
3088
3001
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
3089
3002
  include Google::Apis::Core::Hashable
3090
3003
 
@@ -3103,9 +3016,9 @@ module Google
3103
3016
  end
3104
3017
  end
3105
3018
 
3106
- # Video annotation response. Included in the `response`
3107
- # field of the `Operation` returned by the `GetOperation`
3108
- # call of the `google::longrunning::Operations` service.
3019
+ # Video annotation response. Included in the `response` field of the `Operation`
3020
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3021
+ # service.
3109
3022
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
3110
3023
  include Google::Apis::Core::Hashable
3111
3024
 
@@ -3133,14 +3046,14 @@ module Google
3133
3046
  # @return [Float]
3134
3047
  attr_accessor :confidence
3135
3048
 
3136
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
3137
- # A full list of supported type names will be provided in the document.
3049
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
3050
+ # full list of supported type names will be provided in the document.
3138
3051
  # Corresponds to the JSON property `name`
3139
3052
  # @return [String]
3140
3053
  attr_accessor :name
3141
3054
 
3142
- # Text value of the detection result. For example, the value for "HairColor"
3143
- # can be "black", "blonde", etc.
3055
+ # Text value of the detection result. For example, the value for "HairColor" can
3056
+ # be "black", "blonde", etc.
3144
3057
  # Corresponds to the JSON property `value`
3145
3058
  # @return [String]
3146
3059
  attr_accessor :value
@@ -3172,9 +3085,8 @@ module Google
3172
3085
  # @return [String]
3173
3086
  attr_accessor :name
3174
3087
 
3175
- # A vertex represents a 2D point in the image.
3176
- # NOTE: the normalized vertex coordinates are relative to the original image
3177
- # and range from 0 to 1.
3088
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3089
+ # coordinates are relative to the original image and range from 0 to 1.
3178
3090
  # Corresponds to the JSON property `point`
3179
3091
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
3180
3092
  attr_accessor :point
@@ -3200,8 +3112,7 @@ module Google
3200
3112
  # @return [String]
3201
3113
  attr_accessor :description
3202
3114
 
3203
- # Opaque entity ID. Some IDs may be available in
3204
- # [Google Knowledge Graph Search
3115
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
3205
3116
  # API](https://developers.google.com/knowledge-graph/).
3206
3117
  # Corresponds to the JSON property `entityId`
3207
3118
  # @return [String]
@@ -3224,9 +3135,9 @@ module Google
3224
3135
  end
3225
3136
  end
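The Entity comments above distinguish the human-readable `description` from the opaque entity ID that can be looked up via the Knowledge Graph Search API. A tiny illustrative sketch; the `entity_id` accessor name (snake_case of `entityId`) and the example MID are assumptions not shown in these hunks:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  entity = V::GoogleCloudVideointelligenceV1p2beta1Entity.new(
    description: 'dog',
    entity_id: '/m/0bt9lr'   # example Knowledge Graph MID, not a real response
  )

  # description is human readable and locale dependent; entity_id is the
  # stable, opaque key to use when correlating labels across requests.
  puts "#{entity.description} (#{entity.entity_id})"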
3226
3137
 
3227
- # Explicit content annotation (based on per-frame visual signals only).
3228
- # If no explicit content has been detected in a frame, no annotations are
3229
- # present for that frame.
3138
+ # Explicit content annotation (based on per-frame visual signals only). If no
3139
+ # explicit content has been detected in a frame, no annotations are present for
3140
+ # that frame.
3230
3141
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
3231
3142
  include Google::Apis::Core::Hashable
3232
3143
 
@@ -3281,10 +3192,9 @@ module Google
3281
3192
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
3282
3193
  include Google::Apis::Core::Hashable
3283
3194
 
3284
- # Common categories for the detected entity.
3285
- # For example, when the label is `Terrier`, the category is likely `dog`. And
3286
- # in some cases there might be more than one categories e.g., `Terrier` could
3287
- # also be a `pet`.
3195
+ # Common categories for the detected entity. For example, when the label is `
3196
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
3197
+ # than one categories e.g., `Terrier` could also be a `pet`.
3288
3198
  # Corresponds to the JSON property `categoryEntities`
3289
3199
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Entity>]
3290
3200
  attr_accessor :category_entities
@@ -3383,14 +3293,14 @@ module Google
3383
3293
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
3384
3294
  attr_accessor :entity
3385
3295
 
3386
- # All video segments where the recognized logo appears. There might be
3387
- # multiple instances of the same logo class appearing in one VideoSegment.
3296
+ # All video segments where the recognized logo appears. There might be multiple
3297
+ # instances of the same logo class appearing in one VideoSegment.
3388
3298
  # Corresponds to the JSON property `segments`
3389
3299
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
3390
3300
  attr_accessor :segments
3391
3301
 
3392
- # All logo tracks where the recognized logo appears. Each track corresponds
3393
- # to one logo instance appearing in consecutive frames.
3302
+ # All logo tracks where the recognized logo appears. Each track corresponds to
3303
+ # one logo instance appearing in consecutive frames.
3394
3304
  # Corresponds to the JSON property `tracks`
3395
3305
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Track>]
3396
3306
  attr_accessor :tracks
@@ -3407,9 +3317,8 @@ module Google
3407
3317
  end
3408
3318
  end
3409
3319
 
3410
- # Normalized bounding box.
3411
- # The normalized vertex coordinates are relative to the original image.
3412
- # Range: [0, 1].
3320
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3321
+ # original image. Range: [0, 1].
3413
3322
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
3414
3323
  include Google::Apis::Core::Hashable
3415
3324
 
@@ -3447,20 +3356,12 @@ module Google
3447
3356
  end
3448
3357
 
3449
3358
  # Normalized bounding polygon for text (that might not be aligned with axis).
3450
- # Contains list of the corner points in clockwise order starting from
3451
- # top-left corner. For example, for a rectangular bounding box:
3452
- # When the text is horizontal it might look like:
3453
- # 0----1
3454
- # | |
3455
- # 3----2
3456
- # When it's clockwise rotated 180 degrees around the top-left corner it
3457
- # becomes:
3458
- # 2----3
3459
- # | |
3460
- # 1----0
3461
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3462
- # than 0, or greater than 1 due to trignometric calculations for location of
3463
- # the box.
3359
+ # Contains list of the corner points in clockwise order starting from top-left
3360
+ # corner. For example, for a rectangular bounding box: When the text is
3361
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3362
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3363
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3364
+ # or greater than 1 due to trignometric calculations for location of the box.
3464
3365
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
3465
3366
  include Google::Apis::Core::Hashable
3466
3367
 
@@ -3479,9 +3380,8 @@ module Google
3479
3380
  end
3480
3381
  end
3481
3382
 
3482
- # A vertex represents a 2D point in the image.
3483
- # NOTE: the normalized vertex coordinates are relative to the original image
3484
- # and range from 0 to 1.
3383
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3384
+ # coordinates are relative to the original image and range from 0 to 1.
3485
3385
  class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
3486
3386
  include Google::Apis::Core::Hashable
3487
3387
 
@@ -3520,10 +3420,10 @@ module Google
3520
3420
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
3521
3421
  attr_accessor :entity
3522
3422
 
3523
- # Information corresponding to all frames where this object track appears.
3524
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
3525
- # messages in frames.
3526
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
3423
+ # Information corresponding to all frames where this object track appears. Non-
3424
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
3425
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
3426
+ # frames.
3527
3427
  # Corresponds to the JSON property `frames`
3528
3428
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
3529
3429
  attr_accessor :frames
@@ -3533,12 +3433,11 @@ module Google
3533
3433
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
3534
3434
  attr_accessor :segment
3535
3435
 
3536
- # Streaming mode ONLY.
3537
- # In streaming mode, we do not know the end time of a tracked object
3538
- # before it is completed. Hence, there is no VideoSegment info returned.
3539
- # Instead, we provide a unique identifiable integer track_id so that
3540
- # the customers can correlate the results of the ongoing
3541
- # ObjectTrackAnnotation of the same track_id over time.
3436
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
3437
+ # tracked object before it is completed. Hence, there is no VideoSegment info
3438
+ # returned. Instead, we provide a unique identifiable integer track_id so that
3439
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
3440
+ # of the same track_id over time.
3542
3441
  # Corresponds to the JSON property `trackId`
3543
3442
  # @return [Fixnum]
3544
3443
  attr_accessor :track_id
@@ -3568,9 +3467,8 @@ module Google
3568
3467
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
3569
3468
  include Google::Apis::Core::Hashable
3570
3469
 
3571
- # Normalized bounding box.
3572
- # The normalized vertex coordinates are relative to the original image.
3573
- # Range: [0, 1].
3470
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3471
+ # original image. Range: [0, 1].
3574
3472
  # Corresponds to the JSON property `normalizedBoundingBox`
3575
3473
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
3576
3474
  attr_accessor :normalized_bounding_box
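In streaming mode, the comments above note there is no VideoSegment, only a `track_id` for correlating successive object tracking messages. A hypothetical client-side sketch of stitching streamed messages together by track_id; the enclosing annotation class name is assumed, since only `track_id`, `frames`, and the frame classes appear in these hunks:

  require 'google/apis/videointelligence_v1p3beta1'

  V = Google::Apis::VideointelligenceV1p3beta1

  # Streamed messages for the same object share a track_id, so results are
  # stitched together on the client side.
  def merge_streaming_annotations(annotations)
    annotations.each_with_object(Hash.new { |h, k| h[k] = [] }) do |annotation, tracks|
      tracks[annotation.track_id].concat(Array(annotation.frames))
    end
  end

  batch = [
    V::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation.new(   # class name assumed
      track_id: 7,
      frames: [V::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame.new]
    ),
    V::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation.new(
      track_id: 7,
      frames: [V::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame.new]
    )
  ]

  merge_streaming_annotations(batch).each do |track_id, frames|
    puts "track #{track_id}: #{frames.size} frame(s)"
  end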
@@ -3597,10 +3495,10 @@ module Google
3597
3495
 
3598
3496
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
3599
3497
  # indicates an estimated greater likelihood that the recognized words are
3600
- # correct. This field is set only for the top alternative.
3601
- # This field is not guaranteed to be accurate and users should not rely on it
3602
- # to be always provided.
3603
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3498
+ # correct. This field is set only for the top alternative. This field is not
3499
+ # guaranteed to be accurate and users should not rely on it to be always
3500
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3501
+ # not set.
3604
3502
  # Corresponds to the JSON property `confidence`
3605
3503
  # @return [Float]
3606
3504
  attr_accessor :confidence
@@ -3611,8 +3509,8 @@ module Google
3611
3509
  attr_accessor :transcript
3612
3510
 
3613
3511
  # Output only. A list of word-specific information for each recognized word.
3614
- # Note: When `enable_speaker_diarization` is set to true, you will see all
3615
- # the words from the beginning of the audio.
3512
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
3513
+ # words from the beginning of the audio.
3616
3514
  # Corresponds to the JSON property `words`
3617
3515
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
3618
3516
  attr_accessor :words
@@ -3633,18 +3531,17 @@ module Google
3633
3531
  class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
3634
3532
  include Google::Apis::Core::Hashable
3635
3533
 
3636
- # May contain one or more recognition hypotheses (up to the maximum specified
3637
- # in `max_alternatives`). These alternatives are ordered in terms of
3638
- # accuracy, with the top (first) alternative being the most probable, as
3639
- # ranked by the recognizer.
3534
+ # May contain one or more recognition hypotheses (up to the maximum specified in
3535
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
3536
+ # the top (first) alternative being the most probable, as ranked by the
3537
+ # recognizer.
3640
3538
  # Corresponds to the JSON property `alternatives`
3641
3539
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
3642
3540
  attr_accessor :alternatives
3643
3541
 
3644
3542
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
3645
- # language tag of
3646
- # the language in this result. This language code was detected to have the
3647
- # most likelihood of being spoken in the audio.
3543
+ # language tag of the language in this result. This language code was detected
3544
+ # to have the most likelihood of being spoken in the audio.
3648
3545
  # Corresponds to the JSON property `languageCode`
3649
3546
  # @return [String]
3650
3547
  attr_accessor :language_code
@@ -3693,27 +3590,19 @@ module Google
3693
3590
  end
3694
3591
  end
3695
3592
 
3696
- # Video frame level annotation results for text annotation (OCR).
3697
- # Contains information regarding timestamp and bounding box locations for the
3698
- # frames containing detected OCR text snippets.
3593
+ # Video frame level annotation results for text annotation (OCR). Contains
3594
+ # information regarding timestamp and bounding box locations for the frames
3595
+ # containing detected OCR text snippets.
3699
3596
  class GoogleCloudVideointelligenceV1p2beta1TextFrame
3700
3597
  include Google::Apis::Core::Hashable
3701
3598
 
3702
3599
  # Normalized bounding polygon for text (that might not be aligned with axis).
3703
- # Contains list of the corner points in clockwise order starting from
3704
- # top-left corner. For example, for a rectangular bounding box:
3705
- # When the text is horizontal it might look like:
3706
- # 0----1
3707
- # | |
3708
- # 3----2
3709
- # When it's clockwise rotated 180 degrees around the top-left corner it
3710
- # becomes:
3711
- # 2----3
3712
- # | |
3713
- # 1----0
3714
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3715
- # than 0, or greater than 1 due to trignometric calculations for location of
3716
- # the box.
3600
+ # Contains list of the corner points in clockwise order starting from top-left
3601
+ # corner. For example, for a rectangular bounding box: When the text is
3602
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3603
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3604
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3605
+ # or greater than 1 due to trignometric calculations for location of the box.
3717
3606
  # Corresponds to the JSON property `rotatedBoundingBox`
3718
3607
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
3719
3608
  attr_accessor :rotated_bounding_box
@@ -3766,9 +3655,8 @@ module Google
3766
3655
  end
3767
3656
  end
3768
3657
 
3769
- # For tracking related features.
3770
- # An object at time_offset with attributes, and located with
3771
- # normalized_bounding_box.
3658
+ # For tracking related features. An object at time_offset with attributes, and
3659
+ # located with normalized_bounding_box.
3772
3660
  class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
3773
3661
  include Google::Apis::Core::Hashable
3774
3662
 
@@ -3782,15 +3670,14 @@ module Google
3782
3670
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
3783
3671
  attr_accessor :landmarks
3784
3672
 
3785
- # Normalized bounding box.
3786
- # The normalized vertex coordinates are relative to the original image.
3787
- # Range: [0, 1].
3673
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3674
+ # original image. Range: [0, 1].
3788
3675
  # Corresponds to the JSON property `normalizedBoundingBox`
3789
3676
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
3790
3677
  attr_accessor :normalized_bounding_box
3791
3678
 
3792
- # Time-offset, relative to the beginning of the video,
3793
- # corresponding to the video frame for this object.
3679
+ # Time-offset, relative to the beginning of the video, corresponding to the
3680
+ # video frame for this object.
3794
3681
  # Corresponds to the JSON property `timeOffset`
3795
3682
  # @return [String]
3796
3683
  attr_accessor :time_offset
@@ -3849,20 +3736,19 @@ module Google
3849
3736
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
3850
3737
  include Google::Apis::Core::Hashable
3851
3738
 
3852
- # Specifies which feature is being tracked if the request contains more than
3853
- # one feature.
3739
+ # Specifies which feature is being tracked if the request contains more than one
3740
+ # feature.
3854
3741
  # Corresponds to the JSON property `feature`
3855
3742
  # @return [String]
3856
3743
  attr_accessor :feature
3857
3744
 
3858
- # Video file location in
3859
- # [Cloud Storage](https://cloud.google.com/storage/).
3745
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3860
3746
  # Corresponds to the JSON property `inputUri`
3861
3747
  # @return [String]
3862
3748
  attr_accessor :input_uri
3863
3749
 
3864
- # Approximate percentage processed thus far. Guaranteed to be
3865
- # 100 when fully processed.
3750
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
3751
+ # processed.
3866
3752
  # Corresponds to the JSON property `progressPercent`
3867
3753
  # @return [Fixnum]
3868
3754
  attr_accessor :progress_percent
@@ -3901,31 +3787,30 @@ module Google
3901
3787
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
3902
3788
  include Google::Apis::Core::Hashable
3903
3789
 
3904
- # The `Status` type defines a logical error model that is suitable for
3905
- # different programming environments, including REST APIs and RPC APIs. It is
3906
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3907
- # three pieces of data: error code, error message, and error details.
3908
- # You can find out more about this error model and how to work with it in the
3909
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3790
+ # The `Status` type defines a logical error model that is suitable for different
3791
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3792
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3793
+ # data: error code, error message, and error details. You can find out more
3794
+ # about this error model and how to work with it in the [API Design Guide](https:
3795
+ # //cloud.google.com/apis/design/errors).
3910
3796
  # Corresponds to the JSON property `error`
3911
3797
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
3912
3798
  attr_accessor :error
3913
3799
 
3914
- # Explicit content annotation (based on per-frame visual signals only).
3915
- # If no explicit content has been detected in a frame, no annotations are
3916
- # present for that frame.
3800
+ # Explicit content annotation (based on per-frame visual signals only). If no
3801
+ # explicit content has been detected in a frame, no annotations are present for
3802
+ # that frame.
3917
3803
  # Corresponds to the JSON property `explicitAnnotation`
3918
3804
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
3919
3805
  attr_accessor :explicit_annotation
3920
3806
 
3921
- # Label annotations on frame level.
3922
- # There is exactly one element for each unique label.
3807
+ # Label annotations on frame level. There is exactly one element for each unique
3808
+ # label.
3923
3809
  # Corresponds to the JSON property `frameLabelAnnotations`
3924
3810
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
3925
3811
  attr_accessor :frame_label_annotations
3926
3812
 
3927
- # Video file location in
3928
- # [Cloud Storage](https://cloud.google.com/storage/).
3813
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3929
3814
  # Corresponds to the JSON property `inputUri`
3930
3815
  # @return [String]
3931
3816
  attr_accessor :input_uri
@@ -3952,11 +3837,11 @@ module Google
3952
3837
  attr_accessor :segment_label_annotations
3953
3838
 
3954
3839
  # Presence label annotations on video level or user-specified segment level.
3955
- # There is exactly one element for each unique label. Compared to the
3956
- # existing topical `segment_label_annotations`, this field presents more
3957
- # fine-grained, segment-level labels detected in video content and is made
3958
- # available only when the client sets `LabelDetectionConfig.model` to
3959
- # "builtin/latest" in the request.
3840
+ # There is exactly one element for each unique label. Compared to the existing
3841
+ # topical `segment_label_annotations`, this field presents more fine-grained,
3842
+ # segment-level labels detected in video content and is made available only when
3843
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
3844
+ # request.
3960
3845
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
3961
3846
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
3962
3847
  attr_accessor :segment_presence_label_annotations
@@ -3966,17 +3851,17 @@ module Google
3966
3851
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
3967
3852
  attr_accessor :shot_annotations
3968
3853
 
3969
- # Topical label annotations on shot level.
3970
- # There is exactly one element for each unique label.
3854
+ # Topical label annotations on shot level. There is exactly one element for each
3855
+ # unique label.
3971
3856
  # Corresponds to the JSON property `shotLabelAnnotations`
3972
3857
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
3973
3858
  attr_accessor :shot_label_annotations
3974
3859
 
3975
3860
  # Presence label annotations on shot level. There is exactly one element for
3976
- # each unique label. Compared to the existing topical
3977
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
3978
- # labels detected in video content and is made available only when the client
3979
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
3861
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
3862
+ # this field presents more fine-grained, shot-level labels detected in video
3863
+ # content and is made available only when the client sets `LabelDetectionConfig.
3864
+ # model` to "builtin/latest" in the request.
3980
3865
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
3981
3866
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
3982
3867
  attr_accessor :shot_presence_label_annotations
@@ -3986,9 +3871,8 @@ module Google
3986
3871
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
3987
3872
  attr_accessor :speech_transcriptions
3988
3873
 
3989
- # OCR text detection and tracking.
3990
- # Annotations for list of detected text snippets. Each will have list of
3991
- # frame information associated with it.
3874
+ # OCR text detection and tracking. Annotations for list of detected text
3875
+ # snippets. Each will have list of frame information associated with it.
3992
3876
  # Corresponds to the JSON property `textAnnotations`
3993
3877
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
3994
3878
  attr_accessor :text_annotations
@@ -4020,14 +3904,14 @@ module Google
4020
3904
  class GoogleCloudVideointelligenceV1p2beta1VideoSegment
4021
3905
  include Google::Apis::Core::Hashable
4022
3906
 
4023
- # Time-offset, relative to the beginning of the video,
4024
- # corresponding to the end of the segment (inclusive).
3907
+ # Time-offset, relative to the beginning of the video, corresponding to the end
3908
+ # of the segment (inclusive).
4025
3909
  # Corresponds to the JSON property `endTimeOffset`
4026
3910
  # @return [String]
4027
3911
  attr_accessor :end_time_offset
4028
3912
 
4029
- # Time-offset, relative to the beginning of the video,
4030
- # corresponding to the start of the segment (inclusive).
3913
+ # Time-offset, relative to the beginning of the video, corresponding to the
3914
+ # start of the segment (inclusive).
4031
3915
  # Corresponds to the JSON property `startTimeOffset`
4032
3916
  # @return [String]
4033
3917
  attr_accessor :start_time_offset
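For orientation, a minimal sketch of building one of these segments with the generated class from this hunk. The offset values are illustrative placeholders; representing them as protobuf Duration strings (for example "30s") is an assumption not shown in this excerpt.

  require 'google/apis/videointelligence_v1p3beta1'

  # Illustrative only: a 30-second segment starting at the beginning of the video.
  segment = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment.new(
    start_time_offset: '0s',   # inclusive start, relative to the beginning of the video
    end_time_offset: '30s'     # inclusive end of the segment
  )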
@@ -4044,41 +3928,41 @@ module Google
4044
3928
  end
4045
3929
 
4046
3930
  # Word-specific information for recognized words. Word information is only
4047
- # included in the response when certain request parameters are set, such
4048
- # as `enable_word_time_offsets`.
3931
+ # included in the response when certain request parameters are set, such as `
3932
+ # enable_word_time_offsets`.
4049
3933
  class GoogleCloudVideointelligenceV1p2beta1WordInfo
4050
3934
  include Google::Apis::Core::Hashable
4051
3935
 
4052
3936
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4053
3937
  # indicates an estimated greater likelihood that the recognized words are
4054
- # correct. This field is set only for the top alternative.
4055
- # This field is not guaranteed to be accurate and users should not rely on it
4056
- # to be always provided.
4057
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3938
+ # correct. This field is set only for the top alternative. This field is not
3939
+ # guaranteed to be accurate and users should not rely on it to be always
3940
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3941
+ # not set.
4058
3942
  # Corresponds to the JSON property `confidence`
4059
3943
  # @return [Float]
4060
3944
  attr_accessor :confidence
4061
3945
 
4062
- # Time offset relative to the beginning of the audio, and
4063
- # corresponding to the end of the spoken word. This field is only set if
4064
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4065
- # experimental feature and the accuracy of the time offset can vary.
3946
+ # Time offset relative to the beginning of the audio, and corresponding to the
3947
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
3948
+ # true` and only in the top hypothesis. This is an experimental feature and the
3949
+ # accuracy of the time offset can vary.
4066
3950
  # Corresponds to the JSON property `endTime`
4067
3951
  # @return [String]
4068
3952
  attr_accessor :end_time
4069
3953
 
4070
- # Output only. A distinct integer value is assigned for every speaker within
4071
- # the audio. This field specifies which one of those speakers was detected to
4072
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
4073
- # and is only set if speaker diarization is enabled.
3954
+ # Output only. A distinct integer value is assigned for every speaker within the
3955
+ # audio. This field specifies which one of those speakers was detected to have
3956
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
3957
+ # only set if speaker diarization is enabled.
4074
3958
  # Corresponds to the JSON property `speakerTag`
4075
3959
  # @return [Fixnum]
4076
3960
  attr_accessor :speaker_tag
4077
3961
 
4078
- # Time offset relative to the beginning of the audio, and
4079
- # corresponding to the start of the spoken word. This field is only set if
4080
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4081
- # experimental feature and the accuracy of the time offset can vary.
3962
+ # Time offset relative to the beginning of the audio, and corresponding to the
3963
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
3964
+ # true` and only in the top hypothesis. This is an experimental feature and the
3965
+ # accuracy of the time offset can vary.
4082
3966
  # Corresponds to the JSON property `startTime`
4083
3967
  # @return [String]
4084
3968
  attr_accessor :start_time
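As a side illustration, word-level results like these can be regrouped by speaker once a response is in hand. This is a hedged sketch, not part of the library: it assumes a GoogleCloudVideointelligenceV1p2beta1SpeechTranscription object taken from an annotation response, that speaker diarization was enabled so speaker_tag is populated, and that the recognized text is exposed via a `word` attribute (not shown in this hunk).

  require 'google/apis/videointelligence_v1p3beta1'

  # Sketch: collect diarized words by speaker_tag from one speech transcription.
  def words_by_speaker(transcription)
    top = transcription.alternatives&.first             # diarized words are reported on the top alternative
    grouped = Hash.new { |hash, tag| hash[tag] = [] }
    (top&.words || []).each do |word_info|
      grouped[word_info.speaker_tag] << word_info.word  # `word` assumed to hold the recognized text
    end
    grouped
  end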
@@ -4102,9 +3986,9 @@ module Google
4102
3986
  end
4103
3987
  end
4104
3988
 
4105
- # Video annotation progress. Included in the `metadata`
4106
- # field of the `Operation` returned by the `GetOperation`
4107
- # call of the `google::longrunning::Operations` service.
3989
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
3990
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3991
+ # service.
4108
3992
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
4109
3993
  include Google::Apis::Core::Hashable
4110
3994
 
@@ -4132,24 +4016,22 @@ module Google
4132
4016
  # @return [Array<String>]
4133
4017
  attr_accessor :features
4134
4018
 
4135
- # The video data bytes.
4136
- # If unset, the input video(s) should be specified via the `input_uri`.
4137
- # If set, `input_uri` must be unset.
4019
+ # The video data bytes. If unset, the input video(s) should be specified via the
4020
+ # `input_uri`. If set, `input_uri` must be unset.
4138
4021
  # Corresponds to the JSON property `inputContent`
4139
4022
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
4140
4023
  # @return [String]
4141
4024
  attr_accessor :input_content
4142
4025
 
4143
- # Input video location. Currently, only
4144
- # [Cloud Storage](https://cloud.google.com/storage/) URIs are
4145
- # supported. URIs must be specified in the following format:
4146
- # `gs://bucket-id/object-id` (other URI formats return
4147
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
4148
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
4149
- # To identify multiple videos, a video URI may include wildcards in the
4150
- # `object-id`. Supported wildcards: '*' to match 0 or more characters;
4151
- # '?' to match 1 character. If unset, the input video should be embedded
4152
- # in the request as `input_content`. If set, `input_content` must be unset.
4026
+ # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
4027
+ # storage/) URIs are supported. URIs must be specified in the following format: `
4028
+ # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
4029
+ # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
4030
+ # google.com/storage/docs/request-endpoints). To identify multiple videos, a
4031
+ # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
4032
+ # to match 0 or more characters; '?' to match 1 character. If unset, the input
4033
+ # video should be embedded in the request as `input_content`. If set, `
4034
+ # input_content` must be unset.
4153
4035
  # Corresponds to the JSON property `inputUri`
4154
4036
  # @return [String]
4155
4037
  attr_accessor :input_uri
@@ -4163,11 +4045,11 @@ module Google
4163
4045
  attr_accessor :location_id
4164
4046
 
4165
4047
  # Optional. Location where the output (in JSON format) should be stored.
4166
- # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
4167
- # URIs are supported. These must be specified in the following format:
4168
- # `gs://bucket-id/object-id` (other URI formats return
4169
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
4170
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
4048
+ # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
4049
+ # supported. These must be specified in the following format: `gs://bucket-id/
4050
+ # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
4051
+ # more information, see [Request URIs](https://cloud.google.com/storage/docs/
4052
+ # request-endpoints).
4171
4053
  # Corresponds to the JSON property `outputUri`
4172
4054
  # @return [String]
4173
4055
  attr_accessor :output_uri
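Pulling these request fields together, a hedged sketch of what a request built from this class might look like. The class name GoogleCloudVideointelligenceV1p3beta1AnnotateVideoRequest is assumed from context (the hunk shows only its fields), and the bucket paths and feature list are placeholders; exactly one of input_uri or input_content should be set.

  require 'google/apis/videointelligence_v1p3beta1'

  # Illustrative values only.
  request = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1AnnotateVideoRequest.new(
    input_uri: 'gs://my-bucket/my-video.mp4',       # Cloud Storage URI; leave input_content unset
    features: ['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION'],
    output_uri: 'gs://my-bucket/annotations.json'   # optional location for the JSON results
  )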
@@ -4192,9 +4074,9 @@ module Google
4192
4074
  end
4193
4075
  end
4194
4076
 
4195
- # Video annotation response. Included in the `response`
4196
- # field of the `Operation` returned by the `GetOperation`
4197
- # call of the `google::longrunning::Operations` service.
4077
+ # Video annotation response. Included in the `response` field of the `Operation`
4078
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
4079
+ # service.
4198
4080
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
4199
4081
  include Google::Apis::Core::Hashable
4200
4082
 
@@ -4228,10 +4110,9 @@ module Google
4228
4110
  # @return [String]
4229
4111
  attr_accessor :display_name
4230
4112
 
4231
- # The resource name of the celebrity. Have the format
4232
- # `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
4233
- # kg-mid is the id in Google knowledge graph, which is unique for the
4234
- # celebrity.
4113
+ # The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
4114
+ # indicates a celebrity from preloaded gallery. kg-mid is the id in Google
4115
+ # knowledge graph, which is unique for the celebrity.
4235
4116
  # Corresponds to the JSON property `name`
4236
4117
  # @return [String]
4237
4118
  attr_accessor :name
@@ -4252,8 +4133,8 @@ module Google
4252
4133
  class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
4253
4134
  include Google::Apis::Core::Hashable
4254
4135
 
4255
- # The tracks detected from the input video, including recognized celebrities
4256
- # and other detected faces in the video.
4136
+ # The tracks detected from the input video, including recognized celebrities and
4137
+ # other detected faces in the video.
4257
4138
  # Corresponds to the JSON property `celebrityTracks`
4258
4139
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
4259
4140
  attr_accessor :celebrity_tracks
@@ -4309,14 +4190,14 @@ module Google
4309
4190
  # @return [Float]
4310
4191
  attr_accessor :confidence
4311
4192
 
4312
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
4313
- # A full list of supported type names will be provided in the document.
4193
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
4194
+ # full list of supported type names will be provided in the document.
4314
4195
  # Corresponds to the JSON property `name`
4315
4196
  # @return [String]
4316
4197
  attr_accessor :name
4317
4198
 
4318
- # Text value of the detection result. For example, the value for "HairColor"
4319
- # can be "black", "blonde", etc.
4199
+ # Text value of the detection result. For example, the value for "HairColor" can
4200
+ # be "black", "blonde", etc.
4320
4201
  # Corresponds to the JSON property `value`
4321
4202
  # @return [String]
4322
4203
  attr_accessor :value
@@ -4348,9 +4229,8 @@ module Google
4348
4229
  # @return [String]
4349
4230
  attr_accessor :name
4350
4231
 
4351
- # A vertex represents a 2D point in the image.
4352
- # NOTE: the normalized vertex coordinates are relative to the original image
4353
- # and range from 0 to 1.
4232
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4233
+ # coordinates are relative to the original image and range from 0 to 1.
4354
4234
  # Corresponds to the JSON property `point`
4355
4235
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
4356
4236
  attr_accessor :point
@@ -4376,8 +4256,7 @@ module Google
4376
4256
  # @return [String]
4377
4257
  attr_accessor :description
4378
4258
 
4379
- # Opaque entity ID. Some IDs may be available in
4380
- # [Google Knowledge Graph Search
4259
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
4381
4260
  # API](https://developers.google.com/knowledge-graph/).
4382
4261
  # Corresponds to the JSON property `entityId`
4383
4262
  # @return [String]
@@ -4400,9 +4279,9 @@ module Google
4400
4279
  end
4401
4280
  end
4402
4281
 
4403
- # Explicit content annotation (based on per-frame visual signals only).
4404
- # If no explicit content has been detected in a frame, no annotations are
4405
- # present for that frame.
4282
+ # Explicit content annotation (based on per-frame visual signals only). If no
4283
+ # explicit content has been detected in a frame, no annotations are present for
4284
+ # that frame.
4406
4285
  class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
4407
4286
  include Google::Apis::Core::Hashable
4408
4287
 
@@ -4431,9 +4310,8 @@ module Google
4431
4310
  class GoogleCloudVideointelligenceV1p3beta1ExplicitContentDetectionConfig
4432
4311
  include Google::Apis::Core::Hashable
4433
4312
 
4434
- # Model to use for explicit content detection.
4435
- # Supported values: "builtin/stable" (the default if unset) and
4436
- # "builtin/latest".
4313
+ # Model to use for explicit content detection. Supported values: "builtin/stable"
4314
+ # (the default if unset) and "builtin/latest".
4437
4315
  # Corresponds to the JSON property `model`
4438
4316
  # @return [String]
4439
4317
  attr_accessor :model
@@ -4523,9 +4401,8 @@ module Google
4523
4401
  attr_accessor :include_bounding_boxes
4524
4402
  alias_method :include_bounding_boxes?, :include_bounding_boxes
4525
4403
 
4526
- # Model to use for face detection.
4527
- # Supported values: "builtin/stable" (the default if unset) and
4528
- # "builtin/latest".
4404
+ # Model to use for face detection. Supported values: "builtin/stable" (the
4405
+ # default if unset) and "builtin/latest".
4529
4406
  # Corresponds to the JSON property `model`
4530
4407
  # @return [String]
4531
4408
  attr_accessor :model
@@ -4546,10 +4423,9 @@ module Google
4546
4423
  class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
4547
4424
  include Google::Apis::Core::Hashable
4548
4425
 
4549
- # Common categories for the detected entity.
4550
- # For example, when the label is `Terrier`, the category is likely `dog`. And
4551
- # in some cases there might be more than one categories e.g., `Terrier` could
4552
- # also be a `pet`.
4426
+ # Common categories for the detected entity. For example, when the label is `
4427
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
4428
+ # than one categories e.g., `Terrier` could also be a `pet`.
4553
4429
  # Corresponds to the JSON property `categoryEntities`
4554
4430
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Entity>]
4555
4431
  attr_accessor :category_entities
@@ -4592,44 +4468,40 @@ module Google
4592
4468
  class GoogleCloudVideointelligenceV1p3beta1LabelDetectionConfig
4593
4469
  include Google::Apis::Core::Hashable
4594
4470
 
4595
- # The confidence threshold we perform filtering on the labels from
4596
- # frame-level detection. If not set, it is set to 0.4 by default. The valid
4597
- # range for this threshold is [0.1, 0.9]. Any value set outside of this
4598
- # range will be clipped.
4599
- # Note: For best results, follow the default threshold. We will update
4600
- # the default threshold everytime when we release a new model.
4471
+ # The confidence threshold we perform filtering on the labels from frame-level
4472
+ # detection. If not set, it is set to 0.4 by default. The valid range for this
4473
+ # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
4474
+ # Note: For best results, follow the default threshold. We will update the
4475
+ # default threshold everytime when we release a new model.
4601
4476
  # Corresponds to the JSON property `frameConfidenceThreshold`
4602
4477
  # @return [Float]
4603
4478
  attr_accessor :frame_confidence_threshold
4604
4479
 
4605
- # What labels should be detected with LABEL_DETECTION, in addition to
4606
- # video-level labels or segment-level labels.
4607
- # If unspecified, defaults to `SHOT_MODE`.
4480
+ # What labels should be detected with LABEL_DETECTION, in addition to video-
4481
+ # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
4608
4482
  # Corresponds to the JSON property `labelDetectionMode`
4609
4483
  # @return [String]
4610
4484
  attr_accessor :label_detection_mode
4611
4485
 
4612
- # Model to use for label detection.
4613
- # Supported values: "builtin/stable" (the default if unset) and
4614
- # "builtin/latest".
4486
+ # Model to use for label detection. Supported values: "builtin/stable" (the
4487
+ # default if unset) and "builtin/latest".
4615
4488
  # Corresponds to the JSON property `model`
4616
4489
  # @return [String]
4617
4490
  attr_accessor :model
4618
4491
 
4619
- # Whether the video has been shot from a stationary (i.e., non-moving)
4620
- # camera. When set to true, might improve detection accuracy for moving
4621
- # objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
4492
+ # Whether the video has been shot from a stationary (i.e., non-moving) camera.
4493
+ # When set to true, might improve detection accuracy for moving objects. Should
4494
+ # be used with `SHOT_AND_FRAME_MODE` enabled.
4622
4495
  # Corresponds to the JSON property `stationaryCamera`
4623
4496
  # @return [Boolean]
4624
4497
  attr_accessor :stationary_camera
4625
4498
  alias_method :stationary_camera?, :stationary_camera
4626
4499
 
4627
- # The confidence threshold we perform filtering on the labels from
4628
- # video-level and shot-level detections. If not set, it's set to 0.3 by
4629
- # default. The valid range for this threshold is [0.1, 0.9]. Any value set
4630
- # outside of this range will be clipped.
4631
- # Note: For best results, follow the default threshold. We will update
4632
- # the default threshold everytime when we release a new model.
4500
+ # The confidence threshold we perform filtering on the labels from video-level
4501
+ # and shot-level detections. If not set, it's set to 0.3 by default. The valid
4502
+ # range for this threshold is [0.1, 0.9]. Any value set outside of this range
4503
+ # will be clipped. Note: For best results, follow the default threshold. We will
4504
+ # update the default threshold everytime when we release a new model.
4633
4505
  # Corresponds to the JSON property `videoConfidenceThreshold`
4634
4506
  # @return [Float]
4635
4507
  attr_accessor :video_confidence_threshold
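For reference, a minimal sketch combining the label-detection options documented above, using the generated class from this hunk. The values simply restate the documented defaults and examples rather than recommending settings.

  require 'google/apis/videointelligence_v1p3beta1'

  label_config = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelDetectionConfig.new(
    label_detection_mode: 'SHOT_AND_FRAME_MODE',  # stationary_camera should be used with this mode
    stationary_camera: true,
    model: 'builtin/stable',                      # the default if unset
    frame_confidence_threshold: 0.4,              # documented default
    video_confidence_threshold: 0.3               # documented default
  )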
@@ -4708,14 +4580,14 @@ module Google
4708
4580
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
4709
4581
  attr_accessor :entity
4710
4582
 
4711
- # All video segments where the recognized logo appears. There might be
4712
- # multiple instances of the same logo class appearing in one VideoSegment.
4583
+ # All video segments where the recognized logo appears. There might be multiple
4584
+ # instances of the same logo class appearing in one VideoSegment.
4713
4585
  # Corresponds to the JSON property `segments`
4714
4586
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
4715
4587
  attr_accessor :segments
4716
4588
 
4717
- # All logo tracks where the recognized logo appears. Each track corresponds
4718
- # to one logo instance appearing in consecutive frames.
4589
+ # All logo tracks where the recognized logo appears. Each track corresponds to
4590
+ # one logo instance appearing in consecutive frames.
4719
4591
  # Corresponds to the JSON property `tracks`
4720
4592
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
4721
4593
  attr_accessor :tracks
@@ -4732,9 +4604,8 @@ module Google
4732
4604
  end
4733
4605
  end
4734
4606
 
4735
- # Normalized bounding box.
4736
- # The normalized vertex coordinates are relative to the original image.
4737
- # Range: [0, 1].
4607
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4608
+ # original image. Range: [0, 1].
4738
4609
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
4739
4610
  include Google::Apis::Core::Hashable
4740
4611
 
@@ -4772,20 +4643,12 @@ module Google
4772
4643
  end
4773
4644
 
4774
4645
  # Normalized bounding polygon for text (that might not be aligned with axis).
4775
- # Contains list of the corner points in clockwise order starting from
4776
- # top-left corner. For example, for a rectangular bounding box:
4777
- # When the text is horizontal it might look like:
4778
- # 0----1
4779
- # | |
4780
- # 3----2
4781
- # When it's clockwise rotated 180 degrees around the top-left corner it
4782
- # becomes:
4783
- # 2----3
4784
- # | |
4785
- # 1----0
4786
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
4787
- # than 0, or greater than 1 due to trignometric calculations for location of
4788
- # the box.
4646
+ # Contains list of the corner points in clockwise order starting from top-left
4647
+ # corner. For example, for a rectangular bounding box: When the text is
4648
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4649
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4650
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4651
+ # or greater than 1 due to trignometric calculations for location of the box.
4789
4652
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
4790
4653
  include Google::Apis::Core::Hashable
4791
4654
 
@@ -4804,9 +4667,8 @@ module Google
4804
4667
  end
4805
4668
  end
4806
4669
 
4807
- # A vertex represents a 2D point in the image.
4808
- # NOTE: the normalized vertex coordinates are relative to the original image
4809
- # and range from 0 to 1.
4670
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4671
+ # coordinates are relative to the original image and range from 0 to 1.
4810
4672
  class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
4811
4673
  include Google::Apis::Core::Hashable
4812
4674
 
@@ -4845,10 +4707,10 @@ module Google
4845
4707
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
4846
4708
  attr_accessor :entity
4847
4709
 
4848
- # Information corresponding to all frames where this object track appears.
4849
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
4850
- # messages in frames.
4851
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
4710
+ # Information corresponding to all frames where this object track appears. Non-
4711
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
4712
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
4713
+ # frames.
4852
4714
  # Corresponds to the JSON property `frames`
4853
4715
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
4854
4716
  attr_accessor :frames
@@ -4858,12 +4720,11 @@ module Google
4858
4720
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
4859
4721
  attr_accessor :segment
4860
4722
 
4861
- # Streaming mode ONLY.
4862
- # In streaming mode, we do not know the end time of a tracked object
4863
- # before it is completed. Hence, there is no VideoSegment info returned.
4864
- # Instead, we provide a unique identifiable integer track_id so that
4865
- # the customers can correlate the results of the ongoing
4866
- # ObjectTrackAnnotation of the same track_id over time.
4723
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
4724
+ # tracked object before it is completed. Hence, there is no VideoSegment info
4725
+ # returned. Instead, we provide a unique identifiable integer track_id so that
4726
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
4727
+ # of the same track_id over time.
4867
4728
  # Corresponds to the JSON property `trackId`
4868
4729
  # @return [Fixnum]
4869
4730
  attr_accessor :track_id
@@ -4892,9 +4753,8 @@ module Google
4892
4753
  class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingConfig
4893
4754
  include Google::Apis::Core::Hashable
4894
4755
 
4895
- # Model to use for object tracking.
4896
- # Supported values: "builtin/stable" (the default if unset) and
4897
- # "builtin/latest".
4756
+ # Model to use for object tracking. Supported values: "builtin/stable" (the
4757
+ # default if unset) and "builtin/latest".
4898
4758
  # Corresponds to the JSON property `model`
4899
4759
  # @return [String]
4900
4760
  attr_accessor :model
@@ -4914,9 +4774,8 @@ module Google
4914
4774
  class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
4915
4775
  include Google::Apis::Core::Hashable
4916
4776
 
4917
- # Normalized bounding box.
4918
- # The normalized vertex coordinates are relative to the original image.
4919
- # Range: [0, 1].
4777
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4778
+ # original image. Range: [0, 1].
4920
4779
  # Corresponds to the JSON property `normalizedBoundingBox`
4921
4780
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
4922
4781
  attr_accessor :normalized_bounding_box
@@ -4967,23 +4826,21 @@ module Google
4967
4826
  include Google::Apis::Core::Hashable
4968
4827
 
4969
4828
  # Whether to enable person attributes detection, such as cloth color (black,
4970
- # blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
4971
- # etc.
4829
+ # blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, etc.
4972
4830
  # Ignored if 'include_bounding_boxes' is set to false.
4973
4831
  # Corresponds to the JSON property `includeAttributes`
4974
4832
  # @return [Boolean]
4975
4833
  attr_accessor :include_attributes
4976
4834
  alias_method :include_attributes?, :include_attributes
4977
4835
 
4978
- # Whether bounding boxes are included in the person detection annotation
4979
- # output.
4836
+ # Whether bounding boxes are included in the person detection annotation output.
4980
4837
  # Corresponds to the JSON property `includeBoundingBoxes`
4981
4838
  # @return [Boolean]
4982
4839
  attr_accessor :include_bounding_boxes
4983
4840
  alias_method :include_bounding_boxes?, :include_bounding_boxes
4984
4841
 
4985
- # Whether to enable pose landmarks detection. Ignored if
4986
- # 'include_bounding_boxes' is set to false.
4842
+ # Whether to enable pose landmarks detection. Ignored if 'include_bounding_boxes'
4843
+ # is set to false.
4987
4844
  # Corresponds to the JSON property `includePoseLandmarks`
4988
4845
  # @return [Boolean]
4989
4846
  attr_accessor :include_pose_landmarks
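A short sketch of how these three person-detection flags relate. The class name GoogleCloudVideointelligenceV1p3beta1PersonDetectionConfig is assumed from context, since this hunk shows only the fields; per the comments above, attributes and pose landmarks are ignored unless bounding boxes are enabled.

  require 'google/apis/videointelligence_v1p3beta1'

  # Assumed class name; values are illustrative.
  person_config = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1PersonDetectionConfig.new(
    include_bounding_boxes: true,   # required for the two options below to take effect
    include_attributes: true,       # cloth color, type, pattern, hair, etc.
    include_pose_landmarks: true
  )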
@@ -5030,9 +4887,8 @@ module Google
5030
4887
  class GoogleCloudVideointelligenceV1p3beta1ShotChangeDetectionConfig
5031
4888
  include Google::Apis::Core::Hashable
5032
4889
 
5033
- # Model to use for shot change detection.
5034
- # Supported values: "builtin/stable" (the default if unset) and
5035
- # "builtin/latest".
4890
+ # Model to use for shot change detection. Supported values: "builtin/stable" (
4891
+ # the default if unset) and "builtin/latest".
5036
4892
  # Corresponds to the JSON property `model`
5037
4893
  # @return [String]
5038
4894
  attr_accessor :model
@@ -5052,12 +4908,12 @@ module Google
5052
4908
  class GoogleCloudVideointelligenceV1p3beta1SpeechContext
5053
4909
  include Google::Apis::Core::Hashable
5054
4910
 
5055
- # Optional. A list of strings containing words and phrases "hints" so that
5056
- # the speech recognition is more likely to recognize them. This can be used
5057
- # to improve the accuracy for specific words and phrases, for example, if
5058
- # specific commands are typically spoken by the user. This can also be used
5059
- # to add additional words to the vocabulary of the recognizer. See
5060
- # [usage limits](https://cloud.google.com/speech/limits#content).
4911
+ # Optional. A list of strings containing words and phrases "hints" so that the
4912
+ # speech recognition is more likely to recognize them. This can be used to
4913
+ # improve the accuracy for specific words and phrases, for example, if specific
4914
+ # commands are typically spoken by the user. This can also be used to add
4915
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
4916
+ # //cloud.google.com/speech/limits#content).
5061
4917
  # Corresponds to the JSON property `phrases`
5062
4918
  # @return [Array<String>]
5063
4919
  attr_accessor :phrases
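As a small illustration, phrase hints could be supplied through this class as follows; the phrases are placeholders, and the usage limits linked above still apply.

  require 'google/apis/videointelligence_v1p3beta1'

  # Placeholder phrases that bias recognition toward domain-specific vocabulary.
  speech_context = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechContext.new(
    phrases: ['Cloud Video Intelligence', 'speaker diarization']
  )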
@@ -5078,10 +4934,10 @@ module Google
5078
4934
 
5079
4935
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5080
4936
  # indicates an estimated greater likelihood that the recognized words are
5081
- # correct. This field is set only for the top alternative.
5082
- # This field is not guaranteed to be accurate and users should not rely on it
5083
- # to be always provided.
5084
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4937
+ # correct. This field is set only for the top alternative. This field is not
4938
+ # guaranteed to be accurate and users should not rely on it to be always
4939
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4940
+ # not set.
5085
4941
  # Corresponds to the JSON property `confidence`
5086
4942
  # @return [Float]
5087
4943
  attr_accessor :confidence
@@ -5092,8 +4948,8 @@ module Google
5092
4948
  attr_accessor :transcript
5093
4949
 
5094
4950
  # Output only. A list of word-specific information for each recognized word.
5095
- # Note: When `enable_speaker_diarization` is set to true, you will see all
5096
- # the words from the beginning of the audio.
4951
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
4952
+ # words from the beginning of the audio.
5097
4953
  # Corresponds to the JSON property `words`
5098
4954
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
5099
4955
  attr_accessor :words
@@ -5114,18 +4970,17 @@ module Google
5114
4970
  class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
5115
4971
  include Google::Apis::Core::Hashable
5116
4972
 
5117
- # May contain one or more recognition hypotheses (up to the maximum specified
5118
- # in `max_alternatives`). These alternatives are ordered in terms of
5119
- # accuracy, with the top (first) alternative being the most probable, as
5120
- # ranked by the recognizer.
4973
+ # May contain one or more recognition hypotheses (up to the maximum specified in
4974
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
4975
+ # the top (first) alternative being the most probable, as ranked by the
4976
+ # recognizer.
5121
4977
  # Corresponds to the JSON property `alternatives`
5122
4978
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
5123
4979
  attr_accessor :alternatives
5124
4980
 
5125
4981
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
5126
- # language tag of
5127
- # the language in this result. This language code was detected to have the
5128
- # most likelihood of being spoken in the audio.
4982
+ # language tag of the language in this result. This language code was detected
4983
+ # to have the most likelihood of being spoken in the audio.
5129
4984
  # Corresponds to the JSON property `languageCode`
5130
4985
  # @return [String]
5131
4986
  attr_accessor :language_code
@@ -5152,66 +5007,62 @@ module Google
5152
5007
  attr_accessor :audio_tracks
5153
5008
 
5154
5009
  # Optional. If set, specifies the estimated number of speakers in the
5155
- # conversation.
5156
- # If not set, defaults to '2'.
5157
- # Ignored unless enable_speaker_diarization is set to true.
5010
+ # conversation. If not set, defaults to '2'. Ignored unless
5011
+ # enable_speaker_diarization is set to true.
5158
5012
  # Corresponds to the JSON property `diarizationSpeakerCount`
5159
5013
  # @return [Fixnum]
5160
5014
  attr_accessor :diarization_speaker_count
5161
5015
 
5162
- # Optional. If 'true', adds punctuation to recognition result hypotheses.
5163
- # This feature is only available in select languages. Setting this for
5164
- # requests in other languages has no effect at all. The default 'false' value
5165
- # does not add punctuation to result hypotheses. NOTE: "This is currently
5166
- # offered as an experimental service, complimentary to all users. In the
5167
- # future this may be exclusively available as a premium feature."
5016
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
5017
+ # feature is only available in select languages. Setting this for requests in
5018
+ # other languages has no effect at all. The default 'false' value does not add
5019
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
5020
+ # experimental service, complimentary to all users. In the future this may be
5021
+ # exclusively available as a premium feature."
5168
5022
  # Corresponds to the JSON property `enableAutomaticPunctuation`
5169
5023
  # @return [Boolean]
5170
5024
  attr_accessor :enable_automatic_punctuation
5171
5025
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation
5172
5026
 
5173
- # Optional. If 'true', enables speaker detection for each recognized word in
5174
- # the top alternative of the recognition result using a speaker_tag provided
5175
- # in the WordInfo.
5176
- # Note: When this is true, we send all the words from the beginning of the
5177
- # audio for the top alternative in every consecutive response.
5178
- # This is done in order to improve our speaker tags as our models learn to
5179
- # identify the speakers in the conversation over time.
5027
+ # Optional. If 'true', enables speaker detection for each recognized word in the
5028
+ # top alternative of the recognition result using a speaker_tag provided in the
5029
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
5030
+ # the audio for the top alternative in every consecutive response. This is done
5031
+ # in order to improve our speaker tags as our models learn to identify the
5032
+ # speakers in the conversation over time.
5180
5033
  # Corresponds to the JSON property `enableSpeakerDiarization`
5181
5034
  # @return [Boolean]
5182
5035
  attr_accessor :enable_speaker_diarization
5183
5036
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization
5184
5037
 
5185
5038
  # Optional. If `true`, the top result includes a list of words and the
5186
- # confidence for those words. If `false`, no word-level confidence
5187
- # information is returned. The default is `false`.
5039
+ # confidence for those words. If `false`, no word-level confidence information
5040
+ # is returned. The default is `false`.
5188
5041
  # Corresponds to the JSON property `enableWordConfidence`
5189
5042
  # @return [Boolean]
5190
5043
  attr_accessor :enable_word_confidence
5191
5044
  alias_method :enable_word_confidence?, :enable_word_confidence
5192
5045
 
5193
- # Optional. If set to `true`, the server will attempt to filter out
5194
- # profanities, replacing all but the initial character in each filtered word
5195
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
5196
- # won't be filtered out.
5046
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
5047
+ # replacing all but the initial character in each filtered word with asterisks,
5048
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
5197
5049
  # Corresponds to the JSON property `filterProfanity`
5198
5050
  # @return [Boolean]
5199
5051
  attr_accessor :filter_profanity
5200
5052
  alias_method :filter_profanity?, :filter_profanity
5201
5053
 
5202
- # Required. *Required* The language of the supplied audio as a
5203
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
5204
- # Example: "en-US".
5205
- # See [Language Support](https://cloud.google.com/speech/docs/languages)
5206
- # for a list of the currently supported language codes.
5054
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
5055
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
5056
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
5057
+ # of the currently supported language codes.
5207
5058
  # Corresponds to the JSON property `languageCode`
5208
5059
  # @return [String]
5209
5060
  attr_accessor :language_code
5210
5061
 
5211
5062
  # Optional. Maximum number of recognition hypotheses to be returned.
5212
5063
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
5213
- # within each `SpeechTranscription`. The server may return fewer than
5214
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
5064
+ # within each `SpeechTranscription`. The server may return fewer than `
5065
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
5215
5066
  # return a maximum of one. If omitted, will return a maximum of one.
5216
5067
  # Corresponds to the JSON property `maxAlternatives`
5217
5068
  # @return [Fixnum]
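Taken together, a hedged sketch of a transcription configuration built from the fields in this hunk. The class name GoogleCloudVideointelligenceV1p3beta1SpeechTranscriptionConfig is assumed from context, and all values are illustrative (for example, 2 is simply the documented default speaker count).

  require 'google/apis/videointelligence_v1p3beta1'

  # Assumed class name; only its fields appear in this excerpt.
  speech_config = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscriptionConfig.new(
    language_code: 'en-US',               # required BCP-47 language tag
    enable_speaker_diarization: true,
    diarization_speaker_count: 2,         # documented default when unset
    enable_automatic_punctuation: true,   # experimental; select languages only
    enable_word_confidence: true,
    filter_profanity: false,
    max_alternatives: 1                   # at most one hypothesis per transcription
  )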
@@ -5240,32 +5091,31 @@ module Google
5240
5091
  end
5241
5092
  end
5242
5093
 
5243
- # `StreamingAnnotateVideoResponse` is the only message returned to the client
5244
- # by `StreamingAnnotateVideo`. A series of zero or more
5245
- # `StreamingAnnotateVideoResponse` messages are streamed back to the client.
5094
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
5095
+ # `StreamingAnnotateVideo`. A series of zero or more `
5096
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
5246
5097
  class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
5247
5098
  include Google::Apis::Core::Hashable
5248
5099
 
5249
- # Streaming annotation results corresponding to a portion of the video
5250
- # that is currently being processed.
5100
+ # Streaming annotation results corresponding to a portion of the video that is
5101
+ # currently being processed.
5251
5102
  # Corresponds to the JSON property `annotationResults`
5252
5103
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
5253
5104
  attr_accessor :annotation_results
5254
5105
 
5255
- # Google Cloud Storage URI that stores annotation results of one
5256
- # streaming session in JSON format.
5257
- # It is the annotation_result_storage_directory
5258
- # from the request followed by '/cloud_project_number-session_id'.
5106
+ # Google Cloud Storage URI that stores annotation results of one streaming
5107
+ # session in JSON format. It is the annotation_result_storage_directory from the
5108
+ # request followed by '/cloud_project_number-session_id'.
5259
5109
  # Corresponds to the JSON property `annotationResultsUri`
5260
5110
  # @return [String]
5261
5111
  attr_accessor :annotation_results_uri
5262
5112
 
5263
- # The `Status` type defines a logical error model that is suitable for
5264
- # different programming environments, including REST APIs and RPC APIs. It is
5265
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5266
- # three pieces of data: error code, error message, and error details.
5267
- # You can find out more about this error model and how to work with it in the
5268
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5113
+ # The `Status` type defines a logical error model that is suitable for different
5114
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5115
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5116
+ # data: error code, error message, and error details. You can find out more
5117
+ # about this error model and how to work with it in the [API Design Guide](https:
5118
+ # //cloud.google.com/apis/design/errors).
5269
5119
  # Corresponds to the JSON property `error`
5270
5120
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
5271
5121
  attr_accessor :error
@@ -5282,14 +5132,14 @@ module Google
5282
5132
  end
5283
5133
  end
5284
5134
 
5285
- # Streaming annotation results corresponding to a portion of the video
5286
- # that is currently being processed.
5135
+ # Streaming annotation results corresponding to a portion of the video that is
5136
+ # currently being processed.
5287
5137
  class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
5288
5138
  include Google::Apis::Core::Hashable
5289
5139
 
5290
- # Explicit content annotation (based on per-frame visual signals only).
5291
- # If no explicit content has been detected in a frame, no annotations are
5292
- # present for that frame.
5140
+ # Explicit content annotation (based on per-frame visual signals only). If no
5141
+ # explicit content has been detected in a frame, no annotations are present for
5142
+ # that frame.
5293
5143
  # Corresponds to the JSON property `explicitAnnotation`
5294
5144
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5295
5145
  attr_accessor :explicit_annotation
@@ -5360,16 +5210,15 @@ module Google
5360
5210
  include Google::Apis::Core::Hashable
5361
5211
 
5362
5212
  # Language hint can be specified if the language to be detected is known a
5363
- # priori. It can increase the accuracy of the detection. Language hint must
5364
- # be language code in BCP-47 format.
5365
- # Automatic language detection is performed if no hint is provided.
5213
+ # priori. It can increase the accuracy of the detection. Language hint must be
5214
+ # language code in BCP-47 format. Automatic language detection is performed if
5215
+ # no hint is provided.
5366
5216
  # Corresponds to the JSON property `languageHints`
5367
5217
  # @return [Array<String>]
5368
5218
  attr_accessor :language_hints
5369
5219
 
5370
- # Model to use for text detection.
5371
- # Supported values: "builtin/stable" (the default if unset) and
5372
- # "builtin/latest".
5220
+ # Model to use for text detection. Supported values: "builtin/stable" (the
5221
+ # default if unset) and "builtin/latest".
5373
5222
  # Corresponds to the JSON property `model`
5374
5223
  # @return [String]
5375
5224
  attr_accessor :model
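Finally, a brief sketch of this text-detection config in use. The class name GoogleCloudVideointelligenceV1p3beta1TextDetectionConfig is assumed from context (only language_hints and model appear in the hunk); omitting language_hints falls back to automatic language detection.

  require 'google/apis/videointelligence_v1p3beta1'

  # Assumed class name; values are illustrative.
  text_config = Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1TextDetectionConfig.new(
    language_hints: ['en'],    # BCP-47 codes known a priori
    model: 'builtin/stable'    # the default if unset
  )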
@@ -5385,27 +5234,19 @@ module Google
5385
5234
  end
5386
5235
  end
5387
5236
 
5388
- # Video frame level annotation results for text annotation (OCR).
5389
- # Contains information regarding timestamp and bounding box locations for the
5390
- # frames containing detected OCR text snippets.
5237
+ # Video frame level annotation results for text annotation (OCR). Contains
5238
+ # information regarding timestamp and bounding box locations for the frames
5239
+ # containing detected OCR text snippets.
5391
5240
  class GoogleCloudVideointelligenceV1p3beta1TextFrame
5392
5241
  include Google::Apis::Core::Hashable
5393
5242
 
5394
5243
  # Normalized bounding polygon for text (that might not be aligned with axis).
5395
- # Contains list of the corner points in clockwise order starting from
5396
- # top-left corner. For example, for a rectangular bounding box:
5397
- # When the text is horizontal it might look like:
5398
- # 0----1
5399
- # | |
5400
- # 3----2
5401
- # When it's clockwise rotated 180 degrees around the top-left corner it
5402
- # becomes:
5403
- # 2----3
5404
- # | |
5405
- # 1----0
5406
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5407
- # than 0, or greater than 1 due to trignometric calculations for location of
5408
- # the box.
5244
+ # Contains list of the corner points in clockwise order starting from top-left
5245
+ # corner. For example, for a rectangular bounding box: When the text is
5246
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5247
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5248
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5249
+ # or greater than 1 due to trignometric calculations for location of the box.
5409
5250
  # Corresponds to the JSON property `rotatedBoundingBox`
5410
5251
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
5411
5252
  attr_accessor :rotated_bounding_box
@@ -5458,9 +5299,8 @@ module Google
  end
  end

- # For tracking related features.
- # An object at time_offset with attributes, and located with
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
  class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
  include Google::Apis::Core::Hashable

@@ -5474,15 +5314,14 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
  attr_accessor :landmarks

- # Normalized bounding box.
- # The normalized vertex coordinates are relative to the original image.
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
  # Corresponds to the JSON property `normalizedBoundingBox`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
  attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- # corresponding to the video frame for this object.
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
  # Corresponds to the JSON property `timeOffset`
  # @return [String]
  attr_accessor :time_offset
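A short, hedged sketch of reading one of these timestamped objects: `obj` is assumed to be a GoogleCloudVideointelligenceV1p3beta1TimestampedObject taken from a tracking result, and the `left`/`top`/`right`/`bottom` and landmark `name`/`confidence` accessors are assumed from the companion NormalizedBoundingBox and DetectedLandmark classes in this file.

    bb = obj.normalized_bounding_box
    puts "at #{obj.time_offset}: " \
         "left=#{bb.left} top=#{bb.top} right=#{bb.right} bottom=#{bb.bottom}"
    (obj.landmarks || []).each do |lm|
      puts "  landmark #{lm.name} (confidence #{lm.confidence})"
    end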
@@ -5541,20 +5380,19 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
  include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- # one feature.
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
  # Corresponds to the JSON property `feature`
  # @return [String]
  attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- # 100 when fully processed.
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
  # Corresponds to the JSON property `progressPercent`
  # @return [Fixnum]
  attr_accessor :progress_percent
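As a rough illustration of how the per-video progress entries above are typically consumed (a sketch only; `progress_list` is assumed to be an array of GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress objects pulled from an operation's metadata):

    progress_list.each do |p|
      puts "#{p.input_uri} (#{p.feature}): #{p.progress_percent || 0}% complete"
    end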
@@ -5598,19 +5436,19 @@ module Google
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
  attr_accessor :celebrity_recognition_annotations

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
  attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- # If no explicit content has been detected in a frame, no annotations are
- # present for that frame.
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  # Corresponds to the JSON property `explicitAnnotation`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
  attr_accessor :explicit_annotation
@@ -5620,14 +5458,13 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
  attr_accessor :face_detection_annotations

- # Label annotations on frame level.
- # There is exactly one element for each unique label.
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
  # Corresponds to the JSON property `frameLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri
@@ -5659,11 +5496,11 @@ module Google
  attr_accessor :segment_label_annotations

  # Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- # existing topical `segment_label_annotations`, this field presents more
- # fine-grained, segment-level labels detected in video content and is made
- # available only when the client sets `LabelDetectionConfig.model` to
- # "builtin/latest" in the request.
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :segment_presence_label_annotations
@@ -5673,17 +5510,17 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
  attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- # There is exactly one element for each unique label.
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
  # Corresponds to the JSON property `shotLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :shot_label_annotations

  # Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
- # labels detected in video content and is made available only when the client
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :shot_presence_label_annotations
@@ -5693,9 +5530,8 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
  attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- # Annotations for list of detected text snippets. Each will have list of
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
  # Corresponds to the JSON property `textAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
  attr_accessor :text_annotations
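Taken together, these per-video result fields are usually inspected roughly as follows. This is a sketch under assumptions: `results` stands in for one GoogleCloudVideointelligenceV1p3beta1VideoAnnotationResults instance, and the `entity.description` and `text` accessors are assumed from the companion LabelAnnotation and TextAnnotation classes in this file.

    if results.error
      warn "annotation failed: #{results.error.code} #{results.error.message}"
    else
      (results.segment_label_annotations || []).each do |label|
        puts "segment label: #{label.entity&.description}"
      end
      (results.text_annotations || []).each do |text|
        puts "detected text: #{text.text}"
      end
    end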
@@ -5755,9 +5591,9 @@ module Google
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1PersonDetectionConfig]
  attr_accessor :person_detection_config

- # Video segments to annotate. The segments may overlap and are not required
- # to be contiguous or span the whole video. If unspecified, each video is
- # treated as a single segment.
+ # Video segments to annotate. The segments may overlap and are not required to
+ # be contiguous or span the whole video. If unspecified, each video is treated
+ # as a single segment.
  # Corresponds to the JSON property `segments`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
  attr_accessor :segments
@@ -5799,14 +5635,14 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1VideoSegment
  include Google::Apis::Core::Hashable

- # Time-offset, relative to the beginning of the video,
- # corresponding to the end of the segment (inclusive).
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
  # Corresponds to the JSON property `endTimeOffset`
  # @return [String]
  attr_accessor :end_time_offset

- # Time-offset, relative to the beginning of the video,
- # corresponding to the start of the segment (inclusive).
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
  # Corresponds to the JSON property `startTimeOffset`
  # @return [String]
  attr_accessor :start_time_offset
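A minimal sketch tying this segment class to the `segments` field documented two hunks above (whose enclosing class in this file is the video context): the duration strings are illustrative values, and the assumption is only that these generated classes accept keyword arguments matching their accessors.

    require 'google/apis/videointelligence_v1p3beta1'

    vi = Google::Apis::VideointelligenceV1p3beta1

    # Annotate only the first 30 seconds of the video.
    segment = vi::GoogleCloudVideointelligenceV1p3beta1VideoSegment.new(
      start_time_offset: '0s',
      end_time_offset: '30s'
    )

    context = vi::GoogleCloudVideointelligenceV1p3beta1VideoContext.new(
      segments: [segment]
    )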
@@ -5823,41 +5659,41 @@ module Google
  end

  # Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- # as `enable_word_time_offsets`.
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
  class GoogleCloudVideointelligenceV1p3beta1WordInfo
  include Google::Apis::Core::Hashable

  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- # This field is not guaranteed to be accurate and users should not rely on it
- # to be always provided.
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
  # Corresponds to the JSON property `confidence`
  # @return [Float]
  attr_accessor :confidence

- # Time offset relative to the beginning of the audio, and
- # corresponding to the end of the spoken word. This field is only set if
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- # experimental feature and the accuracy of the time offset can vary.
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `endTime`
  # @return [String]
  attr_accessor :end_time

- # Output only. A distinct integer value is assigned for every speaker within
- # the audio. This field specifies which one of those speakers was detected to
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
- # and is only set if speaker diarization is enabled.
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
  # Corresponds to the JSON property `speakerTag`
  # @return [Fixnum]
  attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- # corresponding to the start of the spoken word. This field is only set if
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- # experimental feature and the accuracy of the time offset can vary.
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `startTime`
  # @return [String]
  attr_accessor :start_time
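A hedged sketch of reading these word-level fields: `alternative` is assumed to be a speech-recognition alternative whose `words` array holds GoogleCloudVideointelligenceV1p3beta1WordInfo objects, and the `word` accessor is assumed from the rest of this class; timing and speaker tags are only populated when the matching request options are enabled, as the comments above note.

    (alternative.words || []).each do |w|
      speaker = w.speaker_tag ? " speaker #{w.speaker_tag}" : ''
      puts "#{w.word} [#{w.start_time} - #{w.end_time}]#{speaker} " \
           "(confidence #{w.confidence || 'n/a'})"
    end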
@@ -5886,47 +5722,45 @@ module Google
  class GoogleLongrunningOperation
  include Google::Apis::Core::Hashable

- # If the value is `false`, it means the operation is still in progress.
- # If `true`, the operation is completed, and either `error` or `response` is
- # available.
+ # If the value is `false`, it means the operation is still in progress. If `true`
+ # , the operation is completed, and either `error` or `response` is available.
  # Corresponds to the JSON property `done`
  # @return [Boolean]
  attr_accessor :done
  alias_method :done?, :done

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
  attr_accessor :error

- # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
  # Corresponds to the JSON property `metadata`
  # @return [Hash<String,Object>]
  attr_accessor :metadata

  # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- # `name` should be a resource name ending with `operations/`unique_id``.
+ # originally returns it. If you use the default HTTP mapping, the `name` should
+ # be a resource name ending with `operations/`unique_id``.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

- # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is `google.
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+ # the response should be the resource. For other methods, the response should
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
+ # example, if the original method name is `TakeSnapshot()`, the inferred
+ # response type is `TakeSnapshotResponse`.
  # Corresponds to the JSON property `response`
  # @return [Hash<String,Object>]
  attr_accessor :response
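The `done`/`error`/`response` contract documented above is usually handled along these lines. A sketch only: `operation` is assumed to be a GoogleLongrunningOperation already obtained from the service, and polling or refreshing it is out of scope here.

    if operation.done?
      if operation.error
        warn "operation #{operation.name} failed: #{operation.error.message}"
      else
        # For a generic operation `response` is a loosely typed Hash payload.
        puts "operation #{operation.name} finished: #{operation.response.inspect}"
      end
    else
      puts "still running; metadata: #{operation.metadata.inspect}"
    end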
@@ -5945,12 +5779,12 @@ module Google
  end
  end

- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  class GoogleRpcStatus
  include Google::Apis::Core::Hashable

@@ -5959,15 +5793,15 @@ module Google
  # @return [Fixnum]
  attr_accessor :code

- # A list of messages that carry the error details. There is a common set of
+ # A list of messages that carry the error details. There is a common set of
  # message types for APIs to use.
  # Corresponds to the JSON property `details`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :details

- # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message
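As a final illustration of the `code`/`details`/`message` triple documented above, a small helper such as the hypothetical `describe_status` below could format a GoogleRpcStatus for logging; this is a sketch, not part of the gem's API.

    def describe_status(status)
      # `details` is an array of loosely typed hashes; inspect them for logging.
      details = (status.details || []).map(&:inspect).join('; ')
      msg = "rpc error #{status.code}: #{status.message}"
      details.empty? ? msg : "#{msg} (#{details})"
    end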