google-api-client 0.43.0 → 0.44.0

Files changed (696)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +218 -0
  3. data/docs/oauth-server.md +4 -6
  4. data/generated/google/apis/accessapproval_v1.rb +1 -1
  5. data/generated/google/apis/accessapproval_v1/classes.rb +51 -86
  6. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  7. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  8. data/generated/google/apis/accesscontextmanager_v1/classes.rb +198 -236
  9. data/generated/google/apis/accesscontextmanager_v1/service.rb +128 -171
  10. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  11. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  12. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  13. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  14. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +17 -6
  15. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  16. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  17. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +47 -2
  18. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +18 -0
  19. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  20. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  21. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  22. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  23. data/generated/google/apis/admin_directory_v1.rb +6 -8
  24. data/generated/google/apis/admin_directory_v1/classes.rb +224 -243
  25. data/generated/google/apis/admin_directory_v1/representations.rb +14 -40
  26. data/generated/google/apis/admin_directory_v1/service.rb +475 -1026
  27. data/generated/google/apis/admin_reports_v1.rb +6 -5
  28. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  29. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  30. data/generated/google/apis/admob_v1.rb +1 -1
  31. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  32. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  33. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  34. data/generated/google/apis/androidmanagement_v1/classes.rb +95 -59
  35. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  36. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  37. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  38. data/generated/google/apis/apigee_v1.rb +6 -7
  39. data/generated/google/apis/apigee_v1/classes.rb +205 -75
  40. data/generated/google/apis/apigee_v1/representations.rb +51 -0
  41. data/generated/google/apis/apigee_v1/service.rb +133 -34
  42. data/generated/google/apis/appengine_v1.rb +1 -1
  43. data/generated/google/apis/appengine_v1/classes.rb +45 -35
  44. data/generated/google/apis/appengine_v1/representations.rb +2 -0
  45. data/generated/google/apis/appengine_v1/service.rb +38 -47
  46. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  47. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  48. data/generated/google/apis/appengine_v1beta.rb +1 -1
  49. data/generated/google/apis/appengine_v1beta/classes.rb +45 -35
  50. data/generated/google/apis/appengine_v1beta/representations.rb +2 -0
  51. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  52. data/generated/google/apis/appsmarket_v2.rb +1 -1
  53. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  54. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  55. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +235 -337
  56. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  57. data/generated/google/apis/bigquery_v2.rb +1 -1
  58. data/generated/google/apis/bigquery_v2/classes.rb +355 -553
  59. data/generated/google/apis/bigquery_v2/representations.rb +1 -0
  60. data/generated/google/apis/bigquery_v2/service.rb +32 -40
  61. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  62. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  63. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  64. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  65. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  66. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  67. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  68. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  69. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  70. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  71. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  72. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  73. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  74. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  75. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  76. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  77. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  78. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  79. data/generated/google/apis/bigtableadmin_v1/classes.rb +50 -0
  80. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  81. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  82. data/generated/google/apis/bigtableadmin_v2/classes.rb +50 -0
  83. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  84. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  85. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  86. data/generated/google/apis/binaryauthorization_v1/classes.rb +239 -354
  87. data/generated/google/apis/binaryauthorization_v1/service.rb +74 -89
  88. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  89. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +239 -354
  90. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +74 -89
  91. data/generated/google/apis/calendar_v3.rb +1 -1
  92. data/generated/google/apis/chat_v1.rb +1 -1
  93. data/generated/google/apis/chat_v1/classes.rb +90 -115
  94. data/generated/google/apis/chat_v1/service.rb +30 -42
  95. data/generated/google/apis/civicinfo_v2.rb +1 -1
  96. data/generated/google/apis/cloudasset_v1.rb +1 -1
  97. data/generated/google/apis/cloudasset_v1/classes.rb +712 -1039
  98. data/generated/google/apis/cloudasset_v1/service.rb +125 -167
  99. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  100. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  101. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  102. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  103. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  104. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  105. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  106. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +220 -276
  107. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  108. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  109. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  110. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  111. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  112. data/generated/google/apis/cloudbilling_v1/classes.rb +284 -445
  113. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  114. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  115. data/generated/google/apis/cloudbuild_v1/classes.rb +291 -343
  116. data/generated/google/apis/cloudbuild_v1/representations.rb +1 -0
  117. data/generated/google/apis/cloudbuild_v1/service.rb +48 -63
  118. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  119. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +283 -329
  120. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +1 -0
  121. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  122. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  123. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +269 -313
  124. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +1 -0
  125. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  126. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  127. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  128. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  129. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  130. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  131. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  132. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  133. data/generated/google/apis/cloudfunctions_v1/classes.rb +323 -493
  134. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  135. data/generated/google/apis/cloudidentity_v1.rb +1 -1
  136. data/generated/google/apis/cloudidentity_v1/classes.rb +625 -75
  137. data/generated/google/apis/cloudidentity_v1/representations.rb +203 -0
  138. data/generated/google/apis/cloudidentity_v1/service.rb +43 -61
  139. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  140. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1045 -317
  141. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +331 -22
  142. data/generated/google/apis/cloudidentity_v1beta1/service.rb +742 -96
  143. data/generated/google/apis/cloudiot_v1.rb +1 -1
  144. data/generated/google/apis/cloudiot_v1/classes.rb +263 -373
  145. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  146. data/generated/google/apis/cloudkms_v1.rb +1 -1
  147. data/generated/google/apis/cloudkms_v1/classes.rb +502 -692
  148. data/generated/google/apis/cloudkms_v1/representations.rb +17 -0
  149. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  150. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  151. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  152. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  153. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  154. data/generated/google/apis/cloudresourcemanager_v1/service.rb +1 -1
  155. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  156. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +1 -1
  157. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  158. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  159. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  160. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  161. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  162. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  163. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  164. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  165. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  166. data/generated/google/apis/cloudsearch_v1/classes.rb +650 -781
  167. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  168. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  169. data/generated/google/apis/cloudshell_v1.rb +1 -1
  170. data/generated/google/apis/cloudshell_v1/classes.rb +36 -227
  171. data/generated/google/apis/cloudshell_v1/representations.rb +0 -67
  172. data/generated/google/apis/cloudshell_v1/service.rb +21 -25
  173. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  174. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  175. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  176. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  177. data/generated/google/apis/cloudtasks_v2/classes.rb +605 -933
  178. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  179. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  180. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +602 -964
  181. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  182. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  183. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +609 -938
  184. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  185. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  186. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  187. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  188. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  189. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  190. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  191. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  192. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  193. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  194. data/generated/google/apis/composer_v1.rb +1 -1
  195. data/generated/google/apis/composer_v1/classes.rb +190 -242
  196. data/generated/google/apis/composer_v1/service.rb +79 -150
  197. data/generated/google/apis/composer_v1beta1.rb +1 -1
  198. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  199. data/generated/google/apis/composer_v1beta1/service.rb +92 -179
  200. data/generated/google/apis/compute_alpha.rb +1 -1
  201. data/generated/google/apis/compute_alpha/classes.rb +681 -127
  202. data/generated/google/apis/compute_alpha/representations.rb +110 -6
  203. data/generated/google/apis/compute_alpha/service.rb +695 -692
  204. data/generated/google/apis/compute_beta.rb +1 -1
  205. data/generated/google/apis/compute_beta/classes.rb +570 -70
  206. data/generated/google/apis/compute_beta/representations.rb +112 -1
  207. data/generated/google/apis/compute_beta/service.rb +608 -605
  208. data/generated/google/apis/compute_v1.rb +1 -1
  209. data/generated/google/apis/compute_v1/classes.rb +977 -85
  210. data/generated/google/apis/compute_v1/representations.rb +372 -0
  211. data/generated/google/apis/compute_v1/service.rb +747 -15
  212. data/generated/google/apis/container_v1.rb +1 -1
  213. data/generated/google/apis/container_v1/classes.rb +915 -965
  214. data/generated/google/apis/container_v1/representations.rb +53 -0
  215. data/generated/google/apis/container_v1/service.rb +435 -502
  216. data/generated/google/apis/container_v1beta1.rb +1 -1
  217. data/generated/google/apis/container_v1beta1/classes.rb +1021 -1043
  218. data/generated/google/apis/container_v1beta1/representations.rb +70 -0
  219. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  220. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  221. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +456 -596
  222. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  223. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  224. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +454 -613
  225. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  226. data/generated/google/apis/content_v2.rb +1 -1
  227. data/generated/google/apis/content_v2/classes.rb +3 -1
  228. data/generated/google/apis/content_v2_1.rb +1 -1
  229. data/generated/google/apis/content_v2_1/classes.rb +93 -2
  230. data/generated/google/apis/content_v2_1/representations.rb +34 -0
  231. data/generated/google/apis/content_v2_1/service.rb +53 -2
  232. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  233. data/generated/google/apis/datacatalog_v1beta1/classes.rb +382 -573
  234. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  235. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  236. data/generated/google/apis/dataflow_v1b3/classes.rb +1015 -973
  237. data/generated/google/apis/dataflow_v1b3/representations.rb +115 -0
  238. data/generated/google/apis/dataflow_v1b3/service.rb +299 -257
  239. data/generated/google/apis/datafusion_v1.rb +5 -8
  240. data/generated/google/apis/datafusion_v1/classes.rb +268 -397
  241. data/generated/google/apis/datafusion_v1/representations.rb +3 -0
  242. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  243. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  244. data/generated/google/apis/datafusion_v1beta1/classes.rb +268 -397
  245. data/generated/google/apis/datafusion_v1beta1/representations.rb +3 -0
  246. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  247. data/generated/google/apis/dataproc_v1.rb +1 -1
  248. data/generated/google/apis/dataproc_v1/classes.rb +37 -4
  249. data/generated/google/apis/dataproc_v1/representations.rb +16 -0
  250. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  251. data/generated/google/apis/dataproc_v1beta2/classes.rb +56 -0
  252. data/generated/google/apis/dataproc_v1beta2/representations.rb +31 -0
  253. data/generated/google/apis/datastore_v1.rb +1 -1
  254. data/generated/google/apis/datastore_v1/classes.rb +330 -472
  255. data/generated/google/apis/datastore_v1/service.rb +52 -63
  256. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  257. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  258. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  259. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  260. data/generated/google/apis/datastore_v1beta3/classes.rb +255 -371
  261. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  262. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  263. data/generated/google/apis/dfareporting_v3_3/classes.rb +326 -339
  264. data/generated/google/apis/dfareporting_v3_3/representations.rb +42 -0
  265. data/generated/google/apis/dfareporting_v3_3/service.rb +673 -1286
  266. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  267. data/generated/google/apis/dfareporting_v3_4/classes.rb +348 -350
  268. data/generated/google/apis/dfareporting_v3_4/representations.rb +43 -0
  269. data/generated/google/apis/dfareporting_v3_4/service.rb +708 -1285
  270. data/generated/google/apis/dialogflow_v2.rb +1 -1
  271. data/generated/google/apis/dialogflow_v2/classes.rb +84 -44
  272. data/generated/google/apis/dialogflow_v2/representations.rb +52 -15
  273. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  274. data/generated/google/apis/dialogflow_v2beta1/classes.rb +84 -44
  275. data/generated/google/apis/dialogflow_v2beta1/representations.rb +52 -15
  276. data/generated/google/apis/dialogflow_v2beta1/service.rb +37 -0
  277. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → dialogflow_v3beta1.rb} +13 -10
  278. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8183 -0
  279. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3459 -0
  280. data/generated/google/apis/dialogflow_v3beta1/service.rb +2812 -0
  281. data/generated/google/apis/displayvideo_v1.rb +1 -1
  282. data/generated/google/apis/displayvideo_v1/classes.rb +55 -8
  283. data/generated/google/apis/displayvideo_v1/representations.rb +5 -0
  284. data/generated/google/apis/displayvideo_v1/service.rb +48 -36
  285. data/generated/google/apis/dlp_v2.rb +1 -1
  286. data/generated/google/apis/dlp_v2/classes.rb +1076 -1302
  287. data/generated/google/apis/dlp_v2/service.rb +962 -905
  288. data/generated/google/apis/dns_v1.rb +1 -1
  289. data/generated/google/apis/dns_v1/classes.rb +175 -198
  290. data/generated/google/apis/dns_v1/service.rb +82 -97
  291. data/generated/google/apis/dns_v1beta2.rb +1 -1
  292. data/generated/google/apis/dns_v1beta2/classes.rb +180 -205
  293. data/generated/google/apis/dns_v1beta2/service.rb +82 -97
  294. data/generated/google/apis/docs_v1.rb +1 -1
  295. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  296. data/generated/google/apis/docs_v1/service.rb +17 -22
  297. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  298. data/generated/google/apis/documentai_v1beta2/classes.rb +1186 -810
  299. data/generated/google/apis/documentai_v1beta2/representations.rb +303 -0
  300. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  301. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  302. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  303. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  304. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  305. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +11 -18
  306. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  307. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  308. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  309. data/generated/google/apis/drive_v2.rb +1 -1
  310. data/generated/google/apis/drive_v2/classes.rb +14 -6
  311. data/generated/google/apis/drive_v2/representations.rb +1 -0
  312. data/generated/google/apis/drive_v2/service.rb +79 -15
  313. data/generated/google/apis/drive_v3.rb +1 -1
  314. data/generated/google/apis/drive_v3/classes.rb +14 -6
  315. data/generated/google/apis/drive_v3/representations.rb +1 -0
  316. data/generated/google/apis/drive_v3/service.rb +59 -11
  317. data/generated/google/apis/file_v1.rb +1 -1
  318. data/generated/google/apis/file_v1/classes.rb +154 -173
  319. data/generated/google/apis/file_v1/service.rb +43 -52
  320. data/generated/google/apis/file_v1beta1.rb +1 -1
  321. data/generated/google/apis/file_v1beta1/classes.rb +334 -193
  322. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  323. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  324. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  325. data/generated/google/apis/firebase_v1beta1/classes.rb +25 -47
  326. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  327. data/generated/google/apis/firebase_v1beta1/service.rb +8 -1
  328. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  329. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +26 -0
  330. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +15 -0
  331. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  332. data/generated/google/apis/firebaseml_v1beta2/classes.rb +0 -8
  333. data/generated/google/apis/firebaseml_v1beta2/representations.rb +0 -1
  334. data/generated/google/apis/firebaserules_v1.rb +1 -1
  335. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  336. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  337. data/generated/google/apis/firestore_v1.rb +1 -1
  338. data/generated/google/apis/firestore_v1/classes.rb +402 -498
  339. data/generated/google/apis/firestore_v1/service.rb +165 -201
  340. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  341. data/generated/google/apis/firestore_v1beta1/classes.rb +334 -409
  342. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  343. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  344. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  345. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  346. data/generated/google/apis/fitness_v1.rb +85 -0
  347. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  348. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  349. data/generated/google/apis/fitness_v1/service.rb +626 -0
  350. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  351. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  352. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  353. data/generated/google/apis/games_management_v1management.rb +2 -3
  354. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  355. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  356. data/generated/google/apis/games_v1.rb +2 -3
  357. data/generated/google/apis/games_v1/classes.rb +76 -83
  358. data/generated/google/apis/games_v1/representations.rb +2 -0
  359. data/generated/google/apis/games_v1/service.rb +84 -90
  360. data/generated/google/apis/genomics_v1.rb +1 -1
  361. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  362. data/generated/google/apis/genomics_v1/service.rb +28 -43
  363. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  364. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  365. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  366. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  367. data/generated/google/apis/genomics_v2alpha1/classes.rb +252 -275
  368. data/generated/google/apis/genomics_v2alpha1/representations.rb +1 -0
  369. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  370. data/generated/google/apis/gmail_v1.rb +1 -1
  371. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  372. data/generated/google/apis/gmail_v1/service.rb +4 -3
  373. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  374. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +1 -1
  375. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  376. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  377. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  378. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  379. data/generated/google/apis/healthcare_v1.rb +1 -1
  380. data/generated/google/apis/healthcare_v1/classes.rb +563 -826
  381. data/generated/google/apis/healthcare_v1/service.rb +675 -853
  382. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  383. data/generated/google/apis/healthcare_v1beta1/classes.rb +828 -1102
  384. data/generated/google/apis/healthcare_v1beta1/representations.rb +20 -0
  385. data/generated/google/apis/healthcare_v1beta1/service.rb +895 -1139
  386. data/generated/google/apis/homegraph_v1.rb +1 -1
  387. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  388. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  389. data/generated/google/apis/iam_v1.rb +5 -2
  390. data/generated/google/apis/iam_v1/classes.rb +388 -592
  391. data/generated/google/apis/iam_v1/service.rb +429 -555
  392. data/generated/google/apis/iamcredentials_v1.rb +4 -2
  393. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  394. data/generated/google/apis/iamcredentials_v1/service.rb +15 -13
  395. data/generated/google/apis/iap_v1.rb +1 -1
  396. data/generated/google/apis/iap_v1/classes.rb +246 -355
  397. data/generated/google/apis/iap_v1/service.rb +61 -71
  398. data/generated/google/apis/iap_v1beta1.rb +1 -1
  399. data/generated/google/apis/iap_v1beta1/classes.rb +157 -254
  400. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  401. data/generated/google/apis/indexing_v3.rb +1 -1
  402. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  403. data/generated/google/apis/kgsearch_v1.rb +1 -1
  404. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  405. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  406. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  407. data/generated/google/apis/lifesciences_v2beta/classes.rb +262 -290
  408. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  409. data/generated/google/apis/localservices_v1.rb +31 -0
  410. data/generated/google/apis/localservices_v1/classes.rb +419 -0
  411. data/generated/google/apis/localservices_v1/representations.rb +172 -0
  412. data/generated/google/apis/localservices_v1/service.rb +199 -0
  413. data/generated/google/apis/logging_v2.rb +1 -1
  414. data/generated/google/apis/logging_v2/classes.rb +174 -214
  415. data/generated/google/apis/logging_v2/representations.rb +15 -0
  416. data/generated/google/apis/logging_v2/service.rb +1017 -584
  417. data/generated/google/apis/manufacturers_v1.rb +1 -1
  418. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  419. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  420. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  421. data/generated/google/apis/memcache_v1beta2/classes.rb +170 -249
  422. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  423. data/generated/google/apis/memcache_v1beta2/service.rb +58 -71
  424. data/generated/google/apis/ml_v1.rb +1 -1
  425. data/generated/google/apis/ml_v1/classes.rb +949 -1144
  426. data/generated/google/apis/ml_v1/representations.rb +64 -0
  427. data/generated/google/apis/ml_v1/service.rb +194 -253
  428. data/generated/google/apis/monitoring_v1.rb +1 -1
  429. data/generated/google/apis/monitoring_v1/classes.rb +103 -26
  430. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  431. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  432. data/generated/google/apis/monitoring_v3.rb +1 -1
  433. data/generated/google/apis/monitoring_v3/classes.rb +220 -322
  434. data/generated/google/apis/monitoring_v3/service.rb +121 -140
  435. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  436. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  437. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  438. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  439. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +388 -429
  440. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +40 -0
  441. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  442. data/generated/google/apis/osconfig_v1.rb +1 -1
  443. data/generated/google/apis/osconfig_v1/classes.rb +226 -270
  444. data/generated/google/apis/osconfig_v1/service.rb +22 -27
  445. data/generated/google/apis/osconfig_v1beta.rb +1 -1
  446. data/generated/google/apis/osconfig_v1beta/classes.rb +1031 -411
  447. data/generated/google/apis/osconfig_v1beta/representations.rb +337 -0
  448. data/generated/google/apis/osconfig_v1beta/service.rb +39 -52
  449. data/generated/google/apis/oslogin_v1.rb +1 -1
  450. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  451. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  452. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  453. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  454. data/generated/google/apis/oslogin_v1alpha/classes.rb +14 -12
  455. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  456. data/generated/google/apis/oslogin_v1alpha/service.rb +14 -14
  457. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  458. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  459. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  460. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  461. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  462. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  463. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  464. data/generated/google/apis/people_v1.rb +1 -1
  465. data/generated/google/apis/people_v1/classes.rb +121 -12
  466. data/generated/google/apis/people_v1/representations.rb +41 -0
  467. data/generated/google/apis/people_v1/service.rb +39 -39
  468. data/generated/google/apis/playablelocations_v3.rb +1 -1
  469. data/generated/google/apis/playablelocations_v3/classes.rb +108 -155
  470. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  471. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  472. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +6 -0
  473. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +1 -0
  474. data/generated/google/apis/pubsub_v1.rb +1 -1
  475. data/generated/google/apis/pubsub_v1/classes.rb +392 -518
  476. data/generated/google/apis/pubsub_v1/representations.rb +1 -0
  477. data/generated/google/apis/pubsub_v1/service.rb +220 -246
  478. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  479. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  480. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  481. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  482. data/generated/google/apis/pubsub_v1beta2/classes.rb +244 -354
  483. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  484. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  485. data/generated/google/apis/pubsublite_v1/classes.rb +389 -0
  486. data/generated/google/apis/{accessapproval_v1beta1 → pubsublite_v1}/representations.rb +78 -53
  487. data/generated/google/apis/{memcache_v1 → pubsublite_v1}/service.rb +195 -228
  488. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  489. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  490. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +335 -456
  491. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +0 -16
  492. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  493. data/generated/google/apis/redis_v1.rb +1 -1
  494. data/generated/google/apis/redis_v1/classes.rb +172 -208
  495. data/generated/google/apis/redis_v1/service.rb +93 -110
  496. data/generated/google/apis/redis_v1beta1.rb +1 -1
  497. data/generated/google/apis/redis_v1beta1/classes.rb +176 -212
  498. data/generated/google/apis/redis_v1beta1/service.rb +93 -110
  499. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  500. data/generated/google/apis/remotebuildexecution_v1/classes.rb +951 -1078
  501. data/generated/google/apis/remotebuildexecution_v1/representations.rb +61 -0
  502. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  503. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  504. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +946 -1071
  505. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +61 -0
  506. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  507. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  508. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1099 -1250
  509. data/generated/google/apis/remotebuildexecution_v2/representations.rb +61 -0
  510. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  511. data/generated/google/apis/run_v1.rb +1 -1
  512. data/generated/google/apis/run_v1/classes.rb +4 -3
  513. data/generated/google/apis/run_v1/representations.rb +1 -1
  514. data/generated/google/apis/run_v1alpha1.rb +1 -1
  515. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  516. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  517. data/generated/google/apis/run_v1beta1.rb +1 -1
  518. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  519. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  520. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +295 -412
  521. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  522. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  523. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  524. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  525. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  526. data/generated/google/apis/sasportal_v1alpha1/classes.rb +6 -0
  527. data/generated/google/apis/sasportal_v1alpha1/representations.rb +1 -0
  528. data/generated/google/apis/script_v1.rb +1 -1
  529. data/generated/google/apis/script_v1/classes.rb +88 -111
  530. data/generated/google/apis/script_v1/service.rb +63 -69
  531. data/generated/google/apis/secretmanager_v1.rb +1 -1
  532. data/generated/google/apis/secretmanager_v1/classes.rb +211 -363
  533. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  534. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  535. data/generated/google/apis/secretmanager_v1beta1/classes.rb +211 -363
  536. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  537. data/generated/google/apis/securitycenter_v1.rb +1 -1
  538. data/generated/google/apis/securitycenter_v1/classes.rb +16 -6
  539. data/generated/google/apis/securitycenter_v1/representations.rb +1 -0
  540. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  541. data/generated/google/apis/securitycenter_v1beta1/classes.rb +21 -9
  542. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -0
  543. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  544. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +281 -103
  545. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +101 -30
  546. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  547. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  548. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +18 -48
  549. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  550. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +19 -49
  551. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  552. data/generated/google/apis/servicecontrol_v1/classes.rb +523 -641
  553. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  554. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  555. data/generated/google/apis/servicecontrol_v2/classes.rb +279 -325
  556. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  557. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  558. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +214 -333
  559. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  560. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  561. data/generated/google/apis/servicemanagement_v1/classes.rb +1266 -2116
  562. data/generated/google/apis/servicemanagement_v1/service.rb +144 -195
  563. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  564. data/generated/google/apis/servicenetworking_v1/classes.rb +93 -48
  565. data/generated/google/apis/servicenetworking_v1/representations.rb +52 -0
  566. data/generated/google/apis/servicenetworking_v1/service.rb +116 -0
  567. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  568. data/generated/google/apis/servicenetworking_v1beta/classes.rb +74 -48
  569. data/generated/google/apis/servicenetworking_v1beta/representations.rb +38 -0
  570. data/generated/google/apis/serviceusage_v1.rb +1 -1
  571. data/generated/google/apis/serviceusage_v1/classes.rb +52 -48
  572. data/generated/google/apis/serviceusage_v1/representations.rb +4 -0
  573. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  574. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  575. data/generated/google/apis/serviceusage_v1beta1/classes.rb +87 -49
  576. data/generated/google/apis/serviceusage_v1beta1/representations.rb +8 -0
  577. data/generated/google/apis/sheets_v4.rb +1 -1
  578. data/generated/google/apis/sheets_v4/classes.rb +3932 -5007
  579. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  580. data/generated/google/apis/sheets_v4/service.rb +113 -149
  581. data/generated/google/apis/site_verification_v1.rb +1 -1
  582. data/generated/google/apis/slides_v1.rb +1 -1
  583. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  584. data/generated/google/apis/slides_v1/service.rb +23 -30
  585. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  586. data/generated/google/apis/sourcerepo_v1/classes.rb +6 -6
  587. data/generated/google/apis/spanner_v1.rb +1 -1
  588. data/generated/google/apis/spanner_v1/classes.rb +1546 -2157
  589. data/generated/google/apis/spanner_v1/service.rb +443 -618
  590. data/generated/google/apis/speech_v1.rb +1 -1
  591. data/generated/google/apis/speech_v1/classes.rb +174 -220
  592. data/generated/google/apis/speech_v1/service.rb +27 -32
  593. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  594. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  595. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  596. data/generated/google/apis/speech_v2beta1.rb +1 -1
  597. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  598. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  599. data/generated/google/apis/sql_v1beta4.rb +1 -1
  600. data/generated/google/apis/sql_v1beta4/classes.rb +311 -370
  601. data/generated/google/apis/sql_v1beta4/representations.rb +2 -0
  602. data/generated/google/apis/sql_v1beta4/service.rb +51 -56
  603. data/generated/google/apis/storage_v1.rb +1 -1
  604. data/generated/google/apis/storage_v1/classes.rb +8 -7
  605. data/generated/google/apis/storage_v1/representations.rb +2 -2
  606. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  607. data/generated/google/apis/storagetransfer_v1/classes.rb +261 -339
  608. data/generated/google/apis/storagetransfer_v1/service.rb +43 -64
  609. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  610. data/generated/google/apis/streetviewpublish_v1/classes.rb +106 -148
  611. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  612. data/generated/google/apis/tagmanager_v1.rb +1 -1
  613. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  614. data/generated/google/apis/tagmanager_v2.rb +1 -1
  615. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  616. data/generated/google/apis/tasks_v1.rb +1 -1
  617. data/generated/google/apis/tasks_v1/classes.rb +20 -21
  618. data/generated/google/apis/tasks_v1/service.rb +16 -17
  619. data/generated/google/apis/testing_v1.rb +1 -1
  620. data/generated/google/apis/testing_v1/classes.rb +317 -382
  621. data/generated/google/apis/testing_v1/representations.rb +2 -0
  622. data/generated/google/apis/testing_v1/service.rb +22 -28
  623. data/generated/google/apis/texttospeech_v1.rb +1 -1
  624. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  625. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  626. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  627. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  628. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  629. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  630. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  631. data/generated/google/apis/toolresults_v1beta3/classes.rb +7 -0
  632. data/generated/google/apis/toolresults_v1beta3/representations.rb +1 -0
  633. data/generated/google/apis/tpu_v1.rb +1 -1
  634. data/generated/google/apis/tpu_v1/classes.rb +11 -0
  635. data/generated/google/apis/tpu_v1/representations.rb +1 -0
  636. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  637. data/generated/google/apis/tpu_v1alpha1/classes.rb +11 -0
  638. data/generated/google/apis/tpu_v1alpha1/representations.rb +1 -0
  639. data/generated/google/apis/{accessapproval_v1beta1.rb → trafficdirector_v2.rb} +9 -9
  640. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  641. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  642. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  643. data/generated/google/apis/translate_v3.rb +1 -1
  644. data/generated/google/apis/translate_v3/classes.rb +148 -175
  645. data/generated/google/apis/translate_v3/service.rb +122 -151
  646. data/generated/google/apis/translate_v3beta1.rb +1 -1
  647. data/generated/google/apis/translate_v3beta1/classes.rb +149 -170
  648. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  649. data/generated/google/apis/vectortile_v1.rb +1 -1
  650. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  651. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  652. data/generated/google/apis/videointelligence_v1.rb +1 -1
  653. data/generated/google/apis/videointelligence_v1/classes.rb +753 -918
  654. data/generated/google/apis/videointelligence_v1/service.rb +40 -48
  655. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  656. data/generated/google/apis/videointelligence_v1beta2/classes.rb +748 -911
  657. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  658. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  659. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +748 -911
  660. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  661. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  662. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +748 -911
  663. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  664. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  665. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +754 -920
  666. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  667. data/generated/google/apis/webfonts_v1.rb +2 -3
  668. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  669. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  670. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  671. data/generated/google/apis/youtube_v3.rb +1 -1
  672. data/generated/google/apis/youtube_v3/classes.rb +347 -0
  673. data/generated/google/apis/youtube_v3/representations.rb +176 -0
  674. data/generated/google/apis/youtube_v3/service.rb +78 -0
  675. data/lib/google/apis/version.rb +1 -1
  676. metadata +31 -31
  677. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  678. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  679. data/generated/google/apis/dns_v2beta1.rb +0 -43
  680. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  681. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  682. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  683. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  684. data/generated/google/apis/memcache_v1/representations.rb +0 -471
  685. data/generated/google/apis/oauth2_v2.rb +0 -40
  686. data/generated/google/apis/oauth2_v2/classes.rb +0 -165
  687. data/generated/google/apis/oauth2_v2/representations.rb +0 -68
  688. data/generated/google/apis/oauth2_v2/service.rb +0 -158
  689. data/generated/google/apis/securitycenter_v1p1alpha1/service.rb +0 -207
  690. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  691. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  692. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  693. data/generated/google/apis/storage_v1beta2.rb +0 -40
  694. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  695. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  696. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
@@ -49,73 +49,69 @@ module Google
 
   # Gets a feature tile by its tile resource name.
   # @param [String] name
 - # Required. Resource name of the tile. The tile resource name is prefixed by
 - # its collection ID `tiles/` followed by the resource ID, which encodes the
 - # tile's global x and y coordinates and zoom level as `@<x>,<y>,<zoom>z`.
 - # For example, `tiles/@1,2,3z`.
 + # Required. Resource name of the tile. The tile resource name is prefixed by its
 + # collection ID `tiles/` followed by the resource ID, which encodes the tile's
 + # global x and y coordinates and zoom level as `@,,z`. For example, `tiles/@1,2,
 + # 3z`.
   # @param [String] client_info_api_client
 - # API client name and version. For example, the SDK calling the API. The
 - # exact format is up to the client.
 + # API client name and version. For example, the SDK calling the API. The exact
 + # format is up to the client.
   # @param [String] client_info_application_id
 - # Application ID, such as the package name on Android and the bundle
 - # identifier on iOS platforms.
 + # Application ID, such as the package name on Android and the bundle identifier
 + # on iOS platforms.
   # @param [String] client_info_application_version
 - # Application version number, such as "1.2.3". The exact format is
 - # application-dependent.
 + # Application version number, such as "1.2.3". The exact format is application-
 + # dependent.
   # @param [String] client_info_device_model
 - # Device model as reported by the device. The exact format is
 - # platform-dependent.
 + # Device model as reported by the device. The exact format is platform-dependent.
   # @param [String] client_info_operating_system
 - # Operating system name and version as reported by the OS. For example,
 - # "Mac OS X 10.10.4". The exact format is platform-dependent.
 + # Operating system name and version as reported by the OS. For example, "Mac OS
 + # X 10.10.4". The exact format is platform-dependent.
   # @param [String] client_info_platform
   # Platform where the application is running.
   # @param [String] client_info_user_id
 - # A client-generated user ID. The ID should be generated and persisted during
 - # the first user session or whenever a pre-existing ID is not found. The
 + # Required. A client-generated user ID. The ID should be generated and persisted
 + # during the first user session or whenever a pre-existing ID is not found. The
   # exact format is up to the client. This must be non-empty in a
 - # GetFeatureTileRequest (whether via the header or
 - # GetFeatureTileRequest.client_info).
 + # GetFeatureTileRequest (whether via the header or GetFeatureTileRequest.
 + # client_info).
   # @param [String] client_tile_version_id
 - # Optional version id identifying the tile that is already in the client's
 - # cache. This field should be populated with the most recent version_id value
 - # returned by the API for the requested tile.
 - # If the version id is empty the server always returns a newly rendered tile.
 - # If it is provided the server checks if the tile contents would be identical
 - # to one that's already on the client, and if so, returns a stripped-down
 - # response tile with STATUS_OK_DATA_UNCHANGED instead.
 + # Optional version id identifying the tile that is already in the client's cache.
 + # This field should be populated with the most recent version_id value returned
 + # by the API for the requested tile. If the version id is empty the server
 + # always returns a newly rendered tile. If it is provided the server checks if
 + # the tile contents would be identical to one that's already on the client, and
 + # if so, returns a stripped-down response tile with STATUS_OK_DATA_UNCHANGED
 + # instead.
   # @param [Boolean] enable_detailed_highway_types
 - # Flag indicating whether detailed highway types should be returned. If this
 - # is set, the CONTROLLED_ACCESS_HIGHWAY type may be returned. If not, then
 - # these highways will have the generic HIGHWAY type.
 - # This exists for backwards compatibility reasons.
 + # Flag indicating whether detailed highway types should be returned. If this is
 + # set, the CONTROLLED_ACCESS_HIGHWAY type may be returned. If not, then these
 + # highways will have the generic HIGHWAY type. This exists for backwards
 + # compatibility reasons.
   # @param [Boolean] enable_feature_names
 - # Flag indicating whether human-readable names should be returned for
 - # features. If this is set, the display_name field on the feature will be
 - # filled out.
 + # Flag indicating whether human-readable names should be returned for features.
 + # If this is set, the display_name field on the feature will be filled out.
   # @param [Boolean] enable_modeled_volumes
 - # Flag indicating whether 3D building models should be enabled. If this is
 - # set structures will be returned as 3D modeled volumes rather than 2.5D
 - # extruded areas where possible.
 + # Flag indicating whether 3D building models should be enabled. If this is set
 + # structures will be returned as 3D modeled volumes rather than 2.5D extruded
 + # areas where possible.
   # @param [Boolean] enable_political_features
   # Flag indicating whether political features should be returned.
   # @param [Boolean] enable_private_roads
 - # Flag indicating whether the returned tile will contain road features that
 - # are marked private. Private roads are indicated by the
 - # Feature.segment_info.road_info.is_private field.
 + # Flag indicating whether the returned tile will contain road features that are
 + # marked private. Private roads are indicated by the Feature.segment_info.
 + # road_info.is_private field.
   # @param [Boolean] enable_unclipped_buildings
 - # Flag indicating whether unclipped buildings should be returned. If this is
 - # set, building render ops will extend beyond the tile boundary. Buildings
 - # will only be returned on the tile that contains their centroid.
 + # Flag indicating whether unclipped buildings should be returned. If this is set,
 + # building render ops will extend beyond the tile boundary. Buildings will only
 + # be returned on the tile that contains their centroid.
   # @param [String] language_code
 - # Required. The BCP-47 language code corresponding to the language in which
 - # the name was requested, such as "en-US" or "sr-Latn".
 - # For more information, see
 + # Required. The BCP-47 language code corresponding to the language in which the
 + # name was requested, such as "en-US" or "sr-Latn". For more information, see
   # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
   # @param [String] region_code
   # Required. The Unicode country/region code (CLDR) of the location from which
 - # the request is coming from, such as "US" and "419".
 - # For more information, see
 + # the request is coming from, such as "US" and "419". For more information, see
   # http://www.unicode.org/reports/tr35/#unicode_region_subtag.
   # @param [String] fields
   # Selector specifying which fields to include in a partial response.
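
The hunk above reflows the generated YARD parameter documentation for the feature tile getter (it appears to come from data/generated/google/apis/vectortile_v1/service.rb, entry 651 in the file list) and adds a Required label to client_info_user_id. A minimal usage sketch follows, assuming the generated service class is Google::Apis::VectortileV1::SemanticTileService and the generated method is get_featuretile; these names and the API-key setup are inferred from the generator's conventions, not taken from this diff.

    require 'google/apis/vectortile_v1'

    # Assumed names: the service class and method below are inferred, not
    # verified against the generated sources in this release.
    vectortile = Google::Apis::VectortileV1::SemanticTileService.new
    vectortile.key = ENV['GOOGLE_MAPS_API_KEY']   # hypothetical API-key auth

    # The tile resource name encodes x, y and zoom as tiles/@<x>,<y>,<zoom>z.
    tile = vectortile.get_featuretile(
      'tiles/@1,2,3z',
      client_info_api_client:        'example-sdk/1.0',
      client_info_application_id:    'com.example.maps',
      client_info_user_id:           'user-1234',   # now documented as Required
      language_code:                 'en-US',       # Required, BCP-47
      region_code:                   'US',          # Required, CLDR region code
      enable_feature_names:          true,          # fills display_name on features
      enable_detailed_highway_types: true
    )
    puts tile.name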
@@ -162,60 +158,51 @@ module Google
 
   # Gets a terrain tile by its tile resource name.
   # @param [String] name
 - # Required. Resource name of the tile. The tile resource name is prefixed by
 - # its collection ID `terraintiles/` followed by the resource ID, which
 - # encodes the tile's global x and y coordinates and zoom level as
 - # `@<x>,<y>,<zoom>z`. For example, `terraintiles/@1,2,3z`.
 + # Required. Resource name of the tile. The tile resource name is prefixed by its
 + # collection ID `terraintiles/` followed by the resource ID, which encodes the
 + # tile's global x and y coordinates and zoom level as `@,,z`. For example, `
 + # terraintiles/@1,2,3z`.
   # @param [Fixnum] altitude_precision_centimeters
 - # The precision of terrain altitudes in centimeters.
 - # Possible values: between 1 (cm level precision) and 1,000,000 (10-kilometer
 - # level precision).
 + # The precision of terrain altitudes in centimeters. Possible values: between 1 (
 + # cm level precision) and 1,000,000 (10-kilometer level precision).
   # @param [String] client_info_api_client
 - # API client name and version. For example, the SDK calling the API. The
 - # exact format is up to the client.
 + # API client name and version. For example, the SDK calling the API. The exact
 + # format is up to the client.
   # @param [String] client_info_application_id
 - # Application ID, such as the package name on Android and the bundle
 - # identifier on iOS platforms.
 + # Application ID, such as the package name on Android and the bundle identifier
 + # on iOS platforms.
   # @param [String] client_info_application_version
 - # Application version number, such as "1.2.3". The exact format is
 - # application-dependent.
 + # Application version number, such as "1.2.3". The exact format is application-
 + # dependent.
   # @param [String] client_info_device_model
 - # Device model as reported by the device. The exact format is
 - # platform-dependent.
 + # Device model as reported by the device. The exact format is platform-dependent.
   # @param [String] client_info_operating_system
 - # Operating system name and version as reported by the OS. For example,
 - # "Mac OS X 10.10.4". The exact format is platform-dependent.
 + # Operating system name and version as reported by the OS. For example, "Mac OS
 + # X 10.10.4". The exact format is platform-dependent.
   # @param [String] client_info_platform
   # Platform where the application is running.
   # @param [String] client_info_user_id
 - # A client-generated user ID. The ID should be generated and persisted during
 - # the first user session or whenever a pre-existing ID is not found. The
 + # Required. A client-generated user ID. The ID should be generated and persisted
 + # during the first user session or whenever a pre-existing ID is not found. The
   # exact format is up to the client. This must be non-empty in a
 - # GetFeatureTileRequest (whether via the header or
 - # GetFeatureTileRequest.client_info).
 + # GetFeatureTileRequest (whether via the header or GetFeatureTileRequest.
 + # client_info).
   # @param [Fixnum] max_elevation_resolution_cells
 - # The maximum allowed resolution for the returned elevation heightmap.
 - # Possible values: between 1 and 1024 (and not less than
 - # min_elevation_resolution_cells).
 - # Over-sized heightmaps will be non-uniformly down-sampled such that each
 - # edge is no longer than this value. Non-uniformity is chosen to maximise the
 - # amount of preserved data.
 - # For example:
 - # Original resolution: 100px (width) * 30px (height)
 - # max_elevation_resolution: 30
 - # New resolution: 30px (width) * 30px (height)
 + # The maximum allowed resolution for the returned elevation heightmap. Possible
 + # values: between 1 and 1024 (and not less than min_elevation_resolution_cells).
 + # Over-sized heightmaps will be non-uniformly down-sampled such that each edge
 + # is no longer than this value. Non-uniformity is chosen to maximise the amount
 + # of preserved data. For example: Original resolution: 100px (width) * 30px (
 + # height) max_elevation_resolution: 30 New resolution: 30px (width) * 30px (
 + # height)
   # @param [Fixnum] min_elevation_resolution_cells
 - # The minimum allowed resolution for the returned elevation heightmap.
 - # Possible values: between 0 and 1024 (and not more than
 - # max_elevation_resolution_cells). Zero is supported for backward
 - # compatibility.
 - # Under-sized heightmaps will be non-uniformly up-sampled
 - # such that each edge is no shorter than this value. Non-uniformity is chosen
 - # to maximise the amount of preserved data.
 - # For example:
 - # Original resolution: 30px (width) * 10px (height)
 - # min_elevation_resolution: 30
 - # New resolution: 30px (width) * 30px (height)
 + # The minimum allowed resolution for the returned elevation heightmap. Possible
 + # values: between 0 and 1024 (and not more than max_elevation_resolution_cells).
 + # Zero is supported for backward compatibility. Under-sized heightmaps will be
 + # non-uniformly up-sampled such that each edge is no shorter than this value.
203
+ # Non-uniformity is chosen to maximise the amount of preserved data. For example:
204
+ # Original resolution: 30px (width) * 10px (height) min_elevation_resolution:
205
+ # 30 New resolution: 30px (width) * 30px (height)
219
206
  # @param [Array<String>, String] terrain_formats
220
207
  # Terrain formats that the client understands.
221
208
  # @param [String] fields
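
Editor's note: for the terrain-tile getter documented above, a hedged sketch is shown below. The service and method names and the terrain format enum value are assumptions. The resolution comments mean, for example, that a 100x30 source heightmap requested with max_elevation_resolution_cells of 30 comes back as 30x30.

    # Hypothetical sketch: fetch a terrain tile with a bounded heightmap resolution.
    tiles = Google::Apis::VectortileV1::SemanticTileService.new   # service name assumed
    terrain = tiles.get_terraintile(
      'terraintiles/@1,2,3z',                # collection ID plus encoded x, y and zoom
      altitude_precision_centimeters: 100,   # 1 m altitude precision
      min_elevation_resolution_cells: 30,    # each heightmap edge at least 30 cells
      max_elevation_resolution_cells: 256,   # each heightmap edge at most 256 cells
      terrain_formats: ['FIRST_DERIVATIVE']  # enum value is an assumption
    )
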
@@ -27,7 +27,7 @@ module Google
27
27
  # @see https://cloud.google.com/video-intelligence/docs/
28
28
  module VideointelligenceV1
29
29
  VERSION = 'V1'
30
- REVISION = '20200602'
30
+ REVISION = '20200810'
31
31
 
32
32
  # View and manage your data across Google Cloud Platform services
33
33
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -22,9 +22,9 @@ module Google
22
22
  module Apis
23
23
  module VideointelligenceV1
24
24
 
25
- # Video annotation progress. Included in the `metadata`
26
- # field of the `Operation` returned by the `GetOperation`
27
- # call of the `google::longrunning::Operations` service.
25
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
26
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
27
+ # service.
28
28
  class GoogleCloudVideointelligenceV1AnnotateVideoProgress
29
29
  include Google::Apis::Core::Hashable
30
30
 
@@ -52,24 +52,22 @@ module Google
52
52
  # @return [Array<String>]
53
53
  attr_accessor :features
54
54
 
55
- # The video data bytes.
56
- # If unset, the input video(s) should be specified via the `input_uri`.
57
- # If set, `input_uri` must be unset.
55
+ # The video data bytes. If unset, the input video(s) should be specified via the
56
+ # `input_uri`. If set, `input_uri` must be unset.
58
57
  # Corresponds to the JSON property `inputContent`
59
58
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
60
59
  # @return [String]
61
60
  attr_accessor :input_content
62
61
 
63
- # Input video location. Currently, only
64
- # [Cloud Storage](https://cloud.google.com/storage/) URIs are
65
- # supported. URIs must be specified in the following format:
66
- # `gs://bucket-id/object-id` (other URI formats return
67
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
68
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
69
- # To identify multiple videos, a video URI may include wildcards in the
70
- # `object-id`. Supported wildcards: '*' to match 0 or more characters;
71
- # '?' to match 1 character. If unset, the input video should be embedded
72
- # in the request as `input_content`. If set, `input_content` must be unset.
62
+ # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
63
+ # storage/) URIs are supported. URIs must be specified in the following format: `
64
+ # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
65
+ # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
66
+ # google.com/storage/docs/request-endpoints). To identify multiple videos, a
67
+ # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
68
+ # to match 0 or more characters; '?' to match 1 character. If unset, the input
69
+ # video should be embedded in the request as `input_content`. If set, `
70
+ # input_content` must be unset.
73
71
  # Corresponds to the JSON property `inputUri`
74
72
  # @return [String]
75
73
  attr_accessor :input_uri
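
Editor's note: the `input_uri` and `input_content` comments above describe mutually exclusive ways to supply the video. A hedged sketch of building the request with a Cloud Storage URI follows; the `annotate_video` call and the class names reflect the generated client's usual naming and are not shown in this hunk.

    require 'google/apis/videointelligence_v1'

    vi = Google::Apis::VideointelligenceV1::CloudVideoIntelligenceService.new
    request = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1AnnotateVideoRequest.new(
      input_uri: 'gs://my-bucket/videos/*.mp4',  # '*' and '?' wildcards allowed in the object-id
      features: ['LABEL_DETECTION']              # leave input_content unset when input_uri is set
    )
    operation = vi.annotate_video(request)       # returns a long-running Operation
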
@@ -83,11 +81,11 @@ module Google
83
81
  attr_accessor :location_id
84
82
 
85
83
  # Optional. Location where the output (in JSON format) should be stored.
86
- # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
87
- # URIs are supported. These must be specified in the following format:
88
- # `gs://bucket-id/object-id` (other URI formats return
89
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
90
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
84
+ # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
85
+ # supported. These must be specified in the following format: `gs://bucket-id/
86
+ # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
87
+ # more information, see [Request URIs](https://cloud.google.com/storage/docs/
88
+ # request-endpoints).
91
89
  # Corresponds to the JSON property `outputUri`
92
90
  # @return [String]
93
91
  attr_accessor :output_uri
@@ -112,9 +110,9 @@ module Google
112
110
  end
113
111
  end
114
112
 
115
- # Video annotation response. Included in the `response`
116
- # field of the `Operation` returned by the `GetOperation`
117
- # call of the `google::longrunning::Operations` service.
113
+ # Video annotation response. Included in the `response` field of the `Operation`
114
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
115
+ # service.
118
116
  class GoogleCloudVideointelligenceV1AnnotateVideoResponse
119
117
  include Google::Apis::Core::Hashable
120
118
 
@@ -142,14 +140,14 @@ module Google
142
140
  # @return [Float]
143
141
  attr_accessor :confidence
144
142
 
145
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
146
- # A full list of supported type names will be provided in the document.
143
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
144
+ # full list of supported type names will be provided in the document.
147
145
  # Corresponds to the JSON property `name`
148
146
  # @return [String]
149
147
  attr_accessor :name
150
148
 
151
- # Text value of the detection result. For example, the value for "HairColor"
152
- # can be "black", "blonde", etc.
149
+ # Text value of the detection result. For example, the value for "HairColor" can
150
+ # be "black", "blonde", etc.
153
151
  # Corresponds to the JSON property `value`
154
152
  # @return [String]
155
153
  attr_accessor :value
@@ -181,9 +179,8 @@ module Google
181
179
  # @return [String]
182
180
  attr_accessor :name
183
181
 
184
- # A vertex represents a 2D point in the image.
185
- # NOTE: the normalized vertex coordinates are relative to the original image
186
- # and range from 0 to 1.
182
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
183
+ # coordinates are relative to the original image and range from 0 to 1.
187
184
  # Corresponds to the JSON property `point`
188
185
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedVertex]
189
186
  attr_accessor :point
@@ -209,8 +206,7 @@ module Google
209
206
  # @return [String]
210
207
  attr_accessor :description
211
208
 
212
- # Opaque entity ID. Some IDs may be available in
213
- # [Google Knowledge Graph Search
209
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
214
210
  # API](https://developers.google.com/knowledge-graph/).
215
211
  # Corresponds to the JSON property `entityId`
216
212
  # @return [String]
@@ -233,9 +229,9 @@ module Google
233
229
  end
234
230
  end
235
231
 
236
- # Explicit content annotation (based on per-frame visual signals only).
237
- # If no explicit content has been detected in a frame, no annotations are
238
- # present for that frame.
232
+ # Explicit content annotation (based on per-frame visual signals only). If no
233
+ # explicit content has been detected in a frame, no annotations are present for
234
+ # that frame.
239
235
  class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
240
236
  include Google::Apis::Core::Hashable
241
237
 
@@ -264,9 +260,8 @@ module Google
264
260
  class GoogleCloudVideointelligenceV1ExplicitContentDetectionConfig
265
261
  include Google::Apis::Core::Hashable
266
262
 
267
- # Model to use for explicit content detection.
268
- # Supported values: "builtin/stable" (the default if unset) and
269
- # "builtin/latest".
263
+ # Model to use for explicit content detection. Supported values: "builtin/stable"
264
+ # (the default if unset) and "builtin/latest".
270
265
  # Corresponds to the JSON property `model`
271
266
  # @return [String]
272
267
  attr_accessor :model
@@ -311,10 +306,9 @@ module Google
311
306
  class GoogleCloudVideointelligenceV1LabelAnnotation
312
307
  include Google::Apis::Core::Hashable
313
308
 
314
- # Common categories for the detected entity.
315
- # For example, when the label is `Terrier`, the category is likely `dog`. And
316
- # in some cases there might be more than one categories e.g., `Terrier` could
317
- # also be a `pet`.
309
+ # Common categories for the detected entity. For example, when the label is `
310
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
311
+ # than one categories e.g., `Terrier` could also be a `pet`.
318
312
  # Corresponds to the JSON property `categoryEntities`
319
313
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity>]
320
314
  attr_accessor :category_entities
@@ -357,44 +351,40 @@ module Google
357
351
  class GoogleCloudVideointelligenceV1LabelDetectionConfig
358
352
  include Google::Apis::Core::Hashable
359
353
 
360
- # The confidence threshold we perform filtering on the labels from
361
- # frame-level detection. If not set, it is set to 0.4 by default. The valid
362
- # range for this threshold is [0.1, 0.9]. Any value set outside of this
363
- # range will be clipped.
364
- # Note: For best results, follow the default threshold. We will update
365
- # the default threshold everytime when we release a new model.
354
+ # The confidence threshold we perform filtering on the labels from frame-level
355
+ # detection. If not set, it is set to 0.4 by default. The valid range for this
356
+ # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
357
+ # Note: For best results, follow the default threshold. We will update the
358
+ # default threshold everytime when we release a new model.
366
359
  # Corresponds to the JSON property `frameConfidenceThreshold`
367
360
  # @return [Float]
368
361
  attr_accessor :frame_confidence_threshold
369
362
 
370
- # What labels should be detected with LABEL_DETECTION, in addition to
371
- # video-level labels or segment-level labels.
372
- # If unspecified, defaults to `SHOT_MODE`.
363
+ # What labels should be detected with LABEL_DETECTION, in addition to video-
364
+ # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
373
365
  # Corresponds to the JSON property `labelDetectionMode`
374
366
  # @return [String]
375
367
  attr_accessor :label_detection_mode
376
368
 
377
- # Model to use for label detection.
378
- # Supported values: "builtin/stable" (the default if unset) and
379
- # "builtin/latest".
369
+ # Model to use for label detection. Supported values: "builtin/stable" (the
370
+ # default if unset) and "builtin/latest".
380
371
  # Corresponds to the JSON property `model`
381
372
  # @return [String]
382
373
  attr_accessor :model
383
374
 
384
- # Whether the video has been shot from a stationary (i.e., non-moving)
385
- # camera. When set to true, might improve detection accuracy for moving
386
- # objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
375
+ # Whether the video has been shot from a stationary (i.e., non-moving) camera.
376
+ # When set to true, might improve detection accuracy for moving objects. Should
377
+ # be used with `SHOT_AND_FRAME_MODE` enabled.
387
378
  # Corresponds to the JSON property `stationaryCamera`
388
379
  # @return [Boolean]
389
380
  attr_accessor :stationary_camera
390
381
  alias_method :stationary_camera?, :stationary_camera
391
382
 
392
- # The confidence threshold we perform filtering on the labels from
393
- # video-level and shot-level detections. If not set, it's set to 0.3 by
394
- # default. The valid range for this threshold is [0.1, 0.9]. Any value set
395
- # outside of this range will be clipped.
396
- # Note: For best results, follow the default threshold. We will update
397
- # the default threshold everytime when we release a new model.
383
+ # The confidence threshold we perform filtering on the labels from video-level
384
+ # and shot-level detections. If not set, it's set to 0.3 by default. The valid
385
+ # range for this threshold is [0.1, 0.9]. Any value set outside of this range
386
+ # will be clipped. Note: For best results, follow the default threshold. We will
387
+ # update the default threshold everytime when we release a new model.
398
388
  # Corresponds to the JSON property `videoConfidenceThreshold`
399
389
  # @return [Float]
400
390
  attr_accessor :video_confidence_threshold
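
Editor's note: a hedged sketch of the label-detection options documented above. The attribute names mirror the snake_case fields in this hunk; attaching the config through VideoContext#label_detection_config is an assumption.

    label_cfg = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelDetectionConfig.new(
      label_detection_mode: 'SHOT_AND_FRAME_MODE',
      stationary_camera: true,                # only meaningful with SHOT_AND_FRAME_MODE
      frame_confidence_threshold: 0.4,        # default; values outside [0.1, 0.9] are clipped
      video_confidence_threshold: 0.3,        # default; values outside [0.1, 0.9] are clipped
      model: 'builtin/stable'                 # or 'builtin/latest'
    )
    video_context = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoContext.new(
      label_detection_config: label_cfg
    )
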
@@ -473,14 +463,14 @@ module Google
473
463
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity]
474
464
  attr_accessor :entity
475
465
 
476
- # All video segments where the recognized logo appears. There might be
477
- # multiple instances of the same logo class appearing in one VideoSegment.
466
+ # All video segments where the recognized logo appears. There might be multiple
467
+ # instances of the same logo class appearing in one VideoSegment.
478
468
  # Corresponds to the JSON property `segments`
479
469
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment>]
480
470
  attr_accessor :segments
481
471
 
482
- # All logo tracks where the recognized logo appears. Each track corresponds
483
- # to one logo instance appearing in consecutive frames.
472
+ # All logo tracks where the recognized logo appears. Each track corresponds to
473
+ # one logo instance appearing in consecutive frames.
484
474
  # Corresponds to the JSON property `tracks`
485
475
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Track>]
486
476
  attr_accessor :tracks
@@ -497,9 +487,8 @@ module Google
497
487
  end
498
488
  end
499
489
 
500
- # Normalized bounding box.
501
- # The normalized vertex coordinates are relative to the original image.
502
- # Range: [0, 1].
490
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
491
+ # original image. Range: [0, 1].
503
492
  class GoogleCloudVideointelligenceV1NormalizedBoundingBox
504
493
  include Google::Apis::Core::Hashable
505
494
 
@@ -537,20 +526,12 @@ module Google
537
526
  end
538
527
 
539
528
  # Normalized bounding polygon for text (that might not be aligned with axis).
540
- # Contains list of the corner points in clockwise order starting from
541
- # top-left corner. For example, for a rectangular bounding box:
542
- # When the text is horizontal it might look like:
543
- # 0----1
544
- # | |
545
- # 3----2
546
- # When it's clockwise rotated 180 degrees around the top-left corner it
547
- # becomes:
548
- # 2----3
549
- # | |
550
- # 1----0
551
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
552
- # than 0, or greater than 1 due to trignometric calculations for location of
553
- # the box.
529
+ # Contains list of the corner points in clockwise order starting from top-left
530
+ # corner. For example, for a rectangular bounding box: When the text is
531
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
532
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
533
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
534
+ # or greater than 1 due to trignometric calculations for location of the box.
554
535
  class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
555
536
  include Google::Apis::Core::Hashable
556
537
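
Editor's note: the reflowed comment above flattens the original ASCII diagram; the four corners of a rectangular box are listed clockwise from the top-left (0, 1, 2, 3), and a 180-degree rotation only changes which corner comes first, not the order. Because the vertices are normalized to the original frame, mapping one back to pixels is plain scaling. A small illustrative sketch, assuming the usual `x`/`y` accessors on the vertex object:

    # Illustrative only: denormalize a vertex against the source frame size.
    def to_pixels(vertex, frame_width, frame_height)
      # No clamping: values may fall slightly outside [0, 1] because of the
      # trigonometric rotation noted in the comment above.
      [vertex.x * frame_width, vertex.y * frame_height]
    end

    # A vertex of (x: 0.25, y: 0.5) on a 1920x1080 frame maps to [480.0, 540.0].
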
 
@@ -569,9 +550,8 @@ module Google
569
550
  end
570
551
  end
571
552
 
572
- # A vertex represents a 2D point in the image.
573
- # NOTE: the normalized vertex coordinates are relative to the original image
574
- # and range from 0 to 1.
553
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
554
+ # coordinates are relative to the original image and range from 0 to 1.
575
555
  class GoogleCloudVideointelligenceV1NormalizedVertex
576
556
  include Google::Apis::Core::Hashable
577
557
 
@@ -610,10 +590,10 @@ module Google
610
590
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity]
611
591
  attr_accessor :entity
612
592
 
613
- # Information corresponding to all frames where this object track appears.
614
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
615
- # messages in frames.
616
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
593
+ # Information corresponding to all frames where this object track appears. Non-
594
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
595
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
596
+ # frames.
617
597
  # Corresponds to the JSON property `frames`
618
598
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
619
599
  attr_accessor :frames
@@ -623,12 +603,11 @@ module Google
623
603
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment]
624
604
  attr_accessor :segment
625
605
 
626
- # Streaming mode ONLY.
627
- # In streaming mode, we do not know the end time of a tracked object
628
- # before it is completed. Hence, there is no VideoSegment info returned.
629
- # Instead, we provide a unique identifiable integer track_id so that
630
- # the customers can correlate the results of the ongoing
631
- # ObjectTrackAnnotation of the same track_id over time.
606
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
607
+ # tracked object before it is completed. Hence, there is no VideoSegment info
608
+ # returned. Instead, we provide a unique identifiable integer track_id so that
609
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
610
+ # of the same track_id over time.
632
611
  # Corresponds to the JSON property `trackId`
633
612
  # @return [Fixnum]
634
613
  attr_accessor :track_id
@@ -657,9 +636,8 @@ module Google
657
636
  class GoogleCloudVideointelligenceV1ObjectTrackingConfig
658
637
  include Google::Apis::Core::Hashable
659
638
 
660
- # Model to use for object tracking.
661
- # Supported values: "builtin/stable" (the default if unset) and
662
- # "builtin/latest".
639
+ # Model to use for object tracking. Supported values: "builtin/stable" (the
640
+ # default if unset) and "builtin/latest".
663
641
  # Corresponds to the JSON property `model`
664
642
  # @return [String]
665
643
  attr_accessor :model
@@ -679,9 +657,8 @@ module Google
679
657
  class GoogleCloudVideointelligenceV1ObjectTrackingFrame
680
658
  include Google::Apis::Core::Hashable
681
659
 
682
- # Normalized bounding box.
683
- # The normalized vertex coordinates are relative to the original image.
684
- # Range: [0, 1].
660
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
661
+ # original image. Range: [0, 1].
685
662
  # Corresponds to the JSON property `normalizedBoundingBox`
686
663
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
687
664
  attr_accessor :normalized_bounding_box
@@ -706,9 +683,8 @@ module Google
706
683
  class GoogleCloudVideointelligenceV1ShotChangeDetectionConfig
707
684
  include Google::Apis::Core::Hashable
708
685
 
709
- # Model to use for shot change detection.
710
- # Supported values: "builtin/stable" (the default if unset) and
711
- # "builtin/latest".
686
+ # Model to use for shot change detection. Supported values: "builtin/stable" (
687
+ # the default if unset) and "builtin/latest".
712
688
  # Corresponds to the JSON property `model`
713
689
  # @return [String]
714
690
  attr_accessor :model
@@ -728,12 +704,12 @@ module Google
728
704
  class GoogleCloudVideointelligenceV1SpeechContext
729
705
  include Google::Apis::Core::Hashable
730
706
 
731
- # Optional. A list of strings containing words and phrases "hints" so that
732
- # the speech recognition is more likely to recognize them. This can be used
733
- # to improve the accuracy for specific words and phrases, for example, if
734
- # specific commands are typically spoken by the user. This can also be used
735
- # to add additional words to the vocabulary of the recognizer. See
736
- # [usage limits](https://cloud.google.com/speech/limits#content).
707
+ # Optional. A list of strings containing words and phrases "hints" so that the
708
+ # speech recognition is more likely to recognize them. This can be used to
709
+ # improve the accuracy for specific words and phrases, for example, if specific
710
+ # commands are typically spoken by the user. This can also be used to add
711
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
712
+ # //cloud.google.com/speech/limits#content).
737
713
  # Corresponds to the JSON property `phrases`
738
714
  # @return [Array<String>]
739
715
  attr_accessor :phrases
@@ -754,10 +730,10 @@ module Google
754
730
 
755
731
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
756
732
  # indicates an estimated greater likelihood that the recognized words are
757
- # correct. This field is set only for the top alternative.
758
- # This field is not guaranteed to be accurate and users should not rely on it
759
- # to be always provided.
760
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
733
+ # correct. This field is set only for the top alternative. This field is not
734
+ # guaranteed to be accurate and users should not rely on it to be always
735
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
736
+ # not set.
761
737
  # Corresponds to the JSON property `confidence`
762
738
  # @return [Float]
763
739
  attr_accessor :confidence
@@ -768,8 +744,8 @@ module Google
768
744
  attr_accessor :transcript
769
745
 
770
746
  # Output only. A list of word-specific information for each recognized word.
771
- # Note: When `enable_speaker_diarization` is set to true, you will see all
772
- # the words from the beginning of the audio.
747
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
748
+ # words from the beginning of the audio.
773
749
  # Corresponds to the JSON property `words`
774
750
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1WordInfo>]
775
751
  attr_accessor :words
@@ -790,18 +766,17 @@ module Google
790
766
  class GoogleCloudVideointelligenceV1SpeechTranscription
791
767
  include Google::Apis::Core::Hashable
792
768
 
793
- # May contain one or more recognition hypotheses (up to the maximum specified
794
- # in `max_alternatives`). These alternatives are ordered in terms of
795
- # accuracy, with the top (first) alternative being the most probable, as
796
- # ranked by the recognizer.
769
+ # May contain one or more recognition hypotheses (up to the maximum specified in
770
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
771
+ # the top (first) alternative being the most probable, as ranked by the
772
+ # recognizer.
797
773
  # Corresponds to the JSON property `alternatives`
798
774
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
799
775
  attr_accessor :alternatives
800
776
 
801
777
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
802
- # language tag of
803
- # the language in this result. This language code was detected to have the
804
- # most likelihood of being spoken in the audio.
778
+ # language tag of the language in this result. This language code was detected
779
+ # to have the most likelihood of being spoken in the audio.
805
780
  # Corresponds to the JSON property `languageCode`
806
781
  # @return [String]
807
782
  attr_accessor :language_code
@@ -828,66 +803,62 @@ module Google
828
803
  attr_accessor :audio_tracks
829
804
 
830
805
  # Optional. If set, specifies the estimated number of speakers in the
831
- # conversation.
832
- # If not set, defaults to '2'.
833
- # Ignored unless enable_speaker_diarization is set to true.
806
+ # conversation. If not set, defaults to '2'. Ignored unless
807
+ # enable_speaker_diarization is set to true.
834
808
  # Corresponds to the JSON property `diarizationSpeakerCount`
835
809
  # @return [Fixnum]
836
810
  attr_accessor :diarization_speaker_count
837
811
 
838
- # Optional. If 'true', adds punctuation to recognition result hypotheses.
839
- # This feature is only available in select languages. Setting this for
840
- # requests in other languages has no effect at all. The default 'false' value
841
- # does not add punctuation to result hypotheses. NOTE: "This is currently
842
- # offered as an experimental service, complimentary to all users. In the
843
- # future this may be exclusively available as a premium feature."
812
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
813
+ # feature is only available in select languages. Setting this for requests in
814
+ # other languages has no effect at all. The default 'false' value does not add
815
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
816
+ # experimental service, complimentary to all users. In the future this may be
817
+ # exclusively available as a premium feature."
844
818
  # Corresponds to the JSON property `enableAutomaticPunctuation`
845
819
  # @return [Boolean]
846
820
  attr_accessor :enable_automatic_punctuation
847
821
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation
848
822
 
849
- # Optional. If 'true', enables speaker detection for each recognized word in
850
- # the top alternative of the recognition result using a speaker_tag provided
851
- # in the WordInfo.
852
- # Note: When this is true, we send all the words from the beginning of the
853
- # audio for the top alternative in every consecutive response.
854
- # This is done in order to improve our speaker tags as our models learn to
855
- # identify the speakers in the conversation over time.
823
+ # Optional. If 'true', enables speaker detection for each recognized word in the
824
+ # top alternative of the recognition result using a speaker_tag provided in the
825
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
826
+ # the audio for the top alternative in every consecutive response. This is done
827
+ # in order to improve our speaker tags as our models learn to identify the
828
+ # speakers in the conversation over time.
856
829
  # Corresponds to the JSON property `enableSpeakerDiarization`
857
830
  # @return [Boolean]
858
831
  attr_accessor :enable_speaker_diarization
859
832
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization
860
833
 
861
834
  # Optional. If `true`, the top result includes a list of words and the
862
- # confidence for those words. If `false`, no word-level confidence
863
- # information is returned. The default is `false`.
835
+ # confidence for those words. If `false`, no word-level confidence information
836
+ # is returned. The default is `false`.
864
837
  # Corresponds to the JSON property `enableWordConfidence`
865
838
  # @return [Boolean]
866
839
  attr_accessor :enable_word_confidence
867
840
  alias_method :enable_word_confidence?, :enable_word_confidence
868
841
 
869
- # Optional. If set to `true`, the server will attempt to filter out
870
- # profanities, replacing all but the initial character in each filtered word
871
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
872
- # won't be filtered out.
842
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
843
+ # replacing all but the initial character in each filtered word with asterisks,
844
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
873
845
  # Corresponds to the JSON property `filterProfanity`
874
846
  # @return [Boolean]
875
847
  attr_accessor :filter_profanity
876
848
  alias_method :filter_profanity?, :filter_profanity
877
849
 
878
- # Required. *Required* The language of the supplied audio as a
879
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
880
- # Example: "en-US".
881
- # See [Language Support](https://cloud.google.com/speech/docs/languages)
882
- # for a list of the currently supported language codes.
850
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
851
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
852
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
853
+ # of the currently supported language codes.
883
854
  # Corresponds to the JSON property `languageCode`
884
855
  # @return [String]
885
856
  attr_accessor :language_code
886
857
 
887
858
  # Optional. Maximum number of recognition hypotheses to be returned.
888
859
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
889
- # within each `SpeechTranscription`. The server may return fewer than
890
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
860
+ # within each `SpeechTranscription`. The server may return fewer than `
861
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
891
862
  # return a maximum of one. If omitted, will return a maximum of one.
892
863
  # Corresponds to the JSON property `maxAlternatives`
893
864
  # @return [Fixnum]
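
Editor's note: the fields in this hunk belong to the speech transcription config class. A hedged sketch using them is shown below; the class name and the VideoContext#speech_transcription_config attachment point are assumptions not visible in this hunk.

    speech_cfg = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscriptionConfig.new(
      language_code: 'en-US',               # required BCP-47 tag
      enable_automatic_punctuation: true,
      enable_speaker_diarization: true,
      diarization_speaker_count: 2,         # ignored unless diarization is enabled
      filter_profanity: false,
      max_alternatives: 1                   # 0 or 1 both return a single hypothesis
    )
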
@@ -954,16 +925,15 @@ module Google
954
925
  include Google::Apis::Core::Hashable
955
926
 
956
927
  # Language hint can be specified if the language to be detected is known a
957
- # priori. It can increase the accuracy of the detection. Language hint must
958
- # be language code in BCP-47 format.
959
- # Automatic language detection is performed if no hint is provided.
928
+ # priori. It can increase the accuracy of the detection. Language hint must be
929
+ # language code in BCP-47 format. Automatic language detection is performed if
930
+ # no hint is provided.
960
931
  # Corresponds to the JSON property `languageHints`
961
932
  # @return [Array<String>]
962
933
  attr_accessor :language_hints
963
934
 
964
- # Model to use for text detection.
965
- # Supported values: "builtin/stable" (the default if unset) and
966
- # "builtin/latest".
935
+ # Model to use for text detection. Supported values: "builtin/stable" (the
936
+ # default if unset) and "builtin/latest".
967
937
  # Corresponds to the JSON property `model`
968
938
  # @return [String]
969
939
  attr_accessor :model
@@ -979,27 +949,19 @@ module Google
979
949
  end
980
950
  end
981
951
 
982
- # Video frame level annotation results for text annotation (OCR).
983
- # Contains information regarding timestamp and bounding box locations for the
984
- # frames containing detected OCR text snippets.
952
+ # Video frame level annotation results for text annotation (OCR). Contains
953
+ # information regarding timestamp and bounding box locations for the frames
954
+ # containing detected OCR text snippets.
985
955
  class GoogleCloudVideointelligenceV1TextFrame
986
956
  include Google::Apis::Core::Hashable
987
957
 
988
958
  # Normalized bounding polygon for text (that might not be aligned with axis).
989
- # Contains list of the corner points in clockwise order starting from
990
- # top-left corner. For example, for a rectangular bounding box:
991
- # When the text is horizontal it might look like:
992
- # 0----1
993
- # | |
994
- # 3----2
995
- # When it's clockwise rotated 180 degrees around the top-left corner it
996
- # becomes:
997
- # 2----3
998
- # | |
999
- # 1----0
1000
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1001
- # than 0, or greater than 1 due to trignometric calculations for location of
1002
- # the box.
959
+ # Contains list of the corner points in clockwise order starting from top-left
960
+ # corner. For example, for a rectangular bounding box: When the text is
961
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
962
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
963
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
964
+ # or greater than 1 due to trignometric calculations for location of the box.
1003
965
  # Corresponds to the JSON property `rotatedBoundingBox`
1004
966
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
1005
967
  attr_accessor :rotated_bounding_box
@@ -1052,9 +1014,8 @@ module Google
1052
1014
  end
1053
1015
  end
1054
1016
 
1055
- # For tracking related features.
1056
- # An object at time_offset with attributes, and located with
1057
- # normalized_bounding_box.
1017
+ # For tracking related features. An object at time_offset with attributes, and
1018
+ # located with normalized_bounding_box.
1058
1019
  class GoogleCloudVideointelligenceV1TimestampedObject
1059
1020
  include Google::Apis::Core::Hashable
1060
1021
 
@@ -1068,15 +1029,14 @@ module Google
1068
1029
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1DetectedLandmark>]
1069
1030
  attr_accessor :landmarks
1070
1031
 
1071
- # Normalized bounding box.
1072
- # The normalized vertex coordinates are relative to the original image.
1073
- # Range: [0, 1].
1032
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1033
+ # original image. Range: [0, 1].
1074
1034
  # Corresponds to the JSON property `normalizedBoundingBox`
1075
1035
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
1076
1036
  attr_accessor :normalized_bounding_box
1077
1037
 
1078
- # Time-offset, relative to the beginning of the video,
1079
- # corresponding to the video frame for this object.
1038
+ # Time-offset, relative to the beginning of the video, corresponding to the
1039
+ # video frame for this object.
1080
1040
  # Corresponds to the JSON property `timeOffset`
1081
1041
  # @return [String]
1082
1042
  attr_accessor :time_offset
@@ -1135,20 +1095,19 @@ module Google
1135
1095
  class GoogleCloudVideointelligenceV1VideoAnnotationProgress
1136
1096
  include Google::Apis::Core::Hashable
1137
1097
 
1138
- # Specifies which feature is being tracked if the request contains more than
1139
- # one feature.
1098
+ # Specifies which feature is being tracked if the request contains more than one
1099
+ # feature.
1140
1100
  # Corresponds to the JSON property `feature`
1141
1101
  # @return [String]
1142
1102
  attr_accessor :feature
1143
1103
 
1144
- # Video file location in
1145
- # [Cloud Storage](https://cloud.google.com/storage/).
1104
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
1146
1105
  # Corresponds to the JSON property `inputUri`
1147
1106
  # @return [String]
1148
1107
  attr_accessor :input_uri
1149
1108
 
1150
- # Approximate percentage processed thus far. Guaranteed to be
1151
- # 100 when fully processed.
1109
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
1110
+ # processed.
1152
1111
  # Corresponds to the JSON property `progressPercent`
1153
1112
  # @return [Fixnum]
1154
1113
  attr_accessor :progress_percent
@@ -1187,31 +1146,30 @@ module Google
1187
1146
  class GoogleCloudVideointelligenceV1VideoAnnotationResults
1188
1147
  include Google::Apis::Core::Hashable
1189
1148
 
1190
- # The `Status` type defines a logical error model that is suitable for
1191
- # different programming environments, including REST APIs and RPC APIs. It is
1192
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1193
- # three pieces of data: error code, error message, and error details.
1194
- # You can find out more about this error model and how to work with it in the
1195
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
1149
+ # The `Status` type defines a logical error model that is suitable for different
1150
+ # programming environments, including REST APIs and RPC APIs. It is used by [
1151
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
1152
+ # data: error code, error message, and error details. You can find out more
1153
+ # about this error model and how to work with it in the [API Design Guide](https:
1154
+ # //cloud.google.com/apis/design/errors).
1196
1155
  # Corresponds to the JSON property `error`
1197
1156
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
1198
1157
  attr_accessor :error
1199
1158
 
1200
- # Explicit content annotation (based on per-frame visual signals only).
1201
- # If no explicit content has been detected in a frame, no annotations are
1202
- # present for that frame.
1159
+ # Explicit content annotation (based on per-frame visual signals only). If no
1160
+ # explicit content has been detected in a frame, no annotations are present for
1161
+ # that frame.
1203
1162
  # Corresponds to the JSON property `explicitAnnotation`
1204
1163
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
1205
1164
  attr_accessor :explicit_annotation
1206
1165
 
1207
- # Label annotations on frame level.
1208
- # There is exactly one element for each unique label.
1166
+ # Label annotations on frame level. There is exactly one element for each unique
1167
+ # label.
1209
1168
  # Corresponds to the JSON property `frameLabelAnnotations`
1210
1169
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
1211
1170
  attr_accessor :frame_label_annotations
1212
1171
 
1213
- # Video file location in
1214
- # [Cloud Storage](https://cloud.google.com/storage/).
1172
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
1215
1173
  # Corresponds to the JSON property `inputUri`
1216
1174
  # @return [String]
1217
1175
  attr_accessor :input_uri
@@ -1238,11 +1196,11 @@ module Google
1238
1196
  attr_accessor :segment_label_annotations
1239
1197
 
1240
1198
  # Presence label annotations on video level or user-specified segment level.
1241
- # There is exactly one element for each unique label. Compared to the
1242
- # existing topical `segment_label_annotations`, this field presents more
1243
- # fine-grained, segment-level labels detected in video content and is made
1244
- # available only when the client sets `LabelDetectionConfig.model` to
1245
- # "builtin/latest" in the request.
1199
+ # There is exactly one element for each unique label. Compared to the existing
1200
+ # topical `segment_label_annotations`, this field presents more fine-grained,
1201
+ # segment-level labels detected in video content and is made available only when
1202
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
1203
+ # request.
1246
1204
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
1247
1205
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
1248
1206
  attr_accessor :segment_presence_label_annotations
@@ -1252,17 +1210,17 @@ module Google
1252
1210
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment>]
1253
1211
  attr_accessor :shot_annotations
1254
1212
 
1255
- # Topical label annotations on shot level.
1256
- # There is exactly one element for each unique label.
1213
+ # Topical label annotations on shot level. There is exactly one element for each
1214
+ # unique label.
1257
1215
  # Corresponds to the JSON property `shotLabelAnnotations`
1258
1216
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
1259
1217
  attr_accessor :shot_label_annotations
1260
1218
 
1261
1219
  # Presence label annotations on shot level. There is exactly one element for
1262
- # each unique label. Compared to the existing topical
1263
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
1264
- # labels detected in video content and is made available only when the client
1265
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
1220
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
1221
+ # this field presents more fine-grained, shot-level labels detected in video
1222
+ # content and is made available only when the client sets `LabelDetectionConfig.
1223
+ # model` to "builtin/latest" in the request.
1266
1224
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
1267
1225
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
1268
1226
  attr_accessor :shot_presence_label_annotations
@@ -1272,9 +1230,8 @@ module Google
1272
1230
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscription>]
1273
1231
  attr_accessor :speech_transcriptions
1274
1232
 
1275
- # OCR text detection and tracking.
1276
- # Annotations for list of detected text snippets. Each will have list of
1277
- # frame information associated with it.
1233
+ # OCR text detection and tracking. Annotations for list of detected text
1234
+ # snippets. Each will have list of frame information associated with it.
1278
1235
  # Corresponds to the JSON property `textAnnotations`
1279
1236
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextAnnotation>]
1280
1237
  attr_accessor :text_annotations
@@ -1321,9 +1278,9 @@ module Google
1321
1278
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingConfig]
1322
1279
  attr_accessor :object_tracking_config
1323
1280
 
1324
- # Video segments to annotate. The segments may overlap and are not required
1325
- # to be contiguous or span the whole video. If unspecified, each video is
1326
- # treated as a single segment.
1281
+ # Video segments to annotate. The segments may overlap and are not required to
1282
+ # be contiguous or span the whole video. If unspecified, each video is treated
1283
+ # as a single segment.
1327
1284
  # Corresponds to the JSON property `segments`
1328
1285
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment>]
1329
1286
  attr_accessor :segments
@@ -1363,14 +1320,14 @@ module Google
1363
1320
  class GoogleCloudVideointelligenceV1VideoSegment
1364
1321
  include Google::Apis::Core::Hashable
1365
1322
 
1366
- # Time-offset, relative to the beginning of the video,
1367
- # corresponding to the end of the segment (inclusive).
1323
+ # Time-offset, relative to the beginning of the video, corresponding to the end
1324
+ # of the segment (inclusive).
1368
1325
  # Corresponds to the JSON property `endTimeOffset`
1369
1326
  # @return [String]
1370
1327
  attr_accessor :end_time_offset
1371
1328
 
1372
- # Time-offset, relative to the beginning of the video,
1373
- # corresponding to the start of the segment (inclusive).
1329
+ # Time-offset, relative to the beginning of the video, corresponding to the
1330
+ # start of the segment (inclusive).
1374
1331
  # Corresponds to the JSON property `startTimeOffset`
1375
1332
  # @return [String]
1376
1333
  attr_accessor :start_time_offset
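
Editor's note: segment offsets are strings measured from the start of the video. A short sketch of restricting annotation to the first ten minutes; the "600s" duration encoding is an assumption about the JSON mapping, and the segments attribute is taken from the VideoContext hunk above.

    segment = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment.new(
      start_time_offset: '0s',
      end_time_offset: '600s'    # inclusive, per the comment above
    )
    video_context = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoContext.new(
      segments: [segment]
    )
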
@@ -1387,41 +1344,41 @@ module Google
1387
1344
  end
1388
1345
 
1389
1346
  # Word-specific information for recognized words. Word information is only
1390
- # included in the response when certain request parameters are set, such
1391
- # as `enable_word_time_offsets`.
1347
+ # included in the response when certain request parameters are set, such as `
1348
+ # enable_word_time_offsets`.
1392
1349
  class GoogleCloudVideointelligenceV1WordInfo
1393
1350
  include Google::Apis::Core::Hashable
1394
1351
 
1395
1352
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
1396
1353
  # indicates an estimated greater likelihood that the recognized words are
1397
- # correct. This field is set only for the top alternative.
1398
- # This field is not guaranteed to be accurate and users should not rely on it
1399
- # to be always provided.
1400
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1354
+ # correct. This field is set only for the top alternative. This field is not
1355
+ # guaranteed to be accurate and users should not rely on it to be always
1356
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1357
+ # not set.
1401
1358
  # Corresponds to the JSON property `confidence`
1402
1359
  # @return [Float]
1403
1360
  attr_accessor :confidence
1404
1361
 
1405
- # Time offset relative to the beginning of the audio, and
1406
- # corresponding to the end of the spoken word. This field is only set if
1407
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1408
- # experimental feature and the accuracy of the time offset can vary.
1362
+ # Time offset relative to the beginning of the audio, and corresponding to the
1363
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
1364
+ # true` and only in the top hypothesis. This is an experimental feature and the
1365
+ # accuracy of the time offset can vary.
1409
1366
  # Corresponds to the JSON property `endTime`
1410
1367
  # @return [String]
1411
1368
  attr_accessor :end_time
1412
1369
 
1413
- # Output only. A distinct integer value is assigned for every speaker within
1414
- # the audio. This field specifies which one of those speakers was detected to
1415
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
1416
- # and is only set if speaker diarization is enabled.
1370
+ # Output only. A distinct integer value is assigned for every speaker within the
1371
+ # audio. This field specifies which one of those speakers was detected to have
1372
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
1373
+ # only set if speaker diarization is enabled.
1417
1374
  # Corresponds to the JSON property `speakerTag`
1418
1375
  # @return [Fixnum]
1419
1376
  attr_accessor :speaker_tag
1420
1377
 
1421
- # Time offset relative to the beginning of the audio, and
1422
- # corresponding to the start of the spoken word. This field is only set if
1423
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1424
- # experimental feature and the accuracy of the time offset can vary.
1378
+ # Time offset relative to the beginning of the audio, and corresponding to the
1379
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
1380
+ # true` and only in the top hypothesis. This is an experimental feature and the
1381
+ # accuracy of the time offset can vary.
1425
1382
  # Corresponds to the JSON property `startTime`
1426
1383
  # @return [String]
1427
1384
  attr_accessor :start_time
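
Editor's note: to close the word-info hunk, a hedged sketch of reading speaker-tagged words out of a finished annotation. `operation_response` is a placeholder for the decoded long-running result, and the `annotation_results` and `word` accessors are assumptions not shown in this hunk.

    results = operation_response.annotation_results.first
    results.speech_transcriptions.each do |transcription|
      best = transcription.alternatives.first        # most probable hypothesis
      (best.words || []).each do |w|
        # speaker_tag is only populated when speaker diarization was enabled
        puts format('%s-%s speaker %s: %s', w.start_time, w.end_time, w.speaker_tag, w.word)
      end
    end
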
@@ -1445,9 +1402,9 @@ module Google
1445
1402
  end
1446
1403
  end
1447
1404
 
1448
- # Video annotation progress. Included in the `metadata`
1449
- # field of the `Operation` returned by the `GetOperation`
1450
- # call of the `google::longrunning::Operations` service.
1405
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
1406
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1407
+ # service.
1451
1408
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
1452
1409
  include Google::Apis::Core::Hashable
1453
1410
 
@@ -1466,9 +1423,9 @@ module Google
1466
1423
  end
1467
1424
  end
1468
1425
 
1469
- # Video annotation response. Included in the `response`
1470
- # field of the `Operation` returned by the `GetOperation`
1471
- # call of the `google::longrunning::Operations` service.
1426
+ # Video annotation response. Included in the `response` field of the `Operation`
1427
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1428
+ # service.
1472
1429
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
1473
1430
  include Google::Apis::Core::Hashable
1474
1431
 
@@ -1496,14 +1453,14 @@ module Google
1496
1453
  # @return [Float]
1497
1454
  attr_accessor :confidence
1498
1455
 
1499
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
1500
- # A full list of supported type names will be provided in the document.
1456
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
1457
+ # full list of supported type names will be provided in the document.
1501
1458
  # Corresponds to the JSON property `name`
1502
1459
  # @return [String]
1503
1460
  attr_accessor :name
1504
1461
 
1505
- # Text value of the detection result. For example, the value for "HairColor"
1506
- # can be "black", "blonde", etc.
1462
+ # Text value of the detection result. For example, the value for "HairColor" can
1463
+ # be "black", "blonde", etc.
1507
1464
  # Corresponds to the JSON property `value`
1508
1465
  # @return [String]
1509
1466
  attr_accessor :value
@@ -1535,9 +1492,8 @@ module Google
1535
1492
  # @return [String]
1536
1493
  attr_accessor :name
1537
1494
 
1538
- # A vertex represents a 2D point in the image.
1539
- # NOTE: the normalized vertex coordinates are relative to the original image
1540
- # and range from 0 to 1.
1495
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1496
+ # coordinates are relative to the original image and range from 0 to 1.
1541
1497
  # Corresponds to the JSON property `point`
1542
1498
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
1543
1499
  attr_accessor :point
@@ -1563,8 +1519,7 @@ module Google
1563
1519
  # @return [String]
1564
1520
  attr_accessor :description
1565
1521
 
1566
- # Opaque entity ID. Some IDs may be available in
1567
- # [Google Knowledge Graph Search
1522
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
1568
1523
  # API](https://developers.google.com/knowledge-graph/).
1569
1524
  # Corresponds to the JSON property `entityId`
1570
1525
  # @return [String]
@@ -1587,9 +1542,9 @@ module Google
1587
1542
  end
1588
1543
  end
1589
1544
 
1590
- # Explicit content annotation (based on per-frame visual signals only).
1591
- # If no explicit content has been detected in a frame, no annotations are
1592
- # present for that frame.
1545
+ # Explicit content annotation (based on per-frame visual signals only). If no
1546
+ # explicit content has been detected in a frame, no annotations are present for
1547
+ # that frame.
1593
1548
  class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
1594
1549
  include Google::Apis::Core::Hashable
1595
1550
 
@@ -1644,10 +1599,9 @@ module Google
1644
1599
  class GoogleCloudVideointelligenceV1beta2LabelAnnotation
1645
1600
  include Google::Apis::Core::Hashable
1646
1601
 
1647
- # Common categories for the detected entity.
1648
- # For example, when the label is `Terrier`, the category is likely `dog`. And
1649
- # in some cases there might be more than one categories e.g., `Terrier` could
1650
- # also be a `pet`.
1602
+ # Common categories for the detected entity. For example, when the label is `
1603
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
1604
+ # than one categories e.g., `Terrier` could also be a `pet`.
1651
1605
  # Corresponds to the JSON property `categoryEntities`
1652
1606
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity>]
1653
1607
  attr_accessor :category_entities
@@ -1746,14 +1700,14 @@ module Google
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity]
  attr_accessor :entity

- # All video segments where the recognized logo appears. There might be
- # multiple instances of the same logo class appearing in one VideoSegment.
+ # All video segments where the recognized logo appears. There might be multiple
+ # instances of the same logo class appearing in one VideoSegment.
  # Corresponds to the JSON property `segments`
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
  attr_accessor :segments

- # All logo tracks where the recognized logo appears. Each track corresponds
- # to one logo instance appearing in consecutive frames.
+ # All logo tracks where the recognized logo appears. Each track corresponds to
+ # one logo instance appearing in consecutive frames.
  # Corresponds to the JSON property `tracks`
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Track>]
  attr_accessor :tracks
@@ -1770,9 +1724,8 @@ module Google
  end
  end

- # Normalized bounding box.
- # The normalized vertex coordinates are relative to the original image.
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
  include Google::Apis::Core::Hashable

@@ -1810,20 +1763,12 @@ module Google
  end

  # Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- # top-left corner. For example, for a rectangular bounding box:
- # When the text is horizontal it might look like:
- # 0----1
- # | |
- # 3----2
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
  include Google::Apis::Core::Hashable

@@ -1842,9 +1787,8 @@ module Google
  end
  end

- # A vertex represents a 2D point in the image.
- # NOTE: the normalized vertex coordinates are relative to the original image
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
  class GoogleCloudVideointelligenceV1beta2NormalizedVertex
  include Google::Apis::Core::Hashable
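
As the comments above note, vertex coordinates are normalized to the original frame and, for rotated text boxes, may land slightly outside [0, 1]. A minimal sketch of mapping them back to pixels, assuming the usual `x`/`y` accessors on `GoogleCloudVideointelligenceV1beta2NormalizedVertex` and a known frame size (nothing here is part of the generated client itself):

  # Sketch only: convert normalized vertices back to pixel coordinates.
  # `width`/`height` are the dimensions of the original video frame.
  def vertices_to_pixels(vertices, width:, height:)
    (vertices || []).map do |v|
      # No clamping: out-of-range values are legitimate for rotated boxes.
      [(v.x || 0) * width, (v.y || 0) * height]
    end
  end
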
1850
1794
 
@@ -1883,10 +1827,10 @@ module Google
1883
1827
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity]
1884
1828
  attr_accessor :entity
1885
1829
 
1886
- # Information corresponding to all frames where this object track appears.
1887
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
1888
- # messages in frames.
1889
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
1830
+ # Information corresponding to all frames where this object track appears. Non-
1831
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
1832
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
1833
+ # frames.
1890
1834
  # Corresponds to the JSON property `frames`
1891
1835
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
1892
1836
  attr_accessor :frames
@@ -1896,12 +1840,11 @@ module Google
1896
1840
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment]
1897
1841
  attr_accessor :segment
1898
1842
 
1899
- # Streaming mode ONLY.
1900
- # In streaming mode, we do not know the end time of a tracked object
1901
- # before it is completed. Hence, there is no VideoSegment info returned.
1902
- # Instead, we provide a unique identifiable integer track_id so that
1903
- # the customers can correlate the results of the ongoing
1904
- # ObjectTrackAnnotation of the same track_id over time.
1843
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
1844
+ # tracked object before it is completed. Hence, there is no VideoSegment info
1845
+ # returned. Instead, we provide a unique identifiable integer track_id so that
1846
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
1847
+ # of the same track_id over time.
1905
1848
  # Corresponds to the JSON property `trackId`
1906
1849
  # @return [Fixnum]
1907
1850
  attr_accessor :track_id
@@ -1931,9 +1874,8 @@ module Google
1931
1874
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
1932
1875
  include Google::Apis::Core::Hashable
1933
1876
 
1934
- # Normalized bounding box.
1935
- # The normalized vertex coordinates are relative to the original image.
1936
- # Range: [0, 1].
1877
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1878
+ # original image. Range: [0, 1].
1937
1879
  # Corresponds to the JSON property `normalizedBoundingBox`
1938
1880
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1939
1881
  attr_accessor :normalized_bounding_box
@@ -1960,10 +1902,10 @@ module Google
1960
1902
 
1961
1903
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
1962
1904
  # indicates an estimated greater likelihood that the recognized words are
1963
- # correct. This field is set only for the top alternative.
1964
- # This field is not guaranteed to be accurate and users should not rely on it
1965
- # to be always provided.
1966
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1905
+ # correct. This field is set only for the top alternative. This field is not
1906
+ # guaranteed to be accurate and users should not rely on it to be always
1907
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1908
+ # not set.
1967
1909
  # Corresponds to the JSON property `confidence`
1968
1910
  # @return [Float]
1969
1911
  attr_accessor :confidence
@@ -1974,8 +1916,8 @@ module Google
1974
1916
  attr_accessor :transcript
1975
1917
 
1976
1918
  # Output only. A list of word-specific information for each recognized word.
1977
- # Note: When `enable_speaker_diarization` is set to true, you will see all
1978
- # the words from the beginning of the audio.
1919
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
1920
+ # words from the beginning of the audio.
1979
1921
  # Corresponds to the JSON property `words`
1980
1922
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2WordInfo>]
1981
1923
  attr_accessor :words
@@ -1996,18 +1938,17 @@ module Google
1996
1938
  class GoogleCloudVideointelligenceV1beta2SpeechTranscription
1997
1939
  include Google::Apis::Core::Hashable
1998
1940
 
1999
- # May contain one or more recognition hypotheses (up to the maximum specified
2000
- # in `max_alternatives`). These alternatives are ordered in terms of
2001
- # accuracy, with the top (first) alternative being the most probable, as
2002
- # ranked by the recognizer.
1941
+ # May contain one or more recognition hypotheses (up to the maximum specified in
1942
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
1943
+ # the top (first) alternative being the most probable, as ranked by the
1944
+ # recognizer.
2003
1945
  # Corresponds to the JSON property `alternatives`
2004
1946
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
2005
1947
  attr_accessor :alternatives
2006
1948
 
2007
1949
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
2008
- # language tag of
2009
- # the language in this result. This language code was detected to have the
2010
- # most likelihood of being spoken in the audio.
1950
+ # language tag of the language in this result. This language code was detected
1951
+ # to have the most likelihood of being spoken in the audio.
2011
1952
  # Corresponds to the JSON property `languageCode`
2012
1953
  # @return [String]
2013
1954
  attr_accessor :language_code
@@ -2056,27 +1997,19 @@ module Google
2056
1997
  end
2057
1998
  end
2058
1999
 
2059
- # Video frame level annotation results for text annotation (OCR).
2060
- # Contains information regarding timestamp and bounding box locations for the
2061
- # frames containing detected OCR text snippets.
2000
+ # Video frame level annotation results for text annotation (OCR). Contains
2001
+ # information regarding timestamp and bounding box locations for the frames
2002
+ # containing detected OCR text snippets.
2062
2003
  class GoogleCloudVideointelligenceV1beta2TextFrame
2063
2004
  include Google::Apis::Core::Hashable
2064
2005
 
2065
2006
  # Normalized bounding polygon for text (that might not be aligned with axis).
2066
- # Contains list of the corner points in clockwise order starting from
2067
- # top-left corner. For example, for a rectangular bounding box:
2068
- # When the text is horizontal it might look like:
2069
- # 0----1
2070
- # | |
2071
- # 3----2
2072
- # When it's clockwise rotated 180 degrees around the top-left corner it
2073
- # becomes:
2074
- # 2----3
2075
- # | |
2076
- # 1----0
2077
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2078
- # than 0, or greater than 1 due to trignometric calculations for location of
2079
- # the box.
2007
+ # Contains list of the corner points in clockwise order starting from top-left
2008
+ # corner. For example, for a rectangular bounding box: When the text is
2009
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2010
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2011
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2012
+ # or greater than 1 due to trignometric calculations for location of the box.
2080
2013
  # Corresponds to the JSON property `rotatedBoundingBox`
2081
2014
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
2082
2015
  attr_accessor :rotated_bounding_box
@@ -2129,9 +2062,8 @@ module Google
2129
2062
  end
2130
2063
  end
2131
2064
 
2132
- # For tracking related features.
2133
- # An object at time_offset with attributes, and located with
2134
- # normalized_bounding_box.
2065
+ # For tracking related features. An object at time_offset with attributes, and
2066
+ # located with normalized_bounding_box.
2135
2067
  class GoogleCloudVideointelligenceV1beta2TimestampedObject
2136
2068
  include Google::Apis::Core::Hashable
2137
2069
 
@@ -2145,15 +2077,14 @@ module Google
2145
2077
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
2146
2078
  attr_accessor :landmarks
2147
2079
 
2148
- # Normalized bounding box.
2149
- # The normalized vertex coordinates are relative to the original image.
2150
- # Range: [0, 1].
2080
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2081
+ # original image. Range: [0, 1].
2151
2082
  # Corresponds to the JSON property `normalizedBoundingBox`
2152
2083
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
2153
2084
  attr_accessor :normalized_bounding_box
2154
2085
 
2155
- # Time-offset, relative to the beginning of the video,
2156
- # corresponding to the video frame for this object.
2086
+ # Time-offset, relative to the beginning of the video, corresponding to the
2087
+ # video frame for this object.
2157
2088
  # Corresponds to the JSON property `timeOffset`
2158
2089
  # @return [String]
2159
2090
  attr_accessor :time_offset
@@ -2212,20 +2143,19 @@ module Google
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
  include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- # one feature.
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
  # Corresponds to the JSON property `feature`
  # @return [String]
  attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- # 100 when fully processed.
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
  # Corresponds to the JSON property `progressPercent`
  # @return [Fixnum]
  attr_accessor :progress_percent
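
Because `progress_percent` is documented to reach 100 only when processing is complete, a caller polling the long-running operation can key off it directly. A minimal sketch, assuming `progresses` is the array of `GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress` objects read out of the operation metadata (a hypothetical variable, not something this diff defines):

  # Sketch only: summarize per-feature progress for one input video.
  progresses.each do |p|
    pct = p.progress_percent || 0
    status = pct == 100 ? 'done' : "#{pct}%"
    puts "#{p.feature} on #{p.input_uri}: #{status}"
  end
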
@@ -2264,31 +2194,30 @@ module Google
2264
2194
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
2265
2195
  include Google::Apis::Core::Hashable
2266
2196
 
2267
- # The `Status` type defines a logical error model that is suitable for
2268
- # different programming environments, including REST APIs and RPC APIs. It is
2269
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2270
- # three pieces of data: error code, error message, and error details.
2271
- # You can find out more about this error model and how to work with it in the
2272
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2197
+ # The `Status` type defines a logical error model that is suitable for different
2198
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2199
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2200
+ # data: error code, error message, and error details. You can find out more
2201
+ # about this error model and how to work with it in the [API Design Guide](https:
2202
+ # //cloud.google.com/apis/design/errors).
2273
2203
  # Corresponds to the JSON property `error`
2274
2204
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
2275
2205
  attr_accessor :error
2276
2206
 
2277
- # Explicit content annotation (based on per-frame visual signals only).
2278
- # If no explicit content has been detected in a frame, no annotations are
2279
- # present for that frame.
2207
+ # Explicit content annotation (based on per-frame visual signals only). If no
2208
+ # explicit content has been detected in a frame, no annotations are present for
2209
+ # that frame.
2280
2210
  # Corresponds to the JSON property `explicitAnnotation`
2281
2211
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
2282
2212
  attr_accessor :explicit_annotation
2283
2213
 
2284
- # Label annotations on frame level.
2285
- # There is exactly one element for each unique label.
2214
+ # Label annotations on frame level. There is exactly one element for each unique
2215
+ # label.
2286
2216
  # Corresponds to the JSON property `frameLabelAnnotations`
2287
2217
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2288
2218
  attr_accessor :frame_label_annotations
2289
2219
 
2290
- # Video file location in
2291
- # [Cloud Storage](https://cloud.google.com/storage/).
2220
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2292
2221
  # Corresponds to the JSON property `inputUri`
2293
2222
  # @return [String]
2294
2223
  attr_accessor :input_uri
@@ -2315,11 +2244,11 @@ module Google
2315
2244
  attr_accessor :segment_label_annotations
2316
2245
 
2317
2246
  # Presence label annotations on video level or user-specified segment level.
2318
- # There is exactly one element for each unique label. Compared to the
2319
- # existing topical `segment_label_annotations`, this field presents more
2320
- # fine-grained, segment-level labels detected in video content and is made
2321
- # available only when the client sets `LabelDetectionConfig.model` to
2322
- # "builtin/latest" in the request.
2247
+ # There is exactly one element for each unique label. Compared to the existing
2248
+ # topical `segment_label_annotations`, this field presents more fine-grained,
2249
+ # segment-level labels detected in video content and is made available only when
2250
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
2251
+ # request.
2323
2252
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
2324
2253
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2325
2254
  attr_accessor :segment_presence_label_annotations
@@ -2329,17 +2258,17 @@ module Google
2329
2258
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
2330
2259
  attr_accessor :shot_annotations
2331
2260
 
2332
- # Topical label annotations on shot level.
2333
- # There is exactly one element for each unique label.
2261
+ # Topical label annotations on shot level. There is exactly one element for each
2262
+ # unique label.
2334
2263
  # Corresponds to the JSON property `shotLabelAnnotations`
2335
2264
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2336
2265
  attr_accessor :shot_label_annotations
2337
2266
 
2338
2267
  # Presence label annotations on shot level. There is exactly one element for
2339
- # each unique label. Compared to the existing topical
2340
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
2341
- # labels detected in video content and is made available only when the client
2342
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
2268
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
2269
+ # this field presents more fine-grained, shot-level labels detected in video
2270
+ # content and is made available only when the client sets `LabelDetectionConfig.
2271
+ # model` to "builtin/latest" in the request.
2343
2272
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
2344
2273
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2345
2274
  attr_accessor :shot_presence_label_annotations
@@ -2349,9 +2278,8 @@ module Google
2349
2278
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
2350
2279
  attr_accessor :speech_transcriptions
2351
2280
 
2352
- # OCR text detection and tracking.
2353
- # Annotations for list of detected text snippets. Each will have list of
2354
- # frame information associated with it.
2281
+ # OCR text detection and tracking. Annotations for list of detected text
2282
+ # snippets. Each will have list of frame information associated with it.
2355
2283
  # Corresponds to the JSON property `textAnnotations`
2356
2284
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
2357
2285
  attr_accessor :text_annotations
@@ -2383,14 +2311,14 @@ module Google
2383
2311
  class GoogleCloudVideointelligenceV1beta2VideoSegment
2384
2312
  include Google::Apis::Core::Hashable
2385
2313
 
2386
- # Time-offset, relative to the beginning of the video,
2387
- # corresponding to the end of the segment (inclusive).
2314
+ # Time-offset, relative to the beginning of the video, corresponding to the end
2315
+ # of the segment (inclusive).
2388
2316
  # Corresponds to the JSON property `endTimeOffset`
2389
2317
  # @return [String]
2390
2318
  attr_accessor :end_time_offset
2391
2319
 
2392
- # Time-offset, relative to the beginning of the video,
2393
- # corresponding to the start of the segment (inclusive).
2320
+ # Time-offset, relative to the beginning of the video, corresponding to the
2321
+ # start of the segment (inclusive).
2394
2322
  # Corresponds to the JSON property `startTimeOffset`
2395
2323
  # @return [String]
2396
2324
  attr_accessor :start_time_offset
@@ -2407,41 +2335,41 @@ module Google
2407
2335
  end
2408
2336
 
2409
2337
  # Word-specific information for recognized words. Word information is only
2410
- # included in the response when certain request parameters are set, such
2411
- # as `enable_word_time_offsets`.
2338
+ # included in the response when certain request parameters are set, such as `
2339
+ # enable_word_time_offsets`.
2412
2340
  class GoogleCloudVideointelligenceV1beta2WordInfo
2413
2341
  include Google::Apis::Core::Hashable
2414
2342
 
2415
2343
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2416
2344
  # indicates an estimated greater likelihood that the recognized words are
2417
- # correct. This field is set only for the top alternative.
2418
- # This field is not guaranteed to be accurate and users should not rely on it
2419
- # to be always provided.
2420
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2345
+ # correct. This field is set only for the top alternative. This field is not
2346
+ # guaranteed to be accurate and users should not rely on it to be always
2347
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2348
+ # not set.
2421
2349
  # Corresponds to the JSON property `confidence`
2422
2350
  # @return [Float]
2423
2351
  attr_accessor :confidence
2424
2352
 
2425
- # Time offset relative to the beginning of the audio, and
2426
- # corresponding to the end of the spoken word. This field is only set if
2427
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2428
- # experimental feature and the accuracy of the time offset can vary.
2353
+ # Time offset relative to the beginning of the audio, and corresponding to the
2354
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
2355
+ # true` and only in the top hypothesis. This is an experimental feature and the
2356
+ # accuracy of the time offset can vary.
2429
2357
  # Corresponds to the JSON property `endTime`
2430
2358
  # @return [String]
2431
2359
  attr_accessor :end_time
2432
2360
 
2433
- # Output only. A distinct integer value is assigned for every speaker within
- # the audio. This field specifies which one of those speakers was detected to
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
- # and is only set if speaker diarization is enabled.
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
  # Corresponds to the JSON property `speakerTag`
  # @return [Fixnum]
  attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- # corresponding to the start of the spoken word. This field is only set if
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- # experimental feature and the accuracy of the time offset can vary.
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `startTime`
  # @return [String]
  attr_accessor :start_time
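
Taken together with the `SpeechRecognitionAlternative` comments earlier in this file, the fields above mean that when speaker diarization is enabled the top alternative carries every word with its `speaker_tag`, and a `confidence` of 0.0 is only the unset sentinel. A minimal sketch of grouping words per speaker, assuming `transcription` is a populated `GoogleCloudVideointelligenceV1beta2SpeechTranscription`; the `word` accessor on `WordInfo` sits outside the hunks shown here but is assumed to follow the same generated pattern:

  # Sketch only: group the recognized words of the top alternative by speaker.
  top = transcription.alternatives&.first
  if top
    puts "#{transcription.language_code}: #{top.transcript}"
    (top.words || []).group_by(&:speaker_tag).each do |speaker, words|
      range = "#{words.first.start_time}..#{words.last.end_time}"
      puts "  speaker #{speaker || '?'} (#{range}): #{words.map(&:word).join(' ')}"
    end
  end
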
@@ -2465,9 +2393,9 @@ module Google
2465
2393
  end
2466
2394
  end
2467
2395
 
2468
- # Video annotation progress. Included in the `metadata`
2469
- # field of the `Operation` returned by the `GetOperation`
2470
- # call of the `google::longrunning::Operations` service.
2396
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
2397
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2398
+ # service.
2471
2399
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
2472
2400
  include Google::Apis::Core::Hashable
2473
2401
 
@@ -2486,9 +2414,9 @@ module Google
2486
2414
  end
2487
2415
  end
2488
2416
 
2489
- # Video annotation response. Included in the `response`
2490
- # field of the `Operation` returned by the `GetOperation`
2491
- # call of the `google::longrunning::Operations` service.
2417
+ # Video annotation response. Included in the `response` field of the `Operation`
2418
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2419
+ # service.
2492
2420
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
2493
2421
  include Google::Apis::Core::Hashable
2494
2422
 
@@ -2516,14 +2444,14 @@ module Google
2516
2444
  # @return [Float]
2517
2445
  attr_accessor :confidence
2518
2446
 
2519
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
2520
- # A full list of supported type names will be provided in the document.
2447
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
2448
+ # full list of supported type names will be provided in the document.
2521
2449
  # Corresponds to the JSON property `name`
2522
2450
  # @return [String]
2523
2451
  attr_accessor :name
2524
2452
 
2525
- # Text value of the detection result. For example, the value for "HairColor"
2526
- # can be "black", "blonde", etc.
2453
+ # Text value of the detection result. For example, the value for "HairColor" can
2454
+ # be "black", "blonde", etc.
2527
2455
  # Corresponds to the JSON property `value`
2528
2456
  # @return [String]
2529
2457
  attr_accessor :value
@@ -2555,9 +2483,8 @@ module Google
2555
2483
  # @return [String]
2556
2484
  attr_accessor :name
2557
2485
 
2558
- # A vertex represents a 2D point in the image.
2559
- # NOTE: the normalized vertex coordinates are relative to the original image
2560
- # and range from 0 to 1.
2486
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2487
+ # coordinates are relative to the original image and range from 0 to 1.
2561
2488
  # Corresponds to the JSON property `point`
2562
2489
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
2563
2490
  attr_accessor :point
@@ -2583,8 +2510,7 @@ module Google
2583
2510
  # @return [String]
2584
2511
  attr_accessor :description
2585
2512
 
2586
- # Opaque entity ID. Some IDs may be available in
2587
- # [Google Knowledge Graph Search
2513
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
2588
2514
  # API](https://developers.google.com/knowledge-graph/).
2589
2515
  # Corresponds to the JSON property `entityId`
2590
2516
  # @return [String]
@@ -2607,9 +2533,9 @@ module Google
2607
2533
  end
2608
2534
  end
2609
2535
 
2610
- # Explicit content annotation (based on per-frame visual signals only).
2611
- # If no explicit content has been detected in a frame, no annotations are
2612
- # present for that frame.
2536
+ # Explicit content annotation (based on per-frame visual signals only). If no
2537
+ # explicit content has been detected in a frame, no annotations are present for
2538
+ # that frame.
2613
2539
  class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
2614
2540
  include Google::Apis::Core::Hashable
2615
2541
 
@@ -2664,10 +2590,9 @@ module Google
2664
2590
  class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
2665
2591
  include Google::Apis::Core::Hashable
2666
2592
 
2667
- # Common categories for the detected entity.
2668
- # For example, when the label is `Terrier`, the category is likely `dog`. And
2669
- # in some cases there might be more than one categories e.g., `Terrier` could
2670
- # also be a `pet`.
2593
+ # Common categories for the detected entity. For example, when the label is `
2594
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
2595
+ # than one categories e.g., `Terrier` could also be a `pet`.
2671
2596
  # Corresponds to the JSON property `categoryEntities`
2672
2597
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity>]
2673
2598
  attr_accessor :category_entities
@@ -2766,14 +2691,14 @@ module Google
2766
2691
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity]
2767
2692
  attr_accessor :entity
2768
2693
 
2769
- # All video segments where the recognized logo appears. There might be
2770
- # multiple instances of the same logo class appearing in one VideoSegment.
2694
+ # All video segments where the recognized logo appears. There might be multiple
2695
+ # instances of the same logo class appearing in one VideoSegment.
2771
2696
  # Corresponds to the JSON property `segments`
2772
2697
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
2773
2698
  attr_accessor :segments
2774
2699
 
2775
- # All logo tracks where the recognized logo appears. Each track corresponds
2776
- # to one logo instance appearing in consecutive frames.
2700
+ # All logo tracks where the recognized logo appears. Each track corresponds to
2701
+ # one logo instance appearing in consecutive frames.
2777
2702
  # Corresponds to the JSON property `tracks`
2778
2703
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Track>]
2779
2704
  attr_accessor :tracks
@@ -2790,9 +2715,8 @@ module Google
2790
2715
  end
2791
2716
  end
2792
2717
 
2793
- # Normalized bounding box.
2794
- # The normalized vertex coordinates are relative to the original image.
2795
- # Range: [0, 1].
2718
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2719
+ # original image. Range: [0, 1].
2796
2720
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
2797
2721
  include Google::Apis::Core::Hashable
2798
2722
 
@@ -2830,20 +2754,12 @@ module Google
2830
2754
  end
2831
2755
 
2832
2756
  # Normalized bounding polygon for text (that might not be aligned with axis).
2833
- # Contains list of the corner points in clockwise order starting from
2834
- # top-left corner. For example, for a rectangular bounding box:
2835
- # When the text is horizontal it might look like:
2836
- # 0----1
2837
- # | |
2838
- # 3----2
2839
- # When it's clockwise rotated 180 degrees around the top-left corner it
2840
- # becomes:
2841
- # 2----3
2842
- # | |
2843
- # 1----0
2844
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2845
- # than 0, or greater than 1 due to trignometric calculations for location of
2846
- # the box.
2757
+ # Contains list of the corner points in clockwise order starting from top-left
2758
+ # corner. For example, for a rectangular bounding box: When the text is
2759
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2760
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2761
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2762
+ # or greater than 1 due to trignometric calculations for location of the box.
2847
2763
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
2848
2764
  include Google::Apis::Core::Hashable
2849
2765
 
@@ -2862,9 +2778,8 @@ module Google
2862
2778
  end
2863
2779
  end
2864
2780
 
2865
- # A vertex represents a 2D point in the image.
2866
- # NOTE: the normalized vertex coordinates are relative to the original image
2867
- # and range from 0 to 1.
2781
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2782
+ # coordinates are relative to the original image and range from 0 to 1.
2868
2783
  class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
2869
2784
  include Google::Apis::Core::Hashable
2870
2785
 
@@ -2903,10 +2818,10 @@ module Google
2903
2818
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity]
2904
2819
  attr_accessor :entity
2905
2820
 
2906
- # Information corresponding to all frames where this object track appears.
2907
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
2908
- # messages in frames.
2909
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
2821
+ # Information corresponding to all frames where this object track appears. Non-
2822
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
2823
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
2824
+ # frames.
2910
2825
  # Corresponds to the JSON property `frames`
2911
2826
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
2912
2827
  attr_accessor :frames
@@ -2916,12 +2831,11 @@ module Google
2916
2831
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
2917
2832
  attr_accessor :segment
2918
2833
 
2919
- # Streaming mode ONLY.
2920
- # In streaming mode, we do not know the end time of a tracked object
2921
- # before it is completed. Hence, there is no VideoSegment info returned.
2922
- # Instead, we provide a unique identifiable integer track_id so that
2923
- # the customers can correlate the results of the ongoing
2924
- # ObjectTrackAnnotation of the same track_id over time.
2834
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
2835
+ # tracked object before it is completed. Hence, there is no VideoSegment info
2836
+ # returned. Instead, we provide a unique identifiable integer track_id so that
2837
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
2838
+ # of the same track_id over time.
2925
2839
  # Corresponds to the JSON property `trackId`
2926
2840
  # @return [Fixnum]
2927
2841
  attr_accessor :track_id
@@ -2951,9 +2865,8 @@ module Google
2951
2865
  class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
2952
2866
  include Google::Apis::Core::Hashable
2953
2867
 
2954
- # Normalized bounding box.
2955
- # The normalized vertex coordinates are relative to the original image.
2956
- # Range: [0, 1].
2868
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2869
+ # original image. Range: [0, 1].
2957
2870
  # Corresponds to the JSON property `normalizedBoundingBox`
2958
2871
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
2959
2872
  attr_accessor :normalized_bounding_box
@@ -2980,10 +2893,10 @@ module Google
2980
2893
 
2981
2894
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2982
2895
  # indicates an estimated greater likelihood that the recognized words are
2983
- # correct. This field is set only for the top alternative.
2984
- # This field is not guaranteed to be accurate and users should not rely on it
2985
- # to be always provided.
2986
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2896
+ # correct. This field is set only for the top alternative. This field is not
2897
+ # guaranteed to be accurate and users should not rely on it to be always
2898
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2899
+ # not set.
2987
2900
  # Corresponds to the JSON property `confidence`
2988
2901
  # @return [Float]
2989
2902
  attr_accessor :confidence
@@ -2994,8 +2907,8 @@ module Google
2994
2907
  attr_accessor :transcript
2995
2908
 
2996
2909
  # Output only. A list of word-specific information for each recognized word.
2997
- # Note: When `enable_speaker_diarization` is set to true, you will see all
2998
- # the words from the beginning of the audio.
2910
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
2911
+ # words from the beginning of the audio.
2999
2912
  # Corresponds to the JSON property `words`
3000
2913
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
3001
2914
  attr_accessor :words
@@ -3016,18 +2929,17 @@ module Google
3016
2929
  class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
3017
2930
  include Google::Apis::Core::Hashable
3018
2931
 
3019
- # May contain one or more recognition hypotheses (up to the maximum specified
3020
- # in `max_alternatives`). These alternatives are ordered in terms of
3021
- # accuracy, with the top (first) alternative being the most probable, as
3022
- # ranked by the recognizer.
2932
+ # May contain one or more recognition hypotheses (up to the maximum specified in
2933
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
2934
+ # the top (first) alternative being the most probable, as ranked by the
2935
+ # recognizer.
3023
2936
  # Corresponds to the JSON property `alternatives`
3024
2937
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
3025
2938
  attr_accessor :alternatives
3026
2939
 
3027
2940
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
3028
- # language tag of
3029
- # the language in this result. This language code was detected to have the
3030
- # most likelihood of being spoken in the audio.
2941
+ # language tag of the language in this result. This language code was detected
2942
+ # to have the most likelihood of being spoken in the audio.
3031
2943
  # Corresponds to the JSON property `languageCode`
3032
2944
  # @return [String]
3033
2945
  attr_accessor :language_code
@@ -3076,27 +2988,19 @@ module Google
3076
2988
  end
3077
2989
  end
3078
2990
 
3079
- # Video frame level annotation results for text annotation (OCR).
3080
- # Contains information regarding timestamp and bounding box locations for the
3081
- # frames containing detected OCR text snippets.
2991
+ # Video frame level annotation results for text annotation (OCR). Contains
2992
+ # information regarding timestamp and bounding box locations for the frames
2993
+ # containing detected OCR text snippets.
3082
2994
  class GoogleCloudVideointelligenceV1p1beta1TextFrame
3083
2995
  include Google::Apis::Core::Hashable
3084
2996
 
3085
2997
  # Normalized bounding polygon for text (that might not be aligned with axis).
3086
- # Contains list of the corner points in clockwise order starting from
3087
- # top-left corner. For example, for a rectangular bounding box:
3088
- # When the text is horizontal it might look like:
3089
- # 0----1
3090
- # | |
3091
- # 3----2
3092
- # When it's clockwise rotated 180 degrees around the top-left corner it
3093
- # becomes:
3094
- # 2----3
3095
- # | |
3096
- # 1----0
3097
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3098
- # than 0, or greater than 1 due to trignometric calculations for location of
3099
- # the box.
2998
+ # Contains list of the corner points in clockwise order starting from top-left
2999
+ # corner. For example, for a rectangular bounding box: When the text is
3000
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3001
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3002
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3003
+ # or greater than 1 due to trignometric calculations for location of the box.
3100
3004
  # Corresponds to the JSON property `rotatedBoundingBox`
3101
3005
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
3102
3006
  attr_accessor :rotated_bounding_box
@@ -3149,9 +3053,8 @@ module Google
3149
3053
  end
3150
3054
  end
3151
3055
 
3152
- # For tracking related features.
3153
- # An object at time_offset with attributes, and located with
3154
- # normalized_bounding_box.
3056
+ # For tracking related features. An object at time_offset with attributes, and
3057
+ # located with normalized_bounding_box.
3155
3058
  class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
3156
3059
  include Google::Apis::Core::Hashable
3157
3060
 
@@ -3165,15 +3068,14 @@ module Google
3165
3068
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
3166
3069
  attr_accessor :landmarks
3167
3070
 
3168
- # Normalized bounding box.
3169
- # The normalized vertex coordinates are relative to the original image.
3170
- # Range: [0, 1].
3071
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3072
+ # original image. Range: [0, 1].
3171
3073
  # Corresponds to the JSON property `normalizedBoundingBox`
3172
3074
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
3173
3075
  attr_accessor :normalized_bounding_box
3174
3076
 
3175
- # Time-offset, relative to the beginning of the video,
3176
- # corresponding to the video frame for this object.
3077
+ # Time-offset, relative to the beginning of the video, corresponding to the
3078
+ # video frame for this object.
3177
3079
  # Corresponds to the JSON property `timeOffset`
3178
3080
  # @return [String]
3179
3081
  attr_accessor :time_offset
@@ -3232,20 +3134,19 @@ module Google
3232
3134
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
3233
3135
  include Google::Apis::Core::Hashable
3234
3136
 
3235
- # Specifies which feature is being tracked if the request contains more than
3236
- # one feature.
3137
+ # Specifies which feature is being tracked if the request contains more than one
3138
+ # feature.
3237
3139
  # Corresponds to the JSON property `feature`
3238
3140
  # @return [String]
3239
3141
  attr_accessor :feature
3240
3142
 
3241
- # Video file location in
3242
- # [Cloud Storage](https://cloud.google.com/storage/).
3143
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3243
3144
  # Corresponds to the JSON property `inputUri`
3244
3145
  # @return [String]
3245
3146
  attr_accessor :input_uri
3246
3147
 
3247
- # Approximate percentage processed thus far. Guaranteed to be
3248
- # 100 when fully processed.
3148
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
3149
+ # processed.
3249
3150
  # Corresponds to the JSON property `progressPercent`
3250
3151
  # @return [Fixnum]
3251
3152
  attr_accessor :progress_percent
@@ -3284,31 +3185,30 @@ module Google
3284
3185
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
3285
3186
  include Google::Apis::Core::Hashable
3286
3187
 
3287
- # The `Status` type defines a logical error model that is suitable for
3288
- # different programming environments, including REST APIs and RPC APIs. It is
3289
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3290
- # three pieces of data: error code, error message, and error details.
3291
- # You can find out more about this error model and how to work with it in the
3292
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3188
+ # The `Status` type defines a logical error model that is suitable for different
3189
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3190
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3191
+ # data: error code, error message, and error details. You can find out more
3192
+ # about this error model and how to work with it in the [API Design Guide](https:
3193
+ # //cloud.google.com/apis/design/errors).
3293
3194
  # Corresponds to the JSON property `error`
3294
3195
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
3295
3196
  attr_accessor :error
3296
3197
 
3297
- # Explicit content annotation (based on per-frame visual signals only).
3298
- # If no explicit content has been detected in a frame, no annotations are
3299
- # present for that frame.
3198
+ # Explicit content annotation (based on per-frame visual signals only). If no
3199
+ # explicit content has been detected in a frame, no annotations are present for
3200
+ # that frame.
3300
3201
  # Corresponds to the JSON property `explicitAnnotation`
3301
3202
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
3302
3203
  attr_accessor :explicit_annotation
3303
3204
 
3304
- # Label annotations on frame level.
3305
- # There is exactly one element for each unique label.
3205
+ # Label annotations on frame level. There is exactly one element for each unique
3206
+ # label.
3306
3207
  # Corresponds to the JSON property `frameLabelAnnotations`
3307
3208
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3308
3209
  attr_accessor :frame_label_annotations
3309
3210
 
3310
- # Video file location in
3311
- # [Cloud Storage](https://cloud.google.com/storage/).
3211
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3312
3212
  # Corresponds to the JSON property `inputUri`
3313
3213
  # @return [String]
3314
3214
  attr_accessor :input_uri
@@ -3335,11 +3235,11 @@ module Google
3335
3235
  attr_accessor :segment_label_annotations
3336
3236
 
3337
3237
  # Presence label annotations on video level or user-specified segment level.
3338
- # There is exactly one element for each unique label. Compared to the
3339
- # existing topical `segment_label_annotations`, this field presents more
3340
- # fine-grained, segment-level labels detected in video content and is made
3341
- # available only when the client sets `LabelDetectionConfig.model` to
3342
- # "builtin/latest" in the request.
3238
+ # There is exactly one element for each unique label. Compared to the existing
3239
+ # topical `segment_label_annotations`, this field presents more fine-grained,
3240
+ # segment-level labels detected in video content and is made available only when
3241
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
3242
+ # request.
3343
3243
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
3344
3244
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3345
3245
  attr_accessor :segment_presence_label_annotations
@@ -3349,17 +3249,17 @@ module Google
3349
3249
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
3350
3250
  attr_accessor :shot_annotations
3351
3251
 
3352
- # Topical label annotations on shot level.
3353
- # There is exactly one element for each unique label.
3252
+ # Topical label annotations on shot level. There is exactly one element for each
3253
+ # unique label.
3354
3254
  # Corresponds to the JSON property `shotLabelAnnotations`
3355
3255
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3356
3256
  attr_accessor :shot_label_annotations
3357
3257
 
3358
3258
  # Presence label annotations on shot level. There is exactly one element for
3359
- # each unique label. Compared to the existing topical
3360
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
3361
- # labels detected in video content and is made available only when the client
3362
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
3259
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
3260
+ # this field presents more fine-grained, shot-level labels detected in video
3261
+ # content and is made available only when the client sets `LabelDetectionConfig.
3262
+ # model` to "builtin/latest" in the request.
3363
3263
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
3364
3264
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3365
3265
  attr_accessor :shot_presence_label_annotations
@@ -3369,9 +3269,8 @@ module Google
3369
3269
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
3370
3270
  attr_accessor :speech_transcriptions
3371
3271
 
3372
- # OCR text detection and tracking.
3373
- # Annotations for list of detected text snippets. Each will have list of
3374
- # frame information associated with it.
3272
+ # OCR text detection and tracking. Annotations for list of detected text
3273
+ # snippets. Each will have list of frame information associated with it.
3375
3274
  # Corresponds to the JSON property `textAnnotations`
3376
3275
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
3377
3276
  attr_accessor :text_annotations
@@ -3403,14 +3302,14 @@ module Google
3403
3302
  class GoogleCloudVideointelligenceV1p1beta1VideoSegment
3404
3303
  include Google::Apis::Core::Hashable
3405
3304
 
3406
- # Time-offset, relative to the beginning of the video,
3407
- # corresponding to the end of the segment (inclusive).
3305
+ # Time-offset, relative to the beginning of the video, corresponding to the end
3306
+ # of the segment (inclusive).
3408
3307
  # Corresponds to the JSON property `endTimeOffset`
3409
3308
  # @return [String]
3410
3309
  attr_accessor :end_time_offset
3411
3310
 
3412
- # Time-offset, relative to the beginning of the video,
3413
- # corresponding to the start of the segment (inclusive).
3311
+ # Time-offset, relative to the beginning of the video, corresponding to the
3312
+ # start of the segment (inclusive).
3414
3313
  # Corresponds to the JSON property `startTimeOffset`
3415
3314
  # @return [String]
3416
3315
  attr_accessor :start_time_offset
@@ -3427,41 +3326,41 @@ module Google
3427
3326
  end
3428
3327
 
3429
3328
  # Word-specific information for recognized words. Word information is only
3430
- # included in the response when certain request parameters are set, such
3431
- # as `enable_word_time_offsets`.
3329
+ # included in the response when certain request parameters are set, such as `
3330
+ # enable_word_time_offsets`.
3432
3331
  class GoogleCloudVideointelligenceV1p1beta1WordInfo
3433
3332
  include Google::Apis::Core::Hashable
3434
3333
 
3435
3334
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
3436
3335
  # indicates an estimated greater likelihood that the recognized words are
3437
- # correct. This field is set only for the top alternative.
3438
- # This field is not guaranteed to be accurate and users should not rely on it
3439
- # to be always provided.
3440
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3336
+ # correct. This field is set only for the top alternative. This field is not
3337
+ # guaranteed to be accurate and users should not rely on it to be always
3338
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3339
+ # not set.
3441
3340
  # Corresponds to the JSON property `confidence`
3442
3341
  # @return [Float]
3443
3342
  attr_accessor :confidence
3444
3343
 
3445
- # Time offset relative to the beginning of the audio, and
3446
- # corresponding to the end of the spoken word. This field is only set if
3447
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3448
- # experimental feature and the accuracy of the time offset can vary.
3344
+ # Time offset relative to the beginning of the audio, and corresponding to the
3345
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
3346
+ # true` and only in the top hypothesis. This is an experimental feature and the
3347
+ # accuracy of the time offset can vary.
3449
3348
  # Corresponds to the JSON property `endTime`
3450
3349
  # @return [String]
3451
3350
  attr_accessor :end_time
3452
3351
 
3453
- # Output only. A distinct integer value is assigned for every speaker within
3454
- # the audio. This field specifies which one of those speakers was detected to
3455
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
3456
- # and is only set if speaker diarization is enabled.
3352
+ # Output only. A distinct integer value is assigned for every speaker within the
3353
+ # audio. This field specifies which one of those speakers was detected to have
3354
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
3355
+ # only set if speaker diarization is enabled.
3457
3356
  # Corresponds to the JSON property `speakerTag`
3458
3357
  # @return [Fixnum]
3459
3358
  attr_accessor :speaker_tag
3460
3359
 
3461
- # Time offset relative to the beginning of the audio, and
3462
- # corresponding to the start of the spoken word. This field is only set if
3463
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3464
- # experimental feature and the accuracy of the time offset can vary.
3360
+ # Time offset relative to the beginning of the audio, and corresponding to the
3361
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
3362
+ # true` and only in the top hypothesis. This is an experimental feature and the
3363
+ # accuracy of the time offset can vary.
3465
3364
  # Corresponds to the JSON property `startTime`
3466
3365
  # @return [String]
3467
3366
  attr_accessor :start_time
@@ -3485,9 +3384,9 @@ module Google
3485
3384
  end
3486
3385
  end
3487
3386
 
3488
- # Video annotation progress. Included in the `metadata`
3489
- # field of the `Operation` returned by the `GetOperation`
3490
- # call of the `google::longrunning::Operations` service.
3387
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
3388
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3389
+ # service.
3491
3390
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
3492
3391
  include Google::Apis::Core::Hashable
3493
3392
 
@@ -3506,9 +3405,9 @@ module Google
3506
3405
  end
3507
3406
  end
3508
3407
 
3509
- # Video annotation response. Included in the `response`
3510
- # field of the `Operation` returned by the `GetOperation`
3511
- # call of the `google::longrunning::Operations` service.
3408
+ # Video annotation response. Included in the `response` field of the `Operation`
3409
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3410
+ # service.
3512
3411
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
3513
3412
  include Google::Apis::Core::Hashable
3514
3413
 
@@ -3536,14 +3435,14 @@ module Google
3536
3435
  # @return [Float]
3537
3436
  attr_accessor :confidence
3538
3437
 
3539
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
3540
- # A full list of supported type names will be provided in the document.
3438
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
3439
+ # full list of supported type names will be provided in the document.
3541
3440
  # Corresponds to the JSON property `name`
3542
3441
  # @return [String]
3543
3442
  attr_accessor :name
3544
3443
 
3545
- # Text value of the detection result. For example, the value for "HairColor"
3546
- # can be "black", "blonde", etc.
3444
+ # Text value of the detection result. For example, the value for "HairColor" can
3445
+ # be "black", "blonde", etc.
3547
3446
  # Corresponds to the JSON property `value`
3548
3447
  # @return [String]
3549
3448
  attr_accessor :value
@@ -3575,9 +3474,8 @@ module Google
3575
3474
  # @return [String]
3576
3475
  attr_accessor :name
3577
3476
 
3578
- # A vertex represents a 2D point in the image.
3579
- # NOTE: the normalized vertex coordinates are relative to the original image
3580
- # and range from 0 to 1.
3477
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3478
+ # coordinates are relative to the original image and range from 0 to 1.
3581
3479
  # Corresponds to the JSON property `point`
3582
3480
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
3583
3481
  attr_accessor :point
@@ -3603,8 +3501,7 @@ module Google
3603
3501
  # @return [String]
3604
3502
  attr_accessor :description
3605
3503
 
3606
- # Opaque entity ID. Some IDs may be available in
3607
- # [Google Knowledge Graph Search
3504
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
3608
3505
  # API](https://developers.google.com/knowledge-graph/).
3609
3506
  # Corresponds to the JSON property `entityId`
3610
3507
  # @return [String]
@@ -3627,9 +3524,9 @@ module Google
3627
3524
  end
3628
3525
  end
3629
3526
 
3630
- # Explicit content annotation (based on per-frame visual signals only).
3631
- # If no explicit content has been detected in a frame, no annotations are
3632
- # present for that frame.
3527
+ # Explicit content annotation (based on per-frame visual signals only). If no
3528
+ # explicit content has been detected in a frame, no annotations are present for
3529
+ # that frame.
3633
3530
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
3634
3531
  include Google::Apis::Core::Hashable
3635
3532
 
@@ -3684,10 +3581,9 @@ module Google
3684
3581
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
3685
3582
  include Google::Apis::Core::Hashable
3686
3583
 
3687
- # Common categories for the detected entity.
3688
- # For example, when the label is `Terrier`, the category is likely `dog`. And
3689
- # in some cases there might be more than one categories e.g., `Terrier` could
3690
- # also be a `pet`.
3584
+ # Common categories for the detected entity. For example, when the label is `
3585
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
3586
+ # than one categories e.g., `Terrier` could also be a `pet`.
3691
3587
  # Corresponds to the JSON property `categoryEntities`
3692
3588
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Entity>]
3693
3589
  attr_accessor :category_entities
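
A hedged sketch of reading a label together with its broader categories (for example, `Terrier` with categories `dog` and `pet`); the `entity` accessor and the `description` accessor on Entity are assumed from neighboring hunks rather than this one:

  require 'google/apis/videointelligence_v1'

  # Print "Terrier (categories: dog, pet)"-style lines for an array of
  # GoogleCloudVideointelligenceV1p2beta1LabelAnnotation objects.
  def describe_labels(label_annotations)
    label_annotations.each do |label|
      categories = (label.category_entities || []).map(&:description)
      puts "#{label.entity&.description} (categories: #{categories.join(', ')})"
    end
  end
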
@@ -3786,14 +3682,14 @@ module Google
3786
3682
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Entity]
3787
3683
  attr_accessor :entity
3788
3684
 
3789
- # All video segments where the recognized logo appears. There might be
3790
- # multiple instances of the same logo class appearing in one VideoSegment.
3685
+ # All video segments where the recognized logo appears. There might be multiple
3686
+ # instances of the same logo class appearing in one VideoSegment.
3791
3687
  # Corresponds to the JSON property `segments`
3792
3688
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
3793
3689
  attr_accessor :segments
3794
3690
 
3795
- # All logo tracks where the recognized logo appears. Each track corresponds
3796
- # to one logo instance appearing in consecutive frames.
3691
+ # All logo tracks where the recognized logo appears. Each track corresponds to
3692
+ # one logo instance appearing in consecutive frames.
3797
3693
  # Corresponds to the JSON property `tracks`
3798
3694
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Track>]
3799
3695
  attr_accessor :tracks
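
A short sketch of walking one recognized logo's segments and tracks through the accessors above; the VideoSegment offset accessors are taken from the segment hunks later in this file, and the annotation is assumed to be the v1p2beta1 LogoRecognitionAnnotation model:

  require 'google/apis/videointelligence_v1'

  # Summarize where one recognized logo shows up in the video.
  def summarize_logo(logo)
    puts "Logo: #{logo.entity&.description}"
    (logo.segments || []).each do |segment|
      puts "  segment #{segment.start_time_offset} - #{segment.end_time_offset}"
    end
    puts "  #{(logo.tracks || []).size} track(s) in consecutive frames"
  end
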
@@ -3810,9 +3706,8 @@ module Google
3810
3706
  end
3811
3707
  end
3812
3708
 
3813
- # Normalized bounding box.
3814
- # The normalized vertex coordinates are relative to the original image.
3815
- # Range: [0, 1].
3709
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3710
+ # original image. Range: [0, 1].
3816
3711
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
3817
3712
  include Google::Apis::Core::Hashable
3818
3713
 
@@ -3850,20 +3745,12 @@ module Google
3850
3745
  end
3851
3746
 
3852
3747
  # Normalized bounding polygon for text (that might not be aligned with axis).
3853
- # Contains list of the corner points in clockwise order starting from
3854
- # top-left corner. For example, for a rectangular bounding box:
3855
- # When the text is horizontal it might look like:
3856
- # 0----1
3857
- # | |
3858
- # 3----2
3859
- # When it's clockwise rotated 180 degrees around the top-left corner it
3860
- # becomes:
3861
- # 2----3
3862
- # | |
3863
- # 1----0
3864
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3865
- # than 0, or greater than 1 due to trignometric calculations for location of
3866
- # the box.
3748
+ # Contains list of the corner points in clockwise order starting from top-left
3749
+ # corner. For example, for a rectangular bounding box: When the text is
3750
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3751
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3752
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3753
+ # or greater than 1 due to trignometric calculations for location of the box.
3867
3754
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
3868
3755
  include Google::Apis::Core::Hashable
3869
3756
 
@@ -3882,9 +3769,8 @@ module Google
3882
3769
  end
3883
3770
  end
3884
3771
 
3885
- # A vertex represents a 2D point in the image.
3886
- # NOTE: the normalized vertex coordinates are relative to the original image
3887
- # and range from 0 to 1.
3772
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3773
+ # coordinates are relative to the original image and range from 0 to 1.
3888
3774
  class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
3889
3775
  include Google::Apis::Core::Hashable
3890
3776
 
@@ -3923,10 +3809,10 @@ module Google
3923
3809
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Entity]
3924
3810
  attr_accessor :entity
3925
3811
 
3926
- # Information corresponding to all frames where this object track appears.
3927
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
3928
- # messages in frames.
3929
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
3812
+ # Information corresponding to all frames where this object track appears. Non-
3813
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
3814
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
3815
+ # frames.
3930
3816
  # Corresponds to the JSON property `frames`
3931
3817
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
3932
3818
  attr_accessor :frames
@@ -3936,12 +3822,11 @@ module Google
3936
3822
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
3937
3823
  attr_accessor :segment
3938
3824
 
3939
- # Streaming mode ONLY.
3940
- # In streaming mode, we do not know the end time of a tracked object
3941
- # before it is completed. Hence, there is no VideoSegment info returned.
3942
- # Instead, we provide a unique identifiable integer track_id so that
3943
- # the customers can correlate the results of the ongoing
3944
- # ObjectTrackAnnotation of the same track_id over time.
3825
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
3826
+ # tracked object before it is completed. Hence, there is no VideoSegment info
3827
+ # returned. Instead, we provide a unique identifiable integer track_id so that
3828
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
3829
+ # of the same track_id over time.
3945
3830
  # Corresponds to the JSON property `trackId`
3946
3831
  # @return [Fixnum]
3947
3832
  attr_accessor :track_id
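
The comments above draw a batch/streaming distinction: batch results carry a VideoSegment, while streaming results expose only an integer track_id for correlating ongoing annotations. A minimal sketch of that branching, assuming the annotation arrives as the generated v1p2beta1 ObjectTrackingAnnotation model:

  require 'google/apis/videointelligence_v1'

  # Batch mode: a populated segment. Streaming mode: only track_id, which the
  # caller uses to correlate results for the same track over time.
  def object_track_key(annotation)
    if annotation.segment
      "segment #{annotation.segment.start_time_offset}-#{annotation.segment.end_time_offset}"
    else
      "streaming track ##{annotation.track_id}"
    end
  end
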
@@ -3971,9 +3856,8 @@ module Google
3971
3856
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
3972
3857
  include Google::Apis::Core::Hashable
3973
3858
 
3974
- # Normalized bounding box.
3975
- # The normalized vertex coordinates are relative to the original image.
3976
- # Range: [0, 1].
3859
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3860
+ # original image. Range: [0, 1].
3977
3861
  # Corresponds to the JSON property `normalizedBoundingBox`
3978
3862
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
3979
3863
  attr_accessor :normalized_bounding_box
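
A hedged sketch converting the normalized [0, 1] box of one tracking frame into pixel coordinates; the left/top/right/bottom accessors on NormalizedBoundingBox are assumptions, since they are not part of the hunks shown here:

  require 'google/apis/videointelligence_v1'

  # Scale a normalized bounding box to pixel values for a frame of the given size.
  def to_pixels(tracking_frame, width:, height:)
    box = tracking_frame.normalized_bounding_box
    return {} unless box
    {
      left:   (box.left.to_f   * width).round,
      top:    (box.top.to_f    * height).round,
      right:  (box.right.to_f  * width).round,
      bottom: (box.bottom.to_f * height).round
    }
  end
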
@@ -4000,10 +3884,10 @@ module Google
4000
3884
 
4001
3885
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4002
3886
  # indicates an estimated greater likelihood that the recognized words are
4003
- # correct. This field is set only for the top alternative.
4004
- # This field is not guaranteed to be accurate and users should not rely on it
4005
- # to be always provided.
4006
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3887
+ # correct. This field is set only for the top alternative. This field is not
3888
+ # guaranteed to be accurate and users should not rely on it to be always
3889
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3890
+ # not set.
4007
3891
  # Corresponds to the JSON property `confidence`
4008
3892
  # @return [Float]
4009
3893
  attr_accessor :confidence
@@ -4014,8 +3898,8 @@ module Google
4014
3898
  attr_accessor :transcript
4015
3899
 
4016
3900
  # Output only. A list of word-specific information for each recognized word.
4017
- # Note: When `enable_speaker_diarization` is set to true, you will see all
4018
- # the words from the beginning of the audio.
3901
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
3902
+ # words from the beginning of the audio.
4019
3903
  # Corresponds to the JSON property `words`
4020
3904
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
4021
3905
  attr_accessor :words
@@ -4036,18 +3920,17 @@ module Google
4036
3920
  class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
4037
3921
  include Google::Apis::Core::Hashable
4038
3922
 
4039
- # May contain one or more recognition hypotheses (up to the maximum specified
4040
- # in `max_alternatives`). These alternatives are ordered in terms of
4041
- # accuracy, with the top (first) alternative being the most probable, as
4042
- # ranked by the recognizer.
3923
+ # May contain one or more recognition hypotheses (up to the maximum specified in
3924
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
3925
+ # the top (first) alternative being the most probable, as ranked by the
3926
+ # recognizer.
4043
3927
  # Corresponds to the JSON property `alternatives`
4044
3928
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
4045
3929
  attr_accessor :alternatives
4046
3930
 
4047
3931
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
4048
- # language tag of
4049
- # the language in this result. This language code was detected to have the
4050
- # most likelihood of being spoken in the audio.
3932
+ # language tag of the language in this result. This language code was detected
3933
+ # to have the most likelihood of being spoken in the audio.
4051
3934
  # Corresponds to the JSON property `languageCode`
4052
3935
  # @return [String]
4053
3936
  attr_accessor :language_code
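
Since alternatives are ordered with the most probable hypothesis first, picking the top transcript means taking the first element. A minimal sketch using the accessors from this and the preceding hunks:

  require 'google/apis/videointelligence_v1'

  # Print the most probable hypothesis for each transcription, tagged with the
  # detected BCP-47 language code.
  def print_top_hypotheses(speech_transcriptions)
    speech_transcriptions.each do |transcription|
      best = (transcription.alternatives || []).first
      next unless best
      puts "[#{transcription.language_code}] #{best.transcript} " \
           "(confidence: #{best.confidence || 'not set'})"
    end
  end
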
@@ -4096,27 +3979,19 @@ module Google
4096
3979
  end
4097
3980
  end
4098
3981
 
4099
- # Video frame level annotation results for text annotation (OCR).
4100
- # Contains information regarding timestamp and bounding box locations for the
4101
- # frames containing detected OCR text snippets.
3982
+ # Video frame level annotation results for text annotation (OCR). Contains
3983
+ # information regarding timestamp and bounding box locations for the frames
3984
+ # containing detected OCR text snippets.
4102
3985
  class GoogleCloudVideointelligenceV1p2beta1TextFrame
4103
3986
  include Google::Apis::Core::Hashable
4104
3987
 
4105
3988
  # Normalized bounding polygon for text (that might not be aligned with axis).
4106
- # Contains list of the corner points in clockwise order starting from
4107
- # top-left corner. For example, for a rectangular bounding box:
4108
- # When the text is horizontal it might look like:
4109
- # 0----1
4110
- # | |
4111
- # 3----2
4112
- # When it's clockwise rotated 180 degrees around the top-left corner it
4113
- # becomes:
4114
- # 2----3
4115
- # | |
4116
- # 1----0
4117
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
4118
- # than 0, or greater than 1 due to trignometric calculations for location of
4119
- # the box.
3989
+ # Contains list of the corner points in clockwise order starting from top-left
3990
+ # corner. For example, for a rectangular bounding box: When the text is
3991
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3992
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3993
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3994
+ # or greater than 1 due to trignometric calculations for location of the box.
4120
3995
  # Corresponds to the JSON property `rotatedBoundingBox`
4121
3996
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
4122
3997
  attr_accessor :rotated_bounding_box
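
A hedged sketch printing the clockwise corner points of one detected text frame; the `vertices` accessor on NormalizedBoundingPoly, the `x`/`y` accessors on NormalizedVertex, and the `time_offset` accessor on TextFrame are all assumptions, as none of them appear in the hunks shown here:

  require 'google/apis/videointelligence_v1'

  # Dump the rotated bounding box corners (clockwise from the top-left corner).
  def print_text_frame(text_frame)
    puts "text at #{text_frame.time_offset}:"
    vertices = text_frame.rotated_bounding_box&.vertices || []
    vertices.each_with_index do |vertex, i|
      # Values can fall outside [0, 1] because of the trigonometric box fit.
      puts "  vertex #{i}: (#{vertex.x}, #{vertex.y})"
    end
  end
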
@@ -4169,9 +4044,8 @@ module Google
4169
4044
  end
4170
4045
  end
4171
4046
 
4172
- # For tracking related features.
4173
- # An object at time_offset with attributes, and located with
4174
- # normalized_bounding_box.
4047
+ # For tracking related features. An object at time_offset with attributes, and
4048
+ # located with normalized_bounding_box.
4175
4049
  class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
4176
4050
  include Google::Apis::Core::Hashable
4177
4051
 
@@ -4185,15 +4059,14 @@ module Google
4185
4059
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
4186
4060
  attr_accessor :landmarks
4187
4061
 
4188
- # Normalized bounding box.
4189
- # The normalized vertex coordinates are relative to the original image.
4190
- # Range: [0, 1].
4062
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4063
+ # original image. Range: [0, 1].
4191
4064
  # Corresponds to the JSON property `normalizedBoundingBox`
4192
4065
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
4193
4066
  attr_accessor :normalized_bounding_box
4194
4067
 
4195
- # Time-offset, relative to the beginning of the video,
4196
- # corresponding to the video frame for this object.
4068
+ # Time-offset, relative to the beginning of the video, corresponding to the
4069
+ # video frame for this object.
4197
4070
  # Corresponds to the JSON property `timeOffset`
4198
4071
  # @return [String]
4199
4072
  attr_accessor :time_offset
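
A TimestampedObject ties attributes, landmarks, and a normalized bounding box to one frame through time_offset. A short sketch reading those accessors; the `attributes` accessor is assumed from context, since only the landmarks, bounding box, and offset appear in this hunk:

  require 'google/apis/videointelligence_v1'

  # Summarize what was detected on one object at a single video frame.
  def describe_timestamped_object(object)
    puts "frame at #{object.time_offset}"
    (object.attributes || []).each { |a| puts "  attribute: #{a.name}=#{a.value}" }
    (object.landmarks  || []).each { |l| puts "  landmark:  #{l.name}" }
    puts "  bounding box present: #{!object.normalized_bounding_box.nil?}"
  end
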
@@ -4252,20 +4125,19 @@ module Google
4252
4125
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
4253
4126
  include Google::Apis::Core::Hashable
4254
4127
 
4255
- # Specifies which feature is being tracked if the request contains more than
4256
- # one feature.
4128
+ # Specifies which feature is being tracked if the request contains more than one
4129
+ # feature.
4257
4130
  # Corresponds to the JSON property `feature`
4258
4131
  # @return [String]
4259
4132
  attr_accessor :feature
4260
4133
 
4261
- # Video file location in
4262
- # [Cloud Storage](https://cloud.google.com/storage/).
4134
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4263
4135
  # Corresponds to the JSON property `inputUri`
4264
4136
  # @return [String]
4265
4137
  attr_accessor :input_uri
4266
4138
 
4267
- # Approximate percentage processed thus far. Guaranteed to be
4268
- # 100 when fully processed.
4139
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
4140
+ # processed.
4269
4141
  # Corresponds to the JSON property `progressPercent`
4270
4142
  # @return [Fixnum]
4271
4143
  attr_accessor :progress_percent
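
A minimal sketch formatting one per-feature progress entry from the feature, input_uri, and progress_percent accessors shown above (the feature name and bucket path in the comment are illustrative):

  require 'google/apis/videointelligence_v1'

  # Render a one-line progress report, e.g.
  # "LABEL_DETECTION on gs://my-bucket/video.mp4: 42% complete".
  def progress_line(progress)
    "#{progress.feature} on #{progress.input_uri}: " \
      "#{progress.progress_percent || 0}% complete"
  end
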
@@ -4304,31 +4176,30 @@ module Google
4304
4176
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
4305
4177
  include Google::Apis::Core::Hashable
4306
4178
 
4307
- # The `Status` type defines a logical error model that is suitable for
4308
- # different programming environments, including REST APIs and RPC APIs. It is
4309
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
4310
- # three pieces of data: error code, error message, and error details.
4311
- # You can find out more about this error model and how to work with it in the
4312
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
4179
+ # The `Status` type defines a logical error model that is suitable for different
4180
+ # programming environments, including REST APIs and RPC APIs. It is used by [
4181
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
4182
+ # data: error code, error message, and error details. You can find out more
4183
+ # about this error model and how to work with it in the [API Design Guide](https:
4184
+ # //cloud.google.com/apis/design/errors).
4313
4185
  # Corresponds to the JSON property `error`
4314
4186
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
4315
4187
  attr_accessor :error
4316
4188
 
4317
- # Explicit content annotation (based on per-frame visual signals only).
4318
- # If no explicit content has been detected in a frame, no annotations are
4319
- # present for that frame.
4189
+ # Explicit content annotation (based on per-frame visual signals only). If no
4190
+ # explicit content has been detected in a frame, no annotations are present for
4191
+ # that frame.
4320
4192
  # Corresponds to the JSON property `explicitAnnotation`
4321
4193
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
4322
4194
  attr_accessor :explicit_annotation
4323
4195
 
4324
- # Label annotations on frame level.
4325
- # There is exactly one element for each unique label.
4196
+ # Label annotations on frame level. There is exactly one element for each unique
4197
+ # label.
4326
4198
  # Corresponds to the JSON property `frameLabelAnnotations`
4327
4199
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4328
4200
  attr_accessor :frame_label_annotations
4329
4201
 
4330
- # Video file location in
4331
- # [Cloud Storage](https://cloud.google.com/storage/).
4202
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4332
4203
  # Corresponds to the JSON property `inputUri`
4333
4204
  # @return [String]
4334
4205
  attr_accessor :input_uri
@@ -4355,11 +4226,11 @@ module Google
4355
4226
  attr_accessor :segment_label_annotations
4356
4227
 
4357
4228
  # Presence label annotations on video level or user-specified segment level.
4358
- # There is exactly one element for each unique label. Compared to the
4359
- # existing topical `segment_label_annotations`, this field presents more
4360
- # fine-grained, segment-level labels detected in video content and is made
4361
- # available only when the client sets `LabelDetectionConfig.model` to
4362
- # "builtin/latest" in the request.
4229
+ # There is exactly one element for each unique label. Compared to the existing
4230
+ # topical `segment_label_annotations`, this field presents more fine-grained,
4231
+ # segment-level labels detected in video content and is made available only when
4232
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
4233
+ # request.
4363
4234
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
4364
4235
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4365
4236
  attr_accessor :segment_presence_label_annotations
@@ -4369,17 +4240,17 @@ module Google
4369
4240
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
4370
4241
  attr_accessor :shot_annotations
4371
4242
 
4372
- # Topical label annotations on shot level.
4373
- # There is exactly one element for each unique label.
4243
+ # Topical label annotations on shot level. There is exactly one element for each
4244
+ # unique label.
4374
4245
  # Corresponds to the JSON property `shotLabelAnnotations`
4375
4246
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4376
4247
  attr_accessor :shot_label_annotations
4377
4248
 
4378
4249
  # Presence label annotations on shot level. There is exactly one element for
4379
- # each unique label. Compared to the existing topical
4380
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
4381
- # labels detected in video content and is made available only when the client
4382
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
4250
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
4251
+ # this field presents more fine-grained, shot-level labels detected in video
4252
+ # content and is made available only when the client sets `LabelDetectionConfig.
4253
+ # model` to "builtin/latest" in the request.
4383
4254
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
4384
4255
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4385
4256
  attr_accessor :shot_presence_label_annotations
@@ -4389,9 +4260,8 @@ module Google
4389
4260
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
4390
4261
  attr_accessor :speech_transcriptions
4391
4262
 
4392
- # OCR text detection and tracking.
4393
- # Annotations for list of detected text snippets. Each will have list of
4394
- # frame information associated with it.
4263
+ # OCR text detection and tracking. Annotations for list of detected text
4264
+ # snippets. Each will have list of frame information associated with it.
4395
4265
  # Corresponds to the JSON property `textAnnotations`
4396
4266
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
4397
4267
  attr_accessor :text_annotations
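
Per-video results either report an error or carry one collection per requested feature. A hedged sketch that checks the error first and then counts the collections named in the accessors above; the `message` accessor on GoogleRpcStatus is assumed from the Status description (error code, error message, error details):

  require 'google/apis/videointelligence_v1'

  # Summarize the annotation results for one input video.
  def summarize_results(results)
    return "#{results.input_uri} failed: #{results.error.message}" if results.error

    [
      "#{results.input_uri} annotated:",
      "  #{(results.segment_label_annotations || []).size} segment label(s)",
      "  #{(results.shot_label_annotations || []).size} shot label(s)",
      "  #{(results.frame_label_annotations || []).size} frame label(s)",
      "  #{(results.speech_transcriptions || []).size} speech transcription(s)",
      "  #{(results.text_annotations || []).size} text annotation(s)"
    ].join("\n")
  end
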
@@ -4423,14 +4293,14 @@ module Google
4423
4293
  class GoogleCloudVideointelligenceV1p2beta1VideoSegment
4424
4294
  include Google::Apis::Core::Hashable
4425
4295
 
4426
- # Time-offset, relative to the beginning of the video,
4427
- # corresponding to the end of the segment (inclusive).
4296
+ # Time-offset, relative to the beginning of the video, corresponding to the end
4297
+ # of the segment (inclusive).
4428
4298
  # Corresponds to the JSON property `endTimeOffset`
4429
4299
  # @return [String]
4430
4300
  attr_accessor :end_time_offset
4431
4301
 
4432
- # Time-offset, relative to the beginning of the video,
4433
- # corresponding to the start of the segment (inclusive).
4302
+ # Time-offset, relative to the beginning of the video, corresponding to the
4303
+ # start of the segment (inclusive).
4434
4304
  # Corresponds to the JSON property `startTimeOffset`
4435
4305
  # @return [String]
4436
4306
  attr_accessor :start_time_offset
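
Both offsets are duration strings relative to the start of the video, so a segment's length can be derived directly. A minimal sketch, assuming the usual '7.5s'-style encoding of the offsets and that the generated model accepts keyword arguments on .new, as google-api-client classes generally do:

  require 'google/apis/videointelligence_v1'

  # "1.5s"-style duration strings parse cleanly with String#to_f.
  def segment_duration_seconds(segment)
    segment.end_time_offset.to_f - segment.start_time_offset.to_f
  end

  segment_class = Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment
  segment = segment_class.new(start_time_offset: '2.5s', end_time_offset: '7s')
  puts segment_duration_seconds(segment) # => 4.5
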
@@ -4447,41 +4317,41 @@ module Google
4447
4317
  end
4448
4318
 
4449
4319
  # Word-specific information for recognized words. Word information is only
4450
- # included in the response when certain request parameters are set, such
4451
- # as `enable_word_time_offsets`.
4320
+ # included in the response when certain request parameters are set, such as `
4321
+ # enable_word_time_offsets`.
4452
4322
  class GoogleCloudVideointelligenceV1p2beta1WordInfo
4453
4323
  include Google::Apis::Core::Hashable
4454
4324
 
4455
4325
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4456
4326
  # indicates an estimated greater likelihood that the recognized words are
4457
- # correct. This field is set only for the top alternative.
4458
- # This field is not guaranteed to be accurate and users should not rely on it
4459
- # to be always provided.
4460
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4327
+ # correct. This field is set only for the top alternative. This field is not
4328
+ # guaranteed to be accurate and users should not rely on it to be always
4329
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4330
+ # not set.
4461
4331
  # Corresponds to the JSON property `confidence`
4462
4332
  # @return [Float]
4463
4333
  attr_accessor :confidence
4464
4334
 
4465
- # Time offset relative to the beginning of the audio, and
4466
- # corresponding to the end of the spoken word. This field is only set if
4467
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4468
- # experimental feature and the accuracy of the time offset can vary.
4335
+ # Time offset relative to the beginning of the audio, and corresponding to the
4336
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
4337
+ # true` and only in the top hypothesis. This is an experimental feature and the
4338
+ # accuracy of the time offset can vary.
4469
4339
  # Corresponds to the JSON property `endTime`
4470
4340
  # @return [String]
4471
4341
  attr_accessor :end_time
4472
4342
 
4473
- # Output only. A distinct integer value is assigned for every speaker within
4474
- # the audio. This field specifies which one of those speakers was detected to
4475
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
4476
- # and is only set if speaker diarization is enabled.
4343
+ # Output only. A distinct integer value is assigned for every speaker within the
4344
+ # audio. This field specifies which one of those speakers was detected to have
4345
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
4346
+ # only set if speaker diarization is enabled.
4477
4347
  # Corresponds to the JSON property `speakerTag`
4478
4348
  # @return [Fixnum]
4479
4349
  attr_accessor :speaker_tag
4480
4350
 
4481
- # Time offset relative to the beginning of the audio, and
4482
- # corresponding to the start of the spoken word. This field is only set if
4483
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4484
- # experimental feature and the accuracy of the time offset can vary.
4351
+ # Time offset relative to the beginning of the audio, and corresponding to the
4352
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
4353
+ # true` and only in the top hypothesis. This is an experimental feature and the
4354
+ # accuracy of the time offset can vary.
4485
4355
  # Corresponds to the JSON property `startTime`
4486
4356
  # @return [String]
4487
4357
  attr_accessor :start_time
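
When speaker diarization is enabled, every word carries a speaker_tag between 1 and the diarization speaker count, so grouping the top hypothesis' words per speaker is a one-liner. A sketch using the accessors in this hunk; the `word` accessor holding the recognized word itself is an assumption, as it is not part of the lines shown here:

  require 'google/apis/videointelligence_v1'

  # Group recognized words by speaker, e.g. { 1 => ["hello", "there"], 2 => [...] }.
  def words_by_speaker(words)
    words.group_by(&:speaker_tag)
         .transform_values { |infos| infos.map(&:word) }
  end
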
@@ -4505,9 +4375,9 @@ module Google
4505
4375
  end
4506
4376
  end
4507
4377
 
4508
- # Video annotation progress. Included in the `metadata`
4509
- # field of the `Operation` returned by the `GetOperation`
4510
- # call of the `google::longrunning::Operations` service.
4378
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
4379
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
4380
+ # service.
4511
4381
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
4512
4382
  include Google::Apis::Core::Hashable
4513
4383
 
@@ -4526,9 +4396,9 @@ module Google
4526
4396
  end
4527
4397
  end
4528
4398
 
4529
- # Video annotation response. Included in the `response`
4530
- # field of the `Operation` returned by the `GetOperation`
4531
- # call of the `google::longrunning::Operations` service.
4399
+ # Video annotation response. Included in the `response` field of the `Operation`
4400
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
4401
+ # service.
4532
4402
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
4533
4403
  include Google::Apis::Core::Hashable
4534
4404
 
@@ -4562,10 +4432,9 @@ module Google
4562
4432
  # @return [String]
4563
4433
  attr_accessor :display_name
4564
4434
 
4565
- # The resource name of the celebrity. Have the format
4566
- # `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
4567
- # kg-mid is the id in Google knowledge graph, which is unique for the
4568
- # celebrity.
4435
+ # The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
4436
+ # indicates a celebrity from preloaded gallery. kg-mid is the id in Google
4437
+ # knowledge graph, which is unique for the celebrity.
4569
4438
  # Corresponds to the JSON property `name`
4570
4439
  # @return [String]
4571
4440
  attr_accessor :name
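
Per the comment above, the resource name follows the `video-intelligence/kg-mid` format, where kg-mid is the Knowledge Graph id for the celebrity. A hedged sketch extracting that id; the Celebrity class itself is assumed, since its declaration is not part of this hunk:

  require 'google/apis/videointelligence_v1'

  # Pull the Knowledge Graph mid out of a name such as "video-intelligence/kg-mid".
  def knowledge_graph_mid(celebrity)
    celebrity.name.to_s.split('/').last
  end
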
@@ -4586,8 +4455,8 @@ module Google
4586
4455
  class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
4587
4456
  include Google::Apis::Core::Hashable
4588
4457
 
4589
- # The tracks detected from the input video, including recognized celebrities
4590
- # and other detected faces in the video.
4458
+ # The tracks detected from the input video, including recognized celebrities and
4459
+ # other detected faces in the video.
4591
4460
  # Corresponds to the JSON property `celebrityTracks`
4592
4461
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
4593
4462
  attr_accessor :celebrity_tracks
@@ -4643,14 +4512,14 @@ module Google
4643
4512
  # @return [Float]
4644
4513
  attr_accessor :confidence
4645
4514
 
4646
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
4647
- # A full list of supported type names will be provided in the document.
4515
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
4516
+ # full list of supported type names will be provided in the document.
4648
4517
  # Corresponds to the JSON property `name`
4649
4518
  # @return [String]
4650
4519
  attr_accessor :name
4651
4520
 
4652
- # Text value of the detection result. For example, the value for "HairColor"
4653
- # can be "black", "blonde", etc.
4521
+ # Text value of the detection result. For example, the value for "HairColor" can
4522
+ # be "black", "blonde", etc.
4654
4523
  # Corresponds to the JSON property `value`
4655
4524
  # @return [String]
4656
4525
  attr_accessor :value
@@ -4682,9 +4551,8 @@ module Google
4682
4551
  # @return [String]
4683
4552
  attr_accessor :name
4684
4553
 
4685
- # A vertex represents a 2D point in the image.
4686
- # NOTE: the normalized vertex coordinates are relative to the original image
4687
- # and range from 0 to 1.
4554
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4555
+ # coordinates are relative to the original image and range from 0 to 1.
4688
4556
  # Corresponds to the JSON property `point`
4689
4557
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
4690
4558
  attr_accessor :point
@@ -4710,8 +4578,7 @@ module Google
4710
4578
  # @return [String]
4711
4579
  attr_accessor :description
4712
4580
 
4713
- # Opaque entity ID. Some IDs may be available in
4714
- # [Google Knowledge Graph Search
4581
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
4715
4582
  # API](https://developers.google.com/knowledge-graph/).
4716
4583
  # Corresponds to the JSON property `entityId`
4717
4584
  # @return [String]
@@ -4734,9 +4601,9 @@ module Google
4734
4601
  end
4735
4602
  end
4736
4603
 
4737
- # Explicit content annotation (based on per-frame visual signals only).
4738
- # If no explicit content has been detected in a frame, no annotations are
4739
- # present for that frame.
4604
+ # Explicit content annotation (based on per-frame visual signals only). If no
4605
+ # explicit content has been detected in a frame, no annotations are present for
4606
+ # that frame.
4740
4607
  class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
4741
4608
  include Google::Apis::Core::Hashable
4742
4609
 
@@ -4823,10 +4690,9 @@ module Google
4823
4690
  class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
4824
4691
  include Google::Apis::Core::Hashable
4825
4692
 
4826
- # Common categories for the detected entity.
4827
- # For example, when the label is `Terrier`, the category is likely `dog`. And
4828
- # in some cases there might be more than one categories e.g., `Terrier` could
4829
- # also be a `pet`.
4693
+ # Common categories for the detected entity. For example, when the label is `
4694
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
4695
+ # than one categories e.g., `Terrier` could also be a `pet`.
4830
4696
  # Corresponds to the JSON property `categoryEntities`
4831
4697
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Entity>]
4832
4698
  attr_accessor :category_entities
@@ -4925,14 +4791,14 @@ module Google
4925
4791
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Entity]
4926
4792
  attr_accessor :entity
4927
4793
 
4928
- # All video segments where the recognized logo appears. There might be
4929
- # multiple instances of the same logo class appearing in one VideoSegment.
4794
+ # All video segments where the recognized logo appears. There might be multiple
4795
+ # instances of the same logo class appearing in one VideoSegment.
4930
4796
  # Corresponds to the JSON property `segments`
4931
4797
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
4932
4798
  attr_accessor :segments
4933
4799
 
4934
- # All logo tracks where the recognized logo appears. Each track corresponds
4935
- # to one logo instance appearing in consecutive frames.
4800
+ # All logo tracks where the recognized logo appears. Each track corresponds to
4801
+ # one logo instance appearing in consecutive frames.
4936
4802
  # Corresponds to the JSON property `tracks`
4937
4803
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Track>]
4938
4804
  attr_accessor :tracks
@@ -4949,9 +4815,8 @@ module Google
4949
4815
  end
4950
4816
  end
4951
4817
 
4952
- # Normalized bounding box.
4953
- # The normalized vertex coordinates are relative to the original image.
4954
- # Range: [0, 1].
4818
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4819
+ # original image. Range: [0, 1].
4955
4820
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
4956
4821
  include Google::Apis::Core::Hashable
4957
4822
 
@@ -4989,20 +4854,12 @@ module Google
4989
4854
  end
4990
4855
 
4991
4856
  # Normalized bounding polygon for text (that might not be aligned with axis).
4992
- # Contains list of the corner points in clockwise order starting from
4993
- # top-left corner. For example, for a rectangular bounding box:
4994
- # When the text is horizontal it might look like:
4995
- # 0----1
4996
- # | |
4997
- # 3----2
4998
- # When it's clockwise rotated 180 degrees around the top-left corner it
4999
- # becomes:
5000
- # 2----3
5001
- # | |
5002
- # 1----0
5003
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5004
- # than 0, or greater than 1 due to trignometric calculations for location of
5005
- # the box.
4857
+ # Contains list of the corner points in clockwise order starting from top-left
4858
+ # corner. For example, for a rectangular bounding box: When the text is
4859
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4860
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4861
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4862
+ # or greater than 1 due to trignometric calculations for location of the box.
5006
4863
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
5007
4864
  include Google::Apis::Core::Hashable
5008
4865
 
@@ -5021,9 +4878,8 @@ module Google
5021
4878
  end
5022
4879
  end
5023
4880
 
5024
- # A vertex represents a 2D point in the image.
5025
- # NOTE: the normalized vertex coordinates are relative to the original image
5026
- # and range from 0 to 1.
4881
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4882
+ # coordinates are relative to the original image and range from 0 to 1.
5027
4883
  class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
5028
4884
  include Google::Apis::Core::Hashable
5029
4885
 
@@ -5062,10 +4918,10 @@ module Google
5062
4918
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Entity]
5063
4919
  attr_accessor :entity
5064
4920
 
5065
- # Information corresponding to all frames where this object track appears.
5066
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
5067
- # messages in frames.
5068
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
4921
+ # Information corresponding to all frames where this object track appears. Non-
4922
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
4923
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
4924
+ # frames.
5069
4925
  # Corresponds to the JSON property `frames`
5070
4926
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
5071
4927
  attr_accessor :frames
@@ -5075,12 +4931,11 @@ module Google
5075
4931
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
5076
4932
  attr_accessor :segment
5077
4933
 
5078
- # Streaming mode ONLY.
5079
- # In streaming mode, we do not know the end time of a tracked object
5080
- # before it is completed. Hence, there is no VideoSegment info returned.
5081
- # Instead, we provide a unique identifiable integer track_id so that
5082
- # the customers can correlate the results of the ongoing
5083
- # ObjectTrackAnnotation of the same track_id over time.
4934
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
4935
+ # tracked object before it is completed. Hence, there is no VideoSegment info
4936
+ # returned. Instead, we provide a unique identifiable integer track_id so that
4937
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
4938
+ # of the same track_id over time.
5084
4939
  # Corresponds to the JSON property `trackId`
5085
4940
  # @return [Fixnum]
5086
4941
  attr_accessor :track_id
@@ -5110,9 +4965,8 @@ module Google
5110
4965
  class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
5111
4966
  include Google::Apis::Core::Hashable
5112
4967
 
5113
- # Normalized bounding box.
5114
- # The normalized vertex coordinates are relative to the original image.
5115
- # Range: [0, 1].
4968
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4969
+ # original image. Range: [0, 1].
5116
4970
  # Corresponds to the JSON property `normalizedBoundingBox`
5117
4971
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5118
4972
  attr_accessor :normalized_bounding_box
@@ -5189,10 +5043,10 @@ module Google
5189
5043
 
5190
5044
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5191
5045
  # indicates an estimated greater likelihood that the recognized words are
5192
- # correct. This field is set only for the top alternative.
5193
- # This field is not guaranteed to be accurate and users should not rely on it
5194
- # to be always provided.
5195
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
5046
+ # correct. This field is set only for the top alternative. This field is not
5047
+ # guaranteed to be accurate and users should not rely on it to be always
5048
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
5049
+ # not set.
5196
5050
  # Corresponds to the JSON property `confidence`
5197
5051
  # @return [Float]
5198
5052
  attr_accessor :confidence
@@ -5203,8 +5057,8 @@ module Google
5203
5057
  attr_accessor :transcript
5204
5058
 
5205
5059
  # Output only. A list of word-specific information for each recognized word.
5206
- # Note: When `enable_speaker_diarization` is set to true, you will see all
5207
- # the words from the beginning of the audio.
5060
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
5061
+ # words from the beginning of the audio.
5208
5062
  # Corresponds to the JSON property `words`
5209
5063
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
5210
5064
  attr_accessor :words
@@ -5225,18 +5079,17 @@ module Google
5225
5079
  class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
5226
5080
  include Google::Apis::Core::Hashable
5227
5081
 
5228
- # May contain one or more recognition hypotheses (up to the maximum specified
5229
- # in `max_alternatives`). These alternatives are ordered in terms of
5230
- # accuracy, with the top (first) alternative being the most probable, as
5231
- # ranked by the recognizer.
5082
+ # May contain one or more recognition hypotheses (up to the maximum specified in
5083
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
5084
+ # the top (first) alternative being the most probable, as ranked by the
5085
+ # recognizer.
5232
5086
  # Corresponds to the JSON property `alternatives`
5233
5087
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
5234
5088
  attr_accessor :alternatives
5235
5089
 
5236
5090
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
5237
- # language tag of
5238
- # the language in this result. This language code was detected to have the
5239
- # most likelihood of being spoken in the audio.
5091
+ # language tag of the language in this result. This language code was detected
5092
+ # to have the most likelihood of being spoken in the audio.
5240
5093
  # Corresponds to the JSON property `languageCode`
5241
5094
  # @return [String]
5242
5095
  attr_accessor :language_code
@@ -5252,32 +5105,31 @@ module Google
5252
5105
  end
5253
5106
  end
5254
5107
 
5255
- # `StreamingAnnotateVideoResponse` is the only message returned to the client
5256
- # by `StreamingAnnotateVideo`. A series of zero or more
5257
- # `StreamingAnnotateVideoResponse` messages are streamed back to the client.
5108
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
5109
+ # `StreamingAnnotateVideo`. A series of zero or more `
5110
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
5258
5111
  class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
5259
5112
  include Google::Apis::Core::Hashable
5260
5113
 
5261
- # Streaming annotation results corresponding to a portion of the video
5262
- # that is currently being processed.
5114
+ # Streaming annotation results corresponding to a portion of the video that is
5115
+ # currently being processed.
5263
5116
  # Corresponds to the JSON property `annotationResults`
5264
5117
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
5265
5118
  attr_accessor :annotation_results
5266
5119
 
5267
- # Google Cloud Storage URI that stores annotation results of one
5268
- # streaming session in JSON format.
5269
- # It is the annotation_result_storage_directory
5270
- # from the request followed by '/cloud_project_number-session_id'.
5120
+ # Google Cloud Storage URI that stores annotation results of one streaming
5121
+ # session in JSON format. It is the annotation_result_storage_directory from the
5122
+ # request followed by '/cloud_project_number-session_id'.
5271
5123
  # Corresponds to the JSON property `annotationResultsUri`
5272
5124
  # @return [String]
5273
5125
  attr_accessor :annotation_results_uri
5274
5126
 
5275
- # The `Status` type defines a logical error model that is suitable for
5276
- # different programming environments, including REST APIs and RPC APIs. It is
5277
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5278
- # three pieces of data: error code, error message, and error details.
5279
- # You can find out more about this error model and how to work with it in the
5280
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5127
+ # The `Status` type defines a logical error model that is suitable for different
5128
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5129
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5130
+ # data: error code, error message, and error details. You can find out more
5131
+ # about this error model and how to work with it in the [API Design Guide](https:
5132
+ # //cloud.google.com/apis/design/errors).
5281
5133
  # Corresponds to the JSON property `error`
5282
5134
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
5283
5135
  attr_accessor :error
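
Each streamed response may carry an error, partial annotation results, and the Cloud Storage URI where the session's JSON output is written. A minimal sketch of handling one response through the accessors above; the `message` accessor on GoogleRpcStatus is assumed from the Status description:

  require 'google/apis/videointelligence_v1'

  # Handle one StreamingAnnotateVideoResponse from a streaming session.
  def handle_streaming_response(response)
    if response.error
      warn "streaming error: #{response.error.message}"
    elsif response.annotation_results
      puts 'partial results for the portion of video currently being processed'
    end
    # Per the comment above, this is annotation_result_storage_directory from the
    # request followed by '/cloud_project_number-session_id'.
    puts "session output: #{response.annotation_results_uri}" if response.annotation_results_uri
  end
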
@@ -5294,14 +5146,14 @@ module Google
5294
5146
  end
5295
5147
  end
5296
5148
 
5297
- # Streaming annotation results corresponding to a portion of the video
5298
- # that is currently being processed.
5149
+ # Streaming annotation results corresponding to a portion of the video that is
5150
+ # currently being processed.
5299
5151
  class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
5300
5152
  include Google::Apis::Core::Hashable
5301
5153
 
5302
- # Explicit content annotation (based on per-frame visual signals only).
5303
- # If no explicit content has been detected in a frame, no annotations are
5304
- # present for that frame.
5154
+ # Explicit content annotation (based on per-frame visual signals only). If no
5155
+ # explicit content has been detected in a frame, no annotations are present for
5156
+ # that frame.
5305
5157
  # Corresponds to the JSON property `explicitAnnotation`
5306
5158
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5307
5159
  attr_accessor :explicit_annotation
@@ -5367,27 +5219,19 @@ module Google
5367
5219
  end
5368
5220
  end
5369
5221
 
5370
- # Video frame level annotation results for text annotation (OCR).
5371
- # Contains information regarding timestamp and bounding box locations for the
5372
- # frames containing detected OCR text snippets.
5222
+ # Video frame level annotation results for text annotation (OCR). Contains
5223
+ # information regarding timestamp and bounding box locations for the frames
5224
+ # containing detected OCR text snippets.
5373
5225
  class GoogleCloudVideointelligenceV1p3beta1TextFrame
5374
5226
  include Google::Apis::Core::Hashable
5375
5227
 
5376
5228
  # Normalized bounding polygon for text (that might not be aligned with axis).
5377
- # Contains list of the corner points in clockwise order starting from
5378
- # top-left corner. For example, for a rectangular bounding box:
5379
- # When the text is horizontal it might look like:
5380
- # 0----1
5381
- # | |
5382
- # 3----2
5383
- # When it's clockwise rotated 180 degrees around the top-left corner it
5384
- # becomes:
5385
- # 2----3
5386
- # | |
5387
- # 1----0
5388
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5389
- # than 0, or greater than 1 due to trignometric calculations for location of
5390
- # the box.
5229
+ # Contains list of the corner points in clockwise order starting from top-left
5230
+ # corner. For example, for a rectangular bounding box: When the text is
5231
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5232
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5233
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5234
+ # or greater than 1 due to trignometric calculations for location of the box.
5391
5235
  # Corresponds to the JSON property `rotatedBoundingBox`
5392
5236
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
5393
5237
  attr_accessor :rotated_bounding_box
@@ -5440,9 +5284,8 @@ module Google
5440
5284
  end
5441
5285
  end
5442
5286
 
5443
- # For tracking related features.
5444
- # An object at time_offset with attributes, and located with
5445
- # normalized_bounding_box.
5287
+ # For tracking related features. An object at time_offset with attributes, and
5288
+ # located with normalized_bounding_box.
5446
5289
  class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
5447
5290
  include Google::Apis::Core::Hashable
5448
5291
 
@@ -5456,15 +5299,14 @@ module Google
5456
5299
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
5457
5300
  attr_accessor :landmarks
5458
5301
 
5459
- # Normalized bounding box.
5460
- # The normalized vertex coordinates are relative to the original image.
5461
- # Range: [0, 1].
5302
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
5303
+ # original image. Range: [0, 1].
5462
5304
  # Corresponds to the JSON property `normalizedBoundingBox`
5463
5305
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5464
5306
  attr_accessor :normalized_bounding_box
5465
5307
 
5466
- # Time-offset, relative to the beginning of the video,
5467
- # corresponding to the video frame for this object.
5308
+ # Time-offset, relative to the beginning of the video, corresponding to the
5309
+ # video frame for this object.
5468
5310
  # Corresponds to the JSON property `timeOffset`
5469
5311
  # @return [String]
5470
5312
  attr_accessor :time_offset
@@ -5523,20 +5365,19 @@ module Google
5523
5365
  class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
5524
5366
  include Google::Apis::Core::Hashable
5525
5367
 
5526
- # Specifies which feature is being tracked if the request contains more than
5527
- # one feature.
5368
+ # Specifies which feature is being tracked if the request contains more than one
5369
+ # feature.
5528
5370
  # Corresponds to the JSON property `feature`
5529
5371
  # @return [String]
5530
5372
  attr_accessor :feature
5531
5373
 
5532
- # Video file location in
5533
- # [Cloud Storage](https://cloud.google.com/storage/).
5374
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5534
5375
  # Corresponds to the JSON property `inputUri`
5535
5376
  # @return [String]
5536
5377
  attr_accessor :input_uri
5537
5378
 
5538
- # Approximate percentage processed thus far. Guaranteed to be
5539
- # 100 when fully processed.
5379
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
5380
+ # processed.
5540
5381
  # Corresponds to the JSON property `progressPercent`
5541
5382
  # @return [Fixnum]
5542
5383
  attr_accessor :progress_percent
@@ -5580,19 +5421,19 @@ module Google
5580
5421
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
5581
5422
  attr_accessor :celebrity_recognition_annotations
5582
5423
 
5583
- # The `Status` type defines a logical error model that is suitable for
5584
- # different programming environments, including REST APIs and RPC APIs. It is
5585
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5586
- # three pieces of data: error code, error message, and error details.
5587
- # You can find out more about this error model and how to work with it in the
5588
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5424
+ # The `Status` type defines a logical error model that is suitable for different
5425
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5426
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5427
+ # data: error code, error message, and error details. You can find out more
5428
+ # about this error model and how to work with it in the [API Design Guide](https:
5429
+ # //cloud.google.com/apis/design/errors).
5589
5430
  # Corresponds to the JSON property `error`
5590
5431
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
5591
5432
  attr_accessor :error
5592
5433
 
5593
- # Explicit content annotation (based on per-frame visual signals only).
5594
- # If no explicit content has been detected in a frame, no annotations are
5595
- # present for that frame.
5434
+ # Explicit content annotation (based on per-frame visual signals only). If no
5435
+ # explicit content has been detected in a frame, no annotations are present for
5436
+ # that frame.
5596
5437
  # Corresponds to the JSON property `explicitAnnotation`
5597
5438
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5598
5439
  attr_accessor :explicit_annotation
@@ -5602,14 +5443,13 @@ module Google
5602
5443
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
5603
5444
  attr_accessor :face_detection_annotations
5604
5445
 
5605
- # Label annotations on frame level.
5606
- # There is exactly one element for each unique label.
5446
+ # Label annotations on frame level. There is exactly one element for each unique
5447
+ # label.
5607
5448
  # Corresponds to the JSON property `frameLabelAnnotations`
5608
5449
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5609
5450
  attr_accessor :frame_label_annotations
5610
5451
 
5611
- # Video file location in
5612
- # [Cloud Storage](https://cloud.google.com/storage/).
5452
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5613
5453
  # Corresponds to the JSON property `inputUri`
5614
5454
  # @return [String]
5615
5455
  attr_accessor :input_uri
@@ -5641,11 +5481,11 @@ module Google
5641
5481
  attr_accessor :segment_label_annotations
5642
5482
 
5643
5483
  # Presence label annotations on video level or user-specified segment level.
5644
- # There is exactly one element for each unique label. Compared to the
5645
- # existing topical `segment_label_annotations`, this field presents more
5646
- # fine-grained, segment-level labels detected in video content and is made
5647
- # available only when the client sets `LabelDetectionConfig.model` to
5648
- # "builtin/latest" in the request.
5484
+ # There is exactly one element for each unique label. Compared to the existing
5485
+ # topical `segment_label_annotations`, this field presents more fine-grained,
5486
+ # segment-level labels detected in video content and is made available only when
5487
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
5488
+ # request.
5649
5489
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
5650
5490
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5651
5491
  attr_accessor :segment_presence_label_annotations
@@ -5655,17 +5495,17 @@ module Google
5655
5495
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
5656
5496
  attr_accessor :shot_annotations
5657
5497
 
5658
- # Topical label annotations on shot level.
5659
- # There is exactly one element for each unique label.
5498
+ # Topical label annotations on shot level. There is exactly one element for each
5499
+ # unique label.
5660
5500
  # Corresponds to the JSON property `shotLabelAnnotations`
5661
5501
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5662
5502
  attr_accessor :shot_label_annotations
5663
5503
 
5664
5504
  # Presence label annotations on shot level. There is exactly one element for
5665
- # each unique label. Compared to the existing topical
5666
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
5667
- # labels detected in video content and is made available only when the client
5668
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
5505
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
5506
+ # this field presents more fine-grained, shot-level labels detected in video
5507
+ # content and is made available only when the client sets `LabelDetectionConfig.
5508
+ # model` to "builtin/latest" in the request.
5669
5509
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
5670
5510
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5671
5511
  attr_accessor :shot_presence_label_annotations
@@ -5675,9 +5515,8 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
  attr_accessor :speech_transcriptions
 
- # OCR text detection and tracking.
- # Annotations for list of detected text snippets. Each will have list of
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
  # Corresponds to the JSON property `textAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
  attr_accessor :text_annotations
@@ -5712,14 +5551,14 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1VideoSegment
  include Google::Apis::Core::Hashable
 
- # Time-offset, relative to the beginning of the video,
- # corresponding to the end of the segment (inclusive).
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
  # Corresponds to the JSON property `endTimeOffset`
  # @return [String]
  attr_accessor :end_time_offset
 
- # Time-offset, relative to the beginning of the video,
- # corresponding to the start of the segment (inclusive).
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
  # Corresponds to the JSON property `startTimeOffset`
  # @return [String]
  attr_accessor :start_time_offset
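# Editor's sketch: shot annotations are VideoSegment objects whose offsets are
# durations relative to the start of the video, serialized as strings such as
# "12.500s". `results` stands for one hypothetical VideoAnnotationResults entry
# taken from a finished operation's response.
(results.shot_annotations || []).each do |segment|
  puts "shot: #{segment.start_time_offset} -> #{segment.end_time_offset}"
end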
@@ -5736,41 +5575,41 @@ module Google
  end
 
  # Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- # as `enable_word_time_offsets`.
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
  class GoogleCloudVideointelligenceV1p3beta1WordInfo
  include Google::Apis::Core::Hashable
 
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- # This field is not guaranteed to be accurate and users should not rely on it
- # to be always provided.
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
  # Corresponds to the JSON property `confidence`
  # @return [Float]
  attr_accessor :confidence
 
- # Time offset relative to the beginning of the audio, and
- # corresponding to the end of the spoken word. This field is only set if
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- # experimental feature and the accuracy of the time offset can vary.
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `endTime`
  # @return [String]
  attr_accessor :end_time
 
- # Output only. A distinct integer value is assigned for every speaker within
- # the audio. This field specifies which one of those speakers was detected to
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
- # and is only set if speaker diarization is enabled.
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
  # Corresponds to the JSON property `speakerTag`
  # @return [Fixnum]
  attr_accessor :speaker_tag
 
- # Time offset relative to the beginning of the audio, and
- # corresponding to the start of the spoken word. This field is only set if
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
- # experimental feature and the accuracy of the time offset can vary.
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `startTime`
  # @return [String]
  attr_accessor :start_time
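# Editor's sketch: word-level timing and speaker tags appear only when the request
# enabled them (`enable_word_time_offsets`, speaker diarization). `results` is the
# same hypothetical VideoAnnotationResults object as above; the `alternatives` and
# `words` accessors are the usual speech-transcription surface and are assumed here.
(results.speech_transcriptions || []).each do |transcription|
  top = transcription.alternatives&.first          # word info is only on the top hypothesis
  next unless top
  (top.words || []).each do |word|
    puts "#{word.start_time}-#{word.end_time} speaker=#{word.speaker_tag} confidence=#{word.confidence}"
  end
end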
@@ -5837,47 +5676,45 @@ module Google
  class GoogleLongrunningOperation
  include Google::Apis::Core::Hashable
 
- # If the value is `false`, it means the operation is still in progress.
- # If `true`, the operation is completed, and either `error` or `response` is
- # available.
+ # If the value is `false`, it means the operation is still in progress. If `true`
+ # , the operation is completed, and either `error` or `response` is available.
  # Corresponds to the JSON property `done`
  # @return [Boolean]
  attr_accessor :done
  alias_method :done?, :done
 
- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
  attr_accessor :error
 
- # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
  # Corresponds to the JSON property `metadata`
  # @return [Hash<String,Object>]
  attr_accessor :metadata
 
  # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- # `name` should be a resource name ending with `operations/`unique_id``.
+ # originally returns it. If you use the default HTTP mapping, the `name` should
+ # be a resource name ending with `operations/`unique_id``.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name
 
- # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is `google.
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+ # the response should be the resource. For other methods, the response should
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
+ # example, if the original method name is `TakeSnapshot()`, the inferred
+ # response type is `TakeSnapshotResponse`.
  # Corresponds to the JSON property `response`
  # @return [Hash<String,Object>]
  attr_accessor :response
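# Editor's sketch of consuming a GoogleLongrunningOperation as documented above:
# once `done?` is true, either `error` (a GoogleRpcStatus) or `response` is set.
# `operation` is the object returned by annotate_video (or re-fetched through the
# service's operations.get call, which is not shown here).
if operation.done?
  if operation.error
    warn "operation #{operation.name} failed: #{operation.error.code} #{operation.error.message}"
  else
    puts "metadata: #{operation.metadata.inspect}"
    puts "response: #{operation.response.inspect}"   # Hash carrying the AnnotateVideoResponse payload
  end
else
  puts "operation #{operation.name} is still running"
end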
@@ -5896,13 +5733,11 @@ module Google
  end
  end
 
- # A generic empty message that you can re-use to avoid defining duplicated
- # empty messages in your APIs. A typical example is to use it as the request
- # or the response type of an API method. For instance:
- # service Foo `
- # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
- # `
- # The JSON representation for `Empty` is empty JSON object ````.
+ # A generic empty message that you can re-use to avoid defining duplicated empty
+ # messages in your APIs. A typical example is to use it as the request or the
+ # response type of an API method. For instance: service Foo ` rpc Bar(google.
+ # protobuf.Empty) returns (google.protobuf.Empty); ` The JSON representation for
+ # `Empty` is empty JSON object ````.
  class GoogleProtobufEmpty
  include Google::Apis::Core::Hashable
 
@@ -5915,12 +5750,12 @@ module Google
  end
  end
 
- # The `Status` type defines a logical error model that is suitable for
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  class GoogleRpcStatus
  include Google::Apis::Core::Hashable
 
@@ -5929,15 +5764,15 @@ module Google
  # @return [Fixnum]
  attr_accessor :code
 
- # A list of messages that carry the error details. There is a common set of
+ # A list of messages that carry the error details. There is a common set of
  # message types for APIs to use.
  # Corresponds to the JSON property `details`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :details
 
- # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message