google-api-client 0.43.0 → 0.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (696)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +218 -0
  3. data/docs/oauth-server.md +4 -6
  4. data/generated/google/apis/accessapproval_v1.rb +1 -1
  5. data/generated/google/apis/accessapproval_v1/classes.rb +51 -86
  6. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  7. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  8. data/generated/google/apis/accesscontextmanager_v1/classes.rb +198 -236
  9. data/generated/google/apis/accesscontextmanager_v1/service.rb +128 -171
  10. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  11. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  12. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  13. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  14. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +17 -6
  15. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  16. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  17. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +47 -2
  18. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +18 -0
  19. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  20. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  21. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  22. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  23. data/generated/google/apis/admin_directory_v1.rb +6 -8
  24. data/generated/google/apis/admin_directory_v1/classes.rb +224 -243
  25. data/generated/google/apis/admin_directory_v1/representations.rb +14 -40
  26. data/generated/google/apis/admin_directory_v1/service.rb +475 -1026
  27. data/generated/google/apis/admin_reports_v1.rb +6 -5
  28. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  29. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  30. data/generated/google/apis/admob_v1.rb +1 -1
  31. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  32. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  33. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  34. data/generated/google/apis/androidmanagement_v1/classes.rb +95 -59
  35. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  36. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  37. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  38. data/generated/google/apis/apigee_v1.rb +6 -7
  39. data/generated/google/apis/apigee_v1/classes.rb +205 -75
  40. data/generated/google/apis/apigee_v1/representations.rb +51 -0
  41. data/generated/google/apis/apigee_v1/service.rb +133 -34
  42. data/generated/google/apis/appengine_v1.rb +1 -1
  43. data/generated/google/apis/appengine_v1/classes.rb +45 -35
  44. data/generated/google/apis/appengine_v1/representations.rb +2 -0
  45. data/generated/google/apis/appengine_v1/service.rb +38 -47
  46. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  47. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  48. data/generated/google/apis/appengine_v1beta.rb +1 -1
  49. data/generated/google/apis/appengine_v1beta/classes.rb +45 -35
  50. data/generated/google/apis/appengine_v1beta/representations.rb +2 -0
  51. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  52. data/generated/google/apis/appsmarket_v2.rb +1 -1
  53. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  54. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  55. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +235 -337
  56. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  57. data/generated/google/apis/bigquery_v2.rb +1 -1
  58. data/generated/google/apis/bigquery_v2/classes.rb +355 -553
  59. data/generated/google/apis/bigquery_v2/representations.rb +1 -0
  60. data/generated/google/apis/bigquery_v2/service.rb +32 -40
  61. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  62. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  63. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  64. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  65. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  66. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  67. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  68. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  69. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  70. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  71. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  72. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  73. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  74. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  75. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  76. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  77. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  78. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  79. data/generated/google/apis/bigtableadmin_v1/classes.rb +50 -0
  80. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  81. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  82. data/generated/google/apis/bigtableadmin_v2/classes.rb +50 -0
  83. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  84. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  85. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  86. data/generated/google/apis/binaryauthorization_v1/classes.rb +239 -354
  87. data/generated/google/apis/binaryauthorization_v1/service.rb +74 -89
  88. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  89. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +239 -354
  90. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +74 -89
  91. data/generated/google/apis/calendar_v3.rb +1 -1
  92. data/generated/google/apis/chat_v1.rb +1 -1
  93. data/generated/google/apis/chat_v1/classes.rb +90 -115
  94. data/generated/google/apis/chat_v1/service.rb +30 -42
  95. data/generated/google/apis/civicinfo_v2.rb +1 -1
  96. data/generated/google/apis/cloudasset_v1.rb +1 -1
  97. data/generated/google/apis/cloudasset_v1/classes.rb +712 -1039
  98. data/generated/google/apis/cloudasset_v1/service.rb +125 -167
  99. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  100. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  101. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  102. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  103. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  104. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  105. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  106. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +220 -276
  107. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  108. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  109. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  110. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  111. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  112. data/generated/google/apis/cloudbilling_v1/classes.rb +284 -445
  113. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  114. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  115. data/generated/google/apis/cloudbuild_v1/classes.rb +291 -343
  116. data/generated/google/apis/cloudbuild_v1/representations.rb +1 -0
  117. data/generated/google/apis/cloudbuild_v1/service.rb +48 -63
  118. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  119. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +283 -329
  120. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +1 -0
  121. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  122. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  123. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +269 -313
  124. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +1 -0
  125. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  126. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  127. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  128. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  129. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  130. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  131. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  132. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  133. data/generated/google/apis/cloudfunctions_v1/classes.rb +323 -493
  134. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  135. data/generated/google/apis/cloudidentity_v1.rb +1 -1
  136. data/generated/google/apis/cloudidentity_v1/classes.rb +625 -75
  137. data/generated/google/apis/cloudidentity_v1/representations.rb +203 -0
  138. data/generated/google/apis/cloudidentity_v1/service.rb +43 -61
  139. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  140. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1045 -317
  141. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +331 -22
  142. data/generated/google/apis/cloudidentity_v1beta1/service.rb +742 -96
  143. data/generated/google/apis/cloudiot_v1.rb +1 -1
  144. data/generated/google/apis/cloudiot_v1/classes.rb +263 -373
  145. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  146. data/generated/google/apis/cloudkms_v1.rb +1 -1
  147. data/generated/google/apis/cloudkms_v1/classes.rb +502 -692
  148. data/generated/google/apis/cloudkms_v1/representations.rb +17 -0
  149. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  150. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  151. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  152. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  153. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  154. data/generated/google/apis/cloudresourcemanager_v1/service.rb +1 -1
  155. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  156. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +1 -1
  157. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  158. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  159. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  160. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  161. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  162. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  163. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  164. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  165. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  166. data/generated/google/apis/cloudsearch_v1/classes.rb +650 -781
  167. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  168. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  169. data/generated/google/apis/cloudshell_v1.rb +1 -1
  170. data/generated/google/apis/cloudshell_v1/classes.rb +36 -227
  171. data/generated/google/apis/cloudshell_v1/representations.rb +0 -67
  172. data/generated/google/apis/cloudshell_v1/service.rb +21 -25
  173. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  174. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  175. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  176. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  177. data/generated/google/apis/cloudtasks_v2/classes.rb +605 -933
  178. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  179. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  180. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +602 -964
  181. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  182. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  183. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +609 -938
  184. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  185. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  186. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  187. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  188. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  189. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  190. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  191. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  192. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  193. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  194. data/generated/google/apis/composer_v1.rb +1 -1
  195. data/generated/google/apis/composer_v1/classes.rb +190 -242
  196. data/generated/google/apis/composer_v1/service.rb +79 -150
  197. data/generated/google/apis/composer_v1beta1.rb +1 -1
  198. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  199. data/generated/google/apis/composer_v1beta1/service.rb +92 -179
  200. data/generated/google/apis/compute_alpha.rb +1 -1
  201. data/generated/google/apis/compute_alpha/classes.rb +681 -127
  202. data/generated/google/apis/compute_alpha/representations.rb +110 -6
  203. data/generated/google/apis/compute_alpha/service.rb +695 -692
  204. data/generated/google/apis/compute_beta.rb +1 -1
  205. data/generated/google/apis/compute_beta/classes.rb +570 -70
  206. data/generated/google/apis/compute_beta/representations.rb +112 -1
  207. data/generated/google/apis/compute_beta/service.rb +608 -605
  208. data/generated/google/apis/compute_v1.rb +1 -1
  209. data/generated/google/apis/compute_v1/classes.rb +977 -85
  210. data/generated/google/apis/compute_v1/representations.rb +372 -0
  211. data/generated/google/apis/compute_v1/service.rb +747 -15
  212. data/generated/google/apis/container_v1.rb +1 -1
  213. data/generated/google/apis/container_v1/classes.rb +915 -965
  214. data/generated/google/apis/container_v1/representations.rb +53 -0
  215. data/generated/google/apis/container_v1/service.rb +435 -502
  216. data/generated/google/apis/container_v1beta1.rb +1 -1
  217. data/generated/google/apis/container_v1beta1/classes.rb +1021 -1043
  218. data/generated/google/apis/container_v1beta1/representations.rb +70 -0
  219. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  220. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  221. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +456 -596
  222. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  223. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  224. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +454 -613
  225. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  226. data/generated/google/apis/content_v2.rb +1 -1
  227. data/generated/google/apis/content_v2/classes.rb +3 -1
  228. data/generated/google/apis/content_v2_1.rb +1 -1
  229. data/generated/google/apis/content_v2_1/classes.rb +93 -2
  230. data/generated/google/apis/content_v2_1/representations.rb +34 -0
  231. data/generated/google/apis/content_v2_1/service.rb +53 -2
  232. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  233. data/generated/google/apis/datacatalog_v1beta1/classes.rb +382 -573
  234. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  235. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  236. data/generated/google/apis/dataflow_v1b3/classes.rb +1015 -973
  237. data/generated/google/apis/dataflow_v1b3/representations.rb +115 -0
  238. data/generated/google/apis/dataflow_v1b3/service.rb +299 -257
  239. data/generated/google/apis/datafusion_v1.rb +5 -8
  240. data/generated/google/apis/datafusion_v1/classes.rb +268 -397
  241. data/generated/google/apis/datafusion_v1/representations.rb +3 -0
  242. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  243. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  244. data/generated/google/apis/datafusion_v1beta1/classes.rb +268 -397
  245. data/generated/google/apis/datafusion_v1beta1/representations.rb +3 -0
  246. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  247. data/generated/google/apis/dataproc_v1.rb +1 -1
  248. data/generated/google/apis/dataproc_v1/classes.rb +37 -4
  249. data/generated/google/apis/dataproc_v1/representations.rb +16 -0
  250. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  251. data/generated/google/apis/dataproc_v1beta2/classes.rb +56 -0
  252. data/generated/google/apis/dataproc_v1beta2/representations.rb +31 -0
  253. data/generated/google/apis/datastore_v1.rb +1 -1
  254. data/generated/google/apis/datastore_v1/classes.rb +330 -472
  255. data/generated/google/apis/datastore_v1/service.rb +52 -63
  256. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  257. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  258. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  259. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  260. data/generated/google/apis/datastore_v1beta3/classes.rb +255 -371
  261. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  262. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  263. data/generated/google/apis/dfareporting_v3_3/classes.rb +326 -339
  264. data/generated/google/apis/dfareporting_v3_3/representations.rb +42 -0
  265. data/generated/google/apis/dfareporting_v3_3/service.rb +673 -1286
  266. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  267. data/generated/google/apis/dfareporting_v3_4/classes.rb +348 -350
  268. data/generated/google/apis/dfareporting_v3_4/representations.rb +43 -0
  269. data/generated/google/apis/dfareporting_v3_4/service.rb +708 -1285
  270. data/generated/google/apis/dialogflow_v2.rb +1 -1
  271. data/generated/google/apis/dialogflow_v2/classes.rb +84 -44
  272. data/generated/google/apis/dialogflow_v2/representations.rb +52 -15
  273. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  274. data/generated/google/apis/dialogflow_v2beta1/classes.rb +84 -44
  275. data/generated/google/apis/dialogflow_v2beta1/representations.rb +52 -15
  276. data/generated/google/apis/dialogflow_v2beta1/service.rb +37 -0
  277. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → dialogflow_v3beta1.rb} +13 -10
  278. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8183 -0
  279. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3459 -0
  280. data/generated/google/apis/dialogflow_v3beta1/service.rb +2812 -0
  281. data/generated/google/apis/displayvideo_v1.rb +1 -1
  282. data/generated/google/apis/displayvideo_v1/classes.rb +55 -8
  283. data/generated/google/apis/displayvideo_v1/representations.rb +5 -0
  284. data/generated/google/apis/displayvideo_v1/service.rb +48 -36
  285. data/generated/google/apis/dlp_v2.rb +1 -1
  286. data/generated/google/apis/dlp_v2/classes.rb +1076 -1302
  287. data/generated/google/apis/dlp_v2/service.rb +962 -905
  288. data/generated/google/apis/dns_v1.rb +1 -1
  289. data/generated/google/apis/dns_v1/classes.rb +175 -198
  290. data/generated/google/apis/dns_v1/service.rb +82 -97
  291. data/generated/google/apis/dns_v1beta2.rb +1 -1
  292. data/generated/google/apis/dns_v1beta2/classes.rb +180 -205
  293. data/generated/google/apis/dns_v1beta2/service.rb +82 -97
  294. data/generated/google/apis/docs_v1.rb +1 -1
  295. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  296. data/generated/google/apis/docs_v1/service.rb +17 -22
  297. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  298. data/generated/google/apis/documentai_v1beta2/classes.rb +1186 -810
  299. data/generated/google/apis/documentai_v1beta2/representations.rb +303 -0
  300. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  301. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  302. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  303. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  304. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  305. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +11 -18
  306. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  307. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  308. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  309. data/generated/google/apis/drive_v2.rb +1 -1
  310. data/generated/google/apis/drive_v2/classes.rb +14 -6
  311. data/generated/google/apis/drive_v2/representations.rb +1 -0
  312. data/generated/google/apis/drive_v2/service.rb +79 -15
  313. data/generated/google/apis/drive_v3.rb +1 -1
  314. data/generated/google/apis/drive_v3/classes.rb +14 -6
  315. data/generated/google/apis/drive_v3/representations.rb +1 -0
  316. data/generated/google/apis/drive_v3/service.rb +59 -11
  317. data/generated/google/apis/file_v1.rb +1 -1
  318. data/generated/google/apis/file_v1/classes.rb +154 -173
  319. data/generated/google/apis/file_v1/service.rb +43 -52
  320. data/generated/google/apis/file_v1beta1.rb +1 -1
  321. data/generated/google/apis/file_v1beta1/classes.rb +334 -193
  322. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  323. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  324. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  325. data/generated/google/apis/firebase_v1beta1/classes.rb +25 -47
  326. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  327. data/generated/google/apis/firebase_v1beta1/service.rb +8 -1
  328. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  329. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +26 -0
  330. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +15 -0
  331. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  332. data/generated/google/apis/firebaseml_v1beta2/classes.rb +0 -8
  333. data/generated/google/apis/firebaseml_v1beta2/representations.rb +0 -1
  334. data/generated/google/apis/firebaserules_v1.rb +1 -1
  335. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  336. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  337. data/generated/google/apis/firestore_v1.rb +1 -1
  338. data/generated/google/apis/firestore_v1/classes.rb +402 -498
  339. data/generated/google/apis/firestore_v1/service.rb +165 -201
  340. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  341. data/generated/google/apis/firestore_v1beta1/classes.rb +334 -409
  342. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  343. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  344. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  345. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  346. data/generated/google/apis/fitness_v1.rb +85 -0
  347. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  348. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  349. data/generated/google/apis/fitness_v1/service.rb +626 -0
  350. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  351. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  352. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  353. data/generated/google/apis/games_management_v1management.rb +2 -3
  354. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  355. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  356. data/generated/google/apis/games_v1.rb +2 -3
  357. data/generated/google/apis/games_v1/classes.rb +76 -83
  358. data/generated/google/apis/games_v1/representations.rb +2 -0
  359. data/generated/google/apis/games_v1/service.rb +84 -90
  360. data/generated/google/apis/genomics_v1.rb +1 -1
  361. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  362. data/generated/google/apis/genomics_v1/service.rb +28 -43
  363. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  364. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  365. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  366. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  367. data/generated/google/apis/genomics_v2alpha1/classes.rb +252 -275
  368. data/generated/google/apis/genomics_v2alpha1/representations.rb +1 -0
  369. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  370. data/generated/google/apis/gmail_v1.rb +1 -1
  371. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  372. data/generated/google/apis/gmail_v1/service.rb +4 -3
  373. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  374. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +1 -1
  375. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  376. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  377. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  378. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  379. data/generated/google/apis/healthcare_v1.rb +1 -1
  380. data/generated/google/apis/healthcare_v1/classes.rb +563 -826
  381. data/generated/google/apis/healthcare_v1/service.rb +675 -853
  382. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  383. data/generated/google/apis/healthcare_v1beta1/classes.rb +828 -1102
  384. data/generated/google/apis/healthcare_v1beta1/representations.rb +20 -0
  385. data/generated/google/apis/healthcare_v1beta1/service.rb +895 -1139
  386. data/generated/google/apis/homegraph_v1.rb +1 -1
  387. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  388. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  389. data/generated/google/apis/iam_v1.rb +5 -2
  390. data/generated/google/apis/iam_v1/classes.rb +388 -592
  391. data/generated/google/apis/iam_v1/service.rb +429 -555
  392. data/generated/google/apis/iamcredentials_v1.rb +4 -2
  393. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  394. data/generated/google/apis/iamcredentials_v1/service.rb +15 -13
  395. data/generated/google/apis/iap_v1.rb +1 -1
  396. data/generated/google/apis/iap_v1/classes.rb +246 -355
  397. data/generated/google/apis/iap_v1/service.rb +61 -71
  398. data/generated/google/apis/iap_v1beta1.rb +1 -1
  399. data/generated/google/apis/iap_v1beta1/classes.rb +157 -254
  400. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  401. data/generated/google/apis/indexing_v3.rb +1 -1
  402. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  403. data/generated/google/apis/kgsearch_v1.rb +1 -1
  404. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  405. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  406. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  407. data/generated/google/apis/lifesciences_v2beta/classes.rb +262 -290
  408. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  409. data/generated/google/apis/localservices_v1.rb +31 -0
  410. data/generated/google/apis/localservices_v1/classes.rb +419 -0
  411. data/generated/google/apis/localservices_v1/representations.rb +172 -0
  412. data/generated/google/apis/localservices_v1/service.rb +199 -0
  413. data/generated/google/apis/logging_v2.rb +1 -1
  414. data/generated/google/apis/logging_v2/classes.rb +174 -214
  415. data/generated/google/apis/logging_v2/representations.rb +15 -0
  416. data/generated/google/apis/logging_v2/service.rb +1017 -584
  417. data/generated/google/apis/manufacturers_v1.rb +1 -1
  418. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  419. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  420. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  421. data/generated/google/apis/memcache_v1beta2/classes.rb +170 -249
  422. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  423. data/generated/google/apis/memcache_v1beta2/service.rb +58 -71
  424. data/generated/google/apis/ml_v1.rb +1 -1
  425. data/generated/google/apis/ml_v1/classes.rb +949 -1144
  426. data/generated/google/apis/ml_v1/representations.rb +64 -0
  427. data/generated/google/apis/ml_v1/service.rb +194 -253
  428. data/generated/google/apis/monitoring_v1.rb +1 -1
  429. data/generated/google/apis/monitoring_v1/classes.rb +103 -26
  430. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  431. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  432. data/generated/google/apis/monitoring_v3.rb +1 -1
  433. data/generated/google/apis/monitoring_v3/classes.rb +220 -322
  434. data/generated/google/apis/monitoring_v3/service.rb +121 -140
  435. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  436. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  437. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  438. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  439. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +388 -429
  440. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +40 -0
  441. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  442. data/generated/google/apis/osconfig_v1.rb +1 -1
  443. data/generated/google/apis/osconfig_v1/classes.rb +226 -270
  444. data/generated/google/apis/osconfig_v1/service.rb +22 -27
  445. data/generated/google/apis/osconfig_v1beta.rb +1 -1
  446. data/generated/google/apis/osconfig_v1beta/classes.rb +1031 -411
  447. data/generated/google/apis/osconfig_v1beta/representations.rb +337 -0
  448. data/generated/google/apis/osconfig_v1beta/service.rb +39 -52
  449. data/generated/google/apis/oslogin_v1.rb +1 -1
  450. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  451. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  452. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  453. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  454. data/generated/google/apis/oslogin_v1alpha/classes.rb +14 -12
  455. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  456. data/generated/google/apis/oslogin_v1alpha/service.rb +14 -14
  457. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  458. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  459. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  460. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  461. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  462. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  463. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  464. data/generated/google/apis/people_v1.rb +1 -1
  465. data/generated/google/apis/people_v1/classes.rb +121 -12
  466. data/generated/google/apis/people_v1/representations.rb +41 -0
  467. data/generated/google/apis/people_v1/service.rb +39 -39
  468. data/generated/google/apis/playablelocations_v3.rb +1 -1
  469. data/generated/google/apis/playablelocations_v3/classes.rb +108 -155
  470. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  471. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  472. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +6 -0
  473. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +1 -0
  474. data/generated/google/apis/pubsub_v1.rb +1 -1
  475. data/generated/google/apis/pubsub_v1/classes.rb +392 -518
  476. data/generated/google/apis/pubsub_v1/representations.rb +1 -0
  477. data/generated/google/apis/pubsub_v1/service.rb +220 -246
  478. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  479. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  480. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  481. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  482. data/generated/google/apis/pubsub_v1beta2/classes.rb +244 -354
  483. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  484. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  485. data/generated/google/apis/pubsublite_v1/classes.rb +389 -0
  486. data/generated/google/apis/{accessapproval_v1beta1 → pubsublite_v1}/representations.rb +78 -53
  487. data/generated/google/apis/{memcache_v1 → pubsublite_v1}/service.rb +195 -228
  488. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  489. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  490. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +335 -456
  491. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +0 -16
  492. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  493. data/generated/google/apis/redis_v1.rb +1 -1
  494. data/generated/google/apis/redis_v1/classes.rb +172 -208
  495. data/generated/google/apis/redis_v1/service.rb +93 -110
  496. data/generated/google/apis/redis_v1beta1.rb +1 -1
  497. data/generated/google/apis/redis_v1beta1/classes.rb +176 -212
  498. data/generated/google/apis/redis_v1beta1/service.rb +93 -110
  499. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  500. data/generated/google/apis/remotebuildexecution_v1/classes.rb +951 -1078
  501. data/generated/google/apis/remotebuildexecution_v1/representations.rb +61 -0
  502. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  503. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  504. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +946 -1071
  505. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +61 -0
  506. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  507. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  508. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1099 -1250
  509. data/generated/google/apis/remotebuildexecution_v2/representations.rb +61 -0
  510. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  511. data/generated/google/apis/run_v1.rb +1 -1
  512. data/generated/google/apis/run_v1/classes.rb +4 -3
  513. data/generated/google/apis/run_v1/representations.rb +1 -1
  514. data/generated/google/apis/run_v1alpha1.rb +1 -1
  515. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  516. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  517. data/generated/google/apis/run_v1beta1.rb +1 -1
  518. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  519. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  520. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +295 -412
  521. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  522. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  523. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  524. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  525. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  526. data/generated/google/apis/sasportal_v1alpha1/classes.rb +6 -0
  527. data/generated/google/apis/sasportal_v1alpha1/representations.rb +1 -0
  528. data/generated/google/apis/script_v1.rb +1 -1
  529. data/generated/google/apis/script_v1/classes.rb +88 -111
  530. data/generated/google/apis/script_v1/service.rb +63 -69
  531. data/generated/google/apis/secretmanager_v1.rb +1 -1
  532. data/generated/google/apis/secretmanager_v1/classes.rb +211 -363
  533. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  534. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  535. data/generated/google/apis/secretmanager_v1beta1/classes.rb +211 -363
  536. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  537. data/generated/google/apis/securitycenter_v1.rb +1 -1
  538. data/generated/google/apis/securitycenter_v1/classes.rb +16 -6
  539. data/generated/google/apis/securitycenter_v1/representations.rb +1 -0
  540. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  541. data/generated/google/apis/securitycenter_v1beta1/classes.rb +21 -9
  542. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -0
  543. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  544. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +281 -103
  545. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +101 -30
  546. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  547. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  548. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +18 -48
  549. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  550. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +19 -49
  551. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  552. data/generated/google/apis/servicecontrol_v1/classes.rb +523 -641
  553. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  554. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  555. data/generated/google/apis/servicecontrol_v2/classes.rb +279 -325
  556. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  557. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  558. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +214 -333
  559. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  560. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  561. data/generated/google/apis/servicemanagement_v1/classes.rb +1266 -2116
  562. data/generated/google/apis/servicemanagement_v1/service.rb +144 -195
  563. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  564. data/generated/google/apis/servicenetworking_v1/classes.rb +93 -48
  565. data/generated/google/apis/servicenetworking_v1/representations.rb +52 -0
  566. data/generated/google/apis/servicenetworking_v1/service.rb +116 -0
  567. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  568. data/generated/google/apis/servicenetworking_v1beta/classes.rb +74 -48
  569. data/generated/google/apis/servicenetworking_v1beta/representations.rb +38 -0
  570. data/generated/google/apis/serviceusage_v1.rb +1 -1
  571. data/generated/google/apis/serviceusage_v1/classes.rb +52 -48
  572. data/generated/google/apis/serviceusage_v1/representations.rb +4 -0
  573. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  574. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  575. data/generated/google/apis/serviceusage_v1beta1/classes.rb +87 -49
  576. data/generated/google/apis/serviceusage_v1beta1/representations.rb +8 -0
  577. data/generated/google/apis/sheets_v4.rb +1 -1
  578. data/generated/google/apis/sheets_v4/classes.rb +3932 -5007
  579. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  580. data/generated/google/apis/sheets_v4/service.rb +113 -149
  581. data/generated/google/apis/site_verification_v1.rb +1 -1
  582. data/generated/google/apis/slides_v1.rb +1 -1
  583. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  584. data/generated/google/apis/slides_v1/service.rb +23 -30
  585. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  586. data/generated/google/apis/sourcerepo_v1/classes.rb +6 -6
  587. data/generated/google/apis/spanner_v1.rb +1 -1
  588. data/generated/google/apis/spanner_v1/classes.rb +1546 -2157
  589. data/generated/google/apis/spanner_v1/service.rb +443 -618
  590. data/generated/google/apis/speech_v1.rb +1 -1
  591. data/generated/google/apis/speech_v1/classes.rb +174 -220
  592. data/generated/google/apis/speech_v1/service.rb +27 -32
  593. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  594. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  595. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  596. data/generated/google/apis/speech_v2beta1.rb +1 -1
  597. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  598. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  599. data/generated/google/apis/sql_v1beta4.rb +1 -1
  600. data/generated/google/apis/sql_v1beta4/classes.rb +311 -370
  601. data/generated/google/apis/sql_v1beta4/representations.rb +2 -0
  602. data/generated/google/apis/sql_v1beta4/service.rb +51 -56
  603. data/generated/google/apis/storage_v1.rb +1 -1
  604. data/generated/google/apis/storage_v1/classes.rb +8 -7
  605. data/generated/google/apis/storage_v1/representations.rb +2 -2
  606. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  607. data/generated/google/apis/storagetransfer_v1/classes.rb +261 -339
  608. data/generated/google/apis/storagetransfer_v1/service.rb +43 -64
  609. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  610. data/generated/google/apis/streetviewpublish_v1/classes.rb +106 -148
  611. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  612. data/generated/google/apis/tagmanager_v1.rb +1 -1
  613. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  614. data/generated/google/apis/tagmanager_v2.rb +1 -1
  615. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  616. data/generated/google/apis/tasks_v1.rb +1 -1
  617. data/generated/google/apis/tasks_v1/classes.rb +20 -21
  618. data/generated/google/apis/tasks_v1/service.rb +16 -17
  619. data/generated/google/apis/testing_v1.rb +1 -1
  620. data/generated/google/apis/testing_v1/classes.rb +317 -382
  621. data/generated/google/apis/testing_v1/representations.rb +2 -0
  622. data/generated/google/apis/testing_v1/service.rb +22 -28
  623. data/generated/google/apis/texttospeech_v1.rb +1 -1
  624. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  625. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  626. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  627. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  628. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  629. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  630. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  631. data/generated/google/apis/toolresults_v1beta3/classes.rb +7 -0
  632. data/generated/google/apis/toolresults_v1beta3/representations.rb +1 -0
  633. data/generated/google/apis/tpu_v1.rb +1 -1
  634. data/generated/google/apis/tpu_v1/classes.rb +11 -0
  635. data/generated/google/apis/tpu_v1/representations.rb +1 -0
  636. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  637. data/generated/google/apis/tpu_v1alpha1/classes.rb +11 -0
  638. data/generated/google/apis/tpu_v1alpha1/representations.rb +1 -0
  639. data/generated/google/apis/{accessapproval_v1beta1.rb → trafficdirector_v2.rb} +9 -9
  640. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  641. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  642. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  643. data/generated/google/apis/translate_v3.rb +1 -1
  644. data/generated/google/apis/translate_v3/classes.rb +148 -175
  645. data/generated/google/apis/translate_v3/service.rb +122 -151
  646. data/generated/google/apis/translate_v3beta1.rb +1 -1
  647. data/generated/google/apis/translate_v3beta1/classes.rb +149 -170
  648. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  649. data/generated/google/apis/vectortile_v1.rb +1 -1
  650. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  651. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  652. data/generated/google/apis/videointelligence_v1.rb +1 -1
  653. data/generated/google/apis/videointelligence_v1/classes.rb +753 -918
  654. data/generated/google/apis/videointelligence_v1/service.rb +40 -48
  655. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  656. data/generated/google/apis/videointelligence_v1beta2/classes.rb +748 -911
  657. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  658. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  659. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +748 -911
  660. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  661. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  662. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +748 -911
  663. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  664. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  665. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +754 -920
  666. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  667. data/generated/google/apis/webfonts_v1.rb +2 -3
  668. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  669. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  670. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  671. data/generated/google/apis/youtube_v3.rb +1 -1
  672. data/generated/google/apis/youtube_v3/classes.rb +347 -0
  673. data/generated/google/apis/youtube_v3/representations.rb +176 -0
  674. data/generated/google/apis/youtube_v3/service.rb +78 -0
  675. data/lib/google/apis/version.rb +1 -1
  676. metadata +31 -31
  677. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  678. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  679. data/generated/google/apis/dns_v2beta1.rb +0 -43
  680. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  681. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  682. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  683. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  684. data/generated/google/apis/memcache_v1/representations.rb +0 -471
  685. data/generated/google/apis/oauth2_v2.rb +0 -40
  686. data/generated/google/apis/oauth2_v2/classes.rb +0 -165
  687. data/generated/google/apis/oauth2_v2/representations.rb +0 -68
  688. data/generated/google/apis/oauth2_v2/service.rb +0 -158
  689. data/generated/google/apis/securitycenter_v1p1alpha1/service.rb +0 -207
  690. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  691. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  692. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  693. data/generated/google/apis/storage_v1beta2.rb +0 -40
  694. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  695. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  696. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
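
Before the raw hunks below, a minimal upgrade sketch. Assuming a Bundler-managed project (the Gemfile shown here is illustrative and not part of this diff), picking up the 0.44.0 release is just a pin bump; the generated namespaces listed above then keep their usual require paths.

  # Gemfile (illustrative) — pick up the google-api-client 0.44.0 release described by this diff.
  source 'https://rubygems.org'
  gem 'google-api-client', '~> 0.44.0'

  # In application code, regenerated clients keep their usual require paths,
  # e.g. the newly added Dialogflow v3beta1 surface from the list above:
  # require 'google/apis/dialogflow_v3beta1'
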
@@ -49,15 +49,13 @@ module Google
   @batch_path = 'batch'
   end
 
- # Starts asynchronous cancellation on a long-running operation. The server
- # makes a best effort to cancel the operation, but success is not
- # guaranteed. If the server doesn't support this method, it returns
- # `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- # Operations.GetOperation or
- # other methods to check whether the cancellation succeeded or whether the
- # operation completed despite cancellation. On successful cancellation,
- # the operation is not deleted; instead, it becomes an operation with
- # an Operation.error value with a google.rpc.Status.code of 1,
+ # Starts asynchronous cancellation on a long-running operation. The server makes
+ # a best effort to cancel the operation, but success is not guaranteed. If the
+ # server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+ # Clients can use Operations.GetOperation or other methods to check whether the
+ # cancellation succeeded or whether the operation completed despite cancellation.
+ # On successful cancellation, the operation is not deleted; instead, it becomes
+ # an operation with an Operation.error value with a google.rpc.Status.code of 1,
   # corresponding to `Code.CANCELLED`.
   # @param [String] name
   # The name of the operation resource to be cancelled.
@@ -88,10 +86,10 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Deletes a long-running operation. This method indicates that the client is
- # no longer interested in the operation result. It does not cancel the
- # operation. If the server doesn't support this method, it returns
- # `google.rpc.Code.UNIMPLEMENTED`.
+ # Deletes a long-running operation. This method indicates that the client is no
+ # longer interested in the operation result. It does not cancel the operation.
+ # If the server doesn't support this method, it returns `google.rpc.Code.
+ # UNIMPLEMENTED`.
   # @param [String] name
   # The name of the operation resource to be deleted.
   # @param [String] fields
@@ -121,9 +119,8 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Gets the latest state of a long-running operation. Clients can use this
- # method to poll the operation result at intervals as recommended by the API
- # service.
+ # Gets the latest state of a long-running operation. Clients can use this method
+ # to poll the operation result at intervals as recommended by the API service.
   # @param [String] name
   # The name of the operation resource.
   # @param [String] fields
@@ -153,9 +150,8 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Gets the latest state of a long-running operation. Clients can use this
- # method to poll the operation result at intervals as recommended by the API
- # service.
+ # Gets the latest state of a long-running operation. Clients can use this method
+ # to poll the operation result at intervals as recommended by the API service.
   # @param [String] name
   # The name of the operation resource.
   # @param [String] fields
@@ -185,15 +181,13 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Starts asynchronous cancellation on a long-running operation. The server
- # makes a best effort to cancel the operation, but success is not
- # guaranteed. If the server doesn't support this method, it returns
- # `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- # Operations.GetOperation or
- # other methods to check whether the cancellation succeeded or whether the
- # operation completed despite cancellation. On successful cancellation,
- # the operation is not deleted; instead, it becomes an operation with
- # an Operation.error value with a google.rpc.Status.code of 1,
+ # Starts asynchronous cancellation on a long-running operation. The server makes
+ # a best effort to cancel the operation, but success is not guaranteed. If the
+ # server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+ # Clients can use Operations.GetOperation or other methods to check whether the
+ # cancellation succeeded or whether the operation completed despite cancellation.
+ # On successful cancellation, the operation is not deleted; instead, it becomes
+ # an operation with an Operation.error value with a google.rpc.Status.code of 1,
   # corresponding to `Code.CANCELLED`.
   # @param [String] name
   # The name of the operation resource to be cancelled.
@@ -227,10 +221,10 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Deletes a long-running operation. This method indicates that the client is
- # no longer interested in the operation result. It does not cancel the
- # operation. If the server doesn't support this method, it returns
- # `google.rpc.Code.UNIMPLEMENTED`.
+ # Deletes a long-running operation. This method indicates that the client is no
+ # longer interested in the operation result. It does not cancel the operation.
+ # If the server doesn't support this method, it returns `google.rpc.Code.
+ # UNIMPLEMENTED`.
   # @param [String] name
   # The name of the operation resource to be deleted.
   # @param [String] fields
@@ -260,9 +254,8 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Gets the latest state of a long-running operation. Clients can use this
- # method to poll the operation result at intervals as recommended by the API
- # service.
+ # Gets the latest state of a long-running operation. Clients can use this method
+ # to poll the operation result at intervals as recommended by the API service.
   # @param [String] name
   # The name of the operation resource.
   # @param [String] fields
@@ -292,15 +285,14 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Lists operations that match the specified filter in the request. If the
- # server doesn't support this method, it returns `UNIMPLEMENTED`.
- # NOTE: the `name` binding allows API services to override the binding
- # to use different resource name schemes, such as `users/*/operations`. To
- # override the binding, API services can add a binding such as
- # `"/v1/`name=users/*`/operations"` to their service configuration.
- # For backwards compatibility, the default name includes the operations
- # collection id, however overriding users must ensure the name binding
- # is the parent resource, without the operations collection id.
+ # Lists operations that match the specified filter in the request. If the server
+ # doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name`
+ # binding allows API services to override the binding to use different resource
+ # name schemes, such as `users/*/operations`. To override the binding, API
+ # services can add a binding such as `"/v1/`name=users/*`/operations"` to their
+ # service configuration. For backwards compatibility, the default name includes
+ # the operations collection id, however overriding users must ensure the name
+ # binding is the parent resource, without the operations collection id.
   # @param [String] name
   # The name of the operation's parent resource.
   # @param [String] filter
@@ -339,10 +331,10 @@ module Google
   execute_or_queue_command(command, &block)
   end
 
- # Performs asynchronous video annotation. Progress and results can be
- # retrieved through the `google.longrunning.Operations` interface.
- # `Operation.metadata` contains `AnnotateVideoProgress` (progress).
- # `Operation.response` contains `AnnotateVideoResponse` (results).
+ # Performs asynchronous video annotation. Progress and results can be retrieved
+ # through the `google.longrunning.Operations` interface. `Operation.metadata`
+ # contains `AnnotateVideoProgress` (progress). `Operation.response` contains `
+ # AnnotateVideoResponse` (results).
   # @param [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1AnnotateVideoRequest] google_cloud_videointelligence_v1_annotate_video_request_object
   # @param [String] fields
   # Selector specifying which fields to include in a partial response.
@@ -27,7 +27,7 @@ module Google
   # @see https://cloud.google.com/video-intelligence/docs/
   module VideointelligenceV1beta2
   VERSION = 'V1beta2'
- REVISION = '20200602'
+ REVISION = '20200810'
 
   # View and manage your data across Google Cloud Platform services
   AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -22,9 +22,9 @@ module Google
   module Apis
   module VideointelligenceV1beta2
 
- # Video annotation progress. Included in the `metadata`
- # field of the `Operation` returned by the `GetOperation`
- # call of the `google::longrunning::Operations` service.
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
   class GoogleCloudVideointelligenceV1AnnotateVideoProgress
   include Google::Apis::Core::Hashable
 
@@ -43,9 +43,9 @@ module Google
   end
   end
 
- # Video annotation response. Included in the `response`
- # field of the `Operation` returned by the `GetOperation`
- # call of the `google::longrunning::Operations` service.
+ # Video annotation response. Included in the `response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
   class GoogleCloudVideointelligenceV1AnnotateVideoResponse
   include Google::Apis::Core::Hashable
 
@@ -73,14 +73,14 @@ module Google
   # @return [Float]
   attr_accessor :confidence
 
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
- # A full list of supported type names will be provided in the document.
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+ # full list of supported type names will be provided in the document.
   # Corresponds to the JSON property `name`
   # @return [String]
   attr_accessor :name
 
- # Text value of the detection result. For example, the value for "HairColor"
- # can be "black", "blonde", etc.
+ # Text value of the detection result. For example, the value for "HairColor" can
+ # be "black", "blonde", etc.
   # Corresponds to the JSON property `value`
   # @return [String]
   attr_accessor :value
@@ -112,9 +112,8 @@ module Google
   # @return [String]
   attr_accessor :name
 
- # A vertex represents a 2D point in the image.
- # NOTE: the normalized vertex coordinates are relative to the original image
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
   # Corresponds to the JSON property `point`
   # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedVertex]
   attr_accessor :point
@@ -140,8 +139,7 @@ module Google
   # @return [String]
   attr_accessor :description
 
- # Opaque entity ID. Some IDs may be available in
- # [Google Knowledge Graph Search
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
   # API](https://developers.google.com/knowledge-graph/).
   # Corresponds to the JSON property `entityId`
   # @return [String]
@@ -164,9 +162,9 @@ module Google
   end
   end
 
- # Explicit content annotation (based on per-frame visual signals only).
- # If no explicit content has been detected in a frame, no annotations are
- # present for that frame.
165
+ # Explicit content annotation (based on per-frame visual signals only). If no
166
+ # explicit content has been detected in a frame, no annotations are present for
167
+ # that frame.
170
168
  class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
171
169
  include Google::Apis::Core::Hashable
172
170
 
@@ -221,10 +219,9 @@ module Google
221
219
  class GoogleCloudVideointelligenceV1LabelAnnotation
222
220
  include Google::Apis::Core::Hashable
223
221
 
224
- # Common categories for the detected entity.
225
- # For example, when the label is `Terrier`, the category is likely `dog`. And
226
- # in some cases there might be more than one categories e.g., `Terrier` could
227
- # also be a `pet`.
222
+ # Common categories for the detected entity. For example, when the label is `
223
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
224
+ # than one categories e.g., `Terrier` could also be a `pet`.
228
225
  # Corresponds to the JSON property `categoryEntities`
229
226
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity>]
230
227
  attr_accessor :category_entities
@@ -323,14 +320,14 @@ module Google
323
320
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity]
324
321
  attr_accessor :entity
325
322
 
326
- # All video segments where the recognized logo appears. There might be
327
- # multiple instances of the same logo class appearing in one VideoSegment.
323
+ # All video segments where the recognized logo appears. There might be multiple
324
+ # instances of the same logo class appearing in one VideoSegment.
328
325
  # Corresponds to the JSON property `segments`
329
326
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment>]
330
327
  attr_accessor :segments
331
328
 
332
- # All logo tracks where the recognized logo appears. Each track corresponds
333
- # to one logo instance appearing in consecutive frames.
329
+ # All logo tracks where the recognized logo appears. Each track corresponds to
330
+ # one logo instance appearing in consecutive frames.
334
331
  # Corresponds to the JSON property `tracks`
335
332
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Track>]
336
333
  attr_accessor :tracks
@@ -347,9 +344,8 @@ module Google
347
344
  end
348
345
  end
349
346
 
350
- # Normalized bounding box.
351
- # The normalized vertex coordinates are relative to the original image.
352
- # Range: [0, 1].
347
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
348
+ # original image. Range: [0, 1].
353
349
  class GoogleCloudVideointelligenceV1NormalizedBoundingBox
354
350
  include Google::Apis::Core::Hashable
355
351
 
@@ -387,20 +383,12 @@ module Google
387
383
  end
388
384
 
389
385
  # Normalized bounding polygon for text (that might not be aligned with axis).
390
- # Contains list of the corner points in clockwise order starting from
391
- # top-left corner. For example, for a rectangular bounding box:
392
- # When the text is horizontal it might look like:
393
- # 0----1
394
- # | |
395
- # 3----2
396
- # When it's clockwise rotated 180 degrees around the top-left corner it
397
- # becomes:
398
- # 2----3
399
- # | |
400
- # 1----0
401
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
402
- # than 0, or greater than 1 due to trignometric calculations for location of
403
- # the box.
386
+ # Contains list of the corner points in clockwise order starting from top-left
387
+ # corner. For example, for a rectangular bounding box: When the text is
388
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
389
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
390
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
391
+ # or greater than 1 due to trignometric calculations for location of the box.
404
392
  class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
405
393
  include Google::Apis::Core::Hashable
406
394
 
@@ -419,9 +407,8 @@ module Google
419
407
  end
420
408
  end
421
409
 
422
- # A vertex represents a 2D point in the image.
423
- # NOTE: the normalized vertex coordinates are relative to the original image
424
- # and range from 0 to 1.
410
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
411
+ # coordinates are relative to the original image and range from 0 to 1.
425
412
  class GoogleCloudVideointelligenceV1NormalizedVertex
426
413
  include Google::Apis::Core::Hashable
427
414
 
@@ -460,10 +447,10 @@ module Google
460
447
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity]
461
448
  attr_accessor :entity
462
449
 
463
- # Information corresponding to all frames where this object track appears.
464
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
465
- # messages in frames.
466
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
450
+ # Information corresponding to all frames where this object track appears. Non-
451
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
452
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
453
+ # frames.
467
454
  # Corresponds to the JSON property `frames`
468
455
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
469
456
  attr_accessor :frames
@@ -473,12 +460,11 @@ module Google
473
460
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment]
474
461
  attr_accessor :segment
475
462
 
476
- # Streaming mode ONLY.
477
- # In streaming mode, we do not know the end time of a tracked object
478
- # before it is completed. Hence, there is no VideoSegment info returned.
479
- # Instead, we provide a unique identifiable integer track_id so that
480
- # the customers can correlate the results of the ongoing
481
- # ObjectTrackAnnotation of the same track_id over time.
463
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
464
+ # tracked object before it is completed. Hence, there is no VideoSegment info
465
+ # returned. Instead, we provide a unique identifiable integer track_id so that
466
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
467
+ # of the same track_id over time.
482
468
  # Corresponds to the JSON property `trackId`
483
469
  # @return [Fixnum]
484
470
  attr_accessor :track_id
@@ -508,9 +494,8 @@ module Google
508
494
  class GoogleCloudVideointelligenceV1ObjectTrackingFrame
509
495
  include Google::Apis::Core::Hashable
510
496
 
511
- # Normalized bounding box.
512
- # The normalized vertex coordinates are relative to the original image.
513
- # Range: [0, 1].
497
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
498
+ # original image. Range: [0, 1].
514
499
  # Corresponds to the JSON property `normalizedBoundingBox`
515
500
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
516
501
  attr_accessor :normalized_bounding_box
@@ -537,10 +522,10 @@ module Google
537
522
 
538
523
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
539
524
  # indicates an estimated greater likelihood that the recognized words are
540
- # correct. This field is set only for the top alternative.
541
- # This field is not guaranteed to be accurate and users should not rely on it
542
- # to be always provided.
543
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
525
+ # correct. This field is set only for the top alternative. This field is not
526
+ # guaranteed to be accurate and users should not rely on it to be always
527
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
528
+ # not set.
544
529
  # Corresponds to the JSON property `confidence`
545
530
  # @return [Float]
546
531
  attr_accessor :confidence
@@ -551,8 +536,8 @@ module Google
551
536
  attr_accessor :transcript
552
537
 
553
538
  # Output only. A list of word-specific information for each recognized word.
554
- # Note: When `enable_speaker_diarization` is set to true, you will see all
555
- # the words from the beginning of the audio.
539
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
540
+ # words from the beginning of the audio.
556
541
  # Corresponds to the JSON property `words`
557
542
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1WordInfo>]
558
543
  attr_accessor :words
@@ -573,18 +558,17 @@ module Google
573
558
  class GoogleCloudVideointelligenceV1SpeechTranscription
574
559
  include Google::Apis::Core::Hashable
575
560
 
576
- # May contain one or more recognition hypotheses (up to the maximum specified
577
- # in `max_alternatives`). These alternatives are ordered in terms of
578
- # accuracy, with the top (first) alternative being the most probable, as
579
- # ranked by the recognizer.
561
+ # May contain one or more recognition hypotheses (up to the maximum specified in
562
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
563
+ # the top (first) alternative being the most probable, as ranked by the
564
+ # recognizer.
580
565
  # Corresponds to the JSON property `alternatives`
581
566
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
582
567
  attr_accessor :alternatives
583
568
 
584
569
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
585
- # language tag of
586
- # the language in this result. This language code was detected to have the
587
- # most likelihood of being spoken in the audio.
570
+ # language tag of the language in this result. This language code was detected
571
+ # to have the most likelihood of being spoken in the audio.
588
572
  # Corresponds to the JSON property `languageCode`
589
573
  # @return [String]
590
574
  attr_accessor :language_code
@@ -633,27 +617,19 @@ module Google
633
617
  end
634
618
  end
635
619
 
636
- # Video frame level annotation results for text annotation (OCR).
637
- # Contains information regarding timestamp and bounding box locations for the
638
- # frames containing detected OCR text snippets.
620
+ # Video frame level annotation results for text annotation (OCR). Contains
621
+ # information regarding timestamp and bounding box locations for the frames
622
+ # containing detected OCR text snippets.
639
623
  class GoogleCloudVideointelligenceV1TextFrame
640
624
  include Google::Apis::Core::Hashable
641
625
 
642
626
  # Normalized bounding polygon for text (that might not be aligned with axis).
643
- # Contains list of the corner points in clockwise order starting from
644
- # top-left corner. For example, for a rectangular bounding box:
645
- # When the text is horizontal it might look like:
646
- # 0----1
647
- # | |
648
- # 3----2
649
- # When it's clockwise rotated 180 degrees around the top-left corner it
650
- # becomes:
651
- # 2----3
652
- # | |
653
- # 1----0
654
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
655
- # than 0, or greater than 1 due to trignometric calculations for location of
656
- # the box.
627
+ # Contains list of the corner points in clockwise order starting from top-left
628
+ # corner. For example, for a rectangular bounding box: When the text is
629
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
630
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
631
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
632
+ # or greater than 1 due to trignometric calculations for location of the box.
657
633
  # Corresponds to the JSON property `rotatedBoundingBox`
658
634
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
659
635
  attr_accessor :rotated_bounding_box
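
The vertex-order note above is easier to follow with concrete values. A small sketch (not part of the diff) of the horizontal 0-1-2-3 rectangle from the comment; the `vertices` array on the bounding poly and the `x`/`y` fields on `NormalizedVertex` are assumptions about fields not shown in this hunk:

    v1beta2 = Google::Apis::VideointelligenceV1beta2  # the V1 message copies live in this module too

    # Corners listed clockwise from the text's top-left, matching 0----1 / 3----2 above.
    poly = v1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingPoly.new(
      vertices: [
        v1beta2::GoogleCloudVideointelligenceV1NormalizedVertex.new(x: 0.1, y: 0.2), # 0: top-left
        v1beta2::GoogleCloudVideointelligenceV1NormalizedVertex.new(x: 0.9, y: 0.2), # 1: top-right
        v1beta2::GoogleCloudVideointelligenceV1NormalizedVertex.new(x: 0.9, y: 0.4), # 2: bottom-right
        v1beta2::GoogleCloudVideointelligenceV1NormalizedVertex.new(x: 0.1, y: 0.4)  # 3: bottom-left
      ]
    )
    # After a 180-degree rotation about the top-left corner, vertex 0 no longer sits
    # at the image's top-left, but the list order stays (0, 1, 2, 3), as noted above.
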
@@ -706,9 +682,8 @@ module Google
706
682
  end
707
683
  end
708
684
 
709
- # For tracking related features.
710
- # An object at time_offset with attributes, and located with
711
- # normalized_bounding_box.
685
+ # For tracking related features. An object at time_offset with attributes, and
686
+ # located with normalized_bounding_box.
712
687
  class GoogleCloudVideointelligenceV1TimestampedObject
713
688
  include Google::Apis::Core::Hashable
714
689
 
@@ -722,15 +697,14 @@ module Google
722
697
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1DetectedLandmark>]
723
698
  attr_accessor :landmarks
724
699
 
725
- # Normalized bounding box.
726
- # The normalized vertex coordinates are relative to the original image.
727
- # Range: [0, 1].
700
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
701
+ # original image. Range: [0, 1].
728
702
  # Corresponds to the JSON property `normalizedBoundingBox`
729
703
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
730
704
  attr_accessor :normalized_bounding_box
731
705
 
732
- # Time-offset, relative to the beginning of the video,
733
- # corresponding to the video frame for this object.
706
+ # Time-offset, relative to the beginning of the video, corresponding to the
707
+ # video frame for this object.
734
708
  # Corresponds to the JSON property `timeOffset`
735
709
  # @return [String]
736
710
  attr_accessor :time_offset
@@ -789,20 +763,19 @@ module Google
789
763
  class GoogleCloudVideointelligenceV1VideoAnnotationProgress
790
764
  include Google::Apis::Core::Hashable
791
765
 
792
- # Specifies which feature is being tracked if the request contains more than
793
- # one feature.
766
+ # Specifies which feature is being tracked if the request contains more than one
767
+ # feature.
794
768
  # Corresponds to the JSON property `feature`
795
769
  # @return [String]
796
770
  attr_accessor :feature
797
771
 
798
- # Video file location in
799
- # [Cloud Storage](https://cloud.google.com/storage/).
772
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
800
773
  # Corresponds to the JSON property `inputUri`
801
774
  # @return [String]
802
775
  attr_accessor :input_uri
803
776
 
804
- # Approximate percentage processed thus far. Guaranteed to be
805
- # 100 when fully processed.
777
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
778
+ # processed.
806
779
  # Corresponds to the JSON property `progressPercent`
807
780
  # @return [Fixnum]
808
781
  attr_accessor :progress_percent
@@ -841,31 +814,30 @@ module Google
841
814
  class GoogleCloudVideointelligenceV1VideoAnnotationResults
842
815
  include Google::Apis::Core::Hashable
843
816
 
844
- # The `Status` type defines a logical error model that is suitable for
845
- # different programming environments, including REST APIs and RPC APIs. It is
846
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
847
- # three pieces of data: error code, error message, and error details.
848
- # You can find out more about this error model and how to work with it in the
849
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
817
+ # The `Status` type defines a logical error model that is suitable for different
818
+ # programming environments, including REST APIs and RPC APIs. It is used by [
819
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
820
+ # data: error code, error message, and error details. You can find out more
821
+ # about this error model and how to work with it in the [API Design Guide](https:
822
+ # //cloud.google.com/apis/design/errors).
850
823
  # Corresponds to the JSON property `error`
851
824
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
852
825
  attr_accessor :error
853
826
 
854
- # Explicit content annotation (based on per-frame visual signals only).
855
- # If no explicit content has been detected in a frame, no annotations are
856
- # present for that frame.
827
+ # Explicit content annotation (based on per-frame visual signals only). If no
828
+ # explicit content has been detected in a frame, no annotations are present for
829
+ # that frame.
857
830
  # Corresponds to the JSON property `explicitAnnotation`
858
831
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
859
832
  attr_accessor :explicit_annotation
860
833
 
861
- # Label annotations on frame level.
862
- # There is exactly one element for each unique label.
834
+ # Label annotations on frame level. There is exactly one element for each unique
835
+ # label.
863
836
  # Corresponds to the JSON property `frameLabelAnnotations`
864
837
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
865
838
  attr_accessor :frame_label_annotations
866
839
 
867
- # Video file location in
868
- # [Cloud Storage](https://cloud.google.com/storage/).
840
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
869
841
  # Corresponds to the JSON property `inputUri`
870
842
  # @return [String]
871
843
  attr_accessor :input_uri
@@ -892,11 +864,11 @@ module Google
892
864
  attr_accessor :segment_label_annotations
893
865
 
894
866
  # Presence label annotations on video level or user-specified segment level.
895
- # There is exactly one element for each unique label. Compared to the
896
- # existing topical `segment_label_annotations`, this field presents more
897
- # fine-grained, segment-level labels detected in video content and is made
898
- # available only when the client sets `LabelDetectionConfig.model` to
899
- # "builtin/latest" in the request.
867
+ # There is exactly one element for each unique label. Compared to the existing
868
+ # topical `segment_label_annotations`, this field presents more fine-grained,
869
+ # segment-level labels detected in video content and is made available only when
870
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
871
+ # request.
900
872
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
901
873
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
902
874
  attr_accessor :segment_presence_label_annotations
@@ -906,17 +878,17 @@ module Google
906
878
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment>]
907
879
  attr_accessor :shot_annotations
908
880
 
909
- # Topical label annotations on shot level.
910
- # There is exactly one element for each unique label.
881
+ # Topical label annotations on shot level. There is exactly one element for each
882
+ # unique label.
911
883
  # Corresponds to the JSON property `shotLabelAnnotations`
912
884
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
913
885
  attr_accessor :shot_label_annotations
914
886
 
915
887
  # Presence label annotations on shot level. There is exactly one element for
916
- # each unique label. Compared to the existing topical
917
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
918
- # labels detected in video content and is made available only when the client
919
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
888
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
889
+ # this field presents more fine-grained, shot-level labels detected in video
890
+ # content and is made available only when the client sets `LabelDetectionConfig.
891
+ # model` to "builtin/latest" in the request.
920
892
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
921
893
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
922
894
  attr_accessor :shot_presence_label_annotations
@@ -926,9 +898,8 @@ module Google
926
898
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1SpeechTranscription>]
927
899
  attr_accessor :speech_transcriptions
928
900
 
929
- # OCR text detection and tracking.
930
- # Annotations for list of detected text snippets. Each will have list of
931
- # frame information associated with it.
901
+ # OCR text detection and tracking. Annotations for list of detected text
902
+ # snippets. Each will have list of frame information associated with it.
932
903
  # Corresponds to the JSON property `textAnnotations`
933
904
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextAnnotation>]
934
905
  attr_accessor :text_annotations
@@ -960,14 +931,14 @@ module Google
960
931
  class GoogleCloudVideointelligenceV1VideoSegment
961
932
  include Google::Apis::Core::Hashable
962
933
 
963
- # Time-offset, relative to the beginning of the video,
964
- # corresponding to the end of the segment (inclusive).
934
+ # Time-offset, relative to the beginning of the video, corresponding to the end
935
+ # of the segment (inclusive).
965
936
  # Corresponds to the JSON property `endTimeOffset`
966
937
  # @return [String]
967
938
  attr_accessor :end_time_offset
968
939
 
969
- # Time-offset, relative to the beginning of the video,
970
- # corresponding to the start of the segment (inclusive).
940
+ # Time-offset, relative to the beginning of the video, corresponding to the
941
+ # start of the segment (inclusive).
971
942
  # Corresponds to the JSON property `startTimeOffset`
972
943
  # @return [String]
973
944
  attr_accessor :start_time_offset
@@ -984,41 +955,41 @@ module Google
984
955
  end
985
956
 
986
957
  # Word-specific information for recognized words. Word information is only
987
- # included in the response when certain request parameters are set, such
988
- # as `enable_word_time_offsets`.
958
+ # included in the response when certain request parameters are set, such as `
959
+ # enable_word_time_offsets`.
989
960
  class GoogleCloudVideointelligenceV1WordInfo
990
961
  include Google::Apis::Core::Hashable
991
962
 
992
963
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
993
964
  # indicates an estimated greater likelihood that the recognized words are
994
- # correct. This field is set only for the top alternative.
995
- # This field is not guaranteed to be accurate and users should not rely on it
996
- # to be always provided.
997
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
965
+ # correct. This field is set only for the top alternative. This field is not
966
+ # guaranteed to be accurate and users should not rely on it to be always
967
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
968
+ # not set.
998
969
  # Corresponds to the JSON property `confidence`
999
970
  # @return [Float]
1000
971
  attr_accessor :confidence
1001
972
 
1002
- # Time offset relative to the beginning of the audio, and
1003
- # corresponding to the end of the spoken word. This field is only set if
1004
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1005
- # experimental feature and the accuracy of the time offset can vary.
973
+ # Time offset relative to the beginning of the audio, and corresponding to the
974
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
975
+ # true` and only in the top hypothesis. This is an experimental feature and the
976
+ # accuracy of the time offset can vary.
1006
977
  # Corresponds to the JSON property `endTime`
1007
978
  # @return [String]
1008
979
  attr_accessor :end_time
1009
980
 
1010
- # Output only. A distinct integer value is assigned for every speaker within
1011
- # the audio. This field specifies which one of those speakers was detected to
1012
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
1013
- # and is only set if speaker diarization is enabled.
981
+ # Output only. A distinct integer value is assigned for every speaker within the
982
+ # audio. This field specifies which one of those speakers was detected to have
983
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
984
+ # only set if speaker diarization is enabled.
1014
985
  # Corresponds to the JSON property `speakerTag`
1015
986
  # @return [Fixnum]
1016
987
  attr_accessor :speaker_tag
1017
988
 
1018
- # Time offset relative to the beginning of the audio, and
1019
- # corresponding to the start of the spoken word. This field is only set if
1020
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1021
- # experimental feature and the accuracy of the time offset can vary.
989
+ # Time offset relative to the beginning of the audio, and corresponding to the
990
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
991
+ # true` and only in the top hypothesis. This is an experimental feature and the
992
+ # accuracy of the time offset can vary.
1022
993
  # Corresponds to the JSON property `startTime`
1023
994
  # @return [String]
1024
995
  attr_accessor :start_time
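
As a reading aid (not part of the diff), the word-level fields above are typically consumed roughly as follows, assuming `response` is an already-parsed GoogleCloudVideointelligenceV1AnnotateVideoResponse; `annotation_results` and the `word` string on each WordInfo are assumptions, while the other field names appear in the hunks above:

    # Walk transcriptions -> top alternative -> words, printing timing and speaker.
    response.annotation_results.to_a.each do |result|
      result.speech_transcriptions.to_a.each do |transcription|
        top = transcription.alternatives.to_a.first
        next unless top
        top.words.to_a.each do |info|
          # speaker_tag is only populated when speaker diarization is enabled.
          puts format('%-15s %s -> %s  speaker=%s',
                      info.word, info.start_time, info.end_time, info.speaker_tag)
        end
      end
    end
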
@@ -1042,9 +1013,9 @@ module Google
1042
1013
  end
1043
1014
  end
1044
1015
 
1045
- # Video annotation progress. Included in the `metadata`
1046
- # field of the `Operation` returned by the `GetOperation`
1047
- # call of the `google::longrunning::Operations` service.
1016
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
1017
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1018
+ # service.
1048
1019
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
1049
1020
  include Google::Apis::Core::Hashable
1050
1021
 
@@ -1072,24 +1043,22 @@ module Google
1072
1043
  # @return [Array<String>]
1073
1044
  attr_accessor :features
1074
1045
 
1075
- # The video data bytes.
1076
- # If unset, the input video(s) should be specified via the `input_uri`.
1077
- # If set, `input_uri` must be unset.
1046
+ # The video data bytes. If unset, the input video(s) should be specified via the
1047
+ # `input_uri`. If set, `input_uri` must be unset.
1078
1048
  # Corresponds to the JSON property `inputContent`
1079
1049
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
1080
1050
  # @return [String]
1081
1051
  attr_accessor :input_content
1082
1052
 
1083
- # Input video location. Currently, only
1084
- # [Cloud Storage](https://cloud.google.com/storage/) URIs are
1085
- # supported. URIs must be specified in the following format:
1086
- # `gs://bucket-id/object-id` (other URI formats return
1087
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
1088
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
1089
- # To identify multiple videos, a video URI may include wildcards in the
1090
- # `object-id`. Supported wildcards: '*' to match 0 or more characters;
1091
- # '?' to match 1 character. If unset, the input video should be embedded
1092
- # in the request as `input_content`. If set, `input_content` must be unset.
1053
+ # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
1054
+ # storage/) URIs are supported. URIs must be specified in the following format: `
1055
+ # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
1056
+ # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
1057
+ # google.com/storage/docs/request-endpoints). To identify multiple videos, a
1058
+ # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
1059
+ # to match 0 or more characters; '?' to match 1 character. If unset, the input
1060
+ # video should be embedded in the request as `input_content`. If set, `
1061
+ # input_content` must be unset.
1093
1062
  # Corresponds to the JSON property `inputUri`
1094
1063
  # @return [String]
1095
1064
  attr_accessor :input_uri
@@ -1103,11 +1072,11 @@ module Google
1103
1072
  attr_accessor :location_id
1104
1073
 
1105
1074
  # Optional. Location where the output (in JSON format) should be stored.
1106
- # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
1107
- # URIs are supported. These must be specified in the following format:
1108
- # `gs://bucket-id/object-id` (other URI formats return
1109
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
1110
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
1075
+ # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
1076
+ # supported. These must be specified in the following format: `gs://bucket-id/
1077
+ # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
1078
+ # more information, see [Request URIs](https://cloud.google.com/storage/docs/
1079
+ # request-endpoints).
1111
1080
  # Corresponds to the JSON property `outputUri`
1112
1081
  # @return [String]
1113
1082
  attr_accessor :output_uri
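
A short sketch (not part of the diff) of the mutually exclusive input fields and the output location described above, using the v1beta2 request message (class names follow the generated pattern; the feature strings are illustrative):

    v1beta2 = Google::Apis::VideointelligenceV1beta2

    # Either input_uri or input_content may be set, never both.
    request = v1beta2::GoogleCloudVideointelligenceV1beta2AnnotateVideoRequest.new(
      # Wildcards are allowed in the object-id part, per the documentation above.
      input_uri:  'gs://bucket-id/videos/*.mp4',
      features:   ['LABEL_DETECTION'],
      # Optional: where the JSON output is written; must also be a gs:// URI.
      output_uri: 'gs://bucket-id/results/annotations.json'
    )

    # Alternatively, embed the bytes directly (input_uri must then be unset).
    # The client library base64-encodes input_content automatically, per the NOTE above.
    inline = v1beta2::GoogleCloudVideointelligenceV1beta2AnnotateVideoRequest.new(
      input_content: File.binread('clip.mp4'),
      features:      ['SHOT_CHANGE_DETECTION']
    )
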
@@ -1132,9 +1101,9 @@ module Google
1132
1101
  end
1133
1102
  end
1134
1103
 
1135
- # Video annotation response. Included in the `response`
1136
- # field of the `Operation` returned by the `GetOperation`
1137
- # call of the `google::longrunning::Operations` service.
1104
+ # Video annotation response. Included in the `response` field of the `Operation`
1105
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1106
+ # service.
1138
1107
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
1139
1108
  include Google::Apis::Core::Hashable
1140
1109
 
@@ -1162,14 +1131,14 @@ module Google
1162
1131
  # @return [Float]
1163
1132
  attr_accessor :confidence
1164
1133
 
1165
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
1166
- # A full list of supported type names will be provided in the document.
1134
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
1135
+ # full list of supported type names will be provided in the document.
1167
1136
  # Corresponds to the JSON property `name`
1168
1137
  # @return [String]
1169
1138
  attr_accessor :name
1170
1139
 
1171
- # Text value of the detection result. For example, the value for "HairColor"
1172
- # can be "black", "blonde", etc.
1140
+ # Text value of the detection result. For example, the value for "HairColor" can
1141
+ # be "black", "blonde", etc.
1173
1142
  # Corresponds to the JSON property `value`
1174
1143
  # @return [String]
1175
1144
  attr_accessor :value
@@ -1201,9 +1170,8 @@ module Google
1201
1170
  # @return [String]
1202
1171
  attr_accessor :name
1203
1172
 
1204
- # A vertex represents a 2D point in the image.
1205
- # NOTE: the normalized vertex coordinates are relative to the original image
1206
- # and range from 0 to 1.
1173
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1174
+ # coordinates are relative to the original image and range from 0 to 1.
1207
1175
  # Corresponds to the JSON property `point`
1208
1176
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
1209
1177
  attr_accessor :point
@@ -1229,8 +1197,7 @@ module Google
1229
1197
  # @return [String]
1230
1198
  attr_accessor :description
1231
1199
 
1232
- # Opaque entity ID. Some IDs may be available in
1233
- # [Google Knowledge Graph Search
1200
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
1234
1201
  # API](https://developers.google.com/knowledge-graph/).
1235
1202
  # Corresponds to the JSON property `entityId`
1236
1203
  # @return [String]
@@ -1253,9 +1220,9 @@ module Google
1253
1220
  end
1254
1221
  end
1255
1222
 
1256
- # Explicit content annotation (based on per-frame visual signals only).
1257
- # If no explicit content has been detected in a frame, no annotations are
1258
- # present for that frame.
1223
+ # Explicit content annotation (based on per-frame visual signals only). If no
1224
+ # explicit content has been detected in a frame, no annotations are present for
1225
+ # that frame.
1259
1226
  class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
1260
1227
  include Google::Apis::Core::Hashable
1261
1228
 
@@ -1284,9 +1251,8 @@ module Google
1284
1251
  class GoogleCloudVideointelligenceV1beta2ExplicitContentDetectionConfig
1285
1252
  include Google::Apis::Core::Hashable
1286
1253
 
1287
- # Model to use for explicit content detection.
1288
- # Supported values: "builtin/stable" (the default if unset) and
1289
- # "builtin/latest".
1254
+ # Model to use for explicit content detection. Supported values: "builtin/stable"
1255
+ # (the default if unset) and "builtin/latest".
1290
1256
  # Corresponds to the JSON property `model`
1291
1257
  # @return [String]
1292
1258
  attr_accessor :model
@@ -1331,10 +1297,9 @@ module Google
1331
1297
  class GoogleCloudVideointelligenceV1beta2LabelAnnotation
1332
1298
  include Google::Apis::Core::Hashable
1333
1299
 
1334
- # Common categories for the detected entity.
1335
- # For example, when the label is `Terrier`, the category is likely `dog`. And
1336
- # in some cases there might be more than one categories e.g., `Terrier` could
1337
- # also be a `pet`.
1300
+ # Common categories for the detected entity. For example, when the label is `
1301
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
1302
+ # than one categories e.g., `Terrier` could also be a `pet`.
1338
1303
  # Corresponds to the JSON property `categoryEntities`
1339
1304
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity>]
1340
1305
  attr_accessor :category_entities
@@ -1377,44 +1342,40 @@ module Google
1377
1342
  class GoogleCloudVideointelligenceV1beta2LabelDetectionConfig
1378
1343
  include Google::Apis::Core::Hashable
1379
1344
 
1380
- # The confidence threshold we perform filtering on the labels from
1381
- # frame-level detection. If not set, it is set to 0.4 by default. The valid
1382
- # range for this threshold is [0.1, 0.9]. Any value set outside of this
1383
- # range will be clipped.
1384
- # Note: For best results, follow the default threshold. We will update
1385
- # the default threshold everytime when we release a new model.
1345
+ # The confidence threshold we perform filtering on the labels from frame-level
1346
+ # detection. If not set, it is set to 0.4 by default. The valid range for this
1347
+ # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
1348
+ # Note: For best results, follow the default threshold. We will update the
1349
+ # default threshold everytime when we release a new model.
1386
1350
  # Corresponds to the JSON property `frameConfidenceThreshold`
1387
1351
  # @return [Float]
1388
1352
  attr_accessor :frame_confidence_threshold
1389
1353
 
1390
- # What labels should be detected with LABEL_DETECTION, in addition to
1391
- # video-level labels or segment-level labels.
1392
- # If unspecified, defaults to `SHOT_MODE`.
1354
+ # What labels should be detected with LABEL_DETECTION, in addition to video-
1355
+ # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
1393
1356
  # Corresponds to the JSON property `labelDetectionMode`
1394
1357
  # @return [String]
1395
1358
  attr_accessor :label_detection_mode
1396
1359
 
1397
- # Model to use for label detection.
1398
- # Supported values: "builtin/stable" (the default if unset) and
1399
- # "builtin/latest".
1360
+ # Model to use for label detection. Supported values: "builtin/stable" (the
1361
+ # default if unset) and "builtin/latest".
1400
1362
  # Corresponds to the JSON property `model`
1401
1363
  # @return [String]
1402
1364
  attr_accessor :model
1403
1365
 
1404
- # Whether the video has been shot from a stationary (i.e., non-moving)
1405
- # camera. When set to true, might improve detection accuracy for moving
1406
- # objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
1366
+ # Whether the video has been shot from a stationary (i.e., non-moving) camera.
1367
+ # When set to true, might improve detection accuracy for moving objects. Should
1368
+ # be used with `SHOT_AND_FRAME_MODE` enabled.
1407
1369
  # Corresponds to the JSON property `stationaryCamera`
1408
1370
  # @return [Boolean]
1409
1371
  attr_accessor :stationary_camera
1410
1372
  alias_method :stationary_camera?, :stationary_camera
1411
1373
 
1412
- # The confidence threshold we perform filtering on the labels from
1413
- # video-level and shot-level detections. If not set, it's set to 0.3 by
1414
- # default. The valid range for this threshold is [0.1, 0.9]. Any value set
1415
- # outside of this range will be clipped.
1416
- # Note: For best results, follow the default threshold. We will update
1417
- # the default threshold everytime when we release a new model.
1374
+ # The confidence threshold we perform filtering on the labels from video-level
1375
+ # and shot-level detections. If not set, it's set to 0.3 by default. The valid
1376
+ # range for this threshold is [0.1, 0.9]. Any value set outside of this range
1377
+ # will be clipped. Note: For best results, follow the default threshold. We will
1378
+ # update the default threshold everytime when we release a new model.
1418
1379
  # Corresponds to the JSON property `videoConfidenceThreshold`
1419
1380
  # @return [Float]
1420
1381
  attr_accessor :video_confidence_threshold
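
The label-detection knobs above are easiest to read side by side. A hedged sketch (not part of the diff); the field names come straight from this hunk, while wiring the config through a VideoContext with a `label_detection_config` field is an assumption about the surrounding message:

    v1beta2 = Google::Apis::VideointelligenceV1beta2

    label_config = v1beta2::GoogleCloudVideointelligenceV1beta2LabelDetectionConfig.new(
      label_detection_mode:       'SHOT_AND_FRAME_MODE',
      model:                      'builtin/stable',
      stationary_camera:          true,  # only meaningful with SHOT_AND_FRAME_MODE
      frame_confidence_threshold: 0.4,   # documented defaults: 0.4 (frame), 0.3 (video/shot)
      video_confidence_threshold: 0.3
    )

    # Assumed wiring: VideoContext#label_detection_config carries this config
    # alongside the AnnotateVideoRequest.
    context = v1beta2::GoogleCloudVideointelligenceV1beta2VideoContext.new(
      label_detection_config: label_config
    )
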
@@ -1493,14 +1454,14 @@ module Google
1493
1454
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity]
1494
1455
  attr_accessor :entity
1495
1456
 
1496
- # All video segments where the recognized logo appears. There might be
1497
- # multiple instances of the same logo class appearing in one VideoSegment.
1457
+ # All video segments where the recognized logo appears. There might be multiple
1458
+ # instances of the same logo class appearing in one VideoSegment.
1498
1459
  # Corresponds to the JSON property `segments`
1499
1460
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment>]
1500
1461
  attr_accessor :segments
1501
1462
 
1502
- # All logo tracks where the recognized logo appears. Each track corresponds
1503
- # to one logo instance appearing in consecutive frames.
1463
+ # All logo tracks where the recognized logo appears. Each track corresponds to
1464
+ # one logo instance appearing in consecutive frames.
1504
1465
  # Corresponds to the JSON property `tracks`
1505
1466
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Track>]
1506
1467
  attr_accessor :tracks
@@ -1517,9 +1478,8 @@ module Google
1517
1478
  end
1518
1479
  end
1519
1480
 
1520
- # Normalized bounding box.
1521
- # The normalized vertex coordinates are relative to the original image.
1522
- # Range: [0, 1].
1481
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1482
+ # original image. Range: [0, 1].
1523
1483
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
1524
1484
  include Google::Apis::Core::Hashable
1525
1485
 
@@ -1557,20 +1517,12 @@ module Google
1557
1517
  end
1558
1518
 
1559
1519
  # Normalized bounding polygon for text (that might not be aligned with axis).
1560
- # Contains list of the corner points in clockwise order starting from
1561
- # top-left corner. For example, for a rectangular bounding box:
1562
- # When the text is horizontal it might look like:
1563
- # 0----1
1564
- # | |
1565
- # 3----2
1566
- # When it's clockwise rotated 180 degrees around the top-left corner it
1567
- # becomes:
1568
- # 2----3
1569
- # | |
1570
- # 1----0
1571
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1572
- # than 0, or greater than 1 due to trignometric calculations for location of
1573
- # the box.
1520
+ # Contains list of the corner points in clockwise order starting from top-left
1521
+ # corner. For example, for a rectangular bounding box: When the text is
1522
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1523
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1524
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1525
+ # or greater than 1 due to trignometric calculations for location of the box.
1574
1526
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
1575
1527
  include Google::Apis::Core::Hashable
1576
1528
 
@@ -1589,9 +1541,8 @@ module Google
1589
1541
  end
1590
1542
  end
1591
1543
 
1592
- # A vertex represents a 2D point in the image.
1593
- # NOTE: the normalized vertex coordinates are relative to the original image
1594
- # and range from 0 to 1.
1544
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1545
+ # coordinates are relative to the original image and range from 0 to 1.
1595
1546
  class GoogleCloudVideointelligenceV1beta2NormalizedVertex
1596
1547
  include Google::Apis::Core::Hashable
1597
1548
 
@@ -1630,10 +1581,10 @@ module Google
1630
1581
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity]
1631
1582
  attr_accessor :entity
1632
1583
 
1633
- # Information corresponding to all frames where this object track appears.
1634
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
1635
- # messages in frames.
1636
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
1584
+ # Information corresponding to all frames where this object track appears. Non-
1585
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
1586
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
1587
+ # frames.
1637
1588
  # Corresponds to the JSON property `frames`
1638
1589
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
1639
1590
  attr_accessor :frames
@@ -1643,12 +1594,11 @@ module Google
1643
1594
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment]
1644
1595
  attr_accessor :segment
1645
1596
 
1646
- # Streaming mode ONLY.
1647
- # In streaming mode, we do not know the end time of a tracked object
1648
- # before it is completed. Hence, there is no VideoSegment info returned.
1649
- # Instead, we provide a unique identifiable integer track_id so that
1650
- # the customers can correlate the results of the ongoing
1651
- # ObjectTrackAnnotation of the same track_id over time.
1597
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
1598
+ # tracked object before it is completed. Hence, there is no VideoSegment info
1599
+ # returned. Instead, we provide a unique identifiable integer track_id so that
1600
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
1601
+ # of the same track_id over time.
1652
1602
  # Corresponds to the JSON property `trackId`
1653
1603
  # @return [Fixnum]
1654
1604
  attr_accessor :track_id
@@ -1677,9 +1627,8 @@ module Google
1677
1627
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingConfig
1678
1628
  include Google::Apis::Core::Hashable
1679
1629
 
1680
- # Model to use for object tracking.
1681
- # Supported values: "builtin/stable" (the default if unset) and
1682
- # "builtin/latest".
1630
+ # Model to use for object tracking. Supported values: "builtin/stable" (the
1631
+ # default if unset) and "builtin/latest".
1683
1632
  # Corresponds to the JSON property `model`
1684
1633
  # @return [String]
1685
1634
  attr_accessor :model
@@ -1699,9 +1648,8 @@ module Google
1699
1648
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
1700
1649
  include Google::Apis::Core::Hashable
1701
1650
 
1702
- # Normalized bounding box.
1703
- # The normalized vertex coordinates are relative to the original image.
1704
- # Range: [0, 1].
1651
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1652
+ # original image. Range: [0, 1].
1705
1653
  # Corresponds to the JSON property `normalizedBoundingBox`
1706
1654
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1707
1655
  attr_accessor :normalized_bounding_box
@@ -1726,9 +1674,8 @@ module Google
1726
1674
  class GoogleCloudVideointelligenceV1beta2ShotChangeDetectionConfig
1727
1675
  include Google::Apis::Core::Hashable
1728
1676
 
1729
- # Model to use for shot change detection.
1730
- # Supported values: "builtin/stable" (the default if unset) and
1731
- # "builtin/latest".
1677
+ # Model to use for shot change detection. Supported values: "builtin/stable" (
1678
+ # the default if unset) and "builtin/latest".
1732
1679
  # Corresponds to the JSON property `model`
1733
1680
  # @return [String]
1734
1681
  attr_accessor :model
@@ -1748,12 +1695,12 @@ module Google
1748
1695
  class GoogleCloudVideointelligenceV1beta2SpeechContext
1749
1696
  include Google::Apis::Core::Hashable
1750
1697
 
1751
- # Optional. A list of strings containing words and phrases "hints" so that
1752
- # the speech recognition is more likely to recognize them. This can be used
1753
- # to improve the accuracy for specific words and phrases, for example, if
1754
- # specific commands are typically spoken by the user. This can also be used
1755
- # to add additional words to the vocabulary of the recognizer. See
1756
- # [usage limits](https://cloud.google.com/speech/limits#content).
1698
+ # Optional. A list of strings containing words and phrases "hints" so that the
1699
+ # speech recognition is more likely to recognize them. This can be used to
1700
+ # improve the accuracy for specific words and phrases, for example, if specific
1701
+ # commands are typically spoken by the user. This can also be used to add
1702
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
1703
+ # //cloud.google.com/speech/limits#content).
1757
1704
  # Corresponds to the JSON property `phrases`
1758
1705
  # @return [Array<String>]
1759
1706
  attr_accessor :phrases
@@ -1774,10 +1721,10 @@ module Google
1774
1721
 
1775
1722
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
1776
1723
  # indicates an estimated greater likelihood that the recognized words are
1777
- # correct. This field is set only for the top alternative.
1778
- # This field is not guaranteed to be accurate and users should not rely on it
1779
- # to be always provided.
1780
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1724
+ # correct. This field is set only for the top alternative. This field is not
1725
+ # guaranteed to be accurate and users should not rely on it to be always
1726
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1727
+ # not set.
1781
1728
  # Corresponds to the JSON property `confidence`
1782
1729
  # @return [Float]
1783
1730
  attr_accessor :confidence
@@ -1788,8 +1735,8 @@ module Google
1788
1735
  attr_accessor :transcript
1789
1736
 
1790
1737
  # Output only. A list of word-specific information for each recognized word.
1791
- # Note: When `enable_speaker_diarization` is set to true, you will see all
1792
- # the words from the beginning of the audio.
1738
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
1739
+ # words from the beginning of the audio.
1793
1740
  # Corresponds to the JSON property `words`
1794
1741
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2WordInfo>]
1795
1742
  attr_accessor :words
@@ -1810,18 +1757,17 @@ module Google
1810
1757
  class GoogleCloudVideointelligenceV1beta2SpeechTranscription
1811
1758
  include Google::Apis::Core::Hashable
1812
1759
 
1813
- # May contain one or more recognition hypotheses (up to the maximum specified
1814
- # in `max_alternatives`). These alternatives are ordered in terms of
1815
- # accuracy, with the top (first) alternative being the most probable, as
1816
- # ranked by the recognizer.
1760
+ # May contain one or more recognition hypotheses (up to the maximum specified in
1761
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
1762
+ # the top (first) alternative being the most probable, as ranked by the
1763
+ # recognizer.
1817
1764
  # Corresponds to the JSON property `alternatives`
1818
1765
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
1819
1766
  attr_accessor :alternatives
1820
1767
 
1821
1768
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
1822
- # language tag of
1823
- # the language in this result. This language code was detected to have the
1824
- # most likelihood of being spoken in the audio.
1769
+ # language tag of the language in this result. This language code was detected
1770
+ # to have the most likelihood of being spoken in the audio.
1825
1771
  # Corresponds to the JSON property `languageCode`
1826
1772
  # @return [String]
1827
1773
  attr_accessor :language_code
@@ -1848,66 +1794,62 @@ module Google
1848
1794
  attr_accessor :audio_tracks
1849
1795
 
1850
1796
  # Optional. If set, specifies the estimated number of speakers in the
1851
- # conversation.
1852
- # If not set, defaults to '2'.
1853
- # Ignored unless enable_speaker_diarization is set to true.
1797
+ # conversation. If not set, defaults to '2'. Ignored unless
1798
+ # enable_speaker_diarization is set to true.
1854
1799
  # Corresponds to the JSON property `diarizationSpeakerCount`
1855
1800
  # @return [Fixnum]
1856
1801
  attr_accessor :diarization_speaker_count
1857
1802
 
1858
- # Optional. If 'true', adds punctuation to recognition result hypotheses.
1859
- # This feature is only available in select languages. Setting this for
1860
- # requests in other languages has no effect at all. The default 'false' value
1861
- # does not add punctuation to result hypotheses. NOTE: "This is currently
1862
- # offered as an experimental service, complimentary to all users. In the
1863
- # future this may be exclusively available as a premium feature."
1803
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
1804
+ # feature is only available in select languages. Setting this for requests in
1805
+ # other languages has no effect at all. The default 'false' value does not add
1806
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
1807
+ # experimental service, complimentary to all users. In the future this may be
1808
+ # exclusively available as a premium feature."
1864
1809
  # Corresponds to the JSON property `enableAutomaticPunctuation`
1865
1810
  # @return [Boolean]
1866
1811
  attr_accessor :enable_automatic_punctuation
1867
1812
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation
1868
1813
 
1869
- # Optional. If 'true', enables speaker detection for each recognized word in
1870
- # the top alternative of the recognition result using a speaker_tag provided
1871
- # in the WordInfo.
1872
- # Note: When this is true, we send all the words from the beginning of the
1873
- # audio for the top alternative in every consecutive response.
1874
- # This is done in order to improve our speaker tags as our models learn to
1875
- # identify the speakers in the conversation over time.
1814
+ # Optional. If 'true', enables speaker detection for each recognized word in the
1815
+ # top alternative of the recognition result using a speaker_tag provided in the
1816
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
1817
+ # the audio for the top alternative in every consecutive response. This is done
1818
+ # in order to improve our speaker tags as our models learn to identify the
1819
+ # speakers in the conversation over time.
1876
1820
  # Corresponds to the JSON property `enableSpeakerDiarization`
1877
1821
  # @return [Boolean]
1878
1822
  attr_accessor :enable_speaker_diarization
1879
1823
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization
1880
1824
 
1881
1825
  # Optional. If `true`, the top result includes a list of words and the
1882
- # confidence for those words. If `false`, no word-level confidence
1883
- # information is returned. The default is `false`.
1826
+ # confidence for those words. If `false`, no word-level confidence information
1827
+ # is returned. The default is `false`.
1884
1828
  # Corresponds to the JSON property `enableWordConfidence`
1885
1829
  # @return [Boolean]
1886
1830
  attr_accessor :enable_word_confidence
1887
1831
  alias_method :enable_word_confidence?, :enable_word_confidence
1888
1832
 
1889
- # Optional. If set to `true`, the server will attempt to filter out
1890
- # profanities, replacing all but the initial character in each filtered word
1891
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
1892
- # won't be filtered out.
1833
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
1834
+ # replacing all but the initial character in each filtered word with asterisks,
1835
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
1893
1836
  # Corresponds to the JSON property `filterProfanity`
1894
1837
  # @return [Boolean]
1895
1838
  attr_accessor :filter_profanity
1896
1839
  alias_method :filter_profanity?, :filter_profanity
1897
1840
 
1898
- # Required. *Required* The language of the supplied audio as a
1899
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
1900
- # Example: "en-US".
1901
- # See [Language Support](https://cloud.google.com/speech/docs/languages)
1902
- # for a list of the currently supported language codes.
1841
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
1842
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
1843
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
1844
+ # of the currently supported language codes.
1903
1845
  # Corresponds to the JSON property `languageCode`
1904
1846
  # @return [String]
1905
1847
  attr_accessor :language_code
1906
1848
 
1907
1849
  # Optional. Maximum number of recognition hypotheses to be returned.
1908
1850
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
1909
- # within each `SpeechTranscription`. The server may return fewer than
1910
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
1851
+ # within each `SpeechTranscription`. The server may return fewer than `
1852
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
1911
1853
  # return a maximum of one. If omitted, will return a maximum of one.
1912
1854
  # Corresponds to the JSON property `maxAlternatives`
1913
1855
  # @return [Fixnum]
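
Note: the hunk above only rewraps the documentation for the speech-transcription fields (diarization, punctuation, word confidence, profanity filtering, language code, max alternatives). As a minimal sketch of how these fields might be populated from Ruby, assuming the gem's usual require path and that they belong to the v1beta2 SpeechTranscriptionConfig class (the class name is inferred from the file's naming pattern, not shown in this hunk):

require 'google/apis/videointelligence_v1beta2'

vi = Google::Apis::VideointelligenceV1beta2

# Field names mirror the attr_accessor declarations above; the noted defaults
# (2 speakers, punctuation off, a single alternative) come from that documentation.
speech_config = vi::GoogleCloudVideointelligenceV1beta2SpeechTranscriptionConfig.new(
  language_code: 'en-US',              # required BCP-47 language tag
  enable_speaker_diarization: true,    # adds speaker_tag to each WordInfo
  diarization_speaker_count: 2,        # ignored unless diarization is enabled
  enable_automatic_punctuation: true,  # experimental, select languages only
  enable_word_confidence: true,
  filter_profanity: false,
  max_alternatives: 1                  # 0 or 1 both return a single hypothesis
)
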
@@ -1974,16 +1916,15 @@ module Google
1974
1916
  include Google::Apis::Core::Hashable
1975
1917
 
1976
1918
  # Language hint can be specified if the language to be detected is known a
1977
- # priori. It can increase the accuracy of the detection. Language hint must
1978
- # be language code in BCP-47 format.
1979
- # Automatic language detection is performed if no hint is provided.
1919
+ # priori. It can increase the accuracy of the detection. Language hint must be
1920
+ # language code in BCP-47 format. Automatic language detection is performed if
1921
+ # no hint is provided.
1980
1922
  # Corresponds to the JSON property `languageHints`
1981
1923
  # @return [Array<String>]
1982
1924
  attr_accessor :language_hints
1983
1925
 
1984
- # Model to use for text detection.
1985
- # Supported values: "builtin/stable" (the default if unset) and
1986
- # "builtin/latest".
1926
+ # Model to use for text detection. Supported values: "builtin/stable" (the
1927
+ # default if unset) and "builtin/latest".
1987
1928
  # Corresponds to the JSON property `model`
1988
1929
  # @return [String]
1989
1930
  attr_accessor :model
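
For the OCR tuning fields rewrapped above (language_hints, model), a hedged sketch; the TextDetectionConfig class name is inferred from the file's naming pattern rather than shown in this hunk:

require 'google/apis/videointelligence_v1beta2'

vi = Google::Apis::VideointelligenceV1beta2

# Hints must be BCP-47 codes; the model is either "builtin/stable" (the
# default when unset) or "builtin/latest".
text_config = vi::GoogleCloudVideointelligenceV1beta2TextDetectionConfig.new(
  language_hints: ['en', 'fr'],
  model: 'builtin/latest'
)
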
@@ -1999,27 +1940,19 @@ module Google
1999
1940
  end
2000
1941
  end
2001
1942
 
2002
- # Video frame level annotation results for text annotation (OCR).
2003
- # Contains information regarding timestamp and bounding box locations for the
2004
- # frames containing detected OCR text snippets.
1943
+ # Video frame level annotation results for text annotation (OCR). Contains
1944
+ # information regarding timestamp and bounding box locations for the frames
1945
+ # containing detected OCR text snippets.
2005
1946
  class GoogleCloudVideointelligenceV1beta2TextFrame
2006
1947
  include Google::Apis::Core::Hashable
2007
1948
 
2008
1949
  # Normalized bounding polygon for text (that might not be aligned with axis).
2009
- # Contains list of the corner points in clockwise order starting from
2010
- # top-left corner. For example, for a rectangular bounding box:
2011
- # When the text is horizontal it might look like:
2012
- # 0----1
2013
- # | |
2014
- # 3----2
2015
- # When it's clockwise rotated 180 degrees around the top-left corner it
2016
- # becomes:
2017
- # 2----3
2018
- # | |
2019
- # 1----0
2020
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2021
- # than 0, or greater than 1 due to trignometric calculations for location of
2022
- # the box.
1950
+ # Contains list of the corner points in clockwise order starting from top-left
1951
+ # corner. For example, for a rectangular bounding box: When the text is
1952
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1953
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1954
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1955
+ # or greater than 1 due to trignometric calculations for location of the box.
2023
1956
  # Corresponds to the JSON property `rotatedBoundingBox`
2024
1957
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
2025
1958
  attr_accessor :rotated_bounding_box
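
To illustrate the vertex-order note above, a small reader for the rotated bounding box of a text frame. The `vertices`, `x`, and `y` accessors are not part of this hunk and are assumed here to follow the API's NormalizedVertex shape (normalized to the [0, 1] image range, occasionally outside it for rotated boxes, as the comment explains):

def describe_text_frame(text_frame)
  poly = text_frame.rotated_bounding_box
  # Vertex order stays (0, 1, 2, 3) clockwise from the original top-left
  # corner, regardless of how the text is rotated.
  corners = poly.vertices.map { |v| [v.x, v.y] }
  puts "text box corners (clockwise from original top-left): #{corners.inspect}"
end
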
@@ -2072,9 +2005,8 @@ module Google
2072
2005
  end
2073
2006
  end
2074
2007
 
2075
- # For tracking related features.
2076
- # An object at time_offset with attributes, and located with
2077
- # normalized_bounding_box.
2008
+ # For tracking related features. An object at time_offset with attributes, and
2009
+ # located with normalized_bounding_box.
2078
2010
  class GoogleCloudVideointelligenceV1beta2TimestampedObject
2079
2011
  include Google::Apis::Core::Hashable
2080
2012
 
@@ -2088,15 +2020,14 @@ module Google
2088
2020
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
2089
2021
  attr_accessor :landmarks
2090
2022
 
2091
- # Normalized bounding box.
2092
- # The normalized vertex coordinates are relative to the original image.
2093
- # Range: [0, 1].
2023
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2024
+ # original image. Range: [0, 1].
2094
2025
  # Corresponds to the JSON property `normalizedBoundingBox`
2095
2026
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
2096
2027
  attr_accessor :normalized_bounding_box
2097
2028
 
2098
- # Time-offset, relative to the beginning of the video,
2099
- # corresponding to the video frame for this object.
2029
+ # Time-offset, relative to the beginning of the video, corresponding to the
2030
+ # video frame for this object.
2100
2031
  # Corresponds to the JSON property `timeOffset`
2101
2032
  # @return [String]
2102
2033
  attr_accessor :time_offset
@@ -2155,20 +2086,19 @@ module Google
2155
2086
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
2156
2087
  include Google::Apis::Core::Hashable
2157
2088
 
2158
- # Specifies which feature is being tracked if the request contains more than
2159
- # one feature.
2089
+ # Specifies which feature is being tracked if the request contains more than one
2090
+ # feature.
2160
2091
  # Corresponds to the JSON property `feature`
2161
2092
  # @return [String]
2162
2093
  attr_accessor :feature
2163
2094
 
2164
- # Video file location in
2165
- # [Cloud Storage](https://cloud.google.com/storage/).
2095
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2166
2096
  # Corresponds to the JSON property `inputUri`
2167
2097
  # @return [String]
2168
2098
  attr_accessor :input_uri
2169
2099
 
2170
- # Approximate percentage processed thus far. Guaranteed to be
2171
- # 100 when fully processed.
2100
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
2101
+ # processed.
2172
2102
  # Corresponds to the JSON property `progressPercent`
2173
2103
  # @return [Fixnum]
2174
2104
  attr_accessor :progress_percent
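
A short sketch of reading the per-video progress fields documented above (feature, input_uri, progress_percent). How the list of progress messages is obtained from the long-running Operation metadata is outside this diff and depends on the calling code:

def report_progress(progress_list)
  progress_list.each do |p|
    # progress_percent is guaranteed to reach 100 once processing finishes.
    puts format('%s on %s: %d%% complete', p.feature, p.input_uri, p.progress_percent || 0)
  end
end
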
@@ -2207,31 +2137,30 @@ module Google
2207
2137
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
2208
2138
  include Google::Apis::Core::Hashable
2209
2139
 
2210
- # The `Status` type defines a logical error model that is suitable for
2211
- # different programming environments, including REST APIs and RPC APIs. It is
2212
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2213
- # three pieces of data: error code, error message, and error details.
2214
- # You can find out more about this error model and how to work with it in the
2215
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2140
+ # The `Status` type defines a logical error model that is suitable for different
2141
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2142
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2143
+ # data: error code, error message, and error details. You can find out more
2144
+ # about this error model and how to work with it in the [API Design Guide](https:
2145
+ # //cloud.google.com/apis/design/errors).
2216
2146
  # Corresponds to the JSON property `error`
2217
2147
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
2218
2148
  attr_accessor :error
2219
2149
 
2220
- # Explicit content annotation (based on per-frame visual signals only).
2221
- # If no explicit content has been detected in a frame, no annotations are
2222
- # present for that frame.
2150
+ # Explicit content annotation (based on per-frame visual signals only). If no
2151
+ # explicit content has been detected in a frame, no annotations are present for
2152
+ # that frame.
2223
2153
  # Corresponds to the JSON property `explicitAnnotation`
2224
2154
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
2225
2155
  attr_accessor :explicit_annotation
2226
2156
 
2227
- # Label annotations on frame level.
2228
- # There is exactly one element for each unique label.
2157
+ # Label annotations on frame level. There is exactly one element for each unique
2158
+ # label.
2229
2159
  # Corresponds to the JSON property `frameLabelAnnotations`
2230
2160
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2231
2161
  attr_accessor :frame_label_annotations
2232
2162
 
2233
- # Video file location in
2234
- # [Cloud Storage](https://cloud.google.com/storage/).
2163
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2235
2164
  # Corresponds to the JSON property `inputUri`
2236
2165
  # @return [String]
2237
2166
  attr_accessor :input_uri
@@ -2258,11 +2187,11 @@ module Google
2258
2187
  attr_accessor :segment_label_annotations
2259
2188
 
2260
2189
  # Presence label annotations on video level or user-specified segment level.
2261
- # There is exactly one element for each unique label. Compared to the
2262
- # existing topical `segment_label_annotations`, this field presents more
2263
- # fine-grained, segment-level labels detected in video content and is made
2264
- # available only when the client sets `LabelDetectionConfig.model` to
2265
- # "builtin/latest" in the request.
2190
+ # There is exactly one element for each unique label. Compared to the existing
2191
+ # topical `segment_label_annotations`, this field presents more fine-grained,
2192
+ # segment-level labels detected in video content and is made available only when
2193
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
2194
+ # request.
2266
2195
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
2267
2196
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2268
2197
  attr_accessor :segment_presence_label_annotations
@@ -2272,17 +2201,17 @@ module Google
2272
2201
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment>]
2273
2202
  attr_accessor :shot_annotations
2274
2203
 
2275
- # Topical label annotations on shot level.
2276
- # There is exactly one element for each unique label.
2204
+ # Topical label annotations on shot level. There is exactly one element for each
2205
+ # unique label.
2277
2206
  # Corresponds to the JSON property `shotLabelAnnotations`
2278
2207
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2279
2208
  attr_accessor :shot_label_annotations
2280
2209
 
2281
2210
  # Presence label annotations on shot level. There is exactly one element for
2282
- # each unique label. Compared to the existing topical
2283
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
2284
- # labels detected in video content and is made available only when the client
2285
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
2211
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
2212
+ # this field presents more fine-grained, shot-level labels detected in video
2213
+ # content and is made available only when the client sets `LabelDetectionConfig.
2214
+ # model` to "builtin/latest" in the request.
2286
2215
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
2287
2216
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2288
2217
  attr_accessor :shot_presence_label_annotations
@@ -2292,9 +2221,8 @@ module Google
2292
2221
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
2293
2222
  attr_accessor :speech_transcriptions
2294
2223
 
2295
- # OCR text detection and tracking.
2296
- # Annotations for list of detected text snippets. Each will have list of
2297
- # frame information associated with it.
2224
+ # OCR text detection and tracking. Annotations for list of detected text
2225
+ # snippets. Each will have list of frame information associated with it.
2298
2226
  # Corresponds to the JSON property `textAnnotations`
2299
2227
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
2300
2228
  attr_accessor :text_annotations
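
A sketch of walking one v1beta2 VideoAnnotationResults message using the accessors listed above. The nested readers (`error.message`, `entity.description`, `text` on a text annotation) are assumptions about fields that fall outside this hunk:

def summarize(results)
  if results.error
    warn "annotation failed: #{results.error.message}"
    return
  end
  (results.segment_label_annotations || []).each do |label|
    puts "segment label: #{label.entity&.description}"
  end
  (results.shot_label_annotations || []).each do |label|
    puts "shot label: #{label.entity&.description}"
  end
  (results.text_annotations || []).each do |ocr|
    puts "detected text: #{ocr.text}"
  end
end
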
@@ -2341,9 +2269,9 @@ module Google
2341
2269
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingConfig]
2342
2270
  attr_accessor :object_tracking_config
2343
2271
 
2344
- # Video segments to annotate. The segments may overlap and are not required
2345
- # to be contiguous or span the whole video. If unspecified, each video is
2346
- # treated as a single segment.
2272
+ # Video segments to annotate. The segments may overlap and are not required to
2273
+ # be contiguous or span the whole video. If unspecified, each video is treated
2274
+ # as a single segment.
2347
2275
  # Corresponds to the JSON property `segments`
2348
2276
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment>]
2349
2277
  attr_accessor :segments
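
Since the segments field above allows overlapping, non-contiguous ranges, here is a hedged example of restricting annotation to two overlapping segments. The VideoContext class name is inferred from the gem's naming, and the "<seconds>s" duration strings are an assumption consistent with the String @return type of the offsets:

require 'google/apis/videointelligence_v1beta2'

vi = Google::Apis::VideointelligenceV1beta2

context = vi::GoogleCloudVideointelligenceV1beta2VideoContext.new(
  segments: [
    vi::GoogleCloudVideointelligenceV1beta2VideoSegment.new(
      start_time_offset: '0s', end_time_offset: '30s'
    ),
    vi::GoogleCloudVideointelligenceV1beta2VideoSegment.new(
      start_time_offset: '20s', end_time_offset: '45s'
    )
  ]
)
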
@@ -2383,14 +2311,14 @@ module Google
2383
2311
  class GoogleCloudVideointelligenceV1beta2VideoSegment
2384
2312
  include Google::Apis::Core::Hashable
2385
2313
 
2386
- # Time-offset, relative to the beginning of the video,
2387
- # corresponding to the end of the segment (inclusive).
2314
+ # Time-offset, relative to the beginning of the video, corresponding to the end
2315
+ # of the segment (inclusive).
2388
2316
  # Corresponds to the JSON property `endTimeOffset`
2389
2317
  # @return [String]
2390
2318
  attr_accessor :end_time_offset
2391
2319
 
2392
- # Time-offset, relative to the beginning of the video,
2393
- # corresponding to the start of the segment (inclusive).
2320
+ # Time-offset, relative to the beginning of the video, corresponding to the
2321
+ # start of the segment (inclusive).
2394
2322
  # Corresponds to the JSON property `startTimeOffset`
2395
2323
  # @return [String]
2396
2324
  attr_accessor :start_time_offset
@@ -2407,41 +2335,41 @@ module Google
2407
2335
  end
2408
2336
 
2409
2337
  # Word-specific information for recognized words. Word information is only
2410
- # included in the response when certain request parameters are set, such
2411
- # as `enable_word_time_offsets`.
2338
+ # included in the response when certain request parameters are set, such as `
2339
+ # enable_word_time_offsets`.
2412
2340
  class GoogleCloudVideointelligenceV1beta2WordInfo
2413
2341
  include Google::Apis::Core::Hashable
2414
2342
 
2415
2343
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2416
2344
  # indicates an estimated greater likelihood that the recognized words are
2417
- # correct. This field is set only for the top alternative.
2418
- # This field is not guaranteed to be accurate and users should not rely on it
2419
- # to be always provided.
2420
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2345
+ # correct. This field is set only for the top alternative. This field is not
2346
+ # guaranteed to be accurate and users should not rely on it to be always
2347
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2348
+ # not set.
2421
2349
  # Corresponds to the JSON property `confidence`
2422
2350
  # @return [Float]
2423
2351
  attr_accessor :confidence
2424
2352
 
2425
- # Time offset relative to the beginning of the audio, and
2426
- # corresponding to the end of the spoken word. This field is only set if
2427
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2428
- # experimental feature and the accuracy of the time offset can vary.
2353
+ # Time offset relative to the beginning of the audio, and corresponding to the
2354
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
2355
+ # true` and only in the top hypothesis. This is an experimental feature and the
2356
+ # accuracy of the time offset can vary.
2429
2357
  # Corresponds to the JSON property `endTime`
2430
2358
  # @return [String]
2431
2359
  attr_accessor :end_time
2432
2360
 
2433
- # Output only. A distinct integer value is assigned for every speaker within
2434
- # the audio. This field specifies which one of those speakers was detected to
2435
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
2436
- # and is only set if speaker diarization is enabled.
2361
+ # Output only. A distinct integer value is assigned for every speaker within the
2362
+ # audio. This field specifies which one of those speakers was detected to have
2363
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
2364
+ # only set if speaker diarization is enabled.
2437
2365
  # Corresponds to the JSON property `speakerTag`
2438
2366
  # @return [Fixnum]
2439
2367
  attr_accessor :speaker_tag
2440
2368
 
2441
- # Time offset relative to the beginning of the audio, and
2442
- # corresponding to the start of the spoken word. This field is only set if
2443
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2444
- # experimental feature and the accuracy of the time offset can vary.
2369
+ # Time offset relative to the beginning of the audio, and corresponding to the
2370
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
2371
+ # true` and only in the top hypothesis. This is an experimental feature and the
2372
+ # accuracy of the time offset can vary.
2445
2373
  # Corresponds to the JSON property `startTime`
2446
2374
  # @return [String]
2447
2375
  attr_accessor :start_time
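
A minimal sketch of printing the word-level detail documented above. The `word` reader is an assumption (it is not part of this hunk); the other readers mirror the attr_accessor declarations. speaker_tag is only populated when speaker diarization is enabled, and confidence defaults to 0.0 when unset:

def dump_words(word_infos)
  word_infos.each do |w|
    puts format('[%s - %s] speaker %s: %s (%.2f)',
                w.start_time, w.end_time, w.speaker_tag, w.word, w.confidence || 0.0)
  end
end
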
@@ -2465,9 +2393,9 @@ module Google
2465
2393
  end
2466
2394
  end
2467
2395
 
2468
- # Video annotation progress. Included in the `metadata`
2469
- # field of the `Operation` returned by the `GetOperation`
2470
- # call of the `google::longrunning::Operations` service.
2396
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
2397
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2398
+ # service.
2471
2399
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
2472
2400
  include Google::Apis::Core::Hashable
2473
2401
 
@@ -2486,9 +2414,9 @@ module Google
2486
2414
  end
2487
2415
  end
2488
2416
 
2489
- # Video annotation response. Included in the `response`
2490
- # field of the `Operation` returned by the `GetOperation`
2491
- # call of the `google::longrunning::Operations` service.
2417
+ # Video annotation response. Included in the `response` field of the `Operation`
2418
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2419
+ # service.
2492
2420
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
2493
2421
  include Google::Apis::Core::Hashable
2494
2422
 
@@ -2516,14 +2444,14 @@ module Google
2516
2444
  # @return [Float]
2517
2445
  attr_accessor :confidence
2518
2446
 
2519
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
2520
- # A full list of supported type names will be provided in the document.
2447
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
2448
+ # full list of supported type names will be provided in the document.
2521
2449
  # Corresponds to the JSON property `name`
2522
2450
  # @return [String]
2523
2451
  attr_accessor :name
2524
2452
 
2525
- # Text value of the detection result. For example, the value for "HairColor"
2526
- # can be "black", "blonde", etc.
2453
+ # Text value of the detection result. For example, the value for "HairColor" can
2454
+ # be "black", "blonde", etc.
2527
2455
  # Corresponds to the JSON property `value`
2528
2456
  # @return [String]
2529
2457
  attr_accessor :value
@@ -2555,9 +2483,8 @@ module Google
2555
2483
  # @return [String]
2556
2484
  attr_accessor :name
2557
2485
 
2558
- # A vertex represents a 2D point in the image.
2559
- # NOTE: the normalized vertex coordinates are relative to the original image
2560
- # and range from 0 to 1.
2486
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2487
+ # coordinates are relative to the original image and range from 0 to 1.
2561
2488
  # Corresponds to the JSON property `point`
2562
2489
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
2563
2490
  attr_accessor :point
@@ -2583,8 +2510,7 @@ module Google
2583
2510
  # @return [String]
2584
2511
  attr_accessor :description
2585
2512
 
2586
- # Opaque entity ID. Some IDs may be available in
2587
- # [Google Knowledge Graph Search
2513
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
2588
2514
  # API](https://developers.google.com/knowledge-graph/).
2589
2515
  # Corresponds to the JSON property `entityId`
2590
2516
  # @return [String]
@@ -2607,9 +2533,9 @@ module Google
2607
2533
  end
2608
2534
  end
2609
2535
 
2610
- # Explicit content annotation (based on per-frame visual signals only).
2611
- # If no explicit content has been detected in a frame, no annotations are
2612
- # present for that frame.
2536
+ # Explicit content annotation (based on per-frame visual signals only). If no
2537
+ # explicit content has been detected in a frame, no annotations are present for
2538
+ # that frame.
2613
2539
  class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
2614
2540
  include Google::Apis::Core::Hashable
2615
2541
 
@@ -2664,10 +2590,9 @@ module Google
2664
2590
  class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
2665
2591
  include Google::Apis::Core::Hashable
2666
2592
 
2667
- # Common categories for the detected entity.
2668
- # For example, when the label is `Terrier`, the category is likely `dog`. And
2669
- # in some cases there might be more than one categories e.g., `Terrier` could
2670
- # also be a `pet`.
2593
+ # Common categories for the detected entity. For example, when the label is `
2594
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
2595
+ # than one categories e.g., `Terrier` could also be a `pet`.
2671
2596
  # Corresponds to the JSON property `categoryEntities`
2672
2597
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity>]
2673
2598
  attr_accessor :category_entities
@@ -2766,14 +2691,14 @@ module Google
2766
2691
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity]
2767
2692
  attr_accessor :entity
2768
2693
 
2769
- # All video segments where the recognized logo appears. There might be
2770
- # multiple instances of the same logo class appearing in one VideoSegment.
2694
+ # All video segments where the recognized logo appears. There might be multiple
2695
+ # instances of the same logo class appearing in one VideoSegment.
2771
2696
  # Corresponds to the JSON property `segments`
2772
2697
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
2773
2698
  attr_accessor :segments
2774
2699
 
2775
- # All logo tracks where the recognized logo appears. Each track corresponds
2776
- # to one logo instance appearing in consecutive frames.
2700
+ # All logo tracks where the recognized logo appears. Each track corresponds to
2701
+ # one logo instance appearing in consecutive frames.
2777
2702
  # Corresponds to the JSON property `tracks`
2778
2703
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Track>]
2779
2704
  attr_accessor :tracks
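
For the logo recognition fields above (entity, segments, tracks), a hedged summary helper. Track-level inspection is omitted because the Track fields are not part of this hunk; `entity.description` is assumed to be the human-readable logo name:

def summarize_logo(logo)
  name = logo.entity&.description
  (logo.segments || []).each do |seg|
    puts "#{name}: #{seg.start_time_offset} -> #{seg.end_time_offset}"
  end
  puts "#{name}: #{(logo.tracks || []).size} track(s)"
end
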
@@ -2790,9 +2715,8 @@ module Google
2790
2715
  end
2791
2716
  end
2792
2717
 
2793
- # Normalized bounding box.
2794
- # The normalized vertex coordinates are relative to the original image.
2795
- # Range: [0, 1].
2718
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2719
+ # original image. Range: [0, 1].
2796
2720
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
2797
2721
  include Google::Apis::Core::Hashable
2798
2722
 
@@ -2830,20 +2754,12 @@ module Google
2830
2754
  end
2831
2755
 
2832
2756
  # Normalized bounding polygon for text (that might not be aligned with axis).
2833
- # Contains list of the corner points in clockwise order starting from
2834
- # top-left corner. For example, for a rectangular bounding box:
2835
- # When the text is horizontal it might look like:
2836
- # 0----1
2837
- # | |
2838
- # 3----2
2839
- # When it's clockwise rotated 180 degrees around the top-left corner it
2840
- # becomes:
2841
- # 2----3
2842
- # | |
2843
- # 1----0
2844
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2845
- # than 0, or greater than 1 due to trignometric calculations for location of
2846
- # the box.
2757
+ # Contains list of the corner points in clockwise order starting from top-left
2758
+ # corner. For example, for a rectangular bounding box: When the text is
2759
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2760
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2761
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2762
+ # or greater than 1 due to trignometric calculations for location of the box.
2847
2763
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
2848
2764
  include Google::Apis::Core::Hashable
2849
2765
 
@@ -2862,9 +2778,8 @@ module Google
2862
2778
  end
2863
2779
  end
2864
2780
 
2865
- # A vertex represents a 2D point in the image.
2866
- # NOTE: the normalized vertex coordinates are relative to the original image
2867
- # and range from 0 to 1.
2781
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2782
+ # coordinates are relative to the original image and range from 0 to 1.
2868
2783
  class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
2869
2784
  include Google::Apis::Core::Hashable
2870
2785
 
@@ -2903,10 +2818,10 @@ module Google
2903
2818
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity]
2904
2819
  attr_accessor :entity
2905
2820
 
2906
- # Information corresponding to all frames where this object track appears.
2907
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
2908
- # messages in frames.
2909
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
2821
+ # Information corresponding to all frames where this object track appears. Non-
2822
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
2823
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
2824
+ # frames.
2910
2825
  # Corresponds to the JSON property `frames`
2911
2826
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
2912
2827
  attr_accessor :frames
@@ -2916,12 +2831,11 @@ module Google
2916
2831
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
2917
2832
  attr_accessor :segment
2918
2833
 
2919
- # Streaming mode ONLY.
2920
- # In streaming mode, we do not know the end time of a tracked object
2921
- # before it is completed. Hence, there is no VideoSegment info returned.
2922
- # Instead, we provide a unique identifiable integer track_id so that
2923
- # the customers can correlate the results of the ongoing
2924
- # ObjectTrackAnnotation of the same track_id over time.
2834
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
2835
+ # tracked object before it is completed. Hence, there is no VideoSegment info
2836
+ # returned. Instead, we provide a unique identifiable integer track_id so that
2837
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
2838
+ # of the same track_id over time.
2925
2839
  # Corresponds to the JSON property `trackId`
2926
2840
  # @return [Fixnum]
2927
2841
  attr_accessor :track_id
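
A sketch that follows the batch-versus-streaming distinction described above: in batch mode an object track carries a VideoSegment, while in streaming mode only track_id is available for correlating consecutive responses. `entity.description` is assumed to be the object label:

def describe_track(track)
  where =
    if track.segment
      "batch segment #{track.segment.start_time_offset}..#{track.segment.end_time_offset}"
    else
      "streaming track_id #{track.track_id}"
    end
  puts "#{track.entity&.description}: #{(track.frames || []).size} frame(s), #{where}"
end
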
@@ -2951,9 +2865,8 @@ module Google
2951
2865
  class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
2952
2866
  include Google::Apis::Core::Hashable
2953
2867
 
2954
- # Normalized bounding box.
2955
- # The normalized vertex coordinates are relative to the original image.
2956
- # Range: [0, 1].
2868
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2869
+ # original image. Range: [0, 1].
2957
2870
  # Corresponds to the JSON property `normalizedBoundingBox`
2958
2871
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
2959
2872
  attr_accessor :normalized_bounding_box
@@ -2980,10 +2893,10 @@ module Google
2980
2893
 
2981
2894
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2982
2895
  # indicates an estimated greater likelihood that the recognized words are
2983
- # correct. This field is set only for the top alternative.
2984
- # This field is not guaranteed to be accurate and users should not rely on it
2985
- # to be always provided.
2986
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2896
+ # correct. This field is set only for the top alternative. This field is not
2897
+ # guaranteed to be accurate and users should not rely on it to be always
2898
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2899
+ # not set.
2987
2900
  # Corresponds to the JSON property `confidence`
2988
2901
  # @return [Float]
2989
2902
  attr_accessor :confidence
@@ -2994,8 +2907,8 @@ module Google
2994
2907
  attr_accessor :transcript
2995
2908
 
2996
2909
  # Output only. A list of word-specific information for each recognized word.
2997
- # Note: When `enable_speaker_diarization` is set to true, you will see all
2998
- # the words from the beginning of the audio.
2910
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
2911
+ # words from the beginning of the audio.
2999
2912
  # Corresponds to the JSON property `words`
3000
2913
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
3001
2914
  attr_accessor :words
@@ -3016,18 +2929,17 @@ module Google
3016
2929
  class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
3017
2930
  include Google::Apis::Core::Hashable
3018
2931
 
3019
- # May contain one or more recognition hypotheses (up to the maximum specified
3020
- # in `max_alternatives`). These alternatives are ordered in terms of
3021
- # accuracy, with the top (first) alternative being the most probable, as
3022
- # ranked by the recognizer.
2932
+ # May contain one or more recognition hypotheses (up to the maximum specified in
2933
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
2934
+ # the top (first) alternative being the most probable, as ranked by the
2935
+ # recognizer.
3023
2936
  # Corresponds to the JSON property `alternatives`
3024
2937
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
3025
2938
  attr_accessor :alternatives
3026
2939
 
3027
2940
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
3028
- # language tag of
3029
- # the language in this result. This language code was detected to have the
3030
- # most likelihood of being spoken in the audio.
2941
+ # language tag of the language in this result. This language code was detected
2942
+ # to have the most likelihood of being spoken in the audio.
3031
2943
  # Corresponds to the JSON property `languageCode`
3032
2944
  # @return [String]
3033
2945
  attr_accessor :language_code
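
Because alternatives are ordered most-probable first (per the documentation above), picking the top hypothesis is just taking the first element; a small sketch using the `alternatives`, `language_code`, `transcript`, and `confidence` readers from the preceding hunks:

def best_transcript(transcription)
  top = (transcription.alternatives || []).first
  return nil unless top
  puts "[#{transcription.language_code}] #{top.transcript} (confidence #{top.confidence})"
  top.transcript
end
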
@@ -3076,27 +2988,19 @@ module Google
3076
2988
  end
3077
2989
  end
3078
2990
 
3079
- # Video frame level annotation results for text annotation (OCR).
3080
- # Contains information regarding timestamp and bounding box locations for the
3081
- # frames containing detected OCR text snippets.
2991
+ # Video frame level annotation results for text annotation (OCR). Contains
2992
+ # information regarding timestamp and bounding box locations for the frames
2993
+ # containing detected OCR text snippets.
3082
2994
  class GoogleCloudVideointelligenceV1p1beta1TextFrame
3083
2995
  include Google::Apis::Core::Hashable
3084
2996
 
3085
2997
  # Normalized bounding polygon for text (that might not be aligned with axis).
3086
- # Contains list of the corner points in clockwise order starting from
3087
- # top-left corner. For example, for a rectangular bounding box:
3088
- # When the text is horizontal it might look like:
3089
- # 0----1
3090
- # | |
3091
- # 3----2
3092
- # When it's clockwise rotated 180 degrees around the top-left corner it
3093
- # becomes:
3094
- # 2----3
3095
- # | |
3096
- # 1----0
3097
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3098
- # than 0, or greater than 1 due to trignometric calculations for location of
3099
- # the box.
2998
+ # Contains list of the corner points in clockwise order starting from top-left
2999
+ # corner. For example, for a rectangular bounding box: When the text is
3000
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3001
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3002
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3003
+ # or greater than 1 due to trignometric calculations for location of the box.
3100
3004
  # Corresponds to the JSON property `rotatedBoundingBox`
3101
3005
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
3102
3006
  attr_accessor :rotated_bounding_box
@@ -3149,9 +3053,8 @@ module Google
3149
3053
  end
3150
3054
  end
3151
3055
 
3152
- # For tracking related features.
3153
- # An object at time_offset with attributes, and located with
3154
- # normalized_bounding_box.
3056
+ # For tracking related features. An object at time_offset with attributes, and
3057
+ # located with normalized_bounding_box.
3155
3058
  class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
3156
3059
  include Google::Apis::Core::Hashable
3157
3060
 
@@ -3165,15 +3068,14 @@ module Google
3165
3068
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
3166
3069
  attr_accessor :landmarks
3167
3070
 
3168
- # Normalized bounding box.
3169
- # The normalized vertex coordinates are relative to the original image.
3170
- # Range: [0, 1].
3071
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3072
+ # original image. Range: [0, 1].
3171
3073
  # Corresponds to the JSON property `normalizedBoundingBox`
3172
3074
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
3173
3075
  attr_accessor :normalized_bounding_box
3174
3076
 
3175
- # Time-offset, relative to the beginning of the video,
3176
- # corresponding to the video frame for this object.
3077
+ # Time-offset, relative to the beginning of the video, corresponding to the
3078
+ # video frame for this object.
3177
3079
  # Corresponds to the JSON property `timeOffset`
3178
3080
  # @return [String]
3179
3081
  attr_accessor :time_offset
@@ -3232,20 +3134,19 @@ module Google
3232
3134
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
3233
3135
  include Google::Apis::Core::Hashable
3234
3136
 
3235
- # Specifies which feature is being tracked if the request contains more than
3236
- # one feature.
3137
+ # Specifies which feature is being tracked if the request contains more than one
3138
+ # feature.
3237
3139
  # Corresponds to the JSON property `feature`
3238
3140
  # @return [String]
3239
3141
  attr_accessor :feature
3240
3142
 
3241
- # Video file location in
3242
- # [Cloud Storage](https://cloud.google.com/storage/).
3143
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3243
3144
  # Corresponds to the JSON property `inputUri`
3244
3145
  # @return [String]
3245
3146
  attr_accessor :input_uri
3246
3147
 
3247
- # Approximate percentage processed thus far. Guaranteed to be
3248
- # 100 when fully processed.
3148
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
3149
+ # processed.
3249
3150
  # Corresponds to the JSON property `progressPercent`
3250
3151
  # @return [Fixnum]
3251
3152
  attr_accessor :progress_percent
@@ -3284,31 +3185,30 @@ module Google
3284
3185
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
3285
3186
  include Google::Apis::Core::Hashable
3286
3187
 
3287
- # The `Status` type defines a logical error model that is suitable for
3288
- # different programming environments, including REST APIs and RPC APIs. It is
3289
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3290
- # three pieces of data: error code, error message, and error details.
3291
- # You can find out more about this error model and how to work with it in the
3292
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3188
+ # The `Status` type defines a logical error model that is suitable for different
3189
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3190
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3191
+ # data: error code, error message, and error details. You can find out more
3192
+ # about this error model and how to work with it in the [API Design Guide](https:
3193
+ # //cloud.google.com/apis/design/errors).
3293
3194
  # Corresponds to the JSON property `error`
3294
3195
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
3295
3196
  attr_accessor :error
3296
3197
 
3297
- # Explicit content annotation (based on per-frame visual signals only).
3298
- # If no explicit content has been detected in a frame, no annotations are
3299
- # present for that frame.
3198
+ # Explicit content annotation (based on per-frame visual signals only). If no
3199
+ # explicit content has been detected in a frame, no annotations are present for
3200
+ # that frame.
3300
3201
  # Corresponds to the JSON property `explicitAnnotation`
3301
3202
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
3302
3203
  attr_accessor :explicit_annotation
3303
3204
 
3304
- # Label annotations on frame level.
3305
- # There is exactly one element for each unique label.
3205
+ # Label annotations on frame level. There is exactly one element for each unique
3206
+ # label.
3306
3207
  # Corresponds to the JSON property `frameLabelAnnotations`
3307
3208
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3308
3209
  attr_accessor :frame_label_annotations
3309
3210
 
3310
- # Video file location in
3311
- # [Cloud Storage](https://cloud.google.com/storage/).
3211
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3312
3212
  # Corresponds to the JSON property `inputUri`
3313
3213
  # @return [String]
3314
3214
  attr_accessor :input_uri
@@ -3335,11 +3235,11 @@ module Google
3335
3235
  attr_accessor :segment_label_annotations
3336
3236
 
3337
3237
  # Presence label annotations on video level or user-specified segment level.
3338
- # There is exactly one element for each unique label. Compared to the
3339
- # existing topical `segment_label_annotations`, this field presents more
3340
- # fine-grained, segment-level labels detected in video content and is made
3341
- # available only when the client sets `LabelDetectionConfig.model` to
3342
- # "builtin/latest" in the request.
3238
+ # There is exactly one element for each unique label. Compared to the existing
3239
+ # topical `segment_label_annotations`, this field presents more fine-grained,
3240
+ # segment-level labels detected in video content and is made available only when
3241
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
3242
+ # request.
3343
3243
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
3344
3244
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3345
3245
  attr_accessor :segment_presence_label_annotations
@@ -3349,17 +3249,17 @@ module Google
3349
3249
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
3350
3250
  attr_accessor :shot_annotations
3351
3251
 
3352
- # Topical label annotations on shot level.
3353
- # There is exactly one element for each unique label.
3252
+ # Topical label annotations on shot level. There is exactly one element for each
3253
+ # unique label.
3354
3254
  # Corresponds to the JSON property `shotLabelAnnotations`
3355
3255
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3356
3256
  attr_accessor :shot_label_annotations
3357
3257
 
3358
3258
  # Presence label annotations on shot level. There is exactly one element for
3359
- # each unique label. Compared to the existing topical
3360
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
3361
- # labels detected in video content and is made available only when the client
3362
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
3259
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
3260
+ # this field presents more fine-grained, shot-level labels detected in video
3261
+ # content and is made available only when the client sets `LabelDetectionConfig.
3262
+ # model` to "builtin/latest" in the request.
3363
3263
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
3364
3264
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3365
3265
  attr_accessor :shot_presence_label_annotations
@@ -3369,9 +3269,8 @@ module Google
3369
3269
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
3370
3270
  attr_accessor :speech_transcriptions
3371
3271
 
3372
- # OCR text detection and tracking.
3373
- # Annotations for list of detected text snippets. Each will have list of
3374
- # frame information associated with it.
3272
+ # OCR text detection and tracking. Annotations for list of detected text
3273
+ # snippets. Each will have list of frame information associated with it.
3375
3274
  # Corresponds to the JSON property `textAnnotations`
3376
3275
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
3377
3276
  attr_accessor :text_annotations
@@ -3403,14 +3302,14 @@ module Google
3403
3302
  class GoogleCloudVideointelligenceV1p1beta1VideoSegment
3404
3303
  include Google::Apis::Core::Hashable
3405
3304
 
3406
- # Time-offset, relative to the beginning of the video,
3407
- # corresponding to the end of the segment (inclusive).
3305
+ # Time-offset, relative to the beginning of the video, corresponding to the end
3306
+ # of the segment (inclusive).
3408
3307
  # Corresponds to the JSON property `endTimeOffset`
3409
3308
  # @return [String]
3410
3309
  attr_accessor :end_time_offset
3411
3310
 
3412
- # Time-offset, relative to the beginning of the video,
3413
- # corresponding to the start of the segment (inclusive).
3311
+ # Time-offset, relative to the beginning of the video, corresponding to the
3312
+ # start of the segment (inclusive).
3414
3313
  # Corresponds to the JSON property `startTimeOffset`
3415
3314
  # @return [String]
3416
3315
  attr_accessor :start_time_offset
@@ -3427,41 +3326,41 @@ module Google
3427
3326
  end
3428
3327
 
3429
3328
  # Word-specific information for recognized words. Word information is only
3430
- # included in the response when certain request parameters are set, such
3431
- # as `enable_word_time_offsets`.
3329
+ # included in the response when certain request parameters are set, such as `
3330
+ # enable_word_time_offsets`.
3432
3331
  class GoogleCloudVideointelligenceV1p1beta1WordInfo
3433
3332
  include Google::Apis::Core::Hashable
3434
3333
 
3435
3334
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
3436
3335
  # indicates an estimated greater likelihood that the recognized words are
3437
- # correct. This field is set only for the top alternative.
3438
- # This field is not guaranteed to be accurate and users should not rely on it
3439
- # to be always provided.
3440
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3336
+ # correct. This field is set only for the top alternative. This field is not
3337
+ # guaranteed to be accurate and users should not rely on it to be always
3338
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3339
+ # not set.
3441
3340
  # Corresponds to the JSON property `confidence`
3442
3341
  # @return [Float]
3443
3342
  attr_accessor :confidence
3444
3343
 
3445
- # Time offset relative to the beginning of the audio, and
3446
- # corresponding to the end of the spoken word. This field is only set if
3447
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3448
- # experimental feature and the accuracy of the time offset can vary.
3344
+ # Time offset relative to the beginning of the audio, and corresponding to the
3345
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
3346
+ # true` and only in the top hypothesis. This is an experimental feature and the
3347
+ # accuracy of the time offset can vary.
3449
3348
  # Corresponds to the JSON property `endTime`
3450
3349
  # @return [String]
3451
3350
  attr_accessor :end_time
3452
3351
 
3453
- # Output only. A distinct integer value is assigned for every speaker within
3454
- # the audio. This field specifies which one of those speakers was detected to
3455
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
3456
- # and is only set if speaker diarization is enabled.
3352
+ # Output only. A distinct integer value is assigned for every speaker within the
3353
+ # audio. This field specifies which one of those speakers was detected to have
3354
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
3355
+ # only set if speaker diarization is enabled.
3457
3356
  # Corresponds to the JSON property `speakerTag`
3458
3357
  # @return [Fixnum]
3459
3358
  attr_accessor :speaker_tag
3460
3359
 
3461
- # Time offset relative to the beginning of the audio, and
3462
- # corresponding to the start of the spoken word. This field is only set if
3463
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3464
- # experimental feature and the accuracy of the time offset can vary.
3360
+ # Time offset relative to the beginning of the audio, and corresponding to the
3361
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
3362
+ # true` and only in the top hypothesis. This is an experimental feature and the
3363
+ # accuracy of the time offset can vary.
3465
3364
  # Corresponds to the JSON property `startTime`
3466
3365
  # @return [String]
3467
3366
  attr_accessor :start_time
@@ -3485,9 +3384,9 @@ module Google
3485
3384
  end
3486
3385
  end
3487
3386
 
3488
- # Video annotation progress. Included in the `metadata`
3489
- # field of the `Operation` returned by the `GetOperation`
3490
- # call of the `google::longrunning::Operations` service.
3387
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
3388
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3389
+ # service.
3491
3390
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
3492
3391
  include Google::Apis::Core::Hashable
3493
3392
 
@@ -3506,9 +3405,9 @@ module Google
3506
3405
  end
3507
3406
  end
3508
3407
 
3509
- # Video annotation response. Included in the `response`
3510
- # field of the `Operation` returned by the `GetOperation`
3511
- # call of the `google::longrunning::Operations` service.
3408
+ # Video annotation response. Included in the `response` field of the `Operation`
3409
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3410
+ # service.
3512
3411
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
3513
3412
  include Google::Apis::Core::Hashable
3514
3413
 
@@ -3536,14 +3435,14 @@ module Google
3536
3435
  # @return [Float]
3537
3436
  attr_accessor :confidence
3538
3437
 
3539
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
3540
- # A full list of supported type names will be provided in the document.
3438
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
3439
+ # full list of supported type names will be provided in the document.
3541
3440
  # Corresponds to the JSON property `name`
3542
3441
  # @return [String]
3543
3442
  attr_accessor :name
3544
3443
 
3545
- # Text value of the detection result. For example, the value for "HairColor"
3546
- # can be "black", "blonde", etc.
3444
+ # Text value of the detection result. For example, the value for "HairColor" can
3445
+ # be "black", "blonde", etc.
3547
3446
  # Corresponds to the JSON property `value`
3548
3447
  # @return [String]
3549
3448
  attr_accessor :value
@@ -3575,9 +3474,8 @@ module Google
3575
3474
  # @return [String]
3576
3475
  attr_accessor :name
3577
3476
 
3578
- # A vertex represents a 2D point in the image.
3579
- # NOTE: the normalized vertex coordinates are relative to the original image
3580
- # and range from 0 to 1.
3477
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3478
+ # coordinates are relative to the original image and range from 0 to 1.
3581
3479
  # Corresponds to the JSON property `point`
3582
3480
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
3583
3481
  attr_accessor :point
@@ -3603,8 +3501,7 @@ module Google
3603
3501
  # @return [String]
3604
3502
  attr_accessor :description
3605
3503
 
3606
- # Opaque entity ID. Some IDs may be available in
3607
- # [Google Knowledge Graph Search
3504
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
3608
3505
  # API](https://developers.google.com/knowledge-graph/).
3609
3506
  # Corresponds to the JSON property `entityId`
3610
3507
  # @return [String]
@@ -3627,9 +3524,9 @@ module Google
3627
3524
  end
3628
3525
  end
3629
3526
 
3630
- # Explicit content annotation (based on per-frame visual signals only).
3631
- # If no explicit content has been detected in a frame, no annotations are
3632
- # present for that frame.
3527
+ # Explicit content annotation (based on per-frame visual signals only). If no
3528
+ # explicit content has been detected in a frame, no annotations are present for
3529
+ # that frame.
3633
3530
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
3634
3531
  include Google::Apis::Core::Hashable
3635
3532
 
@@ -3684,10 +3581,9 @@ module Google
3684
3581
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
3685
3582
  include Google::Apis::Core::Hashable
3686
3583
 
3687
- # Common categories for the detected entity.
3688
- # For example, when the label is `Terrier`, the category is likely `dog`. And
3689
- # in some cases there might be more than one categories e.g., `Terrier` could
3690
- # also be a `pet`.
3584
+ # Common categories for the detected entity. For example, when the label is `
3585
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
3586
+ # than one categories e.g., `Terrier` could also be a `pet`.
3691
3587
  # Corresponds to the JSON property `categoryEntities`
3692
3588
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Entity>]
3693
3589
  attr_accessor :category_entities
@@ -3786,14 +3682,14 @@ module Google
3786
3682
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Entity]
3787
3683
  attr_accessor :entity
3788
3684
 
3789
- # All video segments where the recognized logo appears. There might be
3790
- # multiple instances of the same logo class appearing in one VideoSegment.
3685
+ # All video segments where the recognized logo appears. There might be multiple
3686
+ # instances of the same logo class appearing in one VideoSegment.
3791
3687
  # Corresponds to the JSON property `segments`
3792
3688
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
3793
3689
  attr_accessor :segments
3794
3690
 
3795
- # All logo tracks where the recognized logo appears. Each track corresponds
3796
- # to one logo instance appearing in consecutive frames.
3691
+ # All logo tracks where the recognized logo appears. Each track corresponds to
3692
+ # one logo instance appearing in consecutive frames.
3797
3693
  # Corresponds to the JSON property `tracks`
3798
3694
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Track>]
3799
3695
  attr_accessor :tracks
@@ -3810,9 +3706,8 @@ module Google
3810
3706
  end
3811
3707
  end
3812
3708
 
3813
- # Normalized bounding box.
3814
- # The normalized vertex coordinates are relative to the original image.
3815
- # Range: [0, 1].
3709
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3710
+ # original image. Range: [0, 1].
3816
3711
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
3817
3712
  include Google::Apis::Core::Hashable
3818
3713
 
@@ -3850,20 +3745,12 @@ module Google
3850
3745
  end
3851
3746
 
3852
3747
  # Normalized bounding polygon for text (that might not be aligned with axis).
3853
- # Contains list of the corner points in clockwise order starting from
3854
- # top-left corner. For example, for a rectangular bounding box:
3855
- # When the text is horizontal it might look like:
3856
- # 0----1
3857
- # | |
3858
- # 3----2
3859
- # When it's clockwise rotated 180 degrees around the top-left corner it
3860
- # becomes:
3861
- # 2----3
3862
- # | |
3863
- # 1----0
3864
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3865
- # than 0, or greater than 1 due to trignometric calculations for location of
3866
- # the box.
3748
+ # Contains list of the corner points in clockwise order starting from top-left
3749
+ # corner. For example, for a rectangular bounding box: When the text is
3750
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3751
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3752
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3753
+ # or greater than 1 due to trignometric calculations for location of the box.
3867
3754
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
3868
3755
  include Google::Apis::Core::Hashable
3869
3756
 
@@ -3882,9 +3769,8 @@ module Google
3882
3769
  end
3883
3770
  end
3884
3771
 
3885
- # A vertex represents a 2D point in the image.
3886
- # NOTE: the normalized vertex coordinates are relative to the original image
3887
- # and range from 0 to 1.
3772
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3773
+ # coordinates are relative to the original image and range from 0 to 1.
3888
3774
  class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
3889
3775
  include Google::Apis::Core::Hashable
3890
3776
 
@@ -3923,10 +3809,10 @@ module Google
3923
3809
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Entity]
3924
3810
  attr_accessor :entity
3925
3811
 
3926
- # Information corresponding to all frames where this object track appears.
3927
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
3928
- # messages in frames.
3929
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
3812
+ # Information corresponding to all frames where this object track appears. Non-
3813
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
3814
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
3815
+ # frames.
3930
3816
  # Corresponds to the JSON property `frames`
3931
3817
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
3932
3818
  attr_accessor :frames
@@ -3936,12 +3822,11 @@ module Google
3936
3822
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
3937
3823
  attr_accessor :segment
3938
3824
 
3939
- # Streaming mode ONLY.
3940
- # In streaming mode, we do not know the end time of a tracked object
3941
- # before it is completed. Hence, there is no VideoSegment info returned.
3942
- # Instead, we provide a unique identifiable integer track_id so that
3943
- # the customers can correlate the results of the ongoing
3944
- # ObjectTrackAnnotation of the same track_id over time.
3825
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
3826
+ # tracked object before it is completed. Hence, there is no VideoSegment info
3827
+ # returned. Instead, we provide a unique identifiable integer track_id so that
3828
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
3829
+ # of the same track_id over time.
3945
3830
  # Corresponds to the JSON property `trackId`
3946
3831
  # @return [Fixnum]
3947
3832
  attr_accessor :track_id
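As the comments above note, `frames` and `track_id` behave differently in batch and streaming modes, and in streaming mode no VideoSegment is returned. A hedged sketch of how a client might branch on that:

# `annotation` stands for one ObjectTrackingAnnotation from a response.
def describe_object_track(annotation)
  if annotation.segment
    # Batch mode: a VideoSegment plus one or more ObjectTrackingFrame entries.
    "#{annotation.frames.length} frame(s) between " \
      "#{annotation.segment.start_time_offset} and #{annotation.segment.end_time_offset}"
  else
    # Streaming mode: no VideoSegment; correlate partial results via track_id.
    "streaming track ##{annotation.track_id}, #{annotation.frames.length} frame(s) so far"
  end
end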
@@ -3971,9 +3856,8 @@ module Google
3971
3856
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
3972
3857
  include Google::Apis::Core::Hashable
3973
3858
 
3974
- # Normalized bounding box.
3975
- # The normalized vertex coordinates are relative to the original image.
3976
- # Range: [0, 1].
3859
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3860
+ # original image. Range: [0, 1].
3977
3861
  # Corresponds to the JSON property `normalizedBoundingBox`
3978
3862
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
3979
3863
  attr_accessor :normalized_bounding_box
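Since the bounding box is normalized to [0, 1], callers usually scale it back to pixel coordinates. Sketch only; it assumes the box exposes the conventional `left`/`top`/`right`/`bottom` floats (those accessors are not visible in this hunk) and that the source frame size is known.

def to_pixel_rect(box, width:, height:)
  {
    x:      (box.left.to_f * width).round,
    y:      (box.top.to_f * height).round,
    width:  ((box.right.to_f - box.left.to_f) * width).round,
    height: ((box.bottom.to_f - box.top.to_f) * height).round
  }
end

# e.g. to_pixel_rect(tracking_frame.normalized_bounding_box, width: 1920, height: 1080)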
@@ -4000,10 +3884,10 @@ module Google
4000
3884
 
4001
3885
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4002
3886
  # indicates an estimated greater likelihood that the recognized words are
4003
- # correct. This field is set only for the top alternative.
4004
- # This field is not guaranteed to be accurate and users should not rely on it
4005
- # to be always provided.
4006
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3887
+ # correct. This field is set only for the top alternative. This field is not
3888
+ # guaranteed to be accurate and users should not rely on it to be always
3889
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3890
+ # not set.
4007
3891
  # Corresponds to the JSON property `confidence`
4008
3892
  # @return [Float]
4009
3893
  attr_accessor :confidence
@@ -4014,8 +3898,8 @@ module Google
4014
3898
  attr_accessor :transcript
4015
3899
 
4016
3900
  # Output only. A list of word-specific information for each recognized word.
4017
- # Note: When `enable_speaker_diarization` is set to true, you will see all
4018
- # the words from the beginning of the audio.
3901
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
3902
+ # words from the beginning of the audio.
4019
3903
  # Corresponds to the JSON property `words`
4020
3904
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
4021
3905
  attr_accessor :words
@@ -4036,18 +3920,17 @@ module Google
4036
3920
  class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
4037
3921
  include Google::Apis::Core::Hashable
4038
3922
 
4039
- # May contain one or more recognition hypotheses (up to the maximum specified
4040
- # in `max_alternatives`). These alternatives are ordered in terms of
4041
- # accuracy, with the top (first) alternative being the most probable, as
4042
- # ranked by the recognizer.
3923
+ # May contain one or more recognition hypotheses (up to the maximum specified in
3924
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
3925
+ # the top (first) alternative being the most probable, as ranked by the
3926
+ # recognizer.
4043
3927
  # Corresponds to the JSON property `alternatives`
4044
3928
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
4045
3929
  attr_accessor :alternatives
4046
3930
 
4047
3931
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
4048
- # language tag of
4049
- # the language in this result. This language code was detected to have the
4050
- # most likelihood of being spoken in the audio.
3932
+ # language tag of the language in this result. This language code was detected
3933
+ # to have the most likelihood of being spoken in the audio.
4051
3934
  # Corresponds to the JSON property `languageCode`
4052
3935
  # @return [String]
4053
3936
  attr_accessor :language_code
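Given that alternatives are ordered best-first and that 0.0 is a sentinel for an unset `confidence`, a client might read the top hypothesis like this. Illustrative only; `transcription` stands for one SpeechTranscription from the response.

def best_hypothesis(transcription)
  best = Array(transcription.alternatives).first
  return nil unless best

  confidence = best.confidence.to_f
  # 0.0 is a sentinel meaning `confidence` was not populated.
  shown = confidence.zero? ? 'n/a' : format('%.2f', confidence)
  "[#{transcription.language_code}] #{best.transcript} (confidence: #{shown})"
end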
@@ -4096,27 +3979,19 @@ module Google
4096
3979
  end
4097
3980
  end
4098
3981
 
4099
- # Video frame level annotation results for text annotation (OCR).
4100
- # Contains information regarding timestamp and bounding box locations for the
4101
- # frames containing detected OCR text snippets.
3982
+ # Video frame level annotation results for text annotation (OCR). Contains
3983
+ # information regarding timestamp and bounding box locations for the frames
3984
+ # containing detected OCR text snippets.
4102
3985
  class GoogleCloudVideointelligenceV1p2beta1TextFrame
4103
3986
  include Google::Apis::Core::Hashable
4104
3987
 
4105
3988
  # Normalized bounding polygon for text (that might not be aligned with axis).
4106
- # Contains list of the corner points in clockwise order starting from
4107
- # top-left corner. For example, for a rectangular bounding box:
4108
- # When the text is horizontal it might look like:
4109
- # 0----1
4110
- # | |
4111
- # 3----2
4112
- # When it's clockwise rotated 180 degrees around the top-left corner it
4113
- # becomes:
4114
- # 2----3
4115
- # | |
4116
- # 1----0
4117
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
4118
- # than 0, or greater than 1 due to trignometric calculations for location of
4119
- # the box.
3989
+ # Contains list of the corner points in clockwise order starting from top-left
3990
+ # corner. For example, for a rectangular bounding box: When the text is
3991
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3992
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3993
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3994
+ # or greater than 1 due to trignometric calculations for location of the box.
4120
3995
  # Corresponds to the JSON property `rotatedBoundingBox`
4121
3996
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
4122
3997
  attr_accessor :rotated_bounding_box
@@ -4169,9 +4044,8 @@ module Google
4169
4044
  end
4170
4045
  end
4171
4046
 
4172
- # For tracking related features.
4173
- # An object at time_offset with attributes, and located with
4174
- # normalized_bounding_box.
4047
+ # For tracking related features. An object at time_offset with attributes, and
4048
+ # located with normalized_bounding_box.
4175
4049
  class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
4176
4050
  include Google::Apis::Core::Hashable
4177
4051
 
@@ -4185,15 +4059,14 @@ module Google
4185
4059
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
4186
4060
  attr_accessor :landmarks
4187
4061
 
4188
- # Normalized bounding box.
4189
- # The normalized vertex coordinates are relative to the original image.
4190
- # Range: [0, 1].
4062
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4063
+ # original image. Range: [0, 1].
4191
4064
  # Corresponds to the JSON property `normalizedBoundingBox`
4192
4065
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
4193
4066
  attr_accessor :normalized_bounding_box
4194
4067
 
4195
- # Time-offset, relative to the beginning of the video,
4196
- # corresponding to the video frame for this object.
4068
+ # Time-offset, relative to the beginning of the video, corresponding to the
4069
+ # video frame for this object.
4197
4070
  # Corresponds to the JSON property `timeOffset`
4198
4071
  # @return [String]
4199
4072
  attr_accessor :time_offset
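The `timeOffset` here is serialized as a string. A small helper, under the assumption (not stated in this file) that the JSON form is a seconds value such as "3.500s", per the usual protobuf Duration mapping:

def offset_seconds(time_offset)
  time_offset && time_offset.delete_suffix('s').to_f
end

offset_seconds('3.500s') # => 3.5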
@@ -4252,20 +4125,19 @@ module Google
4252
4125
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
4253
4126
  include Google::Apis::Core::Hashable
4254
4127
 
4255
- # Specifies which feature is being tracked if the request contains more than
4256
- # one feature.
4128
+ # Specifies which feature is being tracked if the request contains more than one
4129
+ # feature.
4257
4130
  # Corresponds to the JSON property `feature`
4258
4131
  # @return [String]
4259
4132
  attr_accessor :feature
4260
4133
 
4261
- # Video file location in
4262
- # [Cloud Storage](https://cloud.google.com/storage/).
4134
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4263
4135
  # Corresponds to the JSON property `inputUri`
4264
4136
  # @return [String]
4265
4137
  attr_accessor :input_uri
4266
4138
 
4267
- # Approximate percentage processed thus far. Guaranteed to be
4268
- # 100 when fully processed.
4139
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
4140
+ # processed.
4269
4141
  # Corresponds to the JSON property `progressPercent`
4270
4142
  # @return [Fixnum]
4271
4143
  attr_accessor :progress_percent
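A sketch of reporting this progress metadata; `progress_list` stands in for the `annotation_progress` array carried in the long-running operation's metadata (that wrapper field is assumed, it is not shown in this hunk).

def report_progress(progress_list)
  progress_list.each do |progress|
    puts "#{progress.feature} on #{progress.input_uri}: #{progress.progress_percent || 0}% done"
  end
end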
@@ -4304,31 +4176,30 @@ module Google
4304
4176
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
4305
4177
  include Google::Apis::Core::Hashable
4306
4178
 
4307
- # The `Status` type defines a logical error model that is suitable for
4308
- # different programming environments, including REST APIs and RPC APIs. It is
4309
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
4310
- # three pieces of data: error code, error message, and error details.
4311
- # You can find out more about this error model and how to work with it in the
4312
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
4179
+ # The `Status` type defines a logical error model that is suitable for different
4180
+ # programming environments, including REST APIs and RPC APIs. It is used by [
4181
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
4182
+ # data: error code, error message, and error details. You can find out more
4183
+ # about this error model and how to work with it in the [API Design Guide](https:
4184
+ # //cloud.google.com/apis/design/errors).
4313
4185
  # Corresponds to the JSON property `error`
4314
4186
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
4315
4187
  attr_accessor :error
4316
4188
 
4317
- # Explicit content annotation (based on per-frame visual signals only).
4318
- # If no explicit content has been detected in a frame, no annotations are
4319
- # present for that frame.
4189
+ # Explicit content annotation (based on per-frame visual signals only). If no
4190
+ # explicit content has been detected in a frame, no annotations are present for
4191
+ # that frame.
4320
4192
  # Corresponds to the JSON property `explicitAnnotation`
4321
4193
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
4322
4194
  attr_accessor :explicit_annotation
4323
4195
 
4324
- # Label annotations on frame level.
4325
- # There is exactly one element for each unique label.
4196
+ # Label annotations on frame level. There is exactly one element for each unique
4197
+ # label.
4326
4198
  # Corresponds to the JSON property `frameLabelAnnotations`
4327
4199
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4328
4200
  attr_accessor :frame_label_annotations
4329
4201
 
4330
- # Video file location in
4331
- # [Cloud Storage](https://cloud.google.com/storage/).
4202
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4332
4203
  # Corresponds to the JSON property `inputUri`
4333
4204
  # @return [String]
4334
4205
  attr_accessor :input_uri
@@ -4355,11 +4226,11 @@ module Google
4355
4226
  attr_accessor :segment_label_annotations
4356
4227
 
4357
4228
  # Presence label annotations on video level or user-specified segment level.
4358
- # There is exactly one element for each unique label. Compared to the
4359
- # existing topical `segment_label_annotations`, this field presents more
4360
- # fine-grained, segment-level labels detected in video content and is made
4361
- # available only when the client sets `LabelDetectionConfig.model` to
4362
- # "builtin/latest" in the request.
4229
+ # There is exactly one element for each unique label. Compared to the existing
4230
+ # topical `segment_label_annotations`, this field presents more fine-grained,
4231
+ # segment-level labels detected in video content and is made available only when
4232
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
4233
+ # request.
4363
4234
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
4364
4235
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4365
4236
  attr_accessor :segment_presence_label_annotations
@@ -4369,17 +4240,17 @@ module Google
4369
4240
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
4370
4241
  attr_accessor :shot_annotations
4371
4242
 
4372
- # Topical label annotations on shot level.
4373
- # There is exactly one element for each unique label.
4243
+ # Topical label annotations on shot level. There is exactly one element for each
4244
+ # unique label.
4374
4245
  # Corresponds to the JSON property `shotLabelAnnotations`
4375
4246
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4376
4247
  attr_accessor :shot_label_annotations
4377
4248
 
4378
4249
  # Presence label annotations on shot level. There is exactly one element for
4379
- # each unique label. Compared to the existing topical
4380
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
4381
- # labels detected in video content and is made available only when the client
4382
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
4250
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
4251
+ # this field presents more fine-grained, shot-level labels detected in video
4252
+ # content and is made available only when the client sets `LabelDetectionConfig.
4253
+ # model` to "builtin/latest" in the request.
4383
4254
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
4384
4255
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4385
4256
  attr_accessor :shot_presence_label_annotations
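Putting the shot-level fields above together, a caller might summarize one result entry as follows. Hedged sketch; it deliberately avoids fields of `GoogleRpcStatus` that this hunk does not show.

# `results` stands for one VideoAnnotationResults entry from the response.
def summarize_shot_labels(results)
  return 'annotation failed, see results.error' if results.error

  topical  = Array(results.shot_label_annotations)
  presence = Array(results.shot_presence_label_annotations) # needs "builtin/latest"
  "#{results.input_uri}: #{topical.length} topical, #{presence.length} presence shot labels"
end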
@@ -4389,9 +4260,8 @@ module Google
4389
4260
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
4390
4261
  attr_accessor :speech_transcriptions
4391
4262
 
4392
- # OCR text detection and tracking.
4393
- # Annotations for list of detected text snippets. Each will have list of
4394
- # frame information associated with it.
4263
+ # OCR text detection and tracking. Annotations for list of detected text
4264
+ # snippets. Each will have list of frame information associated with it.
4395
4265
  # Corresponds to the JSON property `textAnnotations`
4396
4266
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
4397
4267
  attr_accessor :text_annotations
@@ -4423,14 +4293,14 @@ module Google
4423
4293
  class GoogleCloudVideointelligenceV1p2beta1VideoSegment
4424
4294
  include Google::Apis::Core::Hashable
4425
4295
 
4426
- # Time-offset, relative to the beginning of the video,
4427
- # corresponding to the end of the segment (inclusive).
4296
+ # Time-offset, relative to the beginning of the video, corresponding to the end
4297
+ # of the segment (inclusive).
4428
4298
  # Corresponds to the JSON property `endTimeOffset`
4429
4299
  # @return [String]
4430
4300
  attr_accessor :end_time_offset
4431
4301
 
4432
- # Time-offset, relative to the beginning of the video,
4433
- # corresponding to the start of the segment (inclusive).
4302
+ # Time-offset, relative to the beginning of the video, corresponding to the
4303
+ # start of the segment (inclusive).
4434
4304
  # Corresponds to the JSON property `startTimeOffset`
4435
4305
  # @return [String]
4436
4306
  attr_accessor :start_time_offset
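Both offsets are inclusive and serialized as strings, so a duration helper is a one-liner, assuming the seconds-string JSON form noted earlier (String#to_f reads the leading number from e.g. "12.500s"):

def segment_duration_seconds(segment)
  segment.end_time_offset.to_f - segment.start_time_offset.to_f
end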
@@ -4447,41 +4317,41 @@ module Google
4447
4317
  end
4448
4318
 
4449
4319
  # Word-specific information for recognized words. Word information is only
4450
- # included in the response when certain request parameters are set, such
4451
- # as `enable_word_time_offsets`.
4320
+ # included in the response when certain request parameters are set, such as `
4321
+ # enable_word_time_offsets`.
4452
4322
  class GoogleCloudVideointelligenceV1p2beta1WordInfo
4453
4323
  include Google::Apis::Core::Hashable
4454
4324
 
4455
4325
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4456
4326
  # indicates an estimated greater likelihood that the recognized words are
4457
- # correct. This field is set only for the top alternative.
4458
- # This field is not guaranteed to be accurate and users should not rely on it
4459
- # to be always provided.
4460
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4327
+ # correct. This field is set only for the top alternative. This field is not
4328
+ # guaranteed to be accurate and users should not rely on it to be always
4329
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4330
+ # not set.
4461
4331
  # Corresponds to the JSON property `confidence`
4462
4332
  # @return [Float]
4463
4333
  attr_accessor :confidence
4464
4334
 
4465
- # Time offset relative to the beginning of the audio, and
4466
- # corresponding to the end of the spoken word. This field is only set if
4467
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4468
- # experimental feature and the accuracy of the time offset can vary.
4335
+ # Time offset relative to the beginning of the audio, and corresponding to the
4336
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
4337
+ # true` and only in the top hypothesis. This is an experimental feature and the
4338
+ # accuracy of the time offset can vary.
4469
4339
  # Corresponds to the JSON property `endTime`
4470
4340
  # @return [String]
4471
4341
  attr_accessor :end_time
4472
4342
 
4473
- # Output only. A distinct integer value is assigned for every speaker within
4474
- # the audio. This field specifies which one of those speakers was detected to
4475
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
4476
- # and is only set if speaker diarization is enabled.
4343
+ # Output only. A distinct integer value is assigned for every speaker within the
4344
+ # audio. This field specifies which one of those speakers was detected to have
4345
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
4346
+ # only set if speaker diarization is enabled.
4477
4347
  # Corresponds to the JSON property `speakerTag`
4478
4348
  # @return [Fixnum]
4479
4349
  attr_accessor :speaker_tag
4480
4350
 
4481
- # Time offset relative to the beginning of the audio, and
4482
- # corresponding to the start of the spoken word. This field is only set if
4483
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4484
- # experimental feature and the accuracy of the time offset can vary.
4351
+ # Time offset relative to the beginning of the audio, and corresponding to the
4352
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
4353
+ # true` and only in the top hypothesis. This is an experimental feature and the
4354
+ # accuracy of the time offset can vary.
4485
4355
  # Corresponds to the JSON property `startTime`
4486
4356
  # @return [String]
4487
4357
  attr_accessor :start_time
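When diarization is enabled, `speaker_tag` lets you split the word list by speaker. Illustrative only:

# Only meaningful when speaker diarization was enabled, in which case
# speaker_tag ranges from 1 up to diarization_speaker_count.
def words_per_speaker(words)
  words.group_by(&:speaker_tag).transform_values(&:length)
end

# e.g. words_per_speaker(best_alternative.words) # => { 1 => 42, 2 => 17 }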
@@ -4505,9 +4375,9 @@ module Google
4505
4375
  end
4506
4376
  end
4507
4377
 
4508
- # Video annotation progress. Included in the `metadata`
4509
- # field of the `Operation` returned by the `GetOperation`
4510
- # call of the `google::longrunning::Operations` service.
4378
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
4379
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
4380
+ # service.
4511
4381
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
4512
4382
  include Google::Apis::Core::Hashable
4513
4383
 
@@ -4526,9 +4396,9 @@ module Google
4526
4396
  end
4527
4397
  end
4528
4398
 
4529
- # Video annotation response. Included in the `response`
4530
- # field of the `Operation` returned by the `GetOperation`
4531
- # call of the `google::longrunning::Operations` service.
4399
+ # Video annotation response. Included in the `response` field of the `Operation`
4400
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
4401
+ # service.
4532
4402
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
4533
4403
  include Google::Apis::Core::Hashable
4534
4404
 
@@ -4562,10 +4432,9 @@ module Google
4562
4432
  # @return [String]
4563
4433
  attr_accessor :display_name
4564
4434
 
4565
- # The resource name of the celebrity. Have the format
4566
- # `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
4567
- # kg-mid is the id in Google knowledge graph, which is unique for the
4568
- # celebrity.
4435
+ # The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
4436
+ # indicates a celebrity from preloaded gallery. kg-mid is the id in Google
4437
+ # knowledge graph, which is unique for the celebrity.
4569
4438
  # Corresponds to the JSON property `name`
4570
4439
  # @return [String]
4571
4440
  attr_accessor :name
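Since the resource name is documented as `video-intelligence/kg-mid`, pulling out the Knowledge Graph MID is a simple split. Sketch, assuming the documented format holds:

def knowledge_graph_mid(celebrity)
  celebrity.name.to_s.split('/').last
end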
@@ -4586,8 +4455,8 @@ module Google
4586
4455
  class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
4587
4456
  include Google::Apis::Core::Hashable
4588
4457
 
4589
- # The tracks detected from the input video, including recognized celebrities
4590
- # and other detected faces in the video.
4458
+ # The tracks detected from the input video, including recognized celebrities and
4459
+ # other detected faces in the video.
4591
4460
  # Corresponds to the JSON property `celebrityTracks`
4592
4461
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
4593
4462
  attr_accessor :celebrity_tracks
@@ -4643,14 +4512,14 @@ module Google
4643
4512
  # @return [Float]
4644
4513
  attr_accessor :confidence
4645
4514
 
4646
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
4647
- # A full list of supported type names will be provided in the document.
4515
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
4516
+ # full list of supported type names will be provided in the document.
4648
4517
  # Corresponds to the JSON property `name`
4649
4518
  # @return [String]
4650
4519
  attr_accessor :name
4651
4520
 
4652
- # Text value of the detection result. For example, the value for "HairColor"
4653
- # can be "black", "blonde", etc.
4521
+ # Text value of the detection result. For example, the value for "HairColor" can
4522
+ # be "black", "blonde", etc.
4654
4523
  # Corresponds to the JSON property `value`
4655
4524
  # @return [String]
4656
4525
  attr_accessor :value
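The attribute pairs above (for example glasses, HairColor) flatten naturally into a lookup table; a small hedged helper:

# Flattens DetectedAttribute entries (e.g. "HairColor" => "black") into a hash.
def attribute_map(attributes)
  attributes.each_with_object({}) { |a, map| map[a.name] = a.value }
end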
@@ -4682,9 +4551,8 @@ module Google
4682
4551
  # @return [String]
4683
4552
  attr_accessor :name
4684
4553
 
4685
- # A vertex represents a 2D point in the image.
4686
- # NOTE: the normalized vertex coordinates are relative to the original image
4687
- # and range from 0 to 1.
4554
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4555
+ # coordinates are relative to the original image and range from 0 to 1.
4688
4556
  # Corresponds to the JSON property `point`
4689
4557
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
4690
4558
  attr_accessor :point
@@ -4710,8 +4578,7 @@ module Google
4710
4578
  # @return [String]
4711
4579
  attr_accessor :description
4712
4580
 
4713
- # Opaque entity ID. Some IDs may be available in
4714
- # [Google Knowledge Graph Search
4581
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
4715
4582
  # API](https://developers.google.com/knowledge-graph/).
4716
4583
  # Corresponds to the JSON property `entityId`
4717
4584
  # @return [String]
@@ -4734,9 +4601,9 @@ module Google
4734
4601
  end
4735
4602
  end
4736
4603
 
4737
- # Explicit content annotation (based on per-frame visual signals only).
4738
- # If no explicit content has been detected in a frame, no annotations are
4739
- # present for that frame.
4604
+ # Explicit content annotation (based on per-frame visual signals only). If no
4605
+ # explicit content has been detected in a frame, no annotations are present for
4606
+ # that frame.
4740
4607
  class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
4741
4608
  include Google::Apis::Core::Hashable
4742
4609
 
@@ -4823,10 +4690,9 @@ module Google
4823
4690
  class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
4824
4691
  include Google::Apis::Core::Hashable
4825
4692
 
4826
- # Common categories for the detected entity.
4827
- # For example, when the label is `Terrier`, the category is likely `dog`. And
4828
- # in some cases there might be more than one categories e.g., `Terrier` could
4829
- # also be a `pet`.
4693
+ # Common categories for the detected entity. For example, when the label is `
4694
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
4695
+ # than one categories e.g., `Terrier` could also be a `pet`.
4830
4696
  # Corresponds to the JSON property `categoryEntities`
4831
4697
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Entity>]
4832
4698
  attr_accessor :category_entities
@@ -4925,14 +4791,14 @@ module Google
4925
4791
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Entity]
4926
4792
  attr_accessor :entity
4927
4793
 
4928
- # All video segments where the recognized logo appears. There might be
4929
- # multiple instances of the same logo class appearing in one VideoSegment.
4794
+ # All video segments where the recognized logo appears. There might be multiple
4795
+ # instances of the same logo class appearing in one VideoSegment.
4930
4796
  # Corresponds to the JSON property `segments`
4931
4797
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
4932
4798
  attr_accessor :segments
4933
4799
 
4934
- # All logo tracks where the recognized logo appears. Each track corresponds
4935
- # to one logo instance appearing in consecutive frames.
4800
+ # All logo tracks where the recognized logo appears. Each track corresponds to
4801
+ # one logo instance appearing in consecutive frames.
4936
4802
  # Corresponds to the JSON property `tracks`
4937
4803
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Track>]
4938
4804
  attr_accessor :tracks
@@ -4949,9 +4815,8 @@ module Google
4949
4815
  end
4950
4816
  end
4951
4817
 
4952
- # Normalized bounding box.
4953
- # The normalized vertex coordinates are relative to the original image.
4954
- # Range: [0, 1].
4818
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4819
+ # original image. Range: [0, 1].
4955
4820
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
4956
4821
  include Google::Apis::Core::Hashable
4957
4822
 
@@ -4989,20 +4854,12 @@ module Google
4989
4854
  end
4990
4855
 
4991
4856
  # Normalized bounding polygon for text (that might not be aligned with axis).
4992
- # Contains list of the corner points in clockwise order starting from
4993
- # top-left corner. For example, for a rectangular bounding box:
4994
- # When the text is horizontal it might look like:
4995
- # 0----1
4996
- # | |
4997
- # 3----2
4998
- # When it's clockwise rotated 180 degrees around the top-left corner it
4999
- # becomes:
5000
- # 2----3
5001
- # | |
5002
- # 1----0
5003
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5004
- # than 0, or greater than 1 due to trignometric calculations for location of
5005
- # the box.
4857
+ # Contains list of the corner points in clockwise order starting from top-left
4858
+ # corner. For example, for a rectangular bounding box: When the text is
4859
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4860
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4861
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4862
+ # or greater than 1 due to trignometric calculations for location of the box.
5006
4863
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
5007
4864
  include Google::Apis::Core::Hashable
5008
4865
 
@@ -5021,9 +4878,8 @@ module Google
5021
4878
  end
5022
4879
  end
5023
4880
 
5024
- # A vertex represents a 2D point in the image.
5025
- # NOTE: the normalized vertex coordinates are relative to the original image
5026
- # and range from 0 to 1.
4881
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4882
+ # coordinates are relative to the original image and range from 0 to 1.
5027
4883
  class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
5028
4884
  include Google::Apis::Core::Hashable
5029
4885
 
@@ -5062,10 +4918,10 @@ module Google
5062
4918
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Entity]
5063
4919
  attr_accessor :entity
5064
4920
 
5065
- # Information corresponding to all frames where this object track appears.
5066
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
5067
- # messages in frames.
5068
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
4921
+ # Information corresponding to all frames where this object track appears. Non-
4922
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
4923
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
4924
+ # frames.
5069
4925
  # Corresponds to the JSON property `frames`
5070
4926
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
5071
4927
  attr_accessor :frames
@@ -5075,12 +4931,11 @@ module Google
5075
4931
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
5076
4932
  attr_accessor :segment
5077
4933
 
5078
- # Streaming mode ONLY.
5079
- # In streaming mode, we do not know the end time of a tracked object
5080
- # before it is completed. Hence, there is no VideoSegment info returned.
5081
- # Instead, we provide a unique identifiable integer track_id so that
5082
- # the customers can correlate the results of the ongoing
5083
- # ObjectTrackAnnotation of the same track_id over time.
4934
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
4935
+ # tracked object before it is completed. Hence, there is no VideoSegment info
4936
+ # returned. Instead, we provide a unique identifiable integer track_id so that
4937
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
4938
+ # of the same track_id over time.
5084
4939
  # Corresponds to the JSON property `trackId`
5085
4940
  # @return [Fixnum]
5086
4941
  attr_accessor :track_id
@@ -5110,9 +4965,8 @@ module Google
5110
4965
  class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
5111
4966
  include Google::Apis::Core::Hashable
5112
4967
 
5113
- # Normalized bounding box.
5114
- # The normalized vertex coordinates are relative to the original image.
5115
- # Range: [0, 1].
4968
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4969
+ # original image. Range: [0, 1].
5116
4970
  # Corresponds to the JSON property `normalizedBoundingBox`
5117
4971
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5118
4972
  attr_accessor :normalized_bounding_box
@@ -5189,10 +5043,10 @@ module Google
5189
5043
 
5190
5044
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5191
5045
  # indicates an estimated greater likelihood that the recognized words are
5192
- # correct. This field is set only for the top alternative.
5193
- # This field is not guaranteed to be accurate and users should not rely on it
5194
- # to be always provided.
5195
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
5046
+ # correct. This field is set only for the top alternative. This field is not
5047
+ # guaranteed to be accurate and users should not rely on it to be always
5048
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
5049
+ # not set.
5196
5050
  # Corresponds to the JSON property `confidence`
5197
5051
  # @return [Float]
5198
5052
  attr_accessor :confidence
@@ -5203,8 +5057,8 @@ module Google
5203
5057
  attr_accessor :transcript
5204
5058
 
5205
5059
  # Output only. A list of word-specific information for each recognized word.
5206
- # Note: When `enable_speaker_diarization` is set to true, you will see all
5207
- # the words from the beginning of the audio.
5060
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
5061
+ # words from the beginning of the audio.
5208
5062
  # Corresponds to the JSON property `words`
5209
5063
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
5210
5064
  attr_accessor :words
@@ -5225,18 +5079,17 @@ module Google
5225
5079
  class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
5226
5080
  include Google::Apis::Core::Hashable
5227
5081
 
5228
- # May contain one or more recognition hypotheses (up to the maximum specified
5229
- # in `max_alternatives`). These alternatives are ordered in terms of
5230
- # accuracy, with the top (first) alternative being the most probable, as
5231
- # ranked by the recognizer.
5082
+ # May contain one or more recognition hypotheses (up to the maximum specified in
5083
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
5084
+ # the top (first) alternative being the most probable, as ranked by the
5085
+ # recognizer.
5232
5086
  # Corresponds to the JSON property `alternatives`
5233
5087
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
5234
5088
  attr_accessor :alternatives
5235
5089
 
5236
5090
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
5237
- # language tag of
5238
- # the language in this result. This language code was detected to have the
5239
- # most likelihood of being spoken in the audio.
5091
+ # language tag of the language in this result. This language code was detected
5092
+ # to have the most likelihood of being spoken in the audio.
5240
5093
  # Corresponds to the JSON property `languageCode`
5241
5094
  # @return [String]
5242
5095
  attr_accessor :language_code
@@ -5252,32 +5105,31 @@ module Google
5252
5105
  end
5253
5106
  end
5254
5107
 
5255
- # `StreamingAnnotateVideoResponse` is the only message returned to the client
5256
- # by `StreamingAnnotateVideo`. A series of zero or more
5257
- # `StreamingAnnotateVideoResponse` messages are streamed back to the client.
5108
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
5109
+ # `StreamingAnnotateVideo`. A series of zero or more `
5110
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
5258
5111
  class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
5259
5112
  include Google::Apis::Core::Hashable
5260
5113
 
5261
- # Streaming annotation results corresponding to a portion of the video
5262
- # that is currently being processed.
5114
+ # Streaming annotation results corresponding to a portion of the video that is
5115
+ # currently being processed.
5263
5116
  # Corresponds to the JSON property `annotationResults`
5264
5117
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
5265
5118
  attr_accessor :annotation_results
5266
5119
 
5267
- # Google Cloud Storage URI that stores annotation results of one
5268
- # streaming session in JSON format.
5269
- # It is the annotation_result_storage_directory
5270
- # from the request followed by '/cloud_project_number-session_id'.
5120
+ # Google Cloud Storage URI that stores annotation results of one streaming
5121
+ # session in JSON format. It is the annotation_result_storage_directory from the
5122
+ # request followed by '/cloud_project_number-session_id'.
5271
5123
  # Corresponds to the JSON property `annotationResultsUri`
5272
5124
  # @return [String]
5273
5125
  attr_accessor :annotation_results_uri
5274
5126
 
5275
- # The `Status` type defines a logical error model that is suitable for
5276
- # different programming environments, including REST APIs and RPC APIs. It is
5277
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5278
- # three pieces of data: error code, error message, and error details.
5279
- # You can find out more about this error model and how to work with it in the
5280
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5127
+ # The `Status` type defines a logical error model that is suitable for different
5128
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5129
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5130
+ # data: error code, error message, and error details. You can find out more
5131
+ # about this error model and how to work with it in the [API Design Guide](https:
5132
+ # //cloud.google.com/apis/design/errors).
5281
5133
  # Corresponds to the JSON property `error`
5282
5134
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
5283
5135
  attr_accessor :error
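A sketch of consuming one streaming response, checking `error` before the partial `annotation_results` and the results URI. Field names are those shown in this hunk; the handling logic is illustrative.

# `response` stands for one StreamingAnnotateVideoResponse from the stream.
def handle_streaming_response(response)
  if response.error
    warn 'streaming annotation error, see response.error'
  elsif response.annotation_results
    # Partial results for the portion of the video processed so far; the full
    # JSON output is written under annotation_results_uri.
    puts "partial results received; session output: #{response.annotation_results_uri}"
  end
end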
@@ -5294,14 +5146,14 @@ module Google
5294
5146
  end
5295
5147
  end
5296
5148
 
5297
- # Streaming annotation results corresponding to a portion of the video
5298
- # that is currently being processed.
5149
+ # Streaming annotation results corresponding to a portion of the video that is
5150
+ # currently being processed.
5299
5151
  class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
5300
5152
  include Google::Apis::Core::Hashable
5301
5153
 
5302
- # Explicit content annotation (based on per-frame visual signals only).
5303
- # If no explicit content has been detected in a frame, no annotations are
5304
- # present for that frame.
5154
+ # Explicit content annotation (based on per-frame visual signals only). If no
5155
+ # explicit content has been detected in a frame, no annotations are present for
5156
+ # that frame.
5305
5157
  # Corresponds to the JSON property `explicitAnnotation`
5306
5158
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5307
5159
  attr_accessor :explicit_annotation
@@ -5367,27 +5219,19 @@ module Google
5367
5219
  end
5368
5220
  end
5369
5221
 
5370
- # Video frame level annotation results for text annotation (OCR).
5371
- # Contains information regarding timestamp and bounding box locations for the
5372
- # frames containing detected OCR text snippets.
5222
+ # Video frame level annotation results for text annotation (OCR). Contains
5223
+ # information regarding timestamp and bounding box locations for the frames
5224
+ # containing detected OCR text snippets.
5373
5225
  class GoogleCloudVideointelligenceV1p3beta1TextFrame
5374
5226
  include Google::Apis::Core::Hashable
5375
5227
 
5376
5228
  # Normalized bounding polygon for text (that might not be aligned with axis).
5377
- # Contains list of the corner points in clockwise order starting from
5378
- # top-left corner. For example, for a rectangular bounding box:
5379
- # When the text is horizontal it might look like:
5380
- # 0----1
5381
- # | |
5382
- # 3----2
5383
- # When it's clockwise rotated 180 degrees around the top-left corner it
5384
- # becomes:
5385
- # 2----3
5386
- # | |
5387
- # 1----0
5388
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5389
- # than 0, or greater than 1 due to trignometric calculations for location of
5390
- # the box.
5229
+ # Contains list of the corner points in clockwise order starting from top-left
5230
+ # corner. For example, for a rectangular bounding box: When the text is
5231
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5232
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5233
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5234
+ # or greater than 1 due to trignometric calculations for location of the box.
5391
5235
  # Corresponds to the JSON property `rotatedBoundingBox`
5392
5236
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
5393
5237
  attr_accessor :rotated_bounding_box
@@ -5440,9 +5284,8 @@ module Google
5440
5284
  end
5441
5285
  end
5442
5286
 
5443
- # For tracking related features.
5444
- # An object at time_offset with attributes, and located with
5445
- # normalized_bounding_box.
5287
+ # For tracking related features. An object at time_offset with attributes, and
5288
+ # located with normalized_bounding_box.
5446
5289
  class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
5447
5290
  include Google::Apis::Core::Hashable
5448
5291
 
@@ -5456,15 +5299,14 @@ module Google
5456
5299
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
5457
5300
  attr_accessor :landmarks
5458
5301
 
5459
- # Normalized bounding box.
5460
- # The normalized vertex coordinates are relative to the original image.
5461
- # Range: [0, 1].
5302
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
5303
+ # original image. Range: [0, 1].
5462
5304
  # Corresponds to the JSON property `normalizedBoundingBox`
5463
5305
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5464
5306
  attr_accessor :normalized_bounding_box
5465
5307
 
5466
- # Time-offset, relative to the beginning of the video,
5467
- # corresponding to the video frame for this object.
5308
+ # Time-offset, relative to the beginning of the video, corresponding to the
5309
+ # video frame for this object.
5468
5310
  # Corresponds to the JSON property `timeOffset`
5469
5311
  # @return [String]
5470
5312
  attr_accessor :time_offset
@@ -5523,20 +5365,19 @@ module Google
5523
5365
  class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
5524
5366
  include Google::Apis::Core::Hashable
5525
5367
 
5526
- # Specifies which feature is being tracked if the request contains more than
5527
- # one feature.
5368
+ # Specifies which feature is being tracked if the request contains more than one
5369
+ # feature.
5528
5370
  # Corresponds to the JSON property `feature`
5529
5371
  # @return [String]
5530
5372
  attr_accessor :feature
5531
5373
 
5532
- # Video file location in
5533
- # [Cloud Storage](https://cloud.google.com/storage/).
5374
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5534
5375
  # Corresponds to the JSON property `inputUri`
5535
5376
  # @return [String]
5536
5377
  attr_accessor :input_uri
5537
5378
 
5538
- # Approximate percentage processed thus far. Guaranteed to be
5539
- # 100 when fully processed.
5379
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
5380
+ # processed.
5540
5381
  # Corresponds to the JSON property `progressPercent`
5541
5382
  # @return [Fixnum]
5542
5383
  attr_accessor :progress_percent
@@ -5580,19 +5421,19 @@ module Google
5580
5421
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
5581
5422
  attr_accessor :celebrity_recognition_annotations
5582
5423
 
5583
- # The `Status` type defines a logical error model that is suitable for
5584
- # different programming environments, including REST APIs and RPC APIs. It is
5585
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5586
- # three pieces of data: error code, error message, and error details.
5587
- # You can find out more about this error model and how to work with it in the
5588
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5424
+ # The `Status` type defines a logical error model that is suitable for different
5425
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5426
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5427
+ # data: error code, error message, and error details. You can find out more
5428
+ # about this error model and how to work with it in the [API Design Guide](https:
5429
+ # //cloud.google.com/apis/design/errors).
5589
5430
  # Corresponds to the JSON property `error`
5590
5431
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
5591
5432
  attr_accessor :error
5592
5433
 
5593
- # Explicit content annotation (based on per-frame visual signals only).
5594
- # If no explicit content has been detected in a frame, no annotations are
5595
- # present for that frame.
5434
+ # Explicit content annotation (based on per-frame visual signals only). If no
5435
+ # explicit content has been detected in a frame, no annotations are present for
5436
+ # that frame.
5596
5437
  # Corresponds to the JSON property `explicitAnnotation`
5597
5438
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5598
5439
  attr_accessor :explicit_annotation
@@ -5602,14 +5443,13 @@ module Google
5602
5443
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
5603
5444
  attr_accessor :face_detection_annotations
5604
5445
 
5605
- # Label annotations on frame level.
5606
- # There is exactly one element for each unique label.
5446
+ # Label annotations on frame level. There is exactly one element for each unique
5447
+ # label.
5607
5448
  # Corresponds to the JSON property `frameLabelAnnotations`
5608
5449
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5609
5450
  attr_accessor :frame_label_annotations
5610
5451
 
5611
- # Video file location in
5612
- # [Cloud Storage](https://cloud.google.com/storage/).
5452
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5613
5453
  # Corresponds to the JSON property `inputUri`
5614
5454
  # @return [String]
5615
5455
  attr_accessor :input_uri
@@ -5641,11 +5481,11 @@ module Google
5641
5481
  attr_accessor :segment_label_annotations
5642
5482
 
5643
5483
  # Presence label annotations on video level or user-specified segment level.
5644
- # There is exactly one element for each unique label. Compared to the
5645
- # existing topical `segment_label_annotations`, this field presents more
5646
- # fine-grained, segment-level labels detected in video content and is made
5647
- # available only when the client sets `LabelDetectionConfig.model` to
5648
- # "builtin/latest" in the request.
5484
+ # There is exactly one element for each unique label. Compared to the existing
5485
+ # topical `segment_label_annotations`, this field presents more fine-grained,
5486
+ # segment-level labels detected in video content and is made available only when
5487
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
5488
+ # request.
5649
5489
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
5650
5490
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5651
5491
  attr_accessor :segment_presence_label_annotations
@@ -5655,17 +5495,17 @@ module Google
5655
5495
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
5656
5496
  attr_accessor :shot_annotations
5657
5497
 
5658
- # Topical label annotations on shot level.
5659
- # There is exactly one element for each unique label.
5498
+ # Topical label annotations on shot level. There is exactly one element for each
5499
+ # unique label.
5660
5500
  # Corresponds to the JSON property `shotLabelAnnotations`
5661
5501
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5662
5502
  attr_accessor :shot_label_annotations
5663
5503
 
5664
5504
  # Presence label annotations on shot level. There is exactly one element for
5665
- # each unique label. Compared to the existing topical
5666
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
5667
- # labels detected in video content and is made available only when the client
5668
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
5505
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
5506
+ # this field presents more fine-grained, shot-level labels detected in video
5507
+ # content and is made available only when the client sets `LabelDetectionConfig.
5508
+ # model` to "builtin/latest" in the request.
5669
5509
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
5670
5510
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5671
5511
  attr_accessor :shot_presence_label_annotations
@@ -5675,9 +5515,8 @@ module Google
5675
5515
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
5676
5516
  attr_accessor :speech_transcriptions
5677
5517
 
5678
- # OCR text detection and tracking.
5679
- # Annotations for list of detected text snippets. Each will have list of
5680
- # frame information associated with it.
5518
+ # OCR text detection and tracking. Annotations for list of detected text
5519
+ # snippets. Each will have list of frame information associated with it.
5681
5520
  # Corresponds to the JSON property `textAnnotations`
5682
5521
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
5683
5522
  attr_accessor :text_annotations
@@ -5712,14 +5551,14 @@ module Google
5712
5551
  class GoogleCloudVideointelligenceV1p3beta1VideoSegment
5713
5552
  include Google::Apis::Core::Hashable
5714
5553
 
5715
- # Time-offset, relative to the beginning of the video,
5716
- # corresponding to the end of the segment (inclusive).
5554
+ # Time-offset, relative to the beginning of the video, corresponding to the end
5555
+ # of the segment (inclusive).
5717
5556
  # Corresponds to the JSON property `endTimeOffset`
5718
5557
  # @return [String]
5719
5558
  attr_accessor :end_time_offset
5720
5559
 
5721
- # Time-offset, relative to the beginning of the video,
5722
- # corresponding to the start of the segment (inclusive).
5560
+ # Time-offset, relative to the beginning of the video, corresponding to the
5561
+ # start of the segment (inclusive).
5723
5562
  # Corresponds to the JSON property `startTimeOffset`
5724
5563
  # @return [String]
5725
5564
  attr_accessor :start_time_offset
@@ -5736,41 +5575,41 @@ module Google
       end

       # Word-specific information for recognized words. Word information is only
-      # included in the response when certain request parameters are set, such
-      # as `enable_word_time_offsets`.
+      # included in the response when certain request parameters are set, such as `
+      # enable_word_time_offsets`.
       class GoogleCloudVideointelligenceV1p3beta1WordInfo
         include Google::Apis::Core::Hashable

         # Output only. The confidence estimate between 0.0 and 1.0. A higher number
         # indicates an estimated greater likelihood that the recognized words are
-        # correct. This field is set only for the top alternative.
-        # This field is not guaranteed to be accurate and users should not rely on it
-        # to be always provided.
-        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
+        # correct. This field is set only for the top alternative. This field is not
+        # guaranteed to be accurate and users should not rely on it to be always
+        # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+        # not set.
         # Corresponds to the JSON property `confidence`
         # @return [Float]
         attr_accessor :confidence

-        # Time offset relative to the beginning of the audio, and
-        # corresponding to the end of the spoken word. This field is only set if
-        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
-        # experimental feature and the accuracy of the time offset can vary.
+        # Time offset relative to the beginning of the audio, and corresponding to the
+        # end of the spoken word. This field is only set if `enable_word_time_offsets=
+        # true` and only in the top hypothesis. This is an experimental feature and the
+        # accuracy of the time offset can vary.
         # Corresponds to the JSON property `endTime`
         # @return [String]
         attr_accessor :end_time

-        # Output only. A distinct integer value is assigned for every speaker within
-        # the audio. This field specifies which one of those speakers was detected to
-        # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
-        # and is only set if speaker diarization is enabled.
+        # Output only. A distinct integer value is assigned for every speaker within the
+        # audio. This field specifies which one of those speakers was detected to have
+        # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+        # only set if speaker diarization is enabled.
         # Corresponds to the JSON property `speakerTag`
         # @return [Fixnum]
         attr_accessor :speaker_tag

-        # Time offset relative to the beginning of the audio, and
-        # corresponding to the start of the spoken word. This field is only set if
-        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
-        # experimental feature and the accuracy of the time offset can vary.
+        # Time offset relative to the beginning of the audio, and corresponding to the
+        # start of the spoken word. This field is only set if `enable_word_time_offsets=
+        # true` and only in the top hypothesis. This is an experimental feature and the
+        # accuracy of the time offset can vary.
         # Corresponds to the JSON property `startTime`
         # @return [String]
         attr_accessor :start_time
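
Not part of the diff: a minimal sketch of consuming the fields above, e.g. grouping recognized words by speaker. The sample WordInfo values are fabricated for illustration, and only the attributes shown in this class are used.

  require 'google/apis/videointelligence_v1beta2'

  word_info = Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1WordInfo

  # Fabricated sample data standing in for words from the top transcription
  # alternative (speaker_tag needs diarization enabled; start_time/end_time
  # need enable_word_time_offsets=true).
  words = [
    word_info.new(start_time: '0.1s', end_time: '0.4s', confidence: 0.92, speaker_tag: 1),
    word_info.new(start_time: '0.4s', end_time: '0.7s', confidence: 0.0,  speaker_tag: 2) # 0.0 => confidence not set
  ]

  words.group_by(&:speaker_tag).each do |speaker, spoken|
    puts "speaker #{speaker}: #{spoken.size} word(s), #{spoken.first.start_time}..#{spoken.last.end_time}"
  end
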
@@ -5799,47 +5638,45 @@ module Google
       class GoogleLongrunningOperation
         include Google::Apis::Core::Hashable

-        # If the value is `false`, it means the operation is still in progress.
-        # If `true`, the operation is completed, and either `error` or `response` is
-        # available.
+        # If the value is `false`, it means the operation is still in progress. If `true`
+        # , the operation is completed, and either `error` or `response` is available.
         # Corresponds to the JSON property `done`
         # @return [Boolean]
         attr_accessor :done
         alias_method :done?, :done

-        # The `Status` type defines a logical error model that is suitable for
-        # different programming environments, including REST APIs and RPC APIs. It is
-        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-        # three pieces of data: error code, error message, and error details.
-        # You can find out more about this error model and how to work with it in the
-        # [API Design Guide](https://cloud.google.com/apis/design/errors).
+        # The `Status` type defines a logical error model that is suitable for different
+        # programming environments, including REST APIs and RPC APIs. It is used by [
+        # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+        # data: error code, error message, and error details. You can find out more
+        # about this error model and how to work with it in the [API Design Guide](https:
+        # //cloud.google.com/apis/design/errors).
         # Corresponds to the JSON property `error`
         # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
         attr_accessor :error

-        # Service-specific metadata associated with the operation. It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata. Any method that returns a
-        # long-running operation should document the metadata type, if any.
+        # Service-specific metadata associated with the operation. It typically contains
+        # progress information and common metadata such as create time. Some services
+        # might not provide such metadata. Any method that returns a long-running
+        # operation should document the metadata type, if any.
         # Corresponds to the JSON property `metadata`
         # @return [Hash<String,Object>]
         attr_accessor :metadata

         # The server-assigned name, which is only unique within the same service that
-        # originally returns it. If you use the default HTTP mapping, the
-        # `name` should be a resource name ending with `operations/`unique_id``.
+        # originally returns it. If you use the default HTTP mapping, the `name` should
+        # be a resource name ending with `operations/`unique_id``.
         # Corresponds to the JSON property `name`
         # @return [String]
         attr_accessor :name

-        # The normal response of the operation in case of success. If the original
-        # method returns no data on success, such as `Delete`, the response is
-        # `google.protobuf.Empty`. If the original method is standard
-        # `Get`/`Create`/`Update`, the response should be the resource. For other
-        # methods, the response should have the type `XxxResponse`, where `Xxx`
-        # is the original method name. For example, if the original method name
-        # is `TakeSnapshot()`, the inferred response type is
-        # `TakeSnapshotResponse`.
+        # The normal response of the operation in case of success. If the original
+        # method returns no data on success, such as `Delete`, the response is `google.
+        # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+        # the response should be the resource. For other methods, the response should
+        # have the type `XxxResponse`, where `Xxx` is the original method name. For
+        # example, if the original method name is `TakeSnapshot()`, the inferred
+        # response type is `TakeSnapshotResponse`.
         # Corresponds to the JSON property `response`
         # @return [Hash<String,Object>]
         attr_accessor :response
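
Not part of the diff: a minimal sketch of the done/error/response contract documented above. `fetch_operation` is a hypothetical callable that re-reads the operation; the real call is service-specific and not shown here.

  # Polls until the operation finishes, then surfaces either the error or the
  # untyped response hash.
  def wait_for_operation(fetch_operation, interval: 5)
    loop do
      op = fetch_operation.call                 # returns a GoogleLongrunningOperation
      unless op.done?                           # false => still in progress
        sleep interval
        next
      end
      raise "operation failed: #{op.error.code} #{op.error.message}" if op.error
      return op.response                        # Hash<String,Object> on success
    end
  end
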
@@ -5858,12 +5695,12 @@ module Google
         end
       end

-      # The `Status` type defines a logical error model that is suitable for
-      # different programming environments, including REST APIs and RPC APIs. It is
-      # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-      # three pieces of data: error code, error message, and error details.
-      # You can find out more about this error model and how to work with it in the
-      # [API Design Guide](https://cloud.google.com/apis/design/errors).
+      # The `Status` type defines a logical error model that is suitable for different
+      # programming environments, including REST APIs and RPC APIs. It is used by [
+      # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+      # data: error code, error message, and error details. You can find out more
+      # about this error model and how to work with it in the [API Design Guide](https:
+      # //cloud.google.com/apis/design/errors).
       class GoogleRpcStatus
         include Google::Apis::Core::Hashable

@@ -5872,15 +5709,15 @@ module Google
         # @return [Fixnum]
         attr_accessor :code

-        # A list of messages that carry the error details.  There is a common set of
+        # A list of messages that carry the error details. There is a common set of
         # message types for APIs to use.
         # Corresponds to the JSON property `details`
         # @return [Array<Hash<String,Object>>]
         attr_accessor :details

-        # A developer-facing error message, which should be in English. Any
-        # user-facing error message should be localized and sent in the
-        # google.rpc.Status.details field, or localized by the client.
+        # A developer-facing error message, which should be in English. Any user-facing
+        # error message should be localized and sent in the google.rpc.Status.details
+        # field, or localized by the client.
         # Corresponds to the JSON property `message`
         # @return [String]
         attr_accessor :message
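
Not part of the diff: a minimal sketch of the three-part error model (code, message, details) described above; the sample values are illustrative only.

  require 'google/apis/videointelligence_v1beta2'

  status = Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus.new(
    code: 3,                                  # 3 = INVALID_ARGUMENT in google.rpc.Code
    message: 'Unsupported input_uri scheme',  # developer-facing, in English
    details: [{ '@type' => 'type.googleapis.com/google.rpc.BadRequest' }]
  )

  warn "error #{status.code}: #{status.message}"
  (status.details || []).each { |d| warn "  detail: #{d['@type']}" }
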