google-api-client 0.43.0 → 0.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (964)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/autoapprove.yml +49 -0
  3. data/.github/workflows/release-please.yml +77 -0
  4. data/.gitignore +2 -0
  5. data/.kokoro/trampoline.sh +0 -0
  6. data/CHANGELOG.md +1066 -184
  7. data/Gemfile +1 -0
  8. data/Rakefile +31 -3
  9. data/api_list_config.yaml +8 -0
  10. data/api_names.yaml +1 -0
  11. data/bin/generate-api +77 -15
  12. data/docs/oauth-server.md +4 -6
  13. data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
  14. data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
  15. data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
  16. data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
  17. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  18. data/generated/google/apis/accessapproval_v1.rb +1 -1
  19. data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
  20. data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
  21. data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
  22. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  23. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  24. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  25. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  26. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
  27. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  28. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  29. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
  30. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
  31. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  32. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  33. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  34. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  35. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  36. data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
  37. data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
  38. data/generated/google/apis/admin_directory_v1/service.rb +607 -998
  39. data/generated/google/apis/admin_directory_v1.rb +6 -8
  40. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  41. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  42. data/generated/google/apis/admin_reports_v1.rb +6 -5
  43. data/generated/google/apis/admob_v1/classes.rb +31 -31
  44. data/generated/google/apis/admob_v1/service.rb +2 -1
  45. data/generated/google/apis/admob_v1.rb +6 -2
  46. data/generated/google/apis/adsense_v1_4/service.rb +4 -1
  47. data/generated/google/apis/adsense_v1_4.rb +1 -1
  48. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  49. data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
  50. data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
  51. data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
  52. data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
  53. data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
  54. data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
  55. data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
  56. data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
  57. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  58. data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
  59. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  60. data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
  61. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  62. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  63. data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
  64. data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
  65. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  66. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  67. data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
  68. data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
  69. data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
  70. data/generated/google/apis/apigateway_v1beta.rb +34 -0
  71. data/generated/google/apis/apigee_v1/classes.rb +630 -88
  72. data/generated/google/apis/apigee_v1/representations.rb +209 -1
  73. data/generated/google/apis/apigee_v1/service.rb +401 -74
  74. data/generated/google/apis/apigee_v1.rb +6 -7
  75. data/generated/google/apis/appengine_v1/classes.rb +96 -59
  76. data/generated/google/apis/appengine_v1/representations.rb +17 -0
  77. data/generated/google/apis/appengine_v1/service.rb +38 -47
  78. data/generated/google/apis/appengine_v1.rb +1 -1
  79. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  80. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  81. data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
  82. data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
  83. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  84. data/generated/google/apis/appengine_v1beta.rb +1 -1
  85. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  86. data/generated/google/apis/appsmarket_v2.rb +1 -1
  87. data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
  88. data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
  89. data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
  90. data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
  91. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
  92. data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
  93. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  94. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  95. data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
  96. data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
  97. data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
  98. data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
  99. data/generated/google/apis/bigquery_v2/classes.rb +593 -576
  100. data/generated/google/apis/bigquery_v2/representations.rb +85 -0
  101. data/generated/google/apis/bigquery_v2/service.rb +79 -41
  102. data/generated/google/apis/bigquery_v2.rb +1 -1
  103. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  104. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  105. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  106. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  107. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  108. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  109. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  110. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  111. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  112. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  113. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  114. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  115. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  116. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  117. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  118. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  119. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  120. data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
  121. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  122. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  123. data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
  124. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  125. data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
  126. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  127. data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
  128. data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
  129. data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
  130. data/generated/google/apis/billingbudgets_v1.rb +38 -0
  131. data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
  132. data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
  133. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  134. data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
  135. data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
  136. data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
  137. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  138. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
  139. data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
  140. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
  141. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  142. data/generated/google/apis/books_v1/service.rb +54 -54
  143. data/generated/google/apis/books_v1.rb +1 -1
  144. data/generated/google/apis/calendar_v3/classes.rb +13 -10
  145. data/generated/google/apis/calendar_v3.rb +1 -1
  146. data/generated/google/apis/chat_v1/classes.rb +173 -116
  147. data/generated/google/apis/chat_v1/representations.rb +36 -0
  148. data/generated/google/apis/chat_v1/service.rb +30 -42
  149. data/generated/google/apis/chat_v1.rb +1 -1
  150. data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
  151. data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
  152. data/generated/google/apis/civicinfo_v2.rb +1 -1
  153. data/generated/google/apis/classroom_v1/classes.rb +153 -21
  154. data/generated/google/apis/classroom_v1/representations.rb +43 -0
  155. data/generated/google/apis/classroom_v1/service.rb +240 -0
  156. data/generated/google/apis/classroom_v1.rb +7 -1
  157. data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
  158. data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
  159. data/generated/google/apis/cloudasset_v1/service.rb +296 -167
  160. data/generated/google/apis/cloudasset_v1.rb +1 -1
  161. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  162. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  163. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  164. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  165. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  166. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  167. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
  168. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  169. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  170. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  171. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  172. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  173. data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
  174. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  175. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  176. data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
  177. data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
  178. data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
  179. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  180. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
  181. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
  182. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  183. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  184. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
  185. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
  186. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  187. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  188. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  189. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  190. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  191. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  192. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  193. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  194. data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
  195. data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
  196. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  197. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  198. data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
  199. data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
  200. data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
  201. data/generated/google/apis/cloudidentity_v1.rb +4 -1
  202. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
  203. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
  204. data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
  205. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  206. data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
  207. data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
  208. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  209. data/generated/google/apis/cloudiot_v1.rb +1 -1
  210. data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
  211. data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
  212. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  213. data/generated/google/apis/cloudkms_v1.rb +1 -1
  214. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  215. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  216. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  217. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
  218. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
  219. data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
  220. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  221. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
  222. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
  223. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
  224. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  225. data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
  226. data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
  227. data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
  228. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  229. data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
  230. data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
  231. data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
  232. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  233. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  234. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  235. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  236. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  237. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  238. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  239. data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
  240. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  241. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  242. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  243. data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
  244. data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
  245. data/generated/google/apis/cloudshell_v1/service.rb +198 -25
  246. data/generated/google/apis/cloudshell_v1.rb +1 -1
  247. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  248. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  249. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  250. data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
  251. data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
  252. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  253. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  254. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
  255. data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
  256. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  257. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  258. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
  259. data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
  260. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  261. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  262. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  263. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  264. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  265. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  266. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  267. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  268. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  269. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  270. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  271. data/generated/google/apis/composer_v1/classes.rb +189 -242
  272. data/generated/google/apis/composer_v1/service.rb +79 -150
  273. data/generated/google/apis/composer_v1.rb +1 -1
  274. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  275. data/generated/google/apis/composer_v1beta1/service.rb +94 -179
  276. data/generated/google/apis/composer_v1beta1.rb +1 -1
  277. data/generated/google/apis/compute_alpha/classes.rb +1227 -186
  278. data/generated/google/apis/compute_alpha/representations.rb +235 -8
  279. data/generated/google/apis/compute_alpha/service.rb +2009 -1024
  280. data/generated/google/apis/compute_alpha.rb +1 -1
  281. data/generated/google/apis/compute_beta/classes.rb +1080 -108
  282. data/generated/google/apis/compute_beta/representations.rb +212 -2
  283. data/generated/google/apis/compute_beta/service.rb +1413 -741
  284. data/generated/google/apis/compute_beta.rb +1 -1
  285. data/generated/google/apis/compute_v1/classes.rb +1512 -106
  286. data/generated/google/apis/compute_v1/representations.rb +470 -1
  287. data/generated/google/apis/compute_v1/service.rb +1625 -285
  288. data/generated/google/apis/compute_v1.rb +1 -1
  289. data/generated/google/apis/container_v1/classes.rb +982 -965
  290. data/generated/google/apis/container_v1/representations.rb +60 -0
  291. data/generated/google/apis/container_v1/service.rb +435 -502
  292. data/generated/google/apis/container_v1.rb +1 -1
  293. data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
  294. data/generated/google/apis/container_v1beta1/representations.rb +91 -0
  295. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  296. data/generated/google/apis/container_v1beta1.rb +1 -1
  297. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
  298. data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
  299. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  300. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  301. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
  302. data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
  303. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  304. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  305. data/generated/google/apis/content_v2/classes.rb +515 -1219
  306. data/generated/google/apis/content_v2/service.rb +377 -650
  307. data/generated/google/apis/content_v2.rb +3 -4
  308. data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
  309. data/generated/google/apis/content_v2_1/representations.rb +288 -0
  310. data/generated/google/apis/content_v2_1/service.rb +987 -795
  311. data/generated/google/apis/content_v2_1.rb +3 -4
  312. data/generated/google/apis/customsearch_v1/service.rb +2 -2
  313. data/generated/google/apis/customsearch_v1.rb +1 -1
  314. data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
  315. data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
  316. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  317. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  318. data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
  319. data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
  320. data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
  321. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  322. data/generated/google/apis/datafusion_v1/classes.rb +283 -397
  323. data/generated/google/apis/datafusion_v1/representations.rb +5 -0
  324. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  325. data/generated/google/apis/datafusion_v1.rb +5 -8
  326. data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
  327. data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
  328. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  329. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  330. data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
  331. data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
  332. data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
  333. data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
  334. data/generated/google/apis/dataproc_v1/classes.rb +97 -13
  335. data/generated/google/apis/dataproc_v1/representations.rb +34 -0
  336. data/generated/google/apis/dataproc_v1.rb +1 -1
  337. data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
  338. data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
  339. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  340. data/generated/google/apis/datastore_v1/classes.rb +334 -476
  341. data/generated/google/apis/datastore_v1/service.rb +52 -63
  342. data/generated/google/apis/datastore_v1.rb +1 -1
  343. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  344. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  345. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  346. data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
  347. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  348. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  349. data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
  350. data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
  351. data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
  352. data/generated/google/apis/deploymentmanager_v2.rb +6 -4
  353. data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
  354. data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
  355. data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
  356. data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
  357. data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
  358. data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
  359. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  360. data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
  361. data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
  362. data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
  363. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  364. data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
  365. data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
  366. data/generated/google/apis/dialogflow_v2.rb +1 -1
  367. data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
  368. data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
  369. data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
  370. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  371. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
  372. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
  373. data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
  374. data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
  375. data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
  376. data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
  377. data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
  378. data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
  379. data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
  380. data/generated/google/apis/displayvideo_v1/service.rb +287 -32
  381. data/generated/google/apis/displayvideo_v1.rb +1 -1
  382. data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
  383. data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
  384. data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
  385. data/generated/google/apis/displayvideo_v1beta.rb +38 -0
  386. data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
  387. data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
  388. data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
  389. data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
  390. data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
  391. data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
  392. data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
  393. data/generated/google/apis/displayvideo_v1dev.rb +38 -0
  394. data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
  395. data/generated/google/apis/dlp_v2/representations.rb +16 -0
  396. data/generated/google/apis/dlp_v2/service.rb +962 -905
  397. data/generated/google/apis/dlp_v2.rb +1 -1
  398. data/generated/google/apis/dns_v1/classes.rb +356 -198
  399. data/generated/google/apis/dns_v1/representations.rb +83 -0
  400. data/generated/google/apis/dns_v1/service.rb +83 -98
  401. data/generated/google/apis/dns_v1.rb +2 -2
  402. data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
  403. data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
  404. data/generated/google/apis/dns_v1beta2/service.rb +83 -98
  405. data/generated/google/apis/dns_v1beta2.rb +2 -2
  406. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  407. data/generated/google/apis/docs_v1/service.rb +17 -22
  408. data/generated/google/apis/docs_v1.rb +1 -1
  409. data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
  410. data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
  411. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  412. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  413. data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
  414. data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
  415. data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
  416. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
  417. data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
  418. data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
  419. data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
  420. data/generated/google/apis/domains_v1alpha2.rb +34 -0
  421. data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
  422. data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
  423. data/generated/google/apis/domains_v1beta1/service.rb +805 -0
  424. data/generated/google/apis/domains_v1beta1.rb +34 -0
  425. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  426. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  427. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  428. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
  429. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  430. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  431. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  432. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  433. data/generated/google/apis/drive_v2/classes.rb +18 -7
  434. data/generated/google/apis/drive_v2/representations.rb +1 -0
  435. data/generated/google/apis/drive_v2/service.rb +79 -15
  436. data/generated/google/apis/drive_v2.rb +1 -1
  437. data/generated/google/apis/drive_v3/classes.rb +18 -8
  438. data/generated/google/apis/drive_v3/representations.rb +1 -0
  439. data/generated/google/apis/drive_v3/service.rb +59 -11
  440. data/generated/google/apis/drive_v3.rb +1 -1
  441. data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
  442. data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
  443. data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
  444. data/generated/google/apis/eventarc_v1beta1.rb +34 -0
  445. data/generated/google/apis/file_v1/classes.rb +155 -174
  446. data/generated/google/apis/file_v1/service.rb +43 -52
  447. data/generated/google/apis/file_v1.rb +1 -1
  448. data/generated/google/apis/file_v1beta1/classes.rb +335 -194
  449. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  450. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  451. data/generated/google/apis/file_v1beta1.rb +1 -1
  452. data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
  453. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  454. data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
  455. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  456. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  457. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
  458. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
  459. data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
  460. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  461. data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
  462. data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
  463. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  464. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  465. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  466. data/generated/google/apis/firebaserules_v1.rb +1 -1
  467. data/generated/google/apis/firestore_v1/classes.rb +406 -502
  468. data/generated/google/apis/firestore_v1/service.rb +165 -201
  469. data/generated/google/apis/firestore_v1.rb +1 -1
  470. data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
  471. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  472. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  473. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  474. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  475. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  476. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  477. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  478. data/generated/google/apis/fitness_v1/service.rb +628 -0
  479. data/generated/google/apis/fitness_v1.rb +97 -0
  480. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  481. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  482. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  483. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  484. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  485. data/generated/google/apis/games_management_v1management.rb +2 -3
  486. data/generated/google/apis/games_v1/classes.rb +376 -83
  487. data/generated/google/apis/games_v1/representations.rb +118 -0
  488. data/generated/google/apis/games_v1/service.rb +118 -90
  489. data/generated/google/apis/games_v1.rb +2 -3
  490. data/generated/google/apis/gameservices_v1/classes.rb +22 -14
  491. data/generated/google/apis/gameservices_v1/representations.rb +1 -0
  492. data/generated/google/apis/gameservices_v1/service.rb +54 -51
  493. data/generated/google/apis/gameservices_v1.rb +1 -1
  494. data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
  495. data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
  496. data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
  497. data/generated/google/apis/gameservices_v1beta.rb +1 -1
  498. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  499. data/generated/google/apis/genomics_v1/service.rb +28 -43
  500. data/generated/google/apis/genomics_v1.rb +1 -1
  501. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  502. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  503. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  504. data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
  505. data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
  506. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  507. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  508. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  509. data/generated/google/apis/gmail_v1/service.rb +5 -4
  510. data/generated/google/apis/gmail_v1.rb +1 -1
  511. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
  512. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  513. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  514. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  515. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  516. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  517. data/generated/google/apis/healthcare_v1/classes.rb +637 -826
  518. data/generated/google/apis/healthcare_v1/representations.rb +32 -0
  519. data/generated/google/apis/healthcare_v1/service.rb +842 -855
  520. data/generated/google/apis/healthcare_v1.rb +1 -1
  521. data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
  522. data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
  523. data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
  524. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  525. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  526. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  527. data/generated/google/apis/homegraph_v1.rb +4 -1
  528. data/generated/google/apis/iam_v1/classes.rb +395 -592
  529. data/generated/google/apis/iam_v1/representations.rb +1 -0
  530. data/generated/google/apis/iam_v1/service.rb +427 -555
  531. data/generated/google/apis/iam_v1.rb +1 -1
  532. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  533. data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
  534. data/generated/google/apis/iamcredentials_v1.rb +3 -2
  535. data/generated/google/apis/iap_v1/classes.rb +253 -355
  536. data/generated/google/apis/iap_v1/representations.rb +1 -0
  537. data/generated/google/apis/iap_v1/service.rb +61 -71
  538. data/generated/google/apis/iap_v1.rb +1 -1
  539. data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
  540. data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
  541. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  542. data/generated/google/apis/iap_v1beta1.rb +1 -1
  543. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  544. data/generated/google/apis/indexing_v3.rb +1 -1
  545. data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
  546. data/generated/google/apis/jobs_v2/representations.rb +272 -0
  547. data/generated/google/apis/jobs_v2/service.rb +85 -126
  548. data/generated/google/apis/jobs_v2.rb +1 -1
  549. data/generated/google/apis/jobs_v3/classes.rb +1559 -980
  550. data/generated/google/apis/jobs_v3/representations.rb +272 -0
  551. data/generated/google/apis/jobs_v3/service.rb +101 -139
  552. data/generated/google/apis/jobs_v3.rb +1 -1
  553. data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
  554. data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
  555. data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
  556. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  557. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  558. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  559. data/generated/google/apis/kgsearch_v1.rb +1 -1
  560. data/generated/google/apis/licensing_v1/classes.rb +1 -1
  561. data/generated/google/apis/licensing_v1/service.rb +56 -86
  562. data/generated/google/apis/licensing_v1.rb +4 -3
  563. data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
  564. data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
  565. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  566. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  567. data/generated/google/apis/localservices_v1/classes.rb +426 -0
  568. data/generated/google/apis/localservices_v1/representations.rb +174 -0
  569. data/generated/google/apis/localservices_v1/service.rb +199 -0
  570. data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
  571. data/generated/google/apis/logging_v2/classes.rb +306 -232
  572. data/generated/google/apis/logging_v2/representations.rb +79 -0
  573. data/generated/google/apis/logging_v2/service.rb +3307 -1579
  574. data/generated/google/apis/logging_v2.rb +1 -1
  575. data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
  576. data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
  577. data/generated/google/apis/managedidentities_v1/service.rb +1 -4
  578. data/generated/google/apis/managedidentities_v1.rb +1 -1
  579. data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
  580. data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
  581. data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
  582. data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
  583. data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
  584. data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
  585. data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
  586. data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
  587. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  588. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  589. data/generated/google/apis/manufacturers_v1.rb +1 -1
  590. data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
  591. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  592. data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
  593. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  594. data/generated/google/apis/ml_v1/classes.rb +1122 -1149
  595. data/generated/google/apis/ml_v1/representations.rb +82 -0
  596. data/generated/google/apis/ml_v1/service.rb +194 -253
  597. data/generated/google/apis/ml_v1.rb +1 -1
  598. data/generated/google/apis/monitoring_v1/classes.rb +107 -26
  599. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  600. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  601. data/generated/google/apis/monitoring_v1.rb +1 -1
  602. data/generated/google/apis/monitoring_v3/classes.rb +303 -345
  603. data/generated/google/apis/monitoring_v3/representations.rb +18 -0
  604. data/generated/google/apis/monitoring_v3/service.rb +176 -146
  605. data/generated/google/apis/monitoring_v3.rb +1 -1
  606. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  607. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  608. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  609. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
  610. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
  611. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  612. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  613. data/generated/google/apis/osconfig_v1/classes.rb +154 -902
  614. data/generated/google/apis/osconfig_v1/representations.rb +0 -337
  615. data/generated/google/apis/osconfig_v1/service.rb +26 -31
  616. data/generated/google/apis/osconfig_v1.rb +3 -3
  617. data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
  618. data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
  619. data/generated/google/apis/osconfig_v1beta.rb +3 -3
  620. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  621. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  622. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  623. data/generated/google/apis/oslogin_v1.rb +1 -1
  624. data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
  625. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  626. data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
  627. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  628. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  629. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  630. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  631. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  632. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  633. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  634. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  635. data/generated/google/apis/people_v1/classes.rb +173 -63
  636. data/generated/google/apis/people_v1/representations.rb +41 -0
  637. data/generated/google/apis/people_v1/service.rb +63 -61
  638. data/generated/google/apis/people_v1.rb +1 -1
  639. data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
  640. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  641. data/generated/google/apis/playablelocations_v3.rb +1 -1
  642. data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
  643. data/generated/google/apis/playcustomapp_v1.rb +1 -1
  644. data/generated/google/apis/poly_v1/classes.rb +65 -79
  645. data/generated/google/apis/poly_v1/service.rb +50 -63
  646. data/generated/google/apis/poly_v1.rb +3 -4
  647. data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
  648. data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
  649. data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
  650. data/generated/google/apis/privateca_v1beta1.rb +34 -0
  651. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
  652. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
  653. data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
  654. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  655. data/generated/google/apis/pubsub_v1/classes.rb +399 -518
  656. data/generated/google/apis/pubsub_v1/representations.rb +2 -0
  657. data/generated/google/apis/pubsub_v1/service.rb +221 -247
  658. data/generated/google/apis/pubsub_v1.rb +1 -1
  659. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  660. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  661. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  662. data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
  663. data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
  664. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  665. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  666. data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
  667. data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
  668. data/generated/google/apis/pubsublite_v1/service.rb +558 -0
  669. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  670. data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
  671. data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
  672. data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
  673. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  674. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
  675. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
  676. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  677. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  678. data/generated/google/apis/recommender_v1/classes.rb +1 -1
  679. data/generated/google/apis/recommender_v1/service.rb +4 -2
  680. data/generated/google/apis/recommender_v1.rb +1 -1
  681. data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
  682. data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
  683. data/generated/google/apis/recommender_v1beta1.rb +1 -1
  684. data/generated/google/apis/redis_v1/classes.rb +91 -513
  685. data/generated/google/apis/redis_v1/representations.rb +0 -139
  686. data/generated/google/apis/redis_v1/service.rb +92 -109
  687. data/generated/google/apis/redis_v1.rb +1 -1
  688. data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
  689. data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
  690. data/generated/google/apis/redis_v1beta1/service.rb +126 -109
  691. data/generated/google/apis/redis_v1beta1.rb +1 -1
  692. data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
  693. data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
  694. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  695. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  696. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
  697. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
  698. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  699. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  700. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
  701. data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
  702. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  703. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  704. data/generated/google/apis/reseller_v1/classes.rb +151 -219
  705. data/generated/google/apis/reseller_v1/service.rb +122 -173
  706. data/generated/google/apis/reseller_v1.rb +2 -2
  707. data/generated/google/apis/run_v1/classes.rb +19 -138
  708. data/generated/google/apis/run_v1/representations.rb +1 -62
  709. data/generated/google/apis/run_v1/service.rb +0 -342
  710. data/generated/google/apis/run_v1.rb +1 -1
  711. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  712. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  713. data/generated/google/apis/run_v1alpha1.rb +1 -1
  714. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  715. data/generated/google/apis/run_v1beta1.rb +1 -1
  716. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
  717. data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
  718. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  719. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  720. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  721. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  722. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  723. data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
  724. data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
  725. data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
  726. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  727. data/generated/google/apis/script_v1/classes.rb +88 -111
  728. data/generated/google/apis/script_v1/service.rb +63 -69
  729. data/generated/google/apis/script_v1.rb +1 -1
  730. data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
  731. data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
  732. data/generated/google/apis/searchconsole_v1/service.rb +287 -0
  733. data/generated/google/apis/searchconsole_v1.rb +7 -1
  734. data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
  735. data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
  736. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  737. data/generated/google/apis/secretmanager_v1.rb +1 -1
  738. data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
  739. data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
  740. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  741. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  742. data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
  743. data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
  744. data/generated/google/apis/securitycenter_v1.rb +1 -1
  745. data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
  746. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
  747. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  748. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
  749. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
  750. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  751. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  752. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
  753. data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
  754. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
  755. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  756. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
  757. data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
  758. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  759. data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
  760. data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
  761. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  762. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  763. data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
  764. data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
  765. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  766. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  767. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
  768. data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
  769. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  770. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  771. data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
  772. data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
  773. data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
  774. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  775. data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
  776. data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
  777. data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
  778. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  779. data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
  780. data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
  781. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  782. data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
  783. data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
  784. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  785. data/generated/google/apis/serviceusage_v1.rb +1 -1
  786. data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
  787. data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
  788. data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
  789. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  790. data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
  791. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  792. data/generated/google/apis/sheets_v4/service.rb +113 -149
  793. data/generated/google/apis/sheets_v4.rb +1 -1
  794. data/generated/google/apis/site_verification_v1.rb +1 -1
  795. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  796. data/generated/google/apis/slides_v1/service.rb +23 -30
  797. data/generated/google/apis/slides_v1.rb +1 -1
  798. data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
  799. data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
  800. data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
  801. data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
  802. data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
  803. data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
  804. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  805. data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
  806. data/generated/google/apis/spanner_v1/representations.rb +1 -0
  807. data/generated/google/apis/spanner_v1/service.rb +443 -618
  808. data/generated/google/apis/spanner_v1.rb +1 -1
  809. data/generated/google/apis/speech_v1/classes.rb +174 -220
  810. data/generated/google/apis/speech_v1/service.rb +27 -32
  811. data/generated/google/apis/speech_v1.rb +1 -1
  812. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  813. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  814. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  815. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  816. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  817. data/generated/google/apis/speech_v2beta1.rb +1 -1
  818. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
  819. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
  820. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
  821. data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
  822. data/generated/google/apis/storage_v1/classes.rb +10 -17
  823. data/generated/google/apis/storage_v1/representations.rb +2 -3
  824. data/generated/google/apis/storage_v1/service.rb +3 -2
  825. data/generated/google/apis/storage_v1.rb +1 -1
  826. data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
  827. data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
  828. data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
  829. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  830. data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
  831. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  832. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  833. data/generated/google/apis/sts_v1/classes.rb +121 -0
  834. data/generated/google/apis/sts_v1/representations.rb +59 -0
  835. data/generated/google/apis/sts_v1/service.rb +90 -0
  836. data/generated/google/apis/sts_v1.rb +32 -0
  837. data/generated/google/apis/sts_v1beta/classes.rb +191 -0
  838. data/generated/google/apis/sts_v1beta/representations.rb +61 -0
  839. data/generated/google/apis/sts_v1beta/service.rb +92 -0
  840. data/generated/google/apis/sts_v1beta.rb +32 -0
  841. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  842. data/generated/google/apis/tagmanager_v1.rb +1 -1
  843. data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
  844. data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
  845. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  846. data/generated/google/apis/tagmanager_v2.rb +1 -1
  847. data/generated/google/apis/tasks_v1/classes.rb +21 -22
  848. data/generated/google/apis/tasks_v1/service.rb +19 -19
  849. data/generated/google/apis/tasks_v1.rb +1 -1
  850. data/generated/google/apis/testing_v1/classes.rb +384 -390
  851. data/generated/google/apis/testing_v1/representations.rb +23 -0
  852. data/generated/google/apis/testing_v1/service.rb +22 -28
  853. data/generated/google/apis/testing_v1.rb +1 -1
  854. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  855. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  856. data/generated/google/apis/texttospeech_v1.rb +1 -1
  857. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  858. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  859. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  860. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  861. data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
  862. data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
  863. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  864. data/generated/google/apis/tpu_v1/classes.rb +57 -3
  865. data/generated/google/apis/tpu_v1/representations.rb +19 -0
  866. data/generated/google/apis/tpu_v1/service.rb +8 -8
  867. data/generated/google/apis/tpu_v1.rb +1 -1
  868. data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
  869. data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
  870. data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
  871. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  872. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  873. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  874. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  875. data/generated/google/apis/trafficdirector_v2.rb +34 -0
  876. data/generated/google/apis/translate_v3/classes.rb +151 -177
  877. data/generated/google/apis/translate_v3/service.rb +122 -151
  878. data/generated/google/apis/translate_v3.rb +1 -1
  879. data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
  880. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  881. data/generated/google/apis/translate_v3beta1.rb +1 -1
  882. data/generated/google/apis/vault_v1/classes.rb +413 -103
  883. data/generated/google/apis/vault_v1/representations.rb +162 -0
  884. data/generated/google/apis/vault_v1/service.rb +182 -37
  885. data/generated/google/apis/vault_v1.rb +1 -1
  886. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  887. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  888. data/generated/google/apis/vectortile_v1.rb +1 -1
  889. data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
  890. data/generated/google/apis/verifiedaccess_v1.rb +1 -1
  891. data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
  892. data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
  893. data/generated/google/apis/videointelligence_v1/service.rb +38 -77
  894. data/generated/google/apis/videointelligence_v1.rb +1 -1
  895. data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
  896. data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
  897. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  898. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  899. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
  900. data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
  901. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  902. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  903. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
  904. data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
  905. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  906. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  907. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
  908. data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
  909. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  910. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  911. data/generated/google/apis/vision_v1/classes.rb +16 -16
  912. data/generated/google/apis/vision_v1.rb +1 -1
  913. data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
  914. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  915. data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
  916. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  917. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  918. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  919. data/generated/google/apis/webfonts_v1.rb +2 -3
  920. data/generated/google/apis/websecurityscanner_v1.rb +1 -1
  921. data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
  922. data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
  923. data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
  924. data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
  925. data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
  926. data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
  927. data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
  928. data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
  929. data/generated/google/apis/workflows_v1beta/service.rb +438 -0
  930. data/generated/google/apis/workflows_v1beta.rb +35 -0
  931. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  932. data/generated/google/apis/youtube_v3/classes.rb +0 -586
  933. data/generated/google/apis/youtube_v3/representations.rb +0 -269
  934. data/generated/google/apis/youtube_v3/service.rb +3 -120
  935. data/generated/google/apis/youtube_v3.rb +1 -1
  936. data/google-api-client.gemspec +25 -24
  937. data/lib/google/apis/core/api_command.rb +1 -0
  938. data/lib/google/apis/core/http_command.rb +2 -1
  939. data/lib/google/apis/options.rb +8 -5
  940. data/lib/google/apis/version.rb +1 -1
  941. data/synth.py +40 -0
  942. metadata +134 -41
  943. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  944. data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
  945. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  946. data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
  947. data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
  948. data/generated/google/apis/appsactivity_v1/service.rb +0 -126
  949. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  950. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  951. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  952. data/generated/google/apis/dns_v2beta1.rb +0 -43
  953. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  954. data/generated/google/apis/plus_v1/classes.rb +0 -2094
  955. data/generated/google/apis/plus_v1/representations.rb +0 -907
  956. data/generated/google/apis/plus_v1/service.rb +0 -451
  957. data/generated/google/apis/plus_v1.rb +0 -43
  958. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  959. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  960. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  961. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  962. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  963. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
  964. data/generated/google/apis/storage_v1beta2.rb +0 -40
@@ -22,9 +22,9 @@ module Google
22
22
  module Apis
23
23
  module VideointelligenceV1beta2
24
24
 
25
- # Video annotation progress. Included in the `metadata`
26
- # field of the `Operation` returned by the `GetOperation`
27
- # call of the `google::longrunning::Operations` service.
25
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
26
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
27
+ # service.
28
28
  class GoogleCloudVideointelligenceV1AnnotateVideoProgress
29
29
  include Google::Apis::Core::Hashable
30
30
 
@@ -43,9 +43,9 @@ module Google
43
43
  end
44
44
  end
45
45
 
46
- # Video annotation response. Included in the `response`
47
- # field of the `Operation` returned by the `GetOperation`
48
- # call of the `google::longrunning::Operations` service.
46
+ # Video annotation response. Included in the `response` field of the `Operation`
47
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
48
+ # service.
49
49
  class GoogleCloudVideointelligenceV1AnnotateVideoResponse
50
50
  include Google::Apis::Core::Hashable
51
51
 
@@ -73,14 +73,14 @@ module Google
73
73
  # @return [Float]
74
74
  attr_accessor :confidence
75
75
 
76
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
77
- # A full list of supported type names will be provided in the document.
76
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
77
+ # full list of supported type names will be provided in the document.
78
78
  # Corresponds to the JSON property `name`
79
79
  # @return [String]
80
80
  attr_accessor :name
81
81
 
82
- # Text value of the detection result. For example, the value for "HairColor"
83
- # can be "black", "blonde", etc.
82
+ # Text value of the detection result. For example, the value for "HairColor" can
83
+ # be "black", "blonde", etc.
84
84
  # Corresponds to the JSON property `value`
85
85
  # @return [String]
86
86
  attr_accessor :value
@@ -112,9 +112,8 @@ module Google
112
112
  # @return [String]
113
113
  attr_accessor :name
114
114
 
115
- # A vertex represents a 2D point in the image.
116
- # NOTE: the normalized vertex coordinates are relative to the original image
117
- # and range from 0 to 1.
115
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
116
+ # coordinates are relative to the original image and range from 0 to 1.
118
117
  # Corresponds to the JSON property `point`
119
118
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedVertex]
120
119
  attr_accessor :point
@@ -140,8 +139,7 @@ module Google
140
139
  # @return [String]
141
140
  attr_accessor :description
142
141
 
143
- # Opaque entity ID. Some IDs may be available in
144
- # [Google Knowledge Graph Search
142
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
145
143
  # API](https://developers.google.com/knowledge-graph/).
146
144
  # Corresponds to the JSON property `entityId`
147
145
  # @return [String]
@@ -164,9 +162,9 @@ module Google
164
162
  end
165
163
  end
166
164
 
167
- # Explicit content annotation (based on per-frame visual signals only).
168
- # If no explicit content has been detected in a frame, no annotations are
169
- # present for that frame.
165
+ # Explicit content annotation (based on per-frame visual signals only). If no
166
+ # explicit content has been detected in a frame, no annotations are present for
167
+ # that frame.
170
168
  class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
171
169
  include Google::Apis::Core::Hashable
172
170
 
@@ -217,14 +215,110 @@ module Google
217
215
  end
218
216
  end
219
217
 
218
+ # Deprecated. No effect.
219
+ class GoogleCloudVideointelligenceV1FaceAnnotation
220
+ include Google::Apis::Core::Hashable
221
+
222
+ # All video frames where a face was detected.
223
+ # Corresponds to the JSON property `frames`
224
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1FaceFrame>]
225
+ attr_accessor :frames
226
+
227
+ # All video segments where a face was detected.
228
+ # Corresponds to the JSON property `segments`
229
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1FaceSegment>]
230
+ attr_accessor :segments
231
+
232
+ # Thumbnail of a representative face view (in JPEG format).
233
+ # Corresponds to the JSON property `thumbnail`
234
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
235
+ # @return [String]
236
+ attr_accessor :thumbnail
237
+
238
+ def initialize(**args)
239
+ update!(**args)
240
+ end
241
+
242
+ # Update properties of this object
243
+ def update!(**args)
244
+ @frames = args[:frames] if args.key?(:frames)
245
+ @segments = args[:segments] if args.key?(:segments)
246
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
247
+ end
248
+ end
249
+
250
+ # Face detection annotation.
251
+ class GoogleCloudVideointelligenceV1FaceDetectionAnnotation
252
+ include Google::Apis::Core::Hashable
253
+
254
+ # Feature version.
255
+ # Corresponds to the JSON property `version`
256
+ # @return [String]
257
+ attr_accessor :version
258
+
259
+ def initialize(**args)
260
+ update!(**args)
261
+ end
262
+
263
+ # Update properties of this object
264
+ def update!(**args)
265
+ @version = args[:version] if args.key?(:version)
266
+ end
267
+ end
268
+
269
+ # Deprecated. No effect.
270
+ class GoogleCloudVideointelligenceV1FaceFrame
271
+ include Google::Apis::Core::Hashable
272
+
273
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
274
+ # same face is detected in multiple locations within the current frame.
275
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
276
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox>]
277
+ attr_accessor :normalized_bounding_boxes
278
+
279
+ # Time-offset, relative to the beginning of the video, corresponding to the
280
+ # video frame for this location.
281
+ # Corresponds to the JSON property `timeOffset`
282
+ # @return [String]
283
+ attr_accessor :time_offset
284
+
285
+ def initialize(**args)
286
+ update!(**args)
287
+ end
288
+
289
+ # Update properties of this object
290
+ def update!(**args)
291
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
292
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
293
+ end
294
+ end
295
+
296
+ # Video segment level annotation results for face detection.
297
+ class GoogleCloudVideointelligenceV1FaceSegment
298
+ include Google::Apis::Core::Hashable
299
+
300
+ # Video segment.
301
+ # Corresponds to the JSON property `segment`
302
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment]
303
+ attr_accessor :segment
304
+
305
+ def initialize(**args)
306
+ update!(**args)
307
+ end
308
+
309
+ # Update properties of this object
310
+ def update!(**args)
311
+ @segment = args[:segment] if args.key?(:segment)
312
+ end
313
+ end
314
+
220
315
  # Label annotation.
221
316
  class GoogleCloudVideointelligenceV1LabelAnnotation
222
317
  include Google::Apis::Core::Hashable
223
318
 
224
- # Common categories for the detected entity.
225
- # For example, when the label is `Terrier`, the category is likely `dog`. And
226
- # in some cases there might be more than one categories e.g., `Terrier` could
227
- # also be a `pet`.
319
+ # Common categories for the detected entity. For example, when the label is `
320
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
321
+ # than one categories e.g., `Terrier` could also be a `pet`.
228
322
  # Corresponds to the JSON property `categoryEntities`
229
323
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity>]
230
324
  attr_accessor :category_entities
@@ -323,14 +417,14 @@ module Google
323
417
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity]
324
418
  attr_accessor :entity
325
419
 
326
- # All video segments where the recognized logo appears. There might be
327
- # multiple instances of the same logo class appearing in one VideoSegment.
420
+ # All video segments where the recognized logo appears. There might be multiple
421
+ # instances of the same logo class appearing in one VideoSegment.
328
422
  # Corresponds to the JSON property `segments`
329
423
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment>]
330
424
  attr_accessor :segments
331
425
 
332
- # All logo tracks where the recognized logo appears. Each track corresponds
333
- # to one logo instance appearing in consecutive frames.
426
+ # All logo tracks where the recognized logo appears. Each track corresponds to
427
+ # one logo instance appearing in consecutive frames.
334
428
  # Corresponds to the JSON property `tracks`
335
429
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Track>]
336
430
  attr_accessor :tracks
@@ -347,9 +441,8 @@ module Google
347
441
  end
348
442
  end
349
443
 
350
- # Normalized bounding box.
351
- # The normalized vertex coordinates are relative to the original image.
352
- # Range: [0, 1].
444
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
445
+ # original image. Range: [0, 1].
353
446
  class GoogleCloudVideointelligenceV1NormalizedBoundingBox
354
447
  include Google::Apis::Core::Hashable
355
448
 
@@ -387,20 +480,12 @@ module Google
387
480
  end
388
481
 
389
482
  # Normalized bounding polygon for text (that might not be aligned with axis).
390
- # Contains list of the corner points in clockwise order starting from
391
- # top-left corner. For example, for a rectangular bounding box:
392
- # When the text is horizontal it might look like:
393
- # 0----1
394
- # | |
395
- # 3----2
396
- # When it's clockwise rotated 180 degrees around the top-left corner it
397
- # becomes:
398
- # 2----3
399
- # | |
400
- # 1----0
401
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
402
- # than 0, or greater than 1 due to trignometric calculations for location of
403
- # the box.
483
+ # Contains list of the corner points in clockwise order starting from top-left
484
+ # corner. For example, for a rectangular bounding box: When the text is
485
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
486
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
487
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
488
+ # or greater than 1 due to trignometric calculations for location of the box.
404
489
  class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
405
490
  include Google::Apis::Core::Hashable
406
491
 
@@ -419,9 +504,8 @@ module Google
419
504
  end
420
505
  end
421
506
 
422
- # A vertex represents a 2D point in the image.
423
- # NOTE: the normalized vertex coordinates are relative to the original image
424
- # and range from 0 to 1.
507
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
508
+ # coordinates are relative to the original image and range from 0 to 1.
425
509
  class GoogleCloudVideointelligenceV1NormalizedVertex
426
510
  include Google::Apis::Core::Hashable
427
511
 
@@ -460,10 +544,10 @@ module Google
460
544
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity]
461
545
  attr_accessor :entity
462
546
 
463
- # Information corresponding to all frames where this object track appears.
464
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
465
- # messages in frames.
466
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
547
+ # Information corresponding to all frames where this object track appears. Non-
548
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
549
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
550
+ # frames.
467
551
  # Corresponds to the JSON property `frames`
468
552
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
469
553
  attr_accessor :frames
@@ -473,12 +557,11 @@ module Google
473
557
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment]
474
558
  attr_accessor :segment
475
559
 
476
- # Streaming mode ONLY.
477
- # In streaming mode, we do not know the end time of a tracked object
478
- # before it is completed. Hence, there is no VideoSegment info returned.
479
- # Instead, we provide a unique identifiable integer track_id so that
480
- # the customers can correlate the results of the ongoing
481
- # ObjectTrackAnnotation of the same track_id over time.
560
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
561
+ # tracked object before it is completed. Hence, there is no VideoSegment info
562
+ # returned. Instead, we provide a unique identifiable integer track_id so that
563
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
564
+ # of the same track_id over time.
482
565
  # Corresponds to the JSON property `trackId`
483
566
  # @return [Fixnum]
484
567
  attr_accessor :track_id
@@ -508,9 +591,8 @@ module Google
508
591
  class GoogleCloudVideointelligenceV1ObjectTrackingFrame
509
592
  include Google::Apis::Core::Hashable
510
593
 
511
- # Normalized bounding box.
512
- # The normalized vertex coordinates are relative to the original image.
513
- # Range: [0, 1].
594
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
595
+ # original image. Range: [0, 1].
514
596
  # Corresponds to the JSON property `normalizedBoundingBox`
515
597
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
516
598
  attr_accessor :normalized_bounding_box
@@ -531,16 +613,41 @@ module Google
531
613
  end
532
614
  end
533
615
 
616
+ # Person detection annotation per video.
617
+ class GoogleCloudVideointelligenceV1PersonDetectionAnnotation
618
+ include Google::Apis::Core::Hashable
619
+
620
+ # The detected tracks of a person.
621
+ # Corresponds to the JSON property `tracks`
622
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Track>]
623
+ attr_accessor :tracks
624
+
625
+ # Feature version.
626
+ # Corresponds to the JSON property `version`
627
+ # @return [String]
628
+ attr_accessor :version
629
+
630
+ def initialize(**args)
631
+ update!(**args)
632
+ end
633
+
634
+ # Update properties of this object
635
+ def update!(**args)
636
+ @tracks = args[:tracks] if args.key?(:tracks)
637
+ @version = args[:version] if args.key?(:version)
638
+ end
639
+ end
640
+
534
641
  # Alternative hypotheses (a.k.a. n-best list).
535
642
  class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative
536
643
  include Google::Apis::Core::Hashable
537
644
 
538
645
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
539
646
  # indicates an estimated greater likelihood that the recognized words are
540
- # correct. This field is set only for the top alternative.
541
- # This field is not guaranteed to be accurate and users should not rely on it
542
- # to be always provided.
543
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
647
+ # correct. This field is set only for the top alternative. This field is not
648
+ # guaranteed to be accurate and users should not rely on it to be always
649
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
650
+ # not set.
544
651
  # Corresponds to the JSON property `confidence`
545
652
  # @return [Float]
546
653
  attr_accessor :confidence
@@ -551,8 +658,8 @@ module Google
551
658
  attr_accessor :transcript
552
659
 
553
660
  # Output only. A list of word-specific information for each recognized word.
554
- # Note: When `enable_speaker_diarization` is set to true, you will see all
555
- # the words from the beginning of the audio.
661
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
662
+ # words from the beginning of the audio.
556
663
  # Corresponds to the JSON property `words`
557
664
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1WordInfo>]
558
665
  attr_accessor :words
@@ -573,18 +680,17 @@ module Google
573
680
  class GoogleCloudVideointelligenceV1SpeechTranscription
574
681
  include Google::Apis::Core::Hashable
575
682
 
576
- # May contain one or more recognition hypotheses (up to the maximum specified
577
- # in `max_alternatives`). These alternatives are ordered in terms of
578
- # accuracy, with the top (first) alternative being the most probable, as
579
- # ranked by the recognizer.
683
+ # May contain one or more recognition hypotheses (up to the maximum specified in
684
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
685
+ # the top (first) alternative being the most probable, as ranked by the
686
+ # recognizer.
580
687
  # Corresponds to the JSON property `alternatives`
581
688
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
582
689
  attr_accessor :alternatives
583
690
 
584
691
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
585
- # language tag of
586
- # the language in this result. This language code was detected to have the
587
- # most likelihood of being spoken in the audio.
692
+ # language tag of the language in this result. This language code was detected
693
+ # to have the most likelihood of being spoken in the audio.
588
694
  # Corresponds to the JSON property `languageCode`
589
695
  # @return [String]
590
696
  attr_accessor :language_code
@@ -633,27 +739,19 @@ module Google
633
739
  end
634
740
  end
635
741
 
636
- # Video frame level annotation results for text annotation (OCR).
637
- # Contains information regarding timestamp and bounding box locations for the
638
- # frames containing detected OCR text snippets.
742
+ # Video frame level annotation results for text annotation (OCR). Contains
743
+ # information regarding timestamp and bounding box locations for the frames
744
+ # containing detected OCR text snippets.
639
745
  class GoogleCloudVideointelligenceV1TextFrame
640
746
  include Google::Apis::Core::Hashable
641
747
 
642
748
  # Normalized bounding polygon for text (that might not be aligned with axis).
643
- # Contains list of the corner points in clockwise order starting from
644
- # top-left corner. For example, for a rectangular bounding box:
645
- # When the text is horizontal it might look like:
646
- # 0----1
647
- # | |
648
- # 3----2
649
- # When it's clockwise rotated 180 degrees around the top-left corner it
650
- # becomes:
651
- # 2----3
652
- # | |
653
- # 1----0
654
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
655
- # than 0, or greater than 1 due to trignometric calculations for location of
656
- # the box.
749
+ # Contains list of the corner points in clockwise order starting from top-left
750
+ # corner. For example, for a rectangular bounding box: When the text is
751
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
752
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
753
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
754
+ # or greater than 1 due to trignometric calculations for location of the box.
657
755
  # Corresponds to the JSON property `rotatedBoundingBox`
658
756
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
659
757
  attr_accessor :rotated_bounding_box
@@ -706,9 +804,8 @@ module Google
706
804
  end
707
805
  end
708
806
 
709
- # For tracking related features.
710
- # An object at time_offset with attributes, and located with
711
- # normalized_bounding_box.
807
+ # For tracking related features. An object at time_offset with attributes, and
808
+ # located with normalized_bounding_box.
712
809
  class GoogleCloudVideointelligenceV1TimestampedObject
713
810
  include Google::Apis::Core::Hashable
714
811
 
@@ -722,15 +819,14 @@ module Google
722
819
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1DetectedLandmark>]
723
820
  attr_accessor :landmarks
724
821
 
725
- # Normalized bounding box.
726
- # The normalized vertex coordinates are relative to the original image.
727
- # Range: [0, 1].
822
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
823
+ # original image. Range: [0, 1].
728
824
  # Corresponds to the JSON property `normalizedBoundingBox`
729
825
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
730
826
  attr_accessor :normalized_bounding_box
731
827
 
732
- # Time-offset, relative to the beginning of the video,
733
- # corresponding to the video frame for this object.
828
+ # Time-offset, relative to the beginning of the video, corresponding to the
829
+ # video frame for this object.
734
830
  # Corresponds to the JSON property `timeOffset`
735
831
  # @return [String]
736
832
  attr_accessor :time_offset
@@ -789,20 +885,19 @@ module Google
789
885
  class GoogleCloudVideointelligenceV1VideoAnnotationProgress
790
886
  include Google::Apis::Core::Hashable
791
887
 
792
- # Specifies which feature is being tracked if the request contains more than
793
- # one feature.
888
+ # Specifies which feature is being tracked if the request contains more than one
889
+ # feature.
794
890
  # Corresponds to the JSON property `feature`
795
891
  # @return [String]
796
892
  attr_accessor :feature
797
893
 
798
- # Video file location in
799
- # [Cloud Storage](https://cloud.google.com/storage/).
894
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
800
895
  # Corresponds to the JSON property `inputUri`
801
896
  # @return [String]
802
897
  attr_accessor :input_uri
803
898
 
804
- # Approximate percentage processed thus far. Guaranteed to be
805
- # 100 when fully processed.
899
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
900
+ # processed.
806
901
  # Corresponds to the JSON property `progressPercent`
807
902
  # @return [Fixnum]
808
903
  attr_accessor :progress_percent
@@ -841,31 +936,40 @@ module Google
841
936
  class GoogleCloudVideointelligenceV1VideoAnnotationResults
842
937
  include Google::Apis::Core::Hashable
843
938
 
844
- # The `Status` type defines a logical error model that is suitable for
845
- # different programming environments, including REST APIs and RPC APIs. It is
846
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
847
- # three pieces of data: error code, error message, and error details.
848
- # You can find out more about this error model and how to work with it in the
849
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
939
+ # The `Status` type defines a logical error model that is suitable for different
940
+ # programming environments, including REST APIs and RPC APIs. It is used by [
941
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
942
+ # data: error code, error message, and error details. You can find out more
943
+ # about this error model and how to work with it in the [API Design Guide](https:
944
+ # //cloud.google.com/apis/design/errors).
850
945
  # Corresponds to the JSON property `error`
851
946
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
852
947
  attr_accessor :error
853
948
 
854
- # Explicit content annotation (based on per-frame visual signals only).
855
- # If no explicit content has been detected in a frame, no annotations are
856
- # present for that frame.
949
+ # Explicit content annotation (based on per-frame visual signals only). If no
950
+ # explicit content has been detected in a frame, no annotations are present for
951
+ # that frame.
857
952
  # Corresponds to the JSON property `explicitAnnotation`
858
953
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
859
954
  attr_accessor :explicit_annotation
860
955
 
861
- # Label annotations on frame level.
862
- # There is exactly one element for each unique label.
956
+ # Deprecated. Please use `face_detection_annotations` instead.
957
+ # Corresponds to the JSON property `faceAnnotations`
958
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1FaceAnnotation>]
959
+ attr_accessor :face_annotations
960
+
961
+ # Face detection annotations.
962
+ # Corresponds to the JSON property `faceDetectionAnnotations`
963
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1FaceDetectionAnnotation>]
964
+ attr_accessor :face_detection_annotations
965
+
966
+ # Label annotations on frame level. There is exactly one element for each unique
967
+ # label.
863
968
  # Corresponds to the JSON property `frameLabelAnnotations`
864
969
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
865
970
  attr_accessor :frame_label_annotations
866
971
 
867
- # Video file location in
868
- # [Cloud Storage](https://cloud.google.com/storage/).
972
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
869
973
  # Corresponds to the JSON property `inputUri`
870
974
  # @return [String]
871
975
  attr_accessor :input_uri
@@ -880,6 +984,11 @@ module Google
880
984
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation>]
881
985
  attr_accessor :object_annotations
882
986
 
987
+ # Person detection annotations.
988
+ # Corresponds to the JSON property `personDetectionAnnotations`
989
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1PersonDetectionAnnotation>]
990
+ attr_accessor :person_detection_annotations
991
+
883
992
  # Video segment.
884
993
  # Corresponds to the JSON property `segment`
885
994
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment]
@@ -892,11 +1001,11 @@ module Google
892
1001
  attr_accessor :segment_label_annotations
893
1002
 
894
1003
  # Presence label annotations on video level or user-specified segment level.
895
- # There is exactly one element for each unique label. Compared to the
896
- # existing topical `segment_label_annotations`, this field presents more
897
- # fine-grained, segment-level labels detected in video content and is made
898
- # available only when the client sets `LabelDetectionConfig.model` to
899
- # "builtin/latest" in the request.
1004
+ # There is exactly one element for each unique label. Compared to the existing
1005
+ # topical `segment_label_annotations`, this field presents more fine-grained,
1006
+ # segment-level labels detected in video content and is made available only when
1007
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
1008
+ # request.
900
1009
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
901
1010
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
902
1011
  attr_accessor :segment_presence_label_annotations
@@ -906,17 +1015,17 @@ module Google
906
1015
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment>]
907
1016
  attr_accessor :shot_annotations
908
1017
 
909
- # Topical label annotations on shot level.
910
- # There is exactly one element for each unique label.
1018
+ # Topical label annotations on shot level. There is exactly one element for each
1019
+ # unique label.
911
1020
  # Corresponds to the JSON property `shotLabelAnnotations`
912
1021
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
913
1022
  attr_accessor :shot_label_annotations
914
1023
 
915
1024
  # Presence label annotations on shot level. There is exactly one element for
916
- # each unique label. Compared to the existing topical
917
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
918
- # labels detected in video content and is made available only when the client
919
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
1025
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
1026
+ # this field presents more fine-grained, shot-level labels detected in video
1027
+ # content and is made available only when the client sets `LabelDetectionConfig.
1028
+ # model` to "builtin/latest" in the request.
920
1029
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
921
1030
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation>]
922
1031
  attr_accessor :shot_presence_label_annotations
@@ -926,9 +1035,8 @@ module Google
926
1035
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1SpeechTranscription>]
927
1036
  attr_accessor :speech_transcriptions
928
1037
 
929
- # OCR text detection and tracking.
930
- # Annotations for list of detected text snippets. Each will have list of
931
- # frame information associated with it.
1038
+ # OCR text detection and tracking. Annotations for list of detected text
1039
+ # snippets. Each will have list of frame information associated with it.
932
1040
  # Corresponds to the JSON property `textAnnotations`
933
1041
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextAnnotation>]
934
1042
  attr_accessor :text_annotations
@@ -941,10 +1049,13 @@ module Google
941
1049
  def update!(**args)
942
1050
  @error = args[:error] if args.key?(:error)
943
1051
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
1052
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
1053
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
944
1054
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
945
1055
  @input_uri = args[:input_uri] if args.key?(:input_uri)
946
1056
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
947
1057
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
1058
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
948
1059
  @segment = args[:segment] if args.key?(:segment)
949
1060
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
950
1061
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -960,14 +1071,14 @@ module Google
960
1071
  class GoogleCloudVideointelligenceV1VideoSegment
961
1072
  include Google::Apis::Core::Hashable
962
1073
 
963
- # Time-offset, relative to the beginning of the video,
964
- # corresponding to the end of the segment (inclusive).
1074
+ # Time-offset, relative to the beginning of the video, corresponding to the end
1075
+ # of the segment (inclusive).
965
1076
  # Corresponds to the JSON property `endTimeOffset`
966
1077
  # @return [String]
967
1078
  attr_accessor :end_time_offset
968
1079
 
969
- # Time-offset, relative to the beginning of the video,
970
- # corresponding to the start of the segment (inclusive).
1080
+ # Time-offset, relative to the beginning of the video, corresponding to the
1081
+ # start of the segment (inclusive).
971
1082
  # Corresponds to the JSON property `startTimeOffset`
972
1083
  # @return [String]
973
1084
  attr_accessor :start_time_offset
@@ -984,41 +1095,41 @@ module Google
984
1095
  end
985
1096
 
986
1097
  # Word-specific information for recognized words. Word information is only
987
- # included in the response when certain request parameters are set, such
988
- # as `enable_word_time_offsets`.
1098
+ # included in the response when certain request parameters are set, such as `
1099
+ # enable_word_time_offsets`.
989
1100
  class GoogleCloudVideointelligenceV1WordInfo
990
1101
  include Google::Apis::Core::Hashable
991
1102
 
992
1103
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
993
1104
  # indicates an estimated greater likelihood that the recognized words are
994
- # correct. This field is set only for the top alternative.
995
- # This field is not guaranteed to be accurate and users should not rely on it
996
- # to be always provided.
997
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1105
+ # correct. This field is set only for the top alternative. This field is not
1106
+ # guaranteed to be accurate and users should not rely on it to be always
1107
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1108
+ # not set.
998
1109
  # Corresponds to the JSON property `confidence`
999
1110
  # @return [Float]
1000
1111
  attr_accessor :confidence
1001
1112
 
1002
- # Time offset relative to the beginning of the audio, and
1003
- # corresponding to the end of the spoken word. This field is only set if
1004
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1005
- # experimental feature and the accuracy of the time offset can vary.
1113
+ # Time offset relative to the beginning of the audio, and corresponding to the
1114
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
1115
+ # true` and only in the top hypothesis. This is an experimental feature and the
1116
+ # accuracy of the time offset can vary.
1006
1117
  # Corresponds to the JSON property `endTime`
1007
1118
  # @return [String]
1008
1119
  attr_accessor :end_time
1009
1120
 
1010
- # Output only. A distinct integer value is assigned for every speaker within
1011
- # the audio. This field specifies which one of those speakers was detected to
1012
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
1013
- # and is only set if speaker diarization is enabled.
1121
+ # Output only. A distinct integer value is assigned for every speaker within the
1122
+ # audio. This field specifies which one of those speakers was detected to have
1123
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
1124
+ # only set if speaker diarization is enabled.
1014
1125
  # Corresponds to the JSON property `speakerTag`
1015
1126
  # @return [Fixnum]
1016
1127
  attr_accessor :speaker_tag
1017
1128
 
1018
- # Time offset relative to the beginning of the audio, and
1019
- # corresponding to the start of the spoken word. This field is only set if
1020
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1021
- # experimental feature and the accuracy of the time offset can vary.
1129
+ # Time offset relative to the beginning of the audio, and corresponding to the
1130
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
1131
+ # true` and only in the top hypothesis. This is an experimental feature and the
1132
+ # accuracy of the time offset can vary.
1022
1133
  # Corresponds to the JSON property `startTime`
1023
1134
  # @return [String]
1024
1135
  attr_accessor :start_time
@@ -1042,9 +1153,9 @@ module Google
1042
1153
  end
1043
1154
  end
1044
1155
 
1045
- # Video annotation progress. Included in the `metadata`
1046
- # field of the `Operation` returned by the `GetOperation`
1047
- # call of the `google::longrunning::Operations` service.
1156
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
1157
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1158
+ # service.
1048
1159
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
1049
1160
  include Google::Apis::Core::Hashable
1050
1161
 
@@ -1072,24 +1183,22 @@ module Google
1072
1183
  # @return [Array<String>]
1073
1184
  attr_accessor :features
1074
1185
 
1075
- # The video data bytes.
1076
- # If unset, the input video(s) should be specified via the `input_uri`.
1077
- # If set, `input_uri` must be unset.
1186
+ # The video data bytes. If unset, the input video(s) should be specified via the
1187
+ # `input_uri`. If set, `input_uri` must be unset.
1078
1188
  # Corresponds to the JSON property `inputContent`
1079
1189
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
1080
1190
  # @return [String]
1081
1191
  attr_accessor :input_content
1082
1192
 
1083
- # Input video location. Currently, only
1084
- # [Cloud Storage](https://cloud.google.com/storage/) URIs are
1085
- # supported. URIs must be specified in the following format:
1086
- # `gs://bucket-id/object-id` (other URI formats return
1087
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
1088
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
1089
- # To identify multiple videos, a video URI may include wildcards in the
1090
- # `object-id`. Supported wildcards: '*' to match 0 or more characters;
1091
- # '?' to match 1 character. If unset, the input video should be embedded
1092
- # in the request as `input_content`. If set, `input_content` must be unset.
1193
+ # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
1194
+ # storage/) URIs are supported. URIs must be specified in the following format: `
1195
+ # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
1196
+ # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
1197
+ # google.com/storage/docs/request-endpoints). To identify multiple videos, a
1198
+ # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
1199
+ # to match 0 or more characters; '?' to match 1 character. If unset, the input
1200
+ # video should be embedded in the request as `input_content`. If set, `
1201
+ # input_content` must be unset.
1093
1202
  # Corresponds to the JSON property `inputUri`
1094
1203
  # @return [String]
1095
1204
  attr_accessor :input_uri
@@ -1103,11 +1212,11 @@ module Google
1103
1212
  attr_accessor :location_id
1104
1213
 
1105
1214
  # Optional. Location where the output (in JSON format) should be stored.
1106
- # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
1107
- # URIs are supported. These must be specified in the following format:
1108
- # `gs://bucket-id/object-id` (other URI formats return
1109
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
1110
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
1215
+ # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
1216
+ # supported. These must be specified in the following format: `gs://bucket-id/
1217
+ # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
1218
+ # more information, see [Request URIs](https://cloud.google.com/storage/docs/
1219
+ # request-endpoints).
1111
1220
  # Corresponds to the JSON property `outputUri`
1112
1221
  # @return [String]
1113
1222
  attr_accessor :output_uri
@@ -1132,9 +1241,9 @@ module Google
1132
1241
  end
1133
1242
  end
1134
1243
 
1135
- # Video annotation response. Included in the `response`
1136
- # field of the `Operation` returned by the `GetOperation`
1137
- # call of the `google::longrunning::Operations` service.
1244
+ # Video annotation response. Included in the `response` field of the `Operation`
1245
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1246
+ # service.
1138
1247
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
1139
1248
  include Google::Apis::Core::Hashable
1140
1249
 
@@ -1162,14 +1271,14 @@ module Google
1162
1271
  # @return [Float]
1163
1272
  attr_accessor :confidence
1164
1273
 
1165
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
1166
- # A full list of supported type names will be provided in the document.
1274
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
1275
+ # full list of supported type names will be provided in the document.
1167
1276
  # Corresponds to the JSON property `name`
1168
1277
  # @return [String]
1169
1278
  attr_accessor :name
1170
1279
 
1171
- # Text value of the detection result. For example, the value for "HairColor"
1172
- # can be "black", "blonde", etc.
1280
+ # Text value of the detection result. For example, the value for "HairColor" can
1281
+ # be "black", "blonde", etc.
1173
1282
  # Corresponds to the JSON property `value`
1174
1283
  # @return [String]
1175
1284
  attr_accessor :value
@@ -1201,9 +1310,8 @@ module Google
1201
1310
  # @return [String]
1202
1311
  attr_accessor :name
1203
1312
 
1204
- # A vertex represents a 2D point in the image.
1205
- # NOTE: the normalized vertex coordinates are relative to the original image
1206
- # and range from 0 to 1.
1313
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1314
+ # coordinates are relative to the original image and range from 0 to 1.
1207
1315
  # Corresponds to the JSON property `point`
1208
1316
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
1209
1317
  attr_accessor :point
@@ -1229,8 +1337,7 @@ module Google
1229
1337
  # @return [String]
1230
1338
  attr_accessor :description
1231
1339
 
1232
- # Opaque entity ID. Some IDs may be available in
1233
- # [Google Knowledge Graph Search
1340
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
1234
1341
  # API](https://developers.google.com/knowledge-graph/).
1235
1342
  # Corresponds to the JSON property `entityId`
1236
1343
  # @return [String]
@@ -1253,9 +1360,9 @@ module Google
1253
1360
  end
1254
1361
  end
1255
1362
 
1256
- # Explicit content annotation (based on per-frame visual signals only).
1257
- # If no explicit content has been detected in a frame, no annotations are
1258
- # present for that frame.
1363
+ # Explicit content annotation (based on per-frame visual signals only). If no
1364
+ # explicit content has been detected in a frame, no annotations are present for
1365
+ # that frame.
1259
1366
  class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
1260
1367
  include Google::Apis::Core::Hashable
1261
1368
 
@@ -1284,9 +1391,8 @@ module Google
1284
1391
  class GoogleCloudVideointelligenceV1beta2ExplicitContentDetectionConfig
1285
1392
  include Google::Apis::Core::Hashable
1286
1393
 
1287
- # Model to use for explicit content detection.
1288
- # Supported values: "builtin/stable" (the default if unset) and
1289
- # "builtin/latest".
1394
+ # Model to use for explicit content detection. Supported values: "builtin/stable"
1395
+ # (the default if unset) and "builtin/latest".
1290
1396
  # Corresponds to the JSON property `model`
1291
1397
  # @return [String]
1292
1398
  attr_accessor :model
@@ -1327,14 +1433,145 @@ module Google
1327
1433
  end
1328
1434
  end
1329
1435
 
1436
+ # Deprecated. No effect.
1437
+ class GoogleCloudVideointelligenceV1beta2FaceAnnotation
1438
+ include Google::Apis::Core::Hashable
1439
+
1440
+ # All video frames where a face was detected.
1441
+ # Corresponds to the JSON property `frames`
1442
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2FaceFrame>]
1443
+ attr_accessor :frames
1444
+
1445
+ # All video segments where a face was detected.
1446
+ # Corresponds to the JSON property `segments`
1447
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2FaceSegment>]
1448
+ attr_accessor :segments
1449
+
1450
+ # Thumbnail of a representative face view (in JPEG format).
1451
+ # Corresponds to the JSON property `thumbnail`
1452
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
1453
+ # @return [String]
1454
+ attr_accessor :thumbnail
1455
+
1456
+ def initialize(**args)
1457
+ update!(**args)
1458
+ end
1459
+
1460
+ # Update properties of this object
1461
+ def update!(**args)
1462
+ @frames = args[:frames] if args.key?(:frames)
1463
+ @segments = args[:segments] if args.key?(:segments)
1464
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
1465
+ end
1466
+ end
1467
+
1468
+ # Face detection annotation.
1469
+ class GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation
1470
+ include Google::Apis::Core::Hashable
1471
+
1472
+ # Feature version.
1473
+ # Corresponds to the JSON property `version`
1474
+ # @return [String]
1475
+ attr_accessor :version
1476
+
1477
+ def initialize(**args)
1478
+ update!(**args)
1479
+ end
1480
+
1481
+ # Update properties of this object
1482
+ def update!(**args)
1483
+ @version = args[:version] if args.key?(:version)
1484
+ end
1485
+ end
1486
+
1487
+ # Config for FACE_DETECTION.
1488
+ class GoogleCloudVideointelligenceV1beta2FaceDetectionConfig
1489
+ include Google::Apis::Core::Hashable
1490
+
1491
+ # Whether to enable face attributes detection, such as glasses, dark_glasses,
1492
+ # mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
1493
+ # Corresponds to the JSON property `includeAttributes`
1494
+ # @return [Boolean]
1495
+ attr_accessor :include_attributes
1496
+ alias_method :include_attributes?, :include_attributes
1497
+
1498
+ # Whether bounding boxes are included in the face annotation output.
1499
+ # Corresponds to the JSON property `includeBoundingBoxes`
1500
+ # @return [Boolean]
1501
+ attr_accessor :include_bounding_boxes
1502
+ alias_method :include_bounding_boxes?, :include_bounding_boxes
1503
+
1504
+ # Model to use for face detection. Supported values: "builtin/stable" (the
1505
+ # default if unset) and "builtin/latest".
1506
+ # Corresponds to the JSON property `model`
1507
+ # @return [String]
1508
+ attr_accessor :model
1509
+
1510
+ def initialize(**args)
1511
+ update!(**args)
1512
+ end
1513
+
1514
+ # Update properties of this object
1515
+ def update!(**args)
1516
+ @include_attributes = args[:include_attributes] if args.key?(:include_attributes)
1517
+ @include_bounding_boxes = args[:include_bounding_boxes] if args.key?(:include_bounding_boxes)
1518
+ @model = args[:model] if args.key?(:model)
1519
+ end
1520
+ end
1521
+
1522
+ # Deprecated. No effect.
1523
+ class GoogleCloudVideointelligenceV1beta2FaceFrame
1524
+ include Google::Apis::Core::Hashable
1525
+
1526
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
1527
+ # same face is detected in multiple locations within the current frame.
1528
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
1529
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox>]
1530
+ attr_accessor :normalized_bounding_boxes
1531
+
1532
+ # Time-offset, relative to the beginning of the video, corresponding to the
1533
+ # video frame for this location.
1534
+ # Corresponds to the JSON property `timeOffset`
1535
+ # @return [String]
1536
+ attr_accessor :time_offset
1537
+
1538
+ def initialize(**args)
1539
+ update!(**args)
1540
+ end
1541
+
1542
+ # Update properties of this object
1543
+ def update!(**args)
1544
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
1545
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
1546
+ end
1547
+ end
1548
+
1549
+ # Video segment level annotation results for face detection.
1550
+ class GoogleCloudVideointelligenceV1beta2FaceSegment
1551
+ include Google::Apis::Core::Hashable
1552
+
1553
+ # Video segment.
1554
+ # Corresponds to the JSON property `segment`
1555
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment]
1556
+ attr_accessor :segment
1557
+
1558
+ def initialize(**args)
1559
+ update!(**args)
1560
+ end
1561
+
1562
+ # Update properties of this object
1563
+ def update!(**args)
1564
+ @segment = args[:segment] if args.key?(:segment)
1565
+ end
1566
+ end
1567
+
1330
1568
  # Label annotation.
1331
1569
  class GoogleCloudVideointelligenceV1beta2LabelAnnotation
1332
1570
  include Google::Apis::Core::Hashable
1333
1571
 
1334
- # Common categories for the detected entity.
1335
- # For example, when the label is `Terrier`, the category is likely `dog`. And
1336
- # in some cases there might be more than one categories e.g., `Terrier` could
1337
- # also be a `pet`.
1572
+ # Common categories for the detected entity. For example, when the label is `
1573
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
1574
+ # than one categories e.g., `Terrier` could also be a `pet`.
1338
1575
  # Corresponds to the JSON property `categoryEntities`
1339
1576
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity>]
1340
1577
  attr_accessor :category_entities
@@ -1377,44 +1614,40 @@ module Google
1377
1614
  class GoogleCloudVideointelligenceV1beta2LabelDetectionConfig
1378
1615
  include Google::Apis::Core::Hashable
1379
1616
 
1380
- # The confidence threshold we perform filtering on the labels from
1381
- # frame-level detection. If not set, it is set to 0.4 by default. The valid
1382
- # range for this threshold is [0.1, 0.9]. Any value set outside of this
1383
- # range will be clipped.
1384
- # Note: For best results, follow the default threshold. We will update
1385
- # the default threshold everytime when we release a new model.
1617
+ # The confidence threshold we perform filtering on the labels from frame-level
1618
+ # detection. If not set, it is set to 0.4 by default. The valid range for this
1619
+ # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
1620
+ # Note: For best results, follow the default threshold. We will update the
1621
+ # default threshold everytime when we release a new model.
1386
1622
  # Corresponds to the JSON property `frameConfidenceThreshold`
1387
1623
  # @return [Float]
1388
1624
  attr_accessor :frame_confidence_threshold
1389
1625
 
1390
- # What labels should be detected with LABEL_DETECTION, in addition to
1391
- # video-level labels or segment-level labels.
1392
- # If unspecified, defaults to `SHOT_MODE`.
1626
+ # What labels should be detected with LABEL_DETECTION, in addition to video-
1627
+ # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
1393
1628
  # Corresponds to the JSON property `labelDetectionMode`
1394
1629
  # @return [String]
1395
1630
  attr_accessor :label_detection_mode
1396
1631
 
1397
- # Model to use for label detection.
1398
- # Supported values: "builtin/stable" (the default if unset) and
1399
- # "builtin/latest".
1632
+ # Model to use for label detection. Supported values: "builtin/stable" (the
1633
+ # default if unset) and "builtin/latest".
1400
1634
  # Corresponds to the JSON property `model`
1401
1635
  # @return [String]
1402
1636
  attr_accessor :model
1403
1637
 
1404
- # Whether the video has been shot from a stationary (i.e., non-moving)
1405
- # camera. When set to true, might improve detection accuracy for moving
1406
- # objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
1638
+ # Whether the video has been shot from a stationary (i.e., non-moving) camera.
1639
+ # When set to true, might improve detection accuracy for moving objects. Should
1640
+ # be used with `SHOT_AND_FRAME_MODE` enabled.
1407
1641
  # Corresponds to the JSON property `stationaryCamera`
1408
1642
  # @return [Boolean]
1409
1643
  attr_accessor :stationary_camera
1410
1644
  alias_method :stationary_camera?, :stationary_camera
1411
1645
 
1412
- # The confidence threshold we perform filtering on the labels from
1413
- # video-level and shot-level detections. If not set, it's set to 0.3 by
1414
- # default. The valid range for this threshold is [0.1, 0.9]. Any value set
1415
- # outside of this range will be clipped.
1416
- # Note: For best results, follow the default threshold. We will update
1417
- # the default threshold everytime when we release a new model.
1646
+ # The confidence threshold we perform filtering on the labels from video-level
1647
+ # and shot-level detections. If not set, it's set to 0.3 by default. The valid
1648
+ # range for this threshold is [0.1, 0.9]. Any value set outside of this range
1649
+ # will be clipped. Note: For best results, follow the default threshold. We will
1650
+ # update the default threshold everytime when we release a new model.
1418
1651
  # Corresponds to the JSON property `videoConfidenceThreshold`
1419
1652
  # @return [Float]
1420
1653
  attr_accessor :video_confidence_threshold
@@ -1493,14 +1726,14 @@ module Google
1493
1726
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity]
1494
1727
  attr_accessor :entity
1495
1728
 
1496
- # All video segments where the recognized logo appears. There might be
1497
- # multiple instances of the same logo class appearing in one VideoSegment.
1729
+ # All video segments where the recognized logo appears. There might be multiple
1730
+ # instances of the same logo class appearing in one VideoSegment.
1498
1731
  # Corresponds to the JSON property `segments`
1499
1732
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment>]
1500
1733
  attr_accessor :segments
1501
1734
 
1502
- # All logo tracks where the recognized logo appears. Each track corresponds
1503
- # to one logo instance appearing in consecutive frames.
1735
+ # All logo tracks where the recognized logo appears. Each track corresponds to
1736
+ # one logo instance appearing in consecutive frames.
1504
1737
  # Corresponds to the JSON property `tracks`
1505
1738
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Track>]
1506
1739
  attr_accessor :tracks
@@ -1517,9 +1750,8 @@ module Google
1517
1750
  end
1518
1751
  end
1519
1752
 
1520
- # Normalized bounding box.
1521
- # The normalized vertex coordinates are relative to the original image.
1522
- # Range: [0, 1].
1753
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1754
+ # original image. Range: [0, 1].
1523
1755
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
1524
1756
  include Google::Apis::Core::Hashable
1525
1757
 
@@ -1557,20 +1789,12 @@ module Google
1557
1789
  end
1558
1790
 
1559
1791
  # Normalized bounding polygon for text (that might not be aligned with axis).
1560
- # Contains list of the corner points in clockwise order starting from
1561
- # top-left corner. For example, for a rectangular bounding box:
1562
- # When the text is horizontal it might look like:
1563
- # 0----1
1564
- # | |
1565
- # 3----2
1566
- # When it's clockwise rotated 180 degrees around the top-left corner it
1567
- # becomes:
1568
- # 2----3
1569
- # | |
1570
- # 1----0
1571
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1572
- # than 0, or greater than 1 due to trignometric calculations for location of
1573
- # the box.
1792
+ # Contains list of the corner points in clockwise order starting from top-left
1793
+ # corner. For example, for a rectangular bounding box: When the text is
1794
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1795
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1796
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1797
+ # or greater than 1 due to trignometric calculations for location of the box.
1574
1798
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
1575
1799
  include Google::Apis::Core::Hashable
1576
1800
 
@@ -1589,9 +1813,8 @@ module Google
1589
1813
  end
1590
1814
  end
1591
1815
 
1592
- # A vertex represents a 2D point in the image.
1593
- # NOTE: the normalized vertex coordinates are relative to the original image
1594
- # and range from 0 to 1.
1816
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1817
+ # coordinates are relative to the original image and range from 0 to 1.
1595
1818
  class GoogleCloudVideointelligenceV1beta2NormalizedVertex
1596
1819
  include Google::Apis::Core::Hashable
1597
1820
 
@@ -1630,10 +1853,10 @@ module Google
1630
1853
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity]
1631
1854
  attr_accessor :entity
1632
1855
 
1633
- # Information corresponding to all frames where this object track appears.
1634
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
1635
- # messages in frames.
1636
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
1856
+ # Information corresponding to all frames where this object track appears. Non-
1857
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
1858
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
1859
+ # frames.
1637
1860
  # Corresponds to the JSON property `frames`
1638
1861
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
1639
1862
  attr_accessor :frames
@@ -1643,12 +1866,11 @@ module Google
1643
1866
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment]
1644
1867
  attr_accessor :segment
1645
1868
 
1646
- # Streaming mode ONLY.
1647
- # In streaming mode, we do not know the end time of a tracked object
1648
- # before it is completed. Hence, there is no VideoSegment info returned.
1649
- # Instead, we provide a unique identifiable integer track_id so that
1650
- # the customers can correlate the results of the ongoing
1651
- # ObjectTrackAnnotation of the same track_id over time.
1869
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
1870
+ # tracked object before it is completed. Hence, there is no VideoSegment info
1871
+ # returned. Instead, we provide a unique identifiable integer track_id so that
1872
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
1873
+ # of the same track_id over time.
1652
1874
  # Corresponds to the JSON property `trackId`
1653
1875
  # @return [Fixnum]
1654
1876
  attr_accessor :track_id
@@ -1677,9 +1899,8 @@ module Google
1677
1899
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingConfig
1678
1900
  include Google::Apis::Core::Hashable
1679
1901
 
1680
- # Model to use for object tracking.
1681
- # Supported values: "builtin/stable" (the default if unset) and
1682
- # "builtin/latest".
1902
+ # Model to use for object tracking. Supported values: "builtin/stable" (the
1903
+ # default if unset) and "builtin/latest".
1683
1904
  # Corresponds to the JSON property `model`
1684
1905
  # @return [String]
1685
1906
  attr_accessor :model
@@ -1699,9 +1920,8 @@ module Google
1699
1920
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
1700
1921
  include Google::Apis::Core::Hashable
1701
1922
 
1702
- # Normalized bounding box.
1703
- # The normalized vertex coordinates are relative to the original image.
1704
- # Range: [0, 1].
1923
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1924
+ # original image. Range: [0, 1].
1705
1925
  # Corresponds to the JSON property `normalizedBoundingBox`
1706
1926
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1707
1927
  attr_accessor :normalized_bounding_box
@@ -1722,13 +1942,74 @@ module Google
1722
1942
  end
1723
1943
  end
1724
1944
 
1945
+ # Person detection annotation per video.
1946
+ class GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation
1947
+ include Google::Apis::Core::Hashable
1948
+
1949
+ # The detected tracks of a person.
1950
+ # Corresponds to the JSON property `tracks`
1951
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Track>]
1952
+ attr_accessor :tracks
1953
+
1954
+ # Feature version.
1955
+ # Corresponds to the JSON property `version`
1956
+ # @return [String]
1957
+ attr_accessor :version
1958
+
1959
+ def initialize(**args)
1960
+ update!(**args)
1961
+ end
1962
+
1963
+ # Update properties of this object
1964
+ def update!(**args)
1965
+ @tracks = args[:tracks] if args.key?(:tracks)
1966
+ @version = args[:version] if args.key?(:version)
1967
+ end
1968
+ end
1969
+
1970
+ # Config for PERSON_DETECTION.
1971
+ class GoogleCloudVideointelligenceV1beta2PersonDetectionConfig
1972
+ include Google::Apis::Core::Hashable
1973
+
1974
+ # Whether to enable person attributes detection, such as cloth color (black,
1975
+ # blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, etc.
1976
+ # Ignored if 'include_bounding_boxes' is set to false.
1977
+ # Corresponds to the JSON property `includeAttributes`
1978
+ # @return [Boolean]
1979
+ attr_accessor :include_attributes
1980
+ alias_method :include_attributes?, :include_attributes
1981
+
1982
+ # Whether bounding boxes are included in the person detection annotation output.
1983
+ # Corresponds to the JSON property `includeBoundingBoxes`
1984
+ # @return [Boolean]
1985
+ attr_accessor :include_bounding_boxes
1986
+ alias_method :include_bounding_boxes?, :include_bounding_boxes
1987
+
1988
+ # Whether to enable pose landmarks detection. Ignored if 'include_bounding_boxes'
1989
+ # is set to false.
1990
+ # Corresponds to the JSON property `includePoseLandmarks`
1991
+ # @return [Boolean]
1992
+ attr_accessor :include_pose_landmarks
1993
+ alias_method :include_pose_landmarks?, :include_pose_landmarks
1994
+
1995
+ def initialize(**args)
1996
+ update!(**args)
1997
+ end
1998
+
1999
+ # Update properties of this object
2000
+ def update!(**args)
2001
+ @include_attributes = args[:include_attributes] if args.key?(:include_attributes)
2002
+ @include_bounding_boxes = args[:include_bounding_boxes] if args.key?(:include_bounding_boxes)
2003
+ @include_pose_landmarks = args[:include_pose_landmarks] if args.key?(:include_pose_landmarks)
2004
+ end
2005
+ end
2006
+
1725
2007
  # Config for SHOT_CHANGE_DETECTION.
1726
2008
  class GoogleCloudVideointelligenceV1beta2ShotChangeDetectionConfig
1727
2009
  include Google::Apis::Core::Hashable
1728
2010
 
1729
- # Model to use for shot change detection.
1730
- # Supported values: "builtin/stable" (the default if unset) and
1731
- # "builtin/latest".
2011
+ # Model to use for shot change detection. Supported values: "builtin/stable" (
2012
+ # the default if unset) and "builtin/latest".
1732
2013
  # Corresponds to the JSON property `model`
1733
2014
  # @return [String]
1734
2015
  attr_accessor :model
@@ -1748,12 +2029,12 @@ module Google
1748
2029
  class GoogleCloudVideointelligenceV1beta2SpeechContext
1749
2030
  include Google::Apis::Core::Hashable
1750
2031
 
1751
- # Optional. A list of strings containing words and phrases "hints" so that
1752
- # the speech recognition is more likely to recognize them. This can be used
1753
- # to improve the accuracy for specific words and phrases, for example, if
1754
- # specific commands are typically spoken by the user. This can also be used
1755
- # to add additional words to the vocabulary of the recognizer. See
1756
- # [usage limits](https://cloud.google.com/speech/limits#content).
2032
+ # Optional. A list of strings containing words and phrases "hints" so that the
2033
+ # speech recognition is more likely to recognize them. This can be used to
2034
+ # improve the accuracy for specific words and phrases, for example, if specific
2035
+ # commands are typically spoken by the user. This can also be used to add
2036
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
2037
+ # //cloud.google.com/speech/limits#content).
1757
2038
  # Corresponds to the JSON property `phrases`
1758
2039
  # @return [Array<String>]
1759
2040
  attr_accessor :phrases
@@ -1774,10 +2055,10 @@ module Google
1774
2055
 
1775
2056
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
1776
2057
  # indicates an estimated greater likelihood that the recognized words are
1777
- # correct. This field is set only for the top alternative.
1778
- # This field is not guaranteed to be accurate and users should not rely on it
1779
- # to be always provided.
1780
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2058
+ # correct. This field is set only for the top alternative. This field is not
2059
+ # guaranteed to be accurate and users should not rely on it to be always
2060
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2061
+ # not set.
1781
2062
  # Corresponds to the JSON property `confidence`
1782
2063
  # @return [Float]
1783
2064
  attr_accessor :confidence
@@ -1788,8 +2069,8 @@ module Google
1788
2069
  attr_accessor :transcript
1789
2070
 
1790
2071
  # Output only. A list of word-specific information for each recognized word.
1791
- # Note: When `enable_speaker_diarization` is set to true, you will see all
1792
- # the words from the beginning of the audio.
2072
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
2073
+ # words from the beginning of the audio.
1793
2074
  # Corresponds to the JSON property `words`
1794
2075
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2WordInfo>]
1795
2076
  attr_accessor :words
@@ -1810,18 +2091,17 @@ module Google
1810
2091
  class GoogleCloudVideointelligenceV1beta2SpeechTranscription
1811
2092
  include Google::Apis::Core::Hashable
1812
2093
 
1813
- # May contain one or more recognition hypotheses (up to the maximum specified
1814
- # in `max_alternatives`). These alternatives are ordered in terms of
1815
- # accuracy, with the top (first) alternative being the most probable, as
1816
- # ranked by the recognizer.
2094
+ # May contain one or more recognition hypotheses (up to the maximum specified in
2095
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
2096
+ # the top (first) alternative being the most probable, as ranked by the
2097
+ # recognizer.
1817
2098
  # Corresponds to the JSON property `alternatives`
1818
2099
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
1819
2100
  attr_accessor :alternatives
1820
2101
 
1821
2102
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
1822
- # language tag of
1823
- # the language in this result. This language code was detected to have the
1824
- # most likelihood of being spoken in the audio.
2103
+ # language tag of the language in this result. This language code was detected
2104
+ # to have the most likelihood of being spoken in the audio.
1825
2105
  # Corresponds to the JSON property `languageCode`
1826
2106
  # @return [String]
1827
2107
  attr_accessor :language_code
@@ -1848,66 +2128,62 @@ module Google
1848
2128
  attr_accessor :audio_tracks
1849
2129
 
1850
2130
  # Optional. If set, specifies the estimated number of speakers in the
1851
- # conversation.
1852
- # If not set, defaults to '2'.
1853
- # Ignored unless enable_speaker_diarization is set to true.
2131
+ # conversation. If not set, defaults to '2'. Ignored unless
2132
+ # enable_speaker_diarization is set to true.
1854
2133
  # Corresponds to the JSON property `diarizationSpeakerCount`
1855
2134
  # @return [Fixnum]
1856
2135
  attr_accessor :diarization_speaker_count
1857
2136
 
1858
- # Optional. If 'true', adds punctuation to recognition result hypotheses.
1859
- # This feature is only available in select languages. Setting this for
1860
- # requests in other languages has no effect at all. The default 'false' value
1861
- # does not add punctuation to result hypotheses. NOTE: "This is currently
1862
- # offered as an experimental service, complimentary to all users. In the
1863
- # future this may be exclusively available as a premium feature."
2137
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
2138
+ # feature is only available in select languages. Setting this for requests in
2139
+ # other languages has no effect at all. The default 'false' value does not add
2140
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
2141
+ # experimental service, complimentary to all users. In the future this may be
2142
+ # exclusively available as a premium feature."
1864
2143
  # Corresponds to the JSON property `enableAutomaticPunctuation`
1865
2144
  # @return [Boolean]
1866
2145
  attr_accessor :enable_automatic_punctuation
1867
2146
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation
1868
2147
 
1869
- # Optional. If 'true', enables speaker detection for each recognized word in
1870
- # the top alternative of the recognition result using a speaker_tag provided
1871
- # in the WordInfo.
1872
- # Note: When this is true, we send all the words from the beginning of the
1873
- # audio for the top alternative in every consecutive response.
1874
- # This is done in order to improve our speaker tags as our models learn to
1875
- # identify the speakers in the conversation over time.
2148
+ # Optional. If 'true', enables speaker detection for each recognized word in the
2149
+ # top alternative of the recognition result using a speaker_tag provided in the
2150
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
2151
+ # the audio for the top alternative in every consecutive response. This is done
2152
+ # in order to improve our speaker tags as our models learn to identify the
2153
+ # speakers in the conversation over time.
1876
2154
  # Corresponds to the JSON property `enableSpeakerDiarization`
1877
2155
  # @return [Boolean]
1878
2156
  attr_accessor :enable_speaker_diarization
1879
2157
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization
1880
2158
 
1881
2159
  # Optional. If `true`, the top result includes a list of words and the
1882
- # confidence for those words. If `false`, no word-level confidence
1883
- # information is returned. The default is `false`.
2160
+ # confidence for those words. If `false`, no word-level confidence information
2161
+ # is returned. The default is `false`.
1884
2162
  # Corresponds to the JSON property `enableWordConfidence`
1885
2163
  # @return [Boolean]
1886
2164
  attr_accessor :enable_word_confidence
1887
2165
  alias_method :enable_word_confidence?, :enable_word_confidence
1888
2166
 
1889
- # Optional. If set to `true`, the server will attempt to filter out
1890
- # profanities, replacing all but the initial character in each filtered word
1891
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
1892
- # won't be filtered out.
2167
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
2168
+ # replacing all but the initial character in each filtered word with asterisks,
2169
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
1893
2170
  # Corresponds to the JSON property `filterProfanity`
1894
2171
  # @return [Boolean]
1895
2172
  attr_accessor :filter_profanity
1896
2173
  alias_method :filter_profanity?, :filter_profanity
1897
2174
 
1898
- # Required. *Required* The language of the supplied audio as a
1899
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
1900
- # Example: "en-US".
1901
- # See [Language Support](https://cloud.google.com/speech/docs/languages)
1902
- # for a list of the currently supported language codes.
2175
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
2176
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
2177
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
2178
+ # of the currently supported language codes.
1903
2179
  # Corresponds to the JSON property `languageCode`
1904
2180
  # @return [String]
1905
2181
  attr_accessor :language_code
1906
2182
 
1907
2183
  # Optional. Maximum number of recognition hypotheses to be returned.
1908
2184
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
1909
- # within each `SpeechTranscription`. The server may return fewer than
1910
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
2185
+ # within each `SpeechTranscription`. The server may return fewer than `
2186
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
1911
2187
  # return a maximum of one. If omitted, will return a maximum of one.
1912
2188
  # Corresponds to the JSON property `maxAlternatives`
1913
2189
  # @return [Fixnum]
@@ -1974,16 +2250,15 @@ module Google
1974
2250
  include Google::Apis::Core::Hashable
1975
2251
 
1976
2252
  # Language hint can be specified if the language to be detected is known a
1977
- # priori. It can increase the accuracy of the detection. Language hint must
1978
- # be language code in BCP-47 format.
1979
- # Automatic language detection is performed if no hint is provided.
2253
+ # priori. It can increase the accuracy of the detection. Language hint must be
2254
+ # language code in BCP-47 format. Automatic language detection is performed if
2255
+ # no hint is provided.
1980
2256
  # Corresponds to the JSON property `languageHints`
1981
2257
  # @return [Array<String>]
1982
2258
  attr_accessor :language_hints
1983
2259
 
1984
- # Model to use for text detection.
1985
- # Supported values: "builtin/stable" (the default if unset) and
1986
- # "builtin/latest".
2260
+ # Model to use for text detection. Supported values: "builtin/stable" (the
2261
+ # default if unset) and "builtin/latest".
1987
2262
  # Corresponds to the JSON property `model`
1988
2263
  # @return [String]
1989
2264
  attr_accessor :model
@@ -1999,27 +2274,19 @@ module Google
1999
2274
  end
2000
2275
  end
2001
2276
 
2002
- # Video frame level annotation results for text annotation (OCR).
2003
- # Contains information regarding timestamp and bounding box locations for the
2004
- # frames containing detected OCR text snippets.
2277
+ # Video frame level annotation results for text annotation (OCR). Contains
2278
+ # information regarding timestamp and bounding box locations for the frames
2279
+ # containing detected OCR text snippets.
2005
2280
  class GoogleCloudVideointelligenceV1beta2TextFrame
2006
2281
  include Google::Apis::Core::Hashable
2007
2282
 
2008
2283
  # Normalized bounding polygon for text (that might not be aligned with axis).
2009
- # Contains list of the corner points in clockwise order starting from
2010
- # top-left corner. For example, for a rectangular bounding box:
2011
- # When the text is horizontal it might look like:
2012
- # 0----1
2013
- # | |
2014
- # 3----2
2015
- # When it's clockwise rotated 180 degrees around the top-left corner it
2016
- # becomes:
2017
- # 2----3
2018
- # | |
2019
- # 1----0
2020
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2021
- # than 0, or greater than 1 due to trignometric calculations for location of
2022
- # the box.
2284
+ # Contains list of the corner points in clockwise order starting from top-left
2285
+ # corner. For example, for a rectangular bounding box: When the text is
2286
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2287
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2288
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2289
+ # or greater than 1 due to trignometric calculations for location of the box.
2023
2290
  # Corresponds to the JSON property `rotatedBoundingBox`
2024
2291
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
2025
2292
  attr_accessor :rotated_bounding_box
@@ -2072,9 +2339,8 @@ module Google
2072
2339
  end
2073
2340
  end
2074
2341
 
2075
- # For tracking related features.
2076
- # An object at time_offset with attributes, and located with
2077
- # normalized_bounding_box.
2342
+ # For tracking related features. An object at time_offset with attributes, and
2343
+ # located with normalized_bounding_box.
2078
2344
  class GoogleCloudVideointelligenceV1beta2TimestampedObject
2079
2345
  include Google::Apis::Core::Hashable
2080
2346
 
@@ -2088,15 +2354,14 @@ module Google
2088
2354
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
2089
2355
  attr_accessor :landmarks
2090
2356
 
2091
- # Normalized bounding box.
2092
- # The normalized vertex coordinates are relative to the original image.
2093
- # Range: [0, 1].
2357
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2358
+ # original image. Range: [0, 1].
2094
2359
  # Corresponds to the JSON property `normalizedBoundingBox`
2095
2360
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
2096
2361
  attr_accessor :normalized_bounding_box
2097
2362
 
2098
- # Time-offset, relative to the beginning of the video,
2099
- # corresponding to the video frame for this object.
2363
+ # Time-offset, relative to the beginning of the video, corresponding to the
2364
+ # video frame for this object.
2100
2365
  # Corresponds to the JSON property `timeOffset`
2101
2366
  # @return [String]
2102
2367
  attr_accessor :time_offset
@@ -2155,20 +2420,19 @@ module Google
2155
2420
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
2156
2421
  include Google::Apis::Core::Hashable
2157
2422
 
2158
- # Specifies which feature is being tracked if the request contains more than
2159
- # one feature.
2423
+ # Specifies which feature is being tracked if the request contains more than one
2424
+ # feature.
2160
2425
  # Corresponds to the JSON property `feature`
2161
2426
  # @return [String]
2162
2427
  attr_accessor :feature
2163
2428
 
2164
- # Video file location in
2165
- # [Cloud Storage](https://cloud.google.com/storage/).
2429
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2166
2430
  # Corresponds to the JSON property `inputUri`
2167
2431
  # @return [String]
2168
2432
  attr_accessor :input_uri
2169
2433
 
2170
- # Approximate percentage processed thus far. Guaranteed to be
2171
- # 100 when fully processed.
2434
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
2435
+ # processed.
2172
2436
  # Corresponds to the JSON property `progressPercent`
2173
2437
  # @return [Fixnum]
2174
2438
  attr_accessor :progress_percent
@@ -2207,31 +2471,40 @@ module Google
2207
2471
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
2208
2472
  include Google::Apis::Core::Hashable
2209
2473
 
2210
- # The `Status` type defines a logical error model that is suitable for
2211
- # different programming environments, including REST APIs and RPC APIs. It is
2212
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2213
- # three pieces of data: error code, error message, and error details.
2214
- # You can find out more about this error model and how to work with it in the
2215
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2474
+ # The `Status` type defines a logical error model that is suitable for different
2475
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2476
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2477
+ # data: error code, error message, and error details. You can find out more
2478
+ # about this error model and how to work with it in the [API Design Guide](https:
2479
+ # //cloud.google.com/apis/design/errors).
2216
2480
  # Corresponds to the JSON property `error`
2217
2481
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
2218
2482
  attr_accessor :error
2219
2483
 
2220
- # Explicit content annotation (based on per-frame visual signals only).
2221
- # If no explicit content has been detected in a frame, no annotations are
2222
- # present for that frame.
2484
+ # Explicit content annotation (based on per-frame visual signals only). If no
2485
+ # explicit content has been detected in a frame, no annotations are present for
2486
+ # that frame.
2223
2487
  # Corresponds to the JSON property `explicitAnnotation`
2224
2488
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
2225
2489
  attr_accessor :explicit_annotation
2226
2490
 
2227
- # Label annotations on frame level.
2228
- # There is exactly one element for each unique label.
2491
+ # Deprecated. Please use `face_detection_annotations` instead.
2492
+ # Corresponds to the JSON property `faceAnnotations`
2493
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2FaceAnnotation>]
2494
+ attr_accessor :face_annotations
2495
+
2496
+ # Face detection annotations.
2497
+ # Corresponds to the JSON property `faceDetectionAnnotations`
2498
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation>]
2499
+ attr_accessor :face_detection_annotations
2500
+
2501
+ # Label annotations on frame level. There is exactly one element for each unique
2502
+ # label.
2229
2503
  # Corresponds to the JSON property `frameLabelAnnotations`
2230
2504
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2231
2505
  attr_accessor :frame_label_annotations
2232
2506
 
2233
- # Video file location in
2234
- # [Cloud Storage](https://cloud.google.com/storage/).
2507
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
2235
2508
  # Corresponds to the JSON property `inputUri`
2236
2509
  # @return [String]
2237
2510
  attr_accessor :input_uri
@@ -2246,6 +2519,11 @@ module Google
2246
2519
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation>]
2247
2520
  attr_accessor :object_annotations
2248
2521
 
2522
+ # Person detection annotations.
2523
+ # Corresponds to the JSON property `personDetectionAnnotations`
2524
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation>]
2525
+ attr_accessor :person_detection_annotations
2526
+
2249
2527
  # Video segment.
2250
2528
  # Corresponds to the JSON property `segment`
2251
2529
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment]
@@ -2258,11 +2536,11 @@ module Google
2258
2536
  attr_accessor :segment_label_annotations
2259
2537
 
2260
2538
  # Presence label annotations on video level or user-specified segment level.
2261
- # There is exactly one element for each unique label. Compared to the
2262
- # existing topical `segment_label_annotations`, this field presents more
2263
- # fine-grained, segment-level labels detected in video content and is made
2264
- # available only when the client sets `LabelDetectionConfig.model` to
2265
- # "builtin/latest" in the request.
2539
+ # There is exactly one element for each unique label. Compared to the existing
2540
+ # topical `segment_label_annotations`, this field presents more fine-grained,
2541
+ # segment-level labels detected in video content and is made available only when
2542
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
2543
+ # request.
2266
2544
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
2267
2545
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2268
2546
  attr_accessor :segment_presence_label_annotations
@@ -2272,17 +2550,17 @@ module Google
2272
2550
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment>]
2273
2551
  attr_accessor :shot_annotations
2274
2552
 
2275
- # Topical label annotations on shot level.
2276
- # There is exactly one element for each unique label.
2553
+ # Topical label annotations on shot level. There is exactly one element for each
2554
+ # unique label.
2277
2555
  # Corresponds to the JSON property `shotLabelAnnotations`
2278
2556
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2279
2557
  attr_accessor :shot_label_annotations
2280
2558
 
2281
2559
  # Presence label annotations on shot level. There is exactly one element for
2282
- # each unique label. Compared to the existing topical
2283
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
2284
- # labels detected in video content and is made available only when the client
2285
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
2560
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
2561
+ # this field presents more fine-grained, shot-level labels detected in video
2562
+ # content and is made available only when the client sets `LabelDetectionConfig.
2563
+ # model` to "builtin/latest" in the request.
2286
2564
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
2287
2565
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
2288
2566
  attr_accessor :shot_presence_label_annotations
@@ -2292,9 +2570,8 @@ module Google
2292
2570
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
2293
2571
  attr_accessor :speech_transcriptions
2294
2572
 
2295
- # OCR text detection and tracking.
2296
- # Annotations for list of detected text snippets. Each will have list of
2297
- # frame information associated with it.
2573
+ # OCR text detection and tracking. Annotations for list of detected text
2574
+ # snippets. Each will have list of frame information associated with it.
2298
2575
  # Corresponds to the JSON property `textAnnotations`
2299
2576
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
2300
2577
  attr_accessor :text_annotations
@@ -2307,10 +2584,13 @@ module Google
2307
2584
  def update!(**args)
2308
2585
  @error = args[:error] if args.key?(:error)
2309
2586
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
2587
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
2588
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
2310
2589
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
2311
2590
  @input_uri = args[:input_uri] if args.key?(:input_uri)
2312
2591
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
2313
2592
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
2593
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
2314
2594
  @segment = args[:segment] if args.key?(:segment)
2315
2595
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
2316
2596
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -2331,6 +2611,11 @@ module Google
2331
2611
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ExplicitContentDetectionConfig]
2332
2612
  attr_accessor :explicit_content_detection_config
2333
2613
 
2614
+ # Config for FACE_DETECTION.
2615
+ # Corresponds to the JSON property `faceDetectionConfig`
2616
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2FaceDetectionConfig]
2617
+ attr_accessor :face_detection_config
2618
+
2334
2619
  # Config for LABEL_DETECTION.
2335
2620
  # Corresponds to the JSON property `labelDetectionConfig`
2336
2621
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelDetectionConfig]
@@ -2341,9 +2626,14 @@ module Google
2341
2626
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingConfig]
2342
2627
  attr_accessor :object_tracking_config
2343
2628
 
2344
- # Video segments to annotate. The segments may overlap and are not required
2345
- # to be contiguous or span the whole video. If unspecified, each video is
2346
- # treated as a single segment.
2629
+ # Config for PERSON_DETECTION.
2630
+ # Corresponds to the JSON property `personDetectionConfig`
2631
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2PersonDetectionConfig]
2632
+ attr_accessor :person_detection_config
2633
+
2634
+ # Video segments to annotate. The segments may overlap and are not required to
2635
+ # be contiguous or span the whole video. If unspecified, each video is treated
2636
+ # as a single segment.
2347
2637
  # Corresponds to the JSON property `segments`
2348
2638
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment>]
2349
2639
  attr_accessor :segments
@@ -2370,8 +2660,10 @@ module Google
2370
2660
  # Update properties of this object
2371
2661
  def update!(**args)
2372
2662
  @explicit_content_detection_config = args[:explicit_content_detection_config] if args.key?(:explicit_content_detection_config)
2663
+ @face_detection_config = args[:face_detection_config] if args.key?(:face_detection_config)
2373
2664
  @label_detection_config = args[:label_detection_config] if args.key?(:label_detection_config)
2374
2665
  @object_tracking_config = args[:object_tracking_config] if args.key?(:object_tracking_config)
2666
+ @person_detection_config = args[:person_detection_config] if args.key?(:person_detection_config)
2375
2667
  @segments = args[:segments] if args.key?(:segments)
2376
2668
  @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config)
2377
2669
  @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config)
@@ -2383,14 +2675,14 @@ module Google
2383
2675
  class GoogleCloudVideointelligenceV1beta2VideoSegment
2384
2676
  include Google::Apis::Core::Hashable
2385
2677
 
2386
- # Time-offset, relative to the beginning of the video,
2387
- # corresponding to the end of the segment (inclusive).
2678
+ # Time-offset, relative to the beginning of the video, corresponding to the end
2679
+ # of the segment (inclusive).
2388
2680
  # Corresponds to the JSON property `endTimeOffset`
2389
2681
  # @return [String]
2390
2682
  attr_accessor :end_time_offset
2391
2683
 
2392
- # Time-offset, relative to the beginning of the video,
2393
- # corresponding to the start of the segment (inclusive).
2684
+ # Time-offset, relative to the beginning of the video, corresponding to the
2685
+ # start of the segment (inclusive).
2394
2686
  # Corresponds to the JSON property `startTimeOffset`
2395
2687
  # @return [String]
2396
2688
  attr_accessor :start_time_offset
@@ -2407,41 +2699,41 @@ module Google
2407
2699
  end
2408
2700
 
2409
2701
  # Word-specific information for recognized words. Word information is only
2410
- # included in the response when certain request parameters are set, such
2411
- # as `enable_word_time_offsets`.
2702
+ # included in the response when certain request parameters are set, such as `
2703
+ # enable_word_time_offsets`.
2412
2704
  class GoogleCloudVideointelligenceV1beta2WordInfo
2413
2705
  include Google::Apis::Core::Hashable
2414
2706
 
2415
2707
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2416
2708
  # indicates an estimated greater likelihood that the recognized words are
2417
- # correct. This field is set only for the top alternative.
2418
- # This field is not guaranteed to be accurate and users should not rely on it
2419
- # to be always provided.
2420
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2709
+ # correct. This field is set only for the top alternative. This field is not
2710
+ # guaranteed to be accurate and users should not rely on it to be always
2711
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2712
+ # not set.
2421
2713
  # Corresponds to the JSON property `confidence`
2422
2714
  # @return [Float]
2423
2715
  attr_accessor :confidence
2424
2716
 
2425
- # Time offset relative to the beginning of the audio, and
2426
- # corresponding to the end of the spoken word. This field is only set if
2427
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2428
- # experimental feature and the accuracy of the time offset can vary.
2717
+ # Time offset relative to the beginning of the audio, and corresponding to the
2718
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
2719
+ # true` and only in the top hypothesis. This is an experimental feature and the
2720
+ # accuracy of the time offset can vary.
2429
2721
  # Corresponds to the JSON property `endTime`
2430
2722
  # @return [String]
2431
2723
  attr_accessor :end_time
2432
2724
 
2433
- # Output only. A distinct integer value is assigned for every speaker within
2434
- # the audio. This field specifies which one of those speakers was detected to
2435
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
2436
- # and is only set if speaker diarization is enabled.
2725
+ # Output only. A distinct integer value is assigned for every speaker within the
2726
+ # audio. This field specifies which one of those speakers was detected to have
2727
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
2728
+ # only set if speaker diarization is enabled.
2437
2729
  # Corresponds to the JSON property `speakerTag`
2438
2730
  # @return [Fixnum]
2439
2731
  attr_accessor :speaker_tag
2440
2732
 
2441
- # Time offset relative to the beginning of the audio, and
2442
- # corresponding to the start of the spoken word. This field is only set if
2443
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2444
- # experimental feature and the accuracy of the time offset can vary.
2733
+ # Time offset relative to the beginning of the audio, and corresponding to the
2734
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
2735
+ # true` and only in the top hypothesis. This is an experimental feature and the
2736
+ # accuracy of the time offset can vary.
2445
2737
  # Corresponds to the JSON property `startTime`
2446
2738
  # @return [String]
2447
2739
  attr_accessor :start_time
@@ -2465,9 +2757,9 @@ module Google
2465
2757
  end
2466
2758
  end
2467
2759
 
2468
- # Video annotation progress. Included in the `metadata`
2469
- # field of the `Operation` returned by the `GetOperation`
2470
- # call of the `google::longrunning::Operations` service.
2760
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
2761
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2762
+ # service.
2471
2763
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
2472
2764
  include Google::Apis::Core::Hashable
2473
2765
 
@@ -2486,9 +2778,9 @@ module Google
2486
2778
  end
2487
2779
  end
2488
2780
 
2489
- # Video annotation response. Included in the `response`
2490
- # field of the `Operation` returned by the `GetOperation`
2491
- # call of the `google::longrunning::Operations` service.
2781
+ # Video annotation response. Included in the `response` field of the `Operation`
2782
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2783
+ # service.
2492
2784
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
2493
2785
  include Google::Apis::Core::Hashable
2494
2786
 
@@ -2516,14 +2808,14 @@ module Google
2516
2808
  # @return [Float]
2517
2809
  attr_accessor :confidence
2518
2810
 
2519
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
2520
- # A full list of supported type names will be provided in the document.
2811
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
2812
+ # full list of supported type names will be provided in the document.
2521
2813
  # Corresponds to the JSON property `name`
2522
2814
  # @return [String]
2523
2815
  attr_accessor :name
2524
2816
 
2525
- # Text value of the detection result. For example, the value for "HairColor"
2526
- # can be "black", "blonde", etc.
2817
+ # Text value of the detection result. For example, the value for "HairColor" can
2818
+ # be "black", "blonde", etc.
2527
2819
  # Corresponds to the JSON property `value`
2528
2820
  # @return [String]
2529
2821
  attr_accessor :value
@@ -2555,9 +2847,8 @@ module Google
2555
2847
  # @return [String]
2556
2848
  attr_accessor :name
2557
2849
 
2558
- # A vertex represents a 2D point in the image.
2559
- # NOTE: the normalized vertex coordinates are relative to the original image
2560
- # and range from 0 to 1.
2850
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2851
+ # coordinates are relative to the original image and range from 0 to 1.
2561
2852
  # Corresponds to the JSON property `point`
2562
2853
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
2563
2854
  attr_accessor :point
@@ -2583,8 +2874,7 @@ module Google
2583
2874
  # @return [String]
2584
2875
  attr_accessor :description
2585
2876
 
2586
- # Opaque entity ID. Some IDs may be available in
2587
- # [Google Knowledge Graph Search
2877
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
2588
2878
  # API](https://developers.google.com/knowledge-graph/).
2589
2879
  # Corresponds to the JSON property `entityId`
2590
2880
  # @return [String]
@@ -2607,9 +2897,9 @@ module Google
2607
2897
  end
2608
2898
  end
2609
2899
 
2610
- # Explicit content annotation (based on per-frame visual signals only).
2611
- # If no explicit content has been detected in a frame, no annotations are
2612
- # present for that frame.
2900
+ # Explicit content annotation (based on per-frame visual signals only). If no
2901
+ # explicit content has been detected in a frame, no annotations are present for
2902
+ # that frame.
2613
2903
  class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
2614
2904
  include Google::Apis::Core::Hashable
2615
2905
 
@@ -2629,19 +2919,97 @@ module Google
2629
2919
 
2630
2920
  # Update properties of this object
2631
2921
  def update!(**args)
2632
- @frames = args[:frames] if args.key?(:frames)
2922
+ @frames = args[:frames] if args.key?(:frames)
2923
+ @version = args[:version] if args.key?(:version)
2924
+ end
2925
+ end
2926
+
2927
+ # Video frame level annotation results for explicit content.
2928
+ class GoogleCloudVideointelligenceV1p1beta1ExplicitContentFrame
2929
+ include Google::Apis::Core::Hashable
2930
+
2931
+ # Likelihood of the pornography content..
2932
+ # Corresponds to the JSON property `pornographyLikelihood`
2933
+ # @return [String]
2934
+ attr_accessor :pornography_likelihood
2935
+
2936
+ # Time-offset, relative to the beginning of the video, corresponding to the
2937
+ # video frame for this location.
2938
+ # Corresponds to the JSON property `timeOffset`
2939
+ # @return [String]
2940
+ attr_accessor :time_offset
2941
+
2942
+ def initialize(**args)
2943
+ update!(**args)
2944
+ end
2945
+
2946
+ # Update properties of this object
2947
+ def update!(**args)
2948
+ @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
2949
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
2950
+ end
2951
+ end
2952
+
2953
+ # Deprecated. No effect.
2954
+ class GoogleCloudVideointelligenceV1p1beta1FaceAnnotation
2955
+ include Google::Apis::Core::Hashable
2956
+
2957
+ # All video frames where a face was detected.
2958
+ # Corresponds to the JSON property `frames`
2959
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1FaceFrame>]
2960
+ attr_accessor :frames
2961
+
2962
+ # All video segments where a face was detected.
2963
+ # Corresponds to the JSON property `segments`
2964
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1FaceSegment>]
2965
+ attr_accessor :segments
2966
+
2967
+ # Thumbnail of a representative face view (in JPEG format).
2968
+ # Corresponds to the JSON property `thumbnail`
2969
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
2970
+ # @return [String]
2971
+ attr_accessor :thumbnail
2972
+
2973
+ def initialize(**args)
2974
+ update!(**args)
2975
+ end
2976
+
2977
+ # Update properties of this object
2978
+ def update!(**args)
2979
+ @frames = args[:frames] if args.key?(:frames)
2980
+ @segments = args[:segments] if args.key?(:segments)
2981
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
2982
+ end
2983
+ end
2984
+
2985
+ # Face detection annotation.
2986
+ class GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation
2987
+ include Google::Apis::Core::Hashable
2988
+
2989
+ # Feature version.
2990
+ # Corresponds to the JSON property `version`
2991
+ # @return [String]
2992
+ attr_accessor :version
2993
+
2994
+ def initialize(**args)
2995
+ update!(**args)
2996
+ end
2997
+
2998
+ # Update properties of this object
2999
+ def update!(**args)
2633
3000
  @version = args[:version] if args.key?(:version)
2634
3001
  end
2635
3002
  end
2636
3003
 
2637
- # Video frame level annotation results for explicit content.
2638
- class GoogleCloudVideointelligenceV1p1beta1ExplicitContentFrame
3004
+ # Deprecated. No effect.
3005
+ class GoogleCloudVideointelligenceV1p1beta1FaceFrame
2639
3006
  include Google::Apis::Core::Hashable
2640
3007
 
2641
- # Likelihood of the pornography content..
2642
- # Corresponds to the JSON property `pornographyLikelihood`
2643
- # @return [String]
2644
- attr_accessor :pornography_likelihood
3008
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
3009
+ # same face is detected in multiple locations within the current frame.
3010
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
3011
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox>]
3012
+ attr_accessor :normalized_bounding_boxes
2645
3013
 
2646
3014
  # Time-offset, relative to the beginning of the video, corresponding to the
2647
3015
  # video frame for this location.
@@ -2655,19 +3023,37 @@ module Google
2655
3023
 
2656
3024
  # Update properties of this object
2657
3025
  def update!(**args)
2658
- @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
3026
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
2659
3027
  @time_offset = args[:time_offset] if args.key?(:time_offset)
2660
3028
  end
2661
3029
  end
2662
3030
 
3031
+ # Video segment level annotation results for face detection.
3032
+ class GoogleCloudVideointelligenceV1p1beta1FaceSegment
3033
+ include Google::Apis::Core::Hashable
3034
+
3035
+ # Video segment.
3036
+ # Corresponds to the JSON property `segment`
3037
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
3038
+ attr_accessor :segment
3039
+
3040
+ def initialize(**args)
3041
+ update!(**args)
3042
+ end
3043
+
3044
+ # Update properties of this object
3045
+ def update!(**args)
3046
+ @segment = args[:segment] if args.key?(:segment)
3047
+ end
3048
+ end
3049
+
2663
3050
  # Label annotation.
2664
3051
  class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
2665
3052
  include Google::Apis::Core::Hashable
2666
3053
 
2667
- # Common categories for the detected entity.
2668
- # For example, when the label is `Terrier`, the category is likely `dog`. And
2669
- # in some cases there might be more than one categories e.g., `Terrier` could
2670
- # also be a `pet`.
3054
+ # Common categories for the detected entity. For example, when the label is `
3055
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
3056
+ # than one categories e.g., `Terrier` could also be a `pet`.
2671
3057
  # Corresponds to the JSON property `categoryEntities`
2672
3058
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity>]
2673
3059
  attr_accessor :category_entities
@@ -2766,14 +3152,14 @@ module Google
2766
3152
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity]
2767
3153
  attr_accessor :entity
2768
3154
 
2769
- # All video segments where the recognized logo appears. There might be
2770
- # multiple instances of the same logo class appearing in one VideoSegment.
3155
+ # All video segments where the recognized logo appears. There might be multiple
3156
+ # instances of the same logo class appearing in one VideoSegment.
2771
3157
  # Corresponds to the JSON property `segments`
2772
3158
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
2773
3159
  attr_accessor :segments
2774
3160
 
2775
- # All logo tracks where the recognized logo appears. Each track corresponds
2776
- # to one logo instance appearing in consecutive frames.
3161
+ # All logo tracks where the recognized logo appears. Each track corresponds to
3162
+ # one logo instance appearing in consecutive frames.
2777
3163
  # Corresponds to the JSON property `tracks`
2778
3164
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Track>]
2779
3165
  attr_accessor :tracks
@@ -2790,9 +3176,8 @@ module Google
2790
3176
  end
2791
3177
  end
2792
3178
 
2793
- # Normalized bounding box.
2794
- # The normalized vertex coordinates are relative to the original image.
2795
- # Range: [0, 1].
3179
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3180
+ # original image. Range: [0, 1].
2796
3181
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
2797
3182
  include Google::Apis::Core::Hashable
2798
3183
 
@@ -2830,20 +3215,12 @@ module Google
2830
3215
  end
2831
3216
 
2832
3217
  # Normalized bounding polygon for text (that might not be aligned with axis).
2833
- # Contains list of the corner points in clockwise order starting from
2834
- # top-left corner. For example, for a rectangular bounding box:
2835
- # When the text is horizontal it might look like:
2836
- # 0----1
2837
- # | |
2838
- # 3----2
2839
- # When it's clockwise rotated 180 degrees around the top-left corner it
2840
- # becomes:
2841
- # 2----3
2842
- # | |
2843
- # 1----0
2844
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2845
- # than 0, or greater than 1 due to trignometric calculations for location of
2846
- # the box.
3218
+ # Contains list of the corner points in clockwise order starting from top-left
3219
+ # corner. For example, for a rectangular bounding box: When the text is
3220
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3221
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3222
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3223
+ # or greater than 1 due to trignometric calculations for location of the box.
2847
3224
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
2848
3225
  include Google::Apis::Core::Hashable
2849
3226
 
@@ -2862,9 +3239,8 @@ module Google
2862
3239
  end
2863
3240
  end
2864
3241
 
2865
- # A vertex represents a 2D point in the image.
2866
- # NOTE: the normalized vertex coordinates are relative to the original image
2867
- # and range from 0 to 1.
3242
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3243
+ # coordinates are relative to the original image and range from 0 to 1.
2868
3244
  class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
2869
3245
  include Google::Apis::Core::Hashable
2870
3246
 
@@ -2903,10 +3279,10 @@ module Google
2903
3279
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity]
2904
3280
  attr_accessor :entity
2905
3281
 
2906
- # Information corresponding to all frames where this object track appears.
2907
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
2908
- # messages in frames.
2909
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
3282
+ # Information corresponding to all frames where this object track appears. Non-
3283
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
3284
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
3285
+ # frames.
2910
3286
  # Corresponds to the JSON property `frames`
2911
3287
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
2912
3288
  attr_accessor :frames
@@ -2916,12 +3292,11 @@ module Google
2916
3292
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
2917
3293
  attr_accessor :segment
2918
3294
 
2919
- # Streaming mode ONLY.
2920
- # In streaming mode, we do not know the end time of a tracked object
2921
- # before it is completed. Hence, there is no VideoSegment info returned.
2922
- # Instead, we provide a unique identifiable integer track_id so that
2923
- # the customers can correlate the results of the ongoing
2924
- # ObjectTrackAnnotation of the same track_id over time.
3295
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
3296
+ # tracked object before it is completed. Hence, there is no VideoSegment info
3297
+ # returned. Instead, we provide a unique identifiable integer track_id so that
3298
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
3299
+ # of the same track_id over time.
2925
3300
  # Corresponds to the JSON property `trackId`
2926
3301
  # @return [Fixnum]
2927
3302
  attr_accessor :track_id
@@ -2951,9 +3326,8 @@ module Google
2951
3326
  class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
2952
3327
  include Google::Apis::Core::Hashable
2953
3328
 
2954
- # Normalized bounding box.
2955
- # The normalized vertex coordinates are relative to the original image.
2956
- # Range: [0, 1].
3329
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3330
+ # original image. Range: [0, 1].
2957
3331
  # Corresponds to the JSON property `normalizedBoundingBox`
2958
3332
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
2959
3333
  attr_accessor :normalized_bounding_box
@@ -2974,16 +3348,41 @@ module Google
2974
3348
  end
2975
3349
  end
2976
3350
 
3351
+ # Person detection annotation per video.
3352
+ class GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation
3353
+ include Google::Apis::Core::Hashable
3354
+
3355
+ # The detected tracks of a person.
3356
+ # Corresponds to the JSON property `tracks`
3357
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Track>]
3358
+ attr_accessor :tracks
3359
+
3360
+ # Feature version.
3361
+ # Corresponds to the JSON property `version`
3362
+ # @return [String]
3363
+ attr_accessor :version
3364
+
3365
+ def initialize(**args)
3366
+ update!(**args)
3367
+ end
3368
+
3369
+ # Update properties of this object
3370
+ def update!(**args)
3371
+ @tracks = args[:tracks] if args.key?(:tracks)
3372
+ @version = args[:version] if args.key?(:version)
3373
+ end
3374
+ end
3375
+
2977
3376
  # Alternative hypotheses (a.k.a. n-best list).
2978
3377
  class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative
2979
3378
  include Google::Apis::Core::Hashable
2980
3379
 
2981
3380
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2982
3381
  # indicates an estimated greater likelihood that the recognized words are
2983
- # correct. This field is set only for the top alternative.
2984
- # This field is not guaranteed to be accurate and users should not rely on it
2985
- # to be always provided.
2986
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3382
+ # correct. This field is set only for the top alternative. This field is not
3383
+ # guaranteed to be accurate and users should not rely on it to be always
3384
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3385
+ # not set.
2987
3386
  # Corresponds to the JSON property `confidence`
2988
3387
  # @return [Float]
2989
3388
  attr_accessor :confidence
@@ -2994,8 +3393,8 @@ module Google
2994
3393
  attr_accessor :transcript
2995
3394
 
2996
3395
  # Output only. A list of word-specific information for each recognized word.
2997
- # Note: When `enable_speaker_diarization` is set to true, you will see all
2998
- # the words from the beginning of the audio.
3396
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
3397
+ # words from the beginning of the audio.
2999
3398
  # Corresponds to the JSON property `words`
3000
3399
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
3001
3400
  attr_accessor :words
@@ -3016,18 +3415,17 @@ module Google
3016
3415
  class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
3017
3416
  include Google::Apis::Core::Hashable
3018
3417
 
3019
- # May contain one or more recognition hypotheses (up to the maximum specified
3020
- # in `max_alternatives`). These alternatives are ordered in terms of
3021
- # accuracy, with the top (first) alternative being the most probable, as
3022
- # ranked by the recognizer.
3418
+ # May contain one or more recognition hypotheses (up to the maximum specified in
3419
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
3420
+ # the top (first) alternative being the most probable, as ranked by the
3421
+ # recognizer.
3023
3422
  # Corresponds to the JSON property `alternatives`
3024
3423
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
3025
3424
  attr_accessor :alternatives
3026
3425
 
3027
3426
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
3028
- # language tag of
3029
- # the language in this result. This language code was detected to have the
3030
- # most likelihood of being spoken in the audio.
3427
+ # language tag of the language in this result. This language code was detected
3428
+ # to have the most likelihood of being spoken in the audio.
3031
3429
  # Corresponds to the JSON property `languageCode`
3032
3430
  # @return [String]
3033
3431
  attr_accessor :language_code
@@ -3076,27 +3474,19 @@ module Google
3076
3474
  end
3077
3475
  end
3078
3476
 
3079
- # Video frame level annotation results for text annotation (OCR).
3080
- # Contains information regarding timestamp and bounding box locations for the
3081
- # frames containing detected OCR text snippets.
3477
+ # Video frame level annotation results for text annotation (OCR). Contains
3478
+ # information regarding timestamp and bounding box locations for the frames
3479
+ # containing detected OCR text snippets.
3082
3480
  class GoogleCloudVideointelligenceV1p1beta1TextFrame
3083
3481
  include Google::Apis::Core::Hashable
3084
3482
 
3085
3483
  # Normalized bounding polygon for text (that might not be aligned with axis).
3086
- # Contains list of the corner points in clockwise order starting from
3087
- # top-left corner. For example, for a rectangular bounding box:
3088
- # When the text is horizontal it might look like:
3089
- # 0----1
3090
- # | |
3091
- # 3----2
3092
- # When it's clockwise rotated 180 degrees around the top-left corner it
3093
- # becomes:
3094
- # 2----3
3095
- # | |
3096
- # 1----0
3097
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3098
- # than 0, or greater than 1 due to trignometric calculations for location of
3099
- # the box.
3484
+ # Contains list of the corner points in clockwise order starting from top-left
3485
+ # corner. For example, for a rectangular bounding box: When the text is
3486
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3487
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3488
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3489
+ # or greater than 1 due to trignometric calculations for location of the box.
3100
3490
  # Corresponds to the JSON property `rotatedBoundingBox`
3101
3491
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
3102
3492
  attr_accessor :rotated_bounding_box
@@ -3149,9 +3539,8 @@ module Google
3149
3539
  end
3150
3540
  end
3151
3541
 
3152
- # For tracking related features.
3153
- # An object at time_offset with attributes, and located with
3154
- # normalized_bounding_box.
3542
+ # For tracking related features. An object at time_offset with attributes, and
3543
+ # located with normalized_bounding_box.
3155
3544
  class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
3156
3545
  include Google::Apis::Core::Hashable
3157
3546
 
@@ -3165,15 +3554,14 @@ module Google
3165
3554
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
3166
3555
  attr_accessor :landmarks
3167
3556
 
3168
- # Normalized bounding box.
3169
- # The normalized vertex coordinates are relative to the original image.
3170
- # Range: [0, 1].
3557
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3558
+ # original image. Range: [0, 1].
3171
3559
  # Corresponds to the JSON property `normalizedBoundingBox`
3172
3560
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
3173
3561
  attr_accessor :normalized_bounding_box
3174
3562
 
3175
- # Time-offset, relative to the beginning of the video,
3176
- # corresponding to the video frame for this object.
3563
+ # Time-offset, relative to the beginning of the video, corresponding to the
3564
+ # video frame for this object.
3177
3565
  # Corresponds to the JSON property `timeOffset`
3178
3566
  # @return [String]
3179
3567
  attr_accessor :time_offset
@@ -3232,20 +3620,19 @@ module Google
3232
3620
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
3233
3621
  include Google::Apis::Core::Hashable
3234
3622
 
3235
- # Specifies which feature is being tracked if the request contains more than
3236
- # one feature.
3623
+ # Specifies which feature is being tracked if the request contains more than one
3624
+ # feature.
3237
3625
  # Corresponds to the JSON property `feature`
3238
3626
  # @return [String]
3239
3627
  attr_accessor :feature
3240
3628
 
3241
- # Video file location in
3242
- # [Cloud Storage](https://cloud.google.com/storage/).
3629
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3243
3630
  # Corresponds to the JSON property `inputUri`
3244
3631
  # @return [String]
3245
3632
  attr_accessor :input_uri
3246
3633
 
3247
- # Approximate percentage processed thus far. Guaranteed to be
3248
- # 100 when fully processed.
3634
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
3635
+ # processed.
3249
3636
  # Corresponds to the JSON property `progressPercent`
3250
3637
  # @return [Fixnum]
3251
3638
  attr_accessor :progress_percent
@@ -3284,31 +3671,40 @@ module Google
3284
3671
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
3285
3672
  include Google::Apis::Core::Hashable
3286
3673
 
3287
- # The `Status` type defines a logical error model that is suitable for
3288
- # different programming environments, including REST APIs and RPC APIs. It is
3289
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3290
- # three pieces of data: error code, error message, and error details.
3291
- # You can find out more about this error model and how to work with it in the
3292
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3674
+ # The `Status` type defines a logical error model that is suitable for different
3675
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3676
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3677
+ # data: error code, error message, and error details. You can find out more
3678
+ # about this error model and how to work with it in the [API Design Guide](https:
3679
+ # //cloud.google.com/apis/design/errors).
3293
3680
  # Corresponds to the JSON property `error`
3294
3681
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
3295
3682
  attr_accessor :error
3296
3683
 
3297
- # Explicit content annotation (based on per-frame visual signals only).
3298
- # If no explicit content has been detected in a frame, no annotations are
3299
- # present for that frame.
3684
+ # Explicit content annotation (based on per-frame visual signals only). If no
3685
+ # explicit content has been detected in a frame, no annotations are present for
3686
+ # that frame.
3300
3687
  # Corresponds to the JSON property `explicitAnnotation`
3301
3688
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
3302
3689
  attr_accessor :explicit_annotation
3303
3690
 
3304
- # Label annotations on frame level.
3305
- # There is exactly one element for each unique label.
3691
+ # Deprecated. Please use `face_detection_annotations` instead.
3692
+ # Corresponds to the JSON property `faceAnnotations`
3693
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1FaceAnnotation>]
3694
+ attr_accessor :face_annotations
3695
+
3696
+ # Face detection annotations.
3697
+ # Corresponds to the JSON property `faceDetectionAnnotations`
3698
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation>]
3699
+ attr_accessor :face_detection_annotations
3700
+
3701
+ # Label annotations on frame level. There is exactly one element for each unique
3702
+ # label.
3306
3703
  # Corresponds to the JSON property `frameLabelAnnotations`
3307
3704
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3308
3705
  attr_accessor :frame_label_annotations
3309
3706
 
3310
- # Video file location in
3311
- # [Cloud Storage](https://cloud.google.com/storage/).
3707
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3312
3708
  # Corresponds to the JSON property `inputUri`
3313
3709
  # @return [String]
3314
3710
  attr_accessor :input_uri
@@ -3323,6 +3719,11 @@ module Google
3323
3719
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation>]
3324
3720
  attr_accessor :object_annotations
3325
3721
 
3722
+ # Person detection annotations.
3723
+ # Corresponds to the JSON property `personDetectionAnnotations`
3724
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation>]
3725
+ attr_accessor :person_detection_annotations
3726
+
3326
3727
  # Video segment.
3327
3728
  # Corresponds to the JSON property `segment`
3328
3729
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
@@ -3335,11 +3736,11 @@ module Google
3335
3736
  attr_accessor :segment_label_annotations
3336
3737
 
3337
3738
  # Presence label annotations on video level or user-specified segment level.
3338
- # There is exactly one element for each unique label. Compared to the
3339
- # existing topical `segment_label_annotations`, this field presents more
3340
- # fine-grained, segment-level labels detected in video content and is made
3341
- # available only when the client sets `LabelDetectionConfig.model` to
3342
- # "builtin/latest" in the request.
3739
+ # There is exactly one element for each unique label. Compared to the existing
3740
+ # topical `segment_label_annotations`, this field presents more fine-grained,
3741
+ # segment-level labels detected in video content and is made available only when
3742
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
3743
+ # request.
3343
3744
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
3344
3745
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3345
3746
  attr_accessor :segment_presence_label_annotations
@@ -3349,17 +3750,17 @@ module Google
3349
3750
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
3350
3751
  attr_accessor :shot_annotations
3351
3752
 
3352
- # Topical label annotations on shot level.
3353
- # There is exactly one element for each unique label.
3753
+ # Topical label annotations on shot level. There is exactly one element for each
3754
+ # unique label.
3354
3755
  # Corresponds to the JSON property `shotLabelAnnotations`
3355
3756
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3356
3757
  attr_accessor :shot_label_annotations
3357
3758
 
3358
3759
  # Presence label annotations on shot level. There is exactly one element for
3359
- # each unique label. Compared to the existing topical
3360
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
3361
- # labels detected in video content and is made available only when the client
3362
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
3760
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
3761
+ # this field presents more fine-grained, shot-level labels detected in video
3762
+ # content and is made available only when the client sets `LabelDetectionConfig.
3763
+ # model` to "builtin/latest" in the request.
3363
3764
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
3364
3765
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3365
3766
  attr_accessor :shot_presence_label_annotations
@@ -3369,9 +3770,8 @@ module Google
3369
3770
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
3370
3771
  attr_accessor :speech_transcriptions
3371
3772
 
3372
- # OCR text detection and tracking.
3373
- # Annotations for list of detected text snippets. Each will have list of
3374
- # frame information associated with it.
3773
+ # OCR text detection and tracking. Annotations for list of detected text
3774
+ # snippets. Each will have list of frame information associated with it.
3375
3775
  # Corresponds to the JSON property `textAnnotations`
3376
3776
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
3377
3777
  attr_accessor :text_annotations
@@ -3384,10 +3784,13 @@ module Google
3384
3784
  def update!(**args)
3385
3785
  @error = args[:error] if args.key?(:error)
3386
3786
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
3787
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
3788
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
3387
3789
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
3388
3790
  @input_uri = args[:input_uri] if args.key?(:input_uri)
3389
3791
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
3390
3792
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
3793
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
3391
3794
  @segment = args[:segment] if args.key?(:segment)
3392
3795
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
3393
3796
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -3403,14 +3806,14 @@ module Google
3403
3806
  class GoogleCloudVideointelligenceV1p1beta1VideoSegment
3404
3807
  include Google::Apis::Core::Hashable
3405
3808
 
3406
- # Time-offset, relative to the beginning of the video,
3407
- # corresponding to the end of the segment (inclusive).
3809
+ # Time-offset, relative to the beginning of the video, corresponding to the end
3810
+ # of the segment (inclusive).
3408
3811
  # Corresponds to the JSON property `endTimeOffset`
3409
3812
  # @return [String]
3410
3813
  attr_accessor :end_time_offset
3411
3814
 
3412
- # Time-offset, relative to the beginning of the video,
3413
- # corresponding to the start of the segment (inclusive).
3815
+ # Time-offset, relative to the beginning of the video, corresponding to the
3816
+ # start of the segment (inclusive).
3414
3817
  # Corresponds to the JSON property `startTimeOffset`
3415
3818
  # @return [String]
3416
3819
  attr_accessor :start_time_offset
@@ -3427,41 +3830,41 @@ module Google
3427
3830
  end
3428
3831
 
3429
3832
  # Word-specific information for recognized words. Word information is only
3430
- # included in the response when certain request parameters are set, such
3431
- # as `enable_word_time_offsets`.
3833
+ # included in the response when certain request parameters are set, such as `
3834
+ # enable_word_time_offsets`.
3432
3835
  class GoogleCloudVideointelligenceV1p1beta1WordInfo
3433
3836
  include Google::Apis::Core::Hashable
3434
3837
 
3435
3838
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
3436
3839
  # indicates an estimated greater likelihood that the recognized words are
3437
- # correct. This field is set only for the top alternative.
3438
- # This field is not guaranteed to be accurate and users should not rely on it
3439
- # to be always provided.
3440
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3840
+ # correct. This field is set only for the top alternative. This field is not
3841
+ # guaranteed to be accurate and users should not rely on it to be always
3842
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3843
+ # not set.
3441
3844
  # Corresponds to the JSON property `confidence`
3442
3845
  # @return [Float]
3443
3846
  attr_accessor :confidence
3444
3847
 
3445
- # Time offset relative to the beginning of the audio, and
3446
- # corresponding to the end of the spoken word. This field is only set if
3447
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3448
- # experimental feature and the accuracy of the time offset can vary.
3848
+ # Time offset relative to the beginning of the audio, and corresponding to the
3849
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
3850
+ # true` and only in the top hypothesis. This is an experimental feature and the
3851
+ # accuracy of the time offset can vary.
3449
3852
  # Corresponds to the JSON property `endTime`
3450
3853
  # @return [String]
3451
3854
  attr_accessor :end_time
3452
3855
 
3453
- # Output only. A distinct integer value is assigned for every speaker within
3454
- # the audio. This field specifies which one of those speakers was detected to
3455
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
3456
- # and is only set if speaker diarization is enabled.
3856
+ # Output only. A distinct integer value is assigned for every speaker within the
3857
+ # audio. This field specifies which one of those speakers was detected to have
3858
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
3859
+ # only set if speaker diarization is enabled.
3457
3860
  # Corresponds to the JSON property `speakerTag`
3458
3861
  # @return [Fixnum]
3459
3862
  attr_accessor :speaker_tag
3460
3863
 
3461
- # Time offset relative to the beginning of the audio, and
3462
- # corresponding to the start of the spoken word. This field is only set if
3463
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3464
- # experimental feature and the accuracy of the time offset can vary.
3864
+ # Time offset relative to the beginning of the audio, and corresponding to the
3865
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
3866
+ # true` and only in the top hypothesis. This is an experimental feature and the
3867
+ # accuracy of the time offset can vary.
3465
3868
  # Corresponds to the JSON property `startTime`
3466
3869
  # @return [String]
3467
3870
  attr_accessor :start_time
@@ -3485,9 +3888,9 @@ module Google
3485
3888
  end
3486
3889
  end
3487
3890
 
3488
- # Video annotation progress. Included in the `metadata`
3489
- # field of the `Operation` returned by the `GetOperation`
3490
- # call of the `google::longrunning::Operations` service.
3891
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
3892
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3893
+ # service.
3491
3894
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
3492
3895
  include Google::Apis::Core::Hashable
3493
3896
 
@@ -3506,9 +3909,9 @@ module Google
3506
3909
  end
3507
3910
  end
3508
3911
 
3509
- # Video annotation response. Included in the `response`
3510
- # field of the `Operation` returned by the `GetOperation`
3511
- # call of the `google::longrunning::Operations` service.
3912
+ # Video annotation response. Included in the `response` field of the `Operation`
3913
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3914
+ # service.
3512
3915
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
3513
3916
  include Google::Apis::Core::Hashable
3514
3917
 
@@ -3536,14 +3939,14 @@ module Google
3536
3939
  # @return [Float]
3537
3940
  attr_accessor :confidence
3538
3941
 
3539
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
3540
- # A full list of supported type names will be provided in the document.
3942
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
3943
+ # full list of supported type names will be provided in the document.
3541
3944
  # Corresponds to the JSON property `name`
3542
3945
  # @return [String]
3543
3946
  attr_accessor :name
3544
3947
 
3545
- # Text value of the detection result. For example, the value for "HairColor"
3546
- # can be "black", "blonde", etc.
3948
+ # Text value of the detection result. For example, the value for "HairColor" can
3949
+ # be "black", "blonde", etc.
3547
3950
  # Corresponds to the JSON property `value`
3548
3951
  # @return [String]
3549
3952
  attr_accessor :value
@@ -3575,9 +3978,8 @@ module Google
3575
3978
  # @return [String]
3576
3979
  attr_accessor :name
3577
3980
 
3578
- # A vertex represents a 2D point in the image.
3579
- # NOTE: the normalized vertex coordinates are relative to the original image
3580
- # and range from 0 to 1.
3981
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3982
+ # coordinates are relative to the original image and range from 0 to 1.
3581
3983
  # Corresponds to the JSON property `point`
3582
3984
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
3583
3985
  attr_accessor :point
@@ -3603,8 +4005,7 @@ module Google
3603
4005
  # @return [String]
3604
4006
  attr_accessor :description
3605
4007
 
3606
- # Opaque entity ID. Some IDs may be available in
3607
- # [Google Knowledge Graph Search
4008
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
3608
4009
  # API](https://developers.google.com/knowledge-graph/).
3609
4010
  # Corresponds to the JSON property `entityId`
3610
4011
  # @return [String]
@@ -3627,9 +4028,9 @@ module Google
3627
4028
  end
3628
4029
  end
3629
4030
 
3630
- # Explicit content annotation (based on per-frame visual signals only).
3631
- # If no explicit content has been detected in a frame, no annotations are
3632
- # present for that frame.
4031
+ # Explicit content annotation (based on per-frame visual signals only). If no
4032
+ # explicit content has been detected in a frame, no annotations are present for
4033
+ # that frame.
3633
4034
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
3634
4035
  include Google::Apis::Core::Hashable
3635
4036
 
@@ -3680,14 +4081,110 @@ module Google
3680
4081
  end
3681
4082
  end
3682
4083
 
4084
+ # Deprecated. No effect.
4085
+ class GoogleCloudVideointelligenceV1p2beta1FaceAnnotation
4086
+ include Google::Apis::Core::Hashable
4087
+
4088
+ # All video frames where a face was detected.
4089
+ # Corresponds to the JSON property `frames`
4090
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1FaceFrame>]
4091
+ attr_accessor :frames
4092
+
4093
+ # All video segments where a face was detected.
4094
+ # Corresponds to the JSON property `segments`
4095
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1FaceSegment>]
4096
+ attr_accessor :segments
4097
+
4098
+ # Thumbnail of a representative face view (in JPEG format).
4099
+ # Corresponds to the JSON property `thumbnail`
4100
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
4101
+ # @return [String]
4102
+ attr_accessor :thumbnail
4103
+
4104
+ def initialize(**args)
4105
+ update!(**args)
4106
+ end
4107
+
4108
+ # Update properties of this object
4109
+ def update!(**args)
4110
+ @frames = args[:frames] if args.key?(:frames)
4111
+ @segments = args[:segments] if args.key?(:segments)
4112
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
4113
+ end
4114
+ end
4115
+
4116
+ # Face detection annotation.
4117
+ class GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation
4118
+ include Google::Apis::Core::Hashable
4119
+
4120
+ # Feature version.
4121
+ # Corresponds to the JSON property `version`
4122
+ # @return [String]
4123
+ attr_accessor :version
4124
+
4125
+ def initialize(**args)
4126
+ update!(**args)
4127
+ end
4128
+
4129
+ # Update properties of this object
4130
+ def update!(**args)
4131
+ @version = args[:version] if args.key?(:version)
4132
+ end
4133
+ end
4134
+
4135
+ # Deprecated. No effect.
4136
+ class GoogleCloudVideointelligenceV1p2beta1FaceFrame
4137
+ include Google::Apis::Core::Hashable
4138
+
4139
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
4140
+ # same face is detected in multiple locations within the current frame.
4141
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
4142
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox>]
4143
+ attr_accessor :normalized_bounding_boxes
4144
+
4145
+ # Time-offset, relative to the beginning of the video, corresponding to the
4146
+ # video frame for this location.
4147
+ # Corresponds to the JSON property `timeOffset`
4148
+ # @return [String]
4149
+ attr_accessor :time_offset
4150
+
4151
+ def initialize(**args)
4152
+ update!(**args)
4153
+ end
4154
+
4155
+ # Update properties of this object
4156
+ def update!(**args)
4157
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
4158
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
4159
+ end
4160
+ end
4161
+
4162
+ # Video segment level annotation results for face detection.
4163
+ class GoogleCloudVideointelligenceV1p2beta1FaceSegment
4164
+ include Google::Apis::Core::Hashable
4165
+
4166
+ # Video segment.
4167
+ # Corresponds to the JSON property `segment`
4168
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
4169
+ attr_accessor :segment
4170
+
4171
+ def initialize(**args)
4172
+ update!(**args)
4173
+ end
4174
+
4175
+ # Update properties of this object
4176
+ def update!(**args)
4177
+ @segment = args[:segment] if args.key?(:segment)
4178
+ end
4179
+ end
4180
+
3683
4181
  # Label annotation.
3684
4182
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
3685
4183
  include Google::Apis::Core::Hashable
3686
4184
 
3687
- # Common categories for the detected entity.
3688
- # For example, when the label is `Terrier`, the category is likely `dog`. And
3689
- # in some cases there might be more than one categories e.g., `Terrier` could
3690
- # also be a `pet`.
4185
+ # Common categories for the detected entity. For example, when the label is `
4186
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
4187
+ # than one categories e.g., `Terrier` could also be a `pet`.
3691
4188
  # Corresponds to the JSON property `categoryEntities`
3692
4189
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Entity>]
3693
4190
  attr_accessor :category_entities
@@ -3786,14 +4283,14 @@ module Google
3786
4283
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Entity]
3787
4284
  attr_accessor :entity
3788
4285
 
3789
- # All video segments where the recognized logo appears. There might be
3790
- # multiple instances of the same logo class appearing in one VideoSegment.
4286
+ # All video segments where the recognized logo appears. There might be multiple
4287
+ # instances of the same logo class appearing in one VideoSegment.
3791
4288
  # Corresponds to the JSON property `segments`
3792
4289
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
3793
4290
  attr_accessor :segments
3794
4291
 
3795
- # All logo tracks where the recognized logo appears. Each track corresponds
3796
- # to one logo instance appearing in consecutive frames.
4292
+ # All logo tracks where the recognized logo appears. Each track corresponds to
4293
+ # one logo instance appearing in consecutive frames.
3797
4294
  # Corresponds to the JSON property `tracks`
3798
4295
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Track>]
3799
4296
  attr_accessor :tracks
@@ -3810,9 +4307,8 @@ module Google
3810
4307
  end
3811
4308
  end
3812
4309
 
3813
- # Normalized bounding box.
3814
- # The normalized vertex coordinates are relative to the original image.
3815
- # Range: [0, 1].
4310
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4311
+ # original image. Range: [0, 1].
3816
4312
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
3817
4313
  include Google::Apis::Core::Hashable
3818
4314
 
@@ -3850,20 +4346,12 @@ module Google
3850
4346
  end
3851
4347
 
3852
4348
  # Normalized bounding polygon for text (that might not be aligned with axis).
3853
- # Contains list of the corner points in clockwise order starting from
3854
- # top-left corner. For example, for a rectangular bounding box:
3855
- # When the text is horizontal it might look like:
3856
- # 0----1
3857
- # | |
3858
- # 3----2
3859
- # When it's clockwise rotated 180 degrees around the top-left corner it
3860
- # becomes:
3861
- # 2----3
3862
- # | |
3863
- # 1----0
3864
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3865
- # than 0, or greater than 1 due to trignometric calculations for location of
3866
- # the box.
4349
+ # Contains list of the corner points in clockwise order starting from top-left
4350
+ # corner. For example, for a rectangular bounding box: When the text is
4351
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4352
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4353
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4354
+ # or greater than 1 due to trignometric calculations for location of the box.
3867
4355
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
3868
4356
  include Google::Apis::Core::Hashable
3869
4357
 
@@ -3882,9 +4370,8 @@ module Google
3882
4370
  end
3883
4371
  end
3884
4372
 
3885
- # A vertex represents a 2D point in the image.
3886
- # NOTE: the normalized vertex coordinates are relative to the original image
3887
- # and range from 0 to 1.
4373
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4374
+ # coordinates are relative to the original image and range from 0 to 1.
3888
4375
  class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
3889
4376
  include Google::Apis::Core::Hashable
3890
4377
 
@@ -3923,10 +4410,10 @@ module Google
3923
4410
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Entity]
3924
4411
  attr_accessor :entity
3925
4412
 
3926
- # Information corresponding to all frames where this object track appears.
3927
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
3928
- # messages in frames.
3929
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
4413
+ # Information corresponding to all frames where this object track appears. Non-
4414
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
4415
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
4416
+ # frames.
3930
4417
  # Corresponds to the JSON property `frames`
3931
4418
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
3932
4419
  attr_accessor :frames
@@ -3936,12 +4423,11 @@ module Google
3936
4423
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
3937
4424
  attr_accessor :segment
3938
4425
 
3939
- # Streaming mode ONLY.
3940
- # In streaming mode, we do not know the end time of a tracked object
3941
- # before it is completed. Hence, there is no VideoSegment info returned.
3942
- # Instead, we provide a unique identifiable integer track_id so that
3943
- # the customers can correlate the results of the ongoing
3944
- # ObjectTrackAnnotation of the same track_id over time.
4426
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
4427
+ # tracked object before it is completed. Hence, there is no VideoSegment info
4428
+ # returned. Instead, we provide a unique identifiable integer track_id so that
4429
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
4430
+ # of the same track_id over time.
3945
4431
  # Corresponds to the JSON property `trackId`
3946
4432
  # @return [Fixnum]
3947
4433
  attr_accessor :track_id
@@ -3971,9 +4457,8 @@ module Google
3971
4457
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
3972
4458
  include Google::Apis::Core::Hashable
3973
4459
 
3974
- # Normalized bounding box.
3975
- # The normalized vertex coordinates are relative to the original image.
3976
- # Range: [0, 1].
4460
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4461
+ # original image. Range: [0, 1].
3977
4462
  # Corresponds to the JSON property `normalizedBoundingBox`
3978
4463
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
3979
4464
  attr_accessor :normalized_bounding_box
@@ -3994,16 +4479,41 @@ module Google
3994
4479
  end
3995
4480
  end
3996
4481
 
4482
+ # Person detection annotation per video.
4483
+ class GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation
4484
+ include Google::Apis::Core::Hashable
4485
+
4486
+ # The detected tracks of a person.
4487
+ # Corresponds to the JSON property `tracks`
4488
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1Track>]
4489
+ attr_accessor :tracks
4490
+
4491
+ # Feature version.
4492
+ # Corresponds to the JSON property `version`
4493
+ # @return [String]
4494
+ attr_accessor :version
4495
+
4496
+ def initialize(**args)
4497
+ update!(**args)
4498
+ end
4499
+
4500
+ # Update properties of this object
4501
+ def update!(**args)
4502
+ @tracks = args[:tracks] if args.key?(:tracks)
4503
+ @version = args[:version] if args.key?(:version)
4504
+ end
4505
+ end
4506
+
3997
4507
  # Alternative hypotheses (a.k.a. n-best list).
3998
4508
  class GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative
3999
4509
  include Google::Apis::Core::Hashable
4000
4510
 
4001
4511
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4002
4512
  # indicates an estimated greater likelihood that the recognized words are
4003
- # correct. This field is set only for the top alternative.
4004
- # This field is not guaranteed to be accurate and users should not rely on it
4005
- # to be always provided.
4006
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4513
+ # correct. This field is set only for the top alternative. This field is not
4514
+ # guaranteed to be accurate and users should not rely on it to be always
4515
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4516
+ # not set.
4007
4517
  # Corresponds to the JSON property `confidence`
4008
4518
  # @return [Float]
4009
4519
  attr_accessor :confidence
@@ -4014,8 +4524,8 @@ module Google
4014
4524
  attr_accessor :transcript
4015
4525
 
4016
4526
  # Output only. A list of word-specific information for each recognized word.
4017
- # Note: When `enable_speaker_diarization` is set to true, you will see all
4018
- # the words from the beginning of the audio.
4527
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
4528
+ # words from the beginning of the audio.
4019
4529
  # Corresponds to the JSON property `words`
4020
4530
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
4021
4531
  attr_accessor :words
@@ -4036,18 +4546,17 @@ module Google
4036
4546
  class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
4037
4547
  include Google::Apis::Core::Hashable
4038
4548
 
4039
- # May contain one or more recognition hypotheses (up to the maximum specified
4040
- # in `max_alternatives`). These alternatives are ordered in terms of
4041
- # accuracy, with the top (first) alternative being the most probable, as
4042
- # ranked by the recognizer.
4549
+ # May contain one or more recognition hypotheses (up to the maximum specified in
4550
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
4551
+ # the top (first) alternative being the most probable, as ranked by the
4552
+ # recognizer.
4043
4553
  # Corresponds to the JSON property `alternatives`
4044
4554
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
4045
4555
  attr_accessor :alternatives
4046
4556
 
4047
4557
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
4048
- # language tag of
4049
- # the language in this result. This language code was detected to have the
4050
- # most likelihood of being spoken in the audio.
4558
+ # language tag of the language in this result. This language code was detected
4559
+ # to have the most likelihood of being spoken in the audio.
4051
4560
  # Corresponds to the JSON property `languageCode`
4052
4561
  # @return [String]
4053
4562
  attr_accessor :language_code
@@ -4096,27 +4605,19 @@ module Google
4096
4605
  end
4097
4606
  end
4098
4607
 
4099
- # Video frame level annotation results for text annotation (OCR).
4100
- # Contains information regarding timestamp and bounding box locations for the
4101
- # frames containing detected OCR text snippets.
4608
+ # Video frame level annotation results for text annotation (OCR). Contains
4609
+ # information regarding timestamp and bounding box locations for the frames
4610
+ # containing detected OCR text snippets.
4102
4611
  class GoogleCloudVideointelligenceV1p2beta1TextFrame
4103
4612
  include Google::Apis::Core::Hashable
4104
4613
 
4105
4614
  # Normalized bounding polygon for text (that might not be aligned with axis).
4106
- # Contains list of the corner points in clockwise order starting from
4107
- # top-left corner. For example, for a rectangular bounding box:
4108
- # When the text is horizontal it might look like:
4109
- # 0----1
4110
- # | |
4111
- # 3----2
4112
- # When it's clockwise rotated 180 degrees around the top-left corner it
4113
- # becomes:
4114
- # 2----3
4115
- # | |
4116
- # 1----0
4117
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
4118
- # than 0, or greater than 1 due to trignometric calculations for location of
4119
- # the box.
4615
+ # Contains list of the corner points in clockwise order starting from top-left
4616
+ # corner. For example, for a rectangular bounding box: When the text is
4617
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4618
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4619
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4620
+ # or greater than 1 due to trignometric calculations for location of the box.
4120
4621
  # Corresponds to the JSON property `rotatedBoundingBox`
4121
4622
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
4122
4623
  attr_accessor :rotated_bounding_box
@@ -4169,9 +4670,8 @@ module Google
4169
4670
  end
4170
4671
  end
4171
4672
 
4172
- # For tracking related features.
4173
- # An object at time_offset with attributes, and located with
4174
- # normalized_bounding_box.
4673
+ # For tracking related features. An object at time_offset with attributes, and
4674
+ # located with normalized_bounding_box.
4175
4675
  class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
4176
4676
  include Google::Apis::Core::Hashable
4177
4677
 
@@ -4185,15 +4685,14 @@ module Google
4185
4685
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
4186
4686
  attr_accessor :landmarks
4187
4687
 
4188
- # Normalized bounding box.
4189
- # The normalized vertex coordinates are relative to the original image.
4190
- # Range: [0, 1].
4688
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4689
+ # original image. Range: [0, 1].
4191
4690
  # Corresponds to the JSON property `normalizedBoundingBox`
4192
4691
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
4193
4692
  attr_accessor :normalized_bounding_box
4194
4693
 
4195
- # Time-offset, relative to the beginning of the video,
4196
- # corresponding to the video frame for this object.
4694
+ # Time-offset, relative to the beginning of the video, corresponding to the
4695
+ # video frame for this object.
4197
4696
  # Corresponds to the JSON property `timeOffset`
4198
4697
  # @return [String]
4199
4698
  attr_accessor :time_offset
@@ -4252,20 +4751,19 @@ module Google
4252
4751
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
4253
4752
  include Google::Apis::Core::Hashable
4254
4753
 
4255
- # Specifies which feature is being tracked if the request contains more than
4256
- # one feature.
4754
+ # Specifies which feature is being tracked if the request contains more than one
4755
+ # feature.
4257
4756
  # Corresponds to the JSON property `feature`
4258
4757
  # @return [String]
4259
4758
  attr_accessor :feature
4260
4759
 
4261
- # Video file location in
4262
- # [Cloud Storage](https://cloud.google.com/storage/).
4760
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4263
4761
  # Corresponds to the JSON property `inputUri`
4264
4762
  # @return [String]
4265
4763
  attr_accessor :input_uri
4266
4764
 
4267
- # Approximate percentage processed thus far. Guaranteed to be
4268
- # 100 when fully processed.
4765
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
4766
+ # processed.
4269
4767
  # Corresponds to the JSON property `progressPercent`
4270
4768
  # @return [Fixnum]
4271
4769
  attr_accessor :progress_percent
@@ -4304,31 +4802,40 @@ module Google
4304
4802
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
4305
4803
  include Google::Apis::Core::Hashable
4306
4804
 
4307
- # The `Status` type defines a logical error model that is suitable for
4308
- # different programming environments, including REST APIs and RPC APIs. It is
4309
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
4310
- # three pieces of data: error code, error message, and error details.
4311
- # You can find out more about this error model and how to work with it in the
4312
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
4805
+ # The `Status` type defines a logical error model that is suitable for different
4806
+ # programming environments, including REST APIs and RPC APIs. It is used by [
4807
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
4808
+ # data: error code, error message, and error details. You can find out more
4809
+ # about this error model and how to work with it in the [API Design Guide](https:
4810
+ # //cloud.google.com/apis/design/errors).
4313
4811
  # Corresponds to the JSON property `error`
4314
4812
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
4315
4813
  attr_accessor :error
4316
4814
 
4317
- # Explicit content annotation (based on per-frame visual signals only).
4318
- # If no explicit content has been detected in a frame, no annotations are
4319
- # present for that frame.
4815
+ # Explicit content annotation (based on per-frame visual signals only). If no
4816
+ # explicit content has been detected in a frame, no annotations are present for
4817
+ # that frame.
4320
4818
  # Corresponds to the JSON property `explicitAnnotation`
4321
4819
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
4322
4820
  attr_accessor :explicit_annotation
4323
4821
 
4324
- # Label annotations on frame level.
4325
- # There is exactly one element for each unique label.
4822
+ # Deprecated. Please use `face_detection_annotations` instead.
4823
+ # Corresponds to the JSON property `faceAnnotations`
4824
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1FaceAnnotation>]
4825
+ attr_accessor :face_annotations
4826
+
4827
+ # Face detection annotations.
4828
+ # Corresponds to the JSON property `faceDetectionAnnotations`
4829
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation>]
4830
+ attr_accessor :face_detection_annotations
4831
+
4832
+ # Label annotations on frame level. There is exactly one element for each unique
4833
+ # label.
4326
4834
  # Corresponds to the JSON property `frameLabelAnnotations`
4327
4835
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4328
4836
  attr_accessor :frame_label_annotations
4329
4837
 
4330
- # Video file location in
4331
- # [Cloud Storage](https://cloud.google.com/storage/).
4838
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4332
4839
  # Corresponds to the JSON property `inputUri`
4333
4840
  # @return [String]
4334
4841
  attr_accessor :input_uri
@@ -4343,6 +4850,11 @@ module Google
4343
4850
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation>]
4344
4851
  attr_accessor :object_annotations
4345
4852
 
4853
+ # Person detection annotations.
4854
+ # Corresponds to the JSON property `personDetectionAnnotations`
4855
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation>]
4856
+ attr_accessor :person_detection_annotations
4857
+
4346
4858
  # Video segment.
4347
4859
  # Corresponds to the JSON property `segment`
4348
4860
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
@@ -4355,11 +4867,11 @@ module Google
4355
4867
  attr_accessor :segment_label_annotations
4356
4868
 
4357
4869
  # Presence label annotations on video level or user-specified segment level.
4358
- # There is exactly one element for each unique label. Compared to the
4359
- # existing topical `segment_label_annotations`, this field presents more
4360
- # fine-grained, segment-level labels detected in video content and is made
4361
- # available only when the client sets `LabelDetectionConfig.model` to
4362
- # "builtin/latest" in the request.
4870
+ # There is exactly one element for each unique label. Compared to the existing
4871
+ # topical `segment_label_annotations`, this field presents more fine-grained,
4872
+ # segment-level labels detected in video content and is made available only when
4873
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
4874
+ # request.
4363
4875
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
4364
4876
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4365
4877
  attr_accessor :segment_presence_label_annotations
@@ -4369,17 +4881,17 @@ module Google
4369
4881
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
4370
4882
  attr_accessor :shot_annotations
4371
4883
 
4372
- # Topical label annotations on shot level.
4373
- # There is exactly one element for each unique label.
4884
+ # Topical label annotations on shot level. There is exactly one element for each
4885
+ # unique label.
4374
4886
  # Corresponds to the JSON property `shotLabelAnnotations`
4375
4887
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4376
4888
  attr_accessor :shot_label_annotations
4377
4889
 
4378
4890
  # Presence label annotations on shot level. There is exactly one element for
4379
- # each unique label. Compared to the existing topical
4380
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
4381
- # labels detected in video content and is made available only when the client
4382
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
4891
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
4892
+ # this field presents more fine-grained, shot-level labels detected in video
4893
+ # content and is made available only when the client sets `LabelDetectionConfig.
4894
+ # model` to "builtin/latest" in the request.
4383
4895
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
4384
4896
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4385
4897
  attr_accessor :shot_presence_label_annotations
@@ -4389,9 +4901,8 @@ module Google
4389
4901
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
4390
4902
  attr_accessor :speech_transcriptions
4391
4903
 
4392
- # OCR text detection and tracking.
4393
- # Annotations for list of detected text snippets. Each will have list of
4394
- # frame information associated with it.
4904
+ # OCR text detection and tracking. Annotations for list of detected text
4905
+ # snippets. Each will have list of frame information associated with it.
4395
4906
  # Corresponds to the JSON property `textAnnotations`
4396
4907
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
4397
4908
  attr_accessor :text_annotations
@@ -4404,10 +4915,13 @@ module Google
4404
4915
  def update!(**args)
4405
4916
  @error = args[:error] if args.key?(:error)
4406
4917
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
4918
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
4919
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
4407
4920
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
4408
4921
  @input_uri = args[:input_uri] if args.key?(:input_uri)
4409
4922
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
4410
4923
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
4924
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
4411
4925
  @segment = args[:segment] if args.key?(:segment)
4412
4926
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
4413
4927
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -4423,14 +4937,14 @@ module Google
4423
4937
  class GoogleCloudVideointelligenceV1p2beta1VideoSegment
4424
4938
  include Google::Apis::Core::Hashable
4425
4939
 
4426
- # Time-offset, relative to the beginning of the video,
4427
- # corresponding to the end of the segment (inclusive).
4940
+ # Time-offset, relative to the beginning of the video, corresponding to the end
4941
+ # of the segment (inclusive).
4428
4942
  # Corresponds to the JSON property `endTimeOffset`
4429
4943
  # @return [String]
4430
4944
  attr_accessor :end_time_offset
4431
4945
 
4432
- # Time-offset, relative to the beginning of the video,
4433
- # corresponding to the start of the segment (inclusive).
4946
+ # Time-offset, relative to the beginning of the video, corresponding to the
4947
+ # start of the segment (inclusive).
4434
4948
  # Corresponds to the JSON property `startTimeOffset`
4435
4949
  # @return [String]
4436
4950
  attr_accessor :start_time_offset
@@ -4447,41 +4961,41 @@ module Google
4447
4961
  end
4448
4962
 
4449
4963
  # Word-specific information for recognized words. Word information is only
4450
- # included in the response when certain request parameters are set, such
4451
- # as `enable_word_time_offsets`.
4964
+ # included in the response when certain request parameters are set, such as `
4965
+ # enable_word_time_offsets`.
4452
4966
  class GoogleCloudVideointelligenceV1p2beta1WordInfo
4453
4967
  include Google::Apis::Core::Hashable
4454
4968
 
4455
4969
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4456
4970
  # indicates an estimated greater likelihood that the recognized words are
4457
- # correct. This field is set only for the top alternative.
4458
- # This field is not guaranteed to be accurate and users should not rely on it
4459
- # to be always provided.
4460
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4971
+ # correct. This field is set only for the top alternative. This field is not
4972
+ # guaranteed to be accurate and users should not rely on it to be always
4973
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4974
+ # not set.
4461
4975
  # Corresponds to the JSON property `confidence`
4462
4976
  # @return [Float]
4463
4977
  attr_accessor :confidence
4464
4978
 
4465
- # Time offset relative to the beginning of the audio, and
4466
- # corresponding to the end of the spoken word. This field is only set if
4467
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4468
- # experimental feature and the accuracy of the time offset can vary.
4979
+ # Time offset relative to the beginning of the audio, and corresponding to the
4980
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
4981
+ # true` and only in the top hypothesis. This is an experimental feature and the
4982
+ # accuracy of the time offset can vary.
4469
4983
  # Corresponds to the JSON property `endTime`
4470
4984
  # @return [String]
4471
4985
  attr_accessor :end_time
4472
4986
 
4473
- # Output only. A distinct integer value is assigned for every speaker within
4474
- # the audio. This field specifies which one of those speakers was detected to
4475
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
4476
- # and is only set if speaker diarization is enabled.
4987
+ # Output only. A distinct integer value is assigned for every speaker within the
4988
+ # audio. This field specifies which one of those speakers was detected to have
4989
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
4990
+ # only set if speaker diarization is enabled.
4477
4991
  # Corresponds to the JSON property `speakerTag`
4478
4992
  # @return [Fixnum]
4479
4993
  attr_accessor :speaker_tag
4480
4994
 
4481
- # Time offset relative to the beginning of the audio, and
4482
- # corresponding to the start of the spoken word. This field is only set if
4483
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4484
- # experimental feature and the accuracy of the time offset can vary.
4995
+ # Time offset relative to the beginning of the audio, and corresponding to the
4996
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
4997
+ # true` and only in the top hypothesis. This is an experimental feature and the
4998
+ # accuracy of the time offset can vary.
4485
4999
  # Corresponds to the JSON property `startTime`
4486
5000
  # @return [String]
4487
5001
  attr_accessor :start_time
@@ -4505,9 +5019,9 @@ module Google
4505
5019
  end
4506
5020
  end
4507
5021
 
4508
- # Video annotation progress. Included in the `metadata`
4509
- # field of the `Operation` returned by the `GetOperation`
4510
- # call of the `google::longrunning::Operations` service.
5022
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
5023
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
5024
+ # service.
4511
5025
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
4512
5026
  include Google::Apis::Core::Hashable
4513
5027
 
@@ -4526,9 +5040,9 @@ module Google
4526
5040
  end
4527
5041
  end
4528
5042
 
4529
- # Video annotation response. Included in the `response`
4530
- # field of the `Operation` returned by the `GetOperation`
4531
- # call of the `google::longrunning::Operations` service.
5043
+ # Video annotation response. Included in the `response` field of the `Operation`
5044
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
5045
+ # service.
4532
5046
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
4533
5047
  include Google::Apis::Core::Hashable
4534
5048
 
@@ -4562,10 +5076,9 @@ module Google
4562
5076
  # @return [String]
4563
5077
  attr_accessor :display_name
4564
5078
 
4565
- # The resource name of the celebrity. Have the format
4566
- # `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
4567
- # kg-mid is the id in Google knowledge graph, which is unique for the
4568
- # celebrity.
5079
+ # The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
5080
+ # indicates a celebrity from preloaded gallery. kg-mid is the id in Google
5081
+ # knowledge graph, which is unique for the celebrity.
4569
5082
  # Corresponds to the JSON property `name`
4570
5083
  # @return [String]
4571
5084
  attr_accessor :name
@@ -4586,8 +5099,8 @@ module Google
4586
5099
  class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
4587
5100
  include Google::Apis::Core::Hashable
4588
5101
 
4589
- # The tracks detected from the input video, including recognized celebrities
4590
- # and other detected faces in the video.
5102
+ # The tracks detected from the input video, including recognized celebrities and
5103
+ # other detected faces in the video.
4591
5104
  # Corresponds to the JSON property `celebrityTracks`
4592
5105
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
4593
5106
  attr_accessor :celebrity_tracks
@@ -4643,14 +5156,14 @@ module Google
4643
5156
  # @return [Float]
4644
5157
  attr_accessor :confidence
4645
5158
 
4646
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
4647
- # A full list of supported type names will be provided in the document.
5159
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
5160
+ # full list of supported type names will be provided in the document.
4648
5161
  # Corresponds to the JSON property `name`
4649
5162
  # @return [String]
4650
5163
  attr_accessor :name
4651
5164
 
4652
- # Text value of the detection result. For example, the value for "HairColor"
4653
- # can be "black", "blonde", etc.
5165
+ # Text value of the detection result. For example, the value for "HairColor" can
5166
+ # be "black", "blonde", etc.
4654
5167
  # Corresponds to the JSON property `value`
4655
5168
  # @return [String]
4656
5169
  attr_accessor :value
@@ -4682,9 +5195,8 @@ module Google
4682
5195
  # @return [String]
4683
5196
  attr_accessor :name
4684
5197
 
4685
- # A vertex represents a 2D point in the image.
4686
- # NOTE: the normalized vertex coordinates are relative to the original image
4687
- # and range from 0 to 1.
5198
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
5199
+ # coordinates are relative to the original image and range from 0 to 1.
4688
5200
  # Corresponds to the JSON property `point`
4689
5201
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
4690
5202
  attr_accessor :point
@@ -4710,8 +5222,7 @@ module Google
4710
5222
  # @return [String]
4711
5223
  attr_accessor :description
4712
5224
 
4713
- # Opaque entity ID. Some IDs may be available in
4714
- # [Google Knowledge Graph Search
5225
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
4715
5226
  # API](https://developers.google.com/knowledge-graph/).
4716
5227
  # Corresponds to the JSON property `entityId`
4717
5228
  # @return [String]
@@ -4734,9 +5245,9 @@ module Google
4734
5245
  end
4735
5246
  end
4736
5247
 
4737
- # Explicit content annotation (based on per-frame visual signals only).
4738
- # If no explicit content has been detected in a frame, no annotations are
4739
- # present for that frame.
5248
+ # Explicit content annotation (based on per-frame visual signals only). If no
5249
+ # explicit content has been detected in a frame, no annotations are present for
5250
+ # that frame.
4740
5251
  class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
4741
5252
  include Google::Apis::Core::Hashable
4742
5253
 
@@ -4787,20 +5298,41 @@ module Google
4787
5298
  end
4788
5299
  end
4789
5300
 
4790
- # Face detection annotation.
4791
- class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
5301
+ # Deprecated. No effect.
5302
+ class GoogleCloudVideointelligenceV1p3beta1FaceAnnotation
4792
5303
  include Google::Apis::Core::Hashable
4793
5304
 
4794
- # The thumbnail of a person's face.
5305
+ # All video frames where a face was detected.
5306
+ # Corresponds to the JSON property `frames`
5307
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1FaceFrame>]
5308
+ attr_accessor :frames
5309
+
5310
+ # All video segments where a face was detected.
5311
+ # Corresponds to the JSON property `segments`
5312
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1FaceSegment>]
5313
+ attr_accessor :segments
5314
+
5315
+ # Thumbnail of a representative face view (in JPEG format).
4795
5316
  # Corresponds to the JSON property `thumbnail`
4796
5317
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
4797
5318
  # @return [String]
4798
5319
  attr_accessor :thumbnail
4799
5320
 
4800
- # The face tracks with attributes.
4801
- # Corresponds to the JSON property `tracks`
4802
- # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Track>]
4803
- attr_accessor :tracks
5321
+ def initialize(**args)
5322
+ update!(**args)
5323
+ end
5324
+
5325
+ # Update properties of this object
5326
+ def update!(**args)
5327
+ @frames = args[:frames] if args.key?(:frames)
5328
+ @segments = args[:segments] if args.key?(:segments)
5329
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
5330
+ end
5331
+ end
5332
+
5333
+ # Face detection annotation.
5334
+ class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
5335
+ include Google::Apis::Core::Hashable
4804
5336
 
4805
5337
  # Feature version.
4806
5338
  # Corresponds to the JSON property `version`
@@ -4813,20 +5345,63 @@ module Google
4813
5345
 
4814
5346
  # Update properties of this object
4815
5347
  def update!(**args)
4816
- @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
4817
- @tracks = args[:tracks] if args.key?(:tracks)
4818
5348
  @version = args[:version] if args.key?(:version)
4819
5349
  end
4820
5350
  end
4821
5351
 
5352
+ # Deprecated. No effect.
5353
+ class GoogleCloudVideointelligenceV1p3beta1FaceFrame
5354
+ include Google::Apis::Core::Hashable
5355
+
5356
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
5357
+ # same face is detected in multiple locations within the current frame.
5358
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
5359
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox>]
5360
+ attr_accessor :normalized_bounding_boxes
5361
+
5362
+ # Time-offset, relative to the beginning of the video, corresponding to the
5363
+ # video frame for this location.
5364
+ # Corresponds to the JSON property `timeOffset`
5365
+ # @return [String]
5366
+ attr_accessor :time_offset
5367
+
5368
+ def initialize(**args)
5369
+ update!(**args)
5370
+ end
5371
+
5372
+ # Update properties of this object
5373
+ def update!(**args)
5374
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
5375
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
5376
+ end
5377
+ end
5378
+
5379
+ # Video segment level annotation results for face detection.
5380
+ class GoogleCloudVideointelligenceV1p3beta1FaceSegment
5381
+ include Google::Apis::Core::Hashable
5382
+
5383
+ # Video segment.
5384
+ # Corresponds to the JSON property `segment`
5385
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
5386
+ attr_accessor :segment
5387
+
5388
+ def initialize(**args)
5389
+ update!(**args)
5390
+ end
5391
+
5392
+ # Update properties of this object
5393
+ def update!(**args)
5394
+ @segment = args[:segment] if args.key?(:segment)
5395
+ end
5396
+ end
5397
+
4822
5398
  # Label annotation.
4823
5399
  class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
4824
5400
  include Google::Apis::Core::Hashable
4825
5401
 
4826
- # Common categories for the detected entity.
4827
- # For example, when the label is `Terrier`, the category is likely `dog`. And
4828
- # in some cases there might be more than one categories e.g., `Terrier` could
4829
- # also be a `pet`.
5402
+ # Common categories for the detected entity. For example, when the label is `
5403
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
5404
+ # than one categories e.g., `Terrier` could also be a `pet`.
4830
5405
  # Corresponds to the JSON property `categoryEntities`
4831
5406
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Entity>]
4832
5407
  attr_accessor :category_entities
@@ -4925,14 +5500,14 @@ module Google
4925
5500
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Entity]
4926
5501
  attr_accessor :entity
4927
5502
 
4928
- # All video segments where the recognized logo appears. There might be
4929
- # multiple instances of the same logo class appearing in one VideoSegment.
5503
+ # All video segments where the recognized logo appears. There might be multiple
5504
+ # instances of the same logo class appearing in one VideoSegment.
4930
5505
  # Corresponds to the JSON property `segments`
4931
5506
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
4932
5507
  attr_accessor :segments
4933
5508
 
4934
- # All logo tracks where the recognized logo appears. Each track corresponds
4935
- # to one logo instance appearing in consecutive frames.
5509
+ # All logo tracks where the recognized logo appears. Each track corresponds to
5510
+ # one logo instance appearing in consecutive frames.
4936
5511
  # Corresponds to the JSON property `tracks`
4937
5512
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Track>]
4938
5513
  attr_accessor :tracks
@@ -4949,9 +5524,8 @@ module Google
4949
5524
  end
4950
5525
  end
4951
5526
 
4952
- # Normalized bounding box.
4953
- # The normalized vertex coordinates are relative to the original image.
4954
- # Range: [0, 1].
5527
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
5528
+ # original image. Range: [0, 1].
4955
5529
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
4956
5530
  include Google::Apis::Core::Hashable
4957
5531
 
@@ -4989,20 +5563,12 @@ module Google
4989
5563
  end
4990
5564
 
4991
5565
  # Normalized bounding polygon for text (that might not be aligned with axis).
4992
- # Contains list of the corner points in clockwise order starting from
4993
- # top-left corner. For example, for a rectangular bounding box:
4994
- # When the text is horizontal it might look like:
4995
- # 0----1
4996
- # | |
4997
- # 3----2
4998
- # When it's clockwise rotated 180 degrees around the top-left corner it
4999
- # becomes:
5000
- # 2----3
5001
- # | |
5002
- # 1----0
5003
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5004
- # than 0, or greater than 1 due to trignometric calculations for location of
5005
- # the box.
5566
+ # Contains list of the corner points in clockwise order starting from top-left
5567
+ # corner. For example, for a rectangular bounding box: When the text is
5568
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5569
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5570
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5571
+ # or greater than 1 due to trignometric calculations for location of the box.
5006
5572
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
5007
5573
  include Google::Apis::Core::Hashable
5008
5574
 
@@ -5021,9 +5587,8 @@ module Google
5021
5587
  end
5022
5588
  end
5023
5589
 
5024
- # A vertex represents a 2D point in the image.
5025
- # NOTE: the normalized vertex coordinates are relative to the original image
5026
- # and range from 0 to 1.
5590
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
5591
+ # coordinates are relative to the original image and range from 0 to 1.
5027
5592
  class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
5028
5593
  include Google::Apis::Core::Hashable
5029
5594
 
@@ -5062,10 +5627,10 @@ module Google
5062
5627
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1Entity]
5063
5628
  attr_accessor :entity
5064
5629
 
5065
- # Information corresponding to all frames where this object track appears.
5066
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
5067
- # messages in frames.
5068
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
5630
+ # Information corresponding to all frames where this object track appears. Non-
5631
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
5632
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
5633
+ # frames.
5069
5634
  # Corresponds to the JSON property `frames`
5070
5635
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
5071
5636
  attr_accessor :frames
@@ -5075,12 +5640,11 @@ module Google
5075
5640
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
5076
5641
  attr_accessor :segment
5077
5642
 
5078
- # Streaming mode ONLY.
5079
- # In streaming mode, we do not know the end time of a tracked object
5080
- # before it is completed. Hence, there is no VideoSegment info returned.
5081
- # Instead, we provide a unique identifiable integer track_id so that
5082
- # the customers can correlate the results of the ongoing
5083
- # ObjectTrackAnnotation of the same track_id over time.
5643
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
5644
+ # tracked object before it is completed. Hence, there is no VideoSegment info
5645
+ # returned. Instead, we provide a unique identifiable integer track_id so that
5646
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
5647
+ # of the same track_id over time.
5084
5648
  # Corresponds to the JSON property `trackId`
5085
5649
  # @return [Fixnum]
5086
5650
  attr_accessor :track_id
@@ -5110,9 +5674,8 @@ module Google
5110
5674
  class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
5111
5675
  include Google::Apis::Core::Hashable
5112
5676
 
5113
- # Normalized bounding box.
5114
- # The normalized vertex coordinates are relative to the original image.
5115
- # Range: [0, 1].
5677
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
5678
+ # original image. Range: [0, 1].
5116
5679
  # Corresponds to the JSON property `normalizedBoundingBox`
5117
5680
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5118
5681
  attr_accessor :normalized_bounding_box
@@ -5189,10 +5752,10 @@ module Google
5189
5752
 
5190
5753
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5191
5754
  # indicates an estimated greater likelihood that the recognized words are
5192
- # correct. This field is set only for the top alternative.
5193
- # This field is not guaranteed to be accurate and users should not rely on it
5194
- # to be always provided.
5195
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
5755
+ # correct. This field is set only for the top alternative. This field is not
5756
+ # guaranteed to be accurate and users should not rely on it to be always
5757
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
5758
+ # not set.
5196
5759
  # Corresponds to the JSON property `confidence`
5197
5760
  # @return [Float]
5198
5761
  attr_accessor :confidence
@@ -5203,8 +5766,8 @@ module Google
5203
5766
  attr_accessor :transcript
5204
5767
 
5205
5768
  # Output only. A list of word-specific information for each recognized word.
5206
- # Note: When `enable_speaker_diarization` is set to true, you will see all
5207
- # the words from the beginning of the audio.
5769
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
5770
+ # words from the beginning of the audio.
5208
5771
  # Corresponds to the JSON property `words`
5209
5772
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
5210
5773
  attr_accessor :words
@@ -5225,18 +5788,17 @@ module Google
5225
5788
  class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
5226
5789
  include Google::Apis::Core::Hashable
5227
5790
 
5228
- # May contain one or more recognition hypotheses (up to the maximum specified
5229
- # in `max_alternatives`). These alternatives are ordered in terms of
5230
- # accuracy, with the top (first) alternative being the most probable, as
5231
- # ranked by the recognizer.
5791
+ # May contain one or more recognition hypotheses (up to the maximum specified in
5792
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
5793
+ # the top (first) alternative being the most probable, as ranked by the
5794
+ # recognizer.
5232
5795
  # Corresponds to the JSON property `alternatives`
5233
5796
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
5234
5797
  attr_accessor :alternatives
5235
5798
 
5236
5799
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
5237
- # language tag of
5238
- # the language in this result. This language code was detected to have the
5239
- # most likelihood of being spoken in the audio.
5800
+ # language tag of the language in this result. This language code was detected
5801
+ # to have the most likelihood of being spoken in the audio.
5240
5802
  # Corresponds to the JSON property `languageCode`
5241
5803
  # @return [String]
5242
5804
  attr_accessor :language_code
@@ -5252,32 +5814,32 @@ module Google
5252
5814
  end
5253
5815
  end
5254
5816
 
5255
- # `StreamingAnnotateVideoResponse` is the only message returned to the client
5256
- # by `StreamingAnnotateVideo`. A series of zero or more
5257
- # `StreamingAnnotateVideoResponse` messages are streamed back to the client.
5817
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
5818
+ # `StreamingAnnotateVideo`. A series of zero or more `
5819
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
5258
5820
  class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
5259
5821
  include Google::Apis::Core::Hashable
5260
5822
 
5261
- # Streaming annotation results corresponding to a portion of the video
5262
- # that is currently being processed.
5823
+ # Streaming annotation results corresponding to a portion of the video that is
5824
+ # currently being processed. Only ONE type of annotation will be specified in
5825
+ # the response.
5263
5826
  # Corresponds to the JSON property `annotationResults`
5264
5827
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
5265
5828
  attr_accessor :annotation_results
5266
5829
 
5267
- # Google Cloud Storage URI that stores annotation results of one
5268
- # streaming session in JSON format.
5269
- # It is the annotation_result_storage_directory
5270
- # from the request followed by '/cloud_project_number-session_id'.
5830
+ # Google Cloud Storage URI that stores annotation results of one streaming
5831
+ # session in JSON format. It is the annotation_result_storage_directory from the
5832
+ # request followed by '/cloud_project_number-session_id'.
5271
5833
  # Corresponds to the JSON property `annotationResultsUri`
5272
5834
  # @return [String]
5273
5835
  attr_accessor :annotation_results_uri
5274
5836
 
5275
- # The `Status` type defines a logical error model that is suitable for
5276
- # different programming environments, including REST APIs and RPC APIs. It is
5277
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5278
- # three pieces of data: error code, error message, and error details.
5279
- # You can find out more about this error model and how to work with it in the
5280
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5837
+ # The `Status` type defines a logical error model that is suitable for different
5838
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5839
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5840
+ # data: error code, error message, and error details. You can find out more
5841
+ # about this error model and how to work with it in the [API Design Guide](https:
5842
+ # //cloud.google.com/apis/design/errors).
5281
5843
  # Corresponds to the JSON property `error`
5282
5844
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
5283
5845
  attr_accessor :error
@@ -5294,18 +5856,24 @@ module Google
5294
5856
  end
5295
5857
  end
5296
5858
 
5297
- # Streaming annotation results corresponding to a portion of the video
5298
- # that is currently being processed.
5859
+ # Streaming annotation results corresponding to a portion of the video that is
5860
+ # currently being processed. Only ONE type of annotation will be specified in
5861
+ # the response.
5299
5862
  class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
5300
5863
  include Google::Apis::Core::Hashable
5301
5864
 
5302
- # Explicit content annotation (based on per-frame visual signals only).
5303
- # If no explicit content has been detected in a frame, no annotations are
5304
- # present for that frame.
5865
+ # Explicit content annotation (based on per-frame visual signals only). If no
5866
+ # explicit content has been detected in a frame, no annotations are present for
5867
+ # that frame.
5305
5868
  # Corresponds to the JSON property `explicitAnnotation`
5306
5869
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5307
5870
  attr_accessor :explicit_annotation
5308
5871
 
5872
+ # Timestamp of the processed frame in microseconds.
5873
+ # Corresponds to the JSON property `frameTimestamp`
5874
+ # @return [String]
5875
+ attr_accessor :frame_timestamp
5876
+
5309
5877
  # Label annotation results.
5310
5878
  # Corresponds to the JSON property `labelAnnotations`
5311
5879
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
@@ -5328,6 +5896,7 @@ module Google
5328
5896
  # Update properties of this object
5329
5897
  def update!(**args)
5330
5898
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
5899
+ @frame_timestamp = args[:frame_timestamp] if args.key?(:frame_timestamp)
5331
5900
  @label_annotations = args[:label_annotations] if args.key?(:label_annotations)
5332
5901
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
5333
5902
  @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
@@ -5367,27 +5936,19 @@ module Google
5367
5936
  end
5368
5937
  end
5369
5938
 
5370
- # Video frame level annotation results for text annotation (OCR).
5371
- # Contains information regarding timestamp and bounding box locations for the
5372
- # frames containing detected OCR text snippets.
5939
+ # Video frame level annotation results for text annotation (OCR). Contains
5940
+ # information regarding timestamp and bounding box locations for the frames
5941
+ # containing detected OCR text snippets.
5373
5942
  class GoogleCloudVideointelligenceV1p3beta1TextFrame
5374
5943
  include Google::Apis::Core::Hashable
5375
5944
 
5376
5945
  # Normalized bounding polygon for text (that might not be aligned with axis).
5377
- # Contains list of the corner points in clockwise order starting from
5378
- # top-left corner. For example, for a rectangular bounding box:
5379
- # When the text is horizontal it might look like:
5380
- # 0----1
5381
- # | |
5382
- # 3----2
5383
- # When it's clockwise rotated 180 degrees around the top-left corner it
5384
- # becomes:
5385
- # 2----3
5386
- # | |
5387
- # 1----0
5388
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5389
- # than 0, or greater than 1 due to trignometric calculations for location of
5390
- # the box.
5946
+ # Contains list of the corner points in clockwise order starting from top-left
5947
+ # corner. For example, for a rectangular bounding box: When the text is
5948
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5949
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5950
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5951
+ # or greater than 1 due to trignometric calculations for location of the box.
5391
5952
  # Corresponds to the JSON property `rotatedBoundingBox`
5392
5953
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
5393
5954
  attr_accessor :rotated_bounding_box
@@ -5440,9 +6001,8 @@ module Google
5440
6001
  end
5441
6002
  end
5442
6003
 
5443
- # For tracking related features.
5444
- # An object at time_offset with attributes, and located with
5445
- # normalized_bounding_box.
6004
+ # For tracking related features. An object at time_offset with attributes, and
6005
+ # located with normalized_bounding_box.
5446
6006
  class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
5447
6007
  include Google::Apis::Core::Hashable
5448
6008
 
@@ -5456,15 +6016,14 @@ module Google
5456
6016
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
5457
6017
  attr_accessor :landmarks
5458
6018
 
5459
- # Normalized bounding box.
5460
- # The normalized vertex coordinates are relative to the original image.
5461
- # Range: [0, 1].
6019
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
6020
+ # original image. Range: [0, 1].
5462
6021
  # Corresponds to the JSON property `normalizedBoundingBox`
5463
6022
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5464
6023
  attr_accessor :normalized_bounding_box
5465
6024
 
5466
- # Time-offset, relative to the beginning of the video,
5467
- # corresponding to the video frame for this object.
6025
+ # Time-offset, relative to the beginning of the video, corresponding to the
6026
+ # video frame for this object.
5468
6027
  # Corresponds to the JSON property `timeOffset`
5469
6028
  # @return [String]
5470
6029
  attr_accessor :time_offset
@@ -5523,20 +6082,19 @@ module Google
5523
6082
  class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
5524
6083
  include Google::Apis::Core::Hashable
5525
6084
 
5526
- # Specifies which feature is being tracked if the request contains more than
5527
- # one feature.
6085
+ # Specifies which feature is being tracked if the request contains more than one
6086
+ # feature.
5528
6087
  # Corresponds to the JSON property `feature`
5529
6088
  # @return [String]
5530
6089
  attr_accessor :feature
5531
6090
 
5532
- # Video file location in
5533
- # [Cloud Storage](https://cloud.google.com/storage/).
6091
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5534
6092
  # Corresponds to the JSON property `inputUri`
5535
6093
  # @return [String]
5536
6094
  attr_accessor :input_uri
5537
6095
 
5538
- # Approximate percentage processed thus far. Guaranteed to be
5539
- # 100 when fully processed.
6096
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
6097
+ # processed.
5540
6098
  # Corresponds to the JSON property `progressPercent`
5541
6099
  # @return [Fixnum]
5542
6100
  attr_accessor :progress_percent
@@ -5580,36 +6138,40 @@ module Google
5580
6138
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
5581
6139
  attr_accessor :celebrity_recognition_annotations
5582
6140
 
5583
- # The `Status` type defines a logical error model that is suitable for
5584
- # different programming environments, including REST APIs and RPC APIs. It is
5585
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5586
- # three pieces of data: error code, error message, and error details.
5587
- # You can find out more about this error model and how to work with it in the
5588
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
6141
+ # The `Status` type defines a logical error model that is suitable for different
6142
+ # programming environments, including REST APIs and RPC APIs. It is used by [
6143
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
6144
+ # data: error code, error message, and error details. You can find out more
6145
+ # about this error model and how to work with it in the [API Design Guide](https:
6146
+ # //cloud.google.com/apis/design/errors).
5589
6147
  # Corresponds to the JSON property `error`
5590
6148
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
5591
6149
  attr_accessor :error
5592
6150
 
5593
- # Explicit content annotation (based on per-frame visual signals only).
5594
- # If no explicit content has been detected in a frame, no annotations are
5595
- # present for that frame.
6151
+ # Explicit content annotation (based on per-frame visual signals only). If no
6152
+ # explicit content has been detected in a frame, no annotations are present for
6153
+ # that frame.
5596
6154
  # Corresponds to the JSON property `explicitAnnotation`
5597
6155
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5598
6156
  attr_accessor :explicit_annotation
5599
6157
 
6158
+ # Deprecated. Please use `face_detection_annotations` instead.
6159
+ # Corresponds to the JSON property `faceAnnotations`
6160
+ # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1FaceAnnotation>]
6161
+ attr_accessor :face_annotations
6162
+
5600
6163
  # Face detection annotations.
5601
6164
  # Corresponds to the JSON property `faceDetectionAnnotations`
5602
6165
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
5603
6166
  attr_accessor :face_detection_annotations
5604
6167
 
5605
- # Label annotations on frame level.
5606
- # There is exactly one element for each unique label.
6168
+ # Label annotations on frame level. There is exactly one element for each unique
6169
+ # label.
5607
6170
  # Corresponds to the JSON property `frameLabelAnnotations`
5608
6171
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5609
6172
  attr_accessor :frame_label_annotations
5610
6173
 
5611
- # Video file location in
5612
- # [Cloud Storage](https://cloud.google.com/storage/).
6174
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5613
6175
  # Corresponds to the JSON property `inputUri`
5614
6176
  # @return [String]
5615
6177
  attr_accessor :input_uri
@@ -5641,11 +6203,11 @@ module Google
5641
6203
  attr_accessor :segment_label_annotations
5642
6204
 
5643
6205
  # Presence label annotations on video level or user-specified segment level.
5644
- # There is exactly one element for each unique label. Compared to the
5645
- # existing topical `segment_label_annotations`, this field presents more
5646
- # fine-grained, segment-level labels detected in video content and is made
5647
- # available only when the client sets `LabelDetectionConfig.model` to
5648
- # "builtin/latest" in the request.
6206
+ # There is exactly one element for each unique label. Compared to the existing
6207
+ # topical `segment_label_annotations`, this field presents more fine-grained,
6208
+ # segment-level labels detected in video content and is made available only when
6209
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
6210
+ # request.
5649
6211
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
5650
6212
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5651
6213
  attr_accessor :segment_presence_label_annotations
@@ -5655,17 +6217,17 @@ module Google
5655
6217
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
5656
6218
  attr_accessor :shot_annotations
5657
6219
 
5658
- # Topical label annotations on shot level.
5659
- # There is exactly one element for each unique label.
6220
+ # Topical label annotations on shot level. There is exactly one element for each
6221
+ # unique label.
5660
6222
  # Corresponds to the JSON property `shotLabelAnnotations`
5661
6223
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5662
6224
  attr_accessor :shot_label_annotations
5663
6225
 
5664
6226
  # Presence label annotations on shot level. There is exactly one element for
5665
- # each unique label. Compared to the existing topical
5666
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
5667
- # labels detected in video content and is made available only when the client
5668
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
6227
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
6228
+ # this field presents more fine-grained, shot-level labels detected in video
6229
+ # content and is made available only when the client sets `LabelDetectionConfig.
6230
+ # model` to "builtin/latest" in the request.
5669
6231
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
5670
6232
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5671
6233
  attr_accessor :shot_presence_label_annotations
@@ -5675,9 +6237,8 @@ module Google
5675
6237
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
5676
6238
  attr_accessor :speech_transcriptions
5677
6239
 
5678
- # OCR text detection and tracking.
5679
- # Annotations for list of detected text snippets. Each will have list of
5680
- # frame information associated with it.
6240
+ # OCR text detection and tracking. Annotations for list of detected text
6241
+ # snippets. Each will have list of frame information associated with it.
5681
6242
  # Corresponds to the JSON property `textAnnotations`
5682
6243
  # @return [Array<Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
5683
6244
  attr_accessor :text_annotations
@@ -5691,6 +6252,7 @@ module Google
5691
6252
  @celebrity_recognition_annotations = args[:celebrity_recognition_annotations] if args.key?(:celebrity_recognition_annotations)
5692
6253
  @error = args[:error] if args.key?(:error)
5693
6254
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
6255
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
5694
6256
  @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
5695
6257
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
5696
6258
  @input_uri = args[:input_uri] if args.key?(:input_uri)
@@ -5712,14 +6274,14 @@ module Google
5712
6274
  class GoogleCloudVideointelligenceV1p3beta1VideoSegment
5713
6275
  include Google::Apis::Core::Hashable
5714
6276
 
5715
- # Time-offset, relative to the beginning of the video,
5716
- # corresponding to the end of the segment (inclusive).
6277
+ # Time-offset, relative to the beginning of the video, corresponding to the end
6278
+ # of the segment (inclusive).
5717
6279
  # Corresponds to the JSON property `endTimeOffset`
5718
6280
  # @return [String]
5719
6281
  attr_accessor :end_time_offset
5720
6282
 
5721
- # Time-offset, relative to the beginning of the video,
5722
- # corresponding to the start of the segment (inclusive).
6283
+ # Time-offset, relative to the beginning of the video, corresponding to the
6284
+ # start of the segment (inclusive).
5723
6285
  # Corresponds to the JSON property `startTimeOffset`
5724
6286
  # @return [String]
5725
6287
  attr_accessor :start_time_offset
@@ -5736,41 +6298,41 @@ module Google
5736
6298
  end
5737
6299
 
5738
6300
  # Word-specific information for recognized words. Word information is only
5739
- # included in the response when certain request parameters are set, such
5740
- # as `enable_word_time_offsets`.
6301
+ # included in the response when certain request parameters are set, such as `
6302
+ # enable_word_time_offsets`.
5741
6303
  class GoogleCloudVideointelligenceV1p3beta1WordInfo
5742
6304
  include Google::Apis::Core::Hashable
5743
6305
 
5744
6306
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5745
6307
  # indicates an estimated greater likelihood that the recognized words are
5746
- # correct. This field is set only for the top alternative.
5747
- # This field is not guaranteed to be accurate and users should not rely on it
5748
- # to be always provided.
5749
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
6308
+ # correct. This field is set only for the top alternative. This field is not
6309
+ # guaranteed to be accurate and users should not rely on it to be always
6310
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
6311
+ # not set.
5750
6312
  # Corresponds to the JSON property `confidence`
5751
6313
  # @return [Float]
5752
6314
  attr_accessor :confidence
5753
6315
 
5754
- # Time offset relative to the beginning of the audio, and
5755
- # corresponding to the end of the spoken word. This field is only set if
5756
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
5757
- # experimental feature and the accuracy of the time offset can vary.
6316
+ # Time offset relative to the beginning of the audio, and corresponding to the
6317
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
6318
+ # true` and only in the top hypothesis. This is an experimental feature and the
6319
+ # accuracy of the time offset can vary.
5758
6320
  # Corresponds to the JSON property `endTime`
5759
6321
  # @return [String]
5760
6322
  attr_accessor :end_time
5761
6323
 
5762
- # Output only. A distinct integer value is assigned for every speaker within
5763
- # the audio. This field specifies which one of those speakers was detected to
5764
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
5765
- # and is only set if speaker diarization is enabled.
6324
+ # Output only. A distinct integer value is assigned for every speaker within the
6325
+ # audio. This field specifies which one of those speakers was detected to have
6326
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
6327
+ # only set if speaker diarization is enabled.
5766
6328
  # Corresponds to the JSON property `speakerTag`
5767
6329
  # @return [Fixnum]
5768
6330
  attr_accessor :speaker_tag
5769
6331
 
5770
- # Time offset relative to the beginning of the audio, and
5771
- # corresponding to the start of the spoken word. This field is only set if
5772
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
5773
- # experimental feature and the accuracy of the time offset can vary.
6332
+ # Time offset relative to the beginning of the audio, and corresponding to the
6333
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
6334
+ # true` and only in the top hypothesis. This is an experimental feature and the
6335
+ # accuracy of the time offset can vary.
5774
6336
  # Corresponds to the JSON property `startTime`
5775
6337
  # @return [String]
5776
6338
  attr_accessor :start_time
@@ -5799,47 +6361,45 @@ module Google
5799
6361
  class GoogleLongrunningOperation
5800
6362
  include Google::Apis::Core::Hashable
5801
6363
 
5802
- # If the value is `false`, it means the operation is still in progress.
5803
- # If `true`, the operation is completed, and either `error` or `response` is
5804
- # available.
6364
+ # If the value is `false`, it means the operation is still in progress. If `true`
6365
+ # , the operation is completed, and either `error` or `response` is available.
5805
6366
  # Corresponds to the JSON property `done`
5806
6367
  # @return [Boolean]
5807
6368
  attr_accessor :done
5808
6369
  alias_method :done?, :done
5809
6370
 
5810
- # The `Status` type defines a logical error model that is suitable for
5811
- # different programming environments, including REST APIs and RPC APIs. It is
5812
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5813
- # three pieces of data: error code, error message, and error details.
5814
- # You can find out more about this error model and how to work with it in the
5815
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
6371
+ # The `Status` type defines a logical error model that is suitable for different
6372
+ # programming environments, including REST APIs and RPC APIs. It is used by [
6373
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
6374
+ # data: error code, error message, and error details. You can find out more
6375
+ # about this error model and how to work with it in the [API Design Guide](https:
6376
+ # //cloud.google.com/apis/design/errors).
5816
6377
  # Corresponds to the JSON property `error`
5817
6378
  # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus]
5818
6379
  attr_accessor :error
5819
6380
 
5820
- # Service-specific metadata associated with the operation. It typically
5821
- # contains progress information and common metadata such as create time.
5822
- # Some services might not provide such metadata. Any method that returns a
5823
- # long-running operation should document the metadata type, if any.
6381
+ # Service-specific metadata associated with the operation. It typically contains
6382
+ # progress information and common metadata such as create time. Some services
6383
+ # might not provide such metadata. Any method that returns a long-running
6384
+ # operation should document the metadata type, if any.
5824
6385
  # Corresponds to the JSON property `metadata`
5825
6386
  # @return [Hash<String,Object>]
5826
6387
  attr_accessor :metadata
5827
6388
 
5828
6389
  # The server-assigned name, which is only unique within the same service that
5829
- # originally returns it. If you use the default HTTP mapping, the
5830
- # `name` should be a resource name ending with `operations/`unique_id``.
6390
+ # originally returns it. If you use the default HTTP mapping, the `name` should
6391
+ # be a resource name ending with `operations/`unique_id``.
5831
6392
  # Corresponds to the JSON property `name`
5832
6393
  # @return [String]
5833
6394
  attr_accessor :name
5834
6395
 
5835
- # The normal response of the operation in case of success. If the original
5836
- # method returns no data on success, such as `Delete`, the response is
5837
- # `google.protobuf.Empty`. If the original method is standard
5838
- # `Get`/`Create`/`Update`, the response should be the resource. For other
5839
- # methods, the response should have the type `XxxResponse`, where `Xxx`
5840
- # is the original method name. For example, if the original method name
5841
- # is `TakeSnapshot()`, the inferred response type is
5842
- # `TakeSnapshotResponse`.
6396
+ # The normal response of the operation in case of success. If the original
6397
+ # method returns no data on success, such as `Delete`, the response is `google.
6398
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
6399
+ # the response should be the resource. For other methods, the response should
6400
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
6401
+ # example, if the original method name is `TakeSnapshot()`, the inferred
6402
+ # response type is `TakeSnapshotResponse`.
5843
6403
  # Corresponds to the JSON property `response`
5844
6404
  # @return [Hash<String,Object>]
5845
6405
  attr_accessor :response
@@ -5858,12 +6418,12 @@ module Google
5858
6418
  end
5859
6419
  end
5860
6420
 
5861
- # The `Status` type defines a logical error model that is suitable for
5862
- # different programming environments, including REST APIs and RPC APIs. It is
5863
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5864
- # three pieces of data: error code, error message, and error details.
5865
- # You can find out more about this error model and how to work with it in the
5866
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
6421
+ # The `Status` type defines a logical error model that is suitable for different
6422
+ # programming environments, including REST APIs and RPC APIs. It is used by [
6423
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
6424
+ # data: error code, error message, and error details. You can find out more
6425
+ # about this error model and how to work with it in the [API Design Guide](https:
6426
+ # //cloud.google.com/apis/design/errors).
5867
6427
  class GoogleRpcStatus
5868
6428
  include Google::Apis::Core::Hashable
5869
6429
 
@@ -5872,15 +6432,15 @@ module Google
5872
6432
  # @return [Fixnum]
5873
6433
  attr_accessor :code
5874
6434
 
5875
- # A list of messages that carry the error details. There is a common set of
6435
+ # A list of messages that carry the error details. There is a common set of
5876
6436
  # message types for APIs to use.
5877
6437
  # Corresponds to the JSON property `details`
5878
6438
  # @return [Array<Hash<String,Object>>]
5879
6439
  attr_accessor :details
5880
6440
 
5881
- # A developer-facing error message, which should be in English. Any
5882
- # user-facing error message should be localized and sent in the
5883
- # google.rpc.Status.details field, or localized by the client.
6441
+ # A developer-facing error message, which should be in English. Any user-facing
6442
+ # error message should be localized and sent in the google.rpc.Status.details
6443
+ # field, or localized by the client.
5884
6444
  # Corresponds to the JSON property `message`
5885
6445
  # @return [String]
5886
6446
  attr_accessor :message