google-api-client 0.43.0 → 0.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (964)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/autoapprove.yml +49 -0
  3. data/.github/workflows/release-please.yml +77 -0
  4. data/.gitignore +2 -0
  5. data/.kokoro/trampoline.sh +0 -0
  6. data/CHANGELOG.md +1066 -184
  7. data/Gemfile +1 -0
  8. data/Rakefile +31 -3
  9. data/api_list_config.yaml +8 -0
  10. data/api_names.yaml +1 -0
  11. data/bin/generate-api +77 -15
  12. data/docs/oauth-server.md +4 -6
  13. data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
  14. data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
  15. data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
  16. data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
  17. data/generated/google/apis/accessapproval_v1/service.rb +93 -132
  18. data/generated/google/apis/accessapproval_v1.rb +1 -1
  19. data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
  20. data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
  21. data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
  22. data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
  23. data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
  24. data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
  25. data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
  26. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
  27. data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
  28. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  29. data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
  30. data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
  31. data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
  32. data/generated/google/apis/adexperiencereport_v1.rb +1 -1
  33. data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
  34. data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
  35. data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
  36. data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
  37. data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
  38. data/generated/google/apis/admin_directory_v1/service.rb +607 -998
  39. data/generated/google/apis/admin_directory_v1.rb +6 -8
  40. data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
  41. data/generated/google/apis/admin_reports_v1/service.rb +131 -187
  42. data/generated/google/apis/admin_reports_v1.rb +6 -5
  43. data/generated/google/apis/admob_v1/classes.rb +31 -31
  44. data/generated/google/apis/admob_v1/service.rb +2 -1
  45. data/generated/google/apis/admob_v1.rb +6 -2
  46. data/generated/google/apis/adsense_v1_4/service.rb +4 -1
  47. data/generated/google/apis/adsense_v1_4.rb +1 -1
  48. data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
  49. data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
  50. data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
  51. data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
  52. data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
  53. data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
  54. data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
  55. data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
  56. data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
  57. data/generated/google/apis/analyticsreporting_v4.rb +1 -1
  58. data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
  59. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  60. data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
  61. data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
  62. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  63. data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
  64. data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
  65. data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
  66. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  67. data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
  68. data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
  69. data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
  70. data/generated/google/apis/apigateway_v1beta.rb +34 -0
  71. data/generated/google/apis/apigee_v1/classes.rb +630 -88
  72. data/generated/google/apis/apigee_v1/representations.rb +209 -1
  73. data/generated/google/apis/apigee_v1/service.rb +401 -74
  74. data/generated/google/apis/apigee_v1.rb +6 -7
  75. data/generated/google/apis/appengine_v1/classes.rb +96 -59
  76. data/generated/google/apis/appengine_v1/representations.rb +17 -0
  77. data/generated/google/apis/appengine_v1/service.rb +38 -47
  78. data/generated/google/apis/appengine_v1.rb +1 -1
  79. data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
  80. data/generated/google/apis/appengine_v1alpha.rb +1 -1
  81. data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
  82. data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
  83. data/generated/google/apis/appengine_v1beta/service.rb +37 -47
  84. data/generated/google/apis/appengine_v1beta.rb +1 -1
  85. data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
  86. data/generated/google/apis/appsmarket_v2.rb +1 -1
  87. data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
  88. data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
  89. data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
  90. data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
  91. data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
  92. data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
  93. data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
  94. data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
  95. data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
  96. data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
  97. data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
  98. data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
  99. data/generated/google/apis/bigquery_v2/classes.rb +593 -576
  100. data/generated/google/apis/bigquery_v2/representations.rb +85 -0
  101. data/generated/google/apis/bigquery_v2/service.rb +79 -41
  102. data/generated/google/apis/bigquery_v2.rb +1 -1
  103. data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
  104. data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
  105. data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
  106. data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
  107. data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
  108. data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
  109. data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
  110. data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
  111. data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
  112. data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
  113. data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
  114. data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
  115. data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
  116. data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
  117. data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
  118. data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
  119. data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
  120. data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
  121. data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
  122. data/generated/google/apis/bigtableadmin_v1.rb +1 -1
  123. data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
  124. data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
  125. data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
  126. data/generated/google/apis/bigtableadmin_v2.rb +1 -1
  127. data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
  128. data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
  129. data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
  130. data/generated/google/apis/billingbudgets_v1.rb +38 -0
  131. data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
  132. data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
  133. data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
  134. data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
  135. data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
  136. data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
  137. data/generated/google/apis/binaryauthorization_v1.rb +1 -1
  138. data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
  139. data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
  140. data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
  141. data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
  142. data/generated/google/apis/books_v1/service.rb +54 -54
  143. data/generated/google/apis/books_v1.rb +1 -1
  144. data/generated/google/apis/calendar_v3/classes.rb +13 -10
  145. data/generated/google/apis/calendar_v3.rb +1 -1
  146. data/generated/google/apis/chat_v1/classes.rb +173 -116
  147. data/generated/google/apis/chat_v1/representations.rb +36 -0
  148. data/generated/google/apis/chat_v1/service.rb +30 -42
  149. data/generated/google/apis/chat_v1.rb +1 -1
  150. data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
  151. data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
  152. data/generated/google/apis/civicinfo_v2.rb +1 -1
  153. data/generated/google/apis/classroom_v1/classes.rb +153 -21
  154. data/generated/google/apis/classroom_v1/representations.rb +43 -0
  155. data/generated/google/apis/classroom_v1/service.rb +240 -0
  156. data/generated/google/apis/classroom_v1.rb +7 -1
  157. data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
  158. data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
  159. data/generated/google/apis/cloudasset_v1/service.rb +296 -167
  160. data/generated/google/apis/cloudasset_v1.rb +1 -1
  161. data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
  162. data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
  163. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  164. data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
  165. data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
  166. data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
  167. data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
  168. data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
  169. data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
  170. data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
  171. data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
  172. data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
  173. data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
  174. data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
  175. data/generated/google/apis/cloudbilling_v1.rb +7 -1
  176. data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
  177. data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
  178. data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
  179. data/generated/google/apis/cloudbuild_v1.rb +1 -1
  180. data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
  181. data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
  182. data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
  183. data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
  184. data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
  185. data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
  186. data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
  187. data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
  188. data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
  189. data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
  190. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  191. data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
  192. data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
  193. data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
  194. data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
  195. data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
  196. data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
  197. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  198. data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
  199. data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
  200. data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
  201. data/generated/google/apis/cloudidentity_v1.rb +4 -1
  202. data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
  203. data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
  204. data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
  205. data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
  206. data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
  207. data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
  208. data/generated/google/apis/cloudiot_v1/service.rb +147 -154
  209. data/generated/google/apis/cloudiot_v1.rb +1 -1
  210. data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
  211. data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
  212. data/generated/google/apis/cloudkms_v1/service.rb +170 -216
  213. data/generated/google/apis/cloudkms_v1.rb +1 -1
  214. data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
  215. data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
  216. data/generated/google/apis/cloudprofiler_v2.rb +1 -1
  217. data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
  218. data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
  219. data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
  220. data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
  221. data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
  222. data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
  223. data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
  224. data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
  225. data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
  226. data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
  227. data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
  228. data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
  229. data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
  230. data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
  231. data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
  232. data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
  233. data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
  234. data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
  235. data/generated/google/apis/cloudscheduler_v1.rb +1 -1
  236. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
  237. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
  238. data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
  239. data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
  240. data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
  241. data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
  242. data/generated/google/apis/cloudsearch_v1.rb +2 -2
  243. data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
  244. data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
  245. data/generated/google/apis/cloudshell_v1/service.rb +198 -25
  246. data/generated/google/apis/cloudshell_v1.rb +1 -1
  247. data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
  248. data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
  249. data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
  250. data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
  251. data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
  252. data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
  253. data/generated/google/apis/cloudtasks_v2.rb +1 -1
  254. data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
  255. data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
  256. data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
  257. data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
  258. data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
  259. data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
  260. data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
  261. data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
  262. data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
  263. data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
  264. data/generated/google/apis/cloudtrace_v1.rb +1 -1
  265. data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
  266. data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
  267. data/generated/google/apis/cloudtrace_v2.rb +1 -1
  268. data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
  269. data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
  270. data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
  271. data/generated/google/apis/composer_v1/classes.rb +189 -242
  272. data/generated/google/apis/composer_v1/service.rb +79 -150
  273. data/generated/google/apis/composer_v1.rb +1 -1
  274. data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
  275. data/generated/google/apis/composer_v1beta1/service.rb +94 -179
  276. data/generated/google/apis/composer_v1beta1.rb +1 -1
  277. data/generated/google/apis/compute_alpha/classes.rb +1227 -186
  278. data/generated/google/apis/compute_alpha/representations.rb +235 -8
  279. data/generated/google/apis/compute_alpha/service.rb +2009 -1024
  280. data/generated/google/apis/compute_alpha.rb +1 -1
  281. data/generated/google/apis/compute_beta/classes.rb +1080 -108
  282. data/generated/google/apis/compute_beta/representations.rb +212 -2
  283. data/generated/google/apis/compute_beta/service.rb +1413 -741
  284. data/generated/google/apis/compute_beta.rb +1 -1
  285. data/generated/google/apis/compute_v1/classes.rb +1512 -106
  286. data/generated/google/apis/compute_v1/representations.rb +470 -1
  287. data/generated/google/apis/compute_v1/service.rb +1625 -285
  288. data/generated/google/apis/compute_v1.rb +1 -1
  289. data/generated/google/apis/container_v1/classes.rb +982 -965
  290. data/generated/google/apis/container_v1/representations.rb +60 -0
  291. data/generated/google/apis/container_v1/service.rb +435 -502
  292. data/generated/google/apis/container_v1.rb +1 -1
  293. data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
  294. data/generated/google/apis/container_v1beta1/representations.rb +91 -0
  295. data/generated/google/apis/container_v1beta1/service.rb +403 -466
  296. data/generated/google/apis/container_v1beta1.rb +1 -1
  297. data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
  298. data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
  299. data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
  300. data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
  301. data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
  302. data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
  303. data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
  304. data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
  305. data/generated/google/apis/content_v2/classes.rb +515 -1219
  306. data/generated/google/apis/content_v2/service.rb +377 -650
  307. data/generated/google/apis/content_v2.rb +3 -4
  308. data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
  309. data/generated/google/apis/content_v2_1/representations.rb +288 -0
  310. data/generated/google/apis/content_v2_1/service.rb +987 -795
  311. data/generated/google/apis/content_v2_1.rb +3 -4
  312. data/generated/google/apis/customsearch_v1/service.rb +2 -2
  313. data/generated/google/apis/customsearch_v1.rb +1 -1
  314. data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
  315. data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
  316. data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
  317. data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
  318. data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
  319. data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
  320. data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
  321. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  322. data/generated/google/apis/datafusion_v1/classes.rb +283 -397
  323. data/generated/google/apis/datafusion_v1/representations.rb +5 -0
  324. data/generated/google/apis/datafusion_v1/service.rb +76 -89
  325. data/generated/google/apis/datafusion_v1.rb +5 -8
  326. data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
  327. data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
  328. data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
  329. data/generated/google/apis/datafusion_v1beta1.rb +5 -8
  330. data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
  331. data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
  332. data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
  333. data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
  334. data/generated/google/apis/dataproc_v1/classes.rb +97 -13
  335. data/generated/google/apis/dataproc_v1/representations.rb +34 -0
  336. data/generated/google/apis/dataproc_v1.rb +1 -1
  337. data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
  338. data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
  339. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  340. data/generated/google/apis/datastore_v1/classes.rb +334 -476
  341. data/generated/google/apis/datastore_v1/service.rb +52 -63
  342. data/generated/google/apis/datastore_v1.rb +1 -1
  343. data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
  344. data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
  345. data/generated/google/apis/datastore_v1beta1.rb +1 -1
  346. data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
  347. data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
  348. data/generated/google/apis/datastore_v1beta3.rb +1 -1
  349. data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
  350. data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
  351. data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
  352. data/generated/google/apis/deploymentmanager_v2.rb +6 -4
  353. data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
  354. data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
  355. data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
  356. data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
  357. data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
  358. data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
  359. data/generated/google/apis/dfareporting_v3_3.rb +2 -2
  360. data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
  361. data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
  362. data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
  363. data/generated/google/apis/dfareporting_v3_4.rb +2 -2
  364. data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
  365. data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
  366. data/generated/google/apis/dialogflow_v2.rb +1 -1
  367. data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
  368. data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
  369. data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
  370. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  371. data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
  372. data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
  373. data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
  374. data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
  375. data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
  376. data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
  377. data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
  378. data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
  379. data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
  380. data/generated/google/apis/displayvideo_v1/service.rb +287 -32
  381. data/generated/google/apis/displayvideo_v1.rb +1 -1
  382. data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
  383. data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
  384. data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
  385. data/generated/google/apis/displayvideo_v1beta.rb +38 -0
  386. data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
  387. data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
  388. data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
  389. data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
  390. data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
  391. data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
  392. data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
  393. data/generated/google/apis/displayvideo_v1dev.rb +38 -0
  394. data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
  395. data/generated/google/apis/dlp_v2/representations.rb +16 -0
  396. data/generated/google/apis/dlp_v2/service.rb +962 -905
  397. data/generated/google/apis/dlp_v2.rb +1 -1
  398. data/generated/google/apis/dns_v1/classes.rb +356 -198
  399. data/generated/google/apis/dns_v1/representations.rb +83 -0
  400. data/generated/google/apis/dns_v1/service.rb +83 -98
  401. data/generated/google/apis/dns_v1.rb +2 -2
  402. data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
  403. data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
  404. data/generated/google/apis/dns_v1beta2/service.rb +83 -98
  405. data/generated/google/apis/dns_v1beta2.rb +2 -2
  406. data/generated/google/apis/docs_v1/classes.rb +894 -1229
  407. data/generated/google/apis/docs_v1/service.rb +17 -22
  408. data/generated/google/apis/docs_v1.rb +1 -1
  409. data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
  410. data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
  411. data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
  412. data/generated/google/apis/documentai_v1beta2.rb +1 -1
  413. data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
  414. data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
  415. data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
  416. data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
  417. data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
  418. data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
  419. data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
  420. data/generated/google/apis/domains_v1alpha2.rb +34 -0
  421. data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
  422. data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
  423. data/generated/google/apis/domains_v1beta1/service.rb +805 -0
  424. data/generated/google/apis/domains_v1beta1.rb +34 -0
  425. data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
  426. data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
  427. data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
  428. data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
  429. data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
  430. data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
  431. data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
  432. data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
  433. data/generated/google/apis/drive_v2/classes.rb +18 -7
  434. data/generated/google/apis/drive_v2/representations.rb +1 -0
  435. data/generated/google/apis/drive_v2/service.rb +79 -15
  436. data/generated/google/apis/drive_v2.rb +1 -1
  437. data/generated/google/apis/drive_v3/classes.rb +18 -8
  438. data/generated/google/apis/drive_v3/representations.rb +1 -0
  439. data/generated/google/apis/drive_v3/service.rb +59 -11
  440. data/generated/google/apis/drive_v3.rb +1 -1
  441. data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
  442. data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
  443. data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
  444. data/generated/google/apis/eventarc_v1beta1.rb +34 -0
  445. data/generated/google/apis/file_v1/classes.rb +155 -174
  446. data/generated/google/apis/file_v1/service.rb +43 -52
  447. data/generated/google/apis/file_v1.rb +1 -1
  448. data/generated/google/apis/file_v1beta1/classes.rb +335 -194
  449. data/generated/google/apis/file_v1beta1/representations.rb +55 -0
  450. data/generated/google/apis/file_v1beta1/service.rb +267 -55
  451. data/generated/google/apis/file_v1beta1.rb +1 -1
  452. data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
  453. data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
  454. data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
  455. data/generated/google/apis/firebase_v1beta1.rb +1 -1
  456. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  457. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
  458. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
  459. data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
  460. data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
  461. data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
  462. data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
  463. data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
  464. data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
  465. data/generated/google/apis/firebaserules_v1/service.rb +87 -110
  466. data/generated/google/apis/firebaserules_v1.rb +1 -1
  467. data/generated/google/apis/firestore_v1/classes.rb +406 -502
  468. data/generated/google/apis/firestore_v1/service.rb +165 -201
  469. data/generated/google/apis/firestore_v1.rb +1 -1
  470. data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
  471. data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
  472. data/generated/google/apis/firestore_v1beta1.rb +1 -1
  473. data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
  474. data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
  475. data/generated/google/apis/firestore_v1beta2.rb +1 -1
  476. data/generated/google/apis/fitness_v1/classes.rb +982 -0
  477. data/generated/google/apis/fitness_v1/representations.rb +398 -0
  478. data/generated/google/apis/fitness_v1/service.rb +628 -0
  479. data/generated/google/apis/fitness_v1.rb +97 -0
  480. data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
  481. data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
  482. data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
  483. data/generated/google/apis/games_management_v1management/classes.rb +14 -20
  484. data/generated/google/apis/games_management_v1management/service.rb +35 -36
  485. data/generated/google/apis/games_management_v1management.rb +2 -3
  486. data/generated/google/apis/games_v1/classes.rb +376 -83
  487. data/generated/google/apis/games_v1/representations.rb +118 -0
  488. data/generated/google/apis/games_v1/service.rb +118 -90
  489. data/generated/google/apis/games_v1.rb +2 -3
  490. data/generated/google/apis/gameservices_v1/classes.rb +22 -14
  491. data/generated/google/apis/gameservices_v1/representations.rb +1 -0
  492. data/generated/google/apis/gameservices_v1/service.rb +54 -51
  493. data/generated/google/apis/gameservices_v1.rb +1 -1
  494. data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
  495. data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
  496. data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
  497. data/generated/google/apis/gameservices_v1beta.rb +1 -1
  498. data/generated/google/apis/genomics_v1/classes.rb +70 -76
  499. data/generated/google/apis/genomics_v1/service.rb +28 -43
  500. data/generated/google/apis/genomics_v1.rb +1 -1
  501. data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
  502. data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
  503. data/generated/google/apis/genomics_v1alpha2.rb +1 -1
  504. data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
  505. data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
  506. data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
  507. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  508. data/generated/google/apis/gmail_v1/classes.rb +37 -43
  509. data/generated/google/apis/gmail_v1/service.rb +5 -4
  510. data/generated/google/apis/gmail_v1.rb +1 -1
  511. data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
  512. data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
  513. data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
  514. data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
  515. data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
  516. data/generated/google/apis/groupsmigration_v1.rb +35 -0
  517. data/generated/google/apis/healthcare_v1/classes.rb +637 -826
  518. data/generated/google/apis/healthcare_v1/representations.rb +32 -0
  519. data/generated/google/apis/healthcare_v1/service.rb +842 -855
  520. data/generated/google/apis/healthcare_v1.rb +1 -1
  521. data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
  522. data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
  523. data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
  524. data/generated/google/apis/healthcare_v1beta1.rb +1 -1
  525. data/generated/google/apis/homegraph_v1/classes.rb +76 -164
  526. data/generated/google/apis/homegraph_v1/service.rb +23 -35
  527. data/generated/google/apis/homegraph_v1.rb +4 -1
  528. data/generated/google/apis/iam_v1/classes.rb +395 -592
  529. data/generated/google/apis/iam_v1/representations.rb +1 -0
  530. data/generated/google/apis/iam_v1/service.rb +427 -555
  531. data/generated/google/apis/iam_v1.rb +1 -1
  532. data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
  533. data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
  534. data/generated/google/apis/iamcredentials_v1.rb +3 -2
  535. data/generated/google/apis/iap_v1/classes.rb +253 -355
  536. data/generated/google/apis/iap_v1/representations.rb +1 -0
  537. data/generated/google/apis/iap_v1/service.rb +61 -71
  538. data/generated/google/apis/iap_v1.rb +1 -1
  539. data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
  540. data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
  541. data/generated/google/apis/iap_v1beta1/service.rb +17 -19
  542. data/generated/google/apis/iap_v1beta1.rb +1 -1
  543. data/generated/google/apis/indexing_v3/classes.rb +11 -11
  544. data/generated/google/apis/indexing_v3.rb +1 -1
  545. data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
  546. data/generated/google/apis/jobs_v2/representations.rb +272 -0
  547. data/generated/google/apis/jobs_v2/service.rb +85 -126
  548. data/generated/google/apis/jobs_v2.rb +1 -1
  549. data/generated/google/apis/jobs_v3/classes.rb +1559 -980
  550. data/generated/google/apis/jobs_v3/representations.rb +272 -0
  551. data/generated/google/apis/jobs_v3/service.rb +101 -139
  552. data/generated/google/apis/jobs_v3.rb +1 -1
  553. data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
  554. data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
  555. data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
  556. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  557. data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
  558. data/generated/google/apis/kgsearch_v1/service.rb +11 -11
  559. data/generated/google/apis/kgsearch_v1.rb +1 -1
  560. data/generated/google/apis/licensing_v1/classes.rb +1 -1
  561. data/generated/google/apis/licensing_v1/service.rb +56 -86
  562. data/generated/google/apis/licensing_v1.rb +4 -3
  563. data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
  564. data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
  565. data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
  566. data/generated/google/apis/lifesciences_v2beta.rb +1 -1
  567. data/generated/google/apis/localservices_v1/classes.rb +426 -0
  568. data/generated/google/apis/localservices_v1/representations.rb +174 -0
  569. data/generated/google/apis/localservices_v1/service.rb +199 -0
  570. data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
  571. data/generated/google/apis/logging_v2/classes.rb +306 -232
  572. data/generated/google/apis/logging_v2/representations.rb +79 -0
  573. data/generated/google/apis/logging_v2/service.rb +3307 -1579
  574. data/generated/google/apis/logging_v2.rb +1 -1
  575. data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
  576. data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
  577. data/generated/google/apis/managedidentities_v1/service.rb +1 -4
  578. data/generated/google/apis/managedidentities_v1.rb +1 -1
  579. data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
  580. data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
  581. data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
  582. data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
  583. data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
  584. data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
  585. data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
  586. data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
  587. data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
  588. data/generated/google/apis/manufacturers_v1/service.rb +44 -55
  589. data/generated/google/apis/manufacturers_v1.rb +1 -1
  590. data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
  591. data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
  592. data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
  593. data/generated/google/apis/memcache_v1beta2.rb +1 -1
  594. data/generated/google/apis/ml_v1/classes.rb +1122 -1149
  595. data/generated/google/apis/ml_v1/representations.rb +82 -0
  596. data/generated/google/apis/ml_v1/service.rb +194 -253
  597. data/generated/google/apis/ml_v1.rb +1 -1
  598. data/generated/google/apis/monitoring_v1/classes.rb +107 -26
  599. data/generated/google/apis/monitoring_v1/representations.rb +35 -0
  600. data/generated/google/apis/monitoring_v1/service.rb +10 -11
  601. data/generated/google/apis/monitoring_v1.rb +1 -1
  602. data/generated/google/apis/monitoring_v3/classes.rb +303 -345
  603. data/generated/google/apis/monitoring_v3/representations.rb +18 -0
  604. data/generated/google/apis/monitoring_v3/service.rb +176 -146
  605. data/generated/google/apis/monitoring_v3.rb +1 -1
  606. data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
  607. data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
  608. data/generated/google/apis/networkmanagement_v1.rb +1 -1
  609. data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
  610. data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
  611. data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
  612. data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
  613. data/generated/google/apis/osconfig_v1/classes.rb +154 -902
  614. data/generated/google/apis/osconfig_v1/representations.rb +0 -337
  615. data/generated/google/apis/osconfig_v1/service.rb +26 -31
  616. data/generated/google/apis/osconfig_v1.rb +3 -3
  617. data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
  618. data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
  619. data/generated/google/apis/osconfig_v1beta.rb +3 -3
  620. data/generated/google/apis/oslogin_v1/classes.rb +14 -12
  621. data/generated/google/apis/oslogin_v1/representations.rb +1 -0
  622. data/generated/google/apis/oslogin_v1/service.rb +12 -16
  623. data/generated/google/apis/oslogin_v1.rb +1 -1
  624. data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
  625. data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
  626. data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
  627. data/generated/google/apis/oslogin_v1alpha.rb +1 -1
  628. data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
  629. data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
  630. data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
  631. data/generated/google/apis/oslogin_v1beta.rb +1 -1
  632. data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
  633. data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
  634. data/generated/google/apis/pagespeedonline_v5.rb +2 -2
  635. data/generated/google/apis/people_v1/classes.rb +173 -63
  636. data/generated/google/apis/people_v1/representations.rb +41 -0
  637. data/generated/google/apis/people_v1/service.rb +63 -61
  638. data/generated/google/apis/people_v1.rb +1 -1
  639. data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
  640. data/generated/google/apis/playablelocations_v3/service.rb +10 -10
  641. data/generated/google/apis/playablelocations_v3.rb +1 -1
  642. data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
  643. data/generated/google/apis/playcustomapp_v1.rb +1 -1
  644. data/generated/google/apis/poly_v1/classes.rb +65 -79
  645. data/generated/google/apis/poly_v1/service.rb +50 -63
  646. data/generated/google/apis/poly_v1.rb +3 -4
  647. data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
  648. data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
  649. data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
  650. data/generated/google/apis/privateca_v1beta1.rb +34 -0
  651. data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
  652. data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
  653. data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
  654. data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
  655. data/generated/google/apis/pubsub_v1/classes.rb +399 -518
  656. data/generated/google/apis/pubsub_v1/representations.rb +2 -0
  657. data/generated/google/apis/pubsub_v1/service.rb +221 -247
  658. data/generated/google/apis/pubsub_v1.rb +1 -1
  659. data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
  660. data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
  661. data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
  662. data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
  663. data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
  664. data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
  665. data/generated/google/apis/pubsub_v1beta2.rb +1 -1
  666. data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
  667. data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
  668. data/generated/google/apis/pubsublite_v1/service.rb +558 -0
  669. data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
  670. data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
  671. data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
  672. data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
  673. data/generated/google/apis/realtimebidding_v1.rb +1 -1
  674. data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
  675. data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
  676. data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
  677. data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
  678. data/generated/google/apis/recommender_v1/classes.rb +1 -1
  679. data/generated/google/apis/recommender_v1/service.rb +4 -2
  680. data/generated/google/apis/recommender_v1.rb +1 -1
  681. data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
  682. data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
  683. data/generated/google/apis/recommender_v1beta1.rb +1 -1
  684. data/generated/google/apis/redis_v1/classes.rb +91 -513
  685. data/generated/google/apis/redis_v1/representations.rb +0 -139
  686. data/generated/google/apis/redis_v1/service.rb +92 -109
  687. data/generated/google/apis/redis_v1.rb +1 -1
  688. data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
  689. data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
  690. data/generated/google/apis/redis_v1beta1/service.rb +126 -109
  691. data/generated/google/apis/redis_v1beta1.rb +1 -1
  692. data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
  693. data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
  694. data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
  695. data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
  696. data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
  697. data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
  698. data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
  699. data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
  700. data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
  701. data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
  702. data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
  703. data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
  704. data/generated/google/apis/reseller_v1/classes.rb +151 -219
  705. data/generated/google/apis/reseller_v1/service.rb +122 -173
  706. data/generated/google/apis/reseller_v1.rb +2 -2
  707. data/generated/google/apis/run_v1/classes.rb +19 -138
  708. data/generated/google/apis/run_v1/representations.rb +1 -62
  709. data/generated/google/apis/run_v1/service.rb +0 -342
  710. data/generated/google/apis/run_v1.rb +1 -1
  711. data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
  712. data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
  713. data/generated/google/apis/run_v1alpha1.rb +1 -1
  714. data/generated/google/apis/run_v1beta1/classes.rb +3 -2
  715. data/generated/google/apis/run_v1beta1.rb +1 -1
  716. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
  717. data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
  718. data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
  719. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  720. data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
  721. data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
  722. data/generated/google/apis/safebrowsing_v4.rb +1 -1
  723. data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
  724. data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
  725. data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
  726. data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
  727. data/generated/google/apis/script_v1/classes.rb +88 -111
  728. data/generated/google/apis/script_v1/service.rb +63 -69
  729. data/generated/google/apis/script_v1.rb +1 -1
  730. data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
  731. data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
  732. data/generated/google/apis/searchconsole_v1/service.rb +287 -0
  733. data/generated/google/apis/searchconsole_v1.rb +7 -1
  734. data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
  735. data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
  736. data/generated/google/apis/secretmanager_v1/service.rb +66 -82
  737. data/generated/google/apis/secretmanager_v1.rb +1 -1
  738. data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
  739. data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
  740. data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
  741. data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
  742. data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
  743. data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
  744. data/generated/google/apis/securitycenter_v1.rb +1 -1
  745. data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
  746. data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
  747. data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
  748. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
  749. data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
  750. data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
  751. data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
  752. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
  753. data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
  754. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
  755. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  756. data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
  757. data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
  758. data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
  759. data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
  760. data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
  761. data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
  762. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  763. data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
  764. data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
  765. data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
  766. data/generated/google/apis/servicecontrol_v2.rb +1 -1
  767. data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
  768. data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
  769. data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
  770. data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
  771. data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
  772. data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
  773. data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
  774. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  775. data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
  776. data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
  777. data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
  778. data/generated/google/apis/servicenetworking_v1.rb +1 -1
  779. data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
  780. data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
  781. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  782. data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
  783. data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
  784. data/generated/google/apis/serviceusage_v1/service.rb +5 -1
  785. data/generated/google/apis/serviceusage_v1.rb +1 -1
  786. data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
  787. data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
  788. data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
  789. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  790. data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
  791. data/generated/google/apis/sheets_v4/representations.rb +625 -0
  792. data/generated/google/apis/sheets_v4/service.rb +113 -149
  793. data/generated/google/apis/sheets_v4.rb +1 -1
  794. data/generated/google/apis/site_verification_v1.rb +1 -1
  795. data/generated/google/apis/slides_v1/classes.rb +841 -1114
  796. data/generated/google/apis/slides_v1/service.rb +23 -30
  797. data/generated/google/apis/slides_v1.rb +1 -1
  798. data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
  799. data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
  800. data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
  801. data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
  802. data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
  803. data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
  804. data/generated/google/apis/sourcerepo_v1.rb +1 -1
  805. data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
  806. data/generated/google/apis/spanner_v1/representations.rb +1 -0
  807. data/generated/google/apis/spanner_v1/service.rb +443 -618
  808. data/generated/google/apis/spanner_v1.rb +1 -1
  809. data/generated/google/apis/speech_v1/classes.rb +174 -220
  810. data/generated/google/apis/speech_v1/service.rb +27 -32
  811. data/generated/google/apis/speech_v1.rb +1 -1
  812. data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
  813. data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
  814. data/generated/google/apis/speech_v1p1beta1.rb +1 -1
  815. data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
  816. data/generated/google/apis/speech_v2beta1/service.rb +10 -12
  817. data/generated/google/apis/speech_v2beta1.rb +1 -1
  818. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
  819. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
  820. data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
  821. data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
  822. data/generated/google/apis/storage_v1/classes.rb +10 -17
  823. data/generated/google/apis/storage_v1/representations.rb +2 -3
  824. data/generated/google/apis/storage_v1/service.rb +3 -2
  825. data/generated/google/apis/storage_v1.rb +1 -1
  826. data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
  827. data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
  828. data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
  829. data/generated/google/apis/storagetransfer_v1.rb +1 -1
  830. data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
  831. data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
  832. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  833. data/generated/google/apis/sts_v1/classes.rb +121 -0
  834. data/generated/google/apis/sts_v1/representations.rb +59 -0
  835. data/generated/google/apis/sts_v1/service.rb +90 -0
  836. data/generated/google/apis/sts_v1.rb +32 -0
  837. data/generated/google/apis/sts_v1beta/classes.rb +191 -0
  838. data/generated/google/apis/sts_v1beta/representations.rb +61 -0
  839. data/generated/google/apis/sts_v1beta/service.rb +92 -0
  840. data/generated/google/apis/sts_v1beta.rb +32 -0
  841. data/generated/google/apis/tagmanager_v1/service.rb +2 -2
  842. data/generated/google/apis/tagmanager_v1.rb +1 -1
  843. data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
  844. data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
  845. data/generated/google/apis/tagmanager_v2/service.rb +2 -2
  846. data/generated/google/apis/tagmanager_v2.rb +1 -1
  847. data/generated/google/apis/tasks_v1/classes.rb +21 -22
  848. data/generated/google/apis/tasks_v1/service.rb +19 -19
  849. data/generated/google/apis/tasks_v1.rb +1 -1
  850. data/generated/google/apis/testing_v1/classes.rb +384 -390
  851. data/generated/google/apis/testing_v1/representations.rb +23 -0
  852. data/generated/google/apis/testing_v1/service.rb +22 -28
  853. data/generated/google/apis/testing_v1.rb +1 -1
  854. data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
  855. data/generated/google/apis/texttospeech_v1/service.rb +9 -10
  856. data/generated/google/apis/texttospeech_v1.rb +1 -1
  857. data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
  858. data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
  859. data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
  860. data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
  861. data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
  862. data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
  863. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  864. data/generated/google/apis/tpu_v1/classes.rb +57 -3
  865. data/generated/google/apis/tpu_v1/representations.rb +19 -0
  866. data/generated/google/apis/tpu_v1/service.rb +8 -8
  867. data/generated/google/apis/tpu_v1.rb +1 -1
  868. data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
  869. data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
  870. data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
  871. data/generated/google/apis/tpu_v1alpha1.rb +1 -1
  872. data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
  873. data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
  874. data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
  875. data/generated/google/apis/trafficdirector_v2.rb +34 -0
  876. data/generated/google/apis/translate_v3/classes.rb +151 -177
  877. data/generated/google/apis/translate_v3/service.rb +122 -151
  878. data/generated/google/apis/translate_v3.rb +1 -1
  879. data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
  880. data/generated/google/apis/translate_v3beta1/service.rb +122 -151
  881. data/generated/google/apis/translate_v3beta1.rb +1 -1
  882. data/generated/google/apis/vault_v1/classes.rb +413 -103
  883. data/generated/google/apis/vault_v1/representations.rb +162 -0
  884. data/generated/google/apis/vault_v1/service.rb +182 -37
  885. data/generated/google/apis/vault_v1.rb +1 -1
  886. data/generated/google/apis/vectortile_v1/classes.rb +185 -267
  887. data/generated/google/apis/vectortile_v1/service.rb +75 -88
  888. data/generated/google/apis/vectortile_v1.rb +1 -1
  889. data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
  890. data/generated/google/apis/verifiedaccess_v1.rb +1 -1
  891. data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
  892. data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
  893. data/generated/google/apis/videointelligence_v1/service.rb +38 -77
  894. data/generated/google/apis/videointelligence_v1.rb +1 -1
  895. data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
  896. data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
  897. data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
  898. data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
  899. data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
  900. data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
  901. data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
  902. data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
  903. data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
  904. data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
  905. data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
  906. data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
  907. data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
  908. data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
  909. data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
  910. data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
  911. data/generated/google/apis/vision_v1/classes.rb +16 -16
  912. data/generated/google/apis/vision_v1.rb +1 -1
  913. data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
  914. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  915. data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
  916. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  917. data/generated/google/apis/webfonts_v1/classes.rb +1 -2
  918. data/generated/google/apis/webfonts_v1/service.rb +2 -4
  919. data/generated/google/apis/webfonts_v1.rb +2 -3
  920. data/generated/google/apis/websecurityscanner_v1.rb +1 -1
  921. data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
  922. data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
  923. data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
  924. data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
  925. data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
  926. data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
  927. data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
  928. data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
  929. data/generated/google/apis/workflows_v1beta/service.rb +438 -0
  930. data/generated/google/apis/workflows_v1beta.rb +35 -0
  931. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  932. data/generated/google/apis/youtube_v3/classes.rb +0 -586
  933. data/generated/google/apis/youtube_v3/representations.rb +0 -269
  934. data/generated/google/apis/youtube_v3/service.rb +3 -120
  935. data/generated/google/apis/youtube_v3.rb +1 -1
  936. data/google-api-client.gemspec +25 -24
  937. data/lib/google/apis/core/api_command.rb +1 -0
  938. data/lib/google/apis/core/http_command.rb +2 -1
  939. data/lib/google/apis/options.rb +8 -5
  940. data/lib/google/apis/version.rb +1 -1
  941. data/synth.py +40 -0
  942. metadata +134 -41
  943. data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
  944. data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
  945. data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
  946. data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
  947. data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
  948. data/generated/google/apis/appsactivity_v1/service.rb +0 -126
  949. data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
  950. data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
  951. data/generated/google/apis/dns_v2beta1/service.rb +0 -928
  952. data/generated/google/apis/dns_v2beta1.rb +0 -43
  953. data/generated/google/apis/memcache_v1/classes.rb +0 -1157
  954. data/generated/google/apis/plus_v1/classes.rb +0 -2094
  955. data/generated/google/apis/plus_v1/representations.rb +0 -907
  956. data/generated/google/apis/plus_v1/service.rb +0 -451
  957. data/generated/google/apis/plus_v1.rb +0 -43
  958. data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
  959. data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
  960. data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
  961. data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
  962. data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
  963. data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
  964. data/generated/google/apis/storage_v1beta2.rb +0 -40
@@ -22,9 +22,9 @@ module Google
22
22
  module Apis
23
23
  module VideointelligenceV1p1beta1
24
24
 
25
- # Video annotation progress. Included in the `metadata`
26
- # field of the `Operation` returned by the `GetOperation`
27
- # call of the `google::longrunning::Operations` service.
25
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
26
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
27
+ # service.
28
28
  class GoogleCloudVideointelligenceV1AnnotateVideoProgress
29
29
  include Google::Apis::Core::Hashable
30
30
 
@@ -43,9 +43,9 @@ module Google
43
43
  end
44
44
  end
45
45
 
46
- # Video annotation response. Included in the `response`
47
- # field of the `Operation` returned by the `GetOperation`
48
- # call of the `google::longrunning::Operations` service.
46
+ # Video annotation response. Included in the `response` field of the `Operation`
47
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
48
+ # service.
49
49
  class GoogleCloudVideointelligenceV1AnnotateVideoResponse
50
50
  include Google::Apis::Core::Hashable
51
51
 
@@ -73,14 +73,14 @@ module Google
73
73
  # @return [Float]
74
74
  attr_accessor :confidence
75
75
 
76
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
77
- # A full list of supported type names will be provided in the document.
76
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
77
+ # full list of supported type names will be provided in the document.
78
78
  # Corresponds to the JSON property `name`
79
79
  # @return [String]
80
80
  attr_accessor :name
81
81
 
82
- # Text value of the detection result. For example, the value for "HairColor"
83
- # can be "black", "blonde", etc.
82
+ # Text value of the detection result. For example, the value for "HairColor" can
83
+ # be "black", "blonde", etc.
84
84
  # Corresponds to the JSON property `value`
85
85
  # @return [String]
86
86
  attr_accessor :value
@@ -112,9 +112,8 @@ module Google
112
112
  # @return [String]
113
113
  attr_accessor :name
114
114
 
115
- # A vertex represents a 2D point in the image.
116
- # NOTE: the normalized vertex coordinates are relative to the original image
117
- # and range from 0 to 1.
115
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
116
+ # coordinates are relative to the original image and range from 0 to 1.
118
117
  # Corresponds to the JSON property `point`
119
118
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedVertex]
120
119
  attr_accessor :point
@@ -140,8 +139,7 @@ module Google
140
139
  # @return [String]
141
140
  attr_accessor :description
142
141
 
143
- # Opaque entity ID. Some IDs may be available in
144
- # [Google Knowledge Graph Search
142
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
145
143
  # API](https://developers.google.com/knowledge-graph/).
146
144
  # Corresponds to the JSON property `entityId`
147
145
  # @return [String]
@@ -164,9 +162,9 @@ module Google
164
162
  end
165
163
  end
166
164
 
167
- # Explicit content annotation (based on per-frame visual signals only).
168
- # If no explicit content has been detected in a frame, no annotations are
169
- # present for that frame.
165
+ # Explicit content annotation (based on per-frame visual signals only). If no
166
+ # explicit content has been detected in a frame, no annotations are present for
167
+ # that frame.
170
168
  class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
171
169
  include Google::Apis::Core::Hashable
172
170
 
@@ -217,14 +215,110 @@ module Google
217
215
  end
218
216
  end
219
217
 
218
+ # Deprecated. No effect.
219
+ class GoogleCloudVideointelligenceV1FaceAnnotation
220
+ include Google::Apis::Core::Hashable
221
+
222
+ # All video frames where a face was detected.
223
+ # Corresponds to the JSON property `frames`
224
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1FaceFrame>]
225
+ attr_accessor :frames
226
+
227
+ # All video segments where a face was detected.
228
+ # Corresponds to the JSON property `segments`
229
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1FaceSegment>]
230
+ attr_accessor :segments
231
+
232
+ # Thumbnail of a representative face view (in JPEG format).
233
+ # Corresponds to the JSON property `thumbnail`
234
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
235
+ # @return [String]
236
+ attr_accessor :thumbnail
237
+
238
+ def initialize(**args)
239
+ update!(**args)
240
+ end
241
+
242
+ # Update properties of this object
243
+ def update!(**args)
244
+ @frames = args[:frames] if args.key?(:frames)
245
+ @segments = args[:segments] if args.key?(:segments)
246
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
247
+ end
248
+ end
249
+
250
+ # Face detection annotation.
251
+ class GoogleCloudVideointelligenceV1FaceDetectionAnnotation
252
+ include Google::Apis::Core::Hashable
253
+
254
+ # Feature version.
255
+ # Corresponds to the JSON property `version`
256
+ # @return [String]
257
+ attr_accessor :version
258
+
259
+ def initialize(**args)
260
+ update!(**args)
261
+ end
262
+
263
+ # Update properties of this object
264
+ def update!(**args)
265
+ @version = args[:version] if args.key?(:version)
266
+ end
267
+ end
268
+
269
+ # Deprecated. No effect.
270
+ class GoogleCloudVideointelligenceV1FaceFrame
271
+ include Google::Apis::Core::Hashable
272
+
273
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
274
+ # same face is detected in multiple locations within the current frame.
275
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
276
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox>]
277
+ attr_accessor :normalized_bounding_boxes
278
+
279
+ # Time-offset, relative to the beginning of the video, corresponding to the
280
+ # video frame for this location.
281
+ # Corresponds to the JSON property `timeOffset`
282
+ # @return [String]
283
+ attr_accessor :time_offset
284
+
285
+ def initialize(**args)
286
+ update!(**args)
287
+ end
288
+
289
+ # Update properties of this object
290
+ def update!(**args)
291
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
292
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
293
+ end
294
+ end
295
+
296
+ # Video segment level annotation results for face detection.
297
+ class GoogleCloudVideointelligenceV1FaceSegment
298
+ include Google::Apis::Core::Hashable
299
+
300
+ # Video segment.
301
+ # Corresponds to the JSON property `segment`
302
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment]
303
+ attr_accessor :segment
304
+
305
+ def initialize(**args)
306
+ update!(**args)
307
+ end
308
+
309
+ # Update properties of this object
310
+ def update!(**args)
311
+ @segment = args[:segment] if args.key?(:segment)
312
+ end
313
+ end
314
+
220
315
  # Label annotation.
221
316
  class GoogleCloudVideointelligenceV1LabelAnnotation
222
317
  include Google::Apis::Core::Hashable
223
318
 
224
- # Common categories for the detected entity.
225
- # For example, when the label is `Terrier`, the category is likely `dog`. And
226
- # in some cases there might be more than one categories e.g., `Terrier` could
227
- # also be a `pet`.
319
+ # Common categories for the detected entity. For example, when the label is `
320
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
321
+ # than one categories e.g., `Terrier` could also be a `pet`.
228
322
  # Corresponds to the JSON property `categoryEntities`
229
323
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Entity>]
230
324
  attr_accessor :category_entities
@@ -323,14 +417,14 @@ module Google
323
417
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Entity]
324
418
  attr_accessor :entity
325
419
 
326
- # All video segments where the recognized logo appears. There might be
327
- # multiple instances of the same logo class appearing in one VideoSegment.
420
+ # All video segments where the recognized logo appears. There might be multiple
421
+ # instances of the same logo class appearing in one VideoSegment.
328
422
  # Corresponds to the JSON property `segments`
329
423
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment>]
330
424
  attr_accessor :segments
331
425
 
332
- # All logo tracks where the recognized logo appears. Each track corresponds
333
- # to one logo instance appearing in consecutive frames.
426
+ # All logo tracks where the recognized logo appears. Each track corresponds to
427
+ # one logo instance appearing in consecutive frames.
334
428
  # Corresponds to the JSON property `tracks`
335
429
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Track>]
336
430
  attr_accessor :tracks
@@ -347,9 +441,8 @@ module Google
347
441
  end
348
442
  end
349
443
 
350
- # Normalized bounding box.
351
- # The normalized vertex coordinates are relative to the original image.
352
- # Range: [0, 1].
444
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
445
+ # original image. Range: [0, 1].
353
446
  class GoogleCloudVideointelligenceV1NormalizedBoundingBox
354
447
  include Google::Apis::Core::Hashable
355
448
 
@@ -387,20 +480,12 @@ module Google
387
480
  end
388
481
 
389
482
  # Normalized bounding polygon for text (that might not be aligned with axis).
390
- # Contains list of the corner points in clockwise order starting from
391
- # top-left corner. For example, for a rectangular bounding box:
392
- # When the text is horizontal it might look like:
393
- # 0----1
394
- # | |
395
- # 3----2
396
- # When it's clockwise rotated 180 degrees around the top-left corner it
397
- # becomes:
398
- # 2----3
399
- # | |
400
- # 1----0
401
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
402
- # than 0, or greater than 1 due to trignometric calculations for location of
403
- # the box.
483
+ # Contains list of the corner points in clockwise order starting from top-left
484
+ # corner. For example, for a rectangular bounding box: When the text is
485
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
486
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
487
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
488
+ # or greater than 1 due to trignometric calculations for location of the box.
404
489
  class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
405
490
  include Google::Apis::Core::Hashable
406
491
 
@@ -419,9 +504,8 @@ module Google
419
504
  end
420
505
  end
421
506
 
422
- # A vertex represents a 2D point in the image.
423
- # NOTE: the normalized vertex coordinates are relative to the original image
424
- # and range from 0 to 1.
507
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
508
+ # coordinates are relative to the original image and range from 0 to 1.
425
509
  class GoogleCloudVideointelligenceV1NormalizedVertex
426
510
  include Google::Apis::Core::Hashable
427
511
 
@@ -460,10 +544,10 @@ module Google
460
544
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Entity]
461
545
  attr_accessor :entity
462
546
 
463
- # Information corresponding to all frames where this object track appears.
464
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
465
- # messages in frames.
466
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
547
+ # Information corresponding to all frames where this object track appears. Non-
548
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
549
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
550
+ # frames.
467
551
  # Corresponds to the JSON property `frames`
468
552
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
469
553
  attr_accessor :frames
@@ -473,12 +557,11 @@ module Google
473
557
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment]
474
558
  attr_accessor :segment
475
559
 
476
- # Streaming mode ONLY.
477
- # In streaming mode, we do not know the end time of a tracked object
478
- # before it is completed. Hence, there is no VideoSegment info returned.
479
- # Instead, we provide a unique identifiable integer track_id so that
480
- # the customers can correlate the results of the ongoing
481
- # ObjectTrackAnnotation of the same track_id over time.
560
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
561
+ # tracked object before it is completed. Hence, there is no VideoSegment info
562
+ # returned. Instead, we provide a unique identifiable integer track_id so that
563
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
564
+ # of the same track_id over time.
482
565
  # Corresponds to the JSON property `trackId`
483
566
  # @return [Fixnum]
484
567
  attr_accessor :track_id
@@ -508,9 +591,8 @@ module Google
508
591
  class GoogleCloudVideointelligenceV1ObjectTrackingFrame
509
592
  include Google::Apis::Core::Hashable
510
593
 
511
- # Normalized bounding box.
512
- # The normalized vertex coordinates are relative to the original image.
513
- # Range: [0, 1].
594
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
595
+ # original image. Range: [0, 1].
514
596
  # Corresponds to the JSON property `normalizedBoundingBox`
515
597
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
516
598
  attr_accessor :normalized_bounding_box
@@ -531,16 +613,41 @@ module Google
531
613
  end
532
614
  end
533
615
 
616
+ # Person detection annotation per video.
617
+ class GoogleCloudVideointelligenceV1PersonDetectionAnnotation
618
+ include Google::Apis::Core::Hashable
619
+
620
+ # The detected tracks of a person.
621
+ # Corresponds to the JSON property `tracks`
622
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Track>]
623
+ attr_accessor :tracks
624
+
625
+ # Feature version.
626
+ # Corresponds to the JSON property `version`
627
+ # @return [String]
628
+ attr_accessor :version
629
+
630
+ def initialize(**args)
631
+ update!(**args)
632
+ end
633
+
634
+ # Update properties of this object
635
+ def update!(**args)
636
+ @tracks = args[:tracks] if args.key?(:tracks)
637
+ @version = args[:version] if args.key?(:version)
638
+ end
639
+ end
640
+
534
641
  # Alternative hypotheses (a.k.a. n-best list).
535
642
  class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative
536
643
  include Google::Apis::Core::Hashable
537
644
 
538
645
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
539
646
  # indicates an estimated greater likelihood that the recognized words are
540
- # correct. This field is set only for the top alternative.
541
- # This field is not guaranteed to be accurate and users should not rely on it
542
- # to be always provided.
543
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
647
+ # correct. This field is set only for the top alternative. This field is not
648
+ # guaranteed to be accurate and users should not rely on it to be always
649
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
650
+ # not set.
544
651
  # Corresponds to the JSON property `confidence`
545
652
  # @return [Float]
546
653
  attr_accessor :confidence
@@ -551,8 +658,8 @@ module Google
551
658
  attr_accessor :transcript
552
659
 
553
660
  # Output only. A list of word-specific information for each recognized word.
554
- # Note: When `enable_speaker_diarization` is set to true, you will see all
555
- # the words from the beginning of the audio.
661
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
662
+ # words from the beginning of the audio.
556
663
  # Corresponds to the JSON property `words`
557
664
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1WordInfo>]
558
665
  attr_accessor :words
@@ -573,18 +680,17 @@ module Google
573
680
  class GoogleCloudVideointelligenceV1SpeechTranscription
574
681
  include Google::Apis::Core::Hashable
575
682
 
576
- # May contain one or more recognition hypotheses (up to the maximum specified
577
- # in `max_alternatives`). These alternatives are ordered in terms of
578
- # accuracy, with the top (first) alternative being the most probable, as
579
- # ranked by the recognizer.
683
+ # May contain one or more recognition hypotheses (up to the maximum specified in
684
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
685
+ # the top (first) alternative being the most probable, as ranked by the
686
+ # recognizer.
580
687
  # Corresponds to the JSON property `alternatives`
581
688
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
582
689
  attr_accessor :alternatives
583
690
 
584
691
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
585
- # language tag of
586
- # the language in this result. This language code was detected to have the
587
- # most likelihood of being spoken in the audio.
692
+ # language tag of the language in this result. This language code was detected
693
+ # to have the most likelihood of being spoken in the audio.
588
694
  # Corresponds to the JSON property `languageCode`
589
695
  # @return [String]
590
696
  attr_accessor :language_code
@@ -633,27 +739,19 @@ module Google
633
739
  end
634
740
  end
635
741
 
636
- # Video frame level annotation results for text annotation (OCR).
637
- # Contains information regarding timestamp and bounding box locations for the
638
- # frames containing detected OCR text snippets.
742
+ # Video frame level annotation results for text annotation (OCR). Contains
743
+ # information regarding timestamp and bounding box locations for the frames
744
+ # containing detected OCR text snippets.
639
745
  class GoogleCloudVideointelligenceV1TextFrame
640
746
  include Google::Apis::Core::Hashable
641
747
 
642
748
  # Normalized bounding polygon for text (that might not be aligned with axis).
643
- # Contains list of the corner points in clockwise order starting from
644
- # top-left corner. For example, for a rectangular bounding box:
645
- # When the text is horizontal it might look like:
646
- # 0----1
647
- # | |
648
- # 3----2
649
- # When it's clockwise rotated 180 degrees around the top-left corner it
650
- # becomes:
651
- # 2----3
652
- # | |
653
- # 1----0
654
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
655
- # than 0, or greater than 1 due to trignometric calculations for location of
656
- # the box.
749
+ # Contains list of the corner points in clockwise order starting from top-left
750
+ # corner. For example, for a rectangular bounding box: When the text is
751
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
752
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
753
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
754
+ # or greater than 1 due to trignometric calculations for location of the box.
657
755
  # Corresponds to the JSON property `rotatedBoundingBox`
658
756
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
659
757
  attr_accessor :rotated_bounding_box
@@ -706,9 +804,8 @@ module Google
706
804
  end
707
805
  end
708
806
 
709
- # For tracking related features.
710
- # An object at time_offset with attributes, and located with
711
- # normalized_bounding_box.
807
+ # For tracking related features. An object at time_offset with attributes, and
808
+ # located with normalized_bounding_box.
712
809
  class GoogleCloudVideointelligenceV1TimestampedObject
713
810
  include Google::Apis::Core::Hashable
714
811
 
@@ -722,15 +819,14 @@ module Google
722
819
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1DetectedLandmark>]
723
820
  attr_accessor :landmarks
724
821
 
725
- # Normalized bounding box.
726
- # The normalized vertex coordinates are relative to the original image.
727
- # Range: [0, 1].
822
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
823
+ # original image. Range: [0, 1].
728
824
  # Corresponds to the JSON property `normalizedBoundingBox`
729
825
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
730
826
  attr_accessor :normalized_bounding_box
731
827
 
732
- # Time-offset, relative to the beginning of the video,
733
- # corresponding to the video frame for this object.
828
+ # Time-offset, relative to the beginning of the video, corresponding to the
829
+ # video frame for this object.
734
830
  # Corresponds to the JSON property `timeOffset`
735
831
  # @return [String]
736
832
  attr_accessor :time_offset
@@ -789,20 +885,19 @@ module Google
789
885
  class GoogleCloudVideointelligenceV1VideoAnnotationProgress
790
886
  include Google::Apis::Core::Hashable
791
887
 
792
- # Specifies which feature is being tracked if the request contains more than
793
- # one feature.
888
+ # Specifies which feature is being tracked if the request contains more than one
889
+ # feature.
794
890
  # Corresponds to the JSON property `feature`
795
891
  # @return [String]
796
892
  attr_accessor :feature
797
893
 
798
- # Video file location in
799
- # [Cloud Storage](https://cloud.google.com/storage/).
894
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
800
895
  # Corresponds to the JSON property `inputUri`
801
896
  # @return [String]
802
897
  attr_accessor :input_uri
803
898
 
804
- # Approximate percentage processed thus far. Guaranteed to be
805
- # 100 when fully processed.
899
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
900
+ # processed.
806
901
  # Corresponds to the JSON property `progressPercent`
807
902
  # @return [Fixnum]
808
903
  attr_accessor :progress_percent
@@ -841,31 +936,40 @@ module Google
841
936
  class GoogleCloudVideointelligenceV1VideoAnnotationResults
842
937
  include Google::Apis::Core::Hashable
843
938
 
844
- # The `Status` type defines a logical error model that is suitable for
845
- # different programming environments, including REST APIs and RPC APIs. It is
846
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
847
- # three pieces of data: error code, error message, and error details.
848
- # You can find out more about this error model and how to work with it in the
849
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
939
+ # The `Status` type defines a logical error model that is suitable for different
940
+ # programming environments, including REST APIs and RPC APIs. It is used by [
941
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
942
+ # data: error code, error message, and error details. You can find out more
943
+ # about this error model and how to work with it in the [API Design Guide](https:
944
+ # //cloud.google.com/apis/design/errors).
850
945
  # Corresponds to the JSON property `error`
851
946
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
852
947
  attr_accessor :error
853
948
 
854
- # Explicit content annotation (based on per-frame visual signals only).
855
- # If no explicit content has been detected in a frame, no annotations are
856
- # present for that frame.
949
+ # Explicit content annotation (based on per-frame visual signals only). If no
950
+ # explicit content has been detected in a frame, no annotations are present for
951
+ # that frame.
857
952
  # Corresponds to the JSON property `explicitAnnotation`
858
953
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
859
954
  attr_accessor :explicit_annotation
860
955
 
861
- # Label annotations on frame level.
862
- # There is exactly one element for each unique label.
956
+ # Deprecated. Please use `face_detection_annotations` instead.
957
+ # Corresponds to the JSON property `faceAnnotations`
958
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1FaceAnnotation>]
959
+ attr_accessor :face_annotations
960
+
961
+ # Face detection annotations.
962
+ # Corresponds to the JSON property `faceDetectionAnnotations`
963
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1FaceDetectionAnnotation>]
964
+ attr_accessor :face_detection_annotations
965
+
966
+ # Label annotations on frame level. There is exactly one element for each unique
967
+ # label.
863
968
  # Corresponds to the JSON property `frameLabelAnnotations`
864
969
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
865
970
  attr_accessor :frame_label_annotations
866
971
 
867
- # Video file location in
868
- # [Cloud Storage](https://cloud.google.com/storage/).
972
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
869
973
  # Corresponds to the JSON property `inputUri`
870
974
  # @return [String]
871
975
  attr_accessor :input_uri
@@ -880,6 +984,11 @@ module Google
880
984
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation>]
881
985
  attr_accessor :object_annotations
882
986
 
987
+ # Person detection annotations.
988
+ # Corresponds to the JSON property `personDetectionAnnotations`
989
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1PersonDetectionAnnotation>]
990
+ attr_accessor :person_detection_annotations
991
+
883
992
  # Video segment.
884
993
  # Corresponds to the JSON property `segment`
885
994
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment]
@@ -892,11 +1001,11 @@ module Google
892
1001
  attr_accessor :segment_label_annotations
893
1002
 
894
1003
  # Presence label annotations on video level or user-specified segment level.
895
- # There is exactly one element for each unique label. Compared to the
896
- # existing topical `segment_label_annotations`, this field presents more
897
- # fine-grained, segment-level labels detected in video content and is made
898
- # available only when the client sets `LabelDetectionConfig.model` to
899
- # "builtin/latest" in the request.
1004
+ # There is exactly one element for each unique label. Compared to the existing
1005
+ # topical `segment_label_annotations`, this field presents more fine-grained,
1006
+ # segment-level labels detected in video content and is made available only when
1007
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
1008
+ # request.
900
1009
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
901
1010
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
902
1011
  attr_accessor :segment_presence_label_annotations
@@ -906,17 +1015,17 @@ module Google
906
1015
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment>]
907
1016
  attr_accessor :shot_annotations
908
1017
 
909
- # Topical label annotations on shot level.
910
- # There is exactly one element for each unique label.
1018
+ # Topical label annotations on shot level. There is exactly one element for each
1019
+ # unique label.
911
1020
  # Corresponds to the JSON property `shotLabelAnnotations`
912
1021
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
913
1022
  attr_accessor :shot_label_annotations
914
1023
 
915
1024
  # Presence label annotations on shot level. There is exactly one element for
916
- # each unique label. Compared to the existing topical
917
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
918
- # labels detected in video content and is made available only when the client
919
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
1025
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
1026
+ # this field presents more fine-grained, shot-level labels detected in video
1027
+ # content and is made available only when the client sets `LabelDetectionConfig.
1028
+ # model` to "builtin/latest" in the request.
920
1029
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
921
1030
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
922
1031
  attr_accessor :shot_presence_label_annotations
@@ -926,9 +1035,8 @@ module Google
926
1035
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1SpeechTranscription>]
927
1036
  attr_accessor :speech_transcriptions
928
1037
 
929
- # OCR text detection and tracking.
930
- # Annotations for list of detected text snippets. Each will have list of
931
- # frame information associated with it.
1038
+ # OCR text detection and tracking. Annotations for list of detected text
1039
+ # snippets. Each will have list of frame information associated with it.
932
1040
  # Corresponds to the JSON property `textAnnotations`
933
1041
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextAnnotation>]
934
1042
  attr_accessor :text_annotations
@@ -941,10 +1049,13 @@ module Google
941
1049
  def update!(**args)
942
1050
  @error = args[:error] if args.key?(:error)
943
1051
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
1052
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
1053
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
944
1054
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
945
1055
  @input_uri = args[:input_uri] if args.key?(:input_uri)
946
1056
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
947
1057
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
1058
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
948
1059
  @segment = args[:segment] if args.key?(:segment)
949
1060
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
950
1061
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -960,14 +1071,14 @@ module Google
960
1071
  class GoogleCloudVideointelligenceV1VideoSegment
961
1072
  include Google::Apis::Core::Hashable
962
1073
 
963
- # Time-offset, relative to the beginning of the video,
964
- # corresponding to the end of the segment (inclusive).
1074
+ # Time-offset, relative to the beginning of the video, corresponding to the end
1075
+ # of the segment (inclusive).
965
1076
  # Corresponds to the JSON property `endTimeOffset`
966
1077
  # @return [String]
967
1078
  attr_accessor :end_time_offset
968
1079
 
969
- # Time-offset, relative to the beginning of the video,
970
- # corresponding to the start of the segment (inclusive).
1080
+ # Time-offset, relative to the beginning of the video, corresponding to the
1081
+ # start of the segment (inclusive).
971
1082
  # Corresponds to the JSON property `startTimeOffset`
972
1083
  # @return [String]
973
1084
  attr_accessor :start_time_offset
@@ -984,41 +1095,41 @@ module Google
984
1095
  end
985
1096
 
986
1097
  # Word-specific information for recognized words. Word information is only
987
- # included in the response when certain request parameters are set, such
988
- # as `enable_word_time_offsets`.
1098
+ # included in the response when certain request parameters are set, such as `
1099
+ # enable_word_time_offsets`.
989
1100
  class GoogleCloudVideointelligenceV1WordInfo
990
1101
  include Google::Apis::Core::Hashable
991
1102
 
992
1103
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
993
1104
  # indicates an estimated greater likelihood that the recognized words are
994
- # correct. This field is set only for the top alternative.
995
- # This field is not guaranteed to be accurate and users should not rely on it
996
- # to be always provided.
997
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1105
+ # correct. This field is set only for the top alternative. This field is not
1106
+ # guaranteed to be accurate and users should not rely on it to be always
1107
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1108
+ # not set.
998
1109
  # Corresponds to the JSON property `confidence`
999
1110
  # @return [Float]
1000
1111
  attr_accessor :confidence
1001
1112
 
1002
- # Time offset relative to the beginning of the audio, and
1003
- # corresponding to the end of the spoken word. This field is only set if
1004
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1005
- # experimental feature and the accuracy of the time offset can vary.
1113
+ # Time offset relative to the beginning of the audio, and corresponding to the
1114
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
1115
+ # true` and only in the top hypothesis. This is an experimental feature and the
1116
+ # accuracy of the time offset can vary.
1006
1117
  # Corresponds to the JSON property `endTime`
1007
1118
  # @return [String]
1008
1119
  attr_accessor :end_time
1009
1120
 
1010
- # Output only. A distinct integer value is assigned for every speaker within
1011
- # the audio. This field specifies which one of those speakers was detected to
1012
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
1013
- # and is only set if speaker diarization is enabled.
1121
+ # Output only. A distinct integer value is assigned for every speaker within the
1122
+ # audio. This field specifies which one of those speakers was detected to have
1123
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
1124
+ # only set if speaker diarization is enabled.
1014
1125
  # Corresponds to the JSON property `speakerTag`
1015
1126
  # @return [Fixnum]
1016
1127
  attr_accessor :speaker_tag
1017
1128
 
1018
- # Time offset relative to the beginning of the audio, and
1019
- # corresponding to the start of the spoken word. This field is only set if
1020
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
1021
- # experimental feature and the accuracy of the time offset can vary.
1129
+ # Time offset relative to the beginning of the audio, and corresponding to the
1130
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
1131
+ # true` and only in the top hypothesis. This is an experimental feature and the
1132
+ # accuracy of the time offset can vary.
1022
1133
  # Corresponds to the JSON property `startTime`
1023
1134
  # @return [String]
1024
1135
  attr_accessor :start_time
@@ -1042,9 +1153,9 @@ module Google
1042
1153
  end
1043
1154
  end
1044
1155
 
1045
- # Video annotation progress. Included in the `metadata`
1046
- # field of the `Operation` returned by the `GetOperation`
1047
- # call of the `google::longrunning::Operations` service.
1156
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
1157
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1158
+ # service.
1048
1159
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
1049
1160
  include Google::Apis::Core::Hashable
1050
1161
 
@@ -1063,9 +1174,9 @@ module Google
1063
1174
  end
1064
1175
  end
1065
1176
 
1066
- # Video annotation response. Included in the `response`
1067
- # field of the `Operation` returned by the `GetOperation`
1068
- # call of the `google::longrunning::Operations` service.
1177
+ # Video annotation response. Included in the `response` field of the `Operation`
1178
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
1179
+ # service.
1069
1180
  class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
1070
1181
  include Google::Apis::Core::Hashable
1071
1182
 
@@ -1093,14 +1204,14 @@ module Google
1093
1204
  # @return [Float]
1094
1205
  attr_accessor :confidence
1095
1206
 
1096
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
1097
- # A full list of supported type names will be provided in the document.
1207
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
1208
+ # full list of supported type names will be provided in the document.
1098
1209
  # Corresponds to the JSON property `name`
1099
1210
  # @return [String]
1100
1211
  attr_accessor :name
1101
1212
 
1102
- # Text value of the detection result. For example, the value for "HairColor"
1103
- # can be "black", "blonde", etc.
1213
+ # Text value of the detection result. For example, the value for "HairColor" can
1214
+ # be "black", "blonde", etc.
1104
1215
  # Corresponds to the JSON property `value`
1105
1216
  # @return [String]
1106
1217
  attr_accessor :value
@@ -1132,9 +1243,8 @@ module Google
1132
1243
  # @return [String]
1133
1244
  attr_accessor :name
1134
1245
 
1135
- # A vertex represents a 2D point in the image.
1136
- # NOTE: the normalized vertex coordinates are relative to the original image
1137
- # and range from 0 to 1.
1246
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1247
+ # coordinates are relative to the original image and range from 0 to 1.
1138
1248
  # Corresponds to the JSON property `point`
1139
1249
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
1140
1250
  attr_accessor :point
@@ -1160,8 +1270,7 @@ module Google
1160
1270
  # @return [String]
1161
1271
  attr_accessor :description
1162
1272
 
1163
- # Opaque entity ID. Some IDs may be available in
1164
- # [Google Knowledge Graph Search
1273
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
1165
1274
  # API](https://developers.google.com/knowledge-graph/).
1166
1275
  # Corresponds to the JSON property `entityId`
1167
1276
  # @return [String]
@@ -1184,9 +1293,9 @@ module Google
1184
1293
  end
1185
1294
  end
1186
1295
 
1187
- # Explicit content annotation (based on per-frame visual signals only).
1188
- # If no explicit content has been detected in a frame, no annotations are
1189
- # present for that frame.
1296
+ # Explicit content annotation (based on per-frame visual signals only). If no
1297
+ # explicit content has been detected in a frame, no annotations are present for
1298
+ # that frame.
1190
1299
  class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
1191
1300
  include Google::Apis::Core::Hashable
1192
1301
 
@@ -1237,14 +1346,110 @@ module Google
1237
1346
  end
1238
1347
  end
1239
1348
 
1349
+ # Deprecated. No effect.
1350
+ class GoogleCloudVideointelligenceV1beta2FaceAnnotation
1351
+ include Google::Apis::Core::Hashable
1352
+
1353
+ # All video frames where a face was detected.
1354
+ # Corresponds to the JSON property `frames`
1355
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2FaceFrame>]
1356
+ attr_accessor :frames
1357
+
1358
+ # All video segments where a face was detected.
1359
+ # Corresponds to the JSON property `segments`
1360
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2FaceSegment>]
1361
+ attr_accessor :segments
1362
+
1363
+ # Thumbnail of a representative face view (in JPEG format).
1364
+ # Corresponds to the JSON property `thumbnail`
1365
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
1366
+ # @return [String]
1367
+ attr_accessor :thumbnail
1368
+
1369
+ def initialize(**args)
1370
+ update!(**args)
1371
+ end
1372
+
1373
+ # Update properties of this object
1374
+ def update!(**args)
1375
+ @frames = args[:frames] if args.key?(:frames)
1376
+ @segments = args[:segments] if args.key?(:segments)
1377
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
1378
+ end
1379
+ end
1380
+
1381
+ # Face detection annotation.
1382
+ class GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation
1383
+ include Google::Apis::Core::Hashable
1384
+
1385
+ # Feature version.
1386
+ # Corresponds to the JSON property `version`
1387
+ # @return [String]
1388
+ attr_accessor :version
1389
+
1390
+ def initialize(**args)
1391
+ update!(**args)
1392
+ end
1393
+
1394
+ # Update properties of this object
1395
+ def update!(**args)
1396
+ @version = args[:version] if args.key?(:version)
1397
+ end
1398
+ end
1399
+
1400
+ # Deprecated. No effect.
1401
+ class GoogleCloudVideointelligenceV1beta2FaceFrame
1402
+ include Google::Apis::Core::Hashable
1403
+
1404
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
1405
+ # same face is detected in multiple locations within the current frame.
1406
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
1407
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox>]
1408
+ attr_accessor :normalized_bounding_boxes
1409
+
1410
+ # Time-offset, relative to the beginning of the video, corresponding to the
1411
+ # video frame for this location.
1412
+ # Corresponds to the JSON property `timeOffset`
1413
+ # @return [String]
1414
+ attr_accessor :time_offset
1415
+
1416
+ def initialize(**args)
1417
+ update!(**args)
1418
+ end
1419
+
1420
+ # Update properties of this object
1421
+ def update!(**args)
1422
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
1423
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
1424
+ end
1425
+ end
1426
+
1427
+ # Video segment level annotation results for face detection.
1428
+ class GoogleCloudVideointelligenceV1beta2FaceSegment
1429
+ include Google::Apis::Core::Hashable
1430
+
1431
+ # Video segment.
1432
+ # Corresponds to the JSON property `segment`
1433
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
1434
+ attr_accessor :segment
1435
+
1436
+ def initialize(**args)
1437
+ update!(**args)
1438
+ end
1439
+
1440
+ # Update properties of this object
1441
+ def update!(**args)
1442
+ @segment = args[:segment] if args.key?(:segment)
1443
+ end
1444
+ end
1445
+
1240
1446
  # Label annotation.
1241
1447
  class GoogleCloudVideointelligenceV1beta2LabelAnnotation
1242
1448
  include Google::Apis::Core::Hashable
1243
1449
 
1244
- # Common categories for the detected entity.
1245
- # For example, when the label is `Terrier`, the category is likely `dog`. And
1246
- # in some cases there might be more than one categories e.g., `Terrier` could
1247
- # also be a `pet`.
1450
+ # Common categories for the detected entity. For example, when the label is `
1451
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
1452
+ # than one categories e.g., `Terrier` could also be a `pet`.
1248
1453
  # Corresponds to the JSON property `categoryEntities`
1249
1454
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Entity>]
1250
1455
  attr_accessor :category_entities
@@ -1343,14 +1548,14 @@ module Google
1343
1548
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Entity]
1344
1549
  attr_accessor :entity
1345
1550
 
1346
- # All video segments where the recognized logo appears. There might be
1347
- # multiple instances of the same logo class appearing in one VideoSegment.
1551
+ # All video segments where the recognized logo appears. There might be multiple
1552
+ # instances of the same logo class appearing in one VideoSegment.
1348
1553
  # Corresponds to the JSON property `segments`
1349
1554
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
1350
1555
  attr_accessor :segments
1351
1556
 
1352
- # All logo tracks where the recognized logo appears. Each track corresponds
1353
- # to one logo instance appearing in consecutive frames.
1557
+ # All logo tracks where the recognized logo appears. Each track corresponds to
1558
+ # one logo instance appearing in consecutive frames.
1354
1559
  # Corresponds to the JSON property `tracks`
1355
1560
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Track>]
1356
1561
  attr_accessor :tracks
@@ -1367,9 +1572,8 @@ module Google
1367
1572
  end
1368
1573
  end
1369
1574
 
1370
- # Normalized bounding box.
1371
- # The normalized vertex coordinates are relative to the original image.
1372
- # Range: [0, 1].
1575
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1576
+ # original image. Range: [0, 1].
1373
1577
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
1374
1578
  include Google::Apis::Core::Hashable
1375
1579
 
@@ -1407,20 +1611,12 @@ module Google
1407
1611
  end
1408
1612
 
1409
1613
  # Normalized bounding polygon for text (that might not be aligned with axis).
1410
- # Contains list of the corner points in clockwise order starting from
1411
- # top-left corner. For example, for a rectangular bounding box:
1412
- # When the text is horizontal it might look like:
1413
- # 0----1
1414
- # | |
1415
- # 3----2
1416
- # When it's clockwise rotated 180 degrees around the top-left corner it
1417
- # becomes:
1418
- # 2----3
1419
- # | |
1420
- # 1----0
1421
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1422
- # than 0, or greater than 1 due to trignometric calculations for location of
1423
- # the box.
1614
+ # Contains list of the corner points in clockwise order starting from top-left
1615
+ # corner. For example, for a rectangular bounding box: When the text is
1616
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1617
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1618
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1619
+ # or greater than 1 due to trignometric calculations for location of the box.
1424
1620
  class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
1425
1621
  include Google::Apis::Core::Hashable
1426
1622
 
@@ -1439,9 +1635,8 @@ module Google
1439
1635
  end
1440
1636
  end
1441
1637
 
1442
- # A vertex represents a 2D point in the image.
1443
- # NOTE: the normalized vertex coordinates are relative to the original image
1444
- # and range from 0 to 1.
1638
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
1639
+ # coordinates are relative to the original image and range from 0 to 1.
1445
1640
  class GoogleCloudVideointelligenceV1beta2NormalizedVertex
1446
1641
  include Google::Apis::Core::Hashable
1447
1642
 
@@ -1480,10 +1675,10 @@ module Google
1480
1675
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Entity]
1481
1676
  attr_accessor :entity
1482
1677
 
1483
- # Information corresponding to all frames where this object track appears.
1484
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
1485
- # messages in frames.
1486
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
1678
+ # Information corresponding to all frames where this object track appears. Non-
1679
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
1680
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
1681
+ # frames.
1487
1682
  # Corresponds to the JSON property `frames`
1488
1683
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
1489
1684
  attr_accessor :frames
@@ -1493,12 +1688,11 @@ module Google
1493
1688
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
1494
1689
  attr_accessor :segment
1495
1690
 
1496
- # Streaming mode ONLY.
1497
- # In streaming mode, we do not know the end time of a tracked object
1498
- # before it is completed. Hence, there is no VideoSegment info returned.
1499
- # Instead, we provide a unique identifiable integer track_id so that
1500
- # the customers can correlate the results of the ongoing
1501
- # ObjectTrackAnnotation of the same track_id over time.
1691
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
1692
+ # tracked object before it is completed. Hence, there is no VideoSegment info
1693
+ # returned. Instead, we provide a unique identifiable integer track_id so that
1694
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
1695
+ # of the same track_id over time.
1502
1696
  # Corresponds to the JSON property `trackId`
1503
1697
  # @return [Fixnum]
1504
1698
  attr_accessor :track_id
@@ -1528,9 +1722,8 @@ module Google
1528
1722
  class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
1529
1723
  include Google::Apis::Core::Hashable
1530
1724
 
1531
- # Normalized bounding box.
1532
- # The normalized vertex coordinates are relative to the original image.
1533
- # Range: [0, 1].
1725
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1726
+ # original image. Range: [0, 1].
1534
1727
  # Corresponds to the JSON property `normalizedBoundingBox`
1535
1728
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1536
1729
  attr_accessor :normalized_bounding_box
@@ -1551,16 +1744,41 @@ module Google
1551
1744
  end
1552
1745
  end
1553
1746
 
1747
+ # Person detection annotation per video.
1748
+ class GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation
1749
+ include Google::Apis::Core::Hashable
1750
+
1751
+ # The detected tracks of a person.
1752
+ # Corresponds to the JSON property `tracks`
1753
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Track>]
1754
+ attr_accessor :tracks
1755
+
1756
+ # Feature version.
1757
+ # Corresponds to the JSON property `version`
1758
+ # @return [String]
1759
+ attr_accessor :version
1760
+
1761
+ def initialize(**args)
1762
+ update!(**args)
1763
+ end
1764
+
1765
+ # Update properties of this object
1766
+ def update!(**args)
1767
+ @tracks = args[:tracks] if args.key?(:tracks)
1768
+ @version = args[:version] if args.key?(:version)
1769
+ end
1770
+ end
1771
+
1554
1772
  # Alternative hypotheses (a.k.a. n-best list).
1555
1773
  class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative
1556
1774
  include Google::Apis::Core::Hashable
1557
1775
 
1558
1776
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
1559
1777
  # indicates an estimated greater likelihood that the recognized words are
1560
- # correct. This field is set only for the top alternative.
1561
- # This field is not guaranteed to be accurate and users should not rely on it
1562
- # to be always provided.
1563
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
1778
+ # correct. This field is set only for the top alternative. This field is not
1779
+ # guaranteed to be accurate and users should not rely on it to be always
1780
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
1781
+ # not set.
1564
1782
  # Corresponds to the JSON property `confidence`
1565
1783
  # @return [Float]
1566
1784
  attr_accessor :confidence
@@ -1571,8 +1789,8 @@ module Google
1571
1789
  attr_accessor :transcript
1572
1790
 
1573
1791
  # Output only. A list of word-specific information for each recognized word.
1574
- # Note: When `enable_speaker_diarization` is set to true, you will see all
1575
- # the words from the beginning of the audio.
1792
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
1793
+ # words from the beginning of the audio.
1576
1794
  # Corresponds to the JSON property `words`
1577
1795
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2WordInfo>]
1578
1796
  attr_accessor :words
@@ -1593,18 +1811,17 @@ module Google
1593
1811
  class GoogleCloudVideointelligenceV1beta2SpeechTranscription
1594
1812
  include Google::Apis::Core::Hashable
1595
1813
 
1596
- # May contain one or more recognition hypotheses (up to the maximum specified
1597
- # in `max_alternatives`). These alternatives are ordered in terms of
1598
- # accuracy, with the top (first) alternative being the most probable, as
1599
- # ranked by the recognizer.
1814
+ # May contain one or more recognition hypotheses (up to the maximum specified in
1815
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
1816
+ # the top (first) alternative being the most probable, as ranked by the
1817
+ # recognizer.
1600
1818
  # Corresponds to the JSON property `alternatives`
1601
1819
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
1602
1820
  attr_accessor :alternatives
1603
1821
 
1604
1822
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
1605
- # language tag of
1606
- # the language in this result. This language code was detected to have the
1607
- # most likelihood of being spoken in the audio.
1823
+ # language tag of the language in this result. This language code was detected
1824
+ # to have the most likelihood of being spoken in the audio.
1608
1825
  # Corresponds to the JSON property `languageCode`
1609
1826
  # @return [String]
1610
1827
  attr_accessor :language_code
@@ -1653,27 +1870,19 @@ module Google
1653
1870
  end
1654
1871
  end
1655
1872
 
1656
- # Video frame level annotation results for text annotation (OCR).
1657
- # Contains information regarding timestamp and bounding box locations for the
1658
- # frames containing detected OCR text snippets.
1873
+ # Video frame level annotation results for text annotation (OCR). Contains
1874
+ # information regarding timestamp and bounding box locations for the frames
1875
+ # containing detected OCR text snippets.
1659
1876
  class GoogleCloudVideointelligenceV1beta2TextFrame
1660
1877
  include Google::Apis::Core::Hashable
1661
1878
 
1662
1879
  # Normalized bounding polygon for text (that might not be aligned with axis).
1663
- # Contains list of the corner points in clockwise order starting from
1664
- # top-left corner. For example, for a rectangular bounding box:
1665
- # When the text is horizontal it might look like:
1666
- # 0----1
1667
- # | |
1668
- # 3----2
1669
- # When it's clockwise rotated 180 degrees around the top-left corner it
1670
- # becomes:
1671
- # 2----3
1672
- # | |
1673
- # 1----0
1674
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
1675
- # than 0, or greater than 1 due to trignometric calculations for location of
1676
- # the box.
1880
+ # Contains list of the corner points in clockwise order starting from top-left
1881
+ # corner. For example, for a rectangular bounding box: When the text is
1882
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
1883
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
1884
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
1885
+ # or greater than 1 due to trignometric calculations for location of the box.
1677
1886
  # Corresponds to the JSON property `rotatedBoundingBox`
1678
1887
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
1679
1888
  attr_accessor :rotated_bounding_box
@@ -1726,9 +1935,8 @@ module Google
1726
1935
  end
1727
1936
  end
1728
1937
 
1729
- # For tracking related features.
1730
- # An object at time_offset with attributes, and located with
1731
- # normalized_bounding_box.
1938
+ # For tracking related features. An object at time_offset with attributes, and
1939
+ # located with normalized_bounding_box.
1732
1940
  class GoogleCloudVideointelligenceV1beta2TimestampedObject
1733
1941
  include Google::Apis::Core::Hashable
1734
1942
 
@@ -1742,15 +1950,14 @@ module Google
1742
1950
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
1743
1951
  attr_accessor :landmarks
1744
1952
 
1745
- # Normalized bounding box.
1746
- # The normalized vertex coordinates are relative to the original image.
1747
- # Range: [0, 1].
1953
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
1954
+ # original image. Range: [0, 1].
1748
1955
  # Corresponds to the JSON property `normalizedBoundingBox`
1749
1956
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
1750
1957
  attr_accessor :normalized_bounding_box
1751
1958
 
1752
- # Time-offset, relative to the beginning of the video,
1753
- # corresponding to the video frame for this object.
1959
+ # Time-offset, relative to the beginning of the video, corresponding to the
1960
+ # video frame for this object.
1754
1961
  # Corresponds to the JSON property `timeOffset`
1755
1962
  # @return [String]
1756
1963
  attr_accessor :time_offset
@@ -1809,20 +2016,19 @@ module Google
1809
2016
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
1810
2017
  include Google::Apis::Core::Hashable
1811
2018
 
1812
- # Specifies which feature is being tracked if the request contains more than
1813
- # one feature.
2019
+ # Specifies which feature is being tracked if the request contains more than one
2020
+ # feature.
1814
2021
  # Corresponds to the JSON property `feature`
1815
2022
  # @return [String]
1816
2023
  attr_accessor :feature
1817
2024
 
1818
- # Video file location in
1819
- # [Cloud Storage](https://cloud.google.com/storage/).
2025
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
1820
2026
  # Corresponds to the JSON property `inputUri`
1821
2027
  # @return [String]
1822
2028
  attr_accessor :input_uri
1823
2029
 
1824
- # Approximate percentage processed thus far. Guaranteed to be
1825
- # 100 when fully processed.
2030
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
2031
+ # processed.
1826
2032
  # Corresponds to the JSON property `progressPercent`
1827
2033
  # @return [Fixnum]
1828
2034
  attr_accessor :progress_percent
@@ -1861,31 +2067,40 @@ module Google
1861
2067
  class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
1862
2068
  include Google::Apis::Core::Hashable
1863
2069
 
1864
- # The `Status` type defines a logical error model that is suitable for
1865
- # different programming environments, including REST APIs and RPC APIs. It is
1866
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1867
- # three pieces of data: error code, error message, and error details.
1868
- # You can find out more about this error model and how to work with it in the
1869
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
2070
+ # The `Status` type defines a logical error model that is suitable for different
2071
+ # programming environments, including REST APIs and RPC APIs. It is used by [
2072
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
2073
+ # data: error code, error message, and error details. You can find out more
2074
+ # about this error model and how to work with it in the [API Design Guide](https:
2075
+ # //cloud.google.com/apis/design/errors).
1870
2076
  # Corresponds to the JSON property `error`
1871
2077
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
1872
2078
  attr_accessor :error
1873
2079
 
1874
- # Explicit content annotation (based on per-frame visual signals only).
1875
- # If no explicit content has been detected in a frame, no annotations are
1876
- # present for that frame.
2080
+ # Explicit content annotation (based on per-frame visual signals only). If no
2081
+ # explicit content has been detected in a frame, no annotations are present for
2082
+ # that frame.
1877
2083
  # Corresponds to the JSON property `explicitAnnotation`
1878
2084
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
1879
2085
  attr_accessor :explicit_annotation
1880
2086
 
1881
- # Label annotations on frame level.
1882
- # There is exactly one element for each unique label.
2087
+ # Deprecated. Please use `face_detection_annotations` instead.
2088
+ # Corresponds to the JSON property `faceAnnotations`
2089
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2FaceAnnotation>]
2090
+ attr_accessor :face_annotations
2091
+
2092
+ # Face detection annotations.
2093
+ # Corresponds to the JSON property `faceDetectionAnnotations`
2094
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation>]
2095
+ attr_accessor :face_detection_annotations
2096
+
2097
+ # Label annotations on frame level. There is exactly one element for each unique
2098
+ # label.
1883
2099
  # Corresponds to the JSON property `frameLabelAnnotations`
1884
2100
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1885
2101
  attr_accessor :frame_label_annotations
1886
2102
 
1887
- # Video file location in
1888
- # [Cloud Storage](https://cloud.google.com/storage/).
2103
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
1889
2104
  # Corresponds to the JSON property `inputUri`
1890
2105
  # @return [String]
1891
2106
  attr_accessor :input_uri
@@ -1900,6 +2115,11 @@ module Google
1900
2115
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation>]
1901
2116
  attr_accessor :object_annotations
1902
2117
 
2118
+ # Person detection annotations.
2119
+ # Corresponds to the JSON property `personDetectionAnnotations`
2120
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation>]
2121
+ attr_accessor :person_detection_annotations
2122
+
1903
2123
  # Video segment.
1904
2124
  # Corresponds to the JSON property `segment`
1905
2125
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
@@ -1912,11 +2132,11 @@ module Google
1912
2132
  attr_accessor :segment_label_annotations
1913
2133
 
1914
2134
  # Presence label annotations on video level or user-specified segment level.
1915
- # There is exactly one element for each unique label. Compared to the
1916
- # existing topical `segment_label_annotations`, this field presents more
1917
- # fine-grained, segment-level labels detected in video content and is made
1918
- # available only when the client sets `LabelDetectionConfig.model` to
1919
- # "builtin/latest" in the request.
2135
+ # There is exactly one element for each unique label. Compared to the existing
2136
+ # topical `segment_label_annotations`, this field presents more fine-grained,
2137
+ # segment-level labels detected in video content and is made available only when
2138
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
2139
+ # request.
1920
2140
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
1921
2141
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1922
2142
  attr_accessor :segment_presence_label_annotations
@@ -1926,17 +2146,17 @@ module Google
1926
2146
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
1927
2147
  attr_accessor :shot_annotations
1928
2148
 
1929
- # Topical label annotations on shot level.
1930
- # There is exactly one element for each unique label.
2149
+ # Topical label annotations on shot level. There is exactly one element for each
2150
+ # unique label.
1931
2151
  # Corresponds to the JSON property `shotLabelAnnotations`
1932
2152
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1933
2153
  attr_accessor :shot_label_annotations
1934
2154
 
1935
2155
  # Presence label annotations on shot level. There is exactly one element for
1936
- # each unique label. Compared to the existing topical
1937
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
1938
- # labels detected in video content and is made available only when the client
1939
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
2156
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
2157
+ # this field presents more fine-grained, shot-level labels detected in video
2158
+ # content and is made available only when the client sets `LabelDetectionConfig.
2159
+ # model` to "builtin/latest" in the request.
1940
2160
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
1941
2161
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
1942
2162
  attr_accessor :shot_presence_label_annotations
@@ -1946,9 +2166,8 @@ module Google
1946
2166
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
1947
2167
  attr_accessor :speech_transcriptions
1948
2168
 
1949
- # OCR text detection and tracking.
1950
- # Annotations for list of detected text snippets. Each will have list of
1951
- # frame information associated with it.
2169
+ # OCR text detection and tracking. Annotations for list of detected text
2170
+ # snippets. Each will have list of frame information associated with it.
1952
2171
  # Corresponds to the JSON property `textAnnotations`
1953
2172
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
1954
2173
  attr_accessor :text_annotations
@@ -1961,10 +2180,13 @@ module Google
1961
2180
  def update!(**args)
1962
2181
  @error = args[:error] if args.key?(:error)
1963
2182
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
2183
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
2184
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
1964
2185
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
1965
2186
  @input_uri = args[:input_uri] if args.key?(:input_uri)
1966
2187
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
1967
2188
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
2189
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
1968
2190
  @segment = args[:segment] if args.key?(:segment)
1969
2191
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
1970
2192
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -1980,14 +2202,14 @@ module Google
1980
2202
  class GoogleCloudVideointelligenceV1beta2VideoSegment
1981
2203
  include Google::Apis::Core::Hashable
1982
2204
 
1983
- # Time-offset, relative to the beginning of the video,
1984
- # corresponding to the end of the segment (inclusive).
2205
+ # Time-offset, relative to the beginning of the video, corresponding to the end
2206
+ # of the segment (inclusive).
1985
2207
  # Corresponds to the JSON property `endTimeOffset`
1986
2208
  # @return [String]
1987
2209
  attr_accessor :end_time_offset
1988
2210
 
1989
- # Time-offset, relative to the beginning of the video,
1990
- # corresponding to the start of the segment (inclusive).
2211
+ # Time-offset, relative to the beginning of the video, corresponding to the
2212
+ # start of the segment (inclusive).
1991
2213
  # Corresponds to the JSON property `startTimeOffset`
1992
2214
  # @return [String]
1993
2215
  attr_accessor :start_time_offset
@@ -2004,41 +2226,41 @@ module Google
2004
2226
  end
2005
2227
 
2006
2228
  # Word-specific information for recognized words. Word information is only
2007
- # included in the response when certain request parameters are set, such
2008
- # as `enable_word_time_offsets`.
2229
+ # included in the response when certain request parameters are set, such as `
2230
+ # enable_word_time_offsets`.
2009
2231
  class GoogleCloudVideointelligenceV1beta2WordInfo
2010
2232
  include Google::Apis::Core::Hashable
2011
2233
 
2012
2234
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2013
2235
  # indicates an estimated greater likelihood that the recognized words are
2014
- # correct. This field is set only for the top alternative.
2015
- # This field is not guaranteed to be accurate and users should not rely on it
2016
- # to be always provided.
2017
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
2236
+ # correct. This field is set only for the top alternative. This field is not
2237
+ # guaranteed to be accurate and users should not rely on it to be always
2238
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
2239
+ # not set.
2018
2240
  # Corresponds to the JSON property `confidence`
2019
2241
  # @return [Float]
2020
2242
  attr_accessor :confidence
2021
2243
 
2022
- # Time offset relative to the beginning of the audio, and
2023
- # corresponding to the end of the spoken word. This field is only set if
2024
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2025
- # experimental feature and the accuracy of the time offset can vary.
2244
+ # Time offset relative to the beginning of the audio, and corresponding to the
2245
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
2246
+ # true` and only in the top hypothesis. This is an experimental feature and the
2247
+ # accuracy of the time offset can vary.
2026
2248
  # Corresponds to the JSON property `endTime`
2027
2249
  # @return [String]
2028
2250
  attr_accessor :end_time
2029
2251
 
2030
- # Output only. A distinct integer value is assigned for every speaker within
2031
- # the audio. This field specifies which one of those speakers was detected to
2032
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
2033
- # and is only set if speaker diarization is enabled.
2252
+ # Output only. A distinct integer value is assigned for every speaker within the
2253
+ # audio. This field specifies which one of those speakers was detected to have
2254
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
2255
+ # only set if speaker diarization is enabled.
2034
2256
  # Corresponds to the JSON property `speakerTag`
2035
2257
  # @return [Fixnum]
2036
2258
  attr_accessor :speaker_tag
2037
2259
 
2038
- # Time offset relative to the beginning of the audio, and
2039
- # corresponding to the start of the spoken word. This field is only set if
2040
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
2041
- # experimental feature and the accuracy of the time offset can vary.
2260
+ # Time offset relative to the beginning of the audio, and corresponding to the
2261
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
2262
+ # true` and only in the top hypothesis. This is an experimental feature and the
2263
+ # accuracy of the time offset can vary.
2042
2264
  # Corresponds to the JSON property `startTime`
2043
2265
  # @return [String]
2044
2266
  attr_accessor :start_time
@@ -2062,9 +2284,9 @@ module Google
2062
2284
  end
2063
2285
  end
2064
2286
 
2065
- # Video annotation progress. Included in the `metadata`
2066
- # field of the `Operation` returned by the `GetOperation`
2067
- # call of the `google::longrunning::Operations` service.
2287
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
2288
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2289
+ # service.
2068
2290
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
2069
2291
  include Google::Apis::Core::Hashable
2070
2292
 
@@ -2092,24 +2314,22 @@ module Google
2092
2314
  # @return [Array<String>]
2093
2315
  attr_accessor :features
2094
2316
 
2095
- # The video data bytes.
2096
- # If unset, the input video(s) should be specified via the `input_uri`.
2097
- # If set, `input_uri` must be unset.
2317
+ # The video data bytes. If unset, the input video(s) should be specified via the
2318
+ # `input_uri`. If set, `input_uri` must be unset.
2098
2319
  # Corresponds to the JSON property `inputContent`
2099
2320
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
2100
2321
  # @return [String]
2101
2322
  attr_accessor :input_content
2102
2323
 
2103
- # Input video location. Currently, only
2104
- # [Cloud Storage](https://cloud.google.com/storage/) URIs are
2105
- # supported. URIs must be specified in the following format:
2106
- # `gs://bucket-id/object-id` (other URI formats return
2107
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
2108
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
2109
- # To identify multiple videos, a video URI may include wildcards in the
2110
- # `object-id`. Supported wildcards: '*' to match 0 or more characters;
2111
- # '?' to match 1 character. If unset, the input video should be embedded
2112
- # in the request as `input_content`. If set, `input_content` must be unset.
2324
+ # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
2325
+ # storage/) URIs are supported. URIs must be specified in the following format: `
2326
+ # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
2327
+ # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
2328
+ # google.com/storage/docs/request-endpoints). To identify multiple videos, a
2329
+ # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
2330
+ # to match 0 or more characters; '?' to match 1 character. If unset, the input
2331
+ # video should be embedded in the request as `input_content`. If set, `
2332
+ # input_content` must be unset.
2113
2333
  # Corresponds to the JSON property `inputUri`
2114
2334
  # @return [String]
2115
2335
  attr_accessor :input_uri
@@ -2123,11 +2343,11 @@ module Google
2123
2343
  attr_accessor :location_id
2124
2344
 
2125
2345
  # Optional. Location where the output (in JSON format) should be stored.
2126
- # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
2127
- # URIs are supported. These must be specified in the following format:
2128
- # `gs://bucket-id/object-id` (other URI formats return
2129
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
2130
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
2346
+ # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
2347
+ # supported. These must be specified in the following format: `gs://bucket-id/
2348
+ # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
2349
+ # more information, see [Request URIs](https://cloud.google.com/storage/docs/
2350
+ # request-endpoints).
2131
2351
  # Corresponds to the JSON property `outputUri`
2132
2352
  # @return [String]
2133
2353
  attr_accessor :output_uri
@@ -2152,9 +2372,9 @@ module Google
2152
2372
  end
2153
2373
  end
2154
2374
 
2155
- # Video annotation response. Included in the `response`
2156
- # field of the `Operation` returned by the `GetOperation`
2157
- # call of the `google::longrunning::Operations` service.
2375
+ # Video annotation response. Included in the `response` field of the `Operation`
2376
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
2377
+ # service.
2158
2378
  class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
2159
2379
  include Google::Apis::Core::Hashable
2160
2380
 
@@ -2182,14 +2402,14 @@ module Google
2182
2402
  # @return [Float]
2183
2403
  attr_accessor :confidence
2184
2404
 
2185
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
2186
- # A full list of supported type names will be provided in the document.
2405
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
2406
+ # full list of supported type names will be provided in the document.
2187
2407
  # Corresponds to the JSON property `name`
2188
2408
  # @return [String]
2189
2409
  attr_accessor :name
2190
2410
 
2191
- # Text value of the detection result. For example, the value for "HairColor"
2192
- # can be "black", "blonde", etc.
2411
+ # Text value of the detection result. For example, the value for "HairColor" can
2412
+ # be "black", "blonde", etc.
2193
2413
  # Corresponds to the JSON property `value`
2194
2414
  # @return [String]
2195
2415
  attr_accessor :value
@@ -2221,9 +2441,8 @@ module Google
2221
2441
  # @return [String]
2222
2442
  attr_accessor :name
2223
2443
 
2224
- # A vertex represents a 2D point in the image.
2225
- # NOTE: the normalized vertex coordinates are relative to the original image
2226
- # and range from 0 to 1.
2444
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2445
+ # coordinates are relative to the original image and range from 0 to 1.
2227
2446
  # Corresponds to the JSON property `point`
2228
2447
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
2229
2448
  attr_accessor :point
@@ -2249,8 +2468,7 @@ module Google
2249
2468
  # @return [String]
2250
2469
  attr_accessor :description
2251
2470
 
2252
- # Opaque entity ID. Some IDs may be available in
2253
- # [Google Knowledge Graph Search
2471
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
2254
2472
  # API](https://developers.google.com/knowledge-graph/).
2255
2473
  # Corresponds to the JSON property `entityId`
2256
2474
  # @return [String]
@@ -2273,9 +2491,9 @@ module Google
2273
2491
  end
2274
2492
  end
2275
2493
 
2276
- # Explicit content annotation (based on per-frame visual signals only).
2277
- # If no explicit content has been detected in a frame, no annotations are
2278
- # present for that frame.
2494
+ # Explicit content annotation (based on per-frame visual signals only). If no
2495
+ # explicit content has been detected in a frame, no annotations are present for
2496
+ # that frame.
2279
2497
  class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
2280
2498
  include Google::Apis::Core::Hashable
2281
2499
 
@@ -2304,9 +2522,8 @@ module Google
2304
2522
  class GoogleCloudVideointelligenceV1p1beta1ExplicitContentDetectionConfig
2305
2523
  include Google::Apis::Core::Hashable
2306
2524
 
2307
- # Model to use for explicit content detection.
2308
- # Supported values: "builtin/stable" (the default if unset) and
2309
- # "builtin/latest".
2525
+ # Model to use for explicit content detection. Supported values: "builtin/stable"
2526
+ # (the default if unset) and "builtin/latest".
2310
2527
  # Corresponds to the JSON property `model`
2311
2528
  # @return [String]
2312
2529
  attr_accessor :model
@@ -2342,8 +2559,140 @@ module Google
2342
2559
 
2343
2560
  # Update properties of this object
2344
2561
  def update!(**args)
2345
- @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
2346
- @time_offset = args[:time_offset] if args.key?(:time_offset)
2562
+ @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
2563
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
2564
+ end
2565
+ end
2566
+
2567
+ # Deprecated. No effect.
2568
+ class GoogleCloudVideointelligenceV1p1beta1FaceAnnotation
2569
+ include Google::Apis::Core::Hashable
2570
+
2571
+ # All video frames where a face was detected.
2572
+ # Corresponds to the JSON property `frames`
2573
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1FaceFrame>]
2574
+ attr_accessor :frames
2575
+
2576
+ # All video segments where a face was detected.
2577
+ # Corresponds to the JSON property `segments`
2578
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1FaceSegment>]
2579
+ attr_accessor :segments
2580
+
2581
+ # Thumbnail of a representative face view (in JPEG format).
2582
+ # Corresponds to the JSON property `thumbnail`
2583
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
2584
+ # @return [String]
2585
+ attr_accessor :thumbnail
2586
+
2587
+ def initialize(**args)
2588
+ update!(**args)
2589
+ end
2590
+
2591
+ # Update properties of this object
2592
+ def update!(**args)
2593
+ @frames = args[:frames] if args.key?(:frames)
2594
+ @segments = args[:segments] if args.key?(:segments)
2595
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
2596
+ end
2597
+ end
2598
+
2599
+ # Face detection annotation.
2600
+ class GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation
2601
+ include Google::Apis::Core::Hashable
2602
+
2603
+ # Feature version.
2604
+ # Corresponds to the JSON property `version`
2605
+ # @return [String]
2606
+ attr_accessor :version
2607
+
2608
+ def initialize(**args)
2609
+ update!(**args)
2610
+ end
2611
+
2612
+ # Update properties of this object
2613
+ def update!(**args)
2614
+ @version = args[:version] if args.key?(:version)
2615
+ end
2616
+ end
2617
+
2618
+ # Config for FACE_DETECTION.
2619
+ class GoogleCloudVideointelligenceV1p1beta1FaceDetectionConfig
2620
+ include Google::Apis::Core::Hashable
2621
+
2622
+ # Whether to enable face attributes detection, such as glasses, dark_glasses,
2623
+ # mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
2624
+ # Corresponds to the JSON property `includeAttributes`
2625
+ # @return [Boolean]
2626
+ attr_accessor :include_attributes
2627
+ alias_method :include_attributes?, :include_attributes
2628
+
2629
+ # Whether bounding boxes are included in the face annotation output.
2630
+ # Corresponds to the JSON property `includeBoundingBoxes`
2631
+ # @return [Boolean]
2632
+ attr_accessor :include_bounding_boxes
2633
+ alias_method :include_bounding_boxes?, :include_bounding_boxes
2634
+
2635
+ # Model to use for face detection. Supported values: "builtin/stable" (the
2636
+ # default if unset) and "builtin/latest".
2637
+ # Corresponds to the JSON property `model`
2638
+ # @return [String]
2639
+ attr_accessor :model
2640
+
2641
+ def initialize(**args)
2642
+ update!(**args)
2643
+ end
2644
+
2645
+ # Update properties of this object
2646
+ def update!(**args)
2647
+ @include_attributes = args[:include_attributes] if args.key?(:include_attributes)
2648
+ @include_bounding_boxes = args[:include_bounding_boxes] if args.key?(:include_bounding_boxes)
2649
+ @model = args[:model] if args.key?(:model)
2650
+ end
2651
+ end
2652
+
2653
+ # Deprecated. No effect.
2654
+ class GoogleCloudVideointelligenceV1p1beta1FaceFrame
2655
+ include Google::Apis::Core::Hashable
2656
+
2657
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
2658
+ # same face is detected in multiple locations within the current frame.
2659
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
2660
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox>]
2661
+ attr_accessor :normalized_bounding_boxes
2662
+
2663
+ # Time-offset, relative to the beginning of the video, corresponding to the
2664
+ # video frame for this location.
2665
+ # Corresponds to the JSON property `timeOffset`
2666
+ # @return [String]
2667
+ attr_accessor :time_offset
2668
+
2669
+ def initialize(**args)
2670
+ update!(**args)
2671
+ end
2672
+
2673
+ # Update properties of this object
2674
+ def update!(**args)
2675
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
2676
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
2677
+ end
2678
+ end
2679
+
2680
+ # Video segment level annotation results for face detection.
2681
+ class GoogleCloudVideointelligenceV1p1beta1FaceSegment
2682
+ include Google::Apis::Core::Hashable
2683
+
2684
+ # Video segment.
2685
+ # Corresponds to the JSON property `segment`
2686
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
2687
+ attr_accessor :segment
2688
+
2689
+ def initialize(**args)
2690
+ update!(**args)
2691
+ end
2692
+
2693
+ # Update properties of this object
2694
+ def update!(**args)
2695
+ @segment = args[:segment] if args.key?(:segment)
2347
2696
  end
2348
2697
  end
2349
2698
 
@@ -2351,10 +2700,9 @@ module Google
2351
2700
  class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
2352
2701
  include Google::Apis::Core::Hashable
2353
2702
 
2354
- # Common categories for the detected entity.
2355
- # For example, when the label is `Terrier`, the category is likely `dog`. And
2356
- # in some cases there might be more than one categories e.g., `Terrier` could
2357
- # also be a `pet`.
2703
+ # Common categories for the detected entity. For example, when the label is `
2704
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
2705
+ # than one categories e.g., `Terrier` could also be a `pet`.
2358
2706
  # Corresponds to the JSON property `categoryEntities`
2359
2707
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Entity>]
2360
2708
  attr_accessor :category_entities
@@ -2397,44 +2745,40 @@ module Google
2397
2745
  class GoogleCloudVideointelligenceV1p1beta1LabelDetectionConfig
2398
2746
  include Google::Apis::Core::Hashable
2399
2747
 
2400
- # The confidence threshold we perform filtering on the labels from
2401
- # frame-level detection. If not set, it is set to 0.4 by default. The valid
2402
- # range for this threshold is [0.1, 0.9]. Any value set outside of this
2403
- # range will be clipped.
2404
- # Note: For best results, follow the default threshold. We will update
2405
- # the default threshold everytime when we release a new model.
2748
+ # The confidence threshold we perform filtering on the labels from frame-level
2749
+ # detection. If not set, it is set to 0.4 by default. The valid range for this
2750
+ # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
2751
+ # Note: For best results, follow the default threshold. We will update the
2752
+ # default threshold everytime when we release a new model.
2406
2753
  # Corresponds to the JSON property `frameConfidenceThreshold`
2407
2754
  # @return [Float]
2408
2755
  attr_accessor :frame_confidence_threshold
2409
2756
 
2410
- # What labels should be detected with LABEL_DETECTION, in addition to
2411
- # video-level labels or segment-level labels.
2412
- # If unspecified, defaults to `SHOT_MODE`.
2757
+ # What labels should be detected with LABEL_DETECTION, in addition to video-
2758
+ # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
2413
2759
  # Corresponds to the JSON property `labelDetectionMode`
2414
2760
  # @return [String]
2415
2761
  attr_accessor :label_detection_mode
2416
2762
 
2417
- # Model to use for label detection.
2418
- # Supported values: "builtin/stable" (the default if unset) and
2419
- # "builtin/latest".
2763
+ # Model to use for label detection. Supported values: "builtin/stable" (the
2764
+ # default if unset) and "builtin/latest".
2420
2765
  # Corresponds to the JSON property `model`
2421
2766
  # @return [String]
2422
2767
  attr_accessor :model
2423
2768
 
2424
- # Whether the video has been shot from a stationary (i.e., non-moving)
2425
- # camera. When set to true, might improve detection accuracy for moving
2426
- # objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
2769
+ # Whether the video has been shot from a stationary (i.e., non-moving) camera.
2770
+ # When set to true, might improve detection accuracy for moving objects. Should
2771
+ # be used with `SHOT_AND_FRAME_MODE` enabled.
2427
2772
  # Corresponds to the JSON property `stationaryCamera`
2428
2773
  # @return [Boolean]
2429
2774
  attr_accessor :stationary_camera
2430
2775
  alias_method :stationary_camera?, :stationary_camera
2431
2776
 
2432
- # The confidence threshold we perform filtering on the labels from
2433
- # video-level and shot-level detections. If not set, it's set to 0.3 by
2434
- # default. The valid range for this threshold is [0.1, 0.9]. Any value set
2435
- # outside of this range will be clipped.
2436
- # Note: For best results, follow the default threshold. We will update
2437
- # the default threshold everytime when we release a new model.
2777
+ # The confidence threshold we perform filtering on the labels from video-level
2778
+ # and shot-level detections. If not set, it's set to 0.3 by default. The valid
2779
+ # range for this threshold is [0.1, 0.9]. Any value set outside of this range
2780
+ # will be clipped. Note: For best results, follow the default threshold. We will
2781
+ # update the default threshold everytime when we release a new model.
2438
2782
  # Corresponds to the JSON property `videoConfidenceThreshold`
2439
2783
  # @return [Float]
2440
2784
  attr_accessor :video_confidence_threshold
@@ -2513,14 +2857,14 @@ module Google
2513
2857
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
2514
2858
  attr_accessor :entity
2515
2859
 
2516
- # All video segments where the recognized logo appears. There might be
2517
- # multiple instances of the same logo class appearing in one VideoSegment.
2860
+ # All video segments where the recognized logo appears. There might be multiple
2861
+ # instances of the same logo class appearing in one VideoSegment.
2518
2862
  # Corresponds to the JSON property `segments`
2519
2863
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
2520
2864
  attr_accessor :segments
2521
2865
 
2522
- # All logo tracks where the recognized logo appears. Each track corresponds
2523
- # to one logo instance appearing in consecutive frames.
2866
+ # All logo tracks where the recognized logo appears. Each track corresponds to
2867
+ # one logo instance appearing in consecutive frames.
2524
2868
  # Corresponds to the JSON property `tracks`
2525
2869
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
2526
2870
  attr_accessor :tracks
@@ -2537,9 +2881,8 @@ module Google
2537
2881
  end
2538
2882
  end
2539
2883
 
2540
- # Normalized bounding box.
2541
- # The normalized vertex coordinates are relative to the original image.
2542
- # Range: [0, 1].
2884
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
2885
+ # original image. Range: [0, 1].
2543
2886
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
2544
2887
  include Google::Apis::Core::Hashable
2545
2888
 
@@ -2577,20 +2920,12 @@ module Google
2577
2920
  end
2578
2921
 
2579
2922
  # Normalized bounding polygon for text (that might not be aligned with axis).
2580
- # Contains list of the corner points in clockwise order starting from
2581
- # top-left corner. For example, for a rectangular bounding box:
2582
- # When the text is horizontal it might look like:
2583
- # 0----1
2584
- # | |
2585
- # 3----2
2586
- # When it's clockwise rotated 180 degrees around the top-left corner it
2587
- # becomes:
2588
- # 2----3
2589
- # | |
2590
- # 1----0
2591
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
2592
- # than 0, or greater than 1 due to trignometric calculations for location of
2593
- # the box.
2923
+ # Contains list of the corner points in clockwise order starting from top-left
2924
+ # corner. For example, for a rectangular bounding box: When the text is
2925
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
2926
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
2927
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
2928
+ # or greater than 1 due to trignometric calculations for location of the box.
2594
2929
  class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
2595
2930
  include Google::Apis::Core::Hashable
2596
2931
 
@@ -2609,9 +2944,8 @@ module Google
2609
2944
  end
2610
2945
  end
2611
2946
 
2612
- # A vertex represents a 2D point in the image.
2613
- # NOTE: the normalized vertex coordinates are relative to the original image
2614
- # and range from 0 to 1.
2947
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
2948
+ # coordinates are relative to the original image and range from 0 to 1.
2615
2949
  class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
2616
2950
  include Google::Apis::Core::Hashable
2617
2951
 
@@ -2650,10 +2984,10 @@ module Google
2650
2984
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
2651
2985
  attr_accessor :entity
2652
2986
 
2653
- # Information corresponding to all frames where this object track appears.
2654
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
2655
- # messages in frames.
2656
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
2987
+ # Information corresponding to all frames where this object track appears. Non-
2988
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
2989
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
2990
+ # frames.
2657
2991
  # Corresponds to the JSON property `frames`
2658
2992
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
2659
2993
  attr_accessor :frames
@@ -2663,12 +2997,11 @@ module Google
2663
2997
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
2664
2998
  attr_accessor :segment
2665
2999
 
2666
- # Streaming mode ONLY.
2667
- # In streaming mode, we do not know the end time of a tracked object
2668
- # before it is completed. Hence, there is no VideoSegment info returned.
2669
- # Instead, we provide a unique identifiable integer track_id so that
2670
- # the customers can correlate the results of the ongoing
2671
- # ObjectTrackAnnotation of the same track_id over time.
3000
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
3001
+ # tracked object before it is completed. Hence, there is no VideoSegment info
3002
+ # returned. Instead, we provide a unique identifiable integer track_id so that
3003
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
3004
+ # of the same track_id over time.
2672
3005
  # Corresponds to the JSON property `trackId`
2673
3006
  # @return [Fixnum]
2674
3007
  attr_accessor :track_id
@@ -2697,9 +3030,8 @@ module Google
2697
3030
  class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingConfig
2698
3031
  include Google::Apis::Core::Hashable
2699
3032
 
2700
- # Model to use for object tracking.
2701
- # Supported values: "builtin/stable" (the default if unset) and
2702
- # "builtin/latest".
3033
+ # Model to use for object tracking. Supported values: "builtin/stable" (the
3034
+ # default if unset) and "builtin/latest".
2703
3035
  # Corresponds to the JSON property `model`
2704
3036
  # @return [String]
2705
3037
  attr_accessor :model
@@ -2719,9 +3051,8 @@ module Google
2719
3051
  class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
2720
3052
  include Google::Apis::Core::Hashable
2721
3053
 
2722
- # Normalized bounding box.
2723
- # The normalized vertex coordinates are relative to the original image.
2724
- # Range: [0, 1].
3054
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3055
+ # original image. Range: [0, 1].
2725
3056
  # Corresponds to the JSON property `normalizedBoundingBox`
2726
3057
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
2727
3058
  attr_accessor :normalized_bounding_box
@@ -2742,13 +3073,74 @@ module Google
2742
3073
  end
2743
3074
  end
2744
3075
 
3076
+ # Person detection annotation per video.
3077
+ class GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation
3078
+ include Google::Apis::Core::Hashable
3079
+
3080
+ # The detected tracks of a person.
3081
+ # Corresponds to the JSON property `tracks`
3082
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
3083
+ attr_accessor :tracks
3084
+
3085
+ # Feature version.
3086
+ # Corresponds to the JSON property `version`
3087
+ # @return [String]
3088
+ attr_accessor :version
3089
+
3090
+ def initialize(**args)
3091
+ update!(**args)
3092
+ end
3093
+
3094
+ # Update properties of this object
3095
+ def update!(**args)
3096
+ @tracks = args[:tracks] if args.key?(:tracks)
3097
+ @version = args[:version] if args.key?(:version)
3098
+ end
3099
+ end
3100
+
3101
+ # Config for PERSON_DETECTION.
3102
+ class GoogleCloudVideointelligenceV1p1beta1PersonDetectionConfig
3103
+ include Google::Apis::Core::Hashable
3104
+
3105
+ # Whether to enable person attributes detection, such as cloth color (black,
3106
+ # blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, etc.
3107
+ # Ignored if 'include_bounding_boxes' is set to false.
3108
+ # Corresponds to the JSON property `includeAttributes`
3109
+ # @return [Boolean]
3110
+ attr_accessor :include_attributes
3111
+ alias_method :include_attributes?, :include_attributes
3112
+
3113
+ # Whether bounding boxes are included in the person detection annotation output.
3114
+ # Corresponds to the JSON property `includeBoundingBoxes`
3115
+ # @return [Boolean]
3116
+ attr_accessor :include_bounding_boxes
3117
+ alias_method :include_bounding_boxes?, :include_bounding_boxes
3118
+
3119
+ # Whether to enable pose landmarks detection. Ignored if 'include_bounding_boxes'
3120
+ # is set to false.
3121
+ # Corresponds to the JSON property `includePoseLandmarks`
3122
+ # @return [Boolean]
3123
+ attr_accessor :include_pose_landmarks
3124
+ alias_method :include_pose_landmarks?, :include_pose_landmarks
3125
+
3126
+ def initialize(**args)
3127
+ update!(**args)
3128
+ end
3129
+
3130
+ # Update properties of this object
3131
+ def update!(**args)
3132
+ @include_attributes = args[:include_attributes] if args.key?(:include_attributes)
3133
+ @include_bounding_boxes = args[:include_bounding_boxes] if args.key?(:include_bounding_boxes)
3134
+ @include_pose_landmarks = args[:include_pose_landmarks] if args.key?(:include_pose_landmarks)
3135
+ end
3136
+ end
3137
+
2745
3138
  # Config for SHOT_CHANGE_DETECTION.
2746
3139
  class GoogleCloudVideointelligenceV1p1beta1ShotChangeDetectionConfig
2747
3140
  include Google::Apis::Core::Hashable
2748
3141
 
2749
- # Model to use for shot change detection.
2750
- # Supported values: "builtin/stable" (the default if unset) and
2751
- # "builtin/latest".
3142
+ # Model to use for shot change detection. Supported values: "builtin/stable" (
3143
+ # the default if unset) and "builtin/latest".
2752
3144
  # Corresponds to the JSON property `model`
2753
3145
  # @return [String]
2754
3146
  attr_accessor :model
@@ -2768,12 +3160,12 @@ module Google
2768
3160
  class GoogleCloudVideointelligenceV1p1beta1SpeechContext
2769
3161
  include Google::Apis::Core::Hashable
2770
3162
 
2771
- # Optional. A list of strings containing words and phrases "hints" so that
2772
- # the speech recognition is more likely to recognize them. This can be used
2773
- # to improve the accuracy for specific words and phrases, for example, if
2774
- # specific commands are typically spoken by the user. This can also be used
2775
- # to add additional words to the vocabulary of the recognizer. See
2776
- # [usage limits](https://cloud.google.com/speech/limits#content).
3163
+ # Optional. A list of strings containing words and phrases "hints" so that the
3164
+ # speech recognition is more likely to recognize them. This can be used to
3165
+ # improve the accuracy for specific words and phrases, for example, if specific
3166
+ # commands are typically spoken by the user. This can also be used to add
3167
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
3168
+ # //cloud.google.com/speech/limits#content).
2777
3169
  # Corresponds to the JSON property `phrases`
2778
3170
  # @return [Array<String>]
2779
3171
  attr_accessor :phrases
@@ -2794,10 +3186,10 @@ module Google
2794
3186
 
2795
3187
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
2796
3188
  # indicates an estimated greater likelihood that the recognized words are
2797
- # correct. This field is set only for the top alternative.
2798
- # This field is not guaranteed to be accurate and users should not rely on it
2799
- # to be always provided.
2800
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3189
+ # correct. This field is set only for the top alternative. This field is not
3190
+ # guaranteed to be accurate and users should not rely on it to be always
3191
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3192
+ # not set.
2801
3193
  # Corresponds to the JSON property `confidence`
2802
3194
  # @return [Float]
2803
3195
  attr_accessor :confidence
@@ -2808,8 +3200,8 @@ module Google
2808
3200
  attr_accessor :transcript
2809
3201
 
2810
3202
  # Output only. A list of word-specific information for each recognized word.
2811
- # Note: When `enable_speaker_diarization` is set to true, you will see all
2812
- # the words from the beginning of the audio.
3203
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
3204
+ # words from the beginning of the audio.
2813
3205
  # Corresponds to the JSON property `words`
2814
3206
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
2815
3207
  attr_accessor :words
@@ -2830,18 +3222,17 @@ module Google
2830
3222
  class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
2831
3223
  include Google::Apis::Core::Hashable
2832
3224
 
2833
- # May contain one or more recognition hypotheses (up to the maximum specified
2834
- # in `max_alternatives`). These alternatives are ordered in terms of
2835
- # accuracy, with the top (first) alternative being the most probable, as
2836
- # ranked by the recognizer.
3225
+ # May contain one or more recognition hypotheses (up to the maximum specified in
3226
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
3227
+ # the top (first) alternative being the most probable, as ranked by the
3228
+ # recognizer.
2837
3229
  # Corresponds to the JSON property `alternatives`
2838
3230
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
2839
3231
  attr_accessor :alternatives
2840
3232
 
2841
3233
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
2842
- # language tag of
2843
- # the language in this result. This language code was detected to have the
2844
- # most likelihood of being spoken in the audio.
3234
+ # language tag of the language in this result. This language code was detected
3235
+ # to have the most likelihood of being spoken in the audio.
2845
3236
  # Corresponds to the JSON property `languageCode`
2846
3237
  # @return [String]
2847
3238
  attr_accessor :language_code
@@ -2868,66 +3259,62 @@ module Google
2868
3259
  attr_accessor :audio_tracks
2869
3260
 
2870
3261
  # Optional. If set, specifies the estimated number of speakers in the
2871
- # conversation.
2872
- # If not set, defaults to '2'.
2873
- # Ignored unless enable_speaker_diarization is set to true.
3262
+ # conversation. If not set, defaults to '2'. Ignored unless
3263
+ # enable_speaker_diarization is set to true.
2874
3264
  # Corresponds to the JSON property `diarizationSpeakerCount`
2875
3265
  # @return [Fixnum]
2876
3266
  attr_accessor :diarization_speaker_count
2877
3267
 
2878
- # Optional. If 'true', adds punctuation to recognition result hypotheses.
2879
- # This feature is only available in select languages. Setting this for
2880
- # requests in other languages has no effect at all. The default 'false' value
2881
- # does not add punctuation to result hypotheses. NOTE: "This is currently
2882
- # offered as an experimental service, complimentary to all users. In the
2883
- # future this may be exclusively available as a premium feature."
3268
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
3269
+ # feature is only available in select languages. Setting this for requests in
3270
+ # other languages has no effect at all. The default 'false' value does not add
3271
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
3272
+ # experimental service, complimentary to all users. In the future this may be
3273
+ # exclusively available as a premium feature."
2884
3274
  # Corresponds to the JSON property `enableAutomaticPunctuation`
2885
3275
  # @return [Boolean]
2886
3276
  attr_accessor :enable_automatic_punctuation
2887
3277
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation
2888
3278
 
2889
- # Optional. If 'true', enables speaker detection for each recognized word in
2890
- # the top alternative of the recognition result using a speaker_tag provided
2891
- # in the WordInfo.
2892
- # Note: When this is true, we send all the words from the beginning of the
2893
- # audio for the top alternative in every consecutive response.
2894
- # This is done in order to improve our speaker tags as our models learn to
2895
- # identify the speakers in the conversation over time.
3279
+ # Optional. If 'true', enables speaker detection for each recognized word in the
3280
+ # top alternative of the recognition result using a speaker_tag provided in the
3281
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
3282
+ # the audio for the top alternative in every consecutive response. This is done
3283
+ # in order to improve our speaker tags as our models learn to identify the
3284
+ # speakers in the conversation over time.
2896
3285
  # Corresponds to the JSON property `enableSpeakerDiarization`
2897
3286
  # @return [Boolean]
2898
3287
  attr_accessor :enable_speaker_diarization
2899
3288
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization
2900
3289
 
2901
3290
  # Optional. If `true`, the top result includes a list of words and the
2902
- # confidence for those words. If `false`, no word-level confidence
2903
- # information is returned. The default is `false`.
3291
+ # confidence for those words. If `false`, no word-level confidence information
3292
+ # is returned. The default is `false`.
2904
3293
  # Corresponds to the JSON property `enableWordConfidence`
2905
3294
  # @return [Boolean]
2906
3295
  attr_accessor :enable_word_confidence
2907
3296
  alias_method :enable_word_confidence?, :enable_word_confidence
2908
3297
 
2909
- # Optional. If set to `true`, the server will attempt to filter out
2910
- # profanities, replacing all but the initial character in each filtered word
2911
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
2912
- # won't be filtered out.
3298
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
3299
+ # replacing all but the initial character in each filtered word with asterisks,
3300
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
2913
3301
  # Corresponds to the JSON property `filterProfanity`
2914
3302
  # @return [Boolean]
2915
3303
  attr_accessor :filter_profanity
2916
3304
  alias_method :filter_profanity?, :filter_profanity
2917
3305
 
2918
- # Required. *Required* The language of the supplied audio as a
2919
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
2920
- # Example: "en-US".
2921
- # See [Language Support](https://cloud.google.com/speech/docs/languages)
2922
- # for a list of the currently supported language codes.
3306
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
3307
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
3308
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
3309
+ # of the currently supported language codes.
2923
3310
  # Corresponds to the JSON property `languageCode`
2924
3311
  # @return [String]
2925
3312
  attr_accessor :language_code
2926
3313
 
2927
3314
  # Optional. Maximum number of recognition hypotheses to be returned.
2928
3315
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
2929
- # within each `SpeechTranscription`. The server may return fewer than
2930
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
3316
+ # within each `SpeechTranscription`. The server may return fewer than `
3317
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
2931
3318
  # return a maximum of one. If omitted, will return a maximum of one.
2932
3319
  # Corresponds to the JSON property `maxAlternatives`
2933
3320
  # @return [Fixnum]
@@ -2994,16 +3381,15 @@ module Google
2994
3381
  include Google::Apis::Core::Hashable
2995
3382
 
2996
3383
  # Language hint can be specified if the language to be detected is known a
2997
- # priori. It can increase the accuracy of the detection. Language hint must
2998
- # be language code in BCP-47 format.
2999
- # Automatic language detection is performed if no hint is provided.
3384
+ # priori. It can increase the accuracy of the detection. Language hint must be
3385
+ # language code in BCP-47 format. Automatic language detection is performed if
3386
+ # no hint is provided.
3000
3387
  # Corresponds to the JSON property `languageHints`
3001
3388
  # @return [Array<String>]
3002
3389
  attr_accessor :language_hints
3003
3390
 
3004
- # Model to use for text detection.
3005
- # Supported values: "builtin/stable" (the default if unset) and
3006
- # "builtin/latest".
3391
+ # Model to use for text detection. Supported values: "builtin/stable" (the
3392
+ # default if unset) and "builtin/latest".
3007
3393
  # Corresponds to the JSON property `model`
3008
3394
  # @return [String]
3009
3395
  attr_accessor :model
@@ -3019,27 +3405,19 @@ module Google
3019
3405
  end
3020
3406
  end
3021
3407
 
3022
- # Video frame level annotation results for text annotation (OCR).
3023
- # Contains information regarding timestamp and bounding box locations for the
3024
- # frames containing detected OCR text snippets.
3408
+ # Video frame level annotation results for text annotation (OCR). Contains
3409
+ # information regarding timestamp and bounding box locations for the frames
3410
+ # containing detected OCR text snippets.
3025
3411
  class GoogleCloudVideointelligenceV1p1beta1TextFrame
3026
3412
  include Google::Apis::Core::Hashable
3027
3413
 
3028
3414
  # Normalized bounding polygon for text (that might not be aligned with axis).
3029
- # Contains list of the corner points in clockwise order starting from
3030
- # top-left corner. For example, for a rectangular bounding box:
3031
- # When the text is horizontal it might look like:
3032
- # 0----1
3033
- # | |
3034
- # 3----2
3035
- # When it's clockwise rotated 180 degrees around the top-left corner it
3036
- # becomes:
3037
- # 2----3
3038
- # | |
3039
- # 1----0
3040
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3041
- # than 0, or greater than 1 due to trignometric calculations for location of
3042
- # the box.
3415
+ # Contains list of the corner points in clockwise order starting from top-left
3416
+ # corner. For example, for a rectangular bounding box: When the text is
3417
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
3418
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
3419
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
3420
+ # or greater than 1 due to trignometric calculations for location of the box.
3043
3421
  # Corresponds to the JSON property `rotatedBoundingBox`
3044
3422
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
3045
3423
  attr_accessor :rotated_bounding_box
@@ -3092,9 +3470,8 @@ module Google
3092
3470
  end
3093
3471
  end
3094
3472
 
3095
- # For tracking related features.
3096
- # An object at time_offset with attributes, and located with
3097
- # normalized_bounding_box.
3473
+ # For tracking related features. An object at time_offset with attributes, and
3474
+ # located with normalized_bounding_box.
3098
3475
  class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
3099
3476
  include Google::Apis::Core::Hashable
3100
3477
 
@@ -3108,15 +3485,14 @@ module Google
3108
3485
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
3109
3486
  attr_accessor :landmarks
3110
3487
 
3111
- # Normalized bounding box.
3112
- # The normalized vertex coordinates are relative to the original image.
3113
- # Range: [0, 1].
3488
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
3489
+ # original image. Range: [0, 1].
3114
3490
  # Corresponds to the JSON property `normalizedBoundingBox`
3115
3491
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
3116
3492
  attr_accessor :normalized_bounding_box
3117
3493
 
3118
- # Time-offset, relative to the beginning of the video,
3119
- # corresponding to the video frame for this object.
3494
+ # Time-offset, relative to the beginning of the video, corresponding to the
3495
+ # video frame for this object.
3120
3496
  # Corresponds to the JSON property `timeOffset`
3121
3497
  # @return [String]
3122
3498
  attr_accessor :time_offset
@@ -3175,20 +3551,19 @@ module Google
3175
3551
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
3176
3552
  include Google::Apis::Core::Hashable
3177
3553
 
3178
- # Specifies which feature is being tracked if the request contains more than
3179
- # one feature.
3554
+ # Specifies which feature is being tracked if the request contains more than one
3555
+ # feature.
3180
3556
  # Corresponds to the JSON property `feature`
3181
3557
  # @return [String]
3182
3558
  attr_accessor :feature
3183
3559
 
3184
- # Video file location in
3185
- # [Cloud Storage](https://cloud.google.com/storage/).
3560
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3186
3561
  # Corresponds to the JSON property `inputUri`
3187
3562
  # @return [String]
3188
3563
  attr_accessor :input_uri
3189
3564
 
3190
- # Approximate percentage processed thus far. Guaranteed to be
3191
- # 100 when fully processed.
3565
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
3566
+ # processed.
3192
3567
  # Corresponds to the JSON property `progressPercent`
3193
3568
  # @return [Fixnum]
3194
3569
  attr_accessor :progress_percent
@@ -3227,31 +3602,40 @@ module Google
3227
3602
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
3228
3603
  include Google::Apis::Core::Hashable
3229
3604
 
3230
- # The `Status` type defines a logical error model that is suitable for
3231
- # different programming environments, including REST APIs and RPC APIs. It is
3232
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
3233
- # three pieces of data: error code, error message, and error details.
3234
- # You can find out more about this error model and how to work with it in the
3235
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
3605
+ # The `Status` type defines a logical error model that is suitable for different
3606
+ # programming environments, including REST APIs and RPC APIs. It is used by [
3607
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
3608
+ # data: error code, error message, and error details. You can find out more
3609
+ # about this error model and how to work with it in the [API Design Guide](https:
3610
+ # //cloud.google.com/apis/design/errors).
3236
3611
  # Corresponds to the JSON property `error`
3237
3612
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
3238
3613
  attr_accessor :error
3239
3614
 
3240
- # Explicit content annotation (based on per-frame visual signals only).
3241
- # If no explicit content has been detected in a frame, no annotations are
3242
- # present for that frame.
3615
+ # Explicit content annotation (based on per-frame visual signals only). If no
3616
+ # explicit content has been detected in a frame, no annotations are present for
3617
+ # that frame.
3243
3618
  # Corresponds to the JSON property `explicitAnnotation`
3244
3619
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
3245
3620
  attr_accessor :explicit_annotation
3246
3621
 
3247
- # Label annotations on frame level.
3248
- # There is exactly one element for each unique label.
3622
+ # Deprecated. Please use `face_detection_annotations` instead.
3623
+ # Corresponds to the JSON property `faceAnnotations`
3624
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1FaceAnnotation>]
3625
+ attr_accessor :face_annotations
3626
+
3627
+ # Face detection annotations.
3628
+ # Corresponds to the JSON property `faceDetectionAnnotations`
3629
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation>]
3630
+ attr_accessor :face_detection_annotations
3631
+
3632
+ # Label annotations on frame level. There is exactly one element for each unique
3633
+ # label.
3249
3634
  # Corresponds to the JSON property `frameLabelAnnotations`
3250
3635
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3251
3636
  attr_accessor :frame_label_annotations
3252
3637
 
3253
- # Video file location in
3254
- # [Cloud Storage](https://cloud.google.com/storage/).
3638
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
3255
3639
  # Corresponds to the JSON property `inputUri`
3256
3640
  # @return [String]
3257
3641
  attr_accessor :input_uri
@@ -3266,6 +3650,11 @@ module Google
3266
3650
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation>]
3267
3651
  attr_accessor :object_annotations
3268
3652
 
3653
+ # Person detection annotations.
3654
+ # Corresponds to the JSON property `personDetectionAnnotations`
3655
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation>]
3656
+ attr_accessor :person_detection_annotations
3657
+
3269
3658
  # Video segment.
3270
3659
  # Corresponds to the JSON property `segment`
3271
3660
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
@@ -3278,11 +3667,11 @@ module Google
3278
3667
  attr_accessor :segment_label_annotations
3279
3668
 
3280
3669
  # Presence label annotations on video level or user-specified segment level.
3281
- # There is exactly one element for each unique label. Compared to the
3282
- # existing topical `segment_label_annotations`, this field presents more
3283
- # fine-grained, segment-level labels detected in video content and is made
3284
- # available only when the client sets `LabelDetectionConfig.model` to
3285
- # "builtin/latest" in the request.
3670
+ # There is exactly one element for each unique label. Compared to the existing
3671
+ # topical `segment_label_annotations`, this field presents more fine-grained,
3672
+ # segment-level labels detected in video content and is made available only when
3673
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
3674
+ # request.
3286
3675
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
3287
3676
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3288
3677
  attr_accessor :segment_presence_label_annotations
@@ -3292,17 +3681,17 @@ module Google
3292
3681
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
3293
3682
  attr_accessor :shot_annotations
3294
3683
 
3295
- # Topical label annotations on shot level.
3296
- # There is exactly one element for each unique label.
3684
+ # Topical label annotations on shot level. There is exactly one element for each
3685
+ # unique label.
3297
3686
  # Corresponds to the JSON property `shotLabelAnnotations`
3298
3687
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3299
3688
  attr_accessor :shot_label_annotations
3300
3689
 
3301
3690
  # Presence label annotations on shot level. There is exactly one element for
3302
- # each unique label. Compared to the existing topical
3303
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
3304
- # labels detected in video content and is made available only when the client
3305
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
3691
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
3692
+ # this field presents more fine-grained, shot-level labels detected in video
3693
+ # content and is made available only when the client sets `LabelDetectionConfig.
3694
+ # model` to "builtin/latest" in the request.
3306
3695
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
3307
3696
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
3308
3697
  attr_accessor :shot_presence_label_annotations
@@ -3312,9 +3701,8 @@ module Google
3312
3701
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
3313
3702
  attr_accessor :speech_transcriptions
3314
3703
 
3315
- # OCR text detection and tracking.
3316
- # Annotations for list of detected text snippets. Each will have list of
3317
- # frame information associated with it.
3704
+ # OCR text detection and tracking. Annotations for list of detected text
3705
+ # snippets. Each will have list of frame information associated with it.
3318
3706
  # Corresponds to the JSON property `textAnnotations`
3319
3707
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
3320
3708
  attr_accessor :text_annotations
@@ -3327,10 +3715,13 @@ module Google
3327
3715
  def update!(**args)
3328
3716
  @error = args[:error] if args.key?(:error)
3329
3717
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
3718
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
3719
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
3330
3720
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
3331
3721
  @input_uri = args[:input_uri] if args.key?(:input_uri)
3332
3722
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
3333
3723
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
3724
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
3334
3725
  @segment = args[:segment] if args.key?(:segment)
3335
3726
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
3336
3727
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -3351,6 +3742,11 @@ module Google
3351
3742
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentDetectionConfig]
3352
3743
  attr_accessor :explicit_content_detection_config
3353
3744
 
3745
+ # Config for FACE_DETECTION.
3746
+ # Corresponds to the JSON property `faceDetectionConfig`
3747
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1FaceDetectionConfig]
3748
+ attr_accessor :face_detection_config
3749
+
3354
3750
  # Config for LABEL_DETECTION.
3355
3751
  # Corresponds to the JSON property `labelDetectionConfig`
3356
3752
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelDetectionConfig]
@@ -3361,9 +3757,14 @@ module Google
3361
3757
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingConfig]
3362
3758
  attr_accessor :object_tracking_config
3363
3759
 
3364
- # Video segments to annotate. The segments may overlap and are not required
3365
- # to be contiguous or span the whole video. If unspecified, each video is
3366
- # treated as a single segment.
3760
+ # Config for PERSON_DETECTION.
3761
+ # Corresponds to the JSON property `personDetectionConfig`
3762
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1PersonDetectionConfig]
3763
+ attr_accessor :person_detection_config
3764
+
3765
+ # Video segments to annotate. The segments may overlap and are not required to
3766
+ # be contiguous or span the whole video. If unspecified, each video is treated
3767
+ # as a single segment.
3367
3768
  # Corresponds to the JSON property `segments`
3368
3769
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
3369
3770
  attr_accessor :segments
@@ -3390,8 +3791,10 @@ module Google
3390
3791
  # Update properties of this object
3391
3792
  def update!(**args)
3392
3793
  @explicit_content_detection_config = args[:explicit_content_detection_config] if args.key?(:explicit_content_detection_config)
3794
+ @face_detection_config = args[:face_detection_config] if args.key?(:face_detection_config)
3393
3795
  @label_detection_config = args[:label_detection_config] if args.key?(:label_detection_config)
3394
3796
  @object_tracking_config = args[:object_tracking_config] if args.key?(:object_tracking_config)
3797
+ @person_detection_config = args[:person_detection_config] if args.key?(:person_detection_config)
3395
3798
  @segments = args[:segments] if args.key?(:segments)
3396
3799
  @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config)
3397
3800
  @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config)
@@ -3403,14 +3806,14 @@ module Google
3403
3806
  class GoogleCloudVideointelligenceV1p1beta1VideoSegment
3404
3807
  include Google::Apis::Core::Hashable
3405
3808
 
3406
- # Time-offset, relative to the beginning of the video,
3407
- # corresponding to the end of the segment (inclusive).
3809
+ # Time-offset, relative to the beginning of the video, corresponding to the end
3810
+ # of the segment (inclusive).
3408
3811
  # Corresponds to the JSON property `endTimeOffset`
3409
3812
  # @return [String]
3410
3813
  attr_accessor :end_time_offset
3411
3814
 
3412
- # Time-offset, relative to the beginning of the video,
3413
- # corresponding to the start of the segment (inclusive).
3815
+ # Time-offset, relative to the beginning of the video, corresponding to the
3816
+ # start of the segment (inclusive).
3414
3817
  # Corresponds to the JSON property `startTimeOffset`
3415
3818
  # @return [String]
3416
3819
  attr_accessor :start_time_offset
@@ -3427,41 +3830,41 @@ module Google
3427
3830
  end
3428
3831
 
3429
3832
  # Word-specific information for recognized words. Word information is only
3430
- # included in the response when certain request parameters are set, such
3431
- # as `enable_word_time_offsets`.
3833
+ # included in the response when certain request parameters are set, such as `
3834
+ # enable_word_time_offsets`.
3432
3835
  class GoogleCloudVideointelligenceV1p1beta1WordInfo
3433
3836
  include Google::Apis::Core::Hashable
3434
3837
 
3435
3838
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
3436
3839
  # indicates an estimated greater likelihood that the recognized words are
3437
- # correct. This field is set only for the top alternative.
3438
- # This field is not guaranteed to be accurate and users should not rely on it
3439
- # to be always provided.
3440
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
3840
+ # correct. This field is set only for the top alternative. This field is not
3841
+ # guaranteed to be accurate and users should not rely on it to be always
3842
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
3843
+ # not set.
3441
3844
  # Corresponds to the JSON property `confidence`
3442
3845
  # @return [Float]
3443
3846
  attr_accessor :confidence
3444
3847
 
3445
- # Time offset relative to the beginning of the audio, and
3446
- # corresponding to the end of the spoken word. This field is only set if
3447
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3448
- # experimental feature and the accuracy of the time offset can vary.
3848
+ # Time offset relative to the beginning of the audio, and corresponding to the
3849
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
3850
+ # true` and only in the top hypothesis. This is an experimental feature and the
3851
+ # accuracy of the time offset can vary.
3449
3852
  # Corresponds to the JSON property `endTime`
3450
3853
  # @return [String]
3451
3854
  attr_accessor :end_time
3452
3855
 
3453
- # Output only. A distinct integer value is assigned for every speaker within
3454
- # the audio. This field specifies which one of those speakers was detected to
3455
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
3456
- # and is only set if speaker diarization is enabled.
3856
+ # Output only. A distinct integer value is assigned for every speaker within the
3857
+ # audio. This field specifies which one of those speakers was detected to have
3858
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
3859
+ # only set if speaker diarization is enabled.
3457
3860
  # Corresponds to the JSON property `speakerTag`
3458
3861
  # @return [Fixnum]
3459
3862
  attr_accessor :speaker_tag
3460
3863
 
3461
- # Time offset relative to the beginning of the audio, and
3462
- # corresponding to the start of the spoken word. This field is only set if
3463
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
3464
- # experimental feature and the accuracy of the time offset can vary.
3864
+ # Time offset relative to the beginning of the audio, and corresponding to the
3865
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
3866
+ # true` and only in the top hypothesis. This is an experimental feature and the
3867
+ # accuracy of the time offset can vary.
3465
3868
  # Corresponds to the JSON property `startTime`
3466
3869
  # @return [String]
3467
3870
  attr_accessor :start_time
@@ -3485,9 +3888,9 @@ module Google
3485
3888
  end
3486
3889
  end
3487
3890
 
3488
- # Video annotation progress. Included in the `metadata`
3489
- # field of the `Operation` returned by the `GetOperation`
3490
- # call of the `google::longrunning::Operations` service.
3891
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
3892
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3893
+ # service.
3491
3894
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
3492
3895
  include Google::Apis::Core::Hashable
3493
3896
 
@@ -3506,9 +3909,9 @@ module Google
3506
3909
  end
3507
3910
  end
3508
3911
 
3509
- # Video annotation response. Included in the `response`
3510
- # field of the `Operation` returned by the `GetOperation`
3511
- # call of the `google::longrunning::Operations` service.
3912
+ # Video annotation response. Included in the `response` field of the `Operation`
3913
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
3914
+ # service.
3512
3915
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
3513
3916
  include Google::Apis::Core::Hashable
3514
3917
 
@@ -3536,14 +3939,14 @@ module Google
3536
3939
  # @return [Float]
3537
3940
  attr_accessor :confidence
3538
3941
 
3539
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
3540
- # A full list of supported type names will be provided in the document.
3942
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
3943
+ # full list of supported type names will be provided in the document.
3541
3944
  # Corresponds to the JSON property `name`
3542
3945
  # @return [String]
3543
3946
  attr_accessor :name
3544
3947
 
3545
- # Text value of the detection result. For example, the value for "HairColor"
3546
- # can be "black", "blonde", etc.
3948
+ # Text value of the detection result. For example, the value for "HairColor" can
3949
+ # be "black", "blonde", etc.
3547
3950
  # Corresponds to the JSON property `value`
3548
3951
  # @return [String]
3549
3952
  attr_accessor :value
@@ -3575,9 +3978,8 @@ module Google
3575
3978
  # @return [String]
3576
3979
  attr_accessor :name
3577
3980
 
3578
- # A vertex represents a 2D point in the image.
3579
- # NOTE: the normalized vertex coordinates are relative to the original image
3580
- # and range from 0 to 1.
3981
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
3982
+ # coordinates are relative to the original image and range from 0 to 1.
3581
3983
  # Corresponds to the JSON property `point`
3582
3984
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
3583
3985
  attr_accessor :point
@@ -3603,8 +4005,7 @@ module Google
3603
4005
  # @return [String]
3604
4006
  attr_accessor :description
3605
4007
 
3606
- # Opaque entity ID. Some IDs may be available in
3607
- # [Google Knowledge Graph Search
4008
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
3608
4009
  # API](https://developers.google.com/knowledge-graph/).
3609
4010
  # Corresponds to the JSON property `entityId`
3610
4011
  # @return [String]
@@ -3627,9 +4028,9 @@ module Google
3627
4028
  end
3628
4029
  end
3629
4030
 
3630
- # Explicit content annotation (based on per-frame visual signals only).
3631
- # If no explicit content has been detected in a frame, no annotations are
3632
- # present for that frame.
4031
+ # Explicit content annotation (based on per-frame visual signals only). If no
4032
+ # explicit content has been detected in a frame, no annotations are present for
4033
+ # that frame.
3633
4034
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
3634
4035
  include Google::Apis::Core::Hashable
3635
4036
 
@@ -3680,14 +4081,110 @@ module Google
3680
4081
  end
3681
4082
  end
3682
4083
 
4084
+ # Deprecated. No effect.
4085
+ class GoogleCloudVideointelligenceV1p2beta1FaceAnnotation
4086
+ include Google::Apis::Core::Hashable
4087
+
4088
+ # All video frames where a face was detected.
4089
+ # Corresponds to the JSON property `frames`
4090
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1FaceFrame>]
4091
+ attr_accessor :frames
4092
+
4093
+ # All video segments where a face was detected.
4094
+ # Corresponds to the JSON property `segments`
4095
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1FaceSegment>]
4096
+ attr_accessor :segments
4097
+
4098
+ # Thumbnail of a representative face view (in JPEG format).
4099
+ # Corresponds to the JSON property `thumbnail`
4100
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
4101
+ # @return [String]
4102
+ attr_accessor :thumbnail
4103
+
4104
+ def initialize(**args)
4105
+ update!(**args)
4106
+ end
4107
+
4108
+ # Update properties of this object
4109
+ def update!(**args)
4110
+ @frames = args[:frames] if args.key?(:frames)
4111
+ @segments = args[:segments] if args.key?(:segments)
4112
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
4113
+ end
4114
+ end
4115
+
4116
+ # Face detection annotation.
4117
+ class GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation
4118
+ include Google::Apis::Core::Hashable
4119
+
4120
+ # Feature version.
4121
+ # Corresponds to the JSON property `version`
4122
+ # @return [String]
4123
+ attr_accessor :version
4124
+
4125
+ def initialize(**args)
4126
+ update!(**args)
4127
+ end
4128
+
4129
+ # Update properties of this object
4130
+ def update!(**args)
4131
+ @version = args[:version] if args.key?(:version)
4132
+ end
4133
+ end
4134
+
4135
+ # Deprecated. No effect.
4136
+ class GoogleCloudVideointelligenceV1p2beta1FaceFrame
4137
+ include Google::Apis::Core::Hashable
4138
+
4139
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
4140
+ # same face is detected in multiple locations within the current frame.
4141
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
4142
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox>]
4143
+ attr_accessor :normalized_bounding_boxes
4144
+
4145
+ # Time-offset, relative to the beginning of the video, corresponding to the
4146
+ # video frame for this location.
4147
+ # Corresponds to the JSON property `timeOffset`
4148
+ # @return [String]
4149
+ attr_accessor :time_offset
4150
+
4151
+ def initialize(**args)
4152
+ update!(**args)
4153
+ end
4154
+
4155
+ # Update properties of this object
4156
+ def update!(**args)
4157
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
4158
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
4159
+ end
4160
+ end
4161
+
4162
+ # Video segment level annotation results for face detection.
4163
+ class GoogleCloudVideointelligenceV1p2beta1FaceSegment
4164
+ include Google::Apis::Core::Hashable
4165
+
4166
+ # Video segment.
4167
+ # Corresponds to the JSON property `segment`
4168
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
4169
+ attr_accessor :segment
4170
+
4171
+ def initialize(**args)
4172
+ update!(**args)
4173
+ end
4174
+
4175
+ # Update properties of this object
4176
+ def update!(**args)
4177
+ @segment = args[:segment] if args.key?(:segment)
4178
+ end
4179
+ end
4180
+
3683
4181
  # Label annotation.
3684
4182
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
3685
4183
  include Google::Apis::Core::Hashable
3686
4184
 
3687
- # Common categories for the detected entity.
3688
- # For example, when the label is `Terrier`, the category is likely `dog`. And
3689
- # in some cases there might be more than one categories e.g., `Terrier` could
3690
- # also be a `pet`.
4185
+ # Common categories for the detected entity. For example, when the label is `
4186
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
4187
+ # than one categories e.g., `Terrier` could also be a `pet`.
3691
4188
  # Corresponds to the JSON property `categoryEntities`
3692
4189
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1Entity>]
3693
4190
  attr_accessor :category_entities
@@ -3786,14 +4283,14 @@ module Google
3786
4283
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
3787
4284
  attr_accessor :entity
3788
4285
 
3789
- # All video segments where the recognized logo appears. There might be
3790
- # multiple instances of the same logo class appearing in one VideoSegment.
4286
+ # All video segments where the recognized logo appears. There might be multiple
4287
+ # instances of the same logo class appearing in one VideoSegment.
3791
4288
  # Corresponds to the JSON property `segments`
3792
4289
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
3793
4290
  attr_accessor :segments
3794
4291
 
3795
- # All logo tracks where the recognized logo appears. Each track corresponds
3796
- # to one logo instance appearing in consecutive frames.
4292
+ # All logo tracks where the recognized logo appears. Each track corresponds to
4293
+ # one logo instance appearing in consecutive frames.
3797
4294
  # Corresponds to the JSON property `tracks`
3798
4295
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1Track>]
3799
4296
  attr_accessor :tracks
@@ -3810,9 +4307,8 @@ module Google
3810
4307
  end
3811
4308
  end
3812
4309
 
3813
- # Normalized bounding box.
3814
- # The normalized vertex coordinates are relative to the original image.
3815
- # Range: [0, 1].
4310
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4311
+ # original image. Range: [0, 1].
3816
4312
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
3817
4313
  include Google::Apis::Core::Hashable
3818
4314
 
@@ -3850,20 +4346,12 @@ module Google
3850
4346
  end
3851
4347
 
3852
4348
  # Normalized bounding polygon for text (that might not be aligned with axis).
3853
- # Contains list of the corner points in clockwise order starting from
3854
- # top-left corner. For example, for a rectangular bounding box:
3855
- # When the text is horizontal it might look like:
3856
- # 0----1
3857
- # | |
3858
- # 3----2
3859
- # When it's clockwise rotated 180 degrees around the top-left corner it
3860
- # becomes:
3861
- # 2----3
3862
- # | |
3863
- # 1----0
3864
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
3865
- # than 0, or greater than 1 due to trignometric calculations for location of
3866
- # the box.
4349
+ # Contains list of the corner points in clockwise order starting from top-left
4350
+ # corner. For example, for a rectangular bounding box: When the text is
4351
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4352
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4353
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4354
+ # or greater than 1 due to trignometric calculations for location of the box.
3867
4355
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
3868
4356
  include Google::Apis::Core::Hashable
3869
4357
 
@@ -3882,9 +4370,8 @@ module Google
3882
4370
  end
3883
4371
  end
3884
4372
 
3885
- # A vertex represents a 2D point in the image.
3886
- # NOTE: the normalized vertex coordinates are relative to the original image
3887
- # and range from 0 to 1.
4373
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
4374
+ # coordinates are relative to the original image and range from 0 to 1.
3888
4375
  class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
3889
4376
  include Google::Apis::Core::Hashable
3890
4377
 
@@ -3923,10 +4410,10 @@ module Google
3923
4410
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
3924
4411
  attr_accessor :entity
3925
4412
 
3926
- # Information corresponding to all frames where this object track appears.
3927
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
3928
- # messages in frames.
3929
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
4413
+ # Information corresponding to all frames where this object track appears. Non-
4414
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
4415
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
4416
+ # frames.
3930
4417
  # Corresponds to the JSON property `frames`
3931
4418
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
3932
4419
  attr_accessor :frames
@@ -3936,12 +4423,11 @@ module Google
3936
4423
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
3937
4424
  attr_accessor :segment
3938
4425
 
3939
- # Streaming mode ONLY.
3940
- # In streaming mode, we do not know the end time of a tracked object
3941
- # before it is completed. Hence, there is no VideoSegment info returned.
3942
- # Instead, we provide a unique identifiable integer track_id so that
3943
- # the customers can correlate the results of the ongoing
3944
- # ObjectTrackAnnotation of the same track_id over time.
4426
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
4427
+ # tracked object before it is completed. Hence, there is no VideoSegment info
4428
+ # returned. Instead, we provide a unique identifiable integer track_id so that
4429
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
4430
+ # of the same track_id over time.
3945
4431
  # Corresponds to the JSON property `trackId`
3946
4432
  # @return [Fixnum]
3947
4433
  attr_accessor :track_id
@@ -3971,9 +4457,8 @@ module Google
3971
4457
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
3972
4458
  include Google::Apis::Core::Hashable
3973
4459
 
3974
- # Normalized bounding box.
3975
- # The normalized vertex coordinates are relative to the original image.
3976
- # Range: [0, 1].
4460
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4461
+ # original image. Range: [0, 1].
3977
4462
  # Corresponds to the JSON property `normalizedBoundingBox`
3978
4463
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
3979
4464
  attr_accessor :normalized_bounding_box
@@ -3994,16 +4479,41 @@ module Google
3994
4479
  end
3995
4480
  end
3996
4481
 
4482
+ # Person detection annotation per video.
4483
+ class GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation
4484
+ include Google::Apis::Core::Hashable
4485
+
4486
+ # The detected tracks of a person.
4487
+ # Corresponds to the JSON property `tracks`
4488
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1Track>]
4489
+ attr_accessor :tracks
4490
+
4491
+ # Feature version.
4492
+ # Corresponds to the JSON property `version`
4493
+ # @return [String]
4494
+ attr_accessor :version
4495
+
4496
+ def initialize(**args)
4497
+ update!(**args)
4498
+ end
4499
+
4500
+ # Update properties of this object
4501
+ def update!(**args)
4502
+ @tracks = args[:tracks] if args.key?(:tracks)
4503
+ @version = args[:version] if args.key?(:version)
4504
+ end
4505
+ end
4506
+
3997
4507
  # Alternative hypotheses (a.k.a. n-best list).
3998
4508
  class GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative
3999
4509
  include Google::Apis::Core::Hashable
4000
4510
 
4001
4511
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4002
4512
  # indicates an estimated greater likelihood that the recognized words are
4003
- # correct. This field is set only for the top alternative.
4004
- # This field is not guaranteed to be accurate and users should not rely on it
4005
- # to be always provided.
4006
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4513
+ # correct. This field is set only for the top alternative. This field is not
4514
+ # guaranteed to be accurate and users should not rely on it to be always
4515
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4516
+ # not set.
4007
4517
  # Corresponds to the JSON property `confidence`
4008
4518
  # @return [Float]
4009
4519
  attr_accessor :confidence
@@ -4014,8 +4524,8 @@ module Google
4014
4524
  attr_accessor :transcript
4015
4525
 
4016
4526
  # Output only. A list of word-specific information for each recognized word.
4017
- # Note: When `enable_speaker_diarization` is set to true, you will see all
4018
- # the words from the beginning of the audio.
4527
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
4528
+ # words from the beginning of the audio.
4019
4529
  # Corresponds to the JSON property `words`
4020
4530
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
4021
4531
  attr_accessor :words
@@ -4036,18 +4546,17 @@ module Google
4036
4546
  class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
4037
4547
  include Google::Apis::Core::Hashable
4038
4548
 
4039
- # May contain one or more recognition hypotheses (up to the maximum specified
4040
- # in `max_alternatives`). These alternatives are ordered in terms of
4041
- # accuracy, with the top (first) alternative being the most probable, as
4042
- # ranked by the recognizer.
4549
+ # May contain one or more recognition hypotheses (up to the maximum specified in
4550
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
4551
+ # the top (first) alternative being the most probable, as ranked by the
4552
+ # recognizer.
4043
4553
  # Corresponds to the JSON property `alternatives`
4044
4554
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
4045
4555
  attr_accessor :alternatives
4046
4556
 
4047
4557
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
4048
- # language tag of
4049
- # the language in this result. This language code was detected to have the
4050
- # most likelihood of being spoken in the audio.
4558
+ # language tag of the language in this result. This language code was detected
4559
+ # to have the most likelihood of being spoken in the audio.
4051
4560
  # Corresponds to the JSON property `languageCode`
4052
4561
  # @return [String]
4053
4562
  attr_accessor :language_code
@@ -4096,27 +4605,19 @@ module Google
4096
4605
  end
4097
4606
  end
4098
4607
 
4099
- # Video frame level annotation results for text annotation (OCR).
4100
- # Contains information regarding timestamp and bounding box locations for the
4101
- # frames containing detected OCR text snippets.
4608
+ # Video frame level annotation results for text annotation (OCR). Contains
4609
+ # information regarding timestamp and bounding box locations for the frames
4610
+ # containing detected OCR text snippets.
4102
4611
  class GoogleCloudVideointelligenceV1p2beta1TextFrame
4103
4612
  include Google::Apis::Core::Hashable
4104
4613
 
4105
4614
  # Normalized bounding polygon for text (that might not be aligned with axis).
4106
- # Contains list of the corner points in clockwise order starting from
4107
- # top-left corner. For example, for a rectangular bounding box:
4108
- # When the text is horizontal it might look like:
4109
- # 0----1
4110
- # | |
4111
- # 3----2
4112
- # When it's clockwise rotated 180 degrees around the top-left corner it
4113
- # becomes:
4114
- # 2----3
4115
- # | |
4116
- # 1----0
4117
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
4118
- # than 0, or greater than 1 due to trignometric calculations for location of
4119
- # the box.
4615
+ # Contains list of the corner points in clockwise order starting from top-left
4616
+ # corner. For example, for a rectangular bounding box: When the text is
4617
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
4618
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
4619
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
4620
+ # or greater than 1 due to trignometric calculations for location of the box.
4120
4621
  # Corresponds to the JSON property `rotatedBoundingBox`
4121
4622
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
4122
4623
  attr_accessor :rotated_bounding_box
@@ -4169,9 +4670,8 @@ module Google
4169
4670
  end
4170
4671
  end
4171
4672
 
4172
- # For tracking related features.
4173
- # An object at time_offset with attributes, and located with
4174
- # normalized_bounding_box.
4673
+ # For tracking related features. An object at time_offset with attributes, and
4674
+ # located with normalized_bounding_box.
4175
4675
  class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
4176
4676
  include Google::Apis::Core::Hashable
4177
4677
 
@@ -4185,15 +4685,14 @@ module Google
4185
4685
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
4186
4686
  attr_accessor :landmarks
4187
4687
 
4188
- # Normalized bounding box.
4189
- # The normalized vertex coordinates are relative to the original image.
4190
- # Range: [0, 1].
4688
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
4689
+ # original image. Range: [0, 1].
4191
4690
  # Corresponds to the JSON property `normalizedBoundingBox`
4192
4691
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
4193
4692
  attr_accessor :normalized_bounding_box
4194
4693
 
4195
- # Time-offset, relative to the beginning of the video,
4196
- # corresponding to the video frame for this object.
4694
+ # Time-offset, relative to the beginning of the video, corresponding to the
4695
+ # video frame for this object.
4197
4696
  # Corresponds to the JSON property `timeOffset`
4198
4697
  # @return [String]
4199
4698
  attr_accessor :time_offset
@@ -4252,20 +4751,19 @@ module Google
4252
4751
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
4253
4752
  include Google::Apis::Core::Hashable
4254
4753
 
4255
- # Specifies which feature is being tracked if the request contains more than
4256
- # one feature.
4754
+ # Specifies which feature is being tracked if the request contains more than one
4755
+ # feature.
4257
4756
  # Corresponds to the JSON property `feature`
4258
4757
  # @return [String]
4259
4758
  attr_accessor :feature
4260
4759
 
4261
- # Video file location in
4262
- # [Cloud Storage](https://cloud.google.com/storage/).
4760
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4263
4761
  # Corresponds to the JSON property `inputUri`
4264
4762
  # @return [String]
4265
4763
  attr_accessor :input_uri
4266
4764
 
4267
- # Approximate percentage processed thus far. Guaranteed to be
4268
- # 100 when fully processed.
4765
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
4766
+ # processed.
4269
4767
  # Corresponds to the JSON property `progressPercent`
4270
4768
  # @return [Fixnum]
4271
4769
  attr_accessor :progress_percent
@@ -4304,31 +4802,40 @@ module Google
4304
4802
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
4305
4803
  include Google::Apis::Core::Hashable
4306
4804
 
4307
- # The `Status` type defines a logical error model that is suitable for
4308
- # different programming environments, including REST APIs and RPC APIs. It is
4309
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
4310
- # three pieces of data: error code, error message, and error details.
4311
- # You can find out more about this error model and how to work with it in the
4312
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
4805
+ # The `Status` type defines a logical error model that is suitable for different
4806
+ # programming environments, including REST APIs and RPC APIs. It is used by [
4807
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
4808
+ # data: error code, error message, and error details. You can find out more
4809
+ # about this error model and how to work with it in the [API Design Guide](https:
4810
+ # //cloud.google.com/apis/design/errors).
4313
4811
  # Corresponds to the JSON property `error`
4314
4812
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
4315
4813
  attr_accessor :error
4316
4814
 
4317
- # Explicit content annotation (based on per-frame visual signals only).
4318
- # If no explicit content has been detected in a frame, no annotations are
4319
- # present for that frame.
4815
+ # Explicit content annotation (based on per-frame visual signals only). If no
4816
+ # explicit content has been detected in a frame, no annotations are present for
4817
+ # that frame.
4320
4818
  # Corresponds to the JSON property `explicitAnnotation`
4321
4819
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
4322
4820
  attr_accessor :explicit_annotation
4323
4821
 
4324
- # Label annotations on frame level.
4325
- # There is exactly one element for each unique label.
4822
+ # Deprecated. Please use `face_detection_annotations` instead.
4823
+ # Corresponds to the JSON property `faceAnnotations`
4824
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1FaceAnnotation>]
4825
+ attr_accessor :face_annotations
4826
+
4827
+ # Face detection annotations.
4828
+ # Corresponds to the JSON property `faceDetectionAnnotations`
4829
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation>]
4830
+ attr_accessor :face_detection_annotations
4831
+
4832
+ # Label annotations on frame level. There is exactly one element for each unique
4833
+ # label.
4326
4834
  # Corresponds to the JSON property `frameLabelAnnotations`
4327
4835
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4328
4836
  attr_accessor :frame_label_annotations
4329
4837
 
4330
- # Video file location in
4331
- # [Cloud Storage](https://cloud.google.com/storage/).
4838
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
4332
4839
  # Corresponds to the JSON property `inputUri`
4333
4840
  # @return [String]
4334
4841
  attr_accessor :input_uri
@@ -4343,6 +4850,11 @@ module Google
4343
4850
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation>]
4344
4851
  attr_accessor :object_annotations
4345
4852
 
4853
+ # Person detection annotations.
4854
+ # Corresponds to the JSON property `personDetectionAnnotations`
4855
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation>]
4856
+ attr_accessor :person_detection_annotations
4857
+
4346
4858
  # Video segment.
4347
4859
  # Corresponds to the JSON property `segment`
4348
4860
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
@@ -4355,11 +4867,11 @@ module Google
4355
4867
  attr_accessor :segment_label_annotations
4356
4868
 
4357
4869
  # Presence label annotations on video level or user-specified segment level.
4358
- # There is exactly one element for each unique label. Compared to the
4359
- # existing topical `segment_label_annotations`, this field presents more
4360
- # fine-grained, segment-level labels detected in video content and is made
4361
- # available only when the client sets `LabelDetectionConfig.model` to
4362
- # "builtin/latest" in the request.
4870
+ # There is exactly one element for each unique label. Compared to the existing
4871
+ # topical `segment_label_annotations`, this field presents more fine-grained,
4872
+ # segment-level labels detected in video content and is made available only when
4873
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
4874
+ # request.
4363
4875
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
4364
4876
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4365
4877
  attr_accessor :segment_presence_label_annotations
@@ -4369,17 +4881,17 @@ module Google
4369
4881
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
4370
4882
  attr_accessor :shot_annotations
4371
4883
 
4372
- # Topical label annotations on shot level.
4373
- # There is exactly one element for each unique label.
4884
+ # Topical label annotations on shot level. There is exactly one element for each
4885
+ # unique label.
4374
4886
  # Corresponds to the JSON property `shotLabelAnnotations`
4375
4887
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4376
4888
  attr_accessor :shot_label_annotations
4377
4889
 
4378
4890
  # Presence label annotations on shot level. There is exactly one element for
4379
- # each unique label. Compared to the existing topical
4380
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
4381
- # labels detected in video content and is made available only when the client
4382
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
4891
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
4892
+ # this field presents more fine-grained, shot-level labels detected in video
4893
+ # content and is made available only when the client sets `LabelDetectionConfig.
4894
+ # model` to "builtin/latest" in the request.
4383
4895
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
4384
4896
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
4385
4897
  attr_accessor :shot_presence_label_annotations
@@ -4389,9 +4901,8 @@ module Google
4389
4901
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
4390
4902
  attr_accessor :speech_transcriptions
4391
4903
 
4392
- # OCR text detection and tracking.
4393
- # Annotations for list of detected text snippets. Each will have list of
4394
- # frame information associated with it.
4904
+ # OCR text detection and tracking. Annotations for list of detected text
4905
+ # snippets. Each will have list of frame information associated with it.
4395
4906
  # Corresponds to the JSON property `textAnnotations`
4396
4907
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
4397
4908
  attr_accessor :text_annotations
@@ -4404,10 +4915,13 @@ module Google
4404
4915
  def update!(**args)
4405
4916
  @error = args[:error] if args.key?(:error)
4406
4917
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
4918
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
4919
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
4407
4920
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
4408
4921
  @input_uri = args[:input_uri] if args.key?(:input_uri)
4409
4922
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
4410
4923
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
4924
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
4411
4925
  @segment = args[:segment] if args.key?(:segment)
4412
4926
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
4413
4927
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -4423,14 +4937,14 @@ module Google
4423
4937
  class GoogleCloudVideointelligenceV1p2beta1VideoSegment
4424
4938
  include Google::Apis::Core::Hashable
4425
4939
 
4426
- # Time-offset, relative to the beginning of the video,
4427
- # corresponding to the end of the segment (inclusive).
4940
+ # Time-offset, relative to the beginning of the video, corresponding to the end
4941
+ # of the segment (inclusive).
4428
4942
  # Corresponds to the JSON property `endTimeOffset`
4429
4943
  # @return [String]
4430
4944
  attr_accessor :end_time_offset
4431
4945
 
4432
- # Time-offset, relative to the beginning of the video,
4433
- # corresponding to the start of the segment (inclusive).
4946
+ # Time-offset, relative to the beginning of the video, corresponding to the
4947
+ # start of the segment (inclusive).
4434
4948
  # Corresponds to the JSON property `startTimeOffset`
4435
4949
  # @return [String]
4436
4950
  attr_accessor :start_time_offset
@@ -4447,41 +4961,41 @@ module Google
4447
4961
  end
4448
4962
 
4449
4963
  # Word-specific information for recognized words. Word information is only
4450
- # included in the response when certain request parameters are set, such
4451
- # as `enable_word_time_offsets`.
4964
+ # included in the response when certain request parameters are set, such as `
4965
+ # enable_word_time_offsets`.
4452
4966
  class GoogleCloudVideointelligenceV1p2beta1WordInfo
4453
4967
  include Google::Apis::Core::Hashable
4454
4968
 
4455
4969
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
4456
4970
  # indicates an estimated greater likelihood that the recognized words are
4457
- # correct. This field is set only for the top alternative.
4458
- # This field is not guaranteed to be accurate and users should not rely on it
4459
- # to be always provided.
4460
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
4971
+ # correct. This field is set only for the top alternative. This field is not
4972
+ # guaranteed to be accurate and users should not rely on it to be always
4973
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
4974
+ # not set.
4461
4975
  # Corresponds to the JSON property `confidence`
4462
4976
  # @return [Float]
4463
4977
  attr_accessor :confidence
4464
4978
 
4465
- # Time offset relative to the beginning of the audio, and
4466
- # corresponding to the end of the spoken word. This field is only set if
4467
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4468
- # experimental feature and the accuracy of the time offset can vary.
4979
+ # Time offset relative to the beginning of the audio, and corresponding to the
4980
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
4981
+ # true` and only in the top hypothesis. This is an experimental feature and the
4982
+ # accuracy of the time offset can vary.
4469
4983
  # Corresponds to the JSON property `endTime`
4470
4984
  # @return [String]
4471
4985
  attr_accessor :end_time
4472
4986
 
4473
- # Output only. A distinct integer value is assigned for every speaker within
4474
- # the audio. This field specifies which one of those speakers was detected to
4475
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
4476
- # and is only set if speaker diarization is enabled.
4987
+ # Output only. A distinct integer value is assigned for every speaker within the
4988
+ # audio. This field specifies which one of those speakers was detected to have
4989
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
4990
+ # only set if speaker diarization is enabled.
4477
4991
  # Corresponds to the JSON property `speakerTag`
4478
4992
  # @return [Fixnum]
4479
4993
  attr_accessor :speaker_tag
4480
4994
 
4481
- # Time offset relative to the beginning of the audio, and
4482
- # corresponding to the start of the spoken word. This field is only set if
4483
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
4484
- # experimental feature and the accuracy of the time offset can vary.
4995
+ # Time offset relative to the beginning of the audio, and corresponding to the
4996
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
4997
+ # true` and only in the top hypothesis. This is an experimental feature and the
4998
+ # accuracy of the time offset can vary.
4485
4999
  # Corresponds to the JSON property `startTime`
4486
5000
  # @return [String]
4487
5001
  attr_accessor :start_time
@@ -4505,9 +5019,9 @@ module Google
4505
5019
  end
4506
5020
  end
4507
5021
 
4508
- # Video annotation progress. Included in the `metadata`
4509
- # field of the `Operation` returned by the `GetOperation`
4510
- # call of the `google::longrunning::Operations` service.
5022
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
5023
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
5024
+ # service.
4511
5025
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
4512
5026
  include Google::Apis::Core::Hashable
4513
5027
 
@@ -4526,9 +5040,9 @@ module Google
4526
5040
  end
4527
5041
  end
4528
5042
 
4529
- # Video annotation response. Included in the `response`
4530
- # field of the `Operation` returned by the `GetOperation`
4531
- # call of the `google::longrunning::Operations` service.
5043
+ # Video annotation response. Included in the `response` field of the `Operation`
5044
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
5045
+ # service.
4532
5046
  class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
4533
5047
  include Google::Apis::Core::Hashable
4534
5048
 
@@ -4562,10 +5076,9 @@ module Google
4562
5076
  # @return [String]
4563
5077
  attr_accessor :display_name
4564
5078
 
4565
- # The resource name of the celebrity. Have the format
4566
- # `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
4567
- # kg-mid is the id in Google knowledge graph, which is unique for the
4568
- # celebrity.
5079
+ # The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
5080
+ # indicates a celebrity from preloaded gallery. kg-mid is the id in Google
5081
+ # knowledge graph, which is unique for the celebrity.
4569
5082
  # Corresponds to the JSON property `name`
4570
5083
  # @return [String]
4571
5084
  attr_accessor :name
@@ -4586,8 +5099,8 @@ module Google
4586
5099
  class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
4587
5100
  include Google::Apis::Core::Hashable
4588
5101
 
4589
- # The tracks detected from the input video, including recognized celebrities
4590
- # and other detected faces in the video.
5102
+ # The tracks detected from the input video, including recognized celebrities and
5103
+ # other detected faces in the video.
4591
5104
  # Corresponds to the JSON property `celebrityTracks`
4592
5105
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
4593
5106
  attr_accessor :celebrity_tracks
@@ -4643,14 +5156,14 @@ module Google
4643
5156
  # @return [Float]
4644
5157
  attr_accessor :confidence
4645
5158
 
4646
- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
4647
- # A full list of supported type names will be provided in the document.
5159
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
5160
+ # full list of supported type names will be provided in the document.
4648
5161
  # Corresponds to the JSON property `name`
4649
5162
  # @return [String]
4650
5163
  attr_accessor :name
4651
5164
 
4652
- # Text value of the detection result. For example, the value for "HairColor"
4653
- # can be "black", "blonde", etc.
5165
+ # Text value of the detection result. For example, the value for "HairColor" can
5166
+ # be "black", "blonde", etc.
4654
5167
  # Corresponds to the JSON property `value`
4655
5168
  # @return [String]
4656
5169
  attr_accessor :value
@@ -4682,9 +5195,8 @@ module Google
4682
5195
  # @return [String]
4683
5196
  attr_accessor :name
4684
5197
 
4685
- # A vertex represents a 2D point in the image.
4686
- # NOTE: the normalized vertex coordinates are relative to the original image
4687
- # and range from 0 to 1.
5198
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
5199
+ # coordinates are relative to the original image and range from 0 to 1.
4688
5200
  # Corresponds to the JSON property `point`
4689
5201
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
4690
5202
  attr_accessor :point
@@ -4710,8 +5222,7 @@ module Google
4710
5222
  # @return [String]
4711
5223
  attr_accessor :description
4712
5224
 
4713
- # Opaque entity ID. Some IDs may be available in
4714
- # [Google Knowledge Graph Search
5225
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
4715
5226
  # API](https://developers.google.com/knowledge-graph/).
4716
5227
  # Corresponds to the JSON property `entityId`
4717
5228
  # @return [String]
@@ -4734,9 +5245,9 @@ module Google
4734
5245
  end
4735
5246
  end
4736
5247
 
4737
- # Explicit content annotation (based on per-frame visual signals only).
4738
- # If no explicit content has been detected in a frame, no annotations are
4739
- # present for that frame.
5248
+ # Explicit content annotation (based on per-frame visual signals only). If no
5249
+ # explicit content has been detected in a frame, no annotations are present for
5250
+ # that frame.
4740
5251
  class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
4741
5252
  include Google::Apis::Core::Hashable
4742
5253
 
@@ -4787,20 +5298,41 @@ module Google
4787
5298
  end
4788
5299
  end
4789
5300
 
4790
- # Face detection annotation.
4791
- class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
5301
+ # Deprecated. No effect.
5302
+ class GoogleCloudVideointelligenceV1p3beta1FaceAnnotation
4792
5303
  include Google::Apis::Core::Hashable
4793
5304
 
4794
- # The thumbnail of a person's face.
5305
+ # All video frames where a face was detected.
5306
+ # Corresponds to the JSON property `frames`
5307
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1FaceFrame>]
5308
+ attr_accessor :frames
5309
+
5310
+ # All video segments where a face was detected.
5311
+ # Corresponds to the JSON property `segments`
5312
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1FaceSegment>]
5313
+ attr_accessor :segments
5314
+
5315
+ # Thumbnail of a representative face view (in JPEG format).
4795
5316
  # Corresponds to the JSON property `thumbnail`
4796
5317
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
4797
5318
  # @return [String]
4798
5319
  attr_accessor :thumbnail
4799
5320
 
4800
- # The face tracks with attributes.
4801
- # Corresponds to the JSON property `tracks`
4802
- # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
4803
- attr_accessor :tracks
5321
+ def initialize(**args)
5322
+ update!(**args)
5323
+ end
5324
+
5325
+ # Update properties of this object
5326
+ def update!(**args)
5327
+ @frames = args[:frames] if args.key?(:frames)
5328
+ @segments = args[:segments] if args.key?(:segments)
5329
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
5330
+ end
5331
+ end
5332
+
5333
+ # Face detection annotation.
5334
+ class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
5335
+ include Google::Apis::Core::Hashable
4804
5336
 
4805
5337
  # Feature version.
4806
5338
  # Corresponds to the JSON property `version`
@@ -4813,20 +5345,63 @@ module Google
4813
5345
 
4814
5346
  # Update properties of this object
4815
5347
  def update!(**args)
4816
- @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
4817
- @tracks = args[:tracks] if args.key?(:tracks)
4818
5348
  @version = args[:version] if args.key?(:version)
4819
5349
  end
4820
5350
  end
4821
5351
 
5352
+ # Deprecated. No effect.
5353
+ class GoogleCloudVideointelligenceV1p3beta1FaceFrame
5354
+ include Google::Apis::Core::Hashable
5355
+
5356
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
5357
+ # same face is detected in multiple locations within the current frame.
5358
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
5359
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox>]
5360
+ attr_accessor :normalized_bounding_boxes
5361
+
5362
+ # Time-offset, relative to the beginning of the video, corresponding to the
5363
+ # video frame for this location.
5364
+ # Corresponds to the JSON property `timeOffset`
5365
+ # @return [String]
5366
+ attr_accessor :time_offset
5367
+
5368
+ def initialize(**args)
5369
+ update!(**args)
5370
+ end
5371
+
5372
+ # Update properties of this object
5373
+ def update!(**args)
5374
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
5375
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
5376
+ end
5377
+ end
5378
+
5379
+ # Video segment level annotation results for face detection.
5380
+ class GoogleCloudVideointelligenceV1p3beta1FaceSegment
5381
+ include Google::Apis::Core::Hashable
5382
+
5383
+ # Video segment.
5384
+ # Corresponds to the JSON property `segment`
5385
+ # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
5386
+ attr_accessor :segment
5387
+
5388
+ def initialize(**args)
5389
+ update!(**args)
5390
+ end
5391
+
5392
+ # Update properties of this object
5393
+ def update!(**args)
5394
+ @segment = args[:segment] if args.key?(:segment)
5395
+ end
5396
+ end
5397
+
4822
5398
  # Label annotation.
4823
5399
  class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
4824
5400
  include Google::Apis::Core::Hashable
4825
5401
 
4826
- # Common categories for the detected entity.
4827
- # For example, when the label is `Terrier`, the category is likely `dog`. And
4828
- # in some cases there might be more than one categories e.g., `Terrier` could
4829
- # also be a `pet`.
5402
+ # Common categories for the detected entity. For example, when the label is `
5403
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
5404
+ # than one categories e.g., `Terrier` could also be a `pet`.
4830
5405
  # Corresponds to the JSON property `categoryEntities`
4831
5406
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1Entity>]
4832
5407
  attr_accessor :category_entities
@@ -4925,14 +5500,14 @@ module Google
4925
5500
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
4926
5501
  attr_accessor :entity
4927
5502
 
4928
- # All video segments where the recognized logo appears. There might be
4929
- # multiple instances of the same logo class appearing in one VideoSegment.
5503
+ # All video segments where the recognized logo appears. There might be multiple
5504
+ # instances of the same logo class appearing in one VideoSegment.
4930
5505
  # Corresponds to the JSON property `segments`
4931
5506
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
4932
5507
  attr_accessor :segments
4933
5508
 
4934
- # All logo tracks where the recognized logo appears. Each track corresponds
4935
- # to one logo instance appearing in consecutive frames.
5509
+ # All logo tracks where the recognized logo appears. Each track corresponds to
5510
+ # one logo instance appearing in consecutive frames.
4936
5511
  # Corresponds to the JSON property `tracks`
4937
5512
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
4938
5513
  attr_accessor :tracks
@@ -4949,9 +5524,8 @@ module Google
4949
5524
  end
4950
5525
  end
4951
5526
 
4952
- # Normalized bounding box.
4953
- # The normalized vertex coordinates are relative to the original image.
4954
- # Range: [0, 1].
5527
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
5528
+ # original image. Range: [0, 1].
4955
5529
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
4956
5530
  include Google::Apis::Core::Hashable
4957
5531
 
@@ -4989,20 +5563,12 @@ module Google
4989
5563
  end
4990
5564
 
4991
5565
  # Normalized bounding polygon for text (that might not be aligned with axis).
4992
- # Contains list of the corner points in clockwise order starting from
4993
- # top-left corner. For example, for a rectangular bounding box:
4994
- # When the text is horizontal it might look like:
4995
- # 0----1
4996
- # | |
4997
- # 3----2
4998
- # When it's clockwise rotated 180 degrees around the top-left corner it
4999
- # becomes:
5000
- # 2----3
5001
- # | |
5002
- # 1----0
5003
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5004
- # than 0, or greater than 1 due to trignometric calculations for location of
5005
- # the box.
5566
+ # Contains list of the corner points in clockwise order starting from top-left
5567
+ # corner. For example, for a rectangular bounding box: When the text is
5568
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5569
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5570
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5571
+ # or greater than 1 due to trignometric calculations for location of the box.
5006
5572
  class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
5007
5573
  include Google::Apis::Core::Hashable
5008
5574
 
@@ -5021,9 +5587,8 @@ module Google
5021
5587
  end
5022
5588
  end
5023
5589
 
5024
- # A vertex represents a 2D point in the image.
5025
- # NOTE: the normalized vertex coordinates are relative to the original image
5026
- # and range from 0 to 1.
5590
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
5591
+ # coordinates are relative to the original image and range from 0 to 1.
5027
5592
  class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
5028
5593
  include Google::Apis::Core::Hashable
5029
5594
 
@@ -5062,10 +5627,10 @@ module Google
5062
5627
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
5063
5628
  attr_accessor :entity
5064
5629
 
5065
- # Information corresponding to all frames where this object track appears.
5066
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
5067
- # messages in frames.
5068
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
5630
+ # Information corresponding to all frames where this object track appears. Non-
5631
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
5632
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
5633
+ # frames.
5069
5634
  # Corresponds to the JSON property `frames`
5070
5635
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
5071
5636
  attr_accessor :frames
@@ -5075,12 +5640,11 @@ module Google
5075
5640
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
5076
5641
  attr_accessor :segment
5077
5642
 
5078
- # Streaming mode ONLY.
5079
- # In streaming mode, we do not know the end time of a tracked object
5080
- # before it is completed. Hence, there is no VideoSegment info returned.
5081
- # Instead, we provide a unique identifiable integer track_id so that
5082
- # the customers can correlate the results of the ongoing
5083
- # ObjectTrackAnnotation of the same track_id over time.
5643
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
5644
+ # tracked object before it is completed. Hence, there is no VideoSegment info
5645
+ # returned. Instead, we provide a unique identifiable integer track_id so that
5646
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
5647
+ # of the same track_id over time.
5084
5648
  # Corresponds to the JSON property `trackId`
5085
5649
  # @return [Fixnum]
5086
5650
  attr_accessor :track_id
@@ -5110,9 +5674,8 @@ module Google
5110
5674
  class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
5111
5675
  include Google::Apis::Core::Hashable
5112
5676
 
5113
- # Normalized bounding box.
5114
- # The normalized vertex coordinates are relative to the original image.
5115
- # Range: [0, 1].
5677
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
5678
+ # original image. Range: [0, 1].
5116
5679
  # Corresponds to the JSON property `normalizedBoundingBox`
5117
5680
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5118
5681
  attr_accessor :normalized_bounding_box
@@ -5189,10 +5752,10 @@ module Google
5189
5752
 
5190
5753
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5191
5754
  # indicates an estimated greater likelihood that the recognized words are
5192
- # correct. This field is set only for the top alternative.
5193
- # This field is not guaranteed to be accurate and users should not rely on it
5194
- # to be always provided.
5195
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
5755
+ # correct. This field is set only for the top alternative. This field is not
5756
+ # guaranteed to be accurate and users should not rely on it to be always
5757
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
5758
+ # not set.
5196
5759
  # Corresponds to the JSON property `confidence`
5197
5760
  # @return [Float]
5198
5761
  attr_accessor :confidence
@@ -5203,8 +5766,8 @@ module Google
5203
5766
  attr_accessor :transcript
5204
5767
 
5205
5768
  # Output only. A list of word-specific information for each recognized word.
5206
- # Note: When `enable_speaker_diarization` is set to true, you will see all
5207
- # the words from the beginning of the audio.
5769
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
5770
+ # words from the beginning of the audio.
5208
5771
  # Corresponds to the JSON property `words`
5209
5772
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
5210
5773
  attr_accessor :words
@@ -5225,18 +5788,17 @@ module Google
5225
5788
  class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
5226
5789
  include Google::Apis::Core::Hashable
5227
5790
 
5228
- # May contain one or more recognition hypotheses (up to the maximum specified
5229
- # in `max_alternatives`). These alternatives are ordered in terms of
5230
- # accuracy, with the top (first) alternative being the most probable, as
5231
- # ranked by the recognizer.
5791
+ # May contain one or more recognition hypotheses (up to the maximum specified in
5792
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
5793
+ # the top (first) alternative being the most probable, as ranked by the
5794
+ # recognizer.
5232
5795
  # Corresponds to the JSON property `alternatives`
5233
5796
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
5234
5797
  attr_accessor :alternatives
5235
5798
 
5236
5799
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
5237
- # language tag of
5238
- # the language in this result. This language code was detected to have the
5239
- # most likelihood of being spoken in the audio.
5800
+ # language tag of the language in this result. This language code was detected
5801
+ # to have the most likelihood of being spoken in the audio.
5240
5802
  # Corresponds to the JSON property `languageCode`
5241
5803
  # @return [String]
5242
5804
  attr_accessor :language_code
@@ -5252,32 +5814,32 @@ module Google
5252
5814
  end
5253
5815
  end
5254
5816
 
5255
- # `StreamingAnnotateVideoResponse` is the only message returned to the client
5256
- # by `StreamingAnnotateVideo`. A series of zero or more
5257
- # `StreamingAnnotateVideoResponse` messages are streamed back to the client.
5817
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
5818
+ # `StreamingAnnotateVideo`. A series of zero or more `
5819
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
5258
5820
  class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
5259
5821
  include Google::Apis::Core::Hashable
5260
5822
 
5261
- # Streaming annotation results corresponding to a portion of the video
5262
- # that is currently being processed.
5823
+ # Streaming annotation results corresponding to a portion of the video that is
5824
+ # currently being processed. Only ONE type of annotation will be specified in
5825
+ # the response.
5263
5826
  # Corresponds to the JSON property `annotationResults`
5264
5827
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
5265
5828
  attr_accessor :annotation_results
5266
5829
 
5267
- # Google Cloud Storage URI that stores annotation results of one
5268
- # streaming session in JSON format.
5269
- # It is the annotation_result_storage_directory
5270
- # from the request followed by '/cloud_project_number-session_id'.
5830
+ # Google Cloud Storage URI that stores annotation results of one streaming
5831
+ # session in JSON format. It is the annotation_result_storage_directory from the
5832
+ # request followed by '/cloud_project_number-session_id'.
5271
5833
  # Corresponds to the JSON property `annotationResultsUri`
5272
5834
  # @return [String]
5273
5835
  attr_accessor :annotation_results_uri
5274
5836
 
5275
- # The `Status` type defines a logical error model that is suitable for
5276
- # different programming environments, including REST APIs and RPC APIs. It is
5277
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5278
- # three pieces of data: error code, error message, and error details.
5279
- # You can find out more about this error model and how to work with it in the
5280
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
5837
+ # The `Status` type defines a logical error model that is suitable for different
5838
+ # programming environments, including REST APIs and RPC APIs. It is used by [
5839
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
5840
+ # data: error code, error message, and error details. You can find out more
5841
+ # about this error model and how to work with it in the [API Design Guide](https:
5842
+ # //cloud.google.com/apis/design/errors).
5281
5843
  # Corresponds to the JSON property `error`
5282
5844
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
5283
5845
  attr_accessor :error
@@ -5294,18 +5856,24 @@ module Google
5294
5856
  end
5295
5857
  end
5296
5858
 
5297
- # Streaming annotation results corresponding to a portion of the video
5298
- # that is currently being processed.
5859
+ # Streaming annotation results corresponding to a portion of the video that is
5860
+ # currently being processed. Only ONE type of annotation will be specified in
5861
+ # the response.
5299
5862
  class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
5300
5863
  include Google::Apis::Core::Hashable
5301
5864
 
5302
- # Explicit content annotation (based on per-frame visual signals only).
5303
- # If no explicit content has been detected in a frame, no annotations are
5304
- # present for that frame.
5865
+ # Explicit content annotation (based on per-frame visual signals only). If no
5866
+ # explicit content has been detected in a frame, no annotations are present for
5867
+ # that frame.
5305
5868
  # Corresponds to the JSON property `explicitAnnotation`
5306
5869
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5307
5870
  attr_accessor :explicit_annotation
5308
5871
 
5872
+ # Timestamp of the processed frame in microseconds.
5873
+ # Corresponds to the JSON property `frameTimestamp`
5874
+ # @return [String]
5875
+ attr_accessor :frame_timestamp
5876
+
5309
5877
  # Label annotation results.
5310
5878
  # Corresponds to the JSON property `labelAnnotations`
5311
5879
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
@@ -5328,6 +5896,7 @@ module Google
5328
5896
  # Update properties of this object
5329
5897
  def update!(**args)
5330
5898
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
5899
+ @frame_timestamp = args[:frame_timestamp] if args.key?(:frame_timestamp)
5331
5900
  @label_annotations = args[:label_annotations] if args.key?(:label_annotations)
5332
5901
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
5333
5902
  @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
@@ -5367,27 +5936,19 @@ module Google
5367
5936
  end
5368
5937
  end
5369
5938
 
5370
- # Video frame level annotation results for text annotation (OCR).
5371
- # Contains information regarding timestamp and bounding box locations for the
5372
- # frames containing detected OCR text snippets.
5939
+ # Video frame level annotation results for text annotation (OCR). Contains
5940
+ # information regarding timestamp and bounding box locations for the frames
5941
+ # containing detected OCR text snippets.
5373
5942
  class GoogleCloudVideointelligenceV1p3beta1TextFrame
5374
5943
  include Google::Apis::Core::Hashable
5375
5944
 
5376
5945
  # Normalized bounding polygon for text (that might not be aligned with axis).
5377
- # Contains list of the corner points in clockwise order starting from
5378
- # top-left corner. For example, for a rectangular bounding box:
5379
- # When the text is horizontal it might look like:
5380
- # 0----1
5381
- # | |
5382
- # 3----2
5383
- # When it's clockwise rotated 180 degrees around the top-left corner it
5384
- # becomes:
5385
- # 2----3
5386
- # | |
5387
- # 1----0
5388
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
5389
- # than 0, or greater than 1 due to trignometric calculations for location of
5390
- # the box.
5946
+ # Contains list of the corner points in clockwise order starting from top-left
5947
+ # corner. For example, for a rectangular bounding box: When the text is
5948
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
5949
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
5950
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
5951
+ # or greater than 1 due to trignometric calculations for location of the box.
5391
5952
  # Corresponds to the JSON property `rotatedBoundingBox`
5392
5953
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
5393
5954
  attr_accessor :rotated_bounding_box
@@ -5440,9 +6001,8 @@ module Google
5440
6001
  end
5441
6002
  end
5442
6003
 
5443
- # For tracking related features.
5444
- # An object at time_offset with attributes, and located with
5445
- # normalized_bounding_box.
6004
+ # For tracking related features. An object at time_offset with attributes, and
6005
+ # located with normalized_bounding_box.
5446
6006
  class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
5447
6007
  include Google::Apis::Core::Hashable
5448
6008
 
@@ -5456,15 +6016,14 @@ module Google
5456
6016
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
5457
6017
  attr_accessor :landmarks
5458
6018
 
5459
- # Normalized bounding box.
5460
- # The normalized vertex coordinates are relative to the original image.
5461
- # Range: [0, 1].
6019
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
6020
+ # original image. Range: [0, 1].
5462
6021
  # Corresponds to the JSON property `normalizedBoundingBox`
5463
6022
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
5464
6023
  attr_accessor :normalized_bounding_box
5465
6024
 
5466
- # Time-offset, relative to the beginning of the video,
5467
- # corresponding to the video frame for this object.
6025
+ # Time-offset, relative to the beginning of the video, corresponding to the
6026
+ # video frame for this object.
5468
6027
  # Corresponds to the JSON property `timeOffset`
5469
6028
  # @return [String]
5470
6029
  attr_accessor :time_offset
@@ -5523,20 +6082,19 @@ module Google
5523
6082
  class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
5524
6083
  include Google::Apis::Core::Hashable
5525
6084
 
5526
- # Specifies which feature is being tracked if the request contains more than
5527
- # one feature.
6085
+ # Specifies which feature is being tracked if the request contains more than one
6086
+ # feature.
5528
6087
  # Corresponds to the JSON property `feature`
5529
6088
  # @return [String]
5530
6089
  attr_accessor :feature
5531
6090
 
5532
- # Video file location in
5533
- # [Cloud Storage](https://cloud.google.com/storage/).
6091
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5534
6092
  # Corresponds to the JSON property `inputUri`
5535
6093
  # @return [String]
5536
6094
  attr_accessor :input_uri
5537
6095
 
5538
- # Approximate percentage processed thus far. Guaranteed to be
5539
- # 100 when fully processed.
6096
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
6097
+ # processed.
5540
6098
  # Corresponds to the JSON property `progressPercent`
5541
6099
  # @return [Fixnum]
5542
6100
  attr_accessor :progress_percent
@@ -5580,36 +6138,40 @@ module Google
5580
6138
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
5581
6139
  attr_accessor :celebrity_recognition_annotations
5582
6140
 
5583
- # The `Status` type defines a logical error model that is suitable for
5584
- # different programming environments, including REST APIs and RPC APIs. It is
5585
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5586
- # three pieces of data: error code, error message, and error details.
5587
- # You can find out more about this error model and how to work with it in the
5588
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
6141
+ # The `Status` type defines a logical error model that is suitable for different
6142
+ # programming environments, including REST APIs and RPC APIs. It is used by [
6143
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
6144
+ # data: error code, error message, and error details. You can find out more
6145
+ # about this error model and how to work with it in the [API Design Guide](https:
6146
+ # //cloud.google.com/apis/design/errors).
5589
6147
  # Corresponds to the JSON property `error`
5590
6148
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
5591
6149
  attr_accessor :error
5592
6150
 
5593
- # Explicit content annotation (based on per-frame visual signals only).
5594
- # If no explicit content has been detected in a frame, no annotations are
5595
- # present for that frame.
6151
+ # Explicit content annotation (based on per-frame visual signals only). If no
6152
+ # explicit content has been detected in a frame, no annotations are present for
6153
+ # that frame.
5596
6154
  # Corresponds to the JSON property `explicitAnnotation`
5597
6155
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
5598
6156
  attr_accessor :explicit_annotation
5599
6157
 
6158
+ # Deprecated. Please use `face_detection_annotations` instead.
6159
+ # Corresponds to the JSON property `faceAnnotations`
6160
+ # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1FaceAnnotation>]
6161
+ attr_accessor :face_annotations
6162
+
5600
6163
  # Face detection annotations.
5601
6164
  # Corresponds to the JSON property `faceDetectionAnnotations`
5602
6165
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
5603
6166
  attr_accessor :face_detection_annotations
5604
6167
 
5605
- # Label annotations on frame level.
5606
- # There is exactly one element for each unique label.
6168
+ # Label annotations on frame level. There is exactly one element for each unique
6169
+ # label.
5607
6170
  # Corresponds to the JSON property `frameLabelAnnotations`
5608
6171
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5609
6172
  attr_accessor :frame_label_annotations
5610
6173
 
5611
- # Video file location in
5612
- # [Cloud Storage](https://cloud.google.com/storage/).
6174
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
5613
6175
  # Corresponds to the JSON property `inputUri`
5614
6176
  # @return [String]
5615
6177
  attr_accessor :input_uri
@@ -5641,11 +6203,11 @@ module Google
5641
6203
  attr_accessor :segment_label_annotations
5642
6204
 
5643
6205
  # Presence label annotations on video level or user-specified segment level.
5644
- # There is exactly one element for each unique label. Compared to the
5645
- # existing topical `segment_label_annotations`, this field presents more
5646
- # fine-grained, segment-level labels detected in video content and is made
5647
- # available only when the client sets `LabelDetectionConfig.model` to
5648
- # "builtin/latest" in the request.
6206
+ # There is exactly one element for each unique label. Compared to the existing
6207
+ # topical `segment_label_annotations`, this field presents more fine-grained,
6208
+ # segment-level labels detected in video content and is made available only when
6209
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
6210
+ # request.
5649
6211
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
5650
6212
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5651
6213
  attr_accessor :segment_presence_label_annotations
@@ -5655,17 +6217,17 @@ module Google
5655
6217
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
5656
6218
  attr_accessor :shot_annotations
5657
6219
 
5658
- # Topical label annotations on shot level.
5659
- # There is exactly one element for each unique label.
6220
+ # Topical label annotations on shot level. There is exactly one element for each
6221
+ # unique label.
5660
6222
  # Corresponds to the JSON property `shotLabelAnnotations`
5661
6223
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5662
6224
  attr_accessor :shot_label_annotations
5663
6225
 
5664
6226
  # Presence label annotations on shot level. There is exactly one element for
5665
- # each unique label. Compared to the existing topical
5666
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
5667
- # labels detected in video content and is made available only when the client
5668
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
6227
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
6228
+ # this field presents more fine-grained, shot-level labels detected in video
6229
+ # content and is made available only when the client sets `LabelDetectionConfig.
6230
+ # model` to "builtin/latest" in the request.
5669
6231
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
5670
6232
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
5671
6233
  attr_accessor :shot_presence_label_annotations
@@ -5675,9 +6237,8 @@ module Google
5675
6237
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
5676
6238
  attr_accessor :speech_transcriptions
5677
6239
 
5678
- # OCR text detection and tracking.
5679
- # Annotations for list of detected text snippets. Each will have list of
5680
- # frame information associated with it.
6240
+ # OCR text detection and tracking. Annotations for list of detected text
6241
+ # snippets. Each will have list of frame information associated with it.
5681
6242
  # Corresponds to the JSON property `textAnnotations`
5682
6243
  # @return [Array<Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
5683
6244
  attr_accessor :text_annotations
@@ -5691,6 +6252,7 @@ module Google
5691
6252
  @celebrity_recognition_annotations = args[:celebrity_recognition_annotations] if args.key?(:celebrity_recognition_annotations)
5692
6253
  @error = args[:error] if args.key?(:error)
5693
6254
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
6255
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
5694
6256
  @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
5695
6257
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
5696
6258
  @input_uri = args[:input_uri] if args.key?(:input_uri)
@@ -5712,14 +6274,14 @@ module Google
5712
6274
  class GoogleCloudVideointelligenceV1p3beta1VideoSegment
5713
6275
  include Google::Apis::Core::Hashable
5714
6276
 
5715
- # Time-offset, relative to the beginning of the video,
5716
- # corresponding to the end of the segment (inclusive).
6277
+ # Time-offset, relative to the beginning of the video, corresponding to the end
6278
+ # of the segment (inclusive).
5717
6279
  # Corresponds to the JSON property `endTimeOffset`
5718
6280
  # @return [String]
5719
6281
  attr_accessor :end_time_offset
5720
6282
 
5721
- # Time-offset, relative to the beginning of the video,
5722
- # corresponding to the start of the segment (inclusive).
6283
+ # Time-offset, relative to the beginning of the video, corresponding to the
6284
+ # start of the segment (inclusive).
5723
6285
  # Corresponds to the JSON property `startTimeOffset`
5724
6286
  # @return [String]
5725
6287
  attr_accessor :start_time_offset
@@ -5736,41 +6298,41 @@ module Google
5736
6298
  end
5737
6299
 
5738
6300
  # Word-specific information for recognized words. Word information is only
5739
- # included in the response when certain request parameters are set, such
5740
- # as `enable_word_time_offsets`.
6301
+ # included in the response when certain request parameters are set, such as `
6302
+ # enable_word_time_offsets`.
5741
6303
  class GoogleCloudVideointelligenceV1p3beta1WordInfo
5742
6304
  include Google::Apis::Core::Hashable
5743
6305
 
5744
6306
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
5745
6307
  # indicates an estimated greater likelihood that the recognized words are
5746
- # correct. This field is set only for the top alternative.
5747
- # This field is not guaranteed to be accurate and users should not rely on it
5748
- # to be always provided.
5749
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
6308
+ # correct. This field is set only for the top alternative. This field is not
6309
+ # guaranteed to be accurate and users should not rely on it to be always
6310
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
6311
+ # not set.
5750
6312
  # Corresponds to the JSON property `confidence`
5751
6313
  # @return [Float]
5752
6314
  attr_accessor :confidence
5753
6315
 
5754
- # Time offset relative to the beginning of the audio, and
5755
- # corresponding to the end of the spoken word. This field is only set if
5756
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
5757
- # experimental feature and the accuracy of the time offset can vary.
6316
+ # Time offset relative to the beginning of the audio, and corresponding to the
6317
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
6318
+ # true` and only in the top hypothesis. This is an experimental feature and the
6319
+ # accuracy of the time offset can vary.
5758
6320
  # Corresponds to the JSON property `endTime`
5759
6321
  # @return [String]
5760
6322
  attr_accessor :end_time
5761
6323
 
5762
- # Output only. A distinct integer value is assigned for every speaker within
5763
- # the audio. This field specifies which one of those speakers was detected to
5764
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
5765
- # and is only set if speaker diarization is enabled.
6324
+ # Output only. A distinct integer value is assigned for every speaker within the
6325
+ # audio. This field specifies which one of those speakers was detected to have
6326
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
6327
+ # only set if speaker diarization is enabled.
5766
6328
  # Corresponds to the JSON property `speakerTag`
5767
6329
  # @return [Fixnum]
5768
6330
  attr_accessor :speaker_tag
5769
6331
 
5770
- # Time offset relative to the beginning of the audio, and
5771
- # corresponding to the start of the spoken word. This field is only set if
5772
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
5773
- # experimental feature and the accuracy of the time offset can vary.
6332
+ # Time offset relative to the beginning of the audio, and corresponding to the
6333
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
6334
+ # true` and only in the top hypothesis. This is an experimental feature and the
6335
+ # accuracy of the time offset can vary.
5774
6336
  # Corresponds to the JSON property `startTime`
5775
6337
  # @return [String]
5776
6338
  attr_accessor :start_time
@@ -5799,47 +6361,45 @@ module Google
5799
6361
  class GoogleLongrunningOperation
5800
6362
  include Google::Apis::Core::Hashable
5801
6363
 
5802
- # If the value is `false`, it means the operation is still in progress.
5803
- # If `true`, the operation is completed, and either `error` or `response` is
5804
- # available.
6364
+ # If the value is `false`, it means the operation is still in progress. If `true`
6365
+ # , the operation is completed, and either `error` or `response` is available.
5805
6366
  # Corresponds to the JSON property `done`
5806
6367
  # @return [Boolean]
5807
6368
  attr_accessor :done
5808
6369
  alias_method :done?, :done
5809
6370
 
5810
- # The `Status` type defines a logical error model that is suitable for
5811
- # different programming environments, including REST APIs and RPC APIs. It is
5812
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5813
- # three pieces of data: error code, error message, and error details.
5814
- # You can find out more about this error model and how to work with it in the
5815
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
6371
+ # The `Status` type defines a logical error model that is suitable for different
6372
+ # programming environments, including REST APIs and RPC APIs. It is used by [
6373
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
6374
+ # data: error code, error message, and error details. You can find out more
6375
+ # about this error model and how to work with it in the [API Design Guide](https:
6376
+ # //cloud.google.com/apis/design/errors).
5816
6377
  # Corresponds to the JSON property `error`
5817
6378
  # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus]
5818
6379
  attr_accessor :error
5819
6380
 
5820
- # Service-specific metadata associated with the operation. It typically
5821
- # contains progress information and common metadata such as create time.
5822
- # Some services might not provide such metadata. Any method that returns a
5823
- # long-running operation should document the metadata type, if any.
6381
+ # Service-specific metadata associated with the operation. It typically contains
6382
+ # progress information and common metadata such as create time. Some services
6383
+ # might not provide such metadata. Any method that returns a long-running
6384
+ # operation should document the metadata type, if any.
5824
6385
  # Corresponds to the JSON property `metadata`
5825
6386
  # @return [Hash<String,Object>]
5826
6387
  attr_accessor :metadata
5827
6388
 
5828
6389
  # The server-assigned name, which is only unique within the same service that
5829
- # originally returns it. If you use the default HTTP mapping, the
5830
- # `name` should be a resource name ending with `operations/`unique_id``.
6390
+ # originally returns it. If you use the default HTTP mapping, the `name` should
6391
+ # be a resource name ending with `operations/`unique_id``.
5831
6392
  # Corresponds to the JSON property `name`
5832
6393
  # @return [String]
5833
6394
  attr_accessor :name
5834
6395
 
5835
- # The normal response of the operation in case of success. If the original
5836
- # method returns no data on success, such as `Delete`, the response is
5837
- # `google.protobuf.Empty`. If the original method is standard
5838
- # `Get`/`Create`/`Update`, the response should be the resource. For other
5839
- # methods, the response should have the type `XxxResponse`, where `Xxx`
5840
- # is the original method name. For example, if the original method name
5841
- # is `TakeSnapshot()`, the inferred response type is
5842
- # `TakeSnapshotResponse`.
6396
+ # The normal response of the operation in case of success. If the original
6397
+ # method returns no data on success, such as `Delete`, the response is `google.
6398
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
6399
+ # the response should be the resource. For other methods, the response should
6400
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
6401
+ # example, if the original method name is `TakeSnapshot()`, the inferred
6402
+ # response type is `TakeSnapshotResponse`.
5843
6403
  # Corresponds to the JSON property `response`
5844
6404
  # @return [Hash<String,Object>]
5845
6405
  attr_accessor :response
@@ -5858,12 +6418,12 @@ module Google
5858
6418
  end
5859
6419
  end
5860
6420
 
5861
- # The `Status` type defines a logical error model that is suitable for
5862
- # different programming environments, including REST APIs and RPC APIs. It is
5863
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
5864
- # three pieces of data: error code, error message, and error details.
5865
- # You can find out more about this error model and how to work with it in the
5866
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
6421
+ # The `Status` type defines a logical error model that is suitable for different
6422
+ # programming environments, including REST APIs and RPC APIs. It is used by [
6423
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
6424
+ # data: error code, error message, and error details. You can find out more
6425
+ # about this error model and how to work with it in the [API Design Guide](https:
6426
+ # //cloud.google.com/apis/design/errors).
5867
6427
  class GoogleRpcStatus
5868
6428
  include Google::Apis::Core::Hashable
5869
6429
 
@@ -5872,15 +6432,15 @@ module Google
5872
6432
  # @return [Fixnum]
5873
6433
  attr_accessor :code
5874
6434
 
5875
- # A list of messages that carry the error details. There is a common set of
6435
+ # A list of messages that carry the error details. There is a common set of
5876
6436
  # message types for APIs to use.
5877
6437
  # Corresponds to the JSON property `details`
5878
6438
  # @return [Array<Hash<String,Object>>]
5879
6439
  attr_accessor :details
5880
6440
 
5881
- # A developer-facing error message, which should be in English. Any
5882
- # user-facing error message should be localized and sent in the
5883
- # google.rpc.Status.details field, or localized by the client.
6441
+ # A developer-facing error message, which should be in English. Any user-facing
6442
+ # error message should be localized and sent in the google.rpc.Status.details
6443
+ # field, or localized by the client.
5884
6444
  # Corresponds to the JSON property `message`
5885
6445
  # @return [String]
5886
6446
  attr_accessor :message