google-api-client 0.43.0 → 0.48.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
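For context, a minimal sketch of how a consuming project might take this update via Bundler; the Gemfile layout and constraint below are illustrative (they simply target the 0.48.x series this diff ends at) and are not taken from the package itself:

```ruby
# Gemfile (illustrative) — pin to the 0.48.x series covered by this diff.
source 'https://rubygems.org'

gem 'google-api-client', '~> 0.48.0'
```

Running `bundle update google-api-client` would then move an application from 0.43.0 to the latest matching 0.48.x release.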
- checksums.yaml +4 -4
- data/.github/workflows/autoapprove.yml +49 -0
- data/.github/workflows/release-please.yml +77 -0
- data/.gitignore +2 -0
- data/.kokoro/trampoline.sh +0 -0
- data/CHANGELOG.md +1066 -184
- data/Gemfile +1 -0
- data/Rakefile +31 -3
- data/api_list_config.yaml +8 -0
- data/api_names.yaml +1 -0
- data/bin/generate-api +77 -15
- data/docs/oauth-server.md +4 -6
- data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
- data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
- data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
- data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
- data/generated/google/apis/accessapproval_v1/service.rb +93 -132
- data/generated/google/apis/accessapproval_v1.rb +1 -1
- data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
- data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
- data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
- data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
- data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
- data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
- data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
- data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
- data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
- data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
- data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
- data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
- data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
- data/generated/google/apis/adexperiencereport_v1.rb +1 -1
- data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
- data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
- data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
- data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
- data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
- data/generated/google/apis/admin_directory_v1/service.rb +607 -998
- data/generated/google/apis/admin_directory_v1.rb +6 -8
- data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
- data/generated/google/apis/admin_reports_v1/service.rb +131 -187
- data/generated/google/apis/admin_reports_v1.rb +6 -5
- data/generated/google/apis/admob_v1/classes.rb +31 -31
- data/generated/google/apis/admob_v1/service.rb +2 -1
- data/generated/google/apis/admob_v1.rb +6 -2
- data/generated/google/apis/adsense_v1_4/service.rb +4 -1
- data/generated/google/apis/adsense_v1_4.rb +1 -1
- data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
- data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
- data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
- data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
- data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
- data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
- data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
- data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
- data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
- data/generated/google/apis/analyticsreporting_v4.rb +1 -1
- data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
- data/generated/google/apis/androidenterprise_v1.rb +1 -1
- data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
- data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
- data/generated/google/apis/androidmanagement_v1.rb +1 -1
- data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
- data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
- data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
- data/generated/google/apis/androidpublisher_v3.rb +1 -1
- data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
- data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
- data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
- data/generated/google/apis/apigateway_v1beta.rb +34 -0
- data/generated/google/apis/apigee_v1/classes.rb +630 -88
- data/generated/google/apis/apigee_v1/representations.rb +209 -1
- data/generated/google/apis/apigee_v1/service.rb +401 -74
- data/generated/google/apis/apigee_v1.rb +6 -7
- data/generated/google/apis/appengine_v1/classes.rb +96 -59
- data/generated/google/apis/appengine_v1/representations.rb +17 -0
- data/generated/google/apis/appengine_v1/service.rb +38 -47
- data/generated/google/apis/appengine_v1.rb +1 -1
- data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
- data/generated/google/apis/appengine_v1alpha.rb +1 -1
- data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
- data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
- data/generated/google/apis/appengine_v1beta/service.rb +37 -47
- data/generated/google/apis/appengine_v1beta.rb +1 -1
- data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
- data/generated/google/apis/appsmarket_v2.rb +1 -1
- data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
- data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
- data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
- data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
- data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
- data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
- data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
- data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
- data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
- data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
- data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
- data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
- data/generated/google/apis/bigquery_v2/classes.rb +593 -576
- data/generated/google/apis/bigquery_v2/representations.rb +85 -0
- data/generated/google/apis/bigquery_v2/service.rb +79 -41
- data/generated/google/apis/bigquery_v2.rb +1 -1
- data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
- data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
- data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
- data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
- data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
- data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
- data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
- data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
- data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
- data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
- data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
- data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
- data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
- data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
- data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
- data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
- data/generated/google/apis/bigtableadmin_v1.rb +1 -1
- data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
- data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
- data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
- data/generated/google/apis/bigtableadmin_v2.rb +1 -1
- data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
- data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
- data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
- data/generated/google/apis/billingbudgets_v1.rb +38 -0
- data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
- data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
- data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
- data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
- data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
- data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
- data/generated/google/apis/binaryauthorization_v1.rb +1 -1
- data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
- data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
- data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
- data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
- data/generated/google/apis/books_v1/service.rb +54 -54
- data/generated/google/apis/books_v1.rb +1 -1
- data/generated/google/apis/calendar_v3/classes.rb +13 -10
- data/generated/google/apis/calendar_v3.rb +1 -1
- data/generated/google/apis/chat_v1/classes.rb +173 -116
- data/generated/google/apis/chat_v1/representations.rb +36 -0
- data/generated/google/apis/chat_v1/service.rb +30 -42
- data/generated/google/apis/chat_v1.rb +1 -1
- data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
- data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
- data/generated/google/apis/civicinfo_v2.rb +1 -1
- data/generated/google/apis/classroom_v1/classes.rb +153 -21
- data/generated/google/apis/classroom_v1/representations.rb +43 -0
- data/generated/google/apis/classroom_v1/service.rb +240 -0
- data/generated/google/apis/classroom_v1.rb +7 -1
- data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
- data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
- data/generated/google/apis/cloudasset_v1/service.rb +296 -167
- data/generated/google/apis/cloudasset_v1.rb +1 -1
- data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
- data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
- data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
- data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
- data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
- data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
- data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
- data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
- data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
- data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
- data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
- data/generated/google/apis/cloudbilling_v1.rb +7 -1
- data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
- data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
- data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
- data/generated/google/apis/cloudbuild_v1.rb +1 -1
- data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
- data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
- data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
- data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
- data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
- data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
- data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
- data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
- data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
- data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
- data/generated/google/apis/clouddebugger_v2.rb +1 -1
- data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
- data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
- data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
- data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
- data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
- data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
- data/generated/google/apis/cloudfunctions_v1.rb +1 -1
- data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
- data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
- data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
- data/generated/google/apis/cloudidentity_v1.rb +4 -1
- data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
- data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
- data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
- data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
- data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
- data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
- data/generated/google/apis/cloudiot_v1/service.rb +147 -154
- data/generated/google/apis/cloudiot_v1.rb +1 -1
- data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
- data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
- data/generated/google/apis/cloudkms_v1/service.rb +170 -216
- data/generated/google/apis/cloudkms_v1.rb +1 -1
- data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
- data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
- data/generated/google/apis/cloudprofiler_v2.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
- data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
- data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
- data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
- data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
- data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
- data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
- data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
- data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
- data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
- data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
- data/generated/google/apis/cloudscheduler_v1.rb +1 -1
- data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
- data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
- data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
- data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
- data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
- data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
- data/generated/google/apis/cloudsearch_v1.rb +2 -2
- data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
- data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
- data/generated/google/apis/cloudshell_v1/service.rb +198 -25
- data/generated/google/apis/cloudshell_v1.rb +1 -1
- data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
- data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
- data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
- data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
- data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
- data/generated/google/apis/cloudtasks_v2.rb +1 -1
- data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
- data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
- data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
- data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
- data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
- data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
- data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
- data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
- data/generated/google/apis/cloudtrace_v1.rb +1 -1
- data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
- data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
- data/generated/google/apis/cloudtrace_v2.rb +1 -1
- data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
- data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
- data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
- data/generated/google/apis/composer_v1/classes.rb +189 -242
- data/generated/google/apis/composer_v1/service.rb +79 -150
- data/generated/google/apis/composer_v1.rb +1 -1
- data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
- data/generated/google/apis/composer_v1beta1/service.rb +94 -179
- data/generated/google/apis/composer_v1beta1.rb +1 -1
- data/generated/google/apis/compute_alpha/classes.rb +1227 -186
- data/generated/google/apis/compute_alpha/representations.rb +235 -8
- data/generated/google/apis/compute_alpha/service.rb +2009 -1024
- data/generated/google/apis/compute_alpha.rb +1 -1
- data/generated/google/apis/compute_beta/classes.rb +1080 -108
- data/generated/google/apis/compute_beta/representations.rb +212 -2
- data/generated/google/apis/compute_beta/service.rb +1413 -741
- data/generated/google/apis/compute_beta.rb +1 -1
- data/generated/google/apis/compute_v1/classes.rb +1512 -106
- data/generated/google/apis/compute_v1/representations.rb +470 -1
- data/generated/google/apis/compute_v1/service.rb +1625 -285
- data/generated/google/apis/compute_v1.rb +1 -1
- data/generated/google/apis/container_v1/classes.rb +982 -965
- data/generated/google/apis/container_v1/representations.rb +60 -0
- data/generated/google/apis/container_v1/service.rb +435 -502
- data/generated/google/apis/container_v1.rb +1 -1
- data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
- data/generated/google/apis/container_v1beta1/representations.rb +91 -0
- data/generated/google/apis/container_v1beta1/service.rb +403 -466
- data/generated/google/apis/container_v1beta1.rb +1 -1
- data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
- data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
- data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
- data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
- data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
- data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
- data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
- data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
- data/generated/google/apis/content_v2/classes.rb +515 -1219
- data/generated/google/apis/content_v2/service.rb +377 -650
- data/generated/google/apis/content_v2.rb +3 -4
- data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
- data/generated/google/apis/content_v2_1/representations.rb +288 -0
- data/generated/google/apis/content_v2_1/service.rb +987 -795
- data/generated/google/apis/content_v2_1.rb +3 -4
- data/generated/google/apis/customsearch_v1/service.rb +2 -2
- data/generated/google/apis/customsearch_v1.rb +1 -1
- data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
- data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
- data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
- data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
- data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
- data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
- data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
- data/generated/google/apis/dataflow_v1b3.rb +1 -1
- data/generated/google/apis/datafusion_v1/classes.rb +283 -397
- data/generated/google/apis/datafusion_v1/representations.rb +5 -0
- data/generated/google/apis/datafusion_v1/service.rb +76 -89
- data/generated/google/apis/datafusion_v1.rb +5 -8
- data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
- data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
- data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
- data/generated/google/apis/datafusion_v1beta1.rb +5 -8
- data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
- data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
- data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
- data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
- data/generated/google/apis/dataproc_v1/classes.rb +97 -13
- data/generated/google/apis/dataproc_v1/representations.rb +34 -0
- data/generated/google/apis/dataproc_v1.rb +1 -1
- data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
- data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
- data/generated/google/apis/dataproc_v1beta2.rb +1 -1
- data/generated/google/apis/datastore_v1/classes.rb +334 -476
- data/generated/google/apis/datastore_v1/service.rb +52 -63
- data/generated/google/apis/datastore_v1.rb +1 -1
- data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
- data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
- data/generated/google/apis/datastore_v1beta1.rb +1 -1
- data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
- data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
- data/generated/google/apis/datastore_v1beta3.rb +1 -1
- data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
- data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
- data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
- data/generated/google/apis/deploymentmanager_v2.rb +6 -4
- data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
- data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
- data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
- data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
- data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
- data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
- data/generated/google/apis/dfareporting_v3_3.rb +2 -2
- data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
- data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
- data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
- data/generated/google/apis/dfareporting_v3_4.rb +2 -2
- data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
- data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
- data/generated/google/apis/dialogflow_v2.rb +1 -1
- data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
- data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
- data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
- data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
- data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
- data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
- data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
- data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
- data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
- data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
- data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
- data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
- data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
- data/generated/google/apis/displayvideo_v1/service.rb +287 -32
- data/generated/google/apis/displayvideo_v1.rb +1 -1
- data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
- data/generated/google/apis/displayvideo_v1beta.rb +38 -0
- data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
- data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
- data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
- data/generated/google/apis/displayvideo_v1dev.rb +38 -0
- data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
- data/generated/google/apis/dlp_v2/representations.rb +16 -0
- data/generated/google/apis/dlp_v2/service.rb +962 -905
- data/generated/google/apis/dlp_v2.rb +1 -1
- data/generated/google/apis/dns_v1/classes.rb +356 -198
- data/generated/google/apis/dns_v1/representations.rb +83 -0
- data/generated/google/apis/dns_v1/service.rb +83 -98
- data/generated/google/apis/dns_v1.rb +2 -2
- data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
- data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
- data/generated/google/apis/dns_v1beta2/service.rb +83 -98
- data/generated/google/apis/dns_v1beta2.rb +2 -2
- data/generated/google/apis/docs_v1/classes.rb +894 -1229
- data/generated/google/apis/docs_v1/service.rb +17 -22
- data/generated/google/apis/docs_v1.rb +1 -1
- data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
- data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
- data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
- data/generated/google/apis/documentai_v1beta2.rb +1 -1
- data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
- data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
- data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
- data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
- data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
- data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
- data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
- data/generated/google/apis/domains_v1alpha2.rb +34 -0
- data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
- data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
- data/generated/google/apis/domains_v1beta1/service.rb +805 -0
- data/generated/google/apis/domains_v1beta1.rb +34 -0
- data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
- data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
- data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
- data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
- data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
- data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
- data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
- data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
- data/generated/google/apis/drive_v2/classes.rb +18 -7
- data/generated/google/apis/drive_v2/representations.rb +1 -0
- data/generated/google/apis/drive_v2/service.rb +79 -15
- data/generated/google/apis/drive_v2.rb +1 -1
- data/generated/google/apis/drive_v3/classes.rb +18 -8
- data/generated/google/apis/drive_v3/representations.rb +1 -0
- data/generated/google/apis/drive_v3/service.rb +59 -11
- data/generated/google/apis/drive_v3.rb +1 -1
- data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
- data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
- data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
- data/generated/google/apis/eventarc_v1beta1.rb +34 -0
- data/generated/google/apis/file_v1/classes.rb +155 -174
- data/generated/google/apis/file_v1/service.rb +43 -52
- data/generated/google/apis/file_v1.rb +1 -1
- data/generated/google/apis/file_v1beta1/classes.rb +335 -194
- data/generated/google/apis/file_v1beta1/representations.rb +55 -0
- data/generated/google/apis/file_v1beta1/service.rb +267 -55
- data/generated/google/apis/file_v1beta1.rb +1 -1
- data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
- data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
- data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
- data/generated/google/apis/firebase_v1beta1.rb +1 -1
- data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
- data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
- data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
- data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
- data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
- data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
- data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
- data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
- data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
- data/generated/google/apis/firebaserules_v1/service.rb +87 -110
- data/generated/google/apis/firebaserules_v1.rb +1 -1
- data/generated/google/apis/firestore_v1/classes.rb +406 -502
- data/generated/google/apis/firestore_v1/service.rb +165 -201
- data/generated/google/apis/firestore_v1.rb +1 -1
- data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
- data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
- data/generated/google/apis/firestore_v1beta1.rb +1 -1
- data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
- data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
- data/generated/google/apis/firestore_v1beta2.rb +1 -1
- data/generated/google/apis/fitness_v1/classes.rb +982 -0
- data/generated/google/apis/fitness_v1/representations.rb +398 -0
- data/generated/google/apis/fitness_v1/service.rb +628 -0
- data/generated/google/apis/fitness_v1.rb +97 -0
- data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
- data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
- data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
- data/generated/google/apis/games_management_v1management/classes.rb +14 -20
- data/generated/google/apis/games_management_v1management/service.rb +35 -36
- data/generated/google/apis/games_management_v1management.rb +2 -3
- data/generated/google/apis/games_v1/classes.rb +376 -83
- data/generated/google/apis/games_v1/representations.rb +118 -0
- data/generated/google/apis/games_v1/service.rb +118 -90
- data/generated/google/apis/games_v1.rb +2 -3
- data/generated/google/apis/gameservices_v1/classes.rb +22 -14
- data/generated/google/apis/gameservices_v1/representations.rb +1 -0
- data/generated/google/apis/gameservices_v1/service.rb +54 -51
- data/generated/google/apis/gameservices_v1.rb +1 -1
- data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
- data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
- data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
- data/generated/google/apis/gameservices_v1beta.rb +1 -1
- data/generated/google/apis/genomics_v1/classes.rb +70 -76
- data/generated/google/apis/genomics_v1/service.rb +28 -43
- data/generated/google/apis/genomics_v1.rb +1 -1
- data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
- data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
- data/generated/google/apis/genomics_v1alpha2.rb +1 -1
- data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
- data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
- data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
- data/generated/google/apis/genomics_v2alpha1.rb +1 -1
- data/generated/google/apis/gmail_v1/classes.rb +37 -43
- data/generated/google/apis/gmail_v1/service.rb +5 -4
- data/generated/google/apis/gmail_v1.rb +1 -1
- data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
- data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
- data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
- data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
- data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
- data/generated/google/apis/groupsmigration_v1.rb +35 -0
- data/generated/google/apis/healthcare_v1/classes.rb +637 -826
- data/generated/google/apis/healthcare_v1/representations.rb +32 -0
- data/generated/google/apis/healthcare_v1/service.rb +842 -855
- data/generated/google/apis/healthcare_v1.rb +1 -1
- data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
- data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
- data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
- data/generated/google/apis/healthcare_v1beta1.rb +1 -1
- data/generated/google/apis/homegraph_v1/classes.rb +76 -164
- data/generated/google/apis/homegraph_v1/service.rb +23 -35
- data/generated/google/apis/homegraph_v1.rb +4 -1
- data/generated/google/apis/iam_v1/classes.rb +395 -592
- data/generated/google/apis/iam_v1/representations.rb +1 -0
- data/generated/google/apis/iam_v1/service.rb +427 -555
- data/generated/google/apis/iam_v1.rb +1 -1
- data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
- data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
- data/generated/google/apis/iamcredentials_v1.rb +3 -2
- data/generated/google/apis/iap_v1/classes.rb +253 -355
- data/generated/google/apis/iap_v1/representations.rb +1 -0
- data/generated/google/apis/iap_v1/service.rb +61 -71
- data/generated/google/apis/iap_v1.rb +1 -1
- data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
- data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
- data/generated/google/apis/iap_v1beta1/service.rb +17 -19
- data/generated/google/apis/iap_v1beta1.rb +1 -1
- data/generated/google/apis/indexing_v3/classes.rb +11 -11
- data/generated/google/apis/indexing_v3.rb +1 -1
- data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
- data/generated/google/apis/jobs_v2/representations.rb +272 -0
- data/generated/google/apis/jobs_v2/service.rb +85 -126
- data/generated/google/apis/jobs_v2.rb +1 -1
- data/generated/google/apis/jobs_v3/classes.rb +1559 -980
- data/generated/google/apis/jobs_v3/representations.rb +272 -0
- data/generated/google/apis/jobs_v3/service.rb +101 -139
- data/generated/google/apis/jobs_v3.rb +1 -1
- data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
- data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
- data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
- data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
- data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
- data/generated/google/apis/kgsearch_v1/service.rb +11 -11
- data/generated/google/apis/kgsearch_v1.rb +1 -1
- data/generated/google/apis/licensing_v1/classes.rb +1 -1
- data/generated/google/apis/licensing_v1/service.rb +56 -86
- data/generated/google/apis/licensing_v1.rb +4 -3
- data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
- data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
- data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
- data/generated/google/apis/lifesciences_v2beta.rb +1 -1
- data/generated/google/apis/localservices_v1/classes.rb +426 -0
- data/generated/google/apis/localservices_v1/representations.rb +174 -0
- data/generated/google/apis/localservices_v1/service.rb +199 -0
- data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
- data/generated/google/apis/logging_v2/classes.rb +306 -232
- data/generated/google/apis/logging_v2/representations.rb +79 -0
- data/generated/google/apis/logging_v2/service.rb +3307 -1579
- data/generated/google/apis/logging_v2.rb +1 -1
- data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
- data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
- data/generated/google/apis/managedidentities_v1/service.rb +1 -4
- data/generated/google/apis/managedidentities_v1.rb +1 -1
- data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
- data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
- data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
- data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
- data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
- data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
- data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
- data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
- data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
- data/generated/google/apis/manufacturers_v1/service.rb +44 -55
- data/generated/google/apis/manufacturers_v1.rb +1 -1
- data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
- data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
- data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
- data/generated/google/apis/memcache_v1beta2.rb +1 -1
- data/generated/google/apis/ml_v1/classes.rb +1122 -1149
- data/generated/google/apis/ml_v1/representations.rb +82 -0
- data/generated/google/apis/ml_v1/service.rb +194 -253
- data/generated/google/apis/ml_v1.rb +1 -1
- data/generated/google/apis/monitoring_v1/classes.rb +107 -26
- data/generated/google/apis/monitoring_v1/representations.rb +35 -0
- data/generated/google/apis/monitoring_v1/service.rb +10 -11
- data/generated/google/apis/monitoring_v1.rb +1 -1
- data/generated/google/apis/monitoring_v3/classes.rb +303 -345
- data/generated/google/apis/monitoring_v3/representations.rb +18 -0
- data/generated/google/apis/monitoring_v3/service.rb +176 -146
- data/generated/google/apis/monitoring_v3.rb +1 -1
- data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
- data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
- data/generated/google/apis/networkmanagement_v1.rb +1 -1
- data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
- data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
- data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
- data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
- data/generated/google/apis/osconfig_v1/classes.rb +154 -902
- data/generated/google/apis/osconfig_v1/representations.rb +0 -337
- data/generated/google/apis/osconfig_v1/service.rb +26 -31
- data/generated/google/apis/osconfig_v1.rb +3 -3
- data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
- data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
- data/generated/google/apis/osconfig_v1beta.rb +3 -3
- data/generated/google/apis/oslogin_v1/classes.rb +14 -12
- data/generated/google/apis/oslogin_v1/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1/service.rb +12 -16
- data/generated/google/apis/oslogin_v1.rb +1 -1
- data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
- data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
- data/generated/google/apis/oslogin_v1alpha.rb +1 -1
- data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
- data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
- data/generated/google/apis/oslogin_v1beta.rb +1 -1
- data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
- data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
- data/generated/google/apis/pagespeedonline_v5.rb +2 -2
- data/generated/google/apis/people_v1/classes.rb +173 -63
- data/generated/google/apis/people_v1/representations.rb +41 -0
- data/generated/google/apis/people_v1/service.rb +63 -61
- data/generated/google/apis/people_v1.rb +1 -1
- data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
- data/generated/google/apis/playablelocations_v3/service.rb +10 -10
- data/generated/google/apis/playablelocations_v3.rb +1 -1
- data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
- data/generated/google/apis/playcustomapp_v1.rb +1 -1
- data/generated/google/apis/poly_v1/classes.rb +65 -79
- data/generated/google/apis/poly_v1/service.rb +50 -63
- data/generated/google/apis/poly_v1.rb +3 -4
- data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
- data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
- data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
- data/generated/google/apis/privateca_v1beta1.rb +34 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
- data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
- data/generated/google/apis/pubsub_v1/classes.rb +399 -518
- data/generated/google/apis/pubsub_v1/representations.rb +2 -0
- data/generated/google/apis/pubsub_v1/service.rb +221 -247
- data/generated/google/apis/pubsub_v1.rb +1 -1
- data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
- data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
- data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
- data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
- data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
- data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
- data/generated/google/apis/pubsub_v1beta2.rb +1 -1
- data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
- data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
- data/generated/google/apis/pubsublite_v1/service.rb +558 -0
- data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
- data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
- data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
- data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
- data/generated/google/apis/realtimebidding_v1.rb +1 -1
- data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
- data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
- data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
- data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
- data/generated/google/apis/recommender_v1/classes.rb +1 -1
- data/generated/google/apis/recommender_v1/service.rb +4 -2
- data/generated/google/apis/recommender_v1.rb +1 -1
- data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
- data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
- data/generated/google/apis/recommender_v1beta1.rb +1 -1
- data/generated/google/apis/redis_v1/classes.rb +91 -513
- data/generated/google/apis/redis_v1/representations.rb +0 -139
- data/generated/google/apis/redis_v1/service.rb +92 -109
- data/generated/google/apis/redis_v1.rb +1 -1
- data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
- data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
- data/generated/google/apis/redis_v1beta1/service.rb +126 -109
- data/generated/google/apis/redis_v1beta1.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
- data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
- data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
- data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
- data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
- data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
- data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
- data/generated/google/apis/reseller_v1/classes.rb +151 -219
- data/generated/google/apis/reseller_v1/service.rb +122 -173
- data/generated/google/apis/reseller_v1.rb +2 -2
- data/generated/google/apis/run_v1/classes.rb +19 -138
- data/generated/google/apis/run_v1/representations.rb +1 -62
- data/generated/google/apis/run_v1/service.rb +0 -342
- data/generated/google/apis/run_v1.rb +1 -1
- data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
- data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
- data/generated/google/apis/run_v1alpha1.rb +1 -1
- data/generated/google/apis/run_v1beta1/classes.rb +3 -2
- data/generated/google/apis/run_v1beta1.rb +1 -1
- data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
- data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
- data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
- data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
- data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
- data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
- data/generated/google/apis/safebrowsing_v4.rb +1 -1
- data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
- data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
- data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
- data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
- data/generated/google/apis/script_v1/classes.rb +88 -111
- data/generated/google/apis/script_v1/service.rb +63 -69
- data/generated/google/apis/script_v1.rb +1 -1
- data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
- data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
- data/generated/google/apis/searchconsole_v1/service.rb +287 -0
- data/generated/google/apis/searchconsole_v1.rb +7 -1
- data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
- data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
- data/generated/google/apis/secretmanager_v1/service.rb +66 -82
- data/generated/google/apis/secretmanager_v1.rb +1 -1
- data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
- data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
- data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
- data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
- data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
- data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
- data/generated/google/apis/securitycenter_v1.rb +1 -1
- data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
- data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
- data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
- data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
- data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
- data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
- data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
- data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
- data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
- data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
- data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
- data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
- data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
- data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
- data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
- data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
- data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
- data/generated/google/apis/servicecontrol_v1.rb +1 -1
- data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
- data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
- data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
- data/generated/google/apis/servicecontrol_v2.rb +1 -1
- data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
- data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
- data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
- data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
- data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
- data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
- data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
- data/generated/google/apis/servicemanagement_v1.rb +1 -1
- data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
- data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
- data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
- data/generated/google/apis/servicenetworking_v1.rb +1 -1
- data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
- data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
- data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
- data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
- data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
- data/generated/google/apis/serviceusage_v1/service.rb +5 -1
- data/generated/google/apis/serviceusage_v1.rb +1 -1
- data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
- data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
- data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
- data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
- data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
- data/generated/google/apis/sheets_v4/representations.rb +625 -0
- data/generated/google/apis/sheets_v4/service.rb +113 -149
- data/generated/google/apis/sheets_v4.rb +1 -1
- data/generated/google/apis/site_verification_v1.rb +1 -1
- data/generated/google/apis/slides_v1/classes.rb +841 -1114
- data/generated/google/apis/slides_v1/service.rb +23 -30
- data/generated/google/apis/slides_v1.rb +1 -1
- data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
- data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
- data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
- data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
- data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
- data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
- data/generated/google/apis/sourcerepo_v1.rb +1 -1
- data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
- data/generated/google/apis/spanner_v1/representations.rb +1 -0
- data/generated/google/apis/spanner_v1/service.rb +443 -618
- data/generated/google/apis/spanner_v1.rb +1 -1
- data/generated/google/apis/speech_v1/classes.rb +174 -220
- data/generated/google/apis/speech_v1/service.rb +27 -32
- data/generated/google/apis/speech_v1.rb +1 -1
- data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
- data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
- data/generated/google/apis/speech_v1p1beta1.rb +1 -1
- data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
- data/generated/google/apis/speech_v2beta1/service.rb +10 -12
- data/generated/google/apis/speech_v2beta1.rb +1 -1
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
- data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
- data/generated/google/apis/storage_v1/classes.rb +10 -17
- data/generated/google/apis/storage_v1/representations.rb +2 -3
- data/generated/google/apis/storage_v1/service.rb +3 -2
- data/generated/google/apis/storage_v1.rb +1 -1
- data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
- data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
- data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
- data/generated/google/apis/storagetransfer_v1.rb +1 -1
- data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
- data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
- data/generated/google/apis/streetviewpublish_v1.rb +1 -1
- data/generated/google/apis/sts_v1/classes.rb +121 -0
- data/generated/google/apis/sts_v1/representations.rb +59 -0
- data/generated/google/apis/sts_v1/service.rb +90 -0
- data/generated/google/apis/sts_v1.rb +32 -0
- data/generated/google/apis/sts_v1beta/classes.rb +191 -0
- data/generated/google/apis/sts_v1beta/representations.rb +61 -0
- data/generated/google/apis/sts_v1beta/service.rb +92 -0
- data/generated/google/apis/sts_v1beta.rb +32 -0
- data/generated/google/apis/tagmanager_v1/service.rb +2 -2
- data/generated/google/apis/tagmanager_v1.rb +1 -1
- data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
- data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
- data/generated/google/apis/tagmanager_v2/service.rb +2 -2
- data/generated/google/apis/tagmanager_v2.rb +1 -1
- data/generated/google/apis/tasks_v1/classes.rb +21 -22
- data/generated/google/apis/tasks_v1/service.rb +19 -19
- data/generated/google/apis/tasks_v1.rb +1 -1
- data/generated/google/apis/testing_v1/classes.rb +384 -390
- data/generated/google/apis/testing_v1/representations.rb +23 -0
- data/generated/google/apis/testing_v1/service.rb +22 -28
- data/generated/google/apis/testing_v1.rb +1 -1
- data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
- data/generated/google/apis/texttospeech_v1/service.rb +9 -10
- data/generated/google/apis/texttospeech_v1.rb +1 -1
- data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
- data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
- data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
- data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
- data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
- data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
- data/generated/google/apis/toolresults_v1beta3.rb +1 -1
- data/generated/google/apis/tpu_v1/classes.rb +57 -3
- data/generated/google/apis/tpu_v1/representations.rb +19 -0
- data/generated/google/apis/tpu_v1/service.rb +8 -8
- data/generated/google/apis/tpu_v1.rb +1 -1
- data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
- data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
- data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
- data/generated/google/apis/tpu_v1alpha1.rb +1 -1
- data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
- data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
- data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
- data/generated/google/apis/trafficdirector_v2.rb +34 -0
- data/generated/google/apis/translate_v3/classes.rb +151 -177
- data/generated/google/apis/translate_v3/service.rb +122 -151
- data/generated/google/apis/translate_v3.rb +1 -1
- data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
- data/generated/google/apis/translate_v3beta1/service.rb +122 -151
- data/generated/google/apis/translate_v3beta1.rb +1 -1
- data/generated/google/apis/vault_v1/classes.rb +413 -103
- data/generated/google/apis/vault_v1/representations.rb +162 -0
- data/generated/google/apis/vault_v1/service.rb +182 -37
- data/generated/google/apis/vault_v1.rb +1 -1
- data/generated/google/apis/vectortile_v1/classes.rb +185 -267
- data/generated/google/apis/vectortile_v1/service.rb +75 -88
- data/generated/google/apis/vectortile_v1.rb +1 -1
- data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
- data/generated/google/apis/verifiedaccess_v1.rb +1 -1
- data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
- data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1/service.rb +38 -77
- data/generated/google/apis/videointelligence_v1.rb +1 -1
- data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
- data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
- data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
- data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
- data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
- data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
- data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
- data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
- data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
- data/generated/google/apis/vision_v1/classes.rb +16 -16
- data/generated/google/apis/vision_v1.rb +1 -1
- data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
- data/generated/google/apis/vision_v1p1beta1.rb +1 -1
- data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
- data/generated/google/apis/vision_v1p2beta1.rb +1 -1
- data/generated/google/apis/webfonts_v1/classes.rb +1 -2
- data/generated/google/apis/webfonts_v1/service.rb +2 -4
- data/generated/google/apis/webfonts_v1.rb +2 -3
- data/generated/google/apis/websecurityscanner_v1.rb +1 -1
- data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
- data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
- data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
- data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
- data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
- data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
- data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
- data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
- data/generated/google/apis/workflows_v1beta/service.rb +438 -0
- data/generated/google/apis/workflows_v1beta.rb +35 -0
- data/generated/google/apis/youtube_partner_v1.rb +1 -1
- data/generated/google/apis/youtube_v3/classes.rb +0 -586
- data/generated/google/apis/youtube_v3/representations.rb +0 -269
- data/generated/google/apis/youtube_v3/service.rb +3 -120
- data/generated/google/apis/youtube_v3.rb +1 -1
- data/google-api-client.gemspec +25 -24
- data/lib/google/apis/core/api_command.rb +1 -0
- data/lib/google/apis/core/http_command.rb +2 -1
- data/lib/google/apis/options.rb +8 -5
- data/lib/google/apis/version.rb +1 -1
- data/synth.py +40 -0
- metadata +134 -41
- data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
- data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
- data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
- data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
- data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
- data/generated/google/apis/appsactivity_v1/service.rb +0 -126
- data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
- data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
- data/generated/google/apis/dns_v2beta1/service.rb +0 -928
- data/generated/google/apis/dns_v2beta1.rb +0 -43
- data/generated/google/apis/memcache_v1/classes.rb +0 -1157
- data/generated/google/apis/plus_v1/classes.rb +0 -2094
- data/generated/google/apis/plus_v1/representations.rb +0 -907
- data/generated/google/apis/plus_v1/service.rb +0 -451
- data/generated/google/apis/plus_v1.rb +0 -43
- data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
- data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
- data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
- data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
- data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
- data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
- data/generated/google/apis/storage_v1beta2.rb +0 -40
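
The hunks that follow are from the regenerated `videointelligence_v1` classes (`Google::Apis::VideointelligenceV1`): comment text is rewrapped throughout, and new face and person detection surfaces are added (`GoogleCloudVideointelligenceV1FaceDetectionConfig`, `GoogleCloudVideointelligenceV1PersonDetectionConfig`, plus `face_detection_config`/`person_detection_config` fields on the video context and `face_detection_annotations`/`person_detection_annotations` fields on the annotation results). As a rough sketch of how the new configs might be wired up with this release, the Ruby snippet below assumes the generated `CloudVideoIntelligenceService#annotate_video` method and request/context classes keep the shape of prior releases; the bucket URI and credential setup are placeholders, not part of this diff.

```ruby
# Sketch only: assumes CloudVideoIntelligenceService#annotate_video and
# GoogleCloudVideointelligenceV1AnnotateVideoRequest keep their prior shape;
# gs://example-bucket/example.mp4 is a placeholder.
require 'googleauth'
require 'google/apis/videointelligence_v1'

VI = Google::Apis::VideointelligenceV1

service = VI::CloudVideoIntelligenceService.new
service.authorization = Google::Auth.get_application_default(
  ['https://www.googleapis.com/auth/cloud-platform']
)

request = VI::GoogleCloudVideointelligenceV1AnnotateVideoRequest.new(
  input_uri: 'gs://example-bucket/example.mp4',
  features: ['FACE_DETECTION', 'PERSON_DETECTION'],
  video_context: VI::GoogleCloudVideointelligenceV1VideoContext.new(
    # New in this release: dedicated face and person detection configs.
    face_detection_config: VI::GoogleCloudVideointelligenceV1FaceDetectionConfig.new(
      include_bounding_boxes: true,
      include_attributes: true,
      model: 'builtin/stable'
    ),
    person_detection_config: VI::GoogleCloudVideointelligenceV1PersonDetectionConfig.new(
      include_bounding_boxes: true,
      include_pose_landmarks: true
    )
  )
)

# annotate_video returns a long-running operation; once it completes, the
# results carry face_detection_annotations and person_detection_annotations.
operation = service.annotate_video(request)
puts operation.name
```

Note that the deprecated `GoogleCloudVideointelligenceV1FaceAnnotation`/`FaceFrame` classes and the `face_annotations` result field remain in the surface but are documented as having no effect; new code should read `face_detection_annotations` instead.
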
@@ -22,9 +22,9 @@ module Google
   module Apis
     module VideointelligenceV1

-      # Video annotation progress. Included in the `metadata`
-      #
-      #
+      # Video annotation progress. Included in the `metadata` field of the `Operation`
+      # returned by the `GetOperation` call of the `google::longrunning::Operations`
+      # service.
       class GoogleCloudVideointelligenceV1AnnotateVideoProgress
         include Google::Apis::Core::Hashable

@@ -52,24 +52,22 @@ module Google
         # @return [Array<String>]
         attr_accessor :features

-        # The video data bytes.
-        # If
-        # If set, `input_uri` must be unset.
+        # The video data bytes. If unset, the input video(s) should be specified via the
+        # `input_uri`. If set, `input_uri` must be unset.
         # Corresponds to the JSON property `inputContent`
         # NOTE: Values are automatically base64 encoded/decoded in the client library.
         # @return [String]
         attr_accessor :input_content

-        # Input video location. Currently, only
-        #
-        #
-        #
-        # google.
-        #
-        #
-        #
-        #
-        # in the request as `input_content`. If set, `input_content` must be unset.
+        # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
+        # storage/) URIs are supported. URIs must be specified in the following format: `
+        # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
+        # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
+        # google.com/storage/docs/request-endpoints). To identify multiple videos, a
+        # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
+        # to match 0 or more characters; '?' to match 1 character. If unset, the input
+        # video should be embedded in the request as `input_content`. If set, `
+        # input_content` must be unset.
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri
@@ -83,11 +81,11 @@ module Google
         attr_accessor :location_id

         # Optional. Location where the output (in JSON format) should be stored.
-        # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
-        #
-        #
-        #
-        #
+        # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
+        # supported. These must be specified in the following format: `gs://bucket-id/
+        # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
+        # more information, see [Request URIs](https://cloud.google.com/storage/docs/
+        # request-endpoints).
         # Corresponds to the JSON property `outputUri`
         # @return [String]
         attr_accessor :output_uri
@@ -112,9 +110,9 @@ module Google
         end
       end

-      # Video annotation response. Included in the `response`
-      #
-      #
+      # Video annotation response. Included in the `response` field of the `Operation`
+      # returned by the `GetOperation` call of the `google::longrunning::Operations`
+      # service.
       class GoogleCloudVideointelligenceV1AnnotateVideoResponse
         include Google::Apis::Core::Hashable

@@ -142,14 +140,14 @@ module Google
         # @return [Float]
         attr_accessor :confidence

-        # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
-        #
+        # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+        # full list of supported type names will be provided in the document.
         # Corresponds to the JSON property `name`
         # @return [String]
         attr_accessor :name

-        # Text value of the detection result. For example, the value for "HairColor"
-        #
+        # Text value of the detection result. For example, the value for "HairColor" can
+        # be "black", "blonde", etc.
         # Corresponds to the JSON property `value`
         # @return [String]
         attr_accessor :value
@@ -181,9 +179,8 @@ module Google
         # @return [String]
         attr_accessor :name

-        # A vertex represents a 2D point in the image.
-        #
-        # and range from 0 to 1.
+        # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+        # coordinates are relative to the original image and range from 0 to 1.
         # Corresponds to the JSON property `point`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedVertex]
         attr_accessor :point
@@ -209,8 +206,7 @@ module Google
         # @return [String]
         attr_accessor :description

-        # Opaque entity ID. Some IDs may be available in
-        # [Google Knowledge Graph Search
+        # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
         # API](https://developers.google.com/knowledge-graph/).
         # Corresponds to the JSON property `entityId`
         # @return [String]
@@ -233,9 +229,9 @@ module Google
         end
       end

-      # Explicit content annotation (based on per-frame visual signals only).
-      #
-      #
+      # Explicit content annotation (based on per-frame visual signals only). If no
+      # explicit content has been detected in a frame, no annotations are present for
+      # that frame.
       class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
         include Google::Apis::Core::Hashable

@@ -264,9 +260,8 @@ module Google
       class GoogleCloudVideointelligenceV1ExplicitContentDetectionConfig
         include Google::Apis::Core::Hashable

-        # Model to use for explicit content detection.
-        #
-        # "builtin/latest".
+        # Model to use for explicit content detection. Supported values: "builtin/stable"
+        # (the default if unset) and "builtin/latest".
         # Corresponds to the JSON property `model`
         # @return [String]
         attr_accessor :model
@@ -307,14 +302,145 @@ module Google
         end
       end

+      # Deprecated. No effect.
+      class GoogleCloudVideointelligenceV1FaceAnnotation
+        include Google::Apis::Core::Hashable
+
+        # All video frames where a face was detected.
+        # Corresponds to the JSON property `frames`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1FaceFrame>]
+        attr_accessor :frames
+
+        # All video segments where a face was detected.
+        # Corresponds to the JSON property `segments`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1FaceSegment>]
+        attr_accessor :segments
+
+        # Thumbnail of a representative face view (in JPEG format).
+        # Corresponds to the JSON property `thumbnail`
+        # NOTE: Values are automatically base64 encoded/decoded in the client library.
+        # @return [String]
+        attr_accessor :thumbnail
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @frames = args[:frames] if args.key?(:frames)
+          @segments = args[:segments] if args.key?(:segments)
+          @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+        end
+      end
+
+      # Face detection annotation.
+      class GoogleCloudVideointelligenceV1FaceDetectionAnnotation
+        include Google::Apis::Core::Hashable
+
+        # Feature version.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
+      # Config for FACE_DETECTION.
+      class GoogleCloudVideointelligenceV1FaceDetectionConfig
+        include Google::Apis::Core::Hashable
+
+        # Whether to enable face attributes detection, such as glasses, dark_glasses,
+        # mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
+        # Corresponds to the JSON property `includeAttributes`
+        # @return [Boolean]
+        attr_accessor :include_attributes
+        alias_method :include_attributes?, :include_attributes
+
+        # Whether bounding boxes are included in the face annotation output.
+        # Corresponds to the JSON property `includeBoundingBoxes`
+        # @return [Boolean]
+        attr_accessor :include_bounding_boxes
+        alias_method :include_bounding_boxes?, :include_bounding_boxes
+
+        # Model to use for face detection. Supported values: "builtin/stable" (the
+        # default if unset) and "builtin/latest".
+        # Corresponds to the JSON property `model`
+        # @return [String]
+        attr_accessor :model
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @include_attributes = args[:include_attributes] if args.key?(:include_attributes)
+          @include_bounding_boxes = args[:include_bounding_boxes] if args.key?(:include_bounding_boxes)
+          @model = args[:model] if args.key?(:model)
+        end
+      end
+
+      # Deprecated. No effect.
+      class GoogleCloudVideointelligenceV1FaceFrame
+        include Google::Apis::Core::Hashable
+
+        # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+        # same face is detected in multiple locations within the current frame.
+        # Corresponds to the JSON property `normalizedBoundingBoxes`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox>]
+        attr_accessor :normalized_bounding_boxes
+
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # video frame for this location.
+        # Corresponds to the JSON property `timeOffset`
+        # @return [String]
+        attr_accessor :time_offset
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+          @time_offset = args[:time_offset] if args.key?(:time_offset)
+        end
+      end
+
+      # Video segment level annotation results for face detection.
+      class GoogleCloudVideointelligenceV1FaceSegment
+        include Google::Apis::Core::Hashable
+
+        # Video segment.
+        # Corresponds to the JSON property `segment`
+        # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment]
+        attr_accessor :segment
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @segment = args[:segment] if args.key?(:segment)
+        end
+      end
+
       # Label annotation.
       class GoogleCloudVideointelligenceV1LabelAnnotation
         include Google::Apis::Core::Hashable

-        # Common categories for the detected entity.
-        #
-        #
-        # also be a `pet`.
+        # Common categories for the detected entity. For example, when the label is `
+        # Terrier`, the category is likely `dog`. And in some cases there might be more
+        # than one categories e.g., `Terrier` could also be a `pet`.
         # Corresponds to the JSON property `categoryEntities`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity>]
         attr_accessor :category_entities
@@ -357,44 +483,40 @@ module Google
       class GoogleCloudVideointelligenceV1LabelDetectionConfig
         include Google::Apis::Core::Hashable

-        # The confidence threshold we perform filtering on the labels from
-        #
-        #
-        #
-        #
-        # the default threshold everytime when we release a new model.
+        # The confidence threshold we perform filtering on the labels from frame-level
+        # detection. If not set, it is set to 0.4 by default. The valid range for this
+        # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
+        # Note: For best results, follow the default threshold. We will update the
+        # default threshold everytime when we release a new model.
         # Corresponds to the JSON property `frameConfidenceThreshold`
         # @return [Float]
         attr_accessor :frame_confidence_threshold

-        # What labels should be detected with LABEL_DETECTION, in addition to
-        #
-        # If unspecified, defaults to `SHOT_MODE`.
+        # What labels should be detected with LABEL_DETECTION, in addition to video-
+        # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
         # Corresponds to the JSON property `labelDetectionMode`
         # @return [String]
         attr_accessor :label_detection_mode

-        # Model to use for label detection.
-        #
-        # "builtin/latest".
+        # Model to use for label detection. Supported values: "builtin/stable" (the
+        # default if unset) and "builtin/latest".
         # Corresponds to the JSON property `model`
         # @return [String]
         attr_accessor :model

-        # Whether the video has been shot from a stationary (i.e., non-moving)
-        #
-        #
+        # Whether the video has been shot from a stationary (i.e., non-moving) camera.
+        # When set to true, might improve detection accuracy for moving objects. Should
+        # be used with `SHOT_AND_FRAME_MODE` enabled.
         # Corresponds to the JSON property `stationaryCamera`
         # @return [Boolean]
         attr_accessor :stationary_camera
         alias_method :stationary_camera?, :stationary_camera

-        # The confidence threshold we perform filtering on the labels from
-        #
-        #
-        #
-        #
-        # the default threshold everytime when we release a new model.
+        # The confidence threshold we perform filtering on the labels from video-level
+        # and shot-level detections. If not set, it's set to 0.3 by default. The valid
+        # range for this threshold is [0.1, 0.9]. Any value set outside of this range
+        # will be clipped. Note: For best results, follow the default threshold. We will
+        # update the default threshold everytime when we release a new model.
         # Corresponds to the JSON property `videoConfidenceThreshold`
         # @return [Float]
         attr_accessor :video_confidence_threshold
@@ -473,14 +595,14 @@ module Google
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity]
         attr_accessor :entity

-        # All video segments where the recognized logo appears. There might be
-        #
+        # All video segments where the recognized logo appears. There might be multiple
+        # instances of the same logo class appearing in one VideoSegment.
         # Corresponds to the JSON property `segments`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment>]
         attr_accessor :segments

-        # All logo tracks where the recognized logo appears. Each track corresponds
-        #
+        # All logo tracks where the recognized logo appears. Each track corresponds to
+        # one logo instance appearing in consecutive frames.
         # Corresponds to the JSON property `tracks`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Track>]
         attr_accessor :tracks
@@ -497,9 +619,8 @@ module Google
         end
       end

-      # Normalized bounding box.
-      #
-      # Range: [0, 1].
+      # Normalized bounding box. The normalized vertex coordinates are relative to the
+      # original image. Range: [0, 1].
       class GoogleCloudVideointelligenceV1NormalizedBoundingBox
         include Google::Apis::Core::Hashable

@@ -537,20 +658,12 @@ module Google
       end

       # Normalized bounding polygon for text (that might not be aligned with axis).
-      # Contains list of the corner points in clockwise order starting from
-      #
-      #
-      #
-      #
-      #
-      # When it's clockwise rotated 180 degrees around the top-left corner it
-      # becomes:
-      # 2----3
-      # | |
-      # 1----0
-      # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-      # than 0, or greater than 1 due to trignometric calculations for location of
-      # the box.
+      # Contains list of the corner points in clockwise order starting from top-left
+      # corner. For example, for a rectangular bounding box: When the text is
+      # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+      # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+      # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+      # or greater than 1 due to trignometric calculations for location of the box.
       class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
         include Google::Apis::Core::Hashable

@@ -569,9 +682,8 @@ module Google
         end
       end

-      # A vertex represents a 2D point in the image.
-      #
-      # and range from 0 to 1.
+      # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+      # coordinates are relative to the original image and range from 0 to 1.
       class GoogleCloudVideointelligenceV1NormalizedVertex
         include Google::Apis::Core::Hashable

@@ -610,10 +722,10 @@ module Google
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity]
         attr_accessor :entity

-        # Information corresponding to all frames where this object track appears.
-        #
-        #
-        #
+        # Information corresponding to all frames where this object track appears. Non-
+        # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+        # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+        # frames.
         # Corresponds to the JSON property `frames`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
         attr_accessor :frames
@@ -623,12 +735,11 @@ module Google
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment]
         attr_accessor :segment

-        # Streaming mode ONLY.
-        #
-        #
-        #
-        #
-        # ObjectTrackAnnotation of the same track_id over time.
+        # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+        # tracked object before it is completed. Hence, there is no VideoSegment info
+        # returned. Instead, we provide a unique identifiable integer track_id so that
+        # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+        # of the same track_id over time.
         # Corresponds to the JSON property `trackId`
         # @return [Fixnum]
         attr_accessor :track_id
@@ -657,9 +768,8 @@ module Google
       class GoogleCloudVideointelligenceV1ObjectTrackingConfig
         include Google::Apis::Core::Hashable

-        # Model to use for object tracking.
-        #
-        # "builtin/latest".
+        # Model to use for object tracking. Supported values: "builtin/stable" (the
+        # default if unset) and "builtin/latest".
         # Corresponds to the JSON property `model`
         # @return [String]
         attr_accessor :model
@@ -679,9 +789,8 @@ module Google
       class GoogleCloudVideointelligenceV1ObjectTrackingFrame
         include Google::Apis::Core::Hashable

-        # Normalized bounding box.
-        #
-        # Range: [0, 1].
+        # Normalized bounding box. The normalized vertex coordinates are relative to the
+        # original image. Range: [0, 1].
         # Corresponds to the JSON property `normalizedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
         attr_accessor :normalized_bounding_box
@@ -702,13 +811,74 @@ module Google
         end
       end

+      # Person detection annotation per video.
+      class GoogleCloudVideointelligenceV1PersonDetectionAnnotation
+        include Google::Apis::Core::Hashable
+
+        # The detected tracks of a person.
+        # Corresponds to the JSON property `tracks`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Track>]
+        attr_accessor :tracks
+
+        # Feature version.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @tracks = args[:tracks] if args.key?(:tracks)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
+      # Config for PERSON_DETECTION.
+      class GoogleCloudVideointelligenceV1PersonDetectionConfig
+        include Google::Apis::Core::Hashable
+
+        # Whether to enable person attributes detection, such as cloth color (black,
+        # blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, etc.
+        # Ignored if 'include_bounding_boxes' is set to false.
+        # Corresponds to the JSON property `includeAttributes`
+        # @return [Boolean]
+        attr_accessor :include_attributes
+        alias_method :include_attributes?, :include_attributes
+
+        # Whether bounding boxes are included in the person detection annotation output.
+        # Corresponds to the JSON property `includeBoundingBoxes`
+        # @return [Boolean]
+        attr_accessor :include_bounding_boxes
+        alias_method :include_bounding_boxes?, :include_bounding_boxes
+
+        # Whether to enable pose landmarks detection. Ignored if 'include_bounding_boxes'
+        # is set to false.
+        # Corresponds to the JSON property `includePoseLandmarks`
+        # @return [Boolean]
+        attr_accessor :include_pose_landmarks
+        alias_method :include_pose_landmarks?, :include_pose_landmarks
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @include_attributes = args[:include_attributes] if args.key?(:include_attributes)
+          @include_bounding_boxes = args[:include_bounding_boxes] if args.key?(:include_bounding_boxes)
+          @include_pose_landmarks = args[:include_pose_landmarks] if args.key?(:include_pose_landmarks)
+        end
+      end
+
       # Config for SHOT_CHANGE_DETECTION.
       class GoogleCloudVideointelligenceV1ShotChangeDetectionConfig
         include Google::Apis::Core::Hashable

-        # Model to use for shot change detection.
-        #
-        # "builtin/latest".
+        # Model to use for shot change detection. Supported values: "builtin/stable" (
+        # the default if unset) and "builtin/latest".
         # Corresponds to the JSON property `model`
         # @return [String]
         attr_accessor :model
@@ -728,12 +898,12 @@ module Google
       class GoogleCloudVideointelligenceV1SpeechContext
         include Google::Apis::Core::Hashable

-        # Optional. A list of strings containing words and phrases "hints" so that
-        #
-        #
-        #
-        #
-        #
+        # Optional. A list of strings containing words and phrases "hints" so that the
+        # speech recognition is more likely to recognize them. This can be used to
+        # improve the accuracy for specific words and phrases, for example, if specific
+        # commands are typically spoken by the user. This can also be used to add
+        # additional words to the vocabulary of the recognizer. See [usage limits](https:
+        # //cloud.google.com/speech/limits#content).
         # Corresponds to the JSON property `phrases`
         # @return [Array<String>]
         attr_accessor :phrases
@@ -754,10 +924,10 @@ module Google

         # Output only. The confidence estimate between 0.0 and 1.0. A higher number
         # indicates an estimated greater likelihood that the recognized words are
-        # correct. This field is set only for the top alternative.
-        #
-        #
-        #
+        # correct. This field is set only for the top alternative. This field is not
+        # guaranteed to be accurate and users should not rely on it to be always
+        # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+        # not set.
         # Corresponds to the JSON property `confidence`
         # @return [Float]
         attr_accessor :confidence
@@ -768,8 +938,8 @@ module Google
         attr_accessor :transcript

         # Output only. A list of word-specific information for each recognized word.
-        # Note: When `enable_speaker_diarization` is set to true, you will see all
-        #
+        # Note: When `enable_speaker_diarization` is set to true, you will see all the
+        # words from the beginning of the audio.
         # Corresponds to the JSON property `words`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1WordInfo>]
         attr_accessor :words
@@ -790,18 +960,17 @@ module Google
       class GoogleCloudVideointelligenceV1SpeechTranscription
         include Google::Apis::Core::Hashable

-        # May contain one or more recognition hypotheses (up to the maximum specified
-        #
-        #
-        #
+        # May contain one or more recognition hypotheses (up to the maximum specified in
+        # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+        # the top (first) alternative being the most probable, as ranked by the
+        # recognizer.
         # Corresponds to the JSON property `alternatives`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
         attr_accessor :alternatives

         # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
-        # language tag of
-        #
-        # most likelihood of being spoken in the audio.
+        # language tag of the language in this result. This language code was detected
+        # to have the most likelihood of being spoken in the audio.
         # Corresponds to the JSON property `languageCode`
         # @return [String]
         attr_accessor :language_code
@@ -828,66 +997,62 @@ module Google
         attr_accessor :audio_tracks

         # Optional. If set, specifies the estimated number of speakers in the
-        # conversation.
-        #
-        # Ignored unless enable_speaker_diarization is set to true.
+        # conversation. If not set, defaults to '2'. Ignored unless
+        # enable_speaker_diarization is set to true.
         # Corresponds to the JSON property `diarizationSpeakerCount`
         # @return [Fixnum]
         attr_accessor :diarization_speaker_count

-        # Optional. If 'true', adds punctuation to recognition result hypotheses.
-        #
-        #
-        #
-        #
-        #
+        # Optional. If 'true', adds punctuation to recognition result hypotheses. This
+        # feature is only available in select languages. Setting this for requests in
+        # other languages has no effect at all. The default 'false' value does not add
+        # punctuation to result hypotheses. NOTE: "This is currently offered as an
+        # experimental service, complimentary to all users. In the future this may be
+        # exclusively available as a premium feature."
         # Corresponds to the JSON property `enableAutomaticPunctuation`
         # @return [Boolean]
         attr_accessor :enable_automatic_punctuation
         alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation

-        # Optional. If 'true', enables speaker detection for each recognized word in
-        #
-        #
-        #
-        #
-        #
-        # identify the speakers in the conversation over time.
+        # Optional. If 'true', enables speaker detection for each recognized word in the
+        # top alternative of the recognition result using a speaker_tag provided in the
+        # WordInfo. Note: When this is true, we send all the words from the beginning of
+        # the audio for the top alternative in every consecutive response. This is done
+        # in order to improve our speaker tags as our models learn to identify the
+        # speakers in the conversation over time.
         # Corresponds to the JSON property `enableSpeakerDiarization`
         # @return [Boolean]
         attr_accessor :enable_speaker_diarization
         alias_method :enable_speaker_diarization?, :enable_speaker_diarization

         # Optional. If `true`, the top result includes a list of words and the
-        # confidence for those words. If `false`, no word-level confidence
-        #
+        # confidence for those words. If `false`, no word-level confidence information
+        # is returned. The default is `false`.
         # Corresponds to the JSON property `enableWordConfidence`
         # @return [Boolean]
         attr_accessor :enable_word_confidence
         alias_method :enable_word_confidence?, :enable_word_confidence

-        # Optional. If set to `true`, the server will attempt to filter out
-        #
-        #
-        # won't be filtered out.
+        # Optional. If set to `true`, the server will attempt to filter out profanities,
+        # replacing all but the initial character in each filtered word with asterisks,
+        # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
         # Corresponds to the JSON property `filterProfanity`
         # @return [Boolean]
         attr_accessor :filter_profanity
         alias_method :filter_profanity?, :filter_profanity

-        # Required. *Required* The language of the supplied audio as a
-        #
-        #
-        #
-        # for a list of the currently supported language codes.
+        # Required. *Required* The language of the supplied audio as a [BCP-47](https://
+        # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
+        # Language Support](https://cloud.google.com/speech/docs/languages) for a list
+        # of the currently supported language codes.
         # Corresponds to the JSON property `languageCode`
         # @return [String]
         attr_accessor :language_code

         # Optional. Maximum number of recognition hypotheses to be returned.
         # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
-        # within each `SpeechTranscription`. The server may return fewer than
-        #
+        # within each `SpeechTranscription`. The server may return fewer than `
+        # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
         # return a maximum of one. If omitted, will return a maximum of one.
         # Corresponds to the JSON property `maxAlternatives`
         # @return [Fixnum]
@@ -954,16 +1119,15 @@ module Google
         include Google::Apis::Core::Hashable

         # Language hint can be specified if the language to be detected is known a
-        # priori. It can increase the accuracy of the detection. Language hint must
-        #
-        #
+        # priori. It can increase the accuracy of the detection. Language hint must be
+        # language code in BCP-47 format. Automatic language detection is performed if
+        # no hint is provided.
         # Corresponds to the JSON property `languageHints`
         # @return [Array<String>]
         attr_accessor :language_hints

-        # Model to use for text detection.
-        #
-        # "builtin/latest".
+        # Model to use for text detection. Supported values: "builtin/stable" (the
+        # default if unset) and "builtin/latest".
         # Corresponds to the JSON property `model`
         # @return [String]
         attr_accessor :model
@@ -979,27 +1143,19 @@ module Google
         end
       end

-      # Video frame level annotation results for text annotation (OCR).
-      #
-      #
+      # Video frame level annotation results for text annotation (OCR). Contains
+      # information regarding timestamp and bounding box locations for the frames
+      # containing detected OCR text snippets.
       class GoogleCloudVideointelligenceV1TextFrame
         include Google::Apis::Core::Hashable

         # Normalized bounding polygon for text (that might not be aligned with axis).
-        # Contains list of the corner points in clockwise order starting from
-        #
-        #
-        #
-        #
-        #
-        # When it's clockwise rotated 180 degrees around the top-left corner it
-        # becomes:
-        # 2----3
-        # | |
-        # 1----0
-        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-        # than 0, or greater than 1 due to trignometric calculations for location of
-        # the box.
+        # Contains list of the corner points in clockwise order starting from top-left
+        # corner. For example, for a rectangular bounding box: When the text is
+        # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+        # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+        # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+        # or greater than 1 due to trignometric calculations for location of the box.
         # Corresponds to the JSON property `rotatedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
         attr_accessor :rotated_bounding_box
@@ -1052,9 +1208,8 @@ module Google
         end
       end

-      # For tracking related features.
-      #
-      # normalized_bounding_box.
+      # For tracking related features. An object at time_offset with attributes, and
+      # located with normalized_bounding_box.
       class GoogleCloudVideointelligenceV1TimestampedObject
         include Google::Apis::Core::Hashable

@@ -1068,15 +1223,14 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1DetectedLandmark>]
         attr_accessor :landmarks

-        # Normalized bounding box.
-        #
-        # Range: [0, 1].
+        # Normalized bounding box. The normalized vertex coordinates are relative to the
+        # original image. Range: [0, 1].
         # Corresponds to the JSON property `normalizedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
         attr_accessor :normalized_bounding_box

-        # Time-offset, relative to the beginning of the video,
-        #
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # video frame for this object.
         # Corresponds to the JSON property `timeOffset`
         # @return [String]
         attr_accessor :time_offset
@@ -1135,20 +1289,19 @@ module Google
       class GoogleCloudVideointelligenceV1VideoAnnotationProgress
         include Google::Apis::Core::Hashable

-        # Specifies which feature is being tracked if the request contains more than
-        #
+        # Specifies which feature is being tracked if the request contains more than one
+        # feature.
         # Corresponds to the JSON property `feature`
         # @return [String]
         attr_accessor :feature

-        # Video file location in
-        # [Cloud Storage](https://cloud.google.com/storage/).
+        # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri

-        # Approximate percentage processed thus far. Guaranteed to be
-        #
+        # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+        # processed.
         # Corresponds to the JSON property `progressPercent`
         # @return [Fixnum]
         attr_accessor :progress_percent
@@ -1187,31 +1340,40 @@ module Google
       class GoogleCloudVideointelligenceV1VideoAnnotationResults
         include Google::Apis::Core::Hashable

-        # The `Status` type defines a logical error model that is suitable for
-        #
-        #
-        #
-        #
-        #
+        # The `Status` type defines a logical error model that is suitable for different
+        # programming environments, including REST APIs and RPC APIs. It is used by [
+        # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+        # data: error code, error message, and error details. You can find out more
+        # about this error model and how to work with it in the [API Design Guide](https:
+        # //cloud.google.com/apis/design/errors).
         # Corresponds to the JSON property `error`
         # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
         attr_accessor :error

-        # Explicit content annotation (based on per-frame visual signals only).
-        #
-        #
+        # Explicit content annotation (based on per-frame visual signals only). If no
+        # explicit content has been detected in a frame, no annotations are present for
+        # that frame.
         # Corresponds to the JSON property `explicitAnnotation`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
         attr_accessor :explicit_annotation

-        #
-        #
+        # Deprecated. Please use `face_detection_annotations` instead.
+        # Corresponds to the JSON property `faceAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1FaceAnnotation>]
+        attr_accessor :face_annotations
+
+        # Face detection annotations.
+        # Corresponds to the JSON property `faceDetectionAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1FaceDetectionAnnotation>]
+        attr_accessor :face_detection_annotations
+
+        # Label annotations on frame level. There is exactly one element for each unique
+        # label.
         # Corresponds to the JSON property `frameLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :frame_label_annotations

-        # Video file location in
-        # [Cloud Storage](https://cloud.google.com/storage/).
+        # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri
@@ -1226,6 +1388,11 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation>]
         attr_accessor :object_annotations

+        # Person detection annotations.
+        # Corresponds to the JSON property `personDetectionAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1PersonDetectionAnnotation>]
+        attr_accessor :person_detection_annotations
+
         # Video segment.
         # Corresponds to the JSON property `segment`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment]
@@ -1238,11 +1405,11 @@ module Google
         attr_accessor :segment_label_annotations

         # Presence label annotations on video level or user-specified segment level.
-        # There is exactly one element for each unique label. Compared to the
-        #
-        #
-        #
-        #
+        # There is exactly one element for each unique label. Compared to the existing
+        # topical `segment_label_annotations`, this field presents more fine-grained,
+        # segment-level labels detected in video content and is made available only when
+        # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+        # request.
         # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :segment_presence_label_annotations
@@ -1252,17 +1419,17 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment>]
         attr_accessor :shot_annotations

-        # Topical label annotations on shot level.
-        #
+        # Topical label annotations on shot level. There is exactly one element for each
+        # unique label.
         # Corresponds to the JSON property `shotLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :shot_label_annotations

         # Presence label annotations on shot level. There is exactly one element for
-        # each unique label. Compared to the existing topical
-        #
-        #
-        #
+        # each unique label. Compared to the existing topical `shot_label_annotations`,
+        # this field presents more fine-grained, shot-level labels detected in video
+        # content and is made available only when the client sets `LabelDetectionConfig.
+        # model` to "builtin/latest" in the request.
         # Corresponds to the JSON property `shotPresenceLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :shot_presence_label_annotations
@@ -1272,9 +1439,8 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscription>]
         attr_accessor :speech_transcriptions

-        # OCR text detection and tracking.
-        #
-        # frame information associated with it.
+        # OCR text detection and tracking. Annotations for list of detected text
+        # snippets. Each will have list of frame information associated with it.
         # Corresponds to the JSON property `textAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextAnnotation>]
         attr_accessor :text_annotations
@@ -1287,10 +1453,13 @@ module Google
         def update!(**args)
           @error = args[:error] if args.key?(:error)
           @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+          @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+          @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
           @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
           @input_uri = args[:input_uri] if args.key?(:input_uri)
           @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
           @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+          @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
           @segment = args[:segment] if args.key?(:segment)
           @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
           @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -1311,6 +1480,11 @@ module Google
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ExplicitContentDetectionConfig]
         attr_accessor :explicit_content_detection_config

+        # Config for FACE_DETECTION.
+        # Corresponds to the JSON property `faceDetectionConfig`
+        # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1FaceDetectionConfig]
+        attr_accessor :face_detection_config
+
         # Config for LABEL_DETECTION.
         # Corresponds to the JSON property `labelDetectionConfig`
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelDetectionConfig]
@@ -1321,9 +1495,14 @@ module Google
         # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingConfig]
         attr_accessor :object_tracking_config

-        #
-        # to
-        #
+        # Config for PERSON_DETECTION.
+        # Corresponds to the JSON property `personDetectionConfig`
+        # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1PersonDetectionConfig]
+        attr_accessor :person_detection_config
+
+        # Video segments to annotate. The segments may overlap and are not required to
+        # be contiguous or span the whole video. If unspecified, each video is treated
+        # as a single segment.
         # Corresponds to the JSON property `segments`
         # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment>]
         attr_accessor :segments
@@ -1350,8 +1529,10 @@ module Google
         # Update properties of this object
         def update!(**args)
           @explicit_content_detection_config = args[:explicit_content_detection_config] if args.key?(:explicit_content_detection_config)
+          @face_detection_config = args[:face_detection_config] if args.key?(:face_detection_config)
           @label_detection_config = args[:label_detection_config] if args.key?(:label_detection_config)
           @object_tracking_config = args[:object_tracking_config] if args.key?(:object_tracking_config)
+          @person_detection_config = args[:person_detection_config] if args.key?(:person_detection_config)
           @segments = args[:segments] if args.key?(:segments)
           @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config)
           @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config)
@@ -1363,14 +1544,14 @@ module Google
       class GoogleCloudVideointelligenceV1VideoSegment
         include Google::Apis::Core::Hashable

-        # Time-offset, relative to the beginning of the video,
-        #
+        # Time-offset, relative to the beginning of the video, corresponding to the end
+        # of the segment (inclusive).
         # Corresponds to the JSON property `endTimeOffset`
         # @return [String]
         attr_accessor :end_time_offset

-        # Time-offset, relative to the beginning of the video,
-        #
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # start of the segment (inclusive).
         # Corresponds to the JSON property `startTimeOffset`
         # @return [String]
         attr_accessor :start_time_offset
@@ -1387,41 +1568,41 @@ module Google
 end

 # Word-specific information for recognized words. Word information is only
-# included in the response when certain request parameters are set, such
-#
+# included in the response when certain request parameters are set, such as `
+# enable_word_time_offsets`.
 class GoogleCloudVideointelligenceV1WordInfo
 include Google::Apis::Core::Hashable

 # Output only. The confidence estimate between 0.0 and 1.0. A higher number
 # indicates an estimated greater likelihood that the recognized words are
-# correct. This field is set only for the top alternative.
-#
-#
-#
+# correct. This field is set only for the top alternative. This field is not
+# guaranteed to be accurate and users should not rely on it to be always
+# provided. The default of 0.0 is a sentinel value indicating `confidence` was
+# not set.
 # Corresponds to the JSON property `confidence`
 # @return [Float]
 attr_accessor :confidence

-# Time offset relative to the beginning of the audio, and
-#
-#
-#
+# Time offset relative to the beginning of the audio, and corresponding to the
+# end of the spoken word. This field is only set if `enable_word_time_offsets=
+# true` and only in the top hypothesis. This is an experimental feature and the
+# accuracy of the time offset can vary.
 # Corresponds to the JSON property `endTime`
 # @return [String]
 attr_accessor :end_time

-# Output only. A distinct integer value is assigned for every speaker within
-#
-#
-#
+# Output only. A distinct integer value is assigned for every speaker within the
+# audio. This field specifies which one of those speakers was detected to have
+# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+# only set if speaker diarization is enabled.
 # Corresponds to the JSON property `speakerTag`
 # @return [Fixnum]
 attr_accessor :speaker_tag

-# Time offset relative to the beginning of the audio, and
-#
-#
-#
+# Time offset relative to the beginning of the audio, and corresponding to the
+# start of the spoken word. This field is only set if `enable_word_time_offsets=
+# true` and only in the top hypothesis. This is an experimental feature and the
+# accuracy of the time offset can vary.
 # Corresponds to the JSON property `startTime`
 # @return [String]
 attr_accessor :start_time
@@ -1445,9 +1626,9 @@ module Google
 end
 end

-# Video annotation progress. Included in the `metadata`
-#
-#
+# Video annotation progress. Included in the `metadata` field of the `Operation`
+# returned by the `GetOperation` call of the `google::longrunning::Operations`
+# service.
 class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
 include Google::Apis::Core::Hashable

@@ -1466,9 +1647,9 @@ module Google
 end
 end

-# Video annotation response. Included in the `response`
-#
-#
+# Video annotation response. Included in the `response` field of the `Operation`
+# returned by the `GetOperation` call of the `google::longrunning::Operations`
+# service.
 class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
 include Google::Apis::Core::Hashable

@@ -1496,14 +1677,14 @@ module Google
 # @return [Float]
 attr_accessor :confidence

-# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
-#
+# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+# full list of supported type names will be provided in the document.
 # Corresponds to the JSON property `name`
 # @return [String]
 attr_accessor :name

-# Text value of the detection result. For example, the value for "HairColor"
-#
+# Text value of the detection result. For example, the value for "HairColor" can
+# be "black", "blonde", etc.
 # Corresponds to the JSON property `value`
 # @return [String]
 attr_accessor :value
@@ -1535,9 +1716,8 @@ module Google
 # @return [String]
 attr_accessor :name

-# A vertex represents a 2D point in the image.
-#
-# and range from 0 to 1.
+# A vertex represents a 2D point in the image. NOTE: the normalized vertex
+# coordinates are relative to the original image and range from 0 to 1.
 # Corresponds to the JSON property `point`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
 attr_accessor :point
@@ -1563,8 +1743,7 @@ module Google
 # @return [String]
 attr_accessor :description

-# Opaque entity ID. Some IDs may be available in
-# [Google Knowledge Graph Search
+# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
 # API](https://developers.google.com/knowledge-graph/).
 # Corresponds to the JSON property `entityId`
 # @return [String]
@@ -1587,9 +1766,9 @@ module Google
 end
 end

-# Explicit content annotation (based on per-frame visual signals only).
-#
-#
+# Explicit content annotation (based on per-frame visual signals only). If no
+# explicit content has been detected in a frame, no annotations are present for
+# that frame.
 class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
 include Google::Apis::Core::Hashable

@@ -1640,14 +1819,110 @@ module Google
 end
 end

+# Deprecated. No effect.
+class GoogleCloudVideointelligenceV1beta2FaceAnnotation
+include Google::Apis::Core::Hashable
+
+# All video frames where a face was detected.
+# Corresponds to the JSON property `frames`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2FaceFrame>]
+attr_accessor :frames
+
+# All video segments where a face was detected.
+# Corresponds to the JSON property `segments`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2FaceSegment>]
+attr_accessor :segments
+
+# Thumbnail of a representative face view (in JPEG format).
+# Corresponds to the JSON property `thumbnail`
+# NOTE: Values are automatically base64 encoded/decoded in the client library.
+# @return [String]
+attr_accessor :thumbnail
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@frames = args[:frames] if args.key?(:frames)
+@segments = args[:segments] if args.key?(:segments)
+@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+end
+end
+
+# Face detection annotation.
+class GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation
+include Google::Apis::Core::Hashable
+
+# Feature version.
+# Corresponds to the JSON property `version`
+# @return [String]
+attr_accessor :version
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@version = args[:version] if args.key?(:version)
+end
+end
+
+# Deprecated. No effect.
+class GoogleCloudVideointelligenceV1beta2FaceFrame
+include Google::Apis::Core::Hashable
+
+# Normalized Bounding boxes in a frame. There can be more than one boxes if the
+# same face is detected in multiple locations within the current frame.
+# Corresponds to the JSON property `normalizedBoundingBoxes`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox>]
+attr_accessor :normalized_bounding_boxes
+
+# Time-offset, relative to the beginning of the video, corresponding to the
+# video frame for this location.
+# Corresponds to the JSON property `timeOffset`
+# @return [String]
+attr_accessor :time_offset
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+@time_offset = args[:time_offset] if args.key?(:time_offset)
+end
+end
+
+# Video segment level annotation results for face detection.
+class GoogleCloudVideointelligenceV1beta2FaceSegment
+include Google::Apis::Core::Hashable
+
+# Video segment.
+# Corresponds to the JSON property `segment`
+# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment]
+attr_accessor :segment
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@segment = args[:segment] if args.key?(:segment)
+end
+end
+
 # Label annotation.
 class GoogleCloudVideointelligenceV1beta2LabelAnnotation
 include Google::Apis::Core::Hashable

-# Common categories for the detected entity.
-#
-#
-# also be a `pet`.
+# Common categories for the detected entity. For example, when the label is `
+# Terrier`, the category is likely `dog`. And in some cases there might be more
+# than one categories e.g., `Terrier` could also be a `pet`.
 # Corresponds to the JSON property `categoryEntities`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity>]
 attr_accessor :category_entities
@@ -1746,14 +2021,14 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity]
 attr_accessor :entity

-# All video segments where the recognized logo appears. There might be
-#
+# All video segments where the recognized logo appears. There might be multiple
+# instances of the same logo class appearing in one VideoSegment.
 # Corresponds to the JSON property `segments`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
 attr_accessor :segments

-# All logo tracks where the recognized logo appears. Each track corresponds
-#
+# All logo tracks where the recognized logo appears. Each track corresponds to
+# one logo instance appearing in consecutive frames.
 # Corresponds to the JSON property `tracks`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Track>]
 attr_accessor :tracks
@@ -1770,9 +2045,8 @@ module Google
 end
 end

-# Normalized bounding box.
-#
-# Range: [0, 1].
+# Normalized bounding box. The normalized vertex coordinates are relative to the
+# original image. Range: [0, 1].
 class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
 include Google::Apis::Core::Hashable

@@ -1810,20 +2084,12 @@ module Google
 end

 # Normalized bounding polygon for text (that might not be aligned with axis).
-# Contains list of the corner points in clockwise order starting from
-#
-#
-#
-#
-#
-# When it's clockwise rotated 180 degrees around the top-left corner it
-# becomes:
-# 2----3
-# | |
-# 1----0
-# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-# than 0, or greater than 1 due to trignometric calculations for location of
-# the box.
+# Contains list of the corner points in clockwise order starting from top-left
+# corner. For example, for a rectangular bounding box: When the text is
+# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+# or greater than 1 due to trignometric calculations for location of the box.
 class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
 include Google::Apis::Core::Hashable

@@ -1842,9 +2108,8 @@ module Google
 end
 end

-# A vertex represents a 2D point in the image.
-#
-# and range from 0 to 1.
+# A vertex represents a 2D point in the image. NOTE: the normalized vertex
+# coordinates are relative to the original image and range from 0 to 1.
 class GoogleCloudVideointelligenceV1beta2NormalizedVertex
 include Google::Apis::Core::Hashable

@@ -1883,10 +2148,10 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity]
 attr_accessor :entity

-# Information corresponding to all frames where this object track appears.
-#
-#
-#
+# Information corresponding to all frames where this object track appears. Non-
+# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+# frames.
 # Corresponds to the JSON property `frames`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
 attr_accessor :frames
@@ -1896,12 +2161,11 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment]
 attr_accessor :segment

-# Streaming mode ONLY.
-#
-#
-#
-#
-# ObjectTrackAnnotation of the same track_id over time.
+# Streaming mode ONLY. In streaming mode, we do not know the end time of a
+# tracked object before it is completed. Hence, there is no VideoSegment info
+# returned. Instead, we provide a unique identifiable integer track_id so that
+# the customers can correlate the results of the ongoing ObjectTrackAnnotation
+# of the same track_id over time.
 # Corresponds to the JSON property `trackId`
 # @return [Fixnum]
 attr_accessor :track_id
@@ -1931,9 +2195,8 @@ module Google
 class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
 include Google::Apis::Core::Hashable

-# Normalized bounding box.
-#
-# Range: [0, 1].
+# Normalized bounding box. The normalized vertex coordinates are relative to the
+# original image. Range: [0, 1].
 # Corresponds to the JSON property `normalizedBoundingBox`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
 attr_accessor :normalized_bounding_box
@@ -1954,16 +2217,41 @@ module Google
 end
 end

+# Person detection annotation per video.
+class GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation
+include Google::Apis::Core::Hashable
+
+# The detected tracks of a person.
+# Corresponds to the JSON property `tracks`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Track>]
+attr_accessor :tracks
+
+# Feature version.
+# Corresponds to the JSON property `version`
+# @return [String]
+attr_accessor :version
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@tracks = args[:tracks] if args.key?(:tracks)
+@version = args[:version] if args.key?(:version)
+end
+end
+
 # Alternative hypotheses (a.k.a. n-best list).
 class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative
 include Google::Apis::Core::Hashable

 # Output only. The confidence estimate between 0.0 and 1.0. A higher number
 # indicates an estimated greater likelihood that the recognized words are
-# correct. This field is set only for the top alternative.
-#
-#
-#
+# correct. This field is set only for the top alternative. This field is not
+# guaranteed to be accurate and users should not rely on it to be always
+# provided. The default of 0.0 is a sentinel value indicating `confidence` was
+# not set.
 # Corresponds to the JSON property `confidence`
 # @return [Float]
 attr_accessor :confidence
@@ -1974,8 +2262,8 @@ module Google
|
|
1974
2262
|
attr_accessor :transcript
|
1975
2263
|
|
1976
2264
|
# Output only. A list of word-specific information for each recognized word.
|
1977
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
1978
|
-
#
|
2265
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
2266
|
+
# words from the beginning of the audio.
|
1979
2267
|
# Corresponds to the JSON property `words`
|
1980
2268
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2WordInfo>]
|
1981
2269
|
attr_accessor :words
|
@@ -1996,18 +2284,17 @@ module Google
|
|
1996
2284
|
class GoogleCloudVideointelligenceV1beta2SpeechTranscription
|
1997
2285
|
include Google::Apis::Core::Hashable
|
1998
2286
|
|
1999
|
-
# May contain one or more recognition hypotheses (up to the maximum specified
|
2000
|
-
#
|
2001
|
-
#
|
2002
|
-
#
|
2287
|
+
# May contain one or more recognition hypotheses (up to the maximum specified in
|
2288
|
+
# `max_alternatives`). These alternatives are ordered in terms of accuracy, with
|
2289
|
+
# the top (first) alternative being the most probable, as ranked by the
|
2290
|
+
# recognizer.
|
2003
2291
|
# Corresponds to the JSON property `alternatives`
|
2004
2292
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
|
2005
2293
|
attr_accessor :alternatives
|
2006
2294
|
|
2007
2295
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
2008
|
-
# language tag of
|
2009
|
-
#
|
2010
|
-
# most likelihood of being spoken in the audio.
|
2296
|
+
# language tag of the language in this result. This language code was detected
|
2297
|
+
# to have the most likelihood of being spoken in the audio.
|
2011
2298
|
# Corresponds to the JSON property `languageCode`
|
2012
2299
|
# @return [String]
|
2013
2300
|
attr_accessor :language_code
|
@@ -2056,27 +2343,19 @@ module Google
|
|
2056
2343
|
end
|
2057
2344
|
end
|
2058
2345
|
|
2059
|
-
# Video frame level annotation results for text annotation (OCR).
|
2060
|
-
#
|
2061
|
-
#
|
2346
|
+
# Video frame level annotation results for text annotation (OCR). Contains
|
2347
|
+
# information regarding timestamp and bounding box locations for the frames
|
2348
|
+
# containing detected OCR text snippets.
|
2062
2349
|
class GoogleCloudVideointelligenceV1beta2TextFrame
|
2063
2350
|
include Google::Apis::Core::Hashable
|
2064
2351
|
|
2065
2352
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
2066
|
-
# Contains list of the corner points in clockwise order starting from
|
2067
|
-
#
|
2068
|
-
#
|
2069
|
-
#
|
2070
|
-
#
|
2071
|
-
#
|
2072
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
2073
|
-
# becomes:
|
2074
|
-
# 2----3
|
2075
|
-
# | |
|
2076
|
-
# 1----0
|
2077
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
2078
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
2079
|
-
# the box.
|
2353
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
2354
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
2355
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
2356
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
2357
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
2358
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
2080
2359
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
2081
2360
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
|
2082
2361
|
attr_accessor :rotated_bounding_box
|
@@ -2129,9 +2408,8 @@ module Google
|
|
2129
2408
|
end
|
2130
2409
|
end
|
2131
2410
|
|
2132
|
-
# For tracking related features.
|
2133
|
-
#
|
2134
|
-
# normalized_bounding_box.
|
2411
|
+
# For tracking related features. An object at time_offset with attributes, and
|
2412
|
+
# located with normalized_bounding_box.
|
2135
2413
|
class GoogleCloudVideointelligenceV1beta2TimestampedObject
|
2136
2414
|
include Google::Apis::Core::Hashable
|
2137
2415
|
|
@@ -2145,15 +2423,14 @@ module Google
|
|
2145
2423
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
|
2146
2424
|
attr_accessor :landmarks
|
2147
2425
|
|
2148
|
-
# Normalized bounding box.
|
2149
|
-
#
|
2150
|
-
# Range: [0, 1].
|
2426
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
2427
|
+
# original image. Range: [0, 1].
|
2151
2428
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
2152
2429
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
|
2153
2430
|
attr_accessor :normalized_bounding_box
|
2154
2431
|
|
2155
|
-
# Time-offset, relative to the beginning of the video,
|
2156
|
-
#
|
2432
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
2433
|
+
# video frame for this object.
|
2157
2434
|
# Corresponds to the JSON property `timeOffset`
|
2158
2435
|
# @return [String]
|
2159
2436
|
attr_accessor :time_offset
|
@@ -2212,20 +2489,19 @@ module Google
|
|
2212
2489
|
class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
|
2213
2490
|
include Google::Apis::Core::Hashable
|
2214
2491
|
|
2215
|
-
# Specifies which feature is being tracked if the request contains more than
|
2216
|
-
#
|
2492
|
+
# Specifies which feature is being tracked if the request contains more than one
|
2493
|
+
# feature.
|
2217
2494
|
# Corresponds to the JSON property `feature`
|
2218
2495
|
# @return [String]
|
2219
2496
|
attr_accessor :feature
|
2220
2497
|
|
2221
|
-
# Video file location in
|
2222
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
2498
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
2223
2499
|
# Corresponds to the JSON property `inputUri`
|
2224
2500
|
# @return [String]
|
2225
2501
|
attr_accessor :input_uri
|
2226
2502
|
|
2227
|
-
# Approximate percentage processed thus far. Guaranteed to be
|
2228
|
-
#
|
2503
|
+
# Approximate percentage processed thus far. Guaranteed to be 100 when fully
|
2504
|
+
# processed.
|
2229
2505
|
# Corresponds to the JSON property `progressPercent`
|
2230
2506
|
# @return [Fixnum]
|
2231
2507
|
attr_accessor :progress_percent
|
@@ -2264,31 +2540,40 @@ module Google
 class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
 include Google::Apis::Core::Hashable

-# The `Status` type defines a logical error model that is suitable for
-#
-#
-#
-#
-#
+# The `Status` type defines a logical error model that is suitable for different
+# programming environments, including REST APIs and RPC APIs. It is used by [
+# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+# data: error code, error message, and error details. You can find out more
+# about this error model and how to work with it in the [API Design Guide](https:
+# //cloud.google.com/apis/design/errors).
 # Corresponds to the JSON property `error`
 # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
 attr_accessor :error

-# Explicit content annotation (based on per-frame visual signals only).
-#
-#
+# Explicit content annotation (based on per-frame visual signals only). If no
+# explicit content has been detected in a frame, no annotations are present for
+# that frame.
 # Corresponds to the JSON property `explicitAnnotation`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
 attr_accessor :explicit_annotation

-#
-#
+# Deprecated. Please use `face_detection_annotations` instead.
+# Corresponds to the JSON property `faceAnnotations`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2FaceAnnotation>]
+attr_accessor :face_annotations
+
+# Face detection annotations.
+# Corresponds to the JSON property `faceDetectionAnnotations`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation>]
+attr_accessor :face_detection_annotations
+
+# Label annotations on frame level. There is exactly one element for each unique
+# label.
 # Corresponds to the JSON property `frameLabelAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
 attr_accessor :frame_label_annotations

-# Video file location in
-# [Cloud Storage](https://cloud.google.com/storage/).
+# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
 # Corresponds to the JSON property `inputUri`
 # @return [String]
 attr_accessor :input_uri
@@ -2303,6 +2588,11 @@ module Google
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation>]
 attr_accessor :object_annotations

+# Person detection annotations.
+# Corresponds to the JSON property `personDetectionAnnotations`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation>]
+attr_accessor :person_detection_annotations
+
 # Video segment.
 # Corresponds to the JSON property `segment`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment]
@@ -2315,11 +2605,11 @@ module Google
|
|
2315
2605
|
attr_accessor :segment_label_annotations
|
2316
2606
|
|
2317
2607
|
# Presence label annotations on video level or user-specified segment level.
|
2318
|
-
# There is exactly one element for each unique label. Compared to the
|
2319
|
-
#
|
2320
|
-
#
|
2321
|
-
#
|
2322
|
-
#
|
2608
|
+
# There is exactly one element for each unique label. Compared to the existing
|
2609
|
+
# topical `segment_label_annotations`, this field presents more fine-grained,
|
2610
|
+
# segment-level labels detected in video content and is made available only when
|
2611
|
+
# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
|
2612
|
+
# request.
|
2323
2613
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
2324
2614
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
|
2325
2615
|
attr_accessor :segment_presence_label_annotations
|
@@ -2329,17 +2619,17 @@ module Google
|
|
2329
2619
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
|
2330
2620
|
attr_accessor :shot_annotations
|
2331
2621
|
|
2332
|
-
# Topical label annotations on shot level.
|
2333
|
-
#
|
2622
|
+
# Topical label annotations on shot level. There is exactly one element for each
|
2623
|
+
# unique label.
|
2334
2624
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
2335
2625
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
|
2336
2626
|
attr_accessor :shot_label_annotations
|
2337
2627
|
|
2338
2628
|
# Presence label annotations on shot level. There is exactly one element for
|
2339
|
-
# each unique label. Compared to the existing topical
|
2340
|
-
#
|
2341
|
-
#
|
2342
|
-
#
|
2629
|
+
# each unique label. Compared to the existing topical `shot_label_annotations`,
|
2630
|
+
# this field presents more fine-grained, shot-level labels detected in video
|
2631
|
+
# content and is made available only when the client sets `LabelDetectionConfig.
|
2632
|
+
# model` to "builtin/latest" in the request.
|
2343
2633
|
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
|
2344
2634
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
|
2345
2635
|
attr_accessor :shot_presence_label_annotations
|
@@ -2349,9 +2639,8 @@ module Google
|
|
2349
2639
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
|
2350
2640
|
attr_accessor :speech_transcriptions
|
2351
2641
|
|
2352
|
-
# OCR text detection and tracking.
|
2353
|
-
#
|
2354
|
-
# frame information associated with it.
|
2642
|
+
# OCR text detection and tracking. Annotations for list of detected text
|
2643
|
+
# snippets. Each will have list of frame information associated with it.
|
2355
2644
|
# Corresponds to the JSON property `textAnnotations`
|
2356
2645
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
|
2357
2646
|
attr_accessor :text_annotations
|
@@ -2364,10 +2653,13 @@ module Google
 def update!(**args)
 @error = args[:error] if args.key?(:error)
 @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+@face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+@face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
 @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
 @input_uri = args[:input_uri] if args.key?(:input_uri)
 @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
 @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+@person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
 @segment = args[:segment] if args.key?(:segment)
 @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
 @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -2383,14 +2675,14 @@ module Google
|
|
2383
2675
|
class GoogleCloudVideointelligenceV1beta2VideoSegment
|
2384
2676
|
include Google::Apis::Core::Hashable
|
2385
2677
|
|
2386
|
-
# Time-offset, relative to the beginning of the video,
|
2387
|
-
#
|
2678
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
2679
|
+
# of the segment (inclusive).
|
2388
2680
|
# Corresponds to the JSON property `endTimeOffset`
|
2389
2681
|
# @return [String]
|
2390
2682
|
attr_accessor :end_time_offset
|
2391
2683
|
|
2392
|
-
# Time-offset, relative to the beginning of the video,
|
2393
|
-
#
|
2684
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
2685
|
+
# start of the segment (inclusive).
|
2394
2686
|
# Corresponds to the JSON property `startTimeOffset`
|
2395
2687
|
# @return [String]
|
2396
2688
|
attr_accessor :start_time_offset
|
@@ -2407,41 +2699,41 @@ module Google
|
|
2407
2699
|
end
|
2408
2700
|
|
2409
2701
|
# Word-specific information for recognized words. Word information is only
|
2410
|
-
# included in the response when certain request parameters are set, such
|
2411
|
-
#
|
2702
|
+
# included in the response when certain request parameters are set, such as `
|
2703
|
+
# enable_word_time_offsets`.
|
2412
2704
|
class GoogleCloudVideointelligenceV1beta2WordInfo
|
2413
2705
|
include Google::Apis::Core::Hashable
|
2414
2706
|
|
2415
2707
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
2416
2708
|
# indicates an estimated greater likelihood that the recognized words are
|
2417
|
-
# correct. This field is set only for the top alternative.
|
2418
|
-
#
|
2419
|
-
#
|
2420
|
-
#
|
2709
|
+
# correct. This field is set only for the top alternative. This field is not
|
2710
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
2711
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
2712
|
+
# not set.
|
2421
2713
|
# Corresponds to the JSON property `confidence`
|
2422
2714
|
# @return [Float]
|
2423
2715
|
attr_accessor :confidence
|
2424
2716
|
|
2425
|
-
# Time offset relative to the beginning of the audio, and
|
2426
|
-
#
|
2427
|
-
#
|
2428
|
-
#
|
2717
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
2718
|
+
# end of the spoken word. This field is only set if `enable_word_time_offsets=
|
2719
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
2720
|
+
# accuracy of the time offset can vary.
|
2429
2721
|
# Corresponds to the JSON property `endTime`
|
2430
2722
|
# @return [String]
|
2431
2723
|
attr_accessor :end_time
|
2432
2724
|
|
2433
|
-
# Output only. A distinct integer value is assigned for every speaker within
|
2434
|
-
#
|
2435
|
-
#
|
2436
|
-
#
|
2725
|
+
# Output only. A distinct integer value is assigned for every speaker within the
|
2726
|
+
# audio. This field specifies which one of those speakers was detected to have
|
2727
|
+
# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
|
2728
|
+
# only set if speaker diarization is enabled.
|
2437
2729
|
# Corresponds to the JSON property `speakerTag`
|
2438
2730
|
# @return [Fixnum]
|
2439
2731
|
attr_accessor :speaker_tag
|
2440
2732
|
|
2441
|
-
# Time offset relative to the beginning of the audio, and
|
2442
|
-
#
|
2443
|
-
#
|
2444
|
-
#
|
2733
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
2734
|
+
# start of the spoken word. This field is only set if `enable_word_time_offsets=
|
2735
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
2736
|
+
# accuracy of the time offset can vary.
|
2445
2737
|
# Corresponds to the JSON property `startTime`
|
2446
2738
|
# @return [String]
|
2447
2739
|
attr_accessor :start_time
|
@@ -2465,9 +2757,9 @@ module Google
|
|
2465
2757
|
end
|
2466
2758
|
end
|
2467
2759
|
|
2468
|
-
# Video annotation progress. Included in the `metadata`
|
2469
|
-
#
|
2470
|
-
#
|
2760
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
2761
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
2762
|
+
# service.
|
2471
2763
|
class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
|
2472
2764
|
include Google::Apis::Core::Hashable
|
2473
2765
|
|
@@ -2486,9 +2778,9 @@ module Google
|
|
2486
2778
|
end
|
2487
2779
|
end
|
2488
2780
|
|
2489
|
-
# Video annotation response. Included in the `response`
|
2490
|
-
#
|
2491
|
-
#
|
2781
|
+
# Video annotation response. Included in the `response` field of the `Operation`
|
2782
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
2783
|
+
# service.
|
2492
2784
|
class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
|
2493
2785
|
include Google::Apis::Core::Hashable
|
2494
2786
|
|
@@ -2516,14 +2808,14 @@ module Google
|
|
2516
2808
|
# @return [Float]
|
2517
2809
|
attr_accessor :confidence
|
2518
2810
|
|
2519
|
-
# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
|
2520
|
-
#
|
2811
|
+
# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
|
2812
|
+
# full list of supported type names will be provided in the document.
|
2521
2813
|
# Corresponds to the JSON property `name`
|
2522
2814
|
# @return [String]
|
2523
2815
|
attr_accessor :name
|
2524
2816
|
|
2525
|
-
# Text value of the detection result. For example, the value for "HairColor"
|
2526
|
-
#
|
2817
|
+
# Text value of the detection result. For example, the value for "HairColor" can
|
2818
|
+
# be "black", "blonde", etc.
|
2527
2819
|
# Corresponds to the JSON property `value`
|
2528
2820
|
# @return [String]
|
2529
2821
|
attr_accessor :value
|
@@ -2555,9 +2847,8 @@ module Google
|
|
2555
2847
|
# @return [String]
|
2556
2848
|
attr_accessor :name
|
2557
2849
|
|
2558
|
-
# A vertex represents a 2D point in the image.
|
2559
|
-
#
|
2560
|
-
# and range from 0 to 1.
|
2850
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
2851
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
2561
2852
|
# Corresponds to the JSON property `point`
|
2562
2853
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
|
2563
2854
|
attr_accessor :point
|
@@ -2583,8 +2874,7 @@ module Google
|
|
2583
2874
|
# @return [String]
|
2584
2875
|
attr_accessor :description
|
2585
2876
|
|
2586
|
-
# Opaque entity ID. Some IDs may be available in
|
2587
|
-
# [Google Knowledge Graph Search
|
2877
|
+
# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
|
2588
2878
|
# API](https://developers.google.com/knowledge-graph/).
|
2589
2879
|
# Corresponds to the JSON property `entityId`
|
2590
2880
|
# @return [String]
|
@@ -2607,9 +2897,9 @@ module Google
|
|
2607
2897
|
end
|
2608
2898
|
end
|
2609
2899
|
|
2610
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
2611
|
-
#
|
2612
|
-
#
|
2900
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
2901
|
+
# explicit content has been detected in a frame, no annotations are present for
|
2902
|
+
# that frame.
|
2613
2903
|
class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
|
2614
2904
|
include Google::Apis::Core::Hashable
|
2615
2905
|
|
@@ -2629,19 +2919,97 @@ module Google
|
|
2629
2919
|
|
2630
2920
|
# Update properties of this object
|
2631
2921
|
def update!(**args)
|
2632
|
-
@frames = args[:frames] if args.key?(:frames)
|
2922
|
+
@frames = args[:frames] if args.key?(:frames)
|
2923
|
+
@version = args[:version] if args.key?(:version)
|
2924
|
+
end
|
2925
|
+
end
|
2926
|
+
|
2927
|
+
# Video frame level annotation results for explicit content.
|
2928
|
+
class GoogleCloudVideointelligenceV1p1beta1ExplicitContentFrame
|
2929
|
+
include Google::Apis::Core::Hashable
|
2930
|
+
|
2931
|
+
# Likelihood of the pornography content..
|
2932
|
+
# Corresponds to the JSON property `pornographyLikelihood`
|
2933
|
+
# @return [String]
|
2934
|
+
attr_accessor :pornography_likelihood
|
2935
|
+
|
2936
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
2937
|
+
# video frame for this location.
|
2938
|
+
# Corresponds to the JSON property `timeOffset`
|
2939
|
+
# @return [String]
|
2940
|
+
attr_accessor :time_offset
|
2941
|
+
|
2942
|
+
def initialize(**args)
|
2943
|
+
update!(**args)
|
2944
|
+
end
|
2945
|
+
|
2946
|
+
# Update properties of this object
|
2947
|
+
def update!(**args)
|
2948
|
+
@pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
|
2949
|
+
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
2950
|
+
end
|
2951
|
+
end
|
2952
|
+
|
2953
|
+
# Deprecated. No effect.
|
2954
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceAnnotation
|
2955
|
+
include Google::Apis::Core::Hashable
|
2956
|
+
|
2957
|
+
# All video frames where a face was detected.
|
2958
|
+
# Corresponds to the JSON property `frames`
|
2959
|
+
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1FaceFrame>]
|
2960
|
+
attr_accessor :frames
|
2961
|
+
|
2962
|
+
# All video segments where a face was detected.
|
2963
|
+
# Corresponds to the JSON property `segments`
|
2964
|
+
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1FaceSegment>]
|
2965
|
+
attr_accessor :segments
|
2966
|
+
|
2967
|
+
# Thumbnail of a representative face view (in JPEG format).
|
2968
|
+
# Corresponds to the JSON property `thumbnail`
|
2969
|
+
# NOTE: Values are automatically base64 encoded/decoded in the client library.
|
2970
|
+
# @return [String]
|
2971
|
+
attr_accessor :thumbnail
|
2972
|
+
|
2973
|
+
def initialize(**args)
|
2974
|
+
update!(**args)
|
2975
|
+
end
|
2976
|
+
|
2977
|
+
# Update properties of this object
|
2978
|
+
def update!(**args)
|
2979
|
+
@frames = args[:frames] if args.key?(:frames)
|
2980
|
+
@segments = args[:segments] if args.key?(:segments)
|
2981
|
+
@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
|
2982
|
+
end
|
2983
|
+
end
|
2984
|
+
|
2985
|
+
# Face detection annotation.
|
2986
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation
|
2987
|
+
include Google::Apis::Core::Hashable
|
2988
|
+
|
2989
|
+
# Feature version.
|
2990
|
+
# Corresponds to the JSON property `version`
|
2991
|
+
# @return [String]
|
2992
|
+
attr_accessor :version
|
2993
|
+
|
2994
|
+
def initialize(**args)
|
2995
|
+
update!(**args)
|
2996
|
+
end
|
2997
|
+
|
2998
|
+
# Update properties of this object
|
2999
|
+
def update!(**args)
|
2633
3000
|
@version = args[:version] if args.key?(:version)
|
2634
3001
|
end
|
2635
3002
|
end
|
2636
3003
|
|
2637
|
-
#
|
2638
|
-
class
|
3004
|
+
# Deprecated. No effect.
|
3005
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceFrame
|
2639
3006
|
include Google::Apis::Core::Hashable
|
2640
3007
|
|
2641
|
-
#
|
2642
|
-
#
|
2643
|
-
#
|
2644
|
-
|
3008
|
+
# Normalized Bounding boxes in a frame. There can be more than one boxes if the
|
3009
|
+
# same face is detected in multiple locations within the current frame.
|
3010
|
+
# Corresponds to the JSON property `normalizedBoundingBoxes`
|
3011
|
+
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox>]
|
3012
|
+
attr_accessor :normalized_bounding_boxes
|
2645
3013
|
|
2646
3014
|
# Time-offset, relative to the beginning of the video, corresponding to the
|
2647
3015
|
# video frame for this location.
|
@@ -2655,19 +3023,37 @@ module Google
|
|
2655
3023
|
|
2656
3024
|
# Update properties of this object
|
2657
3025
|
def update!(**args)
|
2658
|
-
@
|
3026
|
+
@normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
|
2659
3027
|
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
2660
3028
|
end
|
2661
3029
|
end
|
2662
3030
|
|
3031
|
+
# Video segment level annotation results for face detection.
|
3032
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceSegment
|
3033
|
+
include Google::Apis::Core::Hashable
|
3034
|
+
|
3035
|
+
# Video segment.
|
3036
|
+
# Corresponds to the JSON property `segment`
|
3037
|
+
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
|
3038
|
+
attr_accessor :segment
|
3039
|
+
|
3040
|
+
def initialize(**args)
|
3041
|
+
update!(**args)
|
3042
|
+
end
|
3043
|
+
|
3044
|
+
# Update properties of this object
|
3045
|
+
def update!(**args)
|
3046
|
+
@segment = args[:segment] if args.key?(:segment)
|
3047
|
+
end
|
3048
|
+
end
|
3049
|
+
|
2663
3050
|
# Label annotation.
|
2664
3051
|
class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
|
2665
3052
|
include Google::Apis::Core::Hashable
|
2666
3053
|
|
2667
|
-
# Common categories for the detected entity.
|
2668
|
-
#
|
2669
|
-
#
|
2670
|
-
# also be a `pet`.
|
3054
|
+
# Common categories for the detected entity. For example, when the label is `
|
3055
|
+
# Terrier`, the category is likely `dog`. And in some cases there might be more
|
3056
|
+
# than one categories e.g., `Terrier` could also be a `pet`.
|
2671
3057
|
# Corresponds to the JSON property `categoryEntities`
|
2672
3058
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity>]
|
2673
3059
|
attr_accessor :category_entities
|
@@ -2766,14 +3152,14 @@ module Google
|
|
2766
3152
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity]
|
2767
3153
|
attr_accessor :entity
|
2768
3154
|
|
2769
|
-
# All video segments where the recognized logo appears. There might be
|
2770
|
-
#
|
3155
|
+
# All video segments where the recognized logo appears. There might be multiple
|
3156
|
+
# instances of the same logo class appearing in one VideoSegment.
|
2771
3157
|
# Corresponds to the JSON property `segments`
|
2772
3158
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
|
2773
3159
|
attr_accessor :segments
|
2774
3160
|
|
2775
|
-
# All logo tracks where the recognized logo appears. Each track corresponds
|
2776
|
-
#
|
3161
|
+
# All logo tracks where the recognized logo appears. Each track corresponds to
|
3162
|
+
# one logo instance appearing in consecutive frames.
|
2777
3163
|
# Corresponds to the JSON property `tracks`
|
2778
3164
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Track>]
|
2779
3165
|
attr_accessor :tracks
|
@@ -2790,9 +3176,8 @@ module Google
|
|
2790
3176
|
end
|
2791
3177
|
end
|
2792
3178
|
|
2793
|
-
# Normalized bounding box.
|
2794
|
-
#
|
2795
|
-
# Range: [0, 1].
|
3179
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
3180
|
+
# original image. Range: [0, 1].
|
2796
3181
|
class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
|
2797
3182
|
include Google::Apis::Core::Hashable
|
2798
3183
|
|
@@ -2830,20 +3215,12 @@ module Google
|
|
2830
3215
|
end
|
2831
3216
|
|
2832
3217
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
2833
|
-
# Contains list of the corner points in clockwise order starting from
|
2834
|
-
#
|
2835
|
-
#
|
2836
|
-
#
|
2837
|
-
#
|
2838
|
-
#
|
2839
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
2840
|
-
# becomes:
|
2841
|
-
# 2----3
|
2842
|
-
# | |
|
2843
|
-
# 1----0
|
2844
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
2845
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
2846
|
-
# the box.
|
3218
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
3219
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
3220
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
3221
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
3222
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
3223
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
2847
3224
|
class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
|
2848
3225
|
include Google::Apis::Core::Hashable
|
2849
3226
|
|
@@ -2862,9 +3239,8 @@ module Google
|
|
2862
3239
|
end
|
2863
3240
|
end
|
2864
3241
|
|
2865
|
-
# A vertex represents a 2D point in the image.
|
2866
|
-
#
|
2867
|
-
# and range from 0 to 1.
|
3242
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
3243
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
2868
3244
|
class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
|
2869
3245
|
include Google::Apis::Core::Hashable
|
2870
3246
|
|
@@ -2903,10 +3279,10 @@ module Google
|
|
2903
3279
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity]
|
2904
3280
|
attr_accessor :entity
|
2905
3281
|
|
2906
|
-
# Information corresponding to all frames where this object track appears.
|
2907
|
-
#
|
2908
|
-
#
|
2909
|
-
#
|
3282
|
+
# Information corresponding to all frames where this object track appears. Non-
|
3283
|
+
# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
|
3284
|
+
# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
|
3285
|
+
# frames.
|
2910
3286
|
# Corresponds to the JSON property `frames`
|
2911
3287
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
|
2912
3288
|
attr_accessor :frames
|
@@ -2916,12 +3292,11 @@ module Google
|
|
2916
3292
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
|
2917
3293
|
attr_accessor :segment
|
2918
3294
|
|
2919
|
-
# Streaming mode ONLY.
|
2920
|
-
#
|
2921
|
-
#
|
2922
|
-
#
|
2923
|
-
#
|
2924
|
-
# ObjectTrackAnnotation of the same track_id over time.
|
3295
|
+
# Streaming mode ONLY. In streaming mode, we do not know the end time of a
|
3296
|
+
# tracked object before it is completed. Hence, there is no VideoSegment info
|
3297
|
+
# returned. Instead, we provide a unique identifiable integer track_id so that
|
3298
|
+
# the customers can correlate the results of the ongoing ObjectTrackAnnotation
|
3299
|
+
# of the same track_id over time.
|
2925
3300
|
# Corresponds to the JSON property `trackId`
|
2926
3301
|
# @return [Fixnum]
|
2927
3302
|
attr_accessor :track_id
|
@@ -2951,9 +3326,8 @@ module Google
|
|
2951
3326
|
class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
|
2952
3327
|
include Google::Apis::Core::Hashable
|
2953
3328
|
|
2954
|
-
# Normalized bounding box.
|
2955
|
-
#
|
2956
|
-
# Range: [0, 1].
|
3329
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
3330
|
+
# original image. Range: [0, 1].
|
2957
3331
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
2958
3332
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
|
2959
3333
|
attr_accessor :normalized_bounding_box
|
@@ -2974,16 +3348,41 @@ module Google
|
|
2974
3348
|
end
|
2975
3349
|
end
|
2976
3350
|
|
3351
|
+
# Person detection annotation per video.
|
3352
|
+
class GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation
|
3353
|
+
include Google::Apis::Core::Hashable
|
3354
|
+
|
3355
|
+
# The detected tracks of a person.
|
3356
|
+
# Corresponds to the JSON property `tracks`
|
3357
|
+
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Track>]
|
3358
|
+
attr_accessor :tracks
|
3359
|
+
|
3360
|
+
# Feature version.
|
3361
|
+
# Corresponds to the JSON property `version`
|
3362
|
+
# @return [String]
|
3363
|
+
attr_accessor :version
|
3364
|
+
|
3365
|
+
def initialize(**args)
|
3366
|
+
update!(**args)
|
3367
|
+
end
|
3368
|
+
|
3369
|
+
# Update properties of this object
|
3370
|
+
def update!(**args)
|
3371
|
+
@tracks = args[:tracks] if args.key?(:tracks)
|
3372
|
+
@version = args[:version] if args.key?(:version)
|
3373
|
+
end
|
3374
|
+
end
|
3375
|
+
|
2977
3376
|
# Alternative hypotheses (a.k.a. n-best list).
|
2978
3377
|
class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative
|
2979
3378
|
include Google::Apis::Core::Hashable
|
2980
3379
|
|
2981
3380
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
2982
3381
|
# indicates an estimated greater likelihood that the recognized words are
|
2983
|
-
# correct. This field is set only for the top alternative.
|
2984
|
-
#
|
2985
|
-
#
|
2986
|
-
#
|
3382
|
+
# correct. This field is set only for the top alternative. This field is not
|
3383
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
3384
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
3385
|
+
# not set.
|
2987
3386
|
# Corresponds to the JSON property `confidence`
|
2988
3387
|
# @return [Float]
|
2989
3388
|
attr_accessor :confidence
|
@@ -2994,8 +3393,8 @@ module Google
|
|
2994
3393
|
attr_accessor :transcript
|
2995
3394
|
|
2996
3395
|
# Output only. A list of word-specific information for each recognized word.
|
2997
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
2998
|
-
#
|
3396
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
3397
|
+
# words from the beginning of the audio.
|
2999
3398
|
# Corresponds to the JSON property `words`
|
3000
3399
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
|
3001
3400
|
attr_accessor :words
|
@@ -3016,18 +3415,17 @@ module Google
|
|
3016
3415
|
class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
|
3017
3416
|
include Google::Apis::Core::Hashable
|
3018
3417
|
|
3019
|
-
# May contain one or more recognition hypotheses (up to the maximum specified
|
3020
|
-
#
|
3021
|
-
#
|
3022
|
-
#
|
3418
|
+
# May contain one or more recognition hypotheses (up to the maximum specified in
|
3419
|
+
# `max_alternatives`). These alternatives are ordered in terms of accuracy, with
|
3420
|
+
# the top (first) alternative being the most probable, as ranked by the
|
3421
|
+
# recognizer.
|
3023
3422
|
# Corresponds to the JSON property `alternatives`
|
3024
3423
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
|
3025
3424
|
attr_accessor :alternatives
|
3026
3425
|
|
3027
3426
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
3028
|
-
# language tag of
|
3029
|
-
#
|
3030
|
-
# most likelihood of being spoken in the audio.
|
3427
|
+
# language tag of the language in this result. This language code was detected
|
3428
|
+
# to have the most likelihood of being spoken in the audio.
|
3031
3429
|
# Corresponds to the JSON property `languageCode`
|
3032
3430
|
# @return [String]
|
3033
3431
|
attr_accessor :language_code
|
@@ -3076,27 +3474,19 @@ module Google
|
|
3076
3474
|
end
|
3077
3475
|
end
|
3078
3476
|
|
3079
|
-
# Video frame level annotation results for text annotation (OCR).
|
3080
|
-
#
|
3081
|
-
#
|
3477
|
+
# Video frame level annotation results for text annotation (OCR). Contains
|
3478
|
+
# information regarding timestamp and bounding box locations for the frames
|
3479
|
+
# containing detected OCR text snippets.
|
3082
3480
|
class GoogleCloudVideointelligenceV1p1beta1TextFrame
|
3083
3481
|
include Google::Apis::Core::Hashable
|
3084
3482
|
|
3085
3483
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
3086
|
-
# Contains list of the corner points in clockwise order starting from
|
3087
|
-
#
|
3088
|
-
#
|
3089
|
-
#
|
3090
|
-
#
|
3091
|
-
#
|
3092
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
3093
|
-
# becomes:
|
3094
|
-
# 2----3
|
3095
|
-
# | |
|
3096
|
-
# 1----0
|
3097
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
3098
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
3099
|
-
# the box.
|
3484
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
3485
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
3486
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
3487
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
3488
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
3489
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
3100
3490
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
3101
3491
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
|
3102
3492
|
attr_accessor :rotated_bounding_box
|
@@ -3149,9 +3539,8 @@ module Google
|
|
3149
3539
|
end
|
3150
3540
|
end
|
3151
3541
|
|
3152
|
-
# For tracking related features.
|
3153
|
-
#
|
3154
|
-
# normalized_bounding_box.
|
3542
|
+
# For tracking related features. An object at time_offset with attributes, and
|
3543
|
+
# located with normalized_bounding_box.
|
3155
3544
|
class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
|
3156
3545
|
include Google::Apis::Core::Hashable
|
3157
3546
|
|
@@ -3165,15 +3554,14 @@ module Google
|
|
3165
3554
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
|
3166
3555
|
attr_accessor :landmarks
|
3167
3556
|
|
3168
|
-
# Normalized bounding box.
|
3169
|
-
#
|
3170
|
-
# Range: [0, 1].
|
3557
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
3558
|
+
# original image. Range: [0, 1].
|
3171
3559
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
3172
3560
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
|
3173
3561
|
attr_accessor :normalized_bounding_box
|
3174
3562
|
|
3175
|
-
# Time-offset, relative to the beginning of the video,
|
3176
|
-
#
|
3563
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
3564
|
+
# video frame for this object.
|
3177
3565
|
# Corresponds to the JSON property `timeOffset`
|
3178
3566
|
# @return [String]
|
3179
3567
|
attr_accessor :time_offset
|
@@ -3232,20 +3620,19 @@ module Google
|
|
3232
3620
|
class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
|
3233
3621
|
include Google::Apis::Core::Hashable
|
3234
3622
|
|
3235
|
-
# Specifies which feature is being tracked if the request contains more than
|
3236
|
-
#
|
3623
|
+
# Specifies which feature is being tracked if the request contains more than one
|
3624
|
+
# feature.
|
3237
3625
|
# Corresponds to the JSON property `feature`
|
3238
3626
|
# @return [String]
|
3239
3627
|
attr_accessor :feature
|
3240
3628
|
|
3241
|
-
# Video file location in
|
3242
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
3629
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
3243
3630
|
# Corresponds to the JSON property `inputUri`
|
3244
3631
|
# @return [String]
|
3245
3632
|
attr_accessor :input_uri
|
3246
3633
|
|
3247
|
-
# Approximate percentage processed thus far. Guaranteed to be
|
3248
|
-
#
|
3634
|
+
# Approximate percentage processed thus far. Guaranteed to be 100 when fully
|
3635
|
+
# processed.
|
3249
3636
|
# Corresponds to the JSON property `progressPercent`
|
3250
3637
|
# @return [Fixnum]
|
3251
3638
|
attr_accessor :progress_percent
|
@@ -3284,31 +3671,40 @@ module Google
  class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
  include Google::Apis::Core::Hashable

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
  attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  # Corresponds to the JSON property `explicitAnnotation`
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
  attr_accessor :explicit_annotation

- #
- #
+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1FaceAnnotation>]
+ attr_accessor :face_annotations
+
+ # Face detection annotations.
+ # Corresponds to the JSON property `faceDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation>]
+ attr_accessor :face_detection_annotations
+
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
  # Corresponds to the JSON property `frameLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
  attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri
@@ -3323,6 +3719,11 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation>]
  attr_accessor :object_annotations

+ # Person detection annotations.
+ # Corresponds to the JSON property `personDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation>]
+ attr_accessor :person_detection_annotations
+
  # Video segment.
  # Corresponds to the JSON property `segment`
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
@@ -3335,11 +3736,11 @@ module Google
|
|
3335
3736
|
attr_accessor :segment_label_annotations
|
3336
3737
|
|
3337
3738
|
# Presence label annotations on video level or user-specified segment level.
|
3338
|
-
# There is exactly one element for each unique label. Compared to the
|
3339
|
-
#
|
3340
|
-
#
|
3341
|
-
#
|
3342
|
-
#
|
3739
|
+
# There is exactly one element for each unique label. Compared to the existing
|
3740
|
+
# topical `segment_label_annotations`, this field presents more fine-grained,
|
3741
|
+
# segment-level labels detected in video content and is made available only when
|
3742
|
+
# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
|
3743
|
+
# request.
|
3343
3744
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
3344
3745
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
3345
3746
|
attr_accessor :segment_presence_label_annotations
|
@@ -3349,17 +3750,17 @@ module Google
|
|
3349
3750
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
|
3350
3751
|
attr_accessor :shot_annotations
|
3351
3752
|
|
3352
|
-
# Topical label annotations on shot level.
|
3353
|
-
#
|
3753
|
+
# Topical label annotations on shot level. There is exactly one element for each
|
3754
|
+
# unique label.
|
3354
3755
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
3355
3756
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
3356
3757
|
attr_accessor :shot_label_annotations
|
3357
3758
|
|
3358
3759
|
# Presence label annotations on shot level. There is exactly one element for
|
3359
|
-
# each unique label. Compared to the existing topical
|
3360
|
-
#
|
3361
|
-
#
|
3362
|
-
#
|
3760
|
+
# each unique label. Compared to the existing topical `shot_label_annotations`,
|
3761
|
+
# this field presents more fine-grained, shot-level labels detected in video
|
3762
|
+
# content and is made available only when the client sets `LabelDetectionConfig.
|
3763
|
+
# model` to "builtin/latest" in the request.
|
3363
3764
|
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
|
3364
3765
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
3365
3766
|
attr_accessor :shot_presence_label_annotations
|
@@ -3369,9 +3770,8 @@ module Google
|
|
3369
3770
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
|
3370
3771
|
attr_accessor :speech_transcriptions
|
3371
3772
|
|
3372
|
-
# OCR text detection and tracking.
|
3373
|
-
#
|
3374
|
-
# frame information associated with it.
|
3773
|
+
# OCR text detection and tracking. Annotations for list of detected text
|
3774
|
+
# snippets. Each will have list of frame information associated with it.
|
3375
3775
|
# Corresponds to the JSON property `textAnnotations`
|
3376
3776
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
|
3377
3777
|
attr_accessor :text_annotations
|
@@ -3384,10 +3784,13 @@ module Google
  def update!(**args)
  @error = args[:error] if args.key?(:error)
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
  @input_uri = args[:input_uri] if args.key?(:input_uri)
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
  @segment = args[:segment] if args.key?(:segment)
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -3403,14 +3806,14 @@ module Google
|
|
3403
3806
|
class GoogleCloudVideointelligenceV1p1beta1VideoSegment
|
3404
3807
|
include Google::Apis::Core::Hashable
|
3405
3808
|
|
3406
|
-
# Time-offset, relative to the beginning of the video,
|
3407
|
-
#
|
3809
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
3810
|
+
# of the segment (inclusive).
|
3408
3811
|
# Corresponds to the JSON property `endTimeOffset`
|
3409
3812
|
# @return [String]
|
3410
3813
|
attr_accessor :end_time_offset
|
3411
3814
|
|
3412
|
-
# Time-offset, relative to the beginning of the video,
|
3413
|
-
#
|
3815
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
3816
|
+
# start of the segment (inclusive).
|
3414
3817
|
# Corresponds to the JSON property `startTimeOffset`
|
3415
3818
|
# @return [String]
|
3416
3819
|
attr_accessor :start_time_offset
|
@@ -3427,41 +3830,41 @@ module Google
|
|
3427
3830
|
end
|
3428
3831
|
|
3429
3832
|
# Word-specific information for recognized words. Word information is only
|
3430
|
-
# included in the response when certain request parameters are set, such
|
3431
|
-
#
|
3833
|
+
# included in the response when certain request parameters are set, such as `
|
3834
|
+
# enable_word_time_offsets`.
|
3432
3835
|
class GoogleCloudVideointelligenceV1p1beta1WordInfo
|
3433
3836
|
include Google::Apis::Core::Hashable
|
3434
3837
|
|
3435
3838
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
3436
3839
|
# indicates an estimated greater likelihood that the recognized words are
|
3437
|
-
# correct. This field is set only for the top alternative.
|
3438
|
-
#
|
3439
|
-
#
|
3440
|
-
#
|
3840
|
+
# correct. This field is set only for the top alternative. This field is not
|
3841
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
3842
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
3843
|
+
# not set.
|
3441
3844
|
# Corresponds to the JSON property `confidence`
|
3442
3845
|
# @return [Float]
|
3443
3846
|
attr_accessor :confidence
|
3444
3847
|
|
3445
|
-
# Time offset relative to the beginning of the audio, and
|
3446
|
-
#
|
3447
|
-
#
|
3448
|
-
#
|
3848
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
3849
|
+
# end of the spoken word. This field is only set if `enable_word_time_offsets=
|
3850
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
3851
|
+
# accuracy of the time offset can vary.
|
3449
3852
|
# Corresponds to the JSON property `endTime`
|
3450
3853
|
# @return [String]
|
3451
3854
|
attr_accessor :end_time
|
3452
3855
|
|
3453
|
-
# Output only. A distinct integer value is assigned for every speaker within
|
3454
|
-
#
|
3455
|
-
#
|
3456
|
-
#
|
3856
|
+
# Output only. A distinct integer value is assigned for every speaker within the
|
3857
|
+
# audio. This field specifies which one of those speakers was detected to have
|
3858
|
+
# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
|
3859
|
+
# only set if speaker diarization is enabled.
|
3457
3860
|
# Corresponds to the JSON property `speakerTag`
|
3458
3861
|
# @return [Fixnum]
|
3459
3862
|
attr_accessor :speaker_tag
|
3460
3863
|
|
3461
|
-
# Time offset relative to the beginning of the audio, and
|
3462
|
-
#
|
3463
|
-
#
|
3464
|
-
#
|
3864
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
3865
|
+
# start of the spoken word. This field is only set if `enable_word_time_offsets=
|
3866
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
3867
|
+
# accuracy of the time offset can vary.
|
3465
3868
|
# Corresponds to the JSON property `startTime`
|
3466
3869
|
# @return [String]
|
3467
3870
|
attr_accessor :start_time
|
@@ -3485,9 +3888,9 @@ module Google
|
|
3485
3888
|
end
|
3486
3889
|
end
|
3487
3890
|
|
3488
|
-
# Video annotation progress. Included in the `metadata`
|
3489
|
-
#
|
3490
|
-
#
|
3891
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
3892
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
3893
|
+
# service.
|
3491
3894
|
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
|
3492
3895
|
include Google::Apis::Core::Hashable
|
3493
3896
|
|
@@ -3506,9 +3909,9 @@ module Google
|
|
3506
3909
|
end
|
3507
3910
|
end
|
3508
3911
|
|
3509
|
-
# Video annotation response. Included in the `response`
|
3510
|
-
#
|
3511
|
-
#
|
3912
|
+
# Video annotation response. Included in the `response` field of the `Operation`
|
3913
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
3914
|
+
# service.
|
3512
3915
|
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
|
3513
3916
|
include Google::Apis::Core::Hashable
|
3514
3917
|
|
@@ -3536,14 +3939,14 @@ module Google
|
|
3536
3939
|
# @return [Float]
|
3537
3940
|
attr_accessor :confidence
|
3538
3941
|
|
3539
|
-
# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
|
3540
|
-
#
|
3942
|
+
# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
|
3943
|
+
# full list of supported type names will be provided in the document.
|
3541
3944
|
# Corresponds to the JSON property `name`
|
3542
3945
|
# @return [String]
|
3543
3946
|
attr_accessor :name
|
3544
3947
|
|
3545
|
-
# Text value of the detection result. For example, the value for "HairColor"
|
3546
|
-
#
|
3948
|
+
# Text value of the detection result. For example, the value for "HairColor" can
|
3949
|
+
# be "black", "blonde", etc.
|
3547
3950
|
# Corresponds to the JSON property `value`
|
3548
3951
|
# @return [String]
|
3549
3952
|
attr_accessor :value
|
@@ -3575,9 +3978,8 @@ module Google
|
|
3575
3978
|
# @return [String]
|
3576
3979
|
attr_accessor :name
|
3577
3980
|
|
3578
|
-
# A vertex represents a 2D point in the image.
|
3579
|
-
#
|
3580
|
-
# and range from 0 to 1.
|
3981
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
3982
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
3581
3983
|
# Corresponds to the JSON property `point`
|
3582
3984
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
|
3583
3985
|
attr_accessor :point
|
@@ -3603,8 +4005,7 @@ module Google
|
|
3603
4005
|
# @return [String]
|
3604
4006
|
attr_accessor :description
|
3605
4007
|
|
3606
|
-
# Opaque entity ID. Some IDs may be available in
|
3607
|
-
# [Google Knowledge Graph Search
|
4008
|
+
# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
|
3608
4009
|
# API](https://developers.google.com/knowledge-graph/).
|
3609
4010
|
# Corresponds to the JSON property `entityId`
|
3610
4011
|
# @return [String]
|
@@ -3627,9 +4028,9 @@ module Google
|
|
3627
4028
|
end
|
3628
4029
|
end
|
3629
4030
|
|
3630
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
3631
|
-
#
|
3632
|
-
#
|
4031
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
4032
|
+
# explicit content has been detected in a frame, no annotations are present for
|
4033
|
+
# that frame.
|
3633
4034
|
class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
|
3634
4035
|
include Google::Apis::Core::Hashable
|
3635
4036
|
|
@@ -3680,14 +4081,110 @@ module Google
  end
  end

+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p2beta1FaceAnnotation
+ include Google::Apis::Core::Hashable
+
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
+ # Corresponds to the JSON property `thumbnail`
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
+ # @return [String]
+ attr_accessor :thumbnail
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p2beta1FaceFrame
+ include Google::Apis::Core::Hashable
+
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox>]
+ attr_accessor :normalized_bounding_boxes
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1p2beta1FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ # Corresponds to the JSON property `segment`
+ # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
+ attr_accessor :segment
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @segment = args[:segment] if args.key?(:segment)
+ end
+ end
+
  # Label annotation.
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
  include Google::Apis::Core::Hashable

- # Common categories for the detected entity.
- #
- #
- # also be a `pet`.
+ # Common categories for the detected entity. For example, when the label is `
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
+ # than one categories e.g., `Terrier` could also be a `pet`.
  # Corresponds to the JSON property `categoryEntities`
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Entity>]
  attr_accessor :category_entities
@@ -3786,14 +4283,14 @@ module Google
|
|
3786
4283
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Entity]
|
3787
4284
|
attr_accessor :entity
|
3788
4285
|
|
3789
|
-
# All video segments where the recognized logo appears. There might be
|
3790
|
-
#
|
4286
|
+
# All video segments where the recognized logo appears. There might be multiple
|
4287
|
+
# instances of the same logo class appearing in one VideoSegment.
|
3791
4288
|
# Corresponds to the JSON property `segments`
|
3792
4289
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
|
3793
4290
|
attr_accessor :segments
|
3794
4291
|
|
3795
|
-
# All logo tracks where the recognized logo appears. Each track corresponds
|
3796
|
-
#
|
4292
|
+
# All logo tracks where the recognized logo appears. Each track corresponds to
|
4293
|
+
# one logo instance appearing in consecutive frames.
|
3797
4294
|
# Corresponds to the JSON property `tracks`
|
3798
4295
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Track>]
|
3799
4296
|
attr_accessor :tracks
|
@@ -3810,9 +4307,8 @@ module Google
|
|
3810
4307
|
end
|
3811
4308
|
end
|
3812
4309
|
|
3813
|
-
# Normalized bounding box.
|
3814
|
-
#
|
3815
|
-
# Range: [0, 1].
|
4310
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
4311
|
+
# original image. Range: [0, 1].
|
3816
4312
|
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
|
3817
4313
|
include Google::Apis::Core::Hashable
|
3818
4314
|
|
@@ -3850,20 +4346,12 @@ module Google
|
|
3850
4346
|
end
|
3851
4347
|
|
3852
4348
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
3853
|
-
# Contains list of the corner points in clockwise order starting from
|
3854
|
-
#
|
3855
|
-
#
|
3856
|
-
#
|
3857
|
-
#
|
3858
|
-
#
|
3859
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
3860
|
-
# becomes:
|
3861
|
-
# 2----3
|
3862
|
-
# | |
|
3863
|
-
# 1----0
|
3864
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
3865
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
3866
|
-
# the box.
|
4349
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
4350
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
4351
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
4352
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
4353
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
4354
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
3867
4355
|
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
|
3868
4356
|
include Google::Apis::Core::Hashable
|
3869
4357
|
|
@@ -3882,9 +4370,8 @@ module Google
|
|
3882
4370
|
end
|
3883
4371
|
end
|
3884
4372
|
|
3885
|
-
# A vertex represents a 2D point in the image.
|
3886
|
-
#
|
3887
|
-
# and range from 0 to 1.
|
4373
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
4374
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
3888
4375
|
class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
|
3889
4376
|
include Google::Apis::Core::Hashable
|
3890
4377
|
|
@@ -3923,10 +4410,10 @@ module Google
|
|
3923
4410
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Entity]
|
3924
4411
|
attr_accessor :entity
|
3925
4412
|
|
3926
|
-
# Information corresponding to all frames where this object track appears.
|
3927
|
-
#
|
3928
|
-
#
|
3929
|
-
#
|
4413
|
+
# Information corresponding to all frames where this object track appears. Non-
|
4414
|
+
# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
|
4415
|
+
# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
|
4416
|
+
# frames.
|
3930
4417
|
# Corresponds to the JSON property `frames`
|
3931
4418
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
|
3932
4419
|
attr_accessor :frames
|
@@ -3936,12 +4423,11 @@ module Google
|
|
3936
4423
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
|
3937
4424
|
attr_accessor :segment
|
3938
4425
|
|
3939
|
-
# Streaming mode ONLY.
|
3940
|
-
#
|
3941
|
-
#
|
3942
|
-
#
|
3943
|
-
#
|
3944
|
-
# ObjectTrackAnnotation of the same track_id over time.
|
4426
|
+
# Streaming mode ONLY. In streaming mode, we do not know the end time of a
|
4427
|
+
# tracked object before it is completed. Hence, there is no VideoSegment info
|
4428
|
+
# returned. Instead, we provide a unique identifiable integer track_id so that
|
4429
|
+
# the customers can correlate the results of the ongoing ObjectTrackAnnotation
|
4430
|
+
# of the same track_id over time.
|
3945
4431
|
# Corresponds to the JSON property `trackId`
|
3946
4432
|
# @return [Fixnum]
|
3947
4433
|
attr_accessor :track_id
|
@@ -3971,9 +4457,8 @@ module Google
|
|
3971
4457
|
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
|
3972
4458
|
include Google::Apis::Core::Hashable
|
3973
4459
|
|
3974
|
-
# Normalized bounding box.
|
3975
|
-
#
|
3976
|
-
# Range: [0, 1].
|
4460
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
4461
|
+
# original image. Range: [0, 1].
|
3977
4462
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
3978
4463
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
|
3979
4464
|
attr_accessor :normalized_bounding_box
|
@@ -3994,16 +4479,41 @@ module Google
  end
  end

+ # Person detection annotation per video.
+ class GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # The detected tracks of a person.
+ # Corresponds to the JSON property `tracks`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1Track>]
+ attr_accessor :tracks
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @tracks = args[:tracks] if args.key?(:tracks)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
  # Alternative hypotheses (a.k.a. n-best list).
  class GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative
  include Google::Apis::Core::Hashable

  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
  # Corresponds to the JSON property `confidence`
  # @return [Float]
  attr_accessor :confidence
@@ -4014,8 +4524,8 @@ module Google
|
|
4014
4524
|
attr_accessor :transcript
|
4015
4525
|
|
4016
4526
|
# Output only. A list of word-specific information for each recognized word.
|
4017
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
4018
|
-
#
|
4527
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
4528
|
+
# words from the beginning of the audio.
|
4019
4529
|
# Corresponds to the JSON property `words`
|
4020
4530
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
|
4021
4531
|
attr_accessor :words
|
@@ -4036,18 +4546,17 @@ module Google
|
|
4036
4546
|
class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
|
4037
4547
|
include Google::Apis::Core::Hashable
|
4038
4548
|
|
4039
|
-
# May contain one or more recognition hypotheses (up to the maximum specified
|
4040
|
-
#
|
4041
|
-
#
|
4042
|
-
#
|
4549
|
+
# May contain one or more recognition hypotheses (up to the maximum specified in
|
4550
|
+
# `max_alternatives`). These alternatives are ordered in terms of accuracy, with
|
4551
|
+
# the top (first) alternative being the most probable, as ranked by the
|
4552
|
+
# recognizer.
|
4043
4553
|
# Corresponds to the JSON property `alternatives`
|
4044
4554
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
|
4045
4555
|
attr_accessor :alternatives
|
4046
4556
|
|
4047
4557
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
4048
|
-
# language tag of
|
4049
|
-
#
|
4050
|
-
# most likelihood of being spoken in the audio.
|
4558
|
+
# language tag of the language in this result. This language code was detected
|
4559
|
+
# to have the most likelihood of being spoken in the audio.
|
4051
4560
|
# Corresponds to the JSON property `languageCode`
|
4052
4561
|
# @return [String]
|
4053
4562
|
attr_accessor :language_code
|
@@ -4096,27 +4605,19 @@ module Google
|
|
4096
4605
|
end
|
4097
4606
|
end
|
4098
4607
|
|
4099
|
-
# Video frame level annotation results for text annotation (OCR).
|
4100
|
-
#
|
4101
|
-
#
|
4608
|
+
# Video frame level annotation results for text annotation (OCR). Contains
|
4609
|
+
# information regarding timestamp and bounding box locations for the frames
|
4610
|
+
# containing detected OCR text snippets.
|
4102
4611
|
class GoogleCloudVideointelligenceV1p2beta1TextFrame
|
4103
4612
|
include Google::Apis::Core::Hashable
|
4104
4613
|
|
4105
4614
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
4106
|
-
# Contains list of the corner points in clockwise order starting from
|
4107
|
-
#
|
4108
|
-
#
|
4109
|
-
#
|
4110
|
-
#
|
4111
|
-
#
|
4112
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
4113
|
-
# becomes:
|
4114
|
-
# 2----3
|
4115
|
-
# | |
|
4116
|
-
# 1----0
|
4117
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
4118
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
4119
|
-
# the box.
|
4615
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
4616
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
4617
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
4618
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
4619
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
4620
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
4120
4621
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
4121
4622
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
|
4122
4623
|
attr_accessor :rotated_bounding_box
|
@@ -4169,9 +4670,8 @@ module Google
|
|
4169
4670
|
end
|
4170
4671
|
end
|
4171
4672
|
|
4172
|
-
# For tracking related features.
|
4173
|
-
#
|
4174
|
-
# normalized_bounding_box.
|
4673
|
+
# For tracking related features. An object at time_offset with attributes, and
|
4674
|
+
# located with normalized_bounding_box.
|
4175
4675
|
class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
|
4176
4676
|
include Google::Apis::Core::Hashable
|
4177
4677
|
|
@@ -4185,15 +4685,14 @@ module Google
|
|
4185
4685
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
|
4186
4686
|
attr_accessor :landmarks
|
4187
4687
|
|
4188
|
-
# Normalized bounding box.
|
4189
|
-
#
|
4190
|
-
# Range: [0, 1].
|
4688
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
4689
|
+
# original image. Range: [0, 1].
|
4191
4690
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
4192
4691
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
|
4193
4692
|
attr_accessor :normalized_bounding_box
|
4194
4693
|
|
4195
|
-
# Time-offset, relative to the beginning of the video,
|
4196
|
-
#
|
4694
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
4695
|
+
# video frame for this object.
|
4197
4696
|
# Corresponds to the JSON property `timeOffset`
|
4198
4697
|
# @return [String]
|
4199
4698
|
attr_accessor :time_offset
|
@@ -4252,20 +4751,19 @@ module Google
|
|
4252
4751
|
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
|
4253
4752
|
include Google::Apis::Core::Hashable
|
4254
4753
|
|
4255
|
-
# Specifies which feature is being tracked if the request contains more than
|
4256
|
-
#
|
4754
|
+
# Specifies which feature is being tracked if the request contains more than one
|
4755
|
+
# feature.
|
4257
4756
|
# Corresponds to the JSON property `feature`
|
4258
4757
|
# @return [String]
|
4259
4758
|
attr_accessor :feature
|
4260
4759
|
|
4261
|
-
# Video file location in
|
4262
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
4760
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
4263
4761
|
# Corresponds to the JSON property `inputUri`
|
4264
4762
|
# @return [String]
|
4265
4763
|
attr_accessor :input_uri
|
4266
4764
|
|
4267
|
-
# Approximate percentage processed thus far. Guaranteed to be
|
4268
|
-
#
|
4765
|
+
# Approximate percentage processed thus far. Guaranteed to be 100 when fully
|
4766
|
+
# processed.
|
4269
4767
|
# Corresponds to the JSON property `progressPercent`
|
4270
4768
|
# @return [Fixnum]
|
4271
4769
|
attr_accessor :progress_percent
|
@@ -4304,31 +4802,40 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
  include Google::Apis::Core::Hashable

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
  attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  # Corresponds to the JSON property `explicitAnnotation`
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
  attr_accessor :explicit_annotation

- #
- #
+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1FaceAnnotation>]
+ attr_accessor :face_annotations
+
+ # Face detection annotations.
+ # Corresponds to the JSON property `faceDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation>]
+ attr_accessor :face_detection_annotations
+
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
  # Corresponds to the JSON property `frameLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
  attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri
@@ -4343,6 +4850,11 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation>]
  attr_accessor :object_annotations

+ # Person detection annotations.
+ # Corresponds to the JSON property `personDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation>]
+ attr_accessor :person_detection_annotations
+
  # Video segment.
  # Corresponds to the JSON property `segment`
  # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
@@ -4355,11 +4867,11 @@ module Google
|
|
4355
4867
|
attr_accessor :segment_label_annotations
|
4356
4868
|
|
4357
4869
|
# Presence label annotations on video level or user-specified segment level.
|
4358
|
-
# There is exactly one element for each unique label. Compared to the
|
4359
|
-
#
|
4360
|
-
#
|
4361
|
-
#
|
4362
|
-
#
|
4870
|
+
# There is exactly one element for each unique label. Compared to the existing
|
4871
|
+
# topical `segment_label_annotations`, this field presents more fine-grained,
|
4872
|
+
# segment-level labels detected in video content and is made available only when
|
4873
|
+
# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
|
4874
|
+
# request.
|
4363
4875
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
4364
4876
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
4365
4877
|
attr_accessor :segment_presence_label_annotations
|
@@ -4369,17 +4881,17 @@ module Google
|
|
4369
4881
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
|
4370
4882
|
attr_accessor :shot_annotations
|
4371
4883
|
|
4372
|
-
# Topical label annotations on shot level.
|
4373
|
-
#
|
4884
|
+
# Topical label annotations on shot level. There is exactly one element for each
|
4885
|
+
# unique label.
|
4374
4886
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
4375
4887
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
4376
4888
|
attr_accessor :shot_label_annotations
|
4377
4889
|
|
4378
4890
|
# Presence label annotations on shot level. There is exactly one element for
|
4379
|
-
# each unique label. Compared to the existing topical
|
4380
|
-
#
|
4381
|
-
#
|
4382
|
-
#
|
4891
|
+
# each unique label. Compared to the existing topical `shot_label_annotations`,
|
4892
|
+
# this field presents more fine-grained, shot-level labels detected in video
|
4893
|
+
# content and is made available only when the client sets `LabelDetectionConfig.
|
4894
|
+
# model` to "builtin/latest" in the request.
|
4383
4895
|
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
|
4384
4896
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
4385
4897
|
attr_accessor :shot_presence_label_annotations
|
@@ -4389,9 +4901,8 @@ module Google
|
|
4389
4901
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
|
4390
4902
|
attr_accessor :speech_transcriptions
|
4391
4903
|
|
4392
|
-
# OCR text detection and tracking.
|
4393
|
-
#
|
4394
|
-
# frame information associated with it.
|
4904
|
+
# OCR text detection and tracking. Annotations for list of detected text
|
4905
|
+
# snippets. Each will have list of frame information associated with it.
|
4395
4906
|
# Corresponds to the JSON property `textAnnotations`
|
4396
4907
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
|
4397
4908
|
attr_accessor :text_annotations
|
@@ -4404,10 +4915,13 @@ module Google
  def update!(**args)
  @error = args[:error] if args.key?(:error)
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
  @input_uri = args[:input_uri] if args.key?(:input_uri)
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
  @segment = args[:segment] if args.key?(:segment)
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -4423,14 +4937,14 @@ module Google
|
|
4423
4937
|
class GoogleCloudVideointelligenceV1p2beta1VideoSegment
|
4424
4938
|
include Google::Apis::Core::Hashable
|
4425
4939
|
|
4426
|
-
# Time-offset, relative to the beginning of the video,
|
4427
|
-
#
|
4940
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
4941
|
+
# of the segment (inclusive).
|
4428
4942
|
# Corresponds to the JSON property `endTimeOffset`
|
4429
4943
|
# @return [String]
|
4430
4944
|
attr_accessor :end_time_offset
|
4431
4945
|
|
4432
|
-
# Time-offset, relative to the beginning of the video,
|
4433
|
-
#
|
4946
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
4947
|
+
# start of the segment (inclusive).
|
4434
4948
|
# Corresponds to the JSON property `startTimeOffset`
|
4435
4949
|
# @return [String]
|
4436
4950
|
attr_accessor :start_time_offset
|
@@ -4447,41 +4961,41 @@ module Google
|
|
4447
4961
|
end
|
4448
4962
|
|
4449
4963
|
# Word-specific information for recognized words. Word information is only
|
4450
|
-
# included in the response when certain request parameters are set, such
|
4451
|
-
#
|
4964
|
+
# included in the response when certain request parameters are set, such as `
|
4965
|
+
# enable_word_time_offsets`.
|
4452
4966
|
class GoogleCloudVideointelligenceV1p2beta1WordInfo
|
4453
4967
|
include Google::Apis::Core::Hashable
|
4454
4968
|
|
4455
4969
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
4456
4970
|
# indicates an estimated greater likelihood that the recognized words are
|
4457
|
-
# correct. This field is set only for the top alternative.
|
4458
|
-
#
|
4459
|
-
#
|
4460
|
-
#
|
4971
|
+
# correct. This field is set only for the top alternative. This field is not
|
4972
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
4973
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
4974
|
+
# not set.
|
4461
4975
|
# Corresponds to the JSON property `confidence`
|
4462
4976
|
# @return [Float]
|
4463
4977
|
attr_accessor :confidence
|
4464
4978
|
|
4465
|
-
# Time offset relative to the beginning of the audio, and
|
4466
|
-
#
|
4467
|
-
#
|
4468
|
-
#
|
4979
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
4980
|
+
# end of the spoken word. This field is only set if `enable_word_time_offsets=
|
4981
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
4982
|
+
# accuracy of the time offset can vary.
|
4469
4983
|
# Corresponds to the JSON property `endTime`
|
4470
4984
|
# @return [String]
|
4471
4985
|
attr_accessor :end_time
|
4472
4986
|
|
4473
|
-
# Output only. A distinct integer value is assigned for every speaker within
|
4474
|
-
#
|
4475
|
-
#
|
4476
|
-
#
|
4987
|
+
# Output only. A distinct integer value is assigned for every speaker within the
|
4988
|
+
# audio. This field specifies which one of those speakers was detected to have
|
4989
|
+
# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
|
4990
|
+
# only set if speaker diarization is enabled.
|
4477
4991
|
# Corresponds to the JSON property `speakerTag`
|
4478
4992
|
# @return [Fixnum]
|
4479
4993
|
attr_accessor :speaker_tag
|
4480
4994
|
|
4481
|
-
# Time offset relative to the beginning of the audio, and
|
4482
|
-
#
|
4483
|
-
#
|
4484
|
-
#
|
4995
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
4996
|
+
# start of the spoken word. This field is only set if `enable_word_time_offsets=
|
4997
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
4998
|
+
# accuracy of the time offset can vary.
|
4485
4999
|
# Corresponds to the JSON property `startTime`
|
4486
5000
|
# @return [String]
|
4487
5001
|
attr_accessor :start_time
|
@@ -4505,9 +5019,9 @@ module Google
|
|
4505
5019
|
end
|
4506
5020
|
end
|
4507
5021
|
|
4508
|
-
# Video annotation progress. Included in the `metadata`
|
4509
|
-
#
|
4510
|
-
#
|
5022
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
5023
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
5024
|
+
# service.
|
4511
5025
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
|
4512
5026
|
include Google::Apis::Core::Hashable
|
4513
5027
|
|
@@ -4526,9 +5040,9 @@ module Google
|
|
4526
5040
|
end
|
4527
5041
|
end
|
4528
5042
|
|
4529
|
-
# Video annotation response. Included in the `response`
|
4530
|
-
#
|
4531
|
-
#
|
5043
|
+
# Video annotation response. Included in the `response` field of the `Operation`
|
5044
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
5045
|
+
# service.
|
4532
5046
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
|
4533
5047
|
include Google::Apis::Core::Hashable
|
4534
5048
|
|
@@ -4562,10 +5076,9 @@ module Google
|
|
4562
5076
|
# @return [String]
|
4563
5077
|
attr_accessor :display_name
|
4564
5078
|
|
4565
|
-
# The resource name of the celebrity. Have the format
|
4566
|
-
#
|
4567
|
-
#
|
4568
|
-
# celebrity.
|
5079
|
+
# The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
|
5080
|
+
# indicates a celebrity from preloaded gallery. kg-mid is the id in Google
|
5081
|
+
# knowledge graph, which is unique for the celebrity.
|
4569
5082
|
# Corresponds to the JSON property `name`
|
4570
5083
|
# @return [String]
|
4571
5084
|
attr_accessor :name
|
@@ -4586,8 +5099,8 @@ module Google
|
|
4586
5099
|
class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
|
4587
5100
|
include Google::Apis::Core::Hashable
|
4588
5101
|
|
4589
|
-
# The tracks detected from the input video, including recognized celebrities
|
4590
|
-
#
|
5102
|
+
# The tracks detected from the input video, including recognized celebrities and
|
5103
|
+
# other detected faces in the video.
|
4591
5104
|
# Corresponds to the JSON property `celebrityTracks`
|
4592
5105
|
# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
|
4593
5106
|
attr_accessor :celebrity_tracks
|
@@ -4643,14 +5156,14 @@ module Google
|
|
4643
5156
|
# @return [Float]
|
4644
5157
|
attr_accessor :confidence
|
4645
5158
|
|
4646
|
-
# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
|
4647
|
-
#
|
5159
|
+
# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
|
5160
|
+
# full list of supported type names will be provided in the document.
|
4648
5161
|
# Corresponds to the JSON property `name`
|
4649
5162
|
# @return [String]
|
4650
5163
|
attr_accessor :name
|
4651
5164
|
|
4652
|
-
# Text value of the detection result. For example, the value for "HairColor"
|
4653
|
-
#
|
5165
|
+
# Text value of the detection result. For example, the value for "HairColor" can
|
5166
|
+
# be "black", "blonde", etc.
|
4654
5167
|
# Corresponds to the JSON property `value`
|
4655
5168
|
# @return [String]
|
4656
5169
|
attr_accessor :value
|
@@ -4682,9 +5195,8 @@ module Google
|
|
4682
5195
|
# @return [String]
|
4683
5196
|
attr_accessor :name
|
4684
5197
|
|
4685
|
-
# A vertex represents a 2D point in the image.
|
4686
|
-
#
|
4687
|
-
# and range from 0 to 1.
|
5198
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
5199
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
4688
5200
|
# Corresponds to the JSON property `point`
|
4689
5201
|
# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
|
4690
5202
|
attr_accessor :point
|
@@ -4710,8 +5222,7 @@ module Google
|
|
4710
5222
|
# @return [String]
|
4711
5223
|
attr_accessor :description
|
4712
5224
|
|
4713
|
-
# Opaque entity ID. Some IDs may be available in
|
4714
|
-
# [Google Knowledge Graph Search
|
5225
|
+
# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
|
4715
5226
|
# API](https://developers.google.com/knowledge-graph/).
|
4716
5227
|
# Corresponds to the JSON property `entityId`
|
4717
5228
|
# @return [String]
|
@@ -4734,9 +5245,9 @@ module Google
|
|
4734
5245
|
end
|
4735
5246
|
end
|
4736
5247
|
|
4737
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
4738
|
-
#
|
4739
|
-
#
|
5248
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
5249
|
+
# explicit content has been detected in a frame, no annotations are present for
|
5250
|
+
# that frame.
|
4740
5251
|
class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
|
4741
5252
|
include Google::Apis::Core::Hashable
|
4742
5253
|
|
@@ -4787,20 +5298,41 @@ module Google
  end
  end

- #
- class
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p3beta1FaceAnnotation
  include Google::Apis::Core::Hashable

- #
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
  # Corresponds to the JSON property `thumbnail`
  # NOTE: Values are automatically base64 encoded/decoded in the client library.
  # @return [String]
  attr_accessor :thumbnail

-
-
-
-
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable

  # Feature version.
  # Corresponds to the JSON property `version`
@@ -4813,20 +5345,63 @@ module Google

 # Update properties of this object
 def update!(**args)
-@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
-@tracks = args[:tracks] if args.key?(:tracks)
 @version = args[:version] if args.key?(:version)
 end
 end

+# Deprecated. No effect.
+class GoogleCloudVideointelligenceV1p3beta1FaceFrame
+include Google::Apis::Core::Hashable
+
+# Normalized Bounding boxes in a frame. There can be more than one boxes if the
+# same face is detected in multiple locations within the current frame.
+# Corresponds to the JSON property `normalizedBoundingBoxes`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox>]
+attr_accessor :normalized_bounding_boxes
+
+# Time-offset, relative to the beginning of the video, corresponding to the
+# video frame for this location.
+# Corresponds to the JSON property `timeOffset`
+# @return [String]
+attr_accessor :time_offset
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+@time_offset = args[:time_offset] if args.key?(:time_offset)
+end
+end
+
+# Video segment level annotation results for face detection.
+class GoogleCloudVideointelligenceV1p3beta1FaceSegment
+include Google::Apis::Core::Hashable
+
+# Video segment.
+# Corresponds to the JSON property `segment`
+# @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
+attr_accessor :segment
+
+def initialize(**args)
+update!(**args)
+end
+
+# Update properties of this object
+def update!(**args)
+@segment = args[:segment] if args.key?(:segment)
+end
+end
+
 # Label annotation.
 class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
 include Google::Apis::Core::Hashable

-# Common categories for the detected entity.
-#
-#
-# also be a `pet`.
+# Common categories for the detected entity. For example, when the label is `
+# Terrier`, the category is likely `dog`. And in some cases there might be more
+# than one categories e.g., `Terrier` could also be a `pet`.
 # Corresponds to the JSON property `categoryEntities`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Entity>]
 attr_accessor :category_entities
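The two hunks above re-introduce `GoogleCloudVideointelligenceV1p3beta1FaceAnnotation`, `...FaceFrame`, and `...FaceSegment` (the first two marked "Deprecated. No effect.") as plain `Google::Apis::Core::Hashable` value objects: keyword construction plus an `update!` that copies only the keys it knows about. A minimal sketch of building these objects by hand, using only accessors visible in this diff; the require path follows the gem's usual convention, and the duration strings are illustrative:

  require 'google/apis/videointelligence_v1'

  V1 = Google::Apis::VideointelligenceV1

  # A face visible from 0s to 2.5s, with one sampled frame at 1.5s.
  segment = V1::GoogleCloudVideointelligenceV1p3beta1VideoSegment.new(
    start_time_offset: '0s', end_time_offset: '2.5s'
  )
  face = V1::GoogleCloudVideointelligenceV1p3beta1FaceAnnotation.new(
    segments: [V1::GoogleCloudVideointelligenceV1p3beta1FaceSegment.new(segment: segment)],
    frames: [V1::GoogleCloudVideointelligenceV1p3beta1FaceFrame.new(time_offset: '1.5s')]
  )

  puts face.segments.first.segment.end_time_offset  # => "2.5s"
  puts face.frames.first.time_offset                # => "1.5s"

Because `update!` only copies recognized keys, unrecognized keyword arguments passed to `.new` are silently ignored by these generated classes.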
@@ -4925,14 +5500,14 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Entity]
 attr_accessor :entity

-# All video segments where the recognized logo appears. There might be
-#
+# All video segments where the recognized logo appears. There might be multiple
+# instances of the same logo class appearing in one VideoSegment.
 # Corresponds to the JSON property `segments`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
 attr_accessor :segments

-# All logo tracks where the recognized logo appears. Each track corresponds
-#
+# All logo tracks where the recognized logo appears. Each track corresponds to
+# one logo instance appearing in consecutive frames.
 # Corresponds to the JSON property `tracks`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Track>]
 attr_accessor :tracks
@@ -4949,9 +5524,8 @@ module Google
 end
 end

-# Normalized bounding box.
-#
-# Range: [0, 1].
+# Normalized bounding box. The normalized vertex coordinates are relative to the
+# original image. Range: [0, 1].
 class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
 include Google::Apis::Core::Hashable

@@ -4989,20 +5563,12 @@ module Google
 end

 # Normalized bounding polygon for text (that might not be aligned with axis).
-# Contains list of the corner points in clockwise order starting from
-#
-#
-#
-#
-#
-# When it's clockwise rotated 180 degrees around the top-left corner it
-# becomes:
-# 2----3
-# | |
-# 1----0
-# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-# than 0, or greater than 1 due to trignometric calculations for location of
-# the box.
+# Contains list of the corner points in clockwise order starting from top-left
+# corner. For example, for a rectangular bounding box: When the text is
+# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+# or greater than 1 due to trignometric calculations for location of the box.
 class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
 include Google::Apis::Core::Hashable

@@ -5021,9 +5587,8 @@ module Google
 end
 end

-# A vertex represents a 2D point in the image.
-#
-# and range from 0 to 1.
+# A vertex represents a 2D point in the image. NOTE: the normalized vertex
+# coordinates are relative to the original image and range from 0 to 1.
 class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
 include Google::Apis::Core::Hashable

@@ -5062,10 +5627,10 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1Entity]
 attr_accessor :entity

-# Information corresponding to all frames where this object track appears.
-#
-#
-#
+# Information corresponding to all frames where this object track appears. Non-
+# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+# frames.
 # Corresponds to the JSON property `frames`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
 attr_accessor :frames
@@ -5075,12 +5640,11 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
 attr_accessor :segment

-# Streaming mode ONLY.
-#
-#
-#
-#
-# ObjectTrackAnnotation of the same track_id over time.
+# Streaming mode ONLY. In streaming mode, we do not know the end time of a
+# tracked object before it is completed. Hence, there is no VideoSegment info
+# returned. Instead, we provide a unique identifiable integer track_id so that
+# the customers can correlate the results of the ongoing ObjectTrackAnnotation
+# of the same track_id over time.
 # Corresponds to the JSON property `trackId`
 # @return [Fixnum]
 attr_accessor :track_id
@@ -5110,9 +5674,8 @@ module Google
 class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
 include Google::Apis::Core::Hashable

-# Normalized bounding box.
-#
-# Range: [0, 1].
+# Normalized bounding box. The normalized vertex coordinates are relative to the
+# original image. Range: [0, 1].
 # Corresponds to the JSON property `normalizedBoundingBox`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
 attr_accessor :normalized_bounding_box
@@ -5189,10 +5752,10 @@ module Google

 # Output only. The confidence estimate between 0.0 and 1.0. A higher number
 # indicates an estimated greater likelihood that the recognized words are
-# correct. This field is set only for the top alternative.
-#
-#
-#
+# correct. This field is set only for the top alternative. This field is not
+# guaranteed to be accurate and users should not rely on it to be always
+# provided. The default of 0.0 is a sentinel value indicating `confidence` was
+# not set.
 # Corresponds to the JSON property `confidence`
 # @return [Float]
 attr_accessor :confidence
@@ -5203,8 +5766,8 @@ module Google
 attr_accessor :transcript

 # Output only. A list of word-specific information for each recognized word.
-# Note: When `enable_speaker_diarization` is set to true, you will see all
-#
+# Note: When `enable_speaker_diarization` is set to true, you will see all the
+# words from the beginning of the audio.
 # Corresponds to the JSON property `words`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
 attr_accessor :words
@@ -5225,18 +5788,17 @@ module Google
 class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
 include Google::Apis::Core::Hashable

-# May contain one or more recognition hypotheses (up to the maximum specified
-#
-#
-#
+# May contain one or more recognition hypotheses (up to the maximum specified in
+# `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+# the top (first) alternative being the most probable, as ranked by the
+# recognizer.
 # Corresponds to the JSON property `alternatives`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
 attr_accessor :alternatives

 # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
-# language tag of
-#
-# most likelihood of being spoken in the audio.
+# language tag of the language in this result. This language code was detected
+# to have the most likelihood of being spoken in the audio.
 # Corresponds to the JSON property `languageCode`
 # @return [String]
 attr_accessor :language_code
@@ -5252,32 +5814,32 @@ module Google
 end
 end

-# `StreamingAnnotateVideoResponse` is the only message returned to the client
-#
-#
+# `StreamingAnnotateVideoResponse` is the only message returned to the client by
+# `StreamingAnnotateVideo`. A series of zero or more `
+# StreamingAnnotateVideoResponse` messages are streamed back to the client.
 class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
 include Google::Apis::Core::Hashable

-# Streaming annotation results corresponding to a portion of the video
-#
+# Streaming annotation results corresponding to a portion of the video that is
+# currently being processed. Only ONE type of annotation will be specified in
+# the response.
 # Corresponds to the JSON property `annotationResults`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
 attr_accessor :annotation_results

-# Google Cloud Storage URI that stores annotation results of one
-#
-#
-# from the request followed by '/cloud_project_number-session_id'.
+# Google Cloud Storage URI that stores annotation results of one streaming
+# session in JSON format. It is the annotation_result_storage_directory from the
+# request followed by '/cloud_project_number-session_id'.
 # Corresponds to the JSON property `annotationResultsUri`
 # @return [String]
 attr_accessor :annotation_results_uri

-# The `Status` type defines a logical error model that is suitable for
-#
-#
-#
-#
-#
+# The `Status` type defines a logical error model that is suitable for different
+# programming environments, including REST APIs and RPC APIs. It is used by [
+# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+# data: error code, error message, and error details. You can find out more
+# about this error model and how to work with it in the [API Design Guide](https:
+# //cloud.google.com/apis/design/errors).
 # Corresponds to the JSON property `error`
 # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
 attr_accessor :error
@@ -5294,18 +5856,24 @@ module Google
 end
 end

-# Streaming annotation results corresponding to a portion of the video
-#
+# Streaming annotation results corresponding to a portion of the video that is
+# currently being processed. Only ONE type of annotation will be specified in
+# the response.
 class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
 include Google::Apis::Core::Hashable

-# Explicit content annotation (based on per-frame visual signals only).
-#
-#
+# Explicit content annotation (based on per-frame visual signals only). If no
+# explicit content has been detected in a frame, no annotations are present for
+# that frame.
 # Corresponds to the JSON property `explicitAnnotation`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
 attr_accessor :explicit_annotation

+# Timestamp of the processed frame in microseconds.
+# Corresponds to the JSON property `frameTimestamp`
+# @return [String]
+attr_accessor :frame_timestamp
+
 # Label annotation results.
 # Corresponds to the JSON property `labelAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
@@ -5328,6 +5896,7 @@ module Google
 # Update properties of this object
 def update!(**args)
 @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+@frame_timestamp = args[:frame_timestamp] if args.key?(:frame_timestamp)
 @label_annotations = args[:label_annotations] if args.key?(:label_annotations)
 @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
 @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
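The two hunks above add a `frame_timestamp` string to the streaming results class and wire it into `update!`. A hedged sketch of reading it, assuming `response` is a `GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse` obtained from a streaming call outside this diff; only accessors shown in the diff are used:

  results = response.annotation_results
  if results && results.frame_timestamp
    # Per the field comment, this is the timestamp of the processed frame in microseconds.
    puts "processed frame at #{results.frame_timestamp}"
  end
  puts "#{Array(results&.label_annotations).size} label annotation(s) in this chunk"
  warn "stream error: #{response.error.message}" if response.error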
@@ -5367,27 +5936,19 @@ module Google
 end
 end

-# Video frame level annotation results for text annotation (OCR).
-#
-#
+# Video frame level annotation results for text annotation (OCR). Contains
+# information regarding timestamp and bounding box locations for the frames
+# containing detected OCR text snippets.
 class GoogleCloudVideointelligenceV1p3beta1TextFrame
 include Google::Apis::Core::Hashable

 # Normalized bounding polygon for text (that might not be aligned with axis).
-# Contains list of the corner points in clockwise order starting from
-#
-#
-#
-#
-#
-# When it's clockwise rotated 180 degrees around the top-left corner it
-# becomes:
-# 2----3
-# | |
-# 1----0
-# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-# than 0, or greater than 1 due to trignometric calculations for location of
-# the box.
+# Contains list of the corner points in clockwise order starting from top-left
+# corner. For example, for a rectangular bounding box: When the text is
+# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+# or greater than 1 due to trignometric calculations for location of the box.
 # Corresponds to the JSON property `rotatedBoundingBox`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
 attr_accessor :rotated_bounding_box
@@ -5440,9 +6001,8 @@ module Google
 end
 end

-# For tracking related features.
-#
-# normalized_bounding_box.
+# For tracking related features. An object at time_offset with attributes, and
+# located with normalized_bounding_box.
 class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
 include Google::Apis::Core::Hashable

@@ -5456,15 +6016,14 @@ module Google
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
 attr_accessor :landmarks

-# Normalized bounding box.
-#
-# Range: [0, 1].
+# Normalized bounding box. The normalized vertex coordinates are relative to the
+# original image. Range: [0, 1].
 # Corresponds to the JSON property `normalizedBoundingBox`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
 attr_accessor :normalized_bounding_box

-# Time-offset, relative to the beginning of the video,
-#
+# Time-offset, relative to the beginning of the video, corresponding to the
+# video frame for this object.
 # Corresponds to the JSON property `timeOffset`
 # @return [String]
 attr_accessor :time_offset
@@ -5523,20 +6082,19 @@ module Google
 class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
 include Google::Apis::Core::Hashable

-# Specifies which feature is being tracked if the request contains more than
-#
+# Specifies which feature is being tracked if the request contains more than one
+# feature.
 # Corresponds to the JSON property `feature`
 # @return [String]
 attr_accessor :feature

-# Video file location in
-# [Cloud Storage](https://cloud.google.com/storage/).
+# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
 # Corresponds to the JSON property `inputUri`
 # @return [String]
 attr_accessor :input_uri

-# Approximate percentage processed thus far. Guaranteed to be
-#
+# Approximate percentage processed thus far. Guaranteed to be 100 when fully
+# processed.
 # Corresponds to the JSON property `progressPercent`
 # @return [Fixnum]
 attr_accessor :progress_percent
@@ -5580,36 +6138,40 @@ module Google
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
 attr_accessor :celebrity_recognition_annotations

-# The `Status` type defines a logical error model that is suitable for
-#
-#
-#
-#
-#
+# The `Status` type defines a logical error model that is suitable for different
+# programming environments, including REST APIs and RPC APIs. It is used by [
+# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+# data: error code, error message, and error details. You can find out more
+# about this error model and how to work with it in the [API Design Guide](https:
+# //cloud.google.com/apis/design/errors).
 # Corresponds to the JSON property `error`
 # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
 attr_accessor :error

-# Explicit content annotation (based on per-frame visual signals only).
-#
-#
+# Explicit content annotation (based on per-frame visual signals only). If no
+# explicit content has been detected in a frame, no annotations are present for
+# that frame.
 # Corresponds to the JSON property `explicitAnnotation`
 # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
 attr_accessor :explicit_annotation

+# Deprecated. Please use `face_detection_annotations` instead.
+# Corresponds to the JSON property `faceAnnotations`
+# @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1FaceAnnotation>]
+attr_accessor :face_annotations
+
 # Face detection annotations.
 # Corresponds to the JSON property `faceDetectionAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
 attr_accessor :face_detection_annotations

-# Label annotations on frame level.
-#
+# Label annotations on frame level. There is exactly one element for each unique
+# label.
 # Corresponds to the JSON property `frameLabelAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
 attr_accessor :frame_label_annotations

-# Video file location in
-# [Cloud Storage](https://cloud.google.com/storage/).
+# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
 # Corresponds to the JSON property `inputUri`
 # @return [String]
 attr_accessor :input_uri
@@ -5641,11 +6203,11 @@ module Google
 attr_accessor :segment_label_annotations

 # Presence label annotations on video level or user-specified segment level.
-# There is exactly one element for each unique label. Compared to the
-#
-#
-#
-#
+# There is exactly one element for each unique label. Compared to the existing
+# topical `segment_label_annotations`, this field presents more fine-grained,
+# segment-level labels detected in video content and is made available only when
+# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+# request.
 # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
 attr_accessor :segment_presence_label_annotations
@@ -5655,17 +6217,17 @@ module Google
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
 attr_accessor :shot_annotations

-# Topical label annotations on shot level.
-#
+# Topical label annotations on shot level. There is exactly one element for each
+# unique label.
 # Corresponds to the JSON property `shotLabelAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
 attr_accessor :shot_label_annotations

 # Presence label annotations on shot level. There is exactly one element for
-# each unique label. Compared to the existing topical
-#
-#
-#
+# each unique label. Compared to the existing topical `shot_label_annotations`,
+# this field presents more fine-grained, shot-level labels detected in video
+# content and is made available only when the client sets `LabelDetectionConfig.
+# model` to "builtin/latest" in the request.
 # Corresponds to the JSON property `shotPresenceLabelAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
 attr_accessor :shot_presence_label_annotations
@@ -5675,9 +6237,8 @@ module Google
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
 attr_accessor :speech_transcriptions

-# OCR text detection and tracking.
-#
-# frame information associated with it.
+# OCR text detection and tracking. Annotations for list of detected text
+# snippets. Each will have list of frame information associated with it.
 # Corresponds to the JSON property `textAnnotations`
 # @return [Array<Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
 attr_accessor :text_annotations
@@ -5691,6 +6252,7 @@ module Google
 @celebrity_recognition_annotations = args[:celebrity_recognition_annotations] if args.key?(:celebrity_recognition_annotations)
 @error = args[:error] if args.key?(:error)
 @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+@face_annotations = args[:face_annotations] if args.key?(:face_annotations)
 @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
 @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
 @input_uri = args[:input_uri] if args.key?(:input_uri)
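The hunks above restore the deprecated `face_annotations` field on the video-level results next to the newer `face_detection_annotations`. A short sketch of preferring the new field and only falling back to the legacy one; `annotation_results` is assumed to be a `GoogleCloudVideointelligenceV1p3beta1VideoAnnotationResults` instance produced elsewhere:

  faces = annotation_results.face_detection_annotations
  if faces.nil? || faces.empty?
    # Older payloads may only populate the deprecated field.
    faces = annotation_results.face_annotations
  end
  puts "#{Array(faces).size} face annotation(s) found"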
@@ -5712,14 +6274,14 @@ module Google
 class GoogleCloudVideointelligenceV1p3beta1VideoSegment
 include Google::Apis::Core::Hashable

-# Time-offset, relative to the beginning of the video,
-#
+# Time-offset, relative to the beginning of the video, corresponding to the end
+# of the segment (inclusive).
 # Corresponds to the JSON property `endTimeOffset`
 # @return [String]
 attr_accessor :end_time_offset

-# Time-offset, relative to the beginning of the video,
-#
+# Time-offset, relative to the beginning of the video, corresponding to the
+# start of the segment (inclusive).
 # Corresponds to the JSON property `startTimeOffset`
 # @return [String]
 attr_accessor :start_time_offset
@@ -5736,41 +6298,41 @@ module Google
 end

 # Word-specific information for recognized words. Word information is only
-# included in the response when certain request parameters are set, such
-#
+# included in the response when certain request parameters are set, such as `
+# enable_word_time_offsets`.
 class GoogleCloudVideointelligenceV1p3beta1WordInfo
 include Google::Apis::Core::Hashable

 # Output only. The confidence estimate between 0.0 and 1.0. A higher number
 # indicates an estimated greater likelihood that the recognized words are
-# correct. This field is set only for the top alternative.
-#
-#
-#
+# correct. This field is set only for the top alternative. This field is not
+# guaranteed to be accurate and users should not rely on it to be always
+# provided. The default of 0.0 is a sentinel value indicating `confidence` was
+# not set.
 # Corresponds to the JSON property `confidence`
 # @return [Float]
 attr_accessor :confidence

-# Time offset relative to the beginning of the audio, and
-#
-#
-#
+# Time offset relative to the beginning of the audio, and corresponding to the
+# end of the spoken word. This field is only set if `enable_word_time_offsets=
+# true` and only in the top hypothesis. This is an experimental feature and the
+# accuracy of the time offset can vary.
 # Corresponds to the JSON property `endTime`
 # @return [String]
 attr_accessor :end_time

-# Output only. A distinct integer value is assigned for every speaker within
-#
-#
-#
+# Output only. A distinct integer value is assigned for every speaker within the
+# audio. This field specifies which one of those speakers was detected to have
+# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+# only set if speaker diarization is enabled.
 # Corresponds to the JSON property `speakerTag`
 # @return [Fixnum]
 attr_accessor :speaker_tag

-# Time offset relative to the beginning of the audio, and
-#
-#
-#
+# Time offset relative to the beginning of the audio, and corresponding to the
+# start of the spoken word. This field is only set if `enable_word_time_offsets=
+# true` and only in the top hypothesis. This is an experimental feature and the
+# accuracy of the time offset can vary.
 # Corresponds to the JSON property `startTime`
 # @return [String]
 attr_accessor :start_time
@@ -5837,47 +6399,45 @@ module Google
 class GoogleLongrunningOperation
 include Google::Apis::Core::Hashable

-# If the value is `false`, it means the operation is still in progress.
-#
-# available.
+# If the value is `false`, it means the operation is still in progress. If `true`
+# , the operation is completed, and either `error` or `response` is available.
 # Corresponds to the JSON property `done`
 # @return [Boolean]
 attr_accessor :done
 alias_method :done?, :done

-# The `Status` type defines a logical error model that is suitable for
-#
-#
-#
-#
-#
+# The `Status` type defines a logical error model that is suitable for different
+# programming environments, including REST APIs and RPC APIs. It is used by [
+# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+# data: error code, error message, and error details. You can find out more
+# about this error model and how to work with it in the [API Design Guide](https:
+# //cloud.google.com/apis/design/errors).
 # Corresponds to the JSON property `error`
 # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus]
 attr_accessor :error

-# Service-specific metadata associated with the operation.
-#
-#
-#
+# Service-specific metadata associated with the operation. It typically contains
+# progress information and common metadata such as create time. Some services
+# might not provide such metadata. Any method that returns a long-running
+# operation should document the metadata type, if any.
 # Corresponds to the JSON property `metadata`
 # @return [Hash<String,Object>]
 attr_accessor :metadata

 # The server-assigned name, which is only unique within the same service that
-# originally returns it. If you use the default HTTP mapping, the
-#
+# originally returns it. If you use the default HTTP mapping, the `name` should
+# be a resource name ending with `operations/`unique_id``.
 # Corresponds to the JSON property `name`
 # @return [String]
 attr_accessor :name

-# The normal response of the operation in case of success.
-# method returns no data on success, such as `Delete`, the response is
-#
-#
-#
-#
-#
-# `TakeSnapshotResponse`.
+# The normal response of the operation in case of success. If the original
+# method returns no data on success, such as `Delete`, the response is `google.
+# protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+# the response should be the resource. For other methods, the response should
+# have the type `XxxResponse`, where `Xxx` is the original method name. For
+# example, if the original method name is `TakeSnapshot()`, the inferred
+# response type is `TakeSnapshotResponse`.
 # Corresponds to the JSON property `response`
 # @return [Hash<String,Object>]
 attr_accessor :response
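The reworded comments above spell out the long-running operation contract: `done` becomes true once either `error` or `response` is populated, `metadata` carries service-specific progress, and `response` is a plain hash keyed by the response type's JSON field names. A minimal sketch of inspecting such an object, assuming `operation` is a `GoogleLongrunningOperation` returned by an asynchronous call outside this diff:

  if operation.done?
    if operation.error
      warn "operation #{operation.name} failed: #{operation.error.message}"
    else
      puts "operation #{operation.name} finished: #{operation.response.inspect}"
    end
  else
    puts "operation #{operation.name} still running"
  end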
@@ -5896,13 +6456,11 @@ module Google
 end
 end

-# A generic empty message that you can re-use to avoid defining duplicated
-#
-#
-#
-#
-# `
-# The JSON representation for `Empty` is empty JSON object ````.
+# A generic empty message that you can re-use to avoid defining duplicated empty
+# messages in your APIs. A typical example is to use it as the request or the
+# response type of an API method. For instance: service Foo ` rpc Bar(google.
+# protobuf.Empty) returns (google.protobuf.Empty); ` The JSON representation for
+# `Empty` is empty JSON object ````.
 class GoogleProtobufEmpty
 include Google::Apis::Core::Hashable

@@ -5915,12 +6473,12 @@ module Google
 end
 end

-# The `Status` type defines a logical error model that is suitable for
-#
-#
-#
-#
-#
+# The `Status` type defines a logical error model that is suitable for different
+# programming environments, including REST APIs and RPC APIs. It is used by [
+# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+# data: error code, error message, and error details. You can find out more
+# about this error model and how to work with it in the [API Design Guide](https:
+# //cloud.google.com/apis/design/errors).
 class GoogleRpcStatus
 include Google::Apis::Core::Hashable

@@ -5929,15 +6487,15 @@ module Google
 # @return [Fixnum]
 attr_accessor :code

-# A list of messages that carry the error details.
+# A list of messages that carry the error details. There is a common set of
 # message types for APIs to use.
 # Corresponds to the JSON property `details`
 # @return [Array<Hash<String,Object>>]
 attr_accessor :details

-# A developer-facing error message, which should be in English. Any
-#
-#
+# A developer-facing error message, which should be in English. Any user-facing
+# error message should be localized and sent in the google.rpc.Status.details
+# field, or localized by the client.
 # Corresponds to the JSON property `message`
 # @return [String]
 attr_accessor :message