google-api-client 0.43.0 → 0.48.0
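A minimal sketch of how a consuming project might pick up this upgrade, assuming a standard Bundler setup (the Gemfile shown here is hypothetical; only the gem name and version numbers come from the header above):

```ruby
# Gemfile of a hypothetical project depending on google-api-client.
source "https://rubygems.org"

# Bump the pin from the 0.43.x line to 0.48.0, matching this diff's header.
gem "google-api-client", "~> 0.48.0"
```

After editing the Gemfile, `bundle update google-api-client` would resolve and install the new version. The file list below summarizes what changed between 0.43.0 and 0.48.0.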
- checksums.yaml +4 -4
- data/.github/workflows/autoapprove.yml +49 -0
- data/.github/workflows/release-please.yml +77 -0
- data/.gitignore +2 -0
- data/.kokoro/trampoline.sh +0 -0
- data/CHANGELOG.md +1066 -184
- data/Gemfile +1 -0
- data/Rakefile +31 -3
- data/api_list_config.yaml +8 -0
- data/api_names.yaml +1 -0
- data/bin/generate-api +77 -15
- data/docs/oauth-server.md +4 -6
- data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
- data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
- data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
- data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
- data/generated/google/apis/accessapproval_v1/service.rb +93 -132
- data/generated/google/apis/accessapproval_v1.rb +1 -1
- data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
- data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
- data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
- data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
- data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
- data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
- data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
- data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
- data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
- data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
- data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
- data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
- data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
- data/generated/google/apis/adexperiencereport_v1.rb +1 -1
- data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
- data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
- data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
- data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
- data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
- data/generated/google/apis/admin_directory_v1/service.rb +607 -998
- data/generated/google/apis/admin_directory_v1.rb +6 -8
- data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
- data/generated/google/apis/admin_reports_v1/service.rb +131 -187
- data/generated/google/apis/admin_reports_v1.rb +6 -5
- data/generated/google/apis/admob_v1/classes.rb +31 -31
- data/generated/google/apis/admob_v1/service.rb +2 -1
- data/generated/google/apis/admob_v1.rb +6 -2
- data/generated/google/apis/adsense_v1_4/service.rb +4 -1
- data/generated/google/apis/adsense_v1_4.rb +1 -1
- data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
- data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
- data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
- data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
- data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
- data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
- data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
- data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
- data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
- data/generated/google/apis/analyticsreporting_v4.rb +1 -1
- data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
- data/generated/google/apis/androidenterprise_v1.rb +1 -1
- data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
- data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
- data/generated/google/apis/androidmanagement_v1.rb +1 -1
- data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
- data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
- data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
- data/generated/google/apis/androidpublisher_v3.rb +1 -1
- data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
- data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
- data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
- data/generated/google/apis/apigateway_v1beta.rb +34 -0
- data/generated/google/apis/apigee_v1/classes.rb +630 -88
- data/generated/google/apis/apigee_v1/representations.rb +209 -1
- data/generated/google/apis/apigee_v1/service.rb +401 -74
- data/generated/google/apis/apigee_v1.rb +6 -7
- data/generated/google/apis/appengine_v1/classes.rb +96 -59
- data/generated/google/apis/appengine_v1/representations.rb +17 -0
- data/generated/google/apis/appengine_v1/service.rb +38 -47
- data/generated/google/apis/appengine_v1.rb +1 -1
- data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
- data/generated/google/apis/appengine_v1alpha.rb +1 -1
- data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
- data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
- data/generated/google/apis/appengine_v1beta/service.rb +37 -47
- data/generated/google/apis/appengine_v1beta.rb +1 -1
- data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
- data/generated/google/apis/appsmarket_v2.rb +1 -1
- data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
- data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
- data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
- data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
- data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
- data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
- data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
- data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
- data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
- data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
- data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
- data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
- data/generated/google/apis/bigquery_v2/classes.rb +593 -576
- data/generated/google/apis/bigquery_v2/representations.rb +85 -0
- data/generated/google/apis/bigquery_v2/service.rb +79 -41
- data/generated/google/apis/bigquery_v2.rb +1 -1
- data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
- data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
- data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
- data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
- data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
- data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
- data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
- data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
- data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
- data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
- data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
- data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
- data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
- data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
- data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
- data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
- data/generated/google/apis/bigtableadmin_v1.rb +1 -1
- data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
- data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
- data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
- data/generated/google/apis/bigtableadmin_v2.rb +1 -1
- data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
- data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
- data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
- data/generated/google/apis/billingbudgets_v1.rb +38 -0
- data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
- data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
- data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
- data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
- data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
- data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
- data/generated/google/apis/binaryauthorization_v1.rb +1 -1
- data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
- data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
- data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
- data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
- data/generated/google/apis/books_v1/service.rb +54 -54
- data/generated/google/apis/books_v1.rb +1 -1
- data/generated/google/apis/calendar_v3/classes.rb +13 -10
- data/generated/google/apis/calendar_v3.rb +1 -1
- data/generated/google/apis/chat_v1/classes.rb +173 -116
- data/generated/google/apis/chat_v1/representations.rb +36 -0
- data/generated/google/apis/chat_v1/service.rb +30 -42
- data/generated/google/apis/chat_v1.rb +1 -1
- data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
- data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
- data/generated/google/apis/civicinfo_v2.rb +1 -1
- data/generated/google/apis/classroom_v1/classes.rb +153 -21
- data/generated/google/apis/classroom_v1/representations.rb +43 -0
- data/generated/google/apis/classroom_v1/service.rb +240 -0
- data/generated/google/apis/classroom_v1.rb +7 -1
- data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
- data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
- data/generated/google/apis/cloudasset_v1/service.rb +296 -167
- data/generated/google/apis/cloudasset_v1.rb +1 -1
- data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
- data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
- data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
- data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
- data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
- data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
- data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
- data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
- data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
- data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
- data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
- data/generated/google/apis/cloudbilling_v1.rb +7 -1
- data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
- data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
- data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
- data/generated/google/apis/cloudbuild_v1.rb +1 -1
- data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
- data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
- data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
- data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
- data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
- data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
- data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
- data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
- data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
- data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
- data/generated/google/apis/clouddebugger_v2.rb +1 -1
- data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
- data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
- data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
- data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
- data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
- data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
- data/generated/google/apis/cloudfunctions_v1.rb +1 -1
- data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
- data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
- data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
- data/generated/google/apis/cloudidentity_v1.rb +4 -1
- data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
- data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
- data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
- data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
- data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
- data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
- data/generated/google/apis/cloudiot_v1/service.rb +147 -154
- data/generated/google/apis/cloudiot_v1.rb +1 -1
- data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
- data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
- data/generated/google/apis/cloudkms_v1/service.rb +170 -216
- data/generated/google/apis/cloudkms_v1.rb +1 -1
- data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
- data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
- data/generated/google/apis/cloudprofiler_v2.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
- data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
- data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
- data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
- data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
- data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
- data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
- data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
- data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
- data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
- data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
- data/generated/google/apis/cloudscheduler_v1.rb +1 -1
- data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
- data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
- data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
- data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
- data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
- data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
- data/generated/google/apis/cloudsearch_v1.rb +2 -2
- data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
- data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
- data/generated/google/apis/cloudshell_v1/service.rb +198 -25
- data/generated/google/apis/cloudshell_v1.rb +1 -1
- data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
- data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
- data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
- data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
- data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
- data/generated/google/apis/cloudtasks_v2.rb +1 -1
- data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
- data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
- data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
- data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
- data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
- data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
- data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
- data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
- data/generated/google/apis/cloudtrace_v1.rb +1 -1
- data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
- data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
- data/generated/google/apis/cloudtrace_v2.rb +1 -1
- data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
- data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
- data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
- data/generated/google/apis/composer_v1/classes.rb +189 -242
- data/generated/google/apis/composer_v1/service.rb +79 -150
- data/generated/google/apis/composer_v1.rb +1 -1
- data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
- data/generated/google/apis/composer_v1beta1/service.rb +94 -179
- data/generated/google/apis/composer_v1beta1.rb +1 -1
- data/generated/google/apis/compute_alpha/classes.rb +1227 -186
- data/generated/google/apis/compute_alpha/representations.rb +235 -8
- data/generated/google/apis/compute_alpha/service.rb +2009 -1024
- data/generated/google/apis/compute_alpha.rb +1 -1
- data/generated/google/apis/compute_beta/classes.rb +1080 -108
- data/generated/google/apis/compute_beta/representations.rb +212 -2
- data/generated/google/apis/compute_beta/service.rb +1413 -741
- data/generated/google/apis/compute_beta.rb +1 -1
- data/generated/google/apis/compute_v1/classes.rb +1512 -106
- data/generated/google/apis/compute_v1/representations.rb +470 -1
- data/generated/google/apis/compute_v1/service.rb +1625 -285
- data/generated/google/apis/compute_v1.rb +1 -1
- data/generated/google/apis/container_v1/classes.rb +982 -965
- data/generated/google/apis/container_v1/representations.rb +60 -0
- data/generated/google/apis/container_v1/service.rb +435 -502
- data/generated/google/apis/container_v1.rb +1 -1
- data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
- data/generated/google/apis/container_v1beta1/representations.rb +91 -0
- data/generated/google/apis/container_v1beta1/service.rb +403 -466
- data/generated/google/apis/container_v1beta1.rb +1 -1
- data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
- data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
- data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
- data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
- data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
- data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
- data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
- data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
- data/generated/google/apis/content_v2/classes.rb +515 -1219
- data/generated/google/apis/content_v2/service.rb +377 -650
- data/generated/google/apis/content_v2.rb +3 -4
- data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
- data/generated/google/apis/content_v2_1/representations.rb +288 -0
- data/generated/google/apis/content_v2_1/service.rb +987 -795
- data/generated/google/apis/content_v2_1.rb +3 -4
- data/generated/google/apis/customsearch_v1/service.rb +2 -2
- data/generated/google/apis/customsearch_v1.rb +1 -1
- data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
- data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
- data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
- data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
- data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
- data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
- data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
- data/generated/google/apis/dataflow_v1b3.rb +1 -1
- data/generated/google/apis/datafusion_v1/classes.rb +283 -397
- data/generated/google/apis/datafusion_v1/representations.rb +5 -0
- data/generated/google/apis/datafusion_v1/service.rb +76 -89
- data/generated/google/apis/datafusion_v1.rb +5 -8
- data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
- data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
- data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
- data/generated/google/apis/datafusion_v1beta1.rb +5 -8
- data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
- data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
- data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
- data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
- data/generated/google/apis/dataproc_v1/classes.rb +97 -13
- data/generated/google/apis/dataproc_v1/representations.rb +34 -0
- data/generated/google/apis/dataproc_v1.rb +1 -1
- data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
- data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
- data/generated/google/apis/dataproc_v1beta2.rb +1 -1
- data/generated/google/apis/datastore_v1/classes.rb +334 -476
- data/generated/google/apis/datastore_v1/service.rb +52 -63
- data/generated/google/apis/datastore_v1.rb +1 -1
- data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
- data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
- data/generated/google/apis/datastore_v1beta1.rb +1 -1
- data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
- data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
- data/generated/google/apis/datastore_v1beta3.rb +1 -1
- data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
- data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
- data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
- data/generated/google/apis/deploymentmanager_v2.rb +6 -4
- data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
- data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
- data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
- data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
- data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
- data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
- data/generated/google/apis/dfareporting_v3_3.rb +2 -2
- data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
- data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
- data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
- data/generated/google/apis/dfareporting_v3_4.rb +2 -2
- data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
- data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
- data/generated/google/apis/dialogflow_v2.rb +1 -1
- data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
- data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
- data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
- data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
- data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
- data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
- data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
- data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
- data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
- data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
- data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
- data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
- data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
- data/generated/google/apis/displayvideo_v1/service.rb +287 -32
- data/generated/google/apis/displayvideo_v1.rb +1 -1
- data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
- data/generated/google/apis/displayvideo_v1beta.rb +38 -0
- data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
- data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
- data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
- data/generated/google/apis/displayvideo_v1dev.rb +38 -0
- data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
- data/generated/google/apis/dlp_v2/representations.rb +16 -0
- data/generated/google/apis/dlp_v2/service.rb +962 -905
- data/generated/google/apis/dlp_v2.rb +1 -1
- data/generated/google/apis/dns_v1/classes.rb +356 -198
- data/generated/google/apis/dns_v1/representations.rb +83 -0
- data/generated/google/apis/dns_v1/service.rb +83 -98
- data/generated/google/apis/dns_v1.rb +2 -2
- data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
- data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
- data/generated/google/apis/dns_v1beta2/service.rb +83 -98
- data/generated/google/apis/dns_v1beta2.rb +2 -2
- data/generated/google/apis/docs_v1/classes.rb +894 -1229
- data/generated/google/apis/docs_v1/service.rb +17 -22
- data/generated/google/apis/docs_v1.rb +1 -1
- data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
- data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
- data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
- data/generated/google/apis/documentai_v1beta2.rb +1 -1
- data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
- data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
- data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
- data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
- data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
- data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
- data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
- data/generated/google/apis/domains_v1alpha2.rb +34 -0
- data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
- data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
- data/generated/google/apis/domains_v1beta1/service.rb +805 -0
- data/generated/google/apis/domains_v1beta1.rb +34 -0
- data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
- data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
- data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
- data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
- data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
- data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
- data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
- data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
- data/generated/google/apis/drive_v2/classes.rb +18 -7
- data/generated/google/apis/drive_v2/representations.rb +1 -0
- data/generated/google/apis/drive_v2/service.rb +79 -15
- data/generated/google/apis/drive_v2.rb +1 -1
- data/generated/google/apis/drive_v3/classes.rb +18 -8
- data/generated/google/apis/drive_v3/representations.rb +1 -0
- data/generated/google/apis/drive_v3/service.rb +59 -11
- data/generated/google/apis/drive_v3.rb +1 -1
- data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
- data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
- data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
- data/generated/google/apis/eventarc_v1beta1.rb +34 -0
- data/generated/google/apis/file_v1/classes.rb +155 -174
- data/generated/google/apis/file_v1/service.rb +43 -52
- data/generated/google/apis/file_v1.rb +1 -1
- data/generated/google/apis/file_v1beta1/classes.rb +335 -194
- data/generated/google/apis/file_v1beta1/representations.rb +55 -0
- data/generated/google/apis/file_v1beta1/service.rb +267 -55
- data/generated/google/apis/file_v1beta1.rb +1 -1
- data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
- data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
- data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
- data/generated/google/apis/firebase_v1beta1.rb +1 -1
- data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
- data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
- data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
- data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
- data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
- data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
- data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
- data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
- data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
- data/generated/google/apis/firebaserules_v1/service.rb +87 -110
- data/generated/google/apis/firebaserules_v1.rb +1 -1
- data/generated/google/apis/firestore_v1/classes.rb +406 -502
- data/generated/google/apis/firestore_v1/service.rb +165 -201
- data/generated/google/apis/firestore_v1.rb +1 -1
- data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
- data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
- data/generated/google/apis/firestore_v1beta1.rb +1 -1
- data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
- data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
- data/generated/google/apis/firestore_v1beta2.rb +1 -1
- data/generated/google/apis/fitness_v1/classes.rb +982 -0
- data/generated/google/apis/fitness_v1/representations.rb +398 -0
- data/generated/google/apis/fitness_v1/service.rb +628 -0
- data/generated/google/apis/fitness_v1.rb +97 -0
- data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
- data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
- data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
- data/generated/google/apis/games_management_v1management/classes.rb +14 -20
- data/generated/google/apis/games_management_v1management/service.rb +35 -36
- data/generated/google/apis/games_management_v1management.rb +2 -3
- data/generated/google/apis/games_v1/classes.rb +376 -83
- data/generated/google/apis/games_v1/representations.rb +118 -0
- data/generated/google/apis/games_v1/service.rb +118 -90
- data/generated/google/apis/games_v1.rb +2 -3
- data/generated/google/apis/gameservices_v1/classes.rb +22 -14
- data/generated/google/apis/gameservices_v1/representations.rb +1 -0
- data/generated/google/apis/gameservices_v1/service.rb +54 -51
- data/generated/google/apis/gameservices_v1.rb +1 -1
- data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
- data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
- data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
- data/generated/google/apis/gameservices_v1beta.rb +1 -1
- data/generated/google/apis/genomics_v1/classes.rb +70 -76
- data/generated/google/apis/genomics_v1/service.rb +28 -43
- data/generated/google/apis/genomics_v1.rb +1 -1
- data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
- data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
- data/generated/google/apis/genomics_v1alpha2.rb +1 -1
- data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
- data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
- data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
- data/generated/google/apis/genomics_v2alpha1.rb +1 -1
- data/generated/google/apis/gmail_v1/classes.rb +37 -43
- data/generated/google/apis/gmail_v1/service.rb +5 -4
- data/generated/google/apis/gmail_v1.rb +1 -1
- data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
- data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
- data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
- data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
- data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
- data/generated/google/apis/groupsmigration_v1.rb +35 -0
- data/generated/google/apis/healthcare_v1/classes.rb +637 -826
- data/generated/google/apis/healthcare_v1/representations.rb +32 -0
- data/generated/google/apis/healthcare_v1/service.rb +842 -855
- data/generated/google/apis/healthcare_v1.rb +1 -1
- data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
- data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
- data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
- data/generated/google/apis/healthcare_v1beta1.rb +1 -1
- data/generated/google/apis/homegraph_v1/classes.rb +76 -164
- data/generated/google/apis/homegraph_v1/service.rb +23 -35
- data/generated/google/apis/homegraph_v1.rb +4 -1
- data/generated/google/apis/iam_v1/classes.rb +395 -592
- data/generated/google/apis/iam_v1/representations.rb +1 -0
- data/generated/google/apis/iam_v1/service.rb +427 -555
- data/generated/google/apis/iam_v1.rb +1 -1
- data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
- data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
- data/generated/google/apis/iamcredentials_v1.rb +3 -2
- data/generated/google/apis/iap_v1/classes.rb +253 -355
- data/generated/google/apis/iap_v1/representations.rb +1 -0
- data/generated/google/apis/iap_v1/service.rb +61 -71
- data/generated/google/apis/iap_v1.rb +1 -1
- data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
- data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
- data/generated/google/apis/iap_v1beta1/service.rb +17 -19
- data/generated/google/apis/iap_v1beta1.rb +1 -1
- data/generated/google/apis/indexing_v3/classes.rb +11 -11
- data/generated/google/apis/indexing_v3.rb +1 -1
- data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
- data/generated/google/apis/jobs_v2/representations.rb +272 -0
- data/generated/google/apis/jobs_v2/service.rb +85 -126
- data/generated/google/apis/jobs_v2.rb +1 -1
- data/generated/google/apis/jobs_v3/classes.rb +1559 -980
- data/generated/google/apis/jobs_v3/representations.rb +272 -0
- data/generated/google/apis/jobs_v3/service.rb +101 -139
- data/generated/google/apis/jobs_v3.rb +1 -1
- data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
- data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
- data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
- data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
- data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
- data/generated/google/apis/kgsearch_v1/service.rb +11 -11
- data/generated/google/apis/kgsearch_v1.rb +1 -1
- data/generated/google/apis/licensing_v1/classes.rb +1 -1
- data/generated/google/apis/licensing_v1/service.rb +56 -86
- data/generated/google/apis/licensing_v1.rb +4 -3
- data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
- data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
- data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
- data/generated/google/apis/lifesciences_v2beta.rb +1 -1
- data/generated/google/apis/localservices_v1/classes.rb +426 -0
- data/generated/google/apis/localservices_v1/representations.rb +174 -0
- data/generated/google/apis/localservices_v1/service.rb +199 -0
- data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
- data/generated/google/apis/logging_v2/classes.rb +306 -232
- data/generated/google/apis/logging_v2/representations.rb +79 -0
- data/generated/google/apis/logging_v2/service.rb +3307 -1579
- data/generated/google/apis/logging_v2.rb +1 -1
- data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
- data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
- data/generated/google/apis/managedidentities_v1/service.rb +1 -4
- data/generated/google/apis/managedidentities_v1.rb +1 -1
- data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
- data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
- data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
- data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
- data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
- data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
- data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
- data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
- data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
- data/generated/google/apis/manufacturers_v1/service.rb +44 -55
- data/generated/google/apis/manufacturers_v1.rb +1 -1
- data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
- data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
- data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
- data/generated/google/apis/memcache_v1beta2.rb +1 -1
- data/generated/google/apis/ml_v1/classes.rb +1122 -1149
- data/generated/google/apis/ml_v1/representations.rb +82 -0
- data/generated/google/apis/ml_v1/service.rb +194 -253
- data/generated/google/apis/ml_v1.rb +1 -1
- data/generated/google/apis/monitoring_v1/classes.rb +107 -26
- data/generated/google/apis/monitoring_v1/representations.rb +35 -0
- data/generated/google/apis/monitoring_v1/service.rb +10 -11
- data/generated/google/apis/monitoring_v1.rb +1 -1
- data/generated/google/apis/monitoring_v3/classes.rb +303 -345
- data/generated/google/apis/monitoring_v3/representations.rb +18 -0
- data/generated/google/apis/monitoring_v3/service.rb +176 -146
- data/generated/google/apis/monitoring_v3.rb +1 -1
- data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
- data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
- data/generated/google/apis/networkmanagement_v1.rb +1 -1
- data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
- data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
- data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
- data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
- data/generated/google/apis/osconfig_v1/classes.rb +154 -902
- data/generated/google/apis/osconfig_v1/representations.rb +0 -337
- data/generated/google/apis/osconfig_v1/service.rb +26 -31
- data/generated/google/apis/osconfig_v1.rb +3 -3
- data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
- data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
- data/generated/google/apis/osconfig_v1beta.rb +3 -3
- data/generated/google/apis/oslogin_v1/classes.rb +14 -12
- data/generated/google/apis/oslogin_v1/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1/service.rb +12 -16
- data/generated/google/apis/oslogin_v1.rb +1 -1
- data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
- data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
- data/generated/google/apis/oslogin_v1alpha.rb +1 -1
- data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
- data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
- data/generated/google/apis/oslogin_v1beta.rb +1 -1
- data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
- data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
- data/generated/google/apis/pagespeedonline_v5.rb +2 -2
- data/generated/google/apis/people_v1/classes.rb +173 -63
- data/generated/google/apis/people_v1/representations.rb +41 -0
- data/generated/google/apis/people_v1/service.rb +63 -61
- data/generated/google/apis/people_v1.rb +1 -1
- data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
- data/generated/google/apis/playablelocations_v3/service.rb +10 -10
- data/generated/google/apis/playablelocations_v3.rb +1 -1
- data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
- data/generated/google/apis/playcustomapp_v1.rb +1 -1
- data/generated/google/apis/poly_v1/classes.rb +65 -79
- data/generated/google/apis/poly_v1/service.rb +50 -63
- data/generated/google/apis/poly_v1.rb +3 -4
- data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
- data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
- data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
- data/generated/google/apis/privateca_v1beta1.rb +34 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
- data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
- data/generated/google/apis/pubsub_v1/classes.rb +399 -518
- data/generated/google/apis/pubsub_v1/representations.rb +2 -0
- data/generated/google/apis/pubsub_v1/service.rb +221 -247
- data/generated/google/apis/pubsub_v1.rb +1 -1
- data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
- data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
- data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
- data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
- data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
- data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
- data/generated/google/apis/pubsub_v1beta2.rb +1 -1
- data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
- data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
- data/generated/google/apis/pubsublite_v1/service.rb +558 -0
- data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
- data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
- data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
- data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
- data/generated/google/apis/realtimebidding_v1.rb +1 -1
- data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
- data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
- data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
- data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
- data/generated/google/apis/recommender_v1/classes.rb +1 -1
- data/generated/google/apis/recommender_v1/service.rb +4 -2
- data/generated/google/apis/recommender_v1.rb +1 -1
- data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
- data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
- data/generated/google/apis/recommender_v1beta1.rb +1 -1
- data/generated/google/apis/redis_v1/classes.rb +91 -513
- data/generated/google/apis/redis_v1/representations.rb +0 -139
- data/generated/google/apis/redis_v1/service.rb +92 -109
- data/generated/google/apis/redis_v1.rb +1 -1
- data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
- data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
- data/generated/google/apis/redis_v1beta1/service.rb +126 -109
- data/generated/google/apis/redis_v1beta1.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
- data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
- data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
- data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
- data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
- data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
- data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
- data/generated/google/apis/reseller_v1/classes.rb +151 -219
- data/generated/google/apis/reseller_v1/service.rb +122 -173
- data/generated/google/apis/reseller_v1.rb +2 -2
- data/generated/google/apis/run_v1/classes.rb +19 -138
- data/generated/google/apis/run_v1/representations.rb +1 -62
- data/generated/google/apis/run_v1/service.rb +0 -342
- data/generated/google/apis/run_v1.rb +1 -1
- data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
- data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
- data/generated/google/apis/run_v1alpha1.rb +1 -1
- data/generated/google/apis/run_v1beta1/classes.rb +3 -2
- data/generated/google/apis/run_v1beta1.rb +1 -1
- data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
- data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
- data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
- data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
- data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
- data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
- data/generated/google/apis/safebrowsing_v4.rb +1 -1
- data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
- data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
- data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
- data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
- data/generated/google/apis/script_v1/classes.rb +88 -111
- data/generated/google/apis/script_v1/service.rb +63 -69
- data/generated/google/apis/script_v1.rb +1 -1
- data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
- data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
- data/generated/google/apis/searchconsole_v1/service.rb +287 -0
- data/generated/google/apis/searchconsole_v1.rb +7 -1
- data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
- data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
- data/generated/google/apis/secretmanager_v1/service.rb +66 -82
- data/generated/google/apis/secretmanager_v1.rb +1 -1
- data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
- data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
- data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
- data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
- data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
- data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
- data/generated/google/apis/securitycenter_v1.rb +1 -1
- data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
- data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
- data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
- data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
- data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
- data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
- data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
- data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
- data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
- data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
- data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
- data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
- data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
- data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
- data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
- data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
- data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
- data/generated/google/apis/servicecontrol_v1.rb +1 -1
- data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
- data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
- data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
- data/generated/google/apis/servicecontrol_v2.rb +1 -1
- data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
- data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
- data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
- data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
- data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
- data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
- data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
- data/generated/google/apis/servicemanagement_v1.rb +1 -1
- data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
- data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
- data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
- data/generated/google/apis/servicenetworking_v1.rb +1 -1
- data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
- data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
- data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
- data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
- data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
- data/generated/google/apis/serviceusage_v1/service.rb +5 -1
- data/generated/google/apis/serviceusage_v1.rb +1 -1
- data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
- data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
- data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
- data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
- data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
- data/generated/google/apis/sheets_v4/representations.rb +625 -0
- data/generated/google/apis/sheets_v4/service.rb +113 -149
- data/generated/google/apis/sheets_v4.rb +1 -1
- data/generated/google/apis/site_verification_v1.rb +1 -1
- data/generated/google/apis/slides_v1/classes.rb +841 -1114
- data/generated/google/apis/slides_v1/service.rb +23 -30
- data/generated/google/apis/slides_v1.rb +1 -1
- data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
- data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
- data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
- data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
- data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
- data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
- data/generated/google/apis/sourcerepo_v1.rb +1 -1
- data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
- data/generated/google/apis/spanner_v1/representations.rb +1 -0
- data/generated/google/apis/spanner_v1/service.rb +443 -618
- data/generated/google/apis/spanner_v1.rb +1 -1
- data/generated/google/apis/speech_v1/classes.rb +174 -220
- data/generated/google/apis/speech_v1/service.rb +27 -32
- data/generated/google/apis/speech_v1.rb +1 -1
- data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
- data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
- data/generated/google/apis/speech_v1p1beta1.rb +1 -1
- data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
- data/generated/google/apis/speech_v2beta1/service.rb +10 -12
- data/generated/google/apis/speech_v2beta1.rb +1 -1
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
- data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
- data/generated/google/apis/storage_v1/classes.rb +10 -17
- data/generated/google/apis/storage_v1/representations.rb +2 -3
- data/generated/google/apis/storage_v1/service.rb +3 -2
- data/generated/google/apis/storage_v1.rb +1 -1
- data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
- data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
- data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
- data/generated/google/apis/storagetransfer_v1.rb +1 -1
- data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
- data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
- data/generated/google/apis/streetviewpublish_v1.rb +1 -1
- data/generated/google/apis/sts_v1/classes.rb +121 -0
- data/generated/google/apis/sts_v1/representations.rb +59 -0
- data/generated/google/apis/sts_v1/service.rb +90 -0
- data/generated/google/apis/sts_v1.rb +32 -0
- data/generated/google/apis/sts_v1beta/classes.rb +191 -0
- data/generated/google/apis/sts_v1beta/representations.rb +61 -0
- data/generated/google/apis/sts_v1beta/service.rb +92 -0
- data/generated/google/apis/sts_v1beta.rb +32 -0
- data/generated/google/apis/tagmanager_v1/service.rb +2 -2
- data/generated/google/apis/tagmanager_v1.rb +1 -1
- data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
- data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
- data/generated/google/apis/tagmanager_v2/service.rb +2 -2
- data/generated/google/apis/tagmanager_v2.rb +1 -1
- data/generated/google/apis/tasks_v1/classes.rb +21 -22
- data/generated/google/apis/tasks_v1/service.rb +19 -19
- data/generated/google/apis/tasks_v1.rb +1 -1
- data/generated/google/apis/testing_v1/classes.rb +384 -390
- data/generated/google/apis/testing_v1/representations.rb +23 -0
- data/generated/google/apis/testing_v1/service.rb +22 -28
- data/generated/google/apis/testing_v1.rb +1 -1
- data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
- data/generated/google/apis/texttospeech_v1/service.rb +9 -10
- data/generated/google/apis/texttospeech_v1.rb +1 -1
- data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
- data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
- data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
- data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
- data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
- data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
- data/generated/google/apis/toolresults_v1beta3.rb +1 -1
- data/generated/google/apis/tpu_v1/classes.rb +57 -3
- data/generated/google/apis/tpu_v1/representations.rb +19 -0
- data/generated/google/apis/tpu_v1/service.rb +8 -8
- data/generated/google/apis/tpu_v1.rb +1 -1
- data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
- data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
- data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
- data/generated/google/apis/tpu_v1alpha1.rb +1 -1
- data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
- data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
- data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
- data/generated/google/apis/trafficdirector_v2.rb +34 -0
- data/generated/google/apis/translate_v3/classes.rb +151 -177
- data/generated/google/apis/translate_v3/service.rb +122 -151
- data/generated/google/apis/translate_v3.rb +1 -1
- data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
- data/generated/google/apis/translate_v3beta1/service.rb +122 -151
- data/generated/google/apis/translate_v3beta1.rb +1 -1
- data/generated/google/apis/vault_v1/classes.rb +413 -103
- data/generated/google/apis/vault_v1/representations.rb +162 -0
- data/generated/google/apis/vault_v1/service.rb +182 -37
- data/generated/google/apis/vault_v1.rb +1 -1
- data/generated/google/apis/vectortile_v1/classes.rb +185 -267
- data/generated/google/apis/vectortile_v1/service.rb +75 -88
- data/generated/google/apis/vectortile_v1.rb +1 -1
- data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
- data/generated/google/apis/verifiedaccess_v1.rb +1 -1
- data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
- data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1/service.rb +38 -77
- data/generated/google/apis/videointelligence_v1.rb +1 -1
- data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
- data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
- data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
- data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
- data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
- data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
- data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
- data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
- data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
- data/generated/google/apis/vision_v1/classes.rb +16 -16
- data/generated/google/apis/vision_v1.rb +1 -1
- data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
- data/generated/google/apis/vision_v1p1beta1.rb +1 -1
- data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
- data/generated/google/apis/vision_v1p2beta1.rb +1 -1
- data/generated/google/apis/webfonts_v1/classes.rb +1 -2
- data/generated/google/apis/webfonts_v1/service.rb +2 -4
- data/generated/google/apis/webfonts_v1.rb +2 -3
- data/generated/google/apis/websecurityscanner_v1.rb +1 -1
- data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
- data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
- data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
- data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
- data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
- data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
- data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
- data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
- data/generated/google/apis/workflows_v1beta/service.rb +438 -0
- data/generated/google/apis/workflows_v1beta.rb +35 -0
- data/generated/google/apis/youtube_partner_v1.rb +1 -1
- data/generated/google/apis/youtube_v3/classes.rb +0 -586
- data/generated/google/apis/youtube_v3/representations.rb +0 -269
- data/generated/google/apis/youtube_v3/service.rb +3 -120
- data/generated/google/apis/youtube_v3.rb +1 -1
- data/google-api-client.gemspec +25 -24
- data/lib/google/apis/core/api_command.rb +1 -0
- data/lib/google/apis/core/http_command.rb +2 -1
- data/lib/google/apis/options.rb +8 -5
- data/lib/google/apis/version.rb +1 -1
- data/synth.py +40 -0
- metadata +134 -41
- data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
- data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
- data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
- data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
- data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
- data/generated/google/apis/appsactivity_v1/service.rb +0 -126
- data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
- data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
- data/generated/google/apis/dns_v2beta1/service.rb +0 -928
- data/generated/google/apis/dns_v2beta1.rb +0 -43
- data/generated/google/apis/memcache_v1/classes.rb +0 -1157
- data/generated/google/apis/plus_v1/classes.rb +0 -2094
- data/generated/google/apis/plus_v1/representations.rb +0 -907
- data/generated/google/apis/plus_v1/service.rb +0 -451
- data/generated/google/apis/plus_v1.rb +0 -43
- data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
- data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
- data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
- data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
- data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
- data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
- data/generated/google/apis/storage_v1beta2.rb +0 -40
@@ -22,9 +22,9 @@ module Google
   module Apis
     module VideointelligenceV1p2beta1
 
-      # Video annotation progress. Included in the `metadata`
-      #
-      #
+      # Video annotation progress. Included in the `metadata` field of the `Operation`
+      # returned by the `GetOperation` call of the `google::longrunning::Operations`
+      # service.
       class GoogleCloudVideointelligenceV1AnnotateVideoProgress
         include Google::Apis::Core::Hashable
 
@@ -43,9 +43,9 @@ module Google
         end
       end
 
-      # Video annotation response. Included in the `response`
-      #
-      #
+      # Video annotation response. Included in the `response` field of the `Operation`
+      # returned by the `GetOperation` call of the `google::longrunning::Operations`
+      # service.
       class GoogleCloudVideointelligenceV1AnnotateVideoResponse
         include Google::Apis::Core::Hashable
 
@@ -73,14 +73,14 @@ module Google
         # @return [Float]
         attr_accessor :confidence
 
-        # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
-        #
+        # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+        # full list of supported type names will be provided in the document.
         # Corresponds to the JSON property `name`
         # @return [String]
         attr_accessor :name
 
-        # Text value of the detection result. For example, the value for "HairColor"
-        #
+        # Text value of the detection result. For example, the value for "HairColor" can
+        # be "black", "blonde", etc.
         # Corresponds to the JSON property `value`
         # @return [String]
         attr_accessor :value
@@ -112,9 +112,8 @@ module Google
         # @return [String]
         attr_accessor :name
 
-        # A vertex represents a 2D point in the image.
-        #
-        # and range from 0 to 1.
+        # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+        # coordinates are relative to the original image and range from 0 to 1.
         # Corresponds to the JSON property `point`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedVertex]
         attr_accessor :point
@@ -140,8 +139,7 @@ module Google
         # @return [String]
         attr_accessor :description
 
-        # Opaque entity ID. Some IDs may be available in
-        # [Google Knowledge Graph Search
+        # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
         # API](https://developers.google.com/knowledge-graph/).
         # Corresponds to the JSON property `entityId`
         # @return [String]
@@ -164,9 +162,9 @@ module Google
         end
       end
 
-      # Explicit content annotation (based on per-frame visual signals only).
-      #
-      #
+      # Explicit content annotation (based on per-frame visual signals only). If no
+      # explicit content has been detected in a frame, no annotations are present for
+      # that frame.
       class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
         include Google::Apis::Core::Hashable
 
@@ -217,14 +215,110 @@ module Google
         end
       end
 
+      # Deprecated. No effect.
+      class GoogleCloudVideointelligenceV1FaceAnnotation
+        include Google::Apis::Core::Hashable
+
+        # All video frames where a face was detected.
+        # Corresponds to the JSON property `frames`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1FaceFrame>]
+        attr_accessor :frames
+
+        # All video segments where a face was detected.
+        # Corresponds to the JSON property `segments`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1FaceSegment>]
+        attr_accessor :segments
+
+        # Thumbnail of a representative face view (in JPEG format).
+        # Corresponds to the JSON property `thumbnail`
+        # NOTE: Values are automatically base64 encoded/decoded in the client library.
+        # @return [String]
+        attr_accessor :thumbnail
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @frames = args[:frames] if args.key?(:frames)
+          @segments = args[:segments] if args.key?(:segments)
+          @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+        end
+      end
+
+      # Face detection annotation.
+      class GoogleCloudVideointelligenceV1FaceDetectionAnnotation
+        include Google::Apis::Core::Hashable
+
+        # Feature version.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
+      # Deprecated. No effect.
+      class GoogleCloudVideointelligenceV1FaceFrame
+        include Google::Apis::Core::Hashable
+
+        # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+        # same face is detected in multiple locations within the current frame.
+        # Corresponds to the JSON property `normalizedBoundingBoxes`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox>]
+        attr_accessor :normalized_bounding_boxes
+
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # video frame for this location.
+        # Corresponds to the JSON property `timeOffset`
+        # @return [String]
+        attr_accessor :time_offset
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+          @time_offset = args[:time_offset] if args.key?(:time_offset)
+        end
+      end
+
+      # Video segment level annotation results for face detection.
+      class GoogleCloudVideointelligenceV1FaceSegment
+        include Google::Apis::Core::Hashable
+
+        # Video segment.
+        # Corresponds to the JSON property `segment`
+        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
+        attr_accessor :segment
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @segment = args[:segment] if args.key?(:segment)
+        end
+      end
+
       # Label annotation.
       class GoogleCloudVideointelligenceV1LabelAnnotation
         include Google::Apis::Core::Hashable
 
-        # Common categories for the detected entity.
-        #
-        #
-        # also be a `pet`.
+        # Common categories for the detected entity. For example, when the label is `
+        # Terrier`, the category is likely `dog`. And in some cases there might be more
+        # than one categories e.g., `Terrier` could also be a `pet`.
         # Corresponds to the JSON property `categoryEntities`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity>]
         attr_accessor :category_entities
@@ -323,14 +417,14 @@ module Google
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity]
         attr_accessor :entity
 
-        # All video segments where the recognized logo appears. There might be
-        #
+        # All video segments where the recognized logo appears. There might be multiple
+        # instances of the same logo class appearing in one VideoSegment.
         # Corresponds to the JSON property `segments`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment>]
        attr_accessor :segments
 
-        # All logo tracks where the recognized logo appears. Each track corresponds
-        #
+        # All logo tracks where the recognized logo appears. Each track corresponds to
+        # one logo instance appearing in consecutive frames.
         # Corresponds to the JSON property `tracks`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Track>]
         attr_accessor :tracks
@@ -347,9 +441,8 @@ module Google
         end
       end
 
-      # Normalized bounding box.
-      #
-      # Range: [0, 1].
+      # Normalized bounding box. The normalized vertex coordinates are relative to the
+      # original image. Range: [0, 1].
       class GoogleCloudVideointelligenceV1NormalizedBoundingBox
         include Google::Apis::Core::Hashable
 
@@ -387,20 +480,12 @@ module Google
       end
 
       # Normalized bounding polygon for text (that might not be aligned with axis).
-      # Contains list of the corner points in clockwise order starting from
-      #
-      #
-      #
-      #
-      #
-      # When it's clockwise rotated 180 degrees around the top-left corner it
-      # becomes:
-      # 2----3
-      # | |
-      # 1----0
-      # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-      # than 0, or greater than 1 due to trignometric calculations for location of
-      # the box.
+      # Contains list of the corner points in clockwise order starting from top-left
+      # corner. For example, for a rectangular bounding box: When the text is
+      # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+      # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+      # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+      # or greater than 1 due to trignometric calculations for location of the box.
       class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
         include Google::Apis::Core::Hashable
 
@@ -419,9 +504,8 @@ module Google
         end
       end
 
-      # A vertex represents a 2D point in the image.
-      #
-      # and range from 0 to 1.
+      # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+      # coordinates are relative to the original image and range from 0 to 1.
       class GoogleCloudVideointelligenceV1NormalizedVertex
         include Google::Apis::Core::Hashable
 
@@ -460,10 +544,10 @@ module Google
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity]
         attr_accessor :entity
 
-        # Information corresponding to all frames where this object track appears.
-        #
-        #
-        #
+        # Information corresponding to all frames where this object track appears. Non-
+        # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+        # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+        # frames.
         # Corresponds to the JSON property `frames`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
         attr_accessor :frames
@@ -473,12 +557,11 @@ module Google
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
         attr_accessor :segment
 
-        # Streaming mode ONLY.
-        #
-        #
-        #
-        #
-        # ObjectTrackAnnotation of the same track_id over time.
+        # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+        # tracked object before it is completed. Hence, there is no VideoSegment info
+        # returned. Instead, we provide a unique identifiable integer track_id so that
+        # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+        # of the same track_id over time.
         # Corresponds to the JSON property `trackId`
         # @return [Fixnum]
         attr_accessor :track_id
@@ -508,9 +591,8 @@ module Google
       class GoogleCloudVideointelligenceV1ObjectTrackingFrame
         include Google::Apis::Core::Hashable
 
-        # Normalized bounding box.
-        #
-        # Range: [0, 1].
+        # Normalized bounding box. The normalized vertex coordinates are relative to the
+        # original image. Range: [0, 1].
         # Corresponds to the JSON property `normalizedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
         attr_accessor :normalized_bounding_box
@@ -531,16 +613,41 @@ module Google
         end
       end
 
+      # Person detection annotation per video.
+      class GoogleCloudVideointelligenceV1PersonDetectionAnnotation
+        include Google::Apis::Core::Hashable
+
+        # The detected tracks of a person.
+        # Corresponds to the JSON property `tracks`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Track>]
+        attr_accessor :tracks
+
+        # Feature version.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @tracks = args[:tracks] if args.key?(:tracks)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
       # Alternative hypotheses (a.k.a. n-best list).
       class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative
         include Google::Apis::Core::Hashable
 
         # Output only. The confidence estimate between 0.0 and 1.0. A higher number
         # indicates an estimated greater likelihood that the recognized words are
-        # correct. This field is set only for the top alternative.
-        #
-        #
-        #
+        # correct. This field is set only for the top alternative. This field is not
+        # guaranteed to be accurate and users should not rely on it to be always
+        # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+        # not set.
         # Corresponds to the JSON property `confidence`
         # @return [Float]
         attr_accessor :confidence
@@ -551,8 +658,8 @@ module Google
         attr_accessor :transcript
 
         # Output only. A list of word-specific information for each recognized word.
-        # Note: When `enable_speaker_diarization` is set to true, you will see all
-        #
+        # Note: When `enable_speaker_diarization` is set to true, you will see all the
+        # words from the beginning of the audio.
         # Corresponds to the JSON property `words`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1WordInfo>]
         attr_accessor :words
@@ -573,18 +680,17 @@ module Google
       class GoogleCloudVideointelligenceV1SpeechTranscription
         include Google::Apis::Core::Hashable
 
-        # May contain one or more recognition hypotheses (up to the maximum specified
-        #
-        #
-        #
+        # May contain one or more recognition hypotheses (up to the maximum specified in
+        # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+        # the top (first) alternative being the most probable, as ranked by the
+        # recognizer.
         # Corresponds to the JSON property `alternatives`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
         attr_accessor :alternatives
 
         # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
-        # language tag of
-        #
-        # most likelihood of being spoken in the audio.
+        # language tag of the language in this result. This language code was detected
+        # to have the most likelihood of being spoken in the audio.
         # Corresponds to the JSON property `languageCode`
         # @return [String]
         attr_accessor :language_code
@@ -633,27 +739,19 @@ module Google
         end
       end
 
-      # Video frame level annotation results for text annotation (OCR).
-      #
-      #
+      # Video frame level annotation results for text annotation (OCR). Contains
+      # information regarding timestamp and bounding box locations for the frames
+      # containing detected OCR text snippets.
       class GoogleCloudVideointelligenceV1TextFrame
         include Google::Apis::Core::Hashable
 
         # Normalized bounding polygon for text (that might not be aligned with axis).
-        # Contains list of the corner points in clockwise order starting from
-        #
-        #
-        #
-        #
-        #
-        # When it's clockwise rotated 180 degrees around the top-left corner it
-        # becomes:
-        # 2----3
-        # | |
-        # 1----0
-        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-        # than 0, or greater than 1 due to trignometric calculations for location of
-        # the box.
+        # Contains list of the corner points in clockwise order starting from top-left
+        # corner. For example, for a rectangular bounding box: When the text is
+        # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+        # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+        # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+        # or greater than 1 due to trignometric calculations for location of the box.
         # Corresponds to the JSON property `rotatedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
         attr_accessor :rotated_bounding_box
@@ -706,9 +804,8 @@ module Google
         end
       end
 
-      # For tracking related features.
-      #
-      # normalized_bounding_box.
+      # For tracking related features. An object at time_offset with attributes, and
+      # located with normalized_bounding_box.
       class GoogleCloudVideointelligenceV1TimestampedObject
         include Google::Apis::Core::Hashable
 
@@ -722,15 +819,14 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1DetectedLandmark>]
         attr_accessor :landmarks
 
-        # Normalized bounding box.
-        #
-        # Range: [0, 1].
+        # Normalized bounding box. The normalized vertex coordinates are relative to the
+        # original image. Range: [0, 1].
         # Corresponds to the JSON property `normalizedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
         attr_accessor :normalized_bounding_box
 
-        # Time-offset, relative to the beginning of the video,
-        #
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # video frame for this object.
         # Corresponds to the JSON property `timeOffset`
         # @return [String]
         attr_accessor :time_offset
@@ -789,20 +885,19 @@ module Google
       class GoogleCloudVideointelligenceV1VideoAnnotationProgress
         include Google::Apis::Core::Hashable
 
-        # Specifies which feature is being tracked if the request contains more than
-        #
+        # Specifies which feature is being tracked if the request contains more than one
+        # feature.
         # Corresponds to the JSON property `feature`
         # @return [String]
         attr_accessor :feature
 
-        # Video file location in
-        # [Cloud Storage](https://cloud.google.com/storage/).
+        # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri
 
-        # Approximate percentage processed thus far. Guaranteed to be
-        #
+        # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+        # processed.
         # Corresponds to the JSON property `progressPercent`
         # @return [Fixnum]
         attr_accessor :progress_percent
@@ -841,31 +936,40 @@ module Google
       class GoogleCloudVideointelligenceV1VideoAnnotationResults
         include Google::Apis::Core::Hashable
 
-        # The `Status` type defines a logical error model that is suitable for
-        #
-        #
-        #
-        #
-        #
+        # The `Status` type defines a logical error model that is suitable for different
+        # programming environments, including REST APIs and RPC APIs. It is used by [
+        # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+        # data: error code, error message, and error details. You can find out more
+        # about this error model and how to work with it in the [API Design Guide](https:
+        # //cloud.google.com/apis/design/errors).
         # Corresponds to the JSON property `error`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
         attr_accessor :error
 
-        # Explicit content annotation (based on per-frame visual signals only).
-        #
-        #
+        # Explicit content annotation (based on per-frame visual signals only). If no
+        # explicit content has been detected in a frame, no annotations are present for
+        # that frame.
         # Corresponds to the JSON property `explicitAnnotation`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
         attr_accessor :explicit_annotation
 
-        #
-        #
+        # Deprecated. Please use `face_detection_annotations` instead.
+        # Corresponds to the JSON property `faceAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1FaceAnnotation>]
+        attr_accessor :face_annotations
+
+        # Face detection annotations.
+        # Corresponds to the JSON property `faceDetectionAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1FaceDetectionAnnotation>]
+        attr_accessor :face_detection_annotations
+
+        # Label annotations on frame level. There is exactly one element for each unique
+        # label.
         # Corresponds to the JSON property `frameLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :frame_label_annotations
 
-        # Video file location in
-        # [Cloud Storage](https://cloud.google.com/storage/).
+        # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri
@@ -880,6 +984,11 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation>]
         attr_accessor :object_annotations
 
+        # Person detection annotations.
+        # Corresponds to the JSON property `personDetectionAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1PersonDetectionAnnotation>]
+        attr_accessor :person_detection_annotations
+
         # Video segment.
         # Corresponds to the JSON property `segment`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
@@ -892,11 +1001,11 @@ module Google
         attr_accessor :segment_label_annotations
 
         # Presence label annotations on video level or user-specified segment level.
-        # There is exactly one element for each unique label. Compared to the
-        #
-        #
-        #
-        #
+        # There is exactly one element for each unique label. Compared to the existing
+        # topical `segment_label_annotations`, this field presents more fine-grained,
+        # segment-level labels detected in video content and is made available only when
+        # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+        # request.
         # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :segment_presence_label_annotations
@@ -906,17 +1015,17 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment>]
         attr_accessor :shot_annotations
 
-        # Topical label annotations on shot level.
-        #
+        # Topical label annotations on shot level. There is exactly one element for each
+        # unique label.
         # Corresponds to the JSON property `shotLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :shot_label_annotations
 
         # Presence label annotations on shot level. There is exactly one element for
-        # each unique label. Compared to the existing topical
-        #
-        #
-        #
+        # each unique label. Compared to the existing topical `shot_label_annotations`,
+        # this field presents more fine-grained, shot-level labels detected in video
+        # content and is made available only when the client sets `LabelDetectionConfig.
+        # model` to "builtin/latest" in the request.
         # Corresponds to the JSON property `shotPresenceLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
         attr_accessor :shot_presence_label_annotations
@@ -926,9 +1035,8 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1SpeechTranscription>]
         attr_accessor :speech_transcriptions
 
-        # OCR text detection and tracking.
-        #
-        # frame information associated with it.
+        # OCR text detection and tracking. Annotations for list of detected text
+        # snippets. Each will have list of frame information associated with it.
         # Corresponds to the JSON property `textAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextAnnotation>]
         attr_accessor :text_annotations
@@ -941,10 +1049,13 @@ module Google
         def update!(**args)
           @error = args[:error] if args.key?(:error)
           @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+          @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+          @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
           @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
           @input_uri = args[:input_uri] if args.key?(:input_uri)
           @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
           @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+          @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
           @segment = args[:segment] if args.key?(:segment)
           @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
           @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
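For orientation, the hunks above add `face_detection_annotations` and `person_detection_annotations` to `GoogleCloudVideointelligenceV1VideoAnnotationResults`, backed by the new `FaceDetectionAnnotation` and `PersonDetectionAnnotation` classes, while the comment on `face_annotations` marks the old field as deprecated. A minimal sketch of reading the new fields, assuming the results object is built by hand for illustration (in practice it comes back from a completed `annotate_video` long-running operation, which is outside this diff):

# Illustrative sketch only: the values below are invented; field and class names
# follow the definitions added in this diff.
require 'google/apis/videointelligence_v1p2beta1'

Vi = Google::Apis::VideointelligenceV1p2beta1

results = Vi::GoogleCloudVideointelligenceV1VideoAnnotationResults.new(
  face_detection_annotations: [
    Vi::GoogleCloudVideointelligenceV1FaceDetectionAnnotation.new(version: 'builtin/latest')
  ],
  person_detection_annotations: [
    Vi::GoogleCloudVideointelligenceV1PersonDetectionAnnotation.new(tracks: [], version: 'builtin/latest')
  ]
)

# The new accessors read back exactly what update! stored above.
results.face_detection_annotations.each { |a| puts "face detection version: #{a.version}" }
results.person_detection_annotations.each { |a| puts "person tracks: #{a.tracks.length}" }

Callers still reading `face_annotations` keep working, but per the diff that path is deprecated in favor of `face_detection_annotations`.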
@@ -960,14 +1071,14 @@ module Google
       class GoogleCloudVideointelligenceV1VideoSegment
         include Google::Apis::Core::Hashable
 
-        # Time-offset, relative to the beginning of the video,
-        #
+        # Time-offset, relative to the beginning of the video, corresponding to the end
+        # of the segment (inclusive).
         # Corresponds to the JSON property `endTimeOffset`
         # @return [String]
         attr_accessor :end_time_offset
 
-        # Time-offset, relative to the beginning of the video,
-        #
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # start of the segment (inclusive).
         # Corresponds to the JSON property `startTimeOffset`
         # @return [String]
         attr_accessor :start_time_offset
@@ -984,41 +1095,41 @@ module Google
       end
 
       # Word-specific information for recognized words. Word information is only
-      # included in the response when certain request parameters are set, such
-      #
+      # included in the response when certain request parameters are set, such as `
+      # enable_word_time_offsets`.
       class GoogleCloudVideointelligenceV1WordInfo
         include Google::Apis::Core::Hashable
 
         # Output only. The confidence estimate between 0.0 and 1.0. A higher number
         # indicates an estimated greater likelihood that the recognized words are
-        # correct. This field is set only for the top alternative.
-        #
-        #
-        #
+        # correct. This field is set only for the top alternative. This field is not
+        # guaranteed to be accurate and users should not rely on it to be always
+        # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+        # not set.
         # Corresponds to the JSON property `confidence`
         # @return [Float]
         attr_accessor :confidence
 
-        # Time offset relative to the beginning of the audio, and
-        #
-        #
-        #
+        # Time offset relative to the beginning of the audio, and corresponding to the
+        # end of the spoken word. This field is only set if `enable_word_time_offsets=
+        # true` and only in the top hypothesis. This is an experimental feature and the
+        # accuracy of the time offset can vary.
         # Corresponds to the JSON property `endTime`
         # @return [String]
         attr_accessor :end_time
 
-        # Output only. A distinct integer value is assigned for every speaker within
-        #
-        #
-        #
+        # Output only. A distinct integer value is assigned for every speaker within the
+        # audio. This field specifies which one of those speakers was detected to have
+        # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+        # only set if speaker diarization is enabled.
         # Corresponds to the JSON property `speakerTag`
         # @return [Fixnum]
         attr_accessor :speaker_tag
 
-        # Time offset relative to the beginning of the audio, and
-        #
-        #
-        #
+        # Time offset relative to the beginning of the audio, and corresponding to the
+        # start of the spoken word. This field is only set if `enable_word_time_offsets=
+        # true` and only in the top hypothesis. This is an experimental feature and the
+        # accuracy of the time offset can vary.
         # Corresponds to the JSON property `startTime`
         # @return [String]
         attr_accessor :start_time
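The reflowed `GoogleCloudVideointelligenceV1WordInfo` comments above document `speaker_tag` and the word-level `start_time`/`end_time` offsets. A minimal sketch of walking these fields, with hand-built objects standing in for what the service would normally return (the `word` attribute itself is not touched by this diff and is assumed from the existing class):

# Illustrative sketch only: values are invented; only the fields documented in
# the hunks above (plus the pre-existing word/transcript attributes) are used.
require 'google/apis/videointelligence_v1p2beta1'

Vi = Google::Apis::VideointelligenceV1p2beta1

word = Vi::GoogleCloudVideointelligenceV1WordInfo.new(
  word: 'hello', start_time: '1.200s', end_time: '1.500s', speaker_tag: 1
)
alternative = Vi::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative.new(
  transcript: 'hello', confidence: 0.92, words: [word]
)

# Word timing is only present when enable_word_time_offsets was requested, and
# speaker_tag only when speaker diarization is enabled, per the comments above.
alternative.words.each do |w|
  puts "#{w.word} [#{w.start_time}..#{w.end_time}] speaker=#{w.speaker_tag}"
end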
@@ -1042,9 +1153,9 @@ module Google
         end
       end
 
-      # Video annotation progress. Included in the `metadata`
-      #
-      #
+      # Video annotation progress. Included in the `metadata` field of the `Operation`
+      # returned by the `GetOperation` call of the `google::longrunning::Operations`
+      # service.
       class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
         include Google::Apis::Core::Hashable
 
@@ -1063,9 +1174,9 @@ module Google
         end
       end
 
-      # Video annotation response. Included in the `response`
-      #
-      #
+      # Video annotation response. Included in the `response` field of the `Operation`
+      # returned by the `GetOperation` call of the `google::longrunning::Operations`
+      # service.
       class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
         include Google::Apis::Core::Hashable
 
@@ -1093,14 +1204,14 @@ module Google
         # @return [Float]
         attr_accessor :confidence
 
-        # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
-        #
+        # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+        # full list of supported type names will be provided in the document.
         # Corresponds to the JSON property `name`
         # @return [String]
         attr_accessor :name
 
-        # Text value of the detection result. For example, the value for "HairColor"
-        #
+        # Text value of the detection result. For example, the value for "HairColor" can
+        # be "black", "blonde", etc.
         # Corresponds to the JSON property `value`
         # @return [String]
         attr_accessor :value
@@ -1132,9 +1243,8 @@ module Google
         # @return [String]
         attr_accessor :name
 
-        # A vertex represents a 2D point in the image.
-        #
-        # and range from 0 to 1.
+        # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+        # coordinates are relative to the original image and range from 0 to 1.
         # Corresponds to the JSON property `point`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
         attr_accessor :point
@@ -1160,8 +1270,7 @@ module Google
         # @return [String]
         attr_accessor :description
 
-        # Opaque entity ID. Some IDs may be available in
-        # [Google Knowledge Graph Search
+        # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
         # API](https://developers.google.com/knowledge-graph/).
         # Corresponds to the JSON property `entityId`
         # @return [String]
@@ -1184,9 +1293,9 @@ module Google
         end
       end
 
-      # Explicit content annotation (based on per-frame visual signals only).
-      #
-      #
+      # Explicit content annotation (based on per-frame visual signals only). If no
+      # explicit content has been detected in a frame, no annotations are present for
+      # that frame.
       class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
         include Google::Apis::Core::Hashable
 
@@ -1237,14 +1346,110 @@ module Google
         end
       end
 
+      # Deprecated. No effect.
+      class GoogleCloudVideointelligenceV1beta2FaceAnnotation
+        include Google::Apis::Core::Hashable
+
+        # All video frames where a face was detected.
+        # Corresponds to the JSON property `frames`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2FaceFrame>]
+        attr_accessor :frames
+
+        # All video segments where a face was detected.
+        # Corresponds to the JSON property `segments`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2FaceSegment>]
+        attr_accessor :segments
+
+        # Thumbnail of a representative face view (in JPEG format).
+        # Corresponds to the JSON property `thumbnail`
+        # NOTE: Values are automatically base64 encoded/decoded in the client library.
+        # @return [String]
+        attr_accessor :thumbnail
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @frames = args[:frames] if args.key?(:frames)
+          @segments = args[:segments] if args.key?(:segments)
+          @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+        end
+      end
+
+        # Face detection annotation.
+      class GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation
+        include Google::Apis::Core::Hashable
+
+        # Feature version.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
+      # Deprecated. No effect.
+      class GoogleCloudVideointelligenceV1beta2FaceFrame
+        include Google::Apis::Core::Hashable
+
+        # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+        # same face is detected in multiple locations within the current frame.
+        # Corresponds to the JSON property `normalizedBoundingBoxes`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox>]
+        attr_accessor :normalized_bounding_boxes
+
+        # Time-offset, relative to the beginning of the video, corresponding to the
+        # video frame for this location.
+        # Corresponds to the JSON property `timeOffset`
+        # @return [String]
+        attr_accessor :time_offset
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+          @time_offset = args[:time_offset] if args.key?(:time_offset)
+        end
+      end
+
+      # Video segment level annotation results for face detection.
+      class GoogleCloudVideointelligenceV1beta2FaceSegment
+        include Google::Apis::Core::Hashable
+
+        # Video segment.
+        # Corresponds to the JSON property `segment`
+        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
+        attr_accessor :segment
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @segment = args[:segment] if args.key?(:segment)
+        end
+      end
+
       # Label annotation.
       class GoogleCloudVideointelligenceV1beta2LabelAnnotation
         include Google::Apis::Core::Hashable
 
-        # Common categories for the detected entity.
-        #
-        #
-        # also be a `pet`.
+        # Common categories for the detected entity. For example, when the label is `
+        # Terrier`, the category is likely `dog`. And in some cases there might be more
+        # than one categories e.g., `Terrier` could also be a `pet`.
         # Corresponds to the JSON property `categoryEntities`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity>]
         attr_accessor :category_entities
@@ -1343,14 +1548,14 @@ module Google
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity]
         attr_accessor :entity
 
-        # All video segments where the recognized logo appears. There might be
-        #
+        # All video segments where the recognized logo appears. There might be multiple
+        # instances of the same logo class appearing in one VideoSegment.
         # Corresponds to the JSON property `segments`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
         attr_accessor :segments
 
-        # All logo tracks where the recognized logo appears. Each track corresponds
-        #
+        # All logo tracks where the recognized logo appears. Each track corresponds to
+        # one logo instance appearing in consecutive frames.
         # Corresponds to the JSON property `tracks`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Track>]
         attr_accessor :tracks
@@ -1367,9 +1572,8 @@ module Google
         end
       end
 
-      # Normalized bounding box.
-      #
-      # Range: [0, 1].
+      # Normalized bounding box. The normalized vertex coordinates are relative to the
+      # original image. Range: [0, 1].
       class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
         include Google::Apis::Core::Hashable
 
@@ -1407,20 +1611,12 @@ module Google
       end
 
       # Normalized bounding polygon for text (that might not be aligned with axis).
-      # Contains list of the corner points in clockwise order starting from
-      #
-      #
-      #
-      #
-      #
-      # When it's clockwise rotated 180 degrees around the top-left corner it
-      # becomes:
-      # 2----3
-      # | |
-      # 1----0
-      # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
-      # than 0, or greater than 1 due to trignometric calculations for location of
-      # the box.
+      # Contains list of the corner points in clockwise order starting from top-left
+      # corner. For example, for a rectangular bounding box: When the text is
+      # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+      # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+      # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+      # or greater than 1 due to trignometric calculations for location of the box.
       class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
         include Google::Apis::Core::Hashable
 
@@ -1439,9 +1635,8 @@ module Google
         end
       end
 
-      # A vertex represents a 2D point in the image.
-      #
-      # and range from 0 to 1.
+      # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+      # coordinates are relative to the original image and range from 0 to 1.
       class GoogleCloudVideointelligenceV1beta2NormalizedVertex
         include Google::Apis::Core::Hashable
 
@@ -1480,10 +1675,10 @@ module Google
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity]
         attr_accessor :entity
 
-        # Information corresponding to all frames where this object track appears.
-        #
-        #
-        #
+        # Information corresponding to all frames where this object track appears. Non-
+        # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+        # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+        # frames.
         # Corresponds to the JSON property `frames`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
         attr_accessor :frames
@@ -1493,12 +1688,11 @@ module Google
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
         attr_accessor :segment
 
-        # Streaming mode ONLY.
-        #
-        #
-        #
-        #
-        # ObjectTrackAnnotation of the same track_id over time.
+        # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+        # tracked object before it is completed. Hence, there is no VideoSegment info
+        # returned. Instead, we provide a unique identifiable integer track_id so that
+        # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+        # of the same track_id over time.
         # Corresponds to the JSON property `trackId`
         # @return [Fixnum]
         attr_accessor :track_id
@@ -1528,9 +1722,8 @@ module Google
       class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
         include Google::Apis::Core::Hashable
 
-        # Normalized bounding box.
-        #
-        # Range: [0, 1].
+        # Normalized bounding box. The normalized vertex coordinates are relative to the
+        # original image. Range: [0, 1].
         # Corresponds to the JSON property `normalizedBoundingBox`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
         attr_accessor :normalized_bounding_box
@@ -1551,16 +1744,41 @@ module Google
         end
       end
 
+      # Person detection annotation per video.
+      class GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation
+        include Google::Apis::Core::Hashable
+
+        # The detected tracks of a person.
+        # Corresponds to the JSON property `tracks`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Track>]
+        attr_accessor :tracks
+
+        # Feature version.
+        # Corresponds to the JSON property `version`
+        # @return [String]
+        attr_accessor :version
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @tracks = args[:tracks] if args.key?(:tracks)
+          @version = args[:version] if args.key?(:version)
+        end
+      end
+
       # Alternative hypotheses (a.k.a. n-best list).
       class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative
         include Google::Apis::Core::Hashable
 
         # Output only. The confidence estimate between 0.0 and 1.0. A higher number
         # indicates an estimated greater likelihood that the recognized words are
-        # correct. This field is set only for the top alternative.
-        #
-        #
-        #
+        # correct. This field is set only for the top alternative. This field is not
+        # guaranteed to be accurate and users should not rely on it to be always
+        # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+        # not set.
         # Corresponds to the JSON property `confidence`
         # @return [Float]
         attr_accessor :confidence
@@ -1571,8 +1789,8 @@ module Google
|
|
1571
1789
|
attr_accessor :transcript
|
1572
1790
|
|
1573
1791
|
# Output only. A list of word-specific information for each recognized word.
|
1574
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
1575
|
-
#
|
1792
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
1793
|
+
# words from the beginning of the audio.
|
1576
1794
|
# Corresponds to the JSON property `words`
|
1577
1795
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2WordInfo>]
|
1578
1796
|
attr_accessor :words
|
@@ -1593,18 +1811,17 @@ module Google
       class GoogleCloudVideointelligenceV1beta2SpeechTranscription
         include Google::Apis::Core::Hashable

-        # May contain one or more recognition hypotheses (up to the maximum specified
-        #
-        #
-        #
+        # May contain one or more recognition hypotheses (up to the maximum specified in
+        # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+        # the top (first) alternative being the most probable, as ranked by the
+        # recognizer.
         # Corresponds to the JSON property `alternatives`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
         attr_accessor :alternatives

         # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
-        # language tag of
-        #
-        # most likelihood of being spoken in the audio.
+        # language tag of the language in this result. This language code was detected
+        # to have the most likelihood of being spoken in the audio.
         # Corresponds to the JSON property `languageCode`
         # @return [String]
         attr_accessor :language_code
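The rewrapped comments above only restate how the generated speech-transcription payload is shaped; the fields themselves are unchanged. As a rough, hypothetical sketch of consuming them, a caller that already holds a `VideoAnnotationResults` object from a finished annotate operation (here assumed to be `annotation_results`; nothing below ships with the gem) could walk the n-best list like this:

  # Hypothetical sketch: walk the n-best speech hypotheses on an already-fetched
  # results object. `annotation_results` is assumed, not defined by this gem.
  (annotation_results.speech_transcriptions || []).each do |transcription|
    puts "detected language: #{transcription.language_code}"
    (transcription.alternatives || []).each do |alternative|
      # confidence may be 0.0 when the service did not set it (see the comment above)
      puts format('%.2f  %s', alternative.confidence.to_f, alternative.transcript)
    end
  end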
@@ -1653,27 +1870,19 @@ module Google
|
|
1653
1870
|
end
|
1654
1871
|
end
|
1655
1872
|
|
1656
|
-
# Video frame level annotation results for text annotation (OCR).
|
1657
|
-
#
|
1658
|
-
#
|
1873
|
+
# Video frame level annotation results for text annotation (OCR). Contains
|
1874
|
+
# information regarding timestamp and bounding box locations for the frames
|
1875
|
+
# containing detected OCR text snippets.
|
1659
1876
|
class GoogleCloudVideointelligenceV1beta2TextFrame
|
1660
1877
|
include Google::Apis::Core::Hashable
|
1661
1878
|
|
1662
1879
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
1663
|
-
# Contains list of the corner points in clockwise order starting from
|
1664
|
-
#
|
1665
|
-
#
|
1666
|
-
#
|
1667
|
-
#
|
1668
|
-
#
|
1669
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
1670
|
-
# becomes:
|
1671
|
-
# 2----3
|
1672
|
-
# | |
|
1673
|
-
# 1----0
|
1674
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
1675
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
1676
|
-
# the box.
|
1880
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
1881
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
1882
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
1883
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
1884
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
1885
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
1677
1886
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
1678
1887
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
|
1679
1888
|
attr_accessor :rotated_bounding_box
|
@@ -1726,9 +1935,8 @@ module Google
|
|
1726
1935
|
end
|
1727
1936
|
end
|
1728
1937
|
|
1729
|
-
# For tracking related features.
|
1730
|
-
#
|
1731
|
-
# normalized_bounding_box.
|
1938
|
+
# For tracking related features. An object at time_offset with attributes, and
|
1939
|
+
# located with normalized_bounding_box.
|
1732
1940
|
class GoogleCloudVideointelligenceV1beta2TimestampedObject
|
1733
1941
|
include Google::Apis::Core::Hashable
|
1734
1942
|
|
@@ -1742,15 +1950,14 @@ module Google
|
|
1742
1950
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
|
1743
1951
|
attr_accessor :landmarks
|
1744
1952
|
|
1745
|
-
# Normalized bounding box.
|
1746
|
-
#
|
1747
|
-
# Range: [0, 1].
|
1953
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
1954
|
+
# original image. Range: [0, 1].
|
1748
1955
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
1749
1956
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
|
1750
1957
|
attr_accessor :normalized_bounding_box
|
1751
1958
|
|
1752
|
-
# Time-offset, relative to the beginning of the video,
|
1753
|
-
#
|
1959
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
1960
|
+
# video frame for this object.
|
1754
1961
|
# Corresponds to the JSON property `timeOffset`
|
1755
1962
|
# @return [String]
|
1756
1963
|
attr_accessor :time_offset
|
@@ -1809,20 +2016,19 @@ module Google
|
|
1809
2016
|
class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
|
1810
2017
|
include Google::Apis::Core::Hashable
|
1811
2018
|
|
1812
|
-
# Specifies which feature is being tracked if the request contains more than
|
1813
|
-
#
|
2019
|
+
# Specifies which feature is being tracked if the request contains more than one
|
2020
|
+
# feature.
|
1814
2021
|
# Corresponds to the JSON property `feature`
|
1815
2022
|
# @return [String]
|
1816
2023
|
attr_accessor :feature
|
1817
2024
|
|
1818
|
-
# Video file location in
|
1819
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
2025
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
1820
2026
|
# Corresponds to the JSON property `inputUri`
|
1821
2027
|
# @return [String]
|
1822
2028
|
attr_accessor :input_uri
|
1823
2029
|
|
1824
|
-
# Approximate percentage processed thus far. Guaranteed to be
|
1825
|
-
#
|
2030
|
+
# Approximate percentage processed thus far. Guaranteed to be 100 when fully
|
2031
|
+
# processed.
|
1826
2032
|
# Corresponds to the JSON property `progressPercent`
|
1827
2033
|
# @return [Fixnum]
|
1828
2034
|
attr_accessor :progress_percent
|
@@ -1861,31 +2067,40 @@ module Google
       class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
         include Google::Apis::Core::Hashable

-        # The `Status` type defines a logical error model that is suitable for
-        #
-        #
-        #
-        #
-        #
+        # The `Status` type defines a logical error model that is suitable for different
+        # programming environments, including REST APIs and RPC APIs. It is used by [
+        # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+        # data: error code, error message, and error details. You can find out more
+        # about this error model and how to work with it in the [API Design Guide](https:
+        # //cloud.google.com/apis/design/errors).
         # Corresponds to the JSON property `error`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
         attr_accessor :error

-        # Explicit content annotation (based on per-frame visual signals only).
-        #
-        #
+        # Explicit content annotation (based on per-frame visual signals only). If no
+        # explicit content has been detected in a frame, no annotations are present for
+        # that frame.
         # Corresponds to the JSON property `explicitAnnotation`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
         attr_accessor :explicit_annotation

-        #
-        #
+        # Deprecated. Please use `face_detection_annotations` instead.
+        # Corresponds to the JSON property `faceAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2FaceAnnotation>]
+        attr_accessor :face_annotations
+
+        # Face detection annotations.
+        # Corresponds to the JSON property `faceDetectionAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation>]
+        attr_accessor :face_detection_annotations
+
+        # Label annotations on frame level. There is exactly one element for each unique
+        # label.
         # Corresponds to the JSON property `frameLabelAnnotations`
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
         attr_accessor :frame_label_annotations

-        # Video file location in
-        # [Cloud Storage](https://cloud.google.com/storage/).
+        # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri
@@ -1900,6 +2115,11 @@ module Google
         # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation>]
         attr_accessor :object_annotations

+        # Person detection annotations.
+        # Corresponds to the JSON property `personDetectionAnnotations`
+        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation>]
+        attr_accessor :person_detection_annotations
+
         # Video segment.
         # Corresponds to the JSON property `segment`
         # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
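The face- and person-detection fields above are the substantive additions in this hunk. As a hypothetical sketch of reading them off an already-obtained results object (`annotation_results` is assumed, not provided by the diffed file):

  # Hypothetical sketch: the accessors mirror the attr_accessor declarations above.
  (annotation_results.person_detection_annotations || []).each do |person|
    puts "person detection v#{person.version}: #{(person.tracks || []).size} track(s)"
  end

  # face_annotations is deprecated; prefer face_detection_annotations.
  (annotation_results.face_detection_annotations || []).each do |face|
    puts "face detection annotation (feature version #{face.version})"
  end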
@@ -1912,11 +2132,11 @@ module Google
|
|
1912
2132
|
attr_accessor :segment_label_annotations
|
1913
2133
|
|
1914
2134
|
# Presence label annotations on video level or user-specified segment level.
|
1915
|
-
# There is exactly one element for each unique label. Compared to the
|
1916
|
-
#
|
1917
|
-
#
|
1918
|
-
#
|
1919
|
-
#
|
2135
|
+
# There is exactly one element for each unique label. Compared to the existing
|
2136
|
+
# topical `segment_label_annotations`, this field presents more fine-grained,
|
2137
|
+
# segment-level labels detected in video content and is made available only when
|
2138
|
+
# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
|
2139
|
+
# request.
|
1920
2140
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
1921
2141
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
|
1922
2142
|
attr_accessor :segment_presence_label_annotations
|
@@ -1926,17 +2146,17 @@ module Google
|
|
1926
2146
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
|
1927
2147
|
attr_accessor :shot_annotations
|
1928
2148
|
|
1929
|
-
# Topical label annotations on shot level.
|
1930
|
-
#
|
2149
|
+
# Topical label annotations on shot level. There is exactly one element for each
|
2150
|
+
# unique label.
|
1931
2151
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
1932
2152
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
|
1933
2153
|
attr_accessor :shot_label_annotations
|
1934
2154
|
|
1935
2155
|
# Presence label annotations on shot level. There is exactly one element for
|
1936
|
-
# each unique label. Compared to the existing topical
|
1937
|
-
#
|
1938
|
-
#
|
1939
|
-
#
|
2156
|
+
# each unique label. Compared to the existing topical `shot_label_annotations`,
|
2157
|
+
# this field presents more fine-grained, shot-level labels detected in video
|
2158
|
+
# content and is made available only when the client sets `LabelDetectionConfig.
|
2159
|
+
# model` to "builtin/latest" in the request.
|
1940
2160
|
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
|
1941
2161
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
|
1942
2162
|
attr_accessor :shot_presence_label_annotations
|
@@ -1946,9 +2166,8 @@ module Google
|
|
1946
2166
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
|
1947
2167
|
attr_accessor :speech_transcriptions
|
1948
2168
|
|
1949
|
-
# OCR text detection and tracking.
|
1950
|
-
#
|
1951
|
-
# frame information associated with it.
|
2169
|
+
# OCR text detection and tracking. Annotations for list of detected text
|
2170
|
+
# snippets. Each will have list of frame information associated with it.
|
1952
2171
|
# Corresponds to the JSON property `textAnnotations`
|
1953
2172
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
|
1954
2173
|
attr_accessor :text_annotations
|
@@ -1961,10 +2180,13 @@ module Google
|
|
1961
2180
|
def update!(**args)
|
1962
2181
|
@error = args[:error] if args.key?(:error)
|
1963
2182
|
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
|
2183
|
+
@face_annotations = args[:face_annotations] if args.key?(:face_annotations)
|
2184
|
+
@face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
|
1964
2185
|
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
|
1965
2186
|
@input_uri = args[:input_uri] if args.key?(:input_uri)
|
1966
2187
|
@logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
|
1967
2188
|
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
|
2189
|
+
@person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
|
1968
2190
|
@segment = args[:segment] if args.key?(:segment)
|
1969
2191
|
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
|
1970
2192
|
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
|
@@ -1980,14 +2202,14 @@ module Google
|
|
1980
2202
|
class GoogleCloudVideointelligenceV1beta2VideoSegment
|
1981
2203
|
include Google::Apis::Core::Hashable
|
1982
2204
|
|
1983
|
-
# Time-offset, relative to the beginning of the video,
|
1984
|
-
#
|
2205
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
2206
|
+
# of the segment (inclusive).
|
1985
2207
|
# Corresponds to the JSON property `endTimeOffset`
|
1986
2208
|
# @return [String]
|
1987
2209
|
attr_accessor :end_time_offset
|
1988
2210
|
|
1989
|
-
# Time-offset, relative to the beginning of the video,
|
1990
|
-
#
|
2211
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
2212
|
+
# start of the segment (inclusive).
|
1991
2213
|
# Corresponds to the JSON property `startTimeOffset`
|
1992
2214
|
# @return [String]
|
1993
2215
|
attr_accessor :start_time_offset
|
@@ -2004,41 +2226,41 @@ module Google
       end

       # Word-specific information for recognized words. Word information is only
-      # included in the response when certain request parameters are set, such
-      #
+      # included in the response when certain request parameters are set, such as `
+      # enable_word_time_offsets`.
       class GoogleCloudVideointelligenceV1beta2WordInfo
         include Google::Apis::Core::Hashable

         # Output only. The confidence estimate between 0.0 and 1.0. A higher number
         # indicates an estimated greater likelihood that the recognized words are
-        # correct. This field is set only for the top alternative.
-        #
-        #
-        #
+        # correct. This field is set only for the top alternative. This field is not
+        # guaranteed to be accurate and users should not rely on it to be always
+        # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+        # not set.
         # Corresponds to the JSON property `confidence`
         # @return [Float]
         attr_accessor :confidence

-        # Time offset relative to the beginning of the audio, and
-        #
-        #
-        #
+        # Time offset relative to the beginning of the audio, and corresponding to the
+        # end of the spoken word. This field is only set if `enable_word_time_offsets=
+        # true` and only in the top hypothesis. This is an experimental feature and the
+        # accuracy of the time offset can vary.
         # Corresponds to the JSON property `endTime`
         # @return [String]
         attr_accessor :end_time

-        # Output only. A distinct integer value is assigned for every speaker within
-        #
-        #
-        #
+        # Output only. A distinct integer value is assigned for every speaker within the
+        # audio. This field specifies which one of those speakers was detected to have
+        # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+        # only set if speaker diarization is enabled.
         # Corresponds to the JSON property `speakerTag`
         # @return [Fixnum]
         attr_accessor :speaker_tag

-        # Time offset relative to the beginning of the audio, and
-        #
-        #
-        #
+        # Time offset relative to the beginning of the audio, and corresponding to the
+        # start of the spoken word. This field is only set if `enable_word_time_offsets=
+        # true` and only in the top hypothesis. This is an experimental feature and the
+        # accuracy of the time offset can vary.
         # Corresponds to the JSON property `startTime`
         # @return [String]
         attr_accessor :start_time
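The word-level fields above (start_time, end_time, speaker_tag) are what word timing and speaker diarization surface. A hypothetical sketch of printing them, assuming `transcription` is one of the SpeechTranscription objects shown earlier and word info was requested:

  # Hypothetical sketch: per-word timing and speaker tags from the top hypothesis.
  # `word` is the transcribed token on WordInfo (its declaration is outside this hunk).
  top = (transcription.alternatives || []).first
  ((top && top.words) || []).each do |info|
    speaker = info.speaker_tag ? " speaker #{info.speaker_tag}" : ''
    puts "#{info.start_time} - #{info.end_time}#{speaker}: #{info.word}"
  end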
@@ -2062,9 +2284,9 @@ module Google
|
|
2062
2284
|
end
|
2063
2285
|
end
|
2064
2286
|
|
2065
|
-
# Video annotation progress. Included in the `metadata`
|
2066
|
-
#
|
2067
|
-
#
|
2287
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
2288
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
2289
|
+
# service.
|
2068
2290
|
class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
|
2069
2291
|
include Google::Apis::Core::Hashable
|
2070
2292
|
|
@@ -2083,9 +2305,9 @@ module Google
|
|
2083
2305
|
end
|
2084
2306
|
end
|
2085
2307
|
|
2086
|
-
# Video annotation response. Included in the `response`
|
2087
|
-
#
|
2088
|
-
#
|
2308
|
+
# Video annotation response. Included in the `response` field of the `Operation`
|
2309
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
2310
|
+
# service.
|
2089
2311
|
class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
|
2090
2312
|
include Google::Apis::Core::Hashable
|
2091
2313
|
|
@@ -2113,14 +2335,14 @@ module Google
|
|
2113
2335
|
# @return [Float]
|
2114
2336
|
attr_accessor :confidence
|
2115
2337
|
|
2116
|
-
# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
|
2117
|
-
#
|
2338
|
+
# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
|
2339
|
+
# full list of supported type names will be provided in the document.
|
2118
2340
|
# Corresponds to the JSON property `name`
|
2119
2341
|
# @return [String]
|
2120
2342
|
attr_accessor :name
|
2121
2343
|
|
2122
|
-
# Text value of the detection result. For example, the value for "HairColor"
|
2123
|
-
#
|
2344
|
+
# Text value of the detection result. For example, the value for "HairColor" can
|
2345
|
+
# be "black", "blonde", etc.
|
2124
2346
|
# Corresponds to the JSON property `value`
|
2125
2347
|
# @return [String]
|
2126
2348
|
attr_accessor :value
|
@@ -2152,9 +2374,8 @@ module Google
|
|
2152
2374
|
# @return [String]
|
2153
2375
|
attr_accessor :name
|
2154
2376
|
|
2155
|
-
# A vertex represents a 2D point in the image.
|
2156
|
-
#
|
2157
|
-
# and range from 0 to 1.
|
2377
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
2378
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
2158
2379
|
# Corresponds to the JSON property `point`
|
2159
2380
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
|
2160
2381
|
attr_accessor :point
|
@@ -2180,8 +2401,7 @@ module Google
|
|
2180
2401
|
# @return [String]
|
2181
2402
|
attr_accessor :description
|
2182
2403
|
|
2183
|
-
# Opaque entity ID. Some IDs may be available in
|
2184
|
-
# [Google Knowledge Graph Search
|
2404
|
+
# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
|
2185
2405
|
# API](https://developers.google.com/knowledge-graph/).
|
2186
2406
|
# Corresponds to the JSON property `entityId`
|
2187
2407
|
# @return [String]
|
@@ -2204,9 +2424,9 @@ module Google
|
|
2204
2424
|
end
|
2205
2425
|
end
|
2206
2426
|
|
2207
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
2208
|
-
#
|
2209
|
-
#
|
2427
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
2428
|
+
# explicit content has been detected in a frame, no annotations are present for
|
2429
|
+
# that frame.
|
2210
2430
|
class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
|
2211
2431
|
include Google::Apis::Core::Hashable
|
2212
2432
|
|
@@ -2257,14 +2477,110 @@ module Google
|
|
2257
2477
|
end
|
2258
2478
|
end
|
2259
2479
|
|
2480
|
+
# Deprecated. No effect.
|
2481
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceAnnotation
|
2482
|
+
include Google::Apis::Core::Hashable
|
2483
|
+
|
2484
|
+
# All video frames where a face was detected.
|
2485
|
+
# Corresponds to the JSON property `frames`
|
2486
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1FaceFrame>]
|
2487
|
+
attr_accessor :frames
|
2488
|
+
|
2489
|
+
# All video segments where a face was detected.
|
2490
|
+
# Corresponds to the JSON property `segments`
|
2491
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1FaceSegment>]
|
2492
|
+
attr_accessor :segments
|
2493
|
+
|
2494
|
+
# Thumbnail of a representative face view (in JPEG format).
|
2495
|
+
# Corresponds to the JSON property `thumbnail`
|
2496
|
+
# NOTE: Values are automatically base64 encoded/decoded in the client library.
|
2497
|
+
# @return [String]
|
2498
|
+
attr_accessor :thumbnail
|
2499
|
+
|
2500
|
+
def initialize(**args)
|
2501
|
+
update!(**args)
|
2502
|
+
end
|
2503
|
+
|
2504
|
+
# Update properties of this object
|
2505
|
+
def update!(**args)
|
2506
|
+
@frames = args[:frames] if args.key?(:frames)
|
2507
|
+
@segments = args[:segments] if args.key?(:segments)
|
2508
|
+
@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
|
2509
|
+
end
|
2510
|
+
end
|
2511
|
+
|
2512
|
+
# Face detection annotation.
|
2513
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation
|
2514
|
+
include Google::Apis::Core::Hashable
|
2515
|
+
|
2516
|
+
# Feature version.
|
2517
|
+
# Corresponds to the JSON property `version`
|
2518
|
+
# @return [String]
|
2519
|
+
attr_accessor :version
|
2520
|
+
|
2521
|
+
def initialize(**args)
|
2522
|
+
update!(**args)
|
2523
|
+
end
|
2524
|
+
|
2525
|
+
# Update properties of this object
|
2526
|
+
def update!(**args)
|
2527
|
+
@version = args[:version] if args.key?(:version)
|
2528
|
+
end
|
2529
|
+
end
|
2530
|
+
|
2531
|
+
# Deprecated. No effect.
|
2532
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceFrame
|
2533
|
+
include Google::Apis::Core::Hashable
|
2534
|
+
|
2535
|
+
# Normalized Bounding boxes in a frame. There can be more than one boxes if the
|
2536
|
+
# same face is detected in multiple locations within the current frame.
|
2537
|
+
# Corresponds to the JSON property `normalizedBoundingBoxes`
|
2538
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox>]
|
2539
|
+
attr_accessor :normalized_bounding_boxes
|
2540
|
+
|
2541
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
2542
|
+
# video frame for this location.
|
2543
|
+
# Corresponds to the JSON property `timeOffset`
|
2544
|
+
# @return [String]
|
2545
|
+
attr_accessor :time_offset
|
2546
|
+
|
2547
|
+
def initialize(**args)
|
2548
|
+
update!(**args)
|
2549
|
+
end
|
2550
|
+
|
2551
|
+
# Update properties of this object
|
2552
|
+
def update!(**args)
|
2553
|
+
@normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
|
2554
|
+
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
2555
|
+
end
|
2556
|
+
end
|
2557
|
+
|
2558
|
+
# Video segment level annotation results for face detection.
|
2559
|
+
class GoogleCloudVideointelligenceV1p1beta1FaceSegment
|
2560
|
+
include Google::Apis::Core::Hashable
|
2561
|
+
|
2562
|
+
# Video segment.
|
2563
|
+
# Corresponds to the JSON property `segment`
|
2564
|
+
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
|
2565
|
+
attr_accessor :segment
|
2566
|
+
|
2567
|
+
def initialize(**args)
|
2568
|
+
update!(**args)
|
2569
|
+
end
|
2570
|
+
|
2571
|
+
# Update properties of this object
|
2572
|
+
def update!(**args)
|
2573
|
+
@segment = args[:segment] if args.key?(:segment)
|
2574
|
+
end
|
2575
|
+
end
|
2576
|
+
|
2260
2577
|
# Label annotation.
|
2261
2578
|
class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
|
2262
2579
|
include Google::Apis::Core::Hashable
|
2263
2580
|
|
2264
|
-
# Common categories for the detected entity.
|
2265
|
-
#
|
2266
|
-
#
|
2267
|
-
# also be a `pet`.
|
2581
|
+
# Common categories for the detected entity. For example, when the label is `
|
2582
|
+
# Terrier`, the category is likely `dog`. And in some cases there might be more
|
2583
|
+
# than one categories e.g., `Terrier` could also be a `pet`.
|
2268
2584
|
# Corresponds to the JSON property `categoryEntities`
|
2269
2585
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity>]
|
2270
2586
|
attr_accessor :category_entities
|
@@ -2363,14 +2679,14 @@ module Google
|
|
2363
2679
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
|
2364
2680
|
attr_accessor :entity
|
2365
2681
|
|
2366
|
-
# All video segments where the recognized logo appears. There might be
|
2367
|
-
#
|
2682
|
+
# All video segments where the recognized logo appears. There might be multiple
|
2683
|
+
# instances of the same logo class appearing in one VideoSegment.
|
2368
2684
|
# Corresponds to the JSON property `segments`
|
2369
2685
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
|
2370
2686
|
attr_accessor :segments
|
2371
2687
|
|
2372
|
-
# All logo tracks where the recognized logo appears. Each track corresponds
|
2373
|
-
#
|
2688
|
+
# All logo tracks where the recognized logo appears. Each track corresponds to
|
2689
|
+
# one logo instance appearing in consecutive frames.
|
2374
2690
|
# Corresponds to the JSON property `tracks`
|
2375
2691
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
|
2376
2692
|
attr_accessor :tracks
|
@@ -2387,9 +2703,8 @@ module Google
|
|
2387
2703
|
end
|
2388
2704
|
end
|
2389
2705
|
|
2390
|
-
# Normalized bounding box.
|
2391
|
-
#
|
2392
|
-
# Range: [0, 1].
|
2706
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
2707
|
+
# original image. Range: [0, 1].
|
2393
2708
|
class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
|
2394
2709
|
include Google::Apis::Core::Hashable
|
2395
2710
|
|
@@ -2427,20 +2742,12 @@ module Google
|
|
2427
2742
|
end
|
2428
2743
|
|
2429
2744
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
2430
|
-
# Contains list of the corner points in clockwise order starting from
|
2431
|
-
#
|
2432
|
-
#
|
2433
|
-
#
|
2434
|
-
#
|
2435
|
-
#
|
2436
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
2437
|
-
# becomes:
|
2438
|
-
# 2----3
|
2439
|
-
# | |
|
2440
|
-
# 1----0
|
2441
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
2442
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
2443
|
-
# the box.
|
2745
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
2746
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
2747
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
2748
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
2749
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
2750
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
2444
2751
|
class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
|
2445
2752
|
include Google::Apis::Core::Hashable
|
2446
2753
|
|
@@ -2459,9 +2766,8 @@ module Google
|
|
2459
2766
|
end
|
2460
2767
|
end
|
2461
2768
|
|
2462
|
-
# A vertex represents a 2D point in the image.
|
2463
|
-
#
|
2464
|
-
# and range from 0 to 1.
|
2769
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
2770
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
2465
2771
|
class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
|
2466
2772
|
include Google::Apis::Core::Hashable
|
2467
2773
|
|
@@ -2500,10 +2806,10 @@ module Google
|
|
2500
2806
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
|
2501
2807
|
attr_accessor :entity
|
2502
2808
|
|
2503
|
-
# Information corresponding to all frames where this object track appears.
|
2504
|
-
#
|
2505
|
-
#
|
2506
|
-
#
|
2809
|
+
# Information corresponding to all frames where this object track appears. Non-
|
2810
|
+
# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
|
2811
|
+
# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
|
2812
|
+
# frames.
|
2507
2813
|
# Corresponds to the JSON property `frames`
|
2508
2814
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
|
2509
2815
|
attr_accessor :frames
|
@@ -2513,12 +2819,11 @@ module Google
|
|
2513
2819
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
|
2514
2820
|
attr_accessor :segment
|
2515
2821
|
|
2516
|
-
# Streaming mode ONLY.
|
2517
|
-
#
|
2518
|
-
#
|
2519
|
-
#
|
2520
|
-
#
|
2521
|
-
# ObjectTrackAnnotation of the same track_id over time.
|
2822
|
+
# Streaming mode ONLY. In streaming mode, we do not know the end time of a
|
2823
|
+
# tracked object before it is completed. Hence, there is no VideoSegment info
|
2824
|
+
# returned. Instead, we provide a unique identifiable integer track_id so that
|
2825
|
+
# the customers can correlate the results of the ongoing ObjectTrackAnnotation
|
2826
|
+
# of the same track_id over time.
|
2522
2827
|
# Corresponds to the JSON property `trackId`
|
2523
2828
|
# @return [Fixnum]
|
2524
2829
|
attr_accessor :track_id
|
@@ -2548,17 +2853,41 @@ module Google
|
|
2548
2853
|
class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
|
2549
2854
|
include Google::Apis::Core::Hashable
|
2550
2855
|
|
2551
|
-
# Normalized bounding box.
|
2552
|
-
#
|
2553
|
-
# Range: [0, 1].
|
2856
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
2857
|
+
# original image. Range: [0, 1].
|
2554
2858
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
2555
2859
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
|
2556
2860
|
attr_accessor :normalized_bounding_box
|
2557
2861
|
|
2558
|
-
# The timestamp of the frame in microseconds.
|
2559
|
-
# Corresponds to the JSON property `timeOffset`
|
2862
|
+
# The timestamp of the frame in microseconds.
|
2863
|
+
# Corresponds to the JSON property `timeOffset`
|
2864
|
+
# @return [String]
|
2865
|
+
attr_accessor :time_offset
|
2866
|
+
|
2867
|
+
def initialize(**args)
|
2868
|
+
update!(**args)
|
2869
|
+
end
|
2870
|
+
|
2871
|
+
# Update properties of this object
|
2872
|
+
def update!(**args)
|
2873
|
+
@normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
|
2874
|
+
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
2875
|
+
end
|
2876
|
+
end
|
2877
|
+
|
2878
|
+
# Person detection annotation per video.
|
2879
|
+
class GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation
|
2880
|
+
include Google::Apis::Core::Hashable
|
2881
|
+
|
2882
|
+
# The detected tracks of a person.
|
2883
|
+
# Corresponds to the JSON property `tracks`
|
2884
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
|
2885
|
+
attr_accessor :tracks
|
2886
|
+
|
2887
|
+
# Feature version.
|
2888
|
+
# Corresponds to the JSON property `version`
|
2560
2889
|
# @return [String]
|
2561
|
-
attr_accessor :
|
2890
|
+
attr_accessor :version
|
2562
2891
|
|
2563
2892
|
def initialize(**args)
|
2564
2893
|
update!(**args)
|
@@ -2566,8 +2895,8 @@ module Google
|
|
2566
2895
|
|
2567
2896
|
# Update properties of this object
|
2568
2897
|
def update!(**args)
|
2569
|
-
@
|
2570
|
-
@
|
2898
|
+
@tracks = args[:tracks] if args.key?(:tracks)
|
2899
|
+
@version = args[:version] if args.key?(:version)
|
2571
2900
|
end
|
2572
2901
|
end
|
2573
2902
|
|
@@ -2577,10 +2906,10 @@ module Google
|
|
2577
2906
|
|
2578
2907
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
2579
2908
|
# indicates an estimated greater likelihood that the recognized words are
|
2580
|
-
# correct. This field is set only for the top alternative.
|
2581
|
-
#
|
2582
|
-
#
|
2583
|
-
#
|
2909
|
+
# correct. This field is set only for the top alternative. This field is not
|
2910
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
2911
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
2912
|
+
# not set.
|
2584
2913
|
# Corresponds to the JSON property `confidence`
|
2585
2914
|
# @return [Float]
|
2586
2915
|
attr_accessor :confidence
|
@@ -2591,8 +2920,8 @@ module Google
|
|
2591
2920
|
attr_accessor :transcript
|
2592
2921
|
|
2593
2922
|
# Output only. A list of word-specific information for each recognized word.
|
2594
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
2595
|
-
#
|
2923
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
2924
|
+
# words from the beginning of the audio.
|
2596
2925
|
# Corresponds to the JSON property `words`
|
2597
2926
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
|
2598
2927
|
attr_accessor :words
|
@@ -2613,18 +2942,17 @@ module Google
|
|
2613
2942
|
class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
|
2614
2943
|
include Google::Apis::Core::Hashable
|
2615
2944
|
|
2616
|
-
# May contain one or more recognition hypotheses (up to the maximum specified
|
2617
|
-
#
|
2618
|
-
#
|
2619
|
-
#
|
2945
|
+
# May contain one or more recognition hypotheses (up to the maximum specified in
|
2946
|
+
# `max_alternatives`). These alternatives are ordered in terms of accuracy, with
|
2947
|
+
# the top (first) alternative being the most probable, as ranked by the
|
2948
|
+
# recognizer.
|
2620
2949
|
# Corresponds to the JSON property `alternatives`
|
2621
2950
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
|
2622
2951
|
attr_accessor :alternatives
|
2623
2952
|
|
2624
2953
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
2625
|
-
# language tag of
|
2626
|
-
#
|
2627
|
-
# most likelihood of being spoken in the audio.
|
2954
|
+
# language tag of the language in this result. This language code was detected
|
2955
|
+
# to have the most likelihood of being spoken in the audio.
|
2628
2956
|
# Corresponds to the JSON property `languageCode`
|
2629
2957
|
# @return [String]
|
2630
2958
|
attr_accessor :language_code
|
@@ -2673,27 +3001,19 @@ module Google
|
|
2673
3001
|
end
|
2674
3002
|
end
|
2675
3003
|
|
2676
|
-
# Video frame level annotation results for text annotation (OCR).
|
2677
|
-
#
|
2678
|
-
#
|
3004
|
+
# Video frame level annotation results for text annotation (OCR). Contains
|
3005
|
+
# information regarding timestamp and bounding box locations for the frames
|
3006
|
+
# containing detected OCR text snippets.
|
2679
3007
|
class GoogleCloudVideointelligenceV1p1beta1TextFrame
|
2680
3008
|
include Google::Apis::Core::Hashable
|
2681
3009
|
|
2682
3010
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
2683
|
-
# Contains list of the corner points in clockwise order starting from
|
2684
|
-
#
|
2685
|
-
#
|
2686
|
-
#
|
2687
|
-
#
|
2688
|
-
#
|
2689
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
2690
|
-
# becomes:
|
2691
|
-
# 2----3
|
2692
|
-
# | |
|
2693
|
-
# 1----0
|
2694
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
2695
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
2696
|
-
# the box.
|
3011
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
3012
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
3013
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
3014
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
3015
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
3016
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
2697
3017
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
2698
3018
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
|
2699
3019
|
attr_accessor :rotated_bounding_box
|
@@ -2746,9 +3066,8 @@ module Google
|
|
2746
3066
|
end
|
2747
3067
|
end
|
2748
3068
|
|
2749
|
-
# For tracking related features.
|
2750
|
-
#
|
2751
|
-
# normalized_bounding_box.
|
3069
|
+
# For tracking related features. An object at time_offset with attributes, and
|
3070
|
+
# located with normalized_bounding_box.
|
2752
3071
|
class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
|
2753
3072
|
include Google::Apis::Core::Hashable
|
2754
3073
|
|
@@ -2762,15 +3081,14 @@ module Google
|
|
2762
3081
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
|
2763
3082
|
attr_accessor :landmarks
|
2764
3083
|
|
2765
|
-
# Normalized bounding box.
|
2766
|
-
#
|
2767
|
-
# Range: [0, 1].
|
3084
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
3085
|
+
# original image. Range: [0, 1].
|
2768
3086
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
2769
3087
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
|
2770
3088
|
attr_accessor :normalized_bounding_box
|
2771
3089
|
|
2772
|
-
# Time-offset, relative to the beginning of the video,
|
2773
|
-
#
|
3090
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
3091
|
+
# video frame for this object.
|
2774
3092
|
# Corresponds to the JSON property `timeOffset`
|
2775
3093
|
# @return [String]
|
2776
3094
|
attr_accessor :time_offset
|
@@ -2829,20 +3147,19 @@ module Google
|
|
2829
3147
|
class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
|
2830
3148
|
include Google::Apis::Core::Hashable
|
2831
3149
|
|
2832
|
-
# Specifies which feature is being tracked if the request contains more than
|
2833
|
-
#
|
3150
|
+
# Specifies which feature is being tracked if the request contains more than one
|
3151
|
+
# feature.
|
2834
3152
|
# Corresponds to the JSON property `feature`
|
2835
3153
|
# @return [String]
|
2836
3154
|
attr_accessor :feature
|
2837
3155
|
|
2838
|
-
# Video file location in
|
2839
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
3156
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
2840
3157
|
# Corresponds to the JSON property `inputUri`
|
2841
3158
|
# @return [String]
|
2842
3159
|
attr_accessor :input_uri
|
2843
3160
|
|
2844
|
-
# Approximate percentage processed thus far. Guaranteed to be
|
2845
|
-
#
|
3161
|
+
# Approximate percentage processed thus far. Guaranteed to be 100 when fully
|
3162
|
+
# processed.
|
2846
3163
|
# Corresponds to the JSON property `progressPercent`
|
2847
3164
|
# @return [Fixnum]
|
2848
3165
|
attr_accessor :progress_percent
|
@@ -2881,31 +3198,40 @@ module Google
|
|
2881
3198
|
class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
|
2882
3199
|
include Google::Apis::Core::Hashable
|
2883
3200
|
|
2884
|
-
# The `Status` type defines a logical error model that is suitable for
|
2885
|
-
#
|
2886
|
-
#
|
2887
|
-
#
|
2888
|
-
#
|
2889
|
-
#
|
3201
|
+
# The `Status` type defines a logical error model that is suitable for different
|
3202
|
+
# programming environments, including REST APIs and RPC APIs. It is used by [
|
3203
|
+
# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
|
3204
|
+
# data: error code, error message, and error details. You can find out more
|
3205
|
+
# about this error model and how to work with it in the [API Design Guide](https:
|
3206
|
+
# //cloud.google.com/apis/design/errors).
|
2890
3207
|
# Corresponds to the JSON property `error`
|
2891
3208
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
|
2892
3209
|
attr_accessor :error
|
2893
3210
|
|
2894
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
2895
|
-
#
|
2896
|
-
#
|
3211
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
3212
|
+
# explicit content has been detected in a frame, no annotations are present for
|
3213
|
+
# that frame.
|
2897
3214
|
# Corresponds to the JSON property `explicitAnnotation`
|
2898
3215
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
|
2899
3216
|
attr_accessor :explicit_annotation
|
2900
3217
|
|
2901
|
-
#
|
2902
|
-
#
|
3218
|
+
# Deprecated. Please use `face_detection_annotations` instead.
|
3219
|
+
# Corresponds to the JSON property `faceAnnotations`
|
3220
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1FaceAnnotation>]
|
3221
|
+
attr_accessor :face_annotations
|
3222
|
+
|
3223
|
+
# Face detection annotations.
|
3224
|
+
# Corresponds to the JSON property `faceDetectionAnnotations`
|
3225
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation>]
|
3226
|
+
attr_accessor :face_detection_annotations
|
3227
|
+
|
3228
|
+
# Label annotations on frame level. There is exactly one element for each unique
|
3229
|
+
# label.
|
2903
3230
|
# Corresponds to the JSON property `frameLabelAnnotations`
|
2904
3231
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
2905
3232
|
attr_accessor :frame_label_annotations
|
2906
3233
|
|
2907
|
-
# Video file location in
|
2908
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
3234
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
2909
3235
|
# Corresponds to the JSON property `inputUri`
|
2910
3236
|
# @return [String]
|
2911
3237
|
attr_accessor :input_uri
|
@@ -2920,6 +3246,11 @@ module Google
|
|
2920
3246
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation>]
|
2921
3247
|
attr_accessor :object_annotations
|
2922
3248
|
|
3249
|
+
# Person detection annotations.
|
3250
|
+
# Corresponds to the JSON property `personDetectionAnnotations`
|
3251
|
+
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation>]
|
3252
|
+
attr_accessor :person_detection_annotations
|
3253
|
+
|
2923
3254
|
# Video segment.
|
2924
3255
|
# Corresponds to the JSON property `segment`
|
2925
3256
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
|
@@ -2932,11 +3263,11 @@ module Google
|
|
2932
3263
|
attr_accessor :segment_label_annotations
|
2933
3264
|
|
2934
3265
|
# Presence label annotations on video level or user-specified segment level.
|
2935
|
-
# There is exactly one element for each unique label. Compared to the
|
2936
|
-
#
|
2937
|
-
#
|
2938
|
-
#
|
2939
|
-
#
|
3266
|
+
# There is exactly one element for each unique label. Compared to the existing
|
3267
|
+
# topical `segment_label_annotations`, this field presents more fine-grained,
|
3268
|
+
# segment-level labels detected in video content and is made available only when
|
3269
|
+
# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
|
3270
|
+
# request.
|
2940
3271
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
2941
3272
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
2942
3273
|
attr_accessor :segment_presence_label_annotations
|
@@ -2946,17 +3277,17 @@ module Google
|
|
2946
3277
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
|
2947
3278
|
attr_accessor :shot_annotations
|
2948
3279
|
|
2949
|
-
# Topical label annotations on shot level.
|
2950
|
-
#
|
3280
|
+
# Topical label annotations on shot level. There is exactly one element for each
|
3281
|
+
# unique label.
|
2951
3282
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
2952
3283
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
2953
3284
|
attr_accessor :shot_label_annotations
|
2954
3285
|
|
2955
3286
|
# Presence label annotations on shot level. There is exactly one element for
|
2956
|
-
# each unique label. Compared to the existing topical
|
2957
|
-
#
|
2958
|
-
#
|
2959
|
-
#
|
3287
|
+
# each unique label. Compared to the existing topical `shot_label_annotations`,
|
3288
|
+
# this field presents more fine-grained, shot-level labels detected in video
|
3289
|
+
# content and is made available only when the client sets `LabelDetectionConfig.
|
3290
|
+
# model` to "builtin/latest" in the request.
|
2960
3291
|
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
|
2961
3292
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
|
2962
3293
|
attr_accessor :shot_presence_label_annotations
|
@@ -2966,9 +3297,8 @@ module Google
|
|
2966
3297
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
|
2967
3298
|
attr_accessor :speech_transcriptions
|
2968
3299
|
|
2969
|
-
# OCR text detection and tracking.
|
2970
|
-
#
|
2971
|
-
# frame information associated with it.
|
3300
|
+
# OCR text detection and tracking. Annotations for list of detected text
|
3301
|
+
# snippets. Each will have list of frame information associated with it.
|
2972
3302
|
# Corresponds to the JSON property `textAnnotations`
|
2973
3303
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
|
2974
3304
|
attr_accessor :text_annotations
|
@@ -2981,10 +3311,13 @@ module Google
|
|
2981
3311
|
def update!(**args)
|
2982
3312
|
@error = args[:error] if args.key?(:error)
|
2983
3313
|
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
|
3314
|
+
@face_annotations = args[:face_annotations] if args.key?(:face_annotations)
|
3315
|
+
@face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
|
2984
3316
|
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
|
2985
3317
|
@input_uri = args[:input_uri] if args.key?(:input_uri)
|
2986
3318
|
@logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
|
2987
3319
|
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
|
3320
|
+
@person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
|
2988
3321
|
@segment = args[:segment] if args.key?(:segment)
|
2989
3322
|
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
|
2990
3323
|
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
|
@@ -3000,14 +3333,14 @@ module Google
|
|
3000
3333
|
class GoogleCloudVideointelligenceV1p1beta1VideoSegment
|
3001
3334
|
include Google::Apis::Core::Hashable
|
3002
3335
|
|
3003
|
-
# Time-offset, relative to the beginning of the video,
|
3004
|
-
#
|
3336
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
3337
|
+
# of the segment (inclusive).
|
3005
3338
|
# Corresponds to the JSON property `endTimeOffset`
|
3006
3339
|
# @return [String]
|
3007
3340
|
attr_accessor :end_time_offset
|
3008
3341
|
|
3009
|
-
# Time-offset, relative to the beginning of the video,
|
3010
|
-
#
|
3342
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
3343
|
+
# start of the segment (inclusive).
|
3011
3344
|
# Corresponds to the JSON property `startTimeOffset`
|
3012
3345
|
# @return [String]
|
3013
3346
|
attr_accessor :start_time_offset
|
@@ -3024,41 +3357,41 @@ module Google
|
|
3024
3357
|
end
|
3025
3358
|
|
3026
3359
|
# Word-specific information for recognized words. Word information is only
|
3027
|
-
# included in the response when certain request parameters are set, such
|
3028
|
-
#
|
3360
|
+
# included in the response when certain request parameters are set, such as `
|
3361
|
+
# enable_word_time_offsets`.
|
3029
3362
|
class GoogleCloudVideointelligenceV1p1beta1WordInfo
|
3030
3363
|
include Google::Apis::Core::Hashable
|
3031
3364
|
|
3032
3365
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
3033
3366
|
# indicates an estimated greater likelihood that the recognized words are
|
3034
|
-
# correct. This field is set only for the top alternative.
|
3035
|
-
#
|
3036
|
-
#
|
3037
|
-
#
|
3367
|
+
# correct. This field is set only for the top alternative. This field is not
|
3368
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
3369
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
3370
|
+
# not set.
|
3038
3371
|
# Corresponds to the JSON property `confidence`
|
3039
3372
|
# @return [Float]
|
3040
3373
|
attr_accessor :confidence
|
3041
3374
|
|
3042
|
-
# Time offset relative to the beginning of the audio, and
|
3043
|
-
#
|
3044
|
-
#
|
3045
|
-
#
|
3375
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
3376
|
+
# end of the spoken word. This field is only set if `enable_word_time_offsets=
|
3377
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
3378
|
+
# accuracy of the time offset can vary.
|
3046
3379
|
# Corresponds to the JSON property `endTime`
|
3047
3380
|
# @return [String]
|
3048
3381
|
attr_accessor :end_time
|
3049
3382
|
|
3050
|
-
# Output only. A distinct integer value is assigned for every speaker within
|
3051
|
-
#
|
3052
|
-
#
|
3053
|
-
#
|
3383
|
+
# Output only. A distinct integer value is assigned for every speaker within the
|
3384
|
+
# audio. This field specifies which one of those speakers was detected to have
|
3385
|
+
# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
|
3386
|
+
# only set if speaker diarization is enabled.
|
3054
3387
|
# Corresponds to the JSON property `speakerTag`
|
3055
3388
|
# @return [Fixnum]
|
3056
3389
|
attr_accessor :speaker_tag
|
3057
3390
|
|
3058
|
-
# Time offset relative to the beginning of the audio, and
|
3059
|
-
#
|
3060
|
-
#
|
3061
|
-
#
|
3391
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
3392
|
+
# start of the spoken word. This field is only set if `enable_word_time_offsets=
|
3393
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
3394
|
+
# accuracy of the time offset can vary.
|
3062
3395
|
# Corresponds to the JSON property `startTime`
|
3063
3396
|
# @return [String]
|
3064
3397
|
attr_accessor :start_time
|
@@ -3082,9 +3415,9 @@ module Google
|
|
3082
3415
|
end
|
3083
3416
|
end
|
3084
3417
|
|
3085
|
-
# Video annotation progress. Included in the `metadata`
|
3086
|
-
#
|
3087
|
-
#
|
3418
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
3419
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
3420
|
+
# service.
|
3088
3421
|
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
|
3089
3422
|
include Google::Apis::Core::Hashable
|
3090
3423
|
|
@@ -3112,24 +3445,22 @@ module Google
         # @return [Array<String>]
         attr_accessor :features

-        # The video data bytes.
-        # If
-        # If set, `input_uri` must be unset.
+        # The video data bytes. If unset, the input video(s) should be specified via the
+        # `input_uri`. If set, `input_uri` must be unset.
         # Corresponds to the JSON property `inputContent`
         # NOTE: Values are automatically base64 encoded/decoded in the client library.
         # @return [String]
         attr_accessor :input_content

-        # Input video location. Currently, only
-        #
-        #
-        #
-        # google.
-        #
-        #
-        #
-        #
-        # in the request as `input_content`. If set, `input_content` must be unset.
+        # Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
+        # storage/) URIs are supported. URIs must be specified in the following format: `
+        # gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
+        # INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
+        # google.com/storage/docs/request-endpoints). To identify multiple videos, a
+        # video URI may include wildcards in the `object-id`. Supported wildcards: '*'
+        # to match 0 or more characters; '?' to match 1 character. If unset, the input
+        # video should be embedded in the request as `input_content`. If set, `
+        # input_content` must be unset.
         # Corresponds to the JSON property `inputUri`
         # @return [String]
         attr_accessor :input_uri
@@ -3143,11 +3474,11 @@ module Google
         attr_accessor :location_id

         # Optional. Location where the output (in JSON format) should be stored.
-        # Currently, only [Cloud Storage](https://cloud.google.com/storage/)
-        #
-        #
-        #
-        #
+        # Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
+        # supported. These must be specified in the following format: `gs://bucket-id/
+        # object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
+        # more information, see [Request URIs](https://cloud.google.com/storage/docs/
+        # request-endpoints).
         # Corresponds to the JSON property `outputUri`
         # @return [String]
         attr_accessor :output_uri
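Since this hunk documents the request fields, here is a hypothetical end-to-end sketch of building such a request with the gem. The service class and method names (`CloudVideoIntelligenceService`, `annotate_video`) follow the generator's usual conventions and are assumed here rather than taken from this diff; authorization setup is elided.

  require 'google/apis/videointelligence_v1p2beta1'

  vi = Google::Apis::VideointelligenceV1p2beta1

  # input_uri and input_content are mutually exclusive, per the comments above.
  request = vi::GoogleCloudVideointelligenceV1p2beta1AnnotateVideoRequest.new(
    input_uri:  'gs://bucket-id/object-id',
    features:   ['LABEL_DETECTION', 'SPEECH_TRANSCRIPTION'],
    output_uri: 'gs://bucket-id/annotations.json'
  )

  service = vi::CloudVideoIntelligenceService.new
  # service.authorization = ...  (e.g. application default credentials)
  operation = service.annotate_video(request)
  puts operation.name  # long-running operation; poll it for the annotation results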
@@ -3172,9 +3503,9 @@ module Google
  end
  end

- # Video annotation response. Included in the `response`
- #
- #
+ # Video annotation response. Included in the `response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
  class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
  include Google::Apis::Core::Hashable
@@ -3202,14 +3533,14 @@ module Google
  # @return [Float]
  attr_accessor :confidence

- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
- #
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+ # full list of supported type names will be provided in the document.
  # Corresponds to the JSON property `name`
  attr_accessor :name

- # Text value of the detection result. For example, the value for "HairColor"
- #
+ # Text value of the detection result. For example, the value for "HairColor" can
+ # be "black", "blonde", etc.
  # Corresponds to the JSON property `value`
  attr_accessor :value
@@ -3241,9 +3572,8 @@ module Google
  attr_accessor :name

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
  # Corresponds to the JSON property `point`
  attr_accessor :point
@@ -3269,8 +3599,7 @@ module Google
  attr_accessor :description

- # Opaque entity ID. Some IDs may be available in
- # [Google Knowledge Graph Search
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
  # API](https://developers.google.com/knowledge-graph/).
  # Corresponds to the JSON property `entityId`
  # @return [String]
@@ -3293,9 +3622,9 @@ module Google
  end
  end

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
  include Google::Apis::Core::Hashable
@@ -3324,9 +3653,8 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1ExplicitContentDetectionConfig
  include Google::Apis::Core::Hashable

- # Model to use for explicit content detection.
- #
- # "builtin/latest".
+ # Model to use for explicit content detection. Supported values: "builtin/stable"
+ # (the default if unset) and "builtin/latest".
  # Corresponds to the JSON property `model`
  # @return [String]
  attr_accessor :model
@@ -3367,14 +3695,145 @@ module Google
  end
  end

+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p2beta1FaceAnnotation
+ include Google::Apis::Core::Hashable
+
+ # All video frames where a face was detected.
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
+ attr_accessor :thumbnail
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Feature version.
+ attr_accessor :version
+ end
+
+ # Config for FACE_DETECTION.
+ class GoogleCloudVideointelligenceV1p2beta1FaceDetectionConfig
+ include Google::Apis::Core::Hashable
+
+ # Whether to enable face attributes detection, such as glasses, dark_glasses,
+ # mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
+ attr_accessor :include_attributes
+ alias_method :include_attributes?, :include_attributes
+
+ # Whether bounding boxes are included in the face annotation output.
+ attr_accessor :include_bounding_boxes
+ alias_method :include_bounding_boxes?, :include_bounding_boxes
+
+ # Model to use for face detection. Supported values: "builtin/stable" (the
+ # default if unset) and "builtin/latest".
+ attr_accessor :model
+ end
+
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p2beta1FaceFrame
+ include Google::Apis::Core::Hashable
+
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ attr_accessor :normalized_bounding_boxes
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ attr_accessor :time_offset
+ end
+
+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1p2beta1FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ attr_accessor :segment
+ end
+
  # Label annotation.
  class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
  include Google::Apis::Core::Hashable

- # Common categories for the detected entity.
- #
- #
- # also be a `pet`.
+ # Common categories for the detected entity. For example, when the label is `
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
+ # than one categories e.g., `Terrier` could also be a `pet`.
  # Corresponds to the JSON property `categoryEntities`
  attr_accessor :category_entities
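# Illustrative sketch only (not part of the gem diff): one plausible way to build the
# new v1p2beta1 FaceDetectionConfig added above. The class and property names are
# taken from the generated classes in this file; the values and variable names are
# assumptions.
require 'google/apis/videointelligence_v1p2beta1'

api = Google::Apis::VideointelligenceV1p2beta1
face_config = api::GoogleCloudVideointelligenceV1p2beta1FaceDetectionConfig.new(
  model: 'builtin/stable',           # default when unset, per the property comment
  include_bounding_boxes: true,
  include_attributes: true           # ignored if include_bounding_boxes is false
)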
@@ -3417,44 +3876,40 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1LabelDetectionConfig
  include Google::Apis::Core::Hashable

- # The confidence threshold we perform filtering on the labels from
- #
- #
- #
- #
- # the default threshold everytime when we release a new model.
+ # The confidence threshold we perform filtering on the labels from frame-level
+ # detection. If not set, it is set to 0.4 by default. The valid range for this
+ # threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
+ # Note: For best results, follow the default threshold. We will update the
+ # default threshold everytime when we release a new model.
  attr_accessor :frame_confidence_threshold

- # What labels should be detected with LABEL_DETECTION, in addition to
- #
- # If unspecified, defaults to `SHOT_MODE`.
+ # What labels should be detected with LABEL_DETECTION, in addition to video-
+ # level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
  attr_accessor :label_detection_mode

- # Model to use for label detection.
- #
- # "builtin/latest".
+ # Model to use for label detection. Supported values: "builtin/stable" (the
+ # default if unset) and "builtin/latest".
  attr_accessor :model

- # Whether the video has been shot from a stationary (i.e., non-moving)
- #
- #
+ # Whether the video has been shot from a stationary (i.e., non-moving) camera.
+ # When set to true, might improve detection accuracy for moving objects. Should
+ # be used with `SHOT_AND_FRAME_MODE` enabled.
  attr_accessor :stationary_camera
  alias_method :stationary_camera?, :stationary_camera

- # The confidence threshold we perform filtering on the labels from
- #
- #
- #
- #
- # the default threshold everytime when we release a new model.
+ # The confidence threshold we perform filtering on the labels from video-level
+ # and shot-level detections. If not set, it's set to 0.3 by default. The valid
+ # range for this threshold is [0.1, 0.9]. Any value set outside of this range
+ # will be clipped. Note: For best results, follow the default threshold. We will
+ # update the default threshold everytime when we release a new model.
  attr_accessor :video_confidence_threshold
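# Illustrative sketch only (assumed usage, not from the diff): the label-detection
# thresholds described above. Values outside [0.1, 0.9] are clipped by the service.
api = Google::Apis::VideointelligenceV1p2beta1
label_config = api::GoogleCloudVideointelligenceV1p2beta1LabelDetectionConfig.new(
  label_detection_mode: 'SHOT_AND_FRAME_MODE', # assumed enum value; SHOT_MODE is the default
  frame_confidence_threshold: 0.4,             # the documented default
  video_confidence_threshold: 0.3,
  stationary_camera: true,                     # only meaningful with SHOT_AND_FRAME_MODE
  model: 'builtin/stable'
)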
@@ -3533,14 +3988,14 @@ module Google
  attr_accessor :entity

- # All video segments where the recognized logo appears. There might be
- #
+ # All video segments where the recognized logo appears. There might be multiple
+ # instances of the same logo class appearing in one VideoSegment.
  attr_accessor :segments

- # All logo tracks where the recognized logo appears. Each track corresponds
- #
+ # All logo tracks where the recognized logo appears. Each track corresponds to
+ # one logo instance appearing in consecutive frames.
  attr_accessor :tracks
@@ -3557,9 +4012,8 @@ module Google
- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
  include Google::Apis::Core::Hashable
@@ -3597,20 +4051,12 @@ module Google
  # Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
  class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
  include Google::Apis::Core::Hashable
@@ -3629,9 +4075,8 @@ module Google
- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
  class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
  include Google::Apis::Core::Hashable
@@ -3670,10 +4115,10 @@ module Google
  attr_accessor :entity

- # Information corresponding to all frames where this object track appears.
- #
- #
- #
+ # Information corresponding to all frames where this object track appears. Non-
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+ # frames.
  attr_accessor :frames
@@ -3683,12 +4128,11 @@ module Google
  attr_accessor :segment

- # Streaming mode ONLY.
- #
- #
- #
- #
- # ObjectTrackAnnotation of the same track_id over time.
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+ # tracked object before it is completed. Hence, there is no VideoSegment info
+ # returned. Instead, we provide a unique identifiable integer track_id so that
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+ # of the same track_id over time.
  attr_accessor :track_id
@@ -3717,9 +4161,8 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingConfig

- # Model to use for object tracking.
- #
- # "builtin/latest".
+ # Model to use for object tracking. Supported values: "builtin/stable" (the
+ # default if unset) and "builtin/latest".
  attr_accessor :model
@@ -3739,9 +4182,8 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
  attr_accessor :normalized_bounding_box
@@ -3762,13 +4204,74 @@ module Google
  end
  end

+ # Person detection annotation per video.
+ class GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # The detected tracks of a person.
+ attr_accessor :tracks
+
+ # Feature version.
+ attr_accessor :version
+ end
+
+ # Config for PERSON_DETECTION.
+ class GoogleCloudVideointelligenceV1p2beta1PersonDetectionConfig
+ include Google::Apis::Core::Hashable
+
+ # Whether to enable person attributes detection, such as cloth color (black,
+ # blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, etc.
+ # Ignored if 'include_bounding_boxes' is set to false.
+ attr_accessor :include_attributes
+ alias_method :include_attributes?, :include_attributes
+
+ # Whether bounding boxes are included in the person detection annotation output.
+ attr_accessor :include_bounding_boxes
+ alias_method :include_bounding_boxes?, :include_bounding_boxes
+
+ # Whether to enable pose landmarks detection. Ignored if 'include_bounding_boxes'
+ # is set to false.
+ attr_accessor :include_pose_landmarks
+ alias_method :include_pose_landmarks?, :include_pose_landmarks
+ end
+
  # Config for SHOT_CHANGE_DETECTION.
  class GoogleCloudVideointelligenceV1p2beta1ShotChangeDetectionConfig
  include Google::Apis::Core::Hashable

- # Model to use for shot change detection.
- #
- # "builtin/latest".
+ # Model to use for shot change detection. Supported values: "builtin/stable" (
+ # the default if unset) and "builtin/latest".
  # Corresponds to the JSON property `model`
  attr_accessor :model
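# Illustrative sketch only (assumed usage): the new PersonDetectionConfig added above.
api = Google::Apis::VideointelligenceV1p2beta1
person_config = api::GoogleCloudVideointelligenceV1p2beta1PersonDetectionConfig.new(
  include_bounding_boxes: true,
  include_pose_landmarks: true,  # ignored if include_bounding_boxes is false
  include_attributes: false
)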
@@ -3788,12 +4291,12 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1SpeechContext
  include Google::Apis::Core::Hashable

- # Optional. A list of strings containing words and phrases "hints" so that
- #
- #
- #
- #
- #
+ # Optional. A list of strings containing words and phrases "hints" so that the
+ # speech recognition is more likely to recognize them. This can be used to
+ # improve the accuracy for specific words and phrases, for example, if specific
+ # commands are typically spoken by the user. This can also be used to add
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
+ # //cloud.google.com/speech/limits#content).
  # @return [Array<String>]
  attr_accessor :phrases
@@ -3814,10 +4317,10 @@ module Google
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
  attr_accessor :confidence
@@ -3828,8 +4331,8 @@ module Google
  attr_accessor :transcript

  # Output only. A list of word-specific information for each recognized word.
- # Note: When `enable_speaker_diarization` is set to true, you will see all
- #
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
+ # words from the beginning of the audio.
  attr_accessor :words
@@ -3850,18 +4353,17 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
  include Google::Apis::Core::Hashable

- # May contain one or more recognition hypotheses (up to the maximum specified
- #
- #
- #
+ # May contain one or more recognition hypotheses (up to the maximum specified in
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+ # the top (first) alternative being the most probable, as ranked by the
+ # recognizer.
  attr_accessor :alternatives

  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- # language tag of
- #
- # most likelihood of being spoken in the audio.
+ # language tag of the language in this result. This language code was detected
+ # to have the most likelihood of being spoken in the audio.
  attr_accessor :language_code
@@ -3888,66 +4390,62 @@ module Google
  attr_accessor :audio_tracks

  # Optional. If set, specifies the estimated number of speakers in the
- # conversation.
- #
- # Ignored unless enable_speaker_diarization is set to true.
+ # conversation. If not set, defaults to '2'. Ignored unless
+ # enable_speaker_diarization is set to true.
  attr_accessor :diarization_speaker_count

- # Optional. If 'true', adds punctuation to recognition result hypotheses.
- #
- #
- #
- #
- #
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
+ # feature is only available in select languages. Setting this for requests in
+ # other languages has no effect at all. The default 'false' value does not add
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
+ # experimental service, complimentary to all users. In the future this may be
+ # exclusively available as a premium feature."
  attr_accessor :enable_automatic_punctuation
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation

- # Optional. If 'true', enables speaker detection for each recognized word in
- #
- #
- #
- #
- #
- # identify the speakers in the conversation over time.
+ # Optional. If 'true', enables speaker detection for each recognized word in the
+ # top alternative of the recognition result using a speaker_tag provided in the
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
+ # the audio for the top alternative in every consecutive response. This is done
+ # in order to improve our speaker tags as our models learn to identify the
+ # speakers in the conversation over time.
  attr_accessor :enable_speaker_diarization
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization

  # Optional. If `true`, the top result includes a list of words and the
- # confidence for those words. If `false`, no word-level confidence
- #
+ # confidence for those words. If `false`, no word-level confidence information
+ # is returned. The default is `false`.
  attr_accessor :enable_word_confidence
  alias_method :enable_word_confidence?, :enable_word_confidence

- # Optional. If set to `true`, the server will attempt to filter out
- #
- #
- # won't be filtered out.
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
+ # replacing all but the initial character in each filtered word with asterisks,
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
  attr_accessor :filter_profanity
  alias_method :filter_profanity?, :filter_profanity

- # Required. *Required* The language of the supplied audio as a
- #
- #
- #
- # for a list of the currently supported language codes.
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
+ # of the currently supported language codes.
  attr_accessor :language_code

  # Optional. Maximum number of recognition hypotheses to be returned.
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- # within each `SpeechTranscription`. The server may return fewer than
- #
+ # within each `SpeechTranscription`. The server may return fewer than `
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
  # return a maximum of one. If omitted, will return a maximum of one.
  # Corresponds to the JSON property `maxAlternatives`
  # @return [Fixnum]
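# Illustrative sketch only (assumed usage): a speech-transcription config using the
# properties documented in the hunk above.
api = Google::Apis::VideointelligenceV1p2beta1
speech_config = api::GoogleCloudVideointelligenceV1p2beta1SpeechTranscriptionConfig.new(
  language_code: 'en-US',              # required, BCP-47
  enable_automatic_punctuation: true,
  enable_speaker_diarization: true,
  diarization_speaker_count: 2,        # the documented default
  max_alternatives: 1
)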
@@ -4014,16 +4512,15 @@ module Google
  include Google::Apis::Core::Hashable

  # Language hint can be specified if the language to be detected is known a
- # priori. It can increase the accuracy of the detection. Language hint must
- #
- #
+ # priori. It can increase the accuracy of the detection. Language hint must be
+ # language code in BCP-47 format. Automatic language detection is performed if
+ # no hint is provided.
  attr_accessor :language_hints

- # Model to use for text detection.
- #
- # "builtin/latest".
+ # Model to use for text detection. Supported values: "builtin/stable" (the
+ # default if unset) and "builtin/latest".
  attr_accessor :model
@@ -4039,27 +4536,19 @@ module Google
- # Video frame level annotation results for text annotation (OCR).
- #
- #
+ # Video frame level annotation results for text annotation (OCR). Contains
+ # information regarding timestamp and bounding box locations for the frames
+ # containing detected OCR text snippets.
  class GoogleCloudVideointelligenceV1p2beta1TextFrame
  include Google::Apis::Core::Hashable

  # Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
  attr_accessor :rotated_bounding_box
@@ -4112,9 +4601,8 @@ module Google
  end
  end

- # For tracking related features.
- #
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
  class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
  include Google::Apis::Core::Hashable
@@ -4128,15 +4616,14 @@ module Google
  attr_accessor :landmarks

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
  attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
  attr_accessor :time_offset
@@ -4195,20 +4682,19 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
  include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- #
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
  attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- #
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
  attr_accessor :progress_percent
@@ -4247,31 +4733,40 @@ module Google
  class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
  include Google::Apis::Core::Hashable

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  attr_accessor :explicit_annotation

- #
- #
+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ attr_accessor :face_annotations
+
+ # Face detection annotations.
+ # Corresponds to the JSON property `faceDetectionAnnotations`
+ attr_accessor :face_detection_annotations
+
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
  attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  attr_accessor :input_uri
@@ -4286,6 +4781,11 @@ module Google
  attr_accessor :object_annotations

+ # Person detection annotations.
+ # Corresponds to the JSON property `personDetectionAnnotations`
+ attr_accessor :person_detection_annotations
+
  # Video segment.
@@ -4298,11 +4798,11 @@ module Google
  attr_accessor :segment_label_annotations

  # Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- #
- #
- #
- #
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
  attr_accessor :segment_presence_label_annotations
@@ -4312,17 +4812,17 @@ module Google
  attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- #
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
  attr_accessor :shot_label_annotations

  # Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- #
- #
- #
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
  attr_accessor :shot_presence_label_annotations
@@ -4332,9 +4832,8 @@ module Google
  attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- #
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
  attr_accessor :text_annotations
@@ -4347,10 +4846,13 @@ module Google
  @error = args[:error] if args.key?(:error)
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
  @input_uri = args[:input_uri] if args.key?(:input_uri)
  @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
  @segment = args[:segment] if args.key?(:segment)
  @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
  @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -4371,6 +4873,11 @@ module Google
  attr_accessor :explicit_content_detection_config

+ # Config for FACE_DETECTION.
+ # Corresponds to the JSON property `faceDetectionConfig`
+ attr_accessor :face_detection_config
+
  # Config for LABEL_DETECTION.
@@ -4381,9 +4888,14 @@ module Google
  attr_accessor :object_tracking_config

- #
- # to
- #
+ # Config for PERSON_DETECTION.
+ # Corresponds to the JSON property `personDetectionConfig`
+ attr_accessor :person_detection_config
+
+ # Video segments to annotate. The segments may overlap and are not required to
+ # be contiguous or span the whole video. If unspecified, each video is treated
+ # as a single segment.
  attr_accessor :segments
@@ -4410,8 +4922,10 @@ module Google
  def update!(**args)
  @explicit_content_detection_config = args[:explicit_content_detection_config] if args.key?(:explicit_content_detection_config)
+ @face_detection_config = args[:face_detection_config] if args.key?(:face_detection_config)
  @label_detection_config = args[:label_detection_config] if args.key?(:label_detection_config)
  @object_tracking_config = args[:object_tracking_config] if args.key?(:object_tracking_config)
+ @person_detection_config = args[:person_detection_config] if args.key?(:person_detection_config)
  @segments = args[:segments] if args.key?(:segments)
  @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config)
  @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config)
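# Illustrative sketch only: wiring the new configs into a request. The VideoContext
# class name and the `video_context`/`features` request properties are assumptions
# (they are not shown in this hunk); `face_config`, `person_config` and
# `speech_config` refer to the earlier sketches.
api = Google::Apis::VideointelligenceV1p2beta1
context = api::GoogleCloudVideointelligenceV1p2beta1VideoContext.new(
  face_detection_config: face_config,
  person_detection_config: person_config,
  speech_transcription_config: speech_config
)
request = api::GoogleCloudVideointelligenceV1p2beta1AnnotateVideoRequest.new(
  input_uri: 'gs://bucket-id/object-id',
  features: ['FACE_DETECTION', 'PERSON_DETECTION', 'SPEECH_TRANSCRIPTION'], # assumed feature enum names
  video_context: context
)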
@@ -4423,14 +4937,14 @@ module Google
|
|
4423
4937
|
class GoogleCloudVideointelligenceV1p2beta1VideoSegment
|
4424
4938
|
include Google::Apis::Core::Hashable
|
4425
4939
|
|
4426
|
-
# Time-offset, relative to the beginning of the video,
|
4427
|
-
#
|
4940
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
4941
|
+
# of the segment (inclusive).
|
4428
4942
|
# Corresponds to the JSON property `endTimeOffset`
|
4429
4943
|
# @return [String]
|
4430
4944
|
attr_accessor :end_time_offset
|
4431
4945
|
|
4432
|
-
# Time-offset, relative to the beginning of the video,
|
4433
|
-
#
|
4946
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
4947
|
+
# start of the segment (inclusive).
|
4434
4948
|
# Corresponds to the JSON property `startTimeOffset`
|
4435
4949
|
# @return [String]
|
4436
4950
|
attr_accessor :start_time_offset
|
@@ -4447,41 +4961,41 @@ module Google
|
|
4447
4961
|
end
|
4448
4962
|
|
4449
4963
|
# Word-specific information for recognized words. Word information is only
|
4450
|
-
# included in the response when certain request parameters are set, such
|
4451
|
-
#
|
4964
|
+
# included in the response when certain request parameters are set, such as `
|
4965
|
+
# enable_word_time_offsets`.
|
4452
4966
|
class GoogleCloudVideointelligenceV1p2beta1WordInfo
|
4453
4967
|
include Google::Apis::Core::Hashable
|
4454
4968
|
|
4455
4969
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
4456
4970
|
# indicates an estimated greater likelihood that the recognized words are
|
4457
|
-
# correct. This field is set only for the top alternative.
|
4458
|
-
#
|
4459
|
-
#
|
4460
|
-
#
|
4971
|
+
# correct. This field is set only for the top alternative. This field is not
|
4972
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
4973
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
4974
|
+
# not set.
|
4461
4975
|
# Corresponds to the JSON property `confidence`
|
4462
4976
|
# @return [Float]
|
4463
4977
|
attr_accessor :confidence
|
4464
4978
|
|
4465
|
-
# Time offset relative to the beginning of the audio, and
|
4466
|
-
#
|
4467
|
-
#
|
4468
|
-
#
|
4979
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
4980
|
+
# end of the spoken word. This field is only set if `enable_word_time_offsets=
|
4981
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
4982
|
+
# accuracy of the time offset can vary.
|
4469
4983
|
# Corresponds to the JSON property `endTime`
|
4470
4984
|
# @return [String]
|
4471
4985
|
attr_accessor :end_time
|
4472
4986
|
|
4473
|
-
# Output only. A distinct integer value is assigned for every speaker within
|
4474
|
-
#
|
4475
|
-
#
|
4476
|
-
#
|
4987
|
+
# Output only. A distinct integer value is assigned for every speaker within the
|
4988
|
+
# audio. This field specifies which one of those speakers was detected to have
|
4989
|
+
# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
|
4990
|
+
# only set if speaker diarization is enabled.
|
4477
4991
|
# Corresponds to the JSON property `speakerTag`
|
4478
4992
|
# @return [Fixnum]
|
4479
4993
|
attr_accessor :speaker_tag
|
4480
4994
|
|
4481
|
-
# Time offset relative to the beginning of the audio, and
|
4482
|
-
#
|
4483
|
-
#
|
4484
|
-
#
|
4995
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
4996
|
+
# start of the spoken word. This field is only set if `enable_word_time_offsets=
|
4997
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
4998
|
+
# accuracy of the time offset can vary.
|
4485
4999
|
# Corresponds to the JSON property `startTime`
|
4486
5000
|
# @return [String]
|
4487
5001
|
attr_accessor :start_time
|
@@ -4505,9 +5019,9 @@ module Google
|
|
4505
5019
|
end
|
4506
5020
|
end
|
4507
5021
|
|
4508
|
-
# Video annotation progress. Included in the `metadata`
|
4509
|
-
#
|
4510
|
-
#
|
5022
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
5023
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
5024
|
+
# service.
|
4511
5025
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
|
4512
5026
|
include Google::Apis::Core::Hashable
|
4513
5027
|
|
@@ -4526,9 +5040,9 @@ module Google
|
|
4526
5040
|
end
|
4527
5041
|
end
|
4528
5042
|
|
4529
|
-
# Video annotation response. Included in the `response`
|
4530
|
-
#
|
4531
|
-
#
|
5043
|
+
# Video annotation response. Included in the `response` field of the `Operation`
|
5044
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
5045
|
+
# service.
|
4532
5046
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
|
4533
5047
|
include Google::Apis::Core::Hashable
|
4534
5048
|
|
@@ -4562,10 +5076,9 @@ module Google
|
|
4562
5076
|
# @return [String]
|
4563
5077
|
attr_accessor :display_name
|
4564
5078
|
|
4565
|
-
# The resource name of the celebrity. Have the format
|
4566
|
-
#
|
4567
|
-
#
|
4568
|
-
# celebrity.
|
5079
|
+
# The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
|
5080
|
+
# indicates a celebrity from preloaded gallery. kg-mid is the id in Google
|
5081
|
+
# knowledge graph, which is unique for the celebrity.
|
4569
5082
|
# Corresponds to the JSON property `name`
|
4570
5083
|
# @return [String]
|
4571
5084
|
attr_accessor :name
|
@@ -4586,8 +5099,8 @@ module Google
|
|
4586
5099
|
class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
|
4587
5100
|
include Google::Apis::Core::Hashable
|
4588
5101
|
|
4589
|
-
# The tracks detected from the input video, including recognized celebrities
|
4590
|
-
#
|
5102
|
+
# The tracks detected from the input video, including recognized celebrities and
|
5103
|
+
# other detected faces in the video.
|
4591
5104
|
# Corresponds to the JSON property `celebrityTracks`
|
4592
5105
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
|
4593
5106
|
attr_accessor :celebrity_tracks
|
@@ -4643,14 +5156,14 @@ module Google
|
|
4643
5156
|
# @return [Float]
|
4644
5157
|
attr_accessor :confidence
|
4645
5158
|
|
4646
|
-
# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
|
4647
|
-
#
|
5159
|
+
# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
|
5160
|
+
# full list of supported type names will be provided in the document.
|
4648
5161
|
# Corresponds to the JSON property `name`
|
4649
5162
|
# @return [String]
|
4650
5163
|
attr_accessor :name
|
4651
5164
|
|
4652
|
-
# Text value of the detection result. For example, the value for "HairColor"
|
4653
|
-
#
|
5165
|
+
# Text value of the detection result. For example, the value for "HairColor" can
|
5166
|
+
# be "black", "blonde", etc.
|
4654
5167
|
# Corresponds to the JSON property `value`
|
4655
5168
|
# @return [String]
|
4656
5169
|
attr_accessor :value
|
@@ -4682,9 +5195,8 @@ module Google
|
|
4682
5195
|
# @return [String]
|
4683
5196
|
attr_accessor :name
|
4684
5197
|
|
4685
|
-
# A vertex represents a 2D point in the image.
|
4686
|
-
#
|
4687
|
-
# and range from 0 to 1.
|
5198
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
5199
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
4688
5200
|
# Corresponds to the JSON property `point`
|
4689
5201
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
|
4690
5202
|
attr_accessor :point
|
@@ -4710,8 +5222,7 @@ module Google
|
|
4710
5222
|
# @return [String]
|
4711
5223
|
attr_accessor :description
|
4712
5224
|
|
4713
|
-
# Opaque entity ID. Some IDs may be available in
|
4714
|
-
# [Google Knowledge Graph Search
|
5225
|
+
# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
|
4715
5226
|
# API](https://developers.google.com/knowledge-graph/).
|
4716
5227
|
# Corresponds to the JSON property `entityId`
|
4717
5228
|
# @return [String]
|
@@ -4734,9 +5245,9 @@ module Google
|
|
4734
5245
|
end
|
4735
5246
|
end
|
4736
5247
|
|
4737
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
4738
|
-
#
|
4739
|
-
#
|
5248
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
5249
|
+
# explicit content has been detected in a frame, no annotations are present for
|
5250
|
+
# that frame.
|
4740
5251
|
class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
|
4741
5252
|
include Google::Apis::Core::Hashable
|
4742
5253
|
|
@@ -4787,20 +5298,41 @@ module Google
end
end

- #
- class
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p3beta1FaceAnnotation
include Google::Apis::Core::Hashable

- #
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
# Corresponds to the JSON property `thumbnail`
# NOTE: Values are automatically base64 encoded/decoded in the client library.
# @return [String]
attr_accessor :thumbnail

-
-
-
-
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable

# Feature version.
# Corresponds to the JSON property `version`
@@ -4813,20 +5345,63 @@ module Google

# Update properties of this object
def update!(**args)
- @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
- @tracks = args[:tracks] if args.key?(:tracks)
@version = args[:version] if args.key?(:version)
end
end

+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p3beta1FaceFrame
+ include Google::Apis::Core::Hashable
+
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
+ # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox>]
+ attr_accessor :normalized_bounding_boxes
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1p3beta1FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ # Corresponds to the JSON property `segment`
+ # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
+ attr_accessor :segment
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @segment = args[:segment] if args.key?(:segment)
+ end
+ end
+
# Label annotation.
class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
include Google::Apis::Core::Hashable

- # Common categories for the detected entity.
- #
- #
- # also be a `pet`.
+ # Common categories for the detected entity. For example, when the label is `
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
+ # than one categories e.g., `Terrier` could also be a `pet`.
# Corresponds to the JSON property `categoryEntities`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity>]
attr_accessor :category_entities
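The regenerated face classes above are plain `Google::Apis::Core::Hashable` objects, so their keyword constructors map one-to-one onto the `attr_accessor`s shown in the hunk. A minimal sketch of that behaviour, assuming the monolithic google-api-client gem is on the load path and using illustrative values only:

```ruby
require 'google/apis/videointelligence_v1p2beta1'

Vi = Google::Apis::VideointelligenceV1p2beta1

# Keyword arguments are routed through update!, so each recognized key
# lands in the matching attr_accessor and unknown keys are simply ignored.
segment = Vi::GoogleCloudVideointelligenceV1p3beta1VideoSegment.new(
  start_time_offset: '0s',
  end_time_offset: '2.5s'
)
face_segment = Vi::GoogleCloudVideointelligenceV1p3beta1FaceSegment.new(segment: segment)

puts face_segment.segment.end_time_offset # => "2.5s"
```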
@@ -4925,14 +5500,14 @@ module Google
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
attr_accessor :entity

- # All video segments where the recognized logo appears. There might be
- #
+ # All video segments where the recognized logo appears. There might be multiple
+ # instances of the same logo class appearing in one VideoSegment.
# Corresponds to the JSON property `segments`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
attr_accessor :segments

- # All logo tracks where the recognized logo appears. Each track corresponds
- #
+ # All logo tracks where the recognized logo appears. Each track corresponds to
+ # one logo instance appearing in consecutive frames.
# Corresponds to the JSON property `tracks`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
attr_accessor :tracks
@@ -4949,9 +5524,8 @@ module Google
end
end

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
include Google::Apis::Core::Hashable

@@ -4989,20 +5563,12 @@ module Google
end

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
include Google::Apis::Core::Hashable

@@ -5021,9 +5587,8 @@ module Google
end
end

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
include Google::Apis::Core::Hashable

@@ -5062,10 +5627,10 @@ module Google
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
attr_accessor :entity

- # Information corresponding to all frames where this object track appears.
- #
- #
- #
+ # Information corresponding to all frames where this object track appears. Non-
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+ # frames.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
attr_accessor :frames
@@ -5075,12 +5640,11 @@ module Google
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
attr_accessor :segment

- # Streaming mode ONLY.
- #
- #
- #
- #
- # ObjectTrackAnnotation of the same track_id over time.
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+ # tracked object before it is completed. Hence, there is no VideoSegment info
+ # returned. Instead, we provide a unique identifiable integer track_id so that
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+ # of the same track_id over time.
# Corresponds to the JSON property `trackId`
# @return [Fixnum]
attr_accessor :track_id
@@ -5110,9 +5674,8 @@ module Google
class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
include Google::Apis::Core::Hashable

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box
@@ -5189,10 +5752,10 @@ module Google

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence
@@ -5203,8 +5766,8 @@ module Google
attr_accessor :transcript

# Output only. A list of word-specific information for each recognized word.
- # Note: When `enable_speaker_diarization` is set to true, you will see all
- #
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
+ # words from the beginning of the audio.
# Corresponds to the JSON property `words`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
attr_accessor :words
@@ -5225,18 +5788,17 @@ module Google
class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
include Google::Apis::Core::Hashable

- # May contain one or more recognition hypotheses (up to the maximum specified
- #
- #
- #
+ # May contain one or more recognition hypotheses (up to the maximum specified in
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+ # the top (first) alternative being the most probable, as ranked by the
+ # recognizer.
# Corresponds to the JSON property `alternatives`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
attr_accessor :alternatives

# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- # language tag of
- #
- # most likelihood of being spoken in the audio.
+ # language tag of the language in this result. This language code was detected
+ # to have the most likelihood of being spoken in the audio.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code
@@ -5252,32 +5814,32 @@ module Google
end
end

- # `StreamingAnnotateVideoResponse` is the only message returned to the client
- #
- #
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
+ # `StreamingAnnotateVideo`. A series of zero or more `
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
include Google::Apis::Core::Hashable

- # Streaming annotation results corresponding to a portion of the video
- #
+ # Streaming annotation results corresponding to a portion of the video that is
+ # currently being processed. Only ONE type of annotation will be specified in
+ # the response.
# Corresponds to the JSON property `annotationResults`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
attr_accessor :annotation_results

- # Google Cloud Storage URI that stores annotation results of one
- #
- #
- # from the request followed by '/cloud_project_number-session_id'.
+ # Google Cloud Storage URI that stores annotation results of one streaming
+ # session in JSON format. It is the annotation_result_storage_directory from the
+ # request followed by '/cloud_project_number-session_id'.
# Corresponds to the JSON property `annotationResultsUri`
# @return [String]
attr_accessor :annotation_results_uri

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
attr_accessor :error
@@ -5294,18 +5856,24 @@ module Google
end
end

- # Streaming annotation results corresponding to a portion of the video
- #
+ # Streaming annotation results corresponding to a portion of the video that is
+ # currently being processed. Only ONE type of annotation will be specified in
+ # the response.
class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
include Google::Apis::Core::Hashable

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
# Corresponds to the JSON property `explicitAnnotation`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
attr_accessor :explicit_annotation

+ # Timestamp of the processed frame in microseconds.
+ # Corresponds to the JSON property `frameTimestamp`
+ # @return [String]
+ attr_accessor :frame_timestamp
+
# Label annotation results.
# Corresponds to the JSON property `labelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
@@ -5328,6 +5896,7 @@ module Google
# Update properties of this object
def update!(**args)
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @frame_timestamp = args[:frame_timestamp] if args.key?(:frame_timestamp)
@label_annotations = args[:label_annotations] if args.key?(:label_annotations)
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
@shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
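For context, a hedged sketch of how a caller might read the new `frameTimestamp` field next to the existing streaming annotations; `results` stands for a `GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults` instance obtained elsewhere (how it is fetched is not shown here):

```ruby
# Illustrative only: summarize one streaming result message.
def summarize_streaming_results(results)
  puts "processed frame at #{results.frame_timestamp}" if results.frame_timestamp
  (results.label_annotations || []).each do |label|
    puts "label: #{label.entity&.description}"
  end
  puts 'explicit content annotation present' if results.explicit_annotation
end
```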
@@ -5367,27 +5936,19 @@ module Google
end
end

- # Video frame level annotation results for text annotation (OCR).
- #
- #
+ # Video frame level annotation results for text annotation (OCR). Contains
+ # information regarding timestamp and bounding box locations for the frames
+ # containing detected OCR text snippets.
class GoogleCloudVideointelligenceV1p3beta1TextFrame
include Google::Apis::Core::Hashable

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
# Corresponds to the JSON property `rotatedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
attr_accessor :rotated_bounding_box
@@ -5440,9 +6001,8 @@ module Google
end
end

- # For tracking related features.
- #
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
include Google::Apis::Core::Hashable

@@ -5456,15 +6016,14 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
attr_accessor :landmarks

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset
@@ -5523,20 +6082,19 @@ module Google
class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- #
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
# Corresponds to the JSON property `feature`
# @return [String]
attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- #
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
# Corresponds to the JSON property `progressPercent`
# @return [Fixnum]
attr_accessor :progress_percent
@@ -5580,36 +6138,40 @@ module Google
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
attr_accessor :celebrity_recognition_annotations

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
# Corresponds to the JSON property `explicitAnnotation`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
attr_accessor :explicit_annotation

+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1FaceAnnotation>]
+ attr_accessor :face_annotations
+
# Face detection annotations.
# Corresponds to the JSON property `faceDetectionAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
attr_accessor :face_detection_annotations

- # Label annotations on frame level.
- #
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
# Corresponds to the JSON property `frameLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri
@@ -5641,11 +6203,11 @@ module Google
attr_accessor :segment_label_annotations

# Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- #
- #
- #
- #
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
attr_accessor :segment_presence_label_annotations
@@ -5655,17 +6217,17 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- #
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
# Corresponds to the JSON property `shotLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
attr_accessor :shot_label_annotations

# Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- #
- #
- #
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
attr_accessor :shot_presence_label_annotations
@@ -5675,9 +6237,8 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- #
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
# Corresponds to the JSON property `textAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
attr_accessor :text_annotations
@@ -5691,6 +6252,7 @@ module Google
@celebrity_recognition_annotations = args[:celebrity_recognition_annotations] if args.key?(:celebrity_recognition_annotations)
@error = args[:error] if args.key?(:error)
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
@face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
@input_uri = args[:input_uri] if args.key?(:input_uri)
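Since `face_annotations` comes back only as a deprecated compatibility field while `face_detection_annotations` is the supported one, code reading a `GoogleCloudVideointelligenceV1p3beta1VideoAnnotationResults` object might prefer the newer field and fall back, as in this small sketch (field names are taken from the hunk above; the results object itself is assumed to come from an annotation response):

```ruby
# Illustrative only: prefer the supported field, fall back to the deprecated one.
def face_results(annotation_results)
  annotation_results.face_detection_annotations ||
    annotation_results.face_annotations ||
    []
end
```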
@@ -5712,14 +6274,14 @@ module Google
class GoogleCloudVideointelligenceV1p3beta1VideoSegment
include Google::Apis::Core::Hashable

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
# Corresponds to the JSON property `endTimeOffset`
# @return [String]
attr_accessor :end_time_offset

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
# Corresponds to the JSON property `startTimeOffset`
# @return [String]
attr_accessor :start_time_offset
@@ -5736,41 +6298,41 @@ module Google
end

# Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- #
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
class GoogleCloudVideointelligenceV1p3beta1WordInfo
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time

- # Output only. A distinct integer value is assigned for every speaker within
- #
- #
- #
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
# Corresponds to the JSON property `speakerTag`
# @return [Fixnum]
attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time
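A short sketch of how the `speaker_tag` documented above could be used once a `GoogleCloudVideointelligenceV1p3beta1SpeechTranscription` has been obtained (diarization must have been enabled in the request; the transcription object is assumed):

```ruby
# Illustrative only: group the top hypothesis' words by speaker.
def words_by_speaker(transcription)
  top = transcription.alternatives&.first
  return {} unless top&.words
  top.words.group_by(&:speaker_tag)
end
```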
@@ -5799,47 +6361,45 @@ module Google
class GoogleLongrunningOperation
include Google::Apis::Core::Hashable

- # If the value is `false`, it means the operation is still in progress.
- #
- # available.
+ # If the value is `false`, it means the operation is still in progress. If `true`
+ # , the operation is completed, and either `error` or `response` is available.
# Corresponds to the JSON property `done`
# @return [Boolean]
attr_accessor :done
alias_method :done?, :done

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
attr_accessor :error

- # Service-specific metadata associated with the operation.
- #
- #
- #
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
# Corresponds to the JSON property `metadata`
# @return [Hash<String,Object>]
attr_accessor :metadata

# The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- #
+ # originally returns it. If you use the default HTTP mapping, the `name` should
+ # be a resource name ending with `operations/`unique_id``.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name

- # The normal response of the operation in case of success.
- # method returns no data on success, such as `Delete`, the response is
- #
- #
- #
- #
- #
- # `TakeSnapshotResponse`.
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is `google.
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+ # the response should be the resource. For other methods, the response should
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
+ # example, if the original method name is `TakeSnapshot()`, the inferred
+ # response type is `TakeSnapshotResponse`.
# Corresponds to the JSON property `response`
# @return [Hash<String,Object>]
attr_accessor :response
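The reflowed comments on `GoogleLongrunningOperation` describe the usual `done`/`error`/`response` contract; a hedged sketch of inspecting an already-fetched operation object (how the operation is fetched or polled is outside this diff):

```ruby
# Illustrative only: classify a long-running operation's current state.
def operation_outcome(op)
  return :pending unless op.done?
  op.error ? [:error, op.error.message] : [:ok, op.response]
end
```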
@@ -5858,12 +6418,12 @@ module Google
end
end

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
class GoogleRpcStatus
include Google::Apis::Core::Hashable

@@ -5872,15 +6432,15 @@ module Google
# @return [Fixnum]
attr_accessor :code

- # A list of messages that carry the error details.
+ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
# Corresponds to the JSON property `details`
# @return [Array<Hash<String,Object>>]
attr_accessor :details

- # A developer-facing error message, which should be in English. Any
- #
- #
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
# Corresponds to the JSON property `message`
# @return [String]
attr_accessor :message