google-api-client 0.43.0 → 0.48.0
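To pick up this release, the dependency constraint can be bumped and the bundle re-resolved. A minimal sketch, assuming the application already depends on the umbrella gem (the `~>` constraint below is an example, adjust to your own pinning policy):

```ruby
# Gemfile — track the 0.48.x series of google-api-client
gem 'google-api-client', '~> 0.48.0'
```

Then run `bundle update google-api-client` to move from 0.43.0 to 0.48.0. The full list of changed files in this upgrade follows.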
- checksums.yaml +4 -4
- data/.github/workflows/autoapprove.yml +49 -0
- data/.github/workflows/release-please.yml +77 -0
- data/.gitignore +2 -0
- data/.kokoro/trampoline.sh +0 -0
- data/CHANGELOG.md +1066 -184
- data/Gemfile +1 -0
- data/Rakefile +31 -3
- data/api_list_config.yaml +8 -0
- data/api_names.yaml +1 -0
- data/bin/generate-api +77 -15
- data/docs/oauth-server.md +4 -6
- data/generated/google/apis/acceleratedmobilepageurl_v1/classes.rb +7 -9
- data/generated/google/apis/acceleratedmobilepageurl_v1/service.rb +2 -2
- data/generated/google/apis/acceleratedmobilepageurl_v1.rb +1 -1
- data/generated/google/apis/accessapproval_v1/classes.rb +60 -86
- data/generated/google/apis/accessapproval_v1/service.rb +93 -132
- data/generated/google/apis/accessapproval_v1.rb +1 -1
- data/generated/google/apis/accesscontextmanager_v1/classes.rb +266 -236
- data/generated/google/apis/accesscontextmanager_v1/representations.rb +30 -0
- data/generated/google/apis/accesscontextmanager_v1/service.rb +308 -171
- data/generated/google/apis/accesscontextmanager_v1.rb +1 -1
- data/generated/google/apis/accesscontextmanager_v1beta/classes.rb +153 -184
- data/generated/google/apis/accesscontextmanager_v1beta/service.rb +82 -111
- data/generated/google/apis/accesscontextmanager_v1beta.rb +1 -1
- data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +47 -36
- data/generated/google/apis/adexchangebuyer2_v2beta1/representations.rb +1 -0
- data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
- data/generated/google/apis/adexchangebuyer_v1_4/classes.rb +72 -2
- data/generated/google/apis/adexchangebuyer_v1_4/representations.rb +33 -0
- data/generated/google/apis/adexchangebuyer_v1_4.rb +1 -1
- data/generated/google/apis/adexperiencereport_v1.rb +1 -1
- data/generated/google/apis/admin_datatransfer_v1/classes.rb +16 -4
- data/generated/google/apis/admin_datatransfer_v1/service.rb +30 -48
- data/generated/google/apis/admin_datatransfer_v1.rb +6 -4
- data/generated/google/apis/admin_directory_v1/classes.rb +344 -242
- data/generated/google/apis/admin_directory_v1/representations.rb +62 -39
- data/generated/google/apis/admin_directory_v1/service.rb +607 -998
- data/generated/google/apis/admin_directory_v1.rb +6 -8
- data/generated/google/apis/admin_reports_v1/classes.rb +31 -33
- data/generated/google/apis/admin_reports_v1/service.rb +131 -187
- data/generated/google/apis/admin_reports_v1.rb +6 -5
- data/generated/google/apis/admob_v1/classes.rb +31 -31
- data/generated/google/apis/admob_v1/service.rb +2 -1
- data/generated/google/apis/admob_v1.rb +6 -2
- data/generated/google/apis/adsense_v1_4/service.rb +4 -1
- data/generated/google/apis/adsense_v1_4.rb +1 -1
- data/generated/google/apis/alertcenter_v1beta1.rb +1 -1
- data/generated/google/apis/analyticsadmin_v1alpha/classes.rb +1361 -0
- data/generated/google/apis/analyticsadmin_v1alpha/representations.rb +610 -0
- data/generated/google/apis/analyticsadmin_v1alpha/service.rb +2134 -0
- data/generated/google/apis/analyticsadmin_v1alpha.rb +43 -0
- data/generated/google/apis/analyticsdata_v1alpha/classes.rb +1655 -0
- data/generated/google/apis/analyticsdata_v1alpha/representations.rb +806 -0
- data/generated/google/apis/analyticsdata_v1alpha/service.rb +261 -0
- data/generated/google/apis/analyticsdata_v1alpha.rb +37 -0
- data/generated/google/apis/analyticsreporting_v4.rb +1 -1
- data/generated/google/apis/androidenterprise_v1/service.rb +2 -2
- data/generated/google/apis/androidenterprise_v1.rb +1 -1
- data/generated/google/apis/androidmanagement_v1/classes.rb +115 -75
- data/generated/google/apis/androidmanagement_v1/representations.rb +17 -0
- data/generated/google/apis/androidmanagement_v1.rb +1 -1
- data/generated/google/apis/androidpublisher_v3/classes.rb +9 -1
- data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
- data/generated/google/apis/androidpublisher_v3/service.rb +2 -2
- data/generated/google/apis/androidpublisher_v3.rb +1 -1
- data/generated/google/apis/apigateway_v1beta/classes.rb +1162 -0
- data/generated/google/apis/{memcache_v1 → apigateway_v1beta}/representations.rb +156 -157
- data/generated/google/apis/apigateway_v1beta/service.rb +1172 -0
- data/generated/google/apis/apigateway_v1beta.rb +34 -0
- data/generated/google/apis/apigee_v1/classes.rb +630 -88
- data/generated/google/apis/apigee_v1/representations.rb +209 -1
- data/generated/google/apis/apigee_v1/service.rb +401 -74
- data/generated/google/apis/apigee_v1.rb +6 -7
- data/generated/google/apis/appengine_v1/classes.rb +96 -59
- data/generated/google/apis/appengine_v1/representations.rb +17 -0
- data/generated/google/apis/appengine_v1/service.rb +38 -47
- data/generated/google/apis/appengine_v1.rb +1 -1
- data/generated/google/apis/appengine_v1alpha/classes.rb +9 -11
- data/generated/google/apis/appengine_v1alpha.rb +1 -1
- data/generated/google/apis/appengine_v1beta/classes.rb +103 -59
- data/generated/google/apis/appengine_v1beta/representations.rb +18 -0
- data/generated/google/apis/appengine_v1beta/service.rb +37 -47
- data/generated/google/apis/appengine_v1beta.rb +1 -1
- data/generated/google/apis/appsmarket_v2/classes.rb +14 -16
- data/generated/google/apis/appsmarket_v2.rb +1 -1
- data/generated/google/apis/area120tables_v1alpha1/classes.rb +423 -0
- data/generated/google/apis/area120tables_v1alpha1/representations.rb +248 -0
- data/generated/google/apis/area120tables_v1alpha1/service.rb +381 -0
- data/generated/google/apis/area120tables_v1alpha1.rb +46 -0
- data/generated/google/apis/artifactregistry_v1beta1/classes.rb +249 -337
- data/generated/google/apis/artifactregistry_v1beta1/representations.rb +2 -0
- data/generated/google/apis/artifactregistry_v1beta1/service.rb +44 -57
- data/generated/google/apis/artifactregistry_v1beta1.rb +1 -1
- data/generated/google/apis/assuredworkloads_v1beta1/classes.rb +722 -0
- data/generated/google/apis/assuredworkloads_v1beta1/representations.rb +359 -0
- data/generated/google/apis/assuredworkloads_v1beta1/service.rb +328 -0
- data/generated/google/apis/{accessapproval_v1beta1.rb → assuredworkloads_v1beta1.rb} +8 -8
- data/generated/google/apis/bigquery_v2/classes.rb +593 -576
- data/generated/google/apis/bigquery_v2/representations.rb +85 -0
- data/generated/google/apis/bigquery_v2/service.rb +79 -41
- data/generated/google/apis/bigquery_v2.rb +1 -1
- data/generated/google/apis/bigqueryconnection_v1beta1/classes.rb +192 -337
- data/generated/google/apis/bigqueryconnection_v1beta1/service.rb +29 -32
- data/generated/google/apis/bigqueryconnection_v1beta1.rb +1 -1
- data/generated/google/apis/bigquerydatatransfer_v1/classes.rb +132 -158
- data/generated/google/apis/bigquerydatatransfer_v1/service.rb +232 -282
- data/generated/google/apis/bigquerydatatransfer_v1.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1/classes.rb +116 -123
- data/generated/google/apis/bigqueryreservation_v1/representations.rb +2 -0
- data/generated/google/apis/bigqueryreservation_v1/service.rb +137 -183
- data/generated/google/apis/bigqueryreservation_v1.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1alpha2/classes.rb +88 -100
- data/generated/google/apis/bigqueryreservation_v1alpha2/service.rb +77 -100
- data/generated/google/apis/bigqueryreservation_v1alpha2.rb +1 -1
- data/generated/google/apis/bigqueryreservation_v1beta1/classes.rb +93 -98
- data/generated/google/apis/bigqueryreservation_v1beta1/representations.rb +2 -0
- data/generated/google/apis/bigqueryreservation_v1beta1/service.rb +114 -151
- data/generated/google/apis/bigqueryreservation_v1beta1.rb +1 -1
- data/generated/google/apis/bigtableadmin_v1/classes.rb +137 -119
- data/generated/google/apis/bigtableadmin_v1/representations.rb +29 -0
- data/generated/google/apis/bigtableadmin_v1.rb +1 -1
- data/generated/google/apis/bigtableadmin_v2/classes.rb +455 -607
- data/generated/google/apis/bigtableadmin_v2/representations.rb +29 -0
- data/generated/google/apis/bigtableadmin_v2/service.rb +194 -247
- data/generated/google/apis/bigtableadmin_v2.rb +1 -1
- data/generated/google/apis/billingbudgets_v1/classes.rb +373 -0
- data/generated/google/apis/billingbudgets_v1/representations.rb +171 -0
- data/generated/google/apis/billingbudgets_v1/service.rb +249 -0
- data/generated/google/apis/billingbudgets_v1.rb +38 -0
- data/generated/google/apis/billingbudgets_v1beta1/classes.rb +27 -6
- data/generated/google/apis/billingbudgets_v1beta1/representations.rb +2 -0
- data/generated/google/apis/billingbudgets_v1beta1.rb +4 -1
- data/generated/google/apis/binaryauthorization_v1/classes.rb +434 -355
- data/generated/google/apis/binaryauthorization_v1/representations.rb +75 -0
- data/generated/google/apis/binaryauthorization_v1/service.rb +109 -89
- data/generated/google/apis/binaryauthorization_v1.rb +1 -1
- data/generated/google/apis/binaryauthorization_v1beta1/classes.rb +434 -355
- data/generated/google/apis/binaryauthorization_v1beta1/representations.rb +75 -0
- data/generated/google/apis/binaryauthorization_v1beta1/service.rb +109 -89
- data/generated/google/apis/binaryauthorization_v1beta1.rb +1 -1
- data/generated/google/apis/books_v1/service.rb +54 -54
- data/generated/google/apis/books_v1.rb +1 -1
- data/generated/google/apis/calendar_v3/classes.rb +13 -10
- data/generated/google/apis/calendar_v3.rb +1 -1
- data/generated/google/apis/chat_v1/classes.rb +173 -116
- data/generated/google/apis/chat_v1/representations.rb +36 -0
- data/generated/google/apis/chat_v1/service.rb +30 -42
- data/generated/google/apis/chat_v1.rb +1 -1
- data/generated/google/apis/civicinfo_v2/classes.rb +18 -32
- data/generated/google/apis/civicinfo_v2/representations.rb +2 -3
- data/generated/google/apis/civicinfo_v2.rb +1 -1
- data/generated/google/apis/classroom_v1/classes.rb +153 -21
- data/generated/google/apis/classroom_v1/representations.rb +43 -0
- data/generated/google/apis/classroom_v1/service.rb +240 -0
- data/generated/google/apis/classroom_v1.rb +7 -1
- data/generated/google/apis/cloudasset_v1/classes.rb +1461 -1039
- data/generated/google/apis/cloudasset_v1/representations.rb +320 -0
- data/generated/google/apis/cloudasset_v1/service.rb +296 -167
- data/generated/google/apis/cloudasset_v1.rb +1 -1
- data/generated/google/apis/cloudasset_v1beta1/classes.rb +531 -777
- data/generated/google/apis/cloudasset_v1beta1/service.rb +59 -75
- data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p1beta1/classes.rb +204 -349
- data/generated/google/apis/cloudasset_v1p1beta1/service.rb +35 -47
- data/generated/google/apis/cloudasset_v1p1beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p4beta1/classes.rb +222 -276
- data/generated/google/apis/cloudasset_v1p4beta1/service.rb +75 -93
- data/generated/google/apis/cloudasset_v1p4beta1.rb +1 -1
- data/generated/google/apis/cloudasset_v1p5beta1/classes.rb +481 -720
- data/generated/google/apis/cloudasset_v1p5beta1/service.rb +25 -28
- data/generated/google/apis/cloudasset_v1p5beta1.rb +1 -1
- data/generated/google/apis/cloudbilling_v1/classes.rb +285 -446
- data/generated/google/apis/cloudbilling_v1/service.rb +104 -124
- data/generated/google/apis/cloudbilling_v1.rb +7 -1
- data/generated/google/apis/cloudbuild_v1/classes.rb +339 -344
- data/generated/google/apis/cloudbuild_v1/representations.rb +8 -0
- data/generated/google/apis/cloudbuild_v1/service.rb +277 -67
- data/generated/google/apis/cloudbuild_v1.rb +1 -1
- data/generated/google/apis/cloudbuild_v1alpha1/classes.rb +291 -329
- data/generated/google/apis/cloudbuild_v1alpha1/representations.rb +2 -0
- data/generated/google/apis/cloudbuild_v1alpha1/service.rb +15 -18
- data/generated/google/apis/cloudbuild_v1alpha1.rb +1 -1
- data/generated/google/apis/cloudbuild_v1alpha2/classes.rb +277 -313
- data/generated/google/apis/cloudbuild_v1alpha2/representations.rb +2 -0
- data/generated/google/apis/cloudbuild_v1alpha2/service.rb +22 -28
- data/generated/google/apis/cloudbuild_v1alpha2.rb +1 -1
- data/generated/google/apis/clouddebugger_v2/classes.rb +185 -252
- data/generated/google/apis/clouddebugger_v2/service.rb +53 -59
- data/generated/google/apis/clouddebugger_v2.rb +1 -1
- data/generated/google/apis/clouderrorreporting_v1beta1/classes.rb +127 -156
- data/generated/google/apis/clouderrorreporting_v1beta1/service.rb +53 -69
- data/generated/google/apis/clouderrorreporting_v1beta1.rb +1 -1
- data/generated/google/apis/cloudfunctions_v1/classes.rb +335 -494
- data/generated/google/apis/cloudfunctions_v1/representations.rb +1 -0
- data/generated/google/apis/cloudfunctions_v1/service.rb +79 -93
- data/generated/google/apis/cloudfunctions_v1.rb +1 -1
- data/generated/google/apis/cloudidentity_v1/classes.rb +989 -107
- data/generated/google/apis/cloudidentity_v1/representations.rb +371 -0
- data/generated/google/apis/cloudidentity_v1/service.rb +883 -88
- data/generated/google/apis/cloudidentity_v1.rb +4 -1
- data/generated/google/apis/cloudidentity_v1beta1/classes.rb +1236 -307
- data/generated/google/apis/cloudidentity_v1beta1/representations.rb +424 -21
- data/generated/google/apis/cloudidentity_v1beta1/service.rb +921 -96
- data/generated/google/apis/cloudidentity_v1beta1.rb +4 -1
- data/generated/google/apis/cloudiot_v1/classes.rb +270 -373
- data/generated/google/apis/cloudiot_v1/representations.rb +1 -0
- data/generated/google/apis/cloudiot_v1/service.rb +147 -154
- data/generated/google/apis/cloudiot_v1.rb +1 -1
- data/generated/google/apis/cloudkms_v1/classes.rb +509 -692
- data/generated/google/apis/cloudkms_v1/representations.rb +18 -0
- data/generated/google/apis/cloudkms_v1/service.rb +170 -216
- data/generated/google/apis/cloudkms_v1.rb +1 -1
- data/generated/google/apis/cloudprofiler_v2/classes.rb +28 -33
- data/generated/google/apis/cloudprofiler_v2/service.rb +17 -19
- data/generated/google/apis/cloudprofiler_v2.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v1/classes.rb +14 -7
- data/generated/google/apis/cloudresourcemanager_v1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v1/service.rb +62 -60
- data/generated/google/apis/cloudresourcemanager_v1.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +11 -4
- data/generated/google/apis/cloudresourcemanager_v1beta1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v1beta1/service.rb +26 -25
- data/generated/google/apis/cloudresourcemanager_v1beta1.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v2/classes.rb +7 -0
- data/generated/google/apis/cloudresourcemanager_v2/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v2/service.rb +7 -7
- data/generated/google/apis/cloudresourcemanager_v2.rb +1 -1
- data/generated/google/apis/cloudresourcemanager_v2beta1/classes.rb +7 -0
- data/generated/google/apis/cloudresourcemanager_v2beta1/representations.rb +1 -0
- data/generated/google/apis/cloudresourcemanager_v2beta1/service.rb +7 -7
- data/generated/google/apis/cloudresourcemanager_v2beta1.rb +1 -1
- data/generated/google/apis/cloudscheduler_v1/classes.rb +272 -383
- data/generated/google/apis/cloudscheduler_v1/service.rb +45 -62
- data/generated/google/apis/cloudscheduler_v1.rb +1 -1
- data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +273 -384
- data/generated/google/apis/cloudscheduler_v1beta1/service.rb +45 -62
- data/generated/google/apis/cloudscheduler_v1beta1.rb +1 -1
- data/generated/google/apis/cloudsearch_v1/classes.rb +651 -781
- data/generated/google/apis/cloudsearch_v1/representations.rb +15 -0
- data/generated/google/apis/cloudsearch_v1/service.rb +286 -326
- data/generated/google/apis/cloudsearch_v1.rb +2 -2
- data/generated/google/apis/cloudshell_v1/classes.rb +256 -105
- data/generated/google/apis/cloudshell_v1/representations.rb +143 -10
- data/generated/google/apis/cloudshell_v1/service.rb +198 -25
- data/generated/google/apis/cloudshell_v1.rb +1 -1
- data/generated/google/apis/cloudshell_v1alpha1/classes.rb +69 -78
- data/generated/google/apis/cloudshell_v1alpha1/service.rb +20 -24
- data/generated/google/apis/cloudshell_v1alpha1.rb +1 -1
- data/generated/google/apis/cloudtasks_v2/classes.rb +612 -933
- data/generated/google/apis/cloudtasks_v2/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2/service.rb +146 -217
- data/generated/google/apis/cloudtasks_v2.rb +1 -1
- data/generated/google/apis/cloudtasks_v2beta2/classes.rb +608 -964
- data/generated/google/apis/cloudtasks_v2beta2/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2beta2/service.rb +178 -270
- data/generated/google/apis/cloudtasks_v2beta2.rb +1 -1
- data/generated/google/apis/cloudtasks_v2beta3/classes.rb +616 -938
- data/generated/google/apis/cloudtasks_v2beta3/representations.rb +1 -0
- data/generated/google/apis/cloudtasks_v2beta3/service.rb +146 -217
- data/generated/google/apis/cloudtasks_v2beta3.rb +1 -1
- data/generated/google/apis/cloudtrace_v1/classes.rb +39 -61
- data/generated/google/apis/cloudtrace_v1/service.rb +37 -51
- data/generated/google/apis/cloudtrace_v1.rb +1 -1
- data/generated/google/apis/cloudtrace_v2/classes.rb +92 -107
- data/generated/google/apis/cloudtrace_v2/service.rb +8 -11
- data/generated/google/apis/cloudtrace_v2.rb +1 -1
- data/generated/google/apis/cloudtrace_v2beta1/classes.rb +23 -33
- data/generated/google/apis/cloudtrace_v2beta1/service.rb +30 -37
- data/generated/google/apis/cloudtrace_v2beta1.rb +1 -1
- data/generated/google/apis/composer_v1/classes.rb +189 -242
- data/generated/google/apis/composer_v1/service.rb +79 -150
- data/generated/google/apis/composer_v1.rb +1 -1
- data/generated/google/apis/composer_v1beta1/classes.rb +203 -262
- data/generated/google/apis/composer_v1beta1/service.rb +94 -179
- data/generated/google/apis/composer_v1beta1.rb +1 -1
- data/generated/google/apis/compute_alpha/classes.rb +1227 -186
- data/generated/google/apis/compute_alpha/representations.rb +235 -8
- data/generated/google/apis/compute_alpha/service.rb +2009 -1024
- data/generated/google/apis/compute_alpha.rb +1 -1
- data/generated/google/apis/compute_beta/classes.rb +1080 -108
- data/generated/google/apis/compute_beta/representations.rb +212 -2
- data/generated/google/apis/compute_beta/service.rb +1413 -741
- data/generated/google/apis/compute_beta.rb +1 -1
- data/generated/google/apis/compute_v1/classes.rb +1512 -106
- data/generated/google/apis/compute_v1/representations.rb +470 -1
- data/generated/google/apis/compute_v1/service.rb +1625 -285
- data/generated/google/apis/compute_v1.rb +1 -1
- data/generated/google/apis/container_v1/classes.rb +982 -965
- data/generated/google/apis/container_v1/representations.rb +60 -0
- data/generated/google/apis/container_v1/service.rb +435 -502
- data/generated/google/apis/container_v1.rb +1 -1
- data/generated/google/apis/container_v1beta1/classes.rb +1106 -1044
- data/generated/google/apis/container_v1beta1/representations.rb +91 -0
- data/generated/google/apis/container_v1beta1/service.rb +403 -466
- data/generated/google/apis/container_v1beta1.rb +1 -1
- data/generated/google/apis/containeranalysis_v1alpha1/classes.rb +463 -596
- data/generated/google/apis/containeranalysis_v1alpha1/representations.rb +1 -0
- data/generated/google/apis/containeranalysis_v1alpha1/service.rb +149 -169
- data/generated/google/apis/containeranalysis_v1alpha1.rb +1 -1
- data/generated/google/apis/containeranalysis_v1beta1/classes.rb +461 -613
- data/generated/google/apis/containeranalysis_v1beta1/representations.rb +1 -0
- data/generated/google/apis/containeranalysis_v1beta1/service.rb +75 -90
- data/generated/google/apis/containeranalysis_v1beta1.rb +1 -1
- data/generated/google/apis/content_v2/classes.rb +515 -1219
- data/generated/google/apis/content_v2/service.rb +377 -650
- data/generated/google/apis/content_v2.rb +3 -4
- data/generated/google/apis/content_v2_1/classes.rb +1108 -1058
- data/generated/google/apis/content_v2_1/representations.rb +288 -0
- data/generated/google/apis/content_v2_1/service.rb +987 -795
- data/generated/google/apis/content_v2_1.rb +3 -4
- data/generated/google/apis/customsearch_v1/service.rb +2 -2
- data/generated/google/apis/customsearch_v1.rb +1 -1
- data/generated/google/apis/datacatalog_v1beta1/classes.rb +413 -573
- data/generated/google/apis/datacatalog_v1beta1/representations.rb +6 -0
- data/generated/google/apis/datacatalog_v1beta1/service.rb +319 -440
- data/generated/google/apis/datacatalog_v1beta1.rb +1 -1
- data/generated/google/apis/dataflow_v1b3/classes.rb +1174 -973
- data/generated/google/apis/dataflow_v1b3/representations.rb +148 -0
- data/generated/google/apis/dataflow_v1b3/service.rb +308 -257
- data/generated/google/apis/dataflow_v1b3.rb +1 -1
- data/generated/google/apis/datafusion_v1/classes.rb +283 -397
- data/generated/google/apis/datafusion_v1/representations.rb +5 -0
- data/generated/google/apis/datafusion_v1/service.rb +76 -89
- data/generated/google/apis/datafusion_v1.rb +5 -8
- data/generated/google/apis/datafusion_v1beta1/classes.rb +283 -397
- data/generated/google/apis/datafusion_v1beta1/representations.rb +5 -0
- data/generated/google/apis/datafusion_v1beta1/service.rb +81 -95
- data/generated/google/apis/datafusion_v1beta1.rb +5 -8
- data/generated/google/apis/datalabeling_v1beta1/classes.rb +6207 -0
- data/generated/google/apis/datalabeling_v1beta1/representations.rb +3156 -0
- data/generated/google/apis/datalabeling_v1beta1/service.rb +1762 -0
- data/generated/google/apis/datalabeling_v1beta1.rb +34 -0
- data/generated/google/apis/dataproc_v1/classes.rb +97 -13
- data/generated/google/apis/dataproc_v1/representations.rb +34 -0
- data/generated/google/apis/dataproc_v1.rb +1 -1
- data/generated/google/apis/dataproc_v1beta2/classes.rb +117 -9
- data/generated/google/apis/dataproc_v1beta2/representations.rb +49 -0
- data/generated/google/apis/dataproc_v1beta2.rb +1 -1
- data/generated/google/apis/datastore_v1/classes.rb +334 -476
- data/generated/google/apis/datastore_v1/service.rb +52 -63
- data/generated/google/apis/datastore_v1.rb +1 -1
- data/generated/google/apis/datastore_v1beta1/classes.rb +150 -217
- data/generated/google/apis/datastore_v1beta1/service.rb +11 -12
- data/generated/google/apis/datastore_v1beta1.rb +1 -1
- data/generated/google/apis/datastore_v1beta3/classes.rb +259 -375
- data/generated/google/apis/datastore_v1beta3/service.rb +1 -2
- data/generated/google/apis/datastore_v1beta3.rb +1 -1
- data/generated/google/apis/deploymentmanager_v2/classes.rb +203 -558
- data/generated/google/apis/deploymentmanager_v2/representations.rb +0 -132
- data/generated/google/apis/deploymentmanager_v2/service.rb +169 -213
- data/generated/google/apis/deploymentmanager_v2.rb +6 -4
- data/generated/google/apis/deploymentmanager_v2beta/classes.rb +247 -609
- data/generated/google/apis/deploymentmanager_v2beta/representations.rb +0 -132
- data/generated/google/apis/deploymentmanager_v2beta/service.rb +278 -359
- data/generated/google/apis/deploymentmanager_v2beta.rb +6 -5
- data/generated/google/apis/dfareporting_v3_3/classes.rb +162 -339
- data/generated/google/apis/dfareporting_v3_3/service.rb +646 -1262
- data/generated/google/apis/dfareporting_v3_3.rb +2 -2
- data/generated/google/apis/dfareporting_v3_4/classes.rb +184 -350
- data/generated/google/apis/dfareporting_v3_4/representations.rb +1 -0
- data/generated/google/apis/dfareporting_v3_4/service.rb +681 -1261
- data/generated/google/apis/dfareporting_v3_4.rb +2 -2
- data/generated/google/apis/dialogflow_v2/classes.rb +746 -217
- data/generated/google/apis/dialogflow_v2/representations.rb +318 -67
- data/generated/google/apis/dialogflow_v2.rb +1 -1
- data/generated/google/apis/dialogflow_v2beta1/classes.rb +764 -233
- data/generated/google/apis/dialogflow_v2beta1/representations.rb +318 -67
- data/generated/google/apis/dialogflow_v2beta1/service.rb +556 -331
- data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
- data/generated/google/apis/dialogflow_v3beta1/classes.rb +8816 -0
- data/generated/google/apis/dialogflow_v3beta1/representations.rb +3725 -0
- data/generated/google/apis/dialogflow_v3beta1/service.rb +2825 -0
- data/generated/google/apis/dialogflow_v3beta1.rb +38 -0
- data/generated/google/apis/digitalassetlinks_v1/classes.rb +66 -92
- data/generated/google/apis/digitalassetlinks_v1/service.rb +131 -188
- data/generated/google/apis/digitalassetlinks_v1.rb +1 -1
- data/generated/google/apis/displayvideo_v1/classes.rb +271 -38
- data/generated/google/apis/displayvideo_v1/representations.rb +83 -0
- data/generated/google/apis/displayvideo_v1/service.rb +287 -32
- data/generated/google/apis/displayvideo_v1.rb +1 -1
- data/generated/google/apis/displayvideo_v1beta/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1beta/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1beta/service.rb +161 -0
- data/generated/google/apis/displayvideo_v1beta.rb +38 -0
- data/generated/google/apis/displayvideo_v1beta2/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1beta2/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1beta2/service.rb +130 -0
- data/generated/google/apis/displayvideo_v1beta2.rb +38 -0
- data/generated/google/apis/displayvideo_v1dev/classes.rb +146 -0
- data/generated/google/apis/displayvideo_v1dev/representations.rb +72 -0
- data/generated/google/apis/displayvideo_v1dev/service.rb +130 -0
- data/generated/google/apis/displayvideo_v1dev.rb +38 -0
- data/generated/google/apis/dlp_v2/classes.rb +1111 -1310
- data/generated/google/apis/dlp_v2/representations.rb +16 -0
- data/generated/google/apis/dlp_v2/service.rb +962 -905
- data/generated/google/apis/dlp_v2.rb +1 -1
- data/generated/google/apis/dns_v1/classes.rb +356 -198
- data/generated/google/apis/dns_v1/representations.rb +83 -0
- data/generated/google/apis/dns_v1/service.rb +83 -98
- data/generated/google/apis/dns_v1.rb +2 -2
- data/generated/google/apis/dns_v1beta2/classes.rb +362 -206
- data/generated/google/apis/dns_v1beta2/representations.rb +83 -0
- data/generated/google/apis/dns_v1beta2/service.rb +83 -98
- data/generated/google/apis/dns_v1beta2.rb +2 -2
- data/generated/google/apis/docs_v1/classes.rb +894 -1229
- data/generated/google/apis/docs_v1/service.rb +17 -22
- data/generated/google/apis/docs_v1.rb +1 -1
- data/generated/google/apis/documentai_v1beta2/classes.rb +2253 -824
- data/generated/google/apis/documentai_v1beta2/representations.rb +701 -0
- data/generated/google/apis/documentai_v1beta2/service.rb +22 -24
- data/generated/google/apis/documentai_v1beta2.rb +1 -1
- data/generated/google/apis/documentai_v1beta3/classes.rb +6149 -0
- data/generated/google/apis/documentai_v1beta3/representations.rb +2666 -0
- data/generated/google/apis/documentai_v1beta3/service.rb +263 -0
- data/generated/google/apis/{securitycenter_v1p1alpha1.rb → documentai_v1beta3.rb} +11 -10
- data/generated/google/apis/domains_v1alpha2/classes.rb +1540 -0
- data/generated/google/apis/domains_v1alpha2/representations.rb +606 -0
- data/generated/google/apis/domains_v1alpha2/service.rb +805 -0
- data/generated/google/apis/domains_v1alpha2.rb +34 -0
- data/generated/google/apis/domains_v1beta1/classes.rb +1540 -0
- data/generated/google/apis/domains_v1beta1/representations.rb +606 -0
- data/generated/google/apis/domains_v1beta1/service.rb +805 -0
- data/generated/google/apis/domains_v1beta1.rb +34 -0
- data/generated/google/apis/doubleclickbidmanager_v1/classes.rb +6 -12
- data/generated/google/apis/doubleclickbidmanager_v1/service.rb +33 -64
- data/generated/google/apis/doubleclickbidmanager_v1.rb +3 -2
- data/generated/google/apis/doubleclickbidmanager_v1_1/classes.rb +13 -20
- data/generated/google/apis/doubleclickbidmanager_v1_1/service.rb +33 -64
- data/generated/google/apis/doubleclickbidmanager_v1_1.rb +3 -2
- data/generated/google/apis/doubleclicksearch_v2/service.rb +2 -2
- data/generated/google/apis/doubleclicksearch_v2.rb +1 -1
- data/generated/google/apis/drive_v2/classes.rb +18 -7
- data/generated/google/apis/drive_v2/representations.rb +1 -0
- data/generated/google/apis/drive_v2/service.rb +79 -15
- data/generated/google/apis/drive_v2.rb +1 -1
- data/generated/google/apis/drive_v3/classes.rb +18 -8
- data/generated/google/apis/drive_v3/representations.rb +1 -0
- data/generated/google/apis/drive_v3/service.rb +59 -11
- data/generated/google/apis/drive_v3.rb +1 -1
- data/generated/google/apis/eventarc_v1beta1/classes.rb +931 -0
- data/generated/google/apis/eventarc_v1beta1/representations.rb +379 -0
- data/generated/google/apis/{memcache_v1 → eventarc_v1beta1}/service.rb +236 -215
- data/generated/google/apis/eventarc_v1beta1.rb +34 -0
- data/generated/google/apis/file_v1/classes.rb +155 -174
- data/generated/google/apis/file_v1/service.rb +43 -52
- data/generated/google/apis/file_v1.rb +1 -1
- data/generated/google/apis/file_v1beta1/classes.rb +335 -194
- data/generated/google/apis/file_v1beta1/representations.rb +55 -0
- data/generated/google/apis/file_v1beta1/service.rb +267 -55
- data/generated/google/apis/file_v1beta1.rb +1 -1
- data/generated/google/apis/firebase_v1beta1/classes.rb +33 -51
- data/generated/google/apis/firebase_v1beta1/representations.rb +2 -16
- data/generated/google/apis/firebase_v1beta1/service.rb +21 -1
- data/generated/google/apis/firebase_v1beta1.rb +1 -1
- data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
- data/generated/google/apis/firebasehosting_v1beta1/classes.rb +188 -0
- data/generated/google/apis/firebasehosting_v1beta1/representations.rb +83 -0
- data/generated/google/apis/firebasehosting_v1beta1/service.rb +418 -4
- data/generated/google/apis/firebasehosting_v1beta1.rb +1 -1
- data/generated/google/apis/firebaseml_v1beta2/classes.rb +8 -8
- data/generated/google/apis/firebaseml_v1beta2/representations.rb +1 -1
- data/generated/google/apis/firebaseml_v1beta2.rb +1 -1
- data/generated/google/apis/firebaserules_v1/classes.rb +102 -137
- data/generated/google/apis/firebaserules_v1/service.rb +87 -110
- data/generated/google/apis/firebaserules_v1.rb +1 -1
- data/generated/google/apis/firestore_v1/classes.rb +406 -502
- data/generated/google/apis/firestore_v1/service.rb +165 -201
- data/generated/google/apis/firestore_v1.rb +1 -1
- data/generated/google/apis/firestore_v1beta1/classes.rb +338 -413
- data/generated/google/apis/firestore_v1beta1/service.rb +106 -122
- data/generated/google/apis/firestore_v1beta1.rb +1 -1
- data/generated/google/apis/firestore_v1beta2/classes.rb +135 -165
- data/generated/google/apis/firestore_v1beta2/service.rb +65 -86
- data/generated/google/apis/firestore_v1beta2.rb +1 -1
- data/generated/google/apis/fitness_v1/classes.rb +982 -0
- data/generated/google/apis/fitness_v1/representations.rb +398 -0
- data/generated/google/apis/fitness_v1/service.rb +628 -0
- data/generated/google/apis/fitness_v1.rb +97 -0
- data/generated/google/apis/games_configuration_v1configuration/classes.rb +2 -3
- data/generated/google/apis/games_configuration_v1configuration/service.rb +6 -6
- data/generated/google/apis/games_configuration_v1configuration.rb +1 -1
- data/generated/google/apis/games_management_v1management/classes.rb +14 -20
- data/generated/google/apis/games_management_v1management/service.rb +35 -36
- data/generated/google/apis/games_management_v1management.rb +2 -3
- data/generated/google/apis/games_v1/classes.rb +376 -83
- data/generated/google/apis/games_v1/representations.rb +118 -0
- data/generated/google/apis/games_v1/service.rb +118 -90
- data/generated/google/apis/games_v1.rb +2 -3
- data/generated/google/apis/gameservices_v1/classes.rb +22 -14
- data/generated/google/apis/gameservices_v1/representations.rb +1 -0
- data/generated/google/apis/gameservices_v1/service.rb +54 -51
- data/generated/google/apis/gameservices_v1.rb +1 -1
- data/generated/google/apis/gameservices_v1beta/classes.rb +22 -14
- data/generated/google/apis/gameservices_v1beta/representations.rb +1 -0
- data/generated/google/apis/gameservices_v1beta/service.rb +54 -51
- data/generated/google/apis/gameservices_v1beta.rb +1 -1
- data/generated/google/apis/genomics_v1/classes.rb +70 -76
- data/generated/google/apis/genomics_v1/service.rb +28 -43
- data/generated/google/apis/genomics_v1.rb +1 -1
- data/generated/google/apis/genomics_v1alpha2/classes.rb +223 -290
- data/generated/google/apis/genomics_v1alpha2/service.rb +54 -76
- data/generated/google/apis/genomics_v1alpha2.rb +1 -1
- data/generated/google/apis/genomics_v2alpha1/classes.rb +356 -275
- data/generated/google/apis/genomics_v2alpha1/representations.rb +48 -0
- data/generated/google/apis/genomics_v2alpha1/service.rb +47 -66
- data/generated/google/apis/genomics_v2alpha1.rb +1 -1
- data/generated/google/apis/gmail_v1/classes.rb +37 -43
- data/generated/google/apis/gmail_v1/service.rb +5 -4
- data/generated/google/apis/gmail_v1.rb +1 -1
- data/generated/google/apis/gmailpostmastertools_v1beta1/service.rb +11 -11
- data/generated/google/apis/gmailpostmastertools_v1beta1.rb +2 -2
- data/generated/google/apis/groupsmigration_v1/classes.rb +51 -0
- data/generated/google/apis/groupsmigration_v1/representations.rb +40 -0
- data/generated/google/apis/groupsmigration_v1/service.rb +100 -0
- data/generated/google/apis/groupsmigration_v1.rb +35 -0
- data/generated/google/apis/healthcare_v1/classes.rb +637 -826
- data/generated/google/apis/healthcare_v1/representations.rb +32 -0
- data/generated/google/apis/healthcare_v1/service.rb +842 -855
- data/generated/google/apis/healthcare_v1.rb +1 -1
- data/generated/google/apis/healthcare_v1beta1/classes.rb +1937 -1299
- data/generated/google/apis/healthcare_v1beta1/representations.rb +534 -65
- data/generated/google/apis/healthcare_v1beta1/service.rb +2534 -1293
- data/generated/google/apis/healthcare_v1beta1.rb +1 -1
- data/generated/google/apis/homegraph_v1/classes.rb +76 -164
- data/generated/google/apis/homegraph_v1/service.rb +23 -35
- data/generated/google/apis/homegraph_v1.rb +4 -1
- data/generated/google/apis/iam_v1/classes.rb +395 -592
- data/generated/google/apis/iam_v1/representations.rb +1 -0
- data/generated/google/apis/iam_v1/service.rb +427 -555
- data/generated/google/apis/iam_v1.rb +1 -1
- data/generated/google/apis/iamcredentials_v1/classes.rb +75 -85
- data/generated/google/apis/iamcredentials_v1/service.rb +14 -13
- data/generated/google/apis/iamcredentials_v1.rb +3 -2
- data/generated/google/apis/iap_v1/classes.rb +253 -355
- data/generated/google/apis/iap_v1/representations.rb +1 -0
- data/generated/google/apis/iap_v1/service.rb +61 -71
- data/generated/google/apis/iap_v1.rb +1 -1
- data/generated/google/apis/iap_v1beta1/classes.rb +164 -254
- data/generated/google/apis/iap_v1beta1/representations.rb +1 -0
- data/generated/google/apis/iap_v1beta1/service.rb +17 -19
- data/generated/google/apis/iap_v1beta1.rb +1 -1
- data/generated/google/apis/indexing_v3/classes.rb +11 -11
- data/generated/google/apis/indexing_v3.rb +1 -1
- data/generated/google/apis/jobs_v2/classes.rb +1584 -1086
- data/generated/google/apis/jobs_v2/representations.rb +272 -0
- data/generated/google/apis/jobs_v2/service.rb +85 -126
- data/generated/google/apis/jobs_v2.rb +1 -1
- data/generated/google/apis/jobs_v3/classes.rb +1559 -980
- data/generated/google/apis/jobs_v3/representations.rb +272 -0
- data/generated/google/apis/jobs_v3/service.rb +101 -139
- data/generated/google/apis/jobs_v3.rb +1 -1
- data/generated/google/apis/jobs_v3p1beta1/classes.rb +1521 -1023
- data/generated/google/apis/jobs_v3p1beta1/representations.rb +257 -0
- data/generated/google/apis/jobs_v3p1beta1/service.rb +103 -142
- data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
- data/generated/google/apis/kgsearch_v1/classes.rb +4 -4
- data/generated/google/apis/kgsearch_v1/service.rb +11 -11
- data/generated/google/apis/kgsearch_v1.rb +1 -1
- data/generated/google/apis/licensing_v1/classes.rb +1 -1
- data/generated/google/apis/licensing_v1/service.rb +56 -86
- data/generated/google/apis/licensing_v1.rb +4 -3
- data/generated/google/apis/lifesciences_v2beta/classes.rb +366 -290
- data/generated/google/apis/lifesciences_v2beta/representations.rb +47 -0
- data/generated/google/apis/lifesciences_v2beta/service.rb +30 -42
- data/generated/google/apis/lifesciences_v2beta.rb +1 -1
- data/generated/google/apis/localservices_v1/classes.rb +426 -0
- data/generated/google/apis/localservices_v1/representations.rb +174 -0
- data/generated/google/apis/localservices_v1/service.rb +199 -0
- data/generated/google/apis/{appsactivity_v1.rb → localservices_v1.rb} +8 -11
- data/generated/google/apis/logging_v2/classes.rb +306 -232
- data/generated/google/apis/logging_v2/representations.rb +79 -0
- data/generated/google/apis/logging_v2/service.rb +3307 -1579
- data/generated/google/apis/logging_v2.rb +1 -1
- data/generated/google/apis/managedidentities_v1/classes.rb +8 -1
- data/generated/google/apis/managedidentities_v1/representations.rb +1 -0
- data/generated/google/apis/managedidentities_v1/service.rb +1 -4
- data/generated/google/apis/managedidentities_v1.rb +1 -1
- data/generated/google/apis/managedidentities_v1alpha1/classes.rb +87 -1
- data/generated/google/apis/managedidentities_v1alpha1/representations.rb +34 -0
- data/generated/google/apis/managedidentities_v1alpha1/service.rb +83 -5
- data/generated/google/apis/managedidentities_v1alpha1.rb +1 -1
- data/generated/google/apis/managedidentities_v1beta1/classes.rb +88 -1
- data/generated/google/apis/managedidentities_v1beta1/representations.rb +34 -0
- data/generated/google/apis/managedidentities_v1beta1/service.rb +83 -5
- data/generated/google/apis/managedidentities_v1beta1.rb +1 -1
- data/generated/google/apis/manufacturers_v1/classes.rb +99 -109
- data/generated/google/apis/manufacturers_v1/service.rb +44 -55
- data/generated/google/apis/manufacturers_v1.rb +1 -1
- data/generated/google/apis/memcache_v1beta2/classes.rb +171 -250
- data/generated/google/apis/memcache_v1beta2/representations.rb +0 -19
- data/generated/google/apis/memcache_v1beta2/service.rb +60 -73
- data/generated/google/apis/memcache_v1beta2.rb +1 -1
- data/generated/google/apis/ml_v1/classes.rb +1122 -1149
- data/generated/google/apis/ml_v1/representations.rb +82 -0
- data/generated/google/apis/ml_v1/service.rb +194 -253
- data/generated/google/apis/ml_v1.rb +1 -1
- data/generated/google/apis/monitoring_v1/classes.rb +107 -26
- data/generated/google/apis/monitoring_v1/representations.rb +35 -0
- data/generated/google/apis/monitoring_v1/service.rb +10 -11
- data/generated/google/apis/monitoring_v1.rb +1 -1
- data/generated/google/apis/monitoring_v3/classes.rb +303 -345
- data/generated/google/apis/monitoring_v3/representations.rb +18 -0
- data/generated/google/apis/monitoring_v3/service.rb +176 -146
- data/generated/google/apis/monitoring_v3.rb +1 -1
- data/generated/google/apis/networkmanagement_v1/classes.rb +273 -429
- data/generated/google/apis/networkmanagement_v1/service.rb +97 -120
- data/generated/google/apis/networkmanagement_v1.rb +1 -1
- data/generated/google/apis/networkmanagement_v1beta1/classes.rb +456 -429
- data/generated/google/apis/networkmanagement_v1beta1/representations.rb +63 -0
- data/generated/google/apis/networkmanagement_v1beta1/service.rb +97 -120
- data/generated/google/apis/networkmanagement_v1beta1.rb +1 -1
- data/generated/google/apis/osconfig_v1/classes.rb +154 -902
- data/generated/google/apis/osconfig_v1/representations.rb +0 -337
- data/generated/google/apis/osconfig_v1/service.rb +26 -31
- data/generated/google/apis/osconfig_v1.rb +3 -3
- data/generated/google/apis/osconfig_v1beta/classes.rb +327 -411
- data/generated/google/apis/osconfig_v1beta/service.rb +43 -56
- data/generated/google/apis/osconfig_v1beta.rb +3 -3
- data/generated/google/apis/oslogin_v1/classes.rb +14 -12
- data/generated/google/apis/oslogin_v1/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1/service.rb +12 -16
- data/generated/google/apis/oslogin_v1.rb +1 -1
- data/generated/google/apis/oslogin_v1alpha/classes.rb +16 -14
- data/generated/google/apis/oslogin_v1alpha/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1alpha/service.rb +17 -17
- data/generated/google/apis/oslogin_v1alpha.rb +1 -1
- data/generated/google/apis/oslogin_v1beta/classes.rb +14 -12
- data/generated/google/apis/oslogin_v1beta/representations.rb +1 -0
- data/generated/google/apis/oslogin_v1beta/service.rb +12 -16
- data/generated/google/apis/oslogin_v1beta.rb +1 -1
- data/generated/google/apis/pagespeedonline_v5/classes.rb +18 -24
- data/generated/google/apis/pagespeedonline_v5/service.rb +3 -4
- data/generated/google/apis/pagespeedonline_v5.rb +2 -2
- data/generated/google/apis/people_v1/classes.rb +173 -63
- data/generated/google/apis/people_v1/representations.rb +41 -0
- data/generated/google/apis/people_v1/service.rb +63 -61
- data/generated/google/apis/people_v1.rb +1 -1
- data/generated/google/apis/playablelocations_v3/classes.rb +114 -161
- data/generated/google/apis/playablelocations_v3/service.rb +10 -10
- data/generated/google/apis/playablelocations_v3.rb +1 -1
- data/generated/google/apis/playcustomapp_v1/service.rb +2 -2
- data/generated/google/apis/playcustomapp_v1.rb +1 -1
- data/generated/google/apis/poly_v1/classes.rb +65 -79
- data/generated/google/apis/poly_v1/service.rb +50 -63
- data/generated/google/apis/poly_v1.rb +3 -4
- data/generated/google/apis/privateca_v1beta1/classes.rb +2466 -0
- data/generated/google/apis/privateca_v1beta1/representations.rb +996 -0
- data/generated/google/apis/privateca_v1beta1/service.rb +1487 -0
- data/generated/google/apis/privateca_v1beta1.rb +34 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/classes.rb +26 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/representations.rb +14 -0
- data/generated/google/apis/prod_tt_sasportal_v1alpha1/service.rb +644 -56
- data/generated/google/apis/prod_tt_sasportal_v1alpha1.rb +1 -1
- data/generated/google/apis/pubsub_v1/classes.rb +399 -518
- data/generated/google/apis/pubsub_v1/representations.rb +2 -0
- data/generated/google/apis/pubsub_v1/service.rb +221 -247
- data/generated/google/apis/pubsub_v1.rb +1 -1
- data/generated/google/apis/pubsub_v1beta1a/classes.rb +71 -86
- data/generated/google/apis/pubsub_v1beta1a/service.rb +31 -38
- data/generated/google/apis/pubsub_v1beta1a.rb +1 -1
- data/generated/google/apis/pubsub_v1beta2/classes.rb +251 -354
- data/generated/google/apis/pubsub_v1beta2/representations.rb +1 -0
- data/generated/google/apis/pubsub_v1beta2/service.rb +96 -108
- data/generated/google/apis/pubsub_v1beta2.rb +1 -1
- data/generated/google/apis/pubsublite_v1/classes.rb +461 -0
- data/generated/google/apis/pubsublite_v1/representations.rb +261 -0
- data/generated/google/apis/pubsublite_v1/service.rb +558 -0
- data/generated/google/apis/{memcache_v1.rb → pubsublite_v1.rb} +8 -9
- data/generated/google/apis/realtimebidding_v1/classes.rb +84 -123
- data/generated/google/apis/realtimebidding_v1/representations.rb +18 -32
- data/generated/google/apis/realtimebidding_v1/service.rb +4 -1
- data/generated/google/apis/realtimebidding_v1.rb +1 -1
- data/generated/google/apis/recommendationengine_v1beta1/classes.rb +367 -456
- data/generated/google/apis/recommendationengine_v1beta1/representations.rb +25 -16
- data/generated/google/apis/recommendationengine_v1beta1/service.rb +140 -206
- data/generated/google/apis/recommendationengine_v1beta1.rb +1 -1
- data/generated/google/apis/recommender_v1/classes.rb +1 -1
- data/generated/google/apis/recommender_v1/service.rb +4 -2
- data/generated/google/apis/recommender_v1.rb +1 -1
- data/generated/google/apis/recommender_v1beta1/classes.rb +1 -1
- data/generated/google/apis/recommender_v1beta1/service.rb +4 -2
- data/generated/google/apis/recommender_v1beta1.rb +1 -1
- data/generated/google/apis/redis_v1/classes.rb +91 -513
- data/generated/google/apis/redis_v1/representations.rb +0 -139
- data/generated/google/apis/redis_v1/service.rb +92 -109
- data/generated/google/apis/redis_v1.rb +1 -1
- data/generated/google/apis/redis_v1beta1/classes.rb +123 -517
- data/generated/google/apis/redis_v1beta1/representations.rb +12 -137
- data/generated/google/apis/redis_v1beta1/service.rb +126 -109
- data/generated/google/apis/redis_v1beta1.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v1/classes.rb +957 -1078
- data/generated/google/apis/remotebuildexecution_v1/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v1/service.rb +26 -33
- data/generated/google/apis/remotebuildexecution_v1.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v1alpha/classes.rb +952 -1071
- data/generated/google/apis/remotebuildexecution_v1alpha/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v1alpha/service.rb +103 -65
- data/generated/google/apis/remotebuildexecution_v1alpha.rb +1 -1
- data/generated/google/apis/remotebuildexecution_v2/classes.rb +1105 -1250
- data/generated/google/apis/remotebuildexecution_v2/representations.rb +62 -0
- data/generated/google/apis/remotebuildexecution_v2/service.rb +147 -206
- data/generated/google/apis/remotebuildexecution_v2.rb +1 -1
- data/generated/google/apis/reseller_v1/classes.rb +151 -219
- data/generated/google/apis/reseller_v1/service.rb +122 -173
- data/generated/google/apis/reseller_v1.rb +2 -2
- data/generated/google/apis/run_v1/classes.rb +19 -138
- data/generated/google/apis/run_v1/representations.rb +1 -62
- data/generated/google/apis/run_v1/service.rb +0 -342
- data/generated/google/apis/run_v1.rb +1 -1
- data/generated/google/apis/run_v1alpha1/classes.rb +1 -1
- data/generated/google/apis/run_v1alpha1/representations.rb +1 -1
- data/generated/google/apis/run_v1alpha1.rb +1 -1
- data/generated/google/apis/run_v1beta1/classes.rb +3 -2
- data/generated/google/apis/run_v1beta1.rb +1 -1
- data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +301 -412
- data/generated/google/apis/runtimeconfig_v1beta1/representations.rb +1 -0
- data/generated/google/apis/runtimeconfig_v1beta1/service.rb +135 -159
- data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
- data/generated/google/apis/safebrowsing_v4/classes.rb +55 -64
- data/generated/google/apis/safebrowsing_v4/service.rb +4 -4
- data/generated/google/apis/safebrowsing_v4.rb +1 -1
- data/generated/google/apis/sasportal_v1alpha1/classes.rb +26 -0
- data/generated/google/apis/sasportal_v1alpha1/representations.rb +14 -0
- data/generated/google/apis/sasportal_v1alpha1/service.rb +644 -56
- data/generated/google/apis/sasportal_v1alpha1.rb +1 -1
- data/generated/google/apis/script_v1/classes.rb +88 -111
- data/generated/google/apis/script_v1/service.rb +63 -69
- data/generated/google/apis/script_v1.rb +1 -1
- data/generated/google/apis/searchconsole_v1/classes.rb +388 -0
- data/generated/google/apis/searchconsole_v1/representations.rb +162 -0
- data/generated/google/apis/searchconsole_v1/service.rb +287 -0
- data/generated/google/apis/searchconsole_v1.rb +7 -1
- data/generated/google/apis/secretmanager_v1/classes.rb +378 -365
- data/generated/google/apis/secretmanager_v1/representations.rb +92 -0
- data/generated/google/apis/secretmanager_v1/service.rb +66 -82
- data/generated/google/apis/secretmanager_v1.rb +1 -1
- data/generated/google/apis/secretmanager_v1beta1/classes.rb +217 -363
- data/generated/google/apis/secretmanager_v1beta1/representations.rb +1 -0
- data/generated/google/apis/secretmanager_v1beta1/service.rb +66 -82
- data/generated/google/apis/secretmanager_v1beta1.rb +1 -1
- data/generated/google/apis/securitycenter_v1/classes.rb +20 -204
- data/generated/google/apis/securitycenter_v1/representations.rb +1 -72
- data/generated/google/apis/securitycenter_v1.rb +1 -1
- data/generated/google/apis/securitycenter_v1beta1/classes.rb +22 -204
- data/generated/google/apis/securitycenter_v1beta1/representations.rb +1 -72
- data/generated/google/apis/securitycenter_v1beta1.rb +1 -1
- data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/classes.rb +275 -291
- data/generated/google/apis/{securitycenter_v1p1alpha1 → securitycenter_v1beta2}/representations.rb +83 -84
- data/generated/google/apis/securitycenter_v1beta2/service.rb +1494 -0
- data/generated/google/apis/{securitycenter_v1p1beta1.rb → securitycenter_v1beta2.rb} +6 -6
- data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +35 -123
- data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +0 -18
- data/generated/google/apis/serviceconsumermanagement_v1/service.rb +32 -30
- data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
- data/generated/google/apis/serviceconsumermanagement_v1beta1/classes.rb +24 -112
- data/generated/google/apis/serviceconsumermanagement_v1beta1/representations.rb +0 -18
- data/generated/google/apis/serviceconsumermanagement_v1beta1.rb +1 -1
- data/generated/google/apis/servicecontrol_v1/classes.rb +601 -642
- data/generated/google/apis/servicecontrol_v1/representations.rb +10 -0
- data/generated/google/apis/servicecontrol_v1/service.rb +36 -46
- data/generated/google/apis/servicecontrol_v1.rb +1 -1
- data/generated/google/apis/servicecontrol_v2/classes.rb +343 -325
- data/generated/google/apis/servicecontrol_v2/representations.rb +8 -0
- data/generated/google/apis/servicecontrol_v2/service.rb +33 -43
- data/generated/google/apis/servicecontrol_v2.rb +1 -1
- data/generated/google/apis/servicedirectory_v1beta1/classes.rb +221 -333
- data/generated/google/apis/servicedirectory_v1beta1/representations.rb +1 -0
- data/generated/google/apis/servicedirectory_v1beta1/service.rb +94 -129
- data/generated/google/apis/servicedirectory_v1beta1.rb +1 -1
- data/generated/google/apis/servicemanagement_v1/classes.rb +1244 -2174
- data/generated/google/apis/servicemanagement_v1/representations.rb +0 -31
- data/generated/google/apis/servicemanagement_v1/service.rb +141 -228
- data/generated/google/apis/servicemanagement_v1.rb +1 -1
- data/generated/google/apis/servicenetworking_v1/classes.rb +278 -121
- data/generated/google/apis/servicenetworking_v1/representations.rb +115 -15
- data/generated/google/apis/servicenetworking_v1/service.rb +118 -2
- data/generated/google/apis/servicenetworking_v1.rb +1 -1
- data/generated/google/apis/servicenetworking_v1beta/classes.rb +213 -112
- data/generated/google/apis/servicenetworking_v1beta/representations.rb +84 -14
- data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
- data/generated/google/apis/serviceusage_v1/classes.rb +57 -111
- data/generated/google/apis/serviceusage_v1/representations.rb +4 -18
- data/generated/google/apis/serviceusage_v1/service.rb +5 -1
- data/generated/google/apis/serviceusage_v1.rb +1 -1
- data/generated/google/apis/serviceusage_v1beta1/classes.rb +122 -112
- data/generated/google/apis/serviceusage_v1beta1/representations.rb +23 -18
- data/generated/google/apis/serviceusage_v1beta1/service.rb +36 -0
- data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
- data/generated/google/apis/sheets_v4/classes.rb +4029 -5014
- data/generated/google/apis/sheets_v4/representations.rb +625 -0
- data/generated/google/apis/sheets_v4/service.rb +113 -149
- data/generated/google/apis/sheets_v4.rb +1 -1
- data/generated/google/apis/site_verification_v1.rb +1 -1
- data/generated/google/apis/slides_v1/classes.rb +841 -1114
- data/generated/google/apis/slides_v1/service.rb +23 -30
- data/generated/google/apis/slides_v1.rb +1 -1
- data/generated/google/apis/smartdevicemanagement_v1/classes.rb +273 -0
- data/generated/google/apis/smartdevicemanagement_v1/representations.rb +157 -0
- data/generated/google/apis/smartdevicemanagement_v1/service.rb +304 -0
- data/generated/google/apis/smartdevicemanagement_v1.rb +35 -0
- data/generated/google/apis/sourcerepo_v1/classes.rb +250 -400
- data/generated/google/apis/sourcerepo_v1/service.rb +40 -49
- data/generated/google/apis/sourcerepo_v1.rb +1 -1
- data/generated/google/apis/spanner_v1/classes.rb +1553 -2157
- data/generated/google/apis/spanner_v1/representations.rb +1 -0
- data/generated/google/apis/spanner_v1/service.rb +443 -618
- data/generated/google/apis/spanner_v1.rb +1 -1
- data/generated/google/apis/speech_v1/classes.rb +174 -220
- data/generated/google/apis/speech_v1/service.rb +27 -32
- data/generated/google/apis/speech_v1.rb +1 -1
- data/generated/google/apis/speech_v1p1beta1/classes.rb +253 -306
- data/generated/google/apis/speech_v1p1beta1/service.rb +27 -32
- data/generated/google/apis/speech_v1p1beta1.rb +1 -1
- data/generated/google/apis/speech_v2beta1/classes.rb +66 -76
- data/generated/google/apis/speech_v2beta1/service.rb +10 -12
- data/generated/google/apis/speech_v2beta1.rb +1 -1
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/classes.rb +537 -452
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/representations.rb +142 -87
- data/generated/google/apis/{sql_v1beta4 → sqladmin_v1beta4}/service.rb +295 -300
- data/generated/google/apis/{sql_v1beta4.rb → sqladmin_v1beta4.rb} +5 -5
- data/generated/google/apis/storage_v1/classes.rb +10 -17
- data/generated/google/apis/storage_v1/representations.rb +2 -3
- data/generated/google/apis/storage_v1/service.rb +3 -2
- data/generated/google/apis/storage_v1.rb +1 -1
- data/generated/google/apis/storagetransfer_v1/classes.rb +301 -349
- data/generated/google/apis/storagetransfer_v1/representations.rb +13 -0
- data/generated/google/apis/storagetransfer_v1/service.rb +53 -72
- data/generated/google/apis/storagetransfer_v1.rb +1 -1
- data/generated/google/apis/streetviewpublish_v1/classes.rb +110 -152
- data/generated/google/apis/streetviewpublish_v1/service.rb +94 -177
- data/generated/google/apis/streetviewpublish_v1.rb +1 -1
- data/generated/google/apis/sts_v1/classes.rb +121 -0
- data/generated/google/apis/sts_v1/representations.rb +59 -0
- data/generated/google/apis/sts_v1/service.rb +90 -0
- data/generated/google/apis/sts_v1.rb +32 -0
- data/generated/google/apis/sts_v1beta/classes.rb +191 -0
- data/generated/google/apis/sts_v1beta/representations.rb +61 -0
- data/generated/google/apis/sts_v1beta/service.rb +92 -0
- data/generated/google/apis/sts_v1beta.rb +32 -0
- data/generated/google/apis/tagmanager_v1/service.rb +2 -2
- data/generated/google/apis/tagmanager_v1.rb +1 -1
- data/generated/google/apis/tagmanager_v2/classes.rb +12 -0
- data/generated/google/apis/tagmanager_v2/representations.rb +3 -0
- data/generated/google/apis/tagmanager_v2/service.rb +2 -2
- data/generated/google/apis/tagmanager_v2.rb +1 -1
- data/generated/google/apis/tasks_v1/classes.rb +21 -22
- data/generated/google/apis/tasks_v1/service.rb +19 -19
- data/generated/google/apis/tasks_v1.rb +1 -1
- data/generated/google/apis/testing_v1/classes.rb +384 -390
- data/generated/google/apis/testing_v1/representations.rb +23 -0
- data/generated/google/apis/testing_v1/service.rb +22 -28
- data/generated/google/apis/testing_v1.rb +1 -1
- data/generated/google/apis/texttospeech_v1/classes.rb +51 -57
- data/generated/google/apis/texttospeech_v1/service.rb +9 -10
- data/generated/google/apis/texttospeech_v1.rb +1 -1
- data/generated/google/apis/texttospeech_v1beta1/classes.rb +96 -57
- data/generated/google/apis/texttospeech_v1beta1/representations.rb +19 -0
- data/generated/google/apis/texttospeech_v1beta1/service.rb +9 -10
- data/generated/google/apis/texttospeech_v1beta1.rb +1 -1
- data/generated/google/apis/toolresults_v1beta3/classes.rb +20 -0
- data/generated/google/apis/toolresults_v1beta3/representations.rb +13 -0
- data/generated/google/apis/toolresults_v1beta3.rb +1 -1
- data/generated/google/apis/tpu_v1/classes.rb +57 -3
- data/generated/google/apis/tpu_v1/representations.rb +19 -0
- data/generated/google/apis/tpu_v1/service.rb +8 -8
- data/generated/google/apis/tpu_v1.rb +1 -1
- data/generated/google/apis/tpu_v1alpha1/classes.rb +57 -3
- data/generated/google/apis/tpu_v1alpha1/representations.rb +19 -0
- data/generated/google/apis/tpu_v1alpha1/service.rb +8 -8
- data/generated/google/apis/tpu_v1alpha1.rb +1 -1
- data/generated/google/apis/trafficdirector_v2/classes.rb +1347 -0
- data/generated/google/apis/trafficdirector_v2/representations.rb +620 -0
- data/generated/google/apis/trafficdirector_v2/service.rb +89 -0
- data/generated/google/apis/trafficdirector_v2.rb +34 -0
- data/generated/google/apis/translate_v3/classes.rb +151 -177
- data/generated/google/apis/translate_v3/service.rb +122 -151
- data/generated/google/apis/translate_v3.rb +1 -1
- data/generated/google/apis/translate_v3beta1/classes.rb +150 -170
- data/generated/google/apis/translate_v3beta1/service.rb +122 -151
- data/generated/google/apis/translate_v3beta1.rb +1 -1
- data/generated/google/apis/vault_v1/classes.rb +413 -103
- data/generated/google/apis/vault_v1/representations.rb +162 -0
- data/generated/google/apis/vault_v1/service.rb +182 -37
- data/generated/google/apis/vault_v1.rb +1 -1
- data/generated/google/apis/vectortile_v1/classes.rb +185 -267
- data/generated/google/apis/vectortile_v1/service.rb +75 -88
- data/generated/google/apis/vectortile_v1.rb +1 -1
- data/generated/google/apis/verifiedaccess_v1/classes.rb +20 -27
- data/generated/google/apis/verifiedaccess_v1.rb +1 -1
- data/generated/google/apis/videointelligence_v1/classes.rb +1493 -935
- data/generated/google/apis/videointelligence_v1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1/service.rb +38 -77
- data/generated/google/apis/videointelligence_v1.rb +1 -1
- data/generated/google/apis/videointelligence_v1beta2/classes.rb +1488 -928
- data/generated/google/apis/videointelligence_v1beta2/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1beta2/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1beta2.rb +1 -1
- data/generated/google/apis/videointelligence_v1p1beta1/classes.rb +1482 -922
- data/generated/google/apis/videointelligence_v1p1beta1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1p1beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p1beta1.rb +1 -1
- data/generated/google/apis/videointelligence_v1p2beta1/classes.rb +1485 -925
- data/generated/google/apis/videointelligence_v1p2beta1/representations.rb +402 -2
- data/generated/google/apis/videointelligence_v1p2beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p2beta1.rb +1 -1
- data/generated/google/apis/videointelligence_v1p3beta1/classes.rb +1410 -937
- data/generated/google/apis/videointelligence_v1p3beta1/representations.rb +368 -2
- data/generated/google/apis/videointelligence_v1p3beta1/service.rb +4 -4
- data/generated/google/apis/videointelligence_v1p3beta1.rb +1 -1
- data/generated/google/apis/vision_v1/classes.rb +16 -16
- data/generated/google/apis/vision_v1.rb +1 -1
- data/generated/google/apis/vision_v1p1beta1/classes.rb +16 -16
- data/generated/google/apis/vision_v1p1beta1.rb +1 -1
- data/generated/google/apis/vision_v1p2beta1/classes.rb +16 -16
- data/generated/google/apis/vision_v1p2beta1.rb +1 -1
- data/generated/google/apis/webfonts_v1/classes.rb +1 -2
- data/generated/google/apis/webfonts_v1/service.rb +2 -4
- data/generated/google/apis/webfonts_v1.rb +2 -3
- data/generated/google/apis/websecurityscanner_v1.rb +1 -1
- data/generated/google/apis/websecurityscanner_v1alpha.rb +1 -1
- data/generated/google/apis/websecurityscanner_v1beta.rb +1 -1
- data/generated/google/apis/workflowexecutions_v1beta/classes.rb +155 -0
- data/generated/google/apis/workflowexecutions_v1beta/representations.rb +88 -0
- data/generated/google/apis/{securitycenter_v1p1alpha1 → workflowexecutions_v1beta}/service.rb +73 -69
- data/generated/google/apis/workflowexecutions_v1beta.rb +34 -0
- data/generated/google/apis/workflows_v1beta/classes.rb +406 -0
- data/generated/google/apis/workflows_v1beta/representations.rb +173 -0
- data/generated/google/apis/workflows_v1beta/service.rb +438 -0
- data/generated/google/apis/workflows_v1beta.rb +35 -0
- data/generated/google/apis/youtube_partner_v1.rb +1 -1
- data/generated/google/apis/youtube_v3/classes.rb +0 -586
- data/generated/google/apis/youtube_v3/representations.rb +0 -269
- data/generated/google/apis/youtube_v3/service.rb +3 -120
- data/generated/google/apis/youtube_v3.rb +1 -1
- data/google-api-client.gemspec +25 -24
- data/lib/google/apis/core/api_command.rb +1 -0
- data/lib/google/apis/core/http_command.rb +2 -1
- data/lib/google/apis/options.rb +8 -5
- data/lib/google/apis/version.rb +1 -1
- data/synth.py +40 -0
- metadata +134 -41
- data/generated/google/apis/accessapproval_v1beta1/classes.rb +0 -417
- data/generated/google/apis/accessapproval_v1beta1/representations.rb +0 -203
- data/generated/google/apis/accessapproval_v1beta1/service.rb +0 -857
- data/generated/google/apis/appsactivity_v1/classes.rb +0 -415
- data/generated/google/apis/appsactivity_v1/representations.rb +0 -209
- data/generated/google/apis/appsactivity_v1/service.rb +0 -126
- data/generated/google/apis/dns_v2beta1/classes.rb +0 -1447
- data/generated/google/apis/dns_v2beta1/representations.rb +0 -588
- data/generated/google/apis/dns_v2beta1/service.rb +0 -928
- data/generated/google/apis/dns_v2beta1.rb +0 -43
- data/generated/google/apis/memcache_v1/classes.rb +0 -1157
- data/generated/google/apis/plus_v1/classes.rb +0 -2094
- data/generated/google/apis/plus_v1/representations.rb +0 -907
- data/generated/google/apis/plus_v1/service.rb +0 -451
- data/generated/google/apis/plus_v1.rb +0 -43
- data/generated/google/apis/securitycenter_v1p1beta1/classes.rb +0 -2059
- data/generated/google/apis/securitycenter_v1p1beta1/representations.rb +0 -789
- data/generated/google/apis/securitycenter_v1p1beta1/service.rb +0 -1243
- data/generated/google/apis/storage_v1beta2/classes.rb +0 -1047
- data/generated/google/apis/storage_v1beta2/representations.rb +0 -425
- data/generated/google/apis/storage_v1beta2/service.rb +0 -1667
- data/generated/google/apis/storage_v1beta2.rb +0 -40
data/generated/google/apis/videointelligence_v1p3beta1/classes.rb
@@ -22,9 +22,9 @@ module Google
module Apis
module VideointelligenceV1p3beta1

- # Video annotation progress. Included in the `metadata`
- #
- #
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1AnnotateVideoProgress
include Google::Apis::Core::Hashable

@@ -43,9 +43,9 @@ module Google
end
end

- # Video annotation response. Included in the `response`
- #
- #
+ # Video annotation response. Included in the `response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1AnnotateVideoResponse
include Google::Apis::Core::Hashable

@@ -73,14 +73,14 @@ module Google
# @return [Float]
attr_accessor :confidence

- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
- #
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+ # full list of supported type names will be provided in the document.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name

- # Text value of the detection result. For example, the value for "HairColor"
- #
+ # Text value of the detection result. For example, the value for "HairColor" can
+ # be "black", "blonde", etc.
# Corresponds to the JSON property `value`
# @return [String]
attr_accessor :value
@@ -112,9 +112,8 @@ module Google
# @return [String]
attr_accessor :name

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
# Corresponds to the JSON property `point`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedVertex]
attr_accessor :point
@@ -140,8 +139,7 @@ module Google
# @return [String]
attr_accessor :description

- # Opaque entity ID. Some IDs may be available in
- # [Google Knowledge Graph Search
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
# API](https://developers.google.com/knowledge-graph/).
# Corresponds to the JSON property `entityId`
# @return [String]
@@ -164,9 +162,9 @@ module Google
end
end

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
include Google::Apis::Core::Hashable

@@ -217,14 +215,110 @@ module Google
end
end

+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1FaceAnnotation
+ include Google::Apis::Core::Hashable
+
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
+ # Corresponds to the JSON property `thumbnail`
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
+ # @return [String]
+ attr_accessor :thumbnail
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1FaceFrame
+ include Google::Apis::Core::Hashable
+
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox>]
+ attr_accessor :normalized_bounding_boxes
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ # Corresponds to the JSON property `segment`
+ # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment]
+ attr_accessor :segment
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @segment = args[:segment] if args.key?(:segment)
+ end
+ end
+
# Label annotation.
class GoogleCloudVideointelligenceV1LabelAnnotation
include Google::Apis::Core::Hashable

- # Common categories for the detected entity.
- #
- #
- # also be a `pet`.
+ # Common categories for the detected entity. For example, when the label is `
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
+ # than one categories e.g., `Terrier` could also be a `pet`.
# Corresponds to the JSON property `categoryEntities`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Entity>]
attr_accessor :category_entities
@@ -323,14 +417,14 @@ module Google
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Entity]
attr_accessor :entity

- # All video segments where the recognized logo appears. There might be
- #
+ # All video segments where the recognized logo appears. There might be multiple
+ # instances of the same logo class appearing in one VideoSegment.
# Corresponds to the JSON property `segments`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment>]
attr_accessor :segments

- # All logo tracks where the recognized logo appears. Each track corresponds
- #
+ # All logo tracks where the recognized logo appears. Each track corresponds to
+ # one logo instance appearing in consecutive frames.
# Corresponds to the JSON property `tracks`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Track>]
attr_accessor :tracks
@@ -347,9 +441,8 @@ module Google
end
end

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
class GoogleCloudVideointelligenceV1NormalizedBoundingBox
include Google::Apis::Core::Hashable

@@ -387,20 +480,12 @@ module Google
end

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
include Google::Apis::Core::Hashable

@@ -419,9 +504,8 @@ module Google
end
end

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
class GoogleCloudVideointelligenceV1NormalizedVertex
include Google::Apis::Core::Hashable

@@ -460,10 +544,10 @@ module Google
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Entity]
attr_accessor :entity

- # Information corresponding to all frames where this object track appears.
- #
- #
- #
+ # Information corresponding to all frames where this object track appears. Non-
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+ # frames.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
attr_accessor :frames
@@ -473,12 +557,11 @@ module Google
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment]
attr_accessor :segment

- # Streaming mode ONLY.
- #
- #
- #
- #
- # ObjectTrackAnnotation of the same track_id over time.
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+ # tracked object before it is completed. Hence, there is no VideoSegment info
+ # returned. Instead, we provide a unique identifiable integer track_id so that
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+ # of the same track_id over time.
# Corresponds to the JSON property `trackId`
# @return [Fixnum]
attr_accessor :track_id
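The streaming-mode comment above says a tracked object carries no VideoSegment until it completes, and that clients should correlate ongoing ObjectTrackingAnnotation messages through track_id. A minimal Ruby sketch of that correlation, using only class and attribute names visible in this diff; the require path follows the gem's usual google/apis/<api>_<version> convention, the keyword-style constructor mirrors the generated pattern shown elsewhere in this file, and the sample annotations are invented stand-ins for streamed messages.

require 'google/apis/videointelligence_v1p3beta1'

vi = Google::Apis::VideointelligenceV1p3beta1

# Two partial updates for the same tracked object, as a streaming session might emit them.
streamed = [
  vi::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation.new(track_id: 7, frames: []),
  vi::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation.new(track_id: 7, frames: [])
]

# Correlate results of the ongoing annotations with the same track_id over time.
by_track = Hash.new { |h, k| h[k] = [] }
streamed.each { |ann| by_track[ann.track_id].concat(ann.frames || []) }
by_track.each { |id, frames| puts "track #{id}: #{frames.length} frames so far" }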
@@ -508,9 +591,8 @@ module Google
class GoogleCloudVideointelligenceV1ObjectTrackingFrame
include Google::Apis::Core::Hashable

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box
@@ -531,16 +613,41 @@ module Google
end
end

+ # Person detection annotation per video.
+ class GoogleCloudVideointelligenceV1PersonDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # The detected tracks of a person.
+ # Corresponds to the JSON property `tracks`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1Track>]
+ attr_accessor :tracks
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @tracks = args[:tracks] if args.key?(:tracks)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
# Alternative hypotheses (a.k.a. n-best list).
class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence
@@ -551,8 +658,8 @@ module Google
attr_accessor :transcript

# Output only. A list of word-specific information for each recognized word.
- # Note: When `enable_speaker_diarization` is set to true, you will see all
- #
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
+ # words from the beginning of the audio.
# Corresponds to the JSON property `words`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1WordInfo>]
attr_accessor :words
@@ -573,18 +680,17 @@ module Google
class GoogleCloudVideointelligenceV1SpeechTranscription
include Google::Apis::Core::Hashable

- # May contain one or more recognition hypotheses (up to the maximum specified
- #
- #
- #
+ # May contain one or more recognition hypotheses (up to the maximum specified in
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+ # the top (first) alternative being the most probable, as ranked by the
+ # recognizer.
# Corresponds to the JSON property `alternatives`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
attr_accessor :alternatives

# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- # language tag of
- #
- # most likelihood of being spoken in the audio.
+ # language tag of the language in this result. This language code was detected
+ # to have the most likelihood of being spoken in the audio.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code
@@ -633,27 +739,19 @@ module Google
end
end

- # Video frame level annotation results for text annotation (OCR).
- #
- #
+ # Video frame level annotation results for text annotation (OCR). Contains
+ # information regarding timestamp and bounding box locations for the frames
+ # containing detected OCR text snippets.
class GoogleCloudVideointelligenceV1TextFrame
include Google::Apis::Core::Hashable

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
# Corresponds to the JSON property `rotatedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
attr_accessor :rotated_bounding_box
@@ -706,9 +804,8 @@ module Google
end
end

- # For tracking related features.
- #
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
class GoogleCloudVideointelligenceV1TimestampedObject
include Google::Apis::Core::Hashable

@@ -722,15 +819,14 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1DetectedLandmark>]
attr_accessor :landmarks

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset
@@ -789,20 +885,19 @@ module Google
class GoogleCloudVideointelligenceV1VideoAnnotationProgress
include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- #
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
# Corresponds to the JSON property `feature`
# @return [String]
attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- #
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
# Corresponds to the JSON property `progressPercent`
# @return [Fixnum]
attr_accessor :progress_percent
@@ -841,31 +936,40 @@ module Google
class GoogleCloudVideointelligenceV1VideoAnnotationResults
include Google::Apis::Core::Hashable

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
# Corresponds to the JSON property `explicitAnnotation`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
attr_accessor :explicit_annotation

- #
- #
+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1FaceAnnotation>]
+ attr_accessor :face_annotations
+
+ # Face detection annotations.
+ # Corresponds to the JSON property `faceDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1FaceDetectionAnnotation>]
+ attr_accessor :face_detection_annotations
+
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
# Corresponds to the JSON property `frameLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri
@@ -880,6 +984,11 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation>]
attr_accessor :object_annotations

+ # Person detection annotations.
+ # Corresponds to the JSON property `personDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1PersonDetectionAnnotation>]
+ attr_accessor :person_detection_annotations
+
# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment]
@@ -892,11 +1001,11 @@ module Google
attr_accessor :segment_label_annotations

# Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- #
- #
- #
- #
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
attr_accessor :segment_presence_label_annotations
@@ -906,17 +1015,17 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1VideoSegment>]
attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- #
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
# Corresponds to the JSON property `shotLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
attr_accessor :shot_label_annotations

# Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- #
- #
- #
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
attr_accessor :shot_presence_label_annotations
@@ -926,9 +1035,8 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1SpeechTranscription>]
attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- #
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
# Corresponds to the JSON property `textAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1TextAnnotation>]
attr_accessor :text_annotations
@@ -941,10 +1049,13 @@ module Google
def update!(**args)
@error = args[:error] if args.key?(:error)
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
@input_uri = args[:input_uri] if args.key?(:input_uri)
@logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
@segment = args[:segment] if args.key?(:segment)
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
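The update! hunk above wires the new face_detection_annotations and person_detection_annotations properties into GoogleCloudVideointelligenceV1VideoAnnotationResults, while face_annotations is kept but documented as deprecated. A minimal sketch of reading the new properties, using only names that appear in this diff; the require path and the keyword-style constructor follow the generated pattern shown above, and the values are invented.

require 'google/apis/videointelligence_v1p3beta1'

vi = Google::Apis::VideointelligenceV1p3beta1

# Build a results object locally, the same shape the client would deserialize from a response.
results = vi::GoogleCloudVideointelligenceV1VideoAnnotationResults.new(
  face_detection_annotations: [
    vi::GoogleCloudVideointelligenceV1FaceDetectionAnnotation.new(version: 'example-version')
  ],
  person_detection_annotations: [
    vi::GoogleCloudVideointelligenceV1PersonDetectionAnnotation.new(tracks: [], version: 'example-version')
  ]
)

# Prefer the new fields; face_annotations is marked deprecated above.
(results.face_detection_annotations || []).each do |face|
  puts "face detection feature version: #{face.version}"
end
(results.person_detection_annotations || []).each do |person|
  puts "person detection: #{(person.tracks || []).length} tracks (version #{person.version})"
end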
@@ -960,14 +1071,14 @@ module Google
class GoogleCloudVideointelligenceV1VideoSegment
include Google::Apis::Core::Hashable

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
# Corresponds to the JSON property `endTimeOffset`
# @return [String]
attr_accessor :end_time_offset

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
# Corresponds to the JSON property `startTimeOffset`
# @return [String]
attr_accessor :start_time_offset
@@ -984,41 +1095,41 @@ module Google
end

# Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- #
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
class GoogleCloudVideointelligenceV1WordInfo
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time

- # Output only. A distinct integer value is assigned for every speaker within
- #
- #
- #
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
# Corresponds to the JSON property `speakerTag`
# @return [Fixnum]
attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time
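The WordInfo hunk above documents speaker_tag, start_time and end_time at word level. A minimal sketch of walking a transcription down to its words, using only attributes shown in this diff; the object is built locally the same way the client would deserialize one, and all values are invented.

require 'google/apis/videointelligence_v1p3beta1'

vi = Google::Apis::VideointelligenceV1p3beta1

transcription = vi::GoogleCloudVideointelligenceV1SpeechTranscription.new(
  language_code: 'en-US',
  alternatives: [
    vi::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative.new(
      transcript: 'example transcript',
      confidence: 0.92,
      words: [
        vi::GoogleCloudVideointelligenceV1WordInfo.new(
          start_time: '0s', end_time: '0.400s', speaker_tag: 1, confidence: 0.9
        )
      ]
    )
  ]
)

top = transcription.alternatives.first # ranked most probable by the recognizer, per the comment above
puts "#{transcription.language_code}: #{top.transcript} (confidence #{top.confidence})"
(top.words || []).each do |word_info|
  # speaker_tag is only populated when speaker diarization was enabled in the request.
  puts "  #{word_info.start_time}..#{word_info.end_time} speaker=#{word_info.speaker_tag}"
end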
@@ -1042,9 +1153,9 @@ module Google
end
end

- # Video annotation progress. Included in the `metadata`
- #
- #
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
include Google::Apis::Core::Hashable

@@ -1063,9 +1174,9 @@ module Google
end
end

- # Video annotation response. Included in the `response`
- #
- #
+ # Video annotation response. Included in the `response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
include Google::Apis::Core::Hashable

@@ -1093,14 +1204,14 @@ module Google
# @return [Float]
attr_accessor :confidence

- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
- #
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+ # full list of supported type names will be provided in the document.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name

- # Text value of the detection result. For example, the value for "HairColor"
- #
+ # Text value of the detection result. For example, the value for "HairColor" can
+ # be "black", "blonde", etc.
# Corresponds to the JSON property `value`
# @return [String]
attr_accessor :value
@@ -1132,9 +1243,8 @@ module Google
# @return [String]
attr_accessor :name

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
# Corresponds to the JSON property `point`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex]
attr_accessor :point
@@ -1160,8 +1270,7 @@ module Google
# @return [String]
attr_accessor :description

- # Opaque entity ID. Some IDs may be available in
- # [Google Knowledge Graph Search
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
# API](https://developers.google.com/knowledge-graph/).
# Corresponds to the JSON property `entityId`
# @return [String]
@@ -1184,9 +1293,9 @@ module Google
end
end

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
include Google::Apis::Core::Hashable

@@ -1237,14 +1346,110 @@ module Google
end
end

+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1beta2FaceAnnotation
+ include Google::Apis::Core::Hashable
+
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
+ # Corresponds to the JSON property `thumbnail`
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
+ # @return [String]
+ attr_accessor :thumbnail
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1beta2FaceFrame
+ include Google::Apis::Core::Hashable
+
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox>]
+ attr_accessor :normalized_bounding_boxes
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1beta2FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ # Corresponds to the JSON property `segment`
+ # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
+ attr_accessor :segment
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @segment = args[:segment] if args.key?(:segment)
+ end
+ end
+
# Label annotation.
class GoogleCloudVideointelligenceV1beta2LabelAnnotation
include Google::Apis::Core::Hashable

- # Common categories for the detected entity.
- #
- #
- # also be a `pet`.
+ # Common categories for the detected entity. For example, when the label is `
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
+ # than one categories e.g., `Terrier` could also be a `pet`.
# Corresponds to the JSON property `categoryEntities`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Entity>]
attr_accessor :category_entities
@@ -1343,14 +1548,14 @@ module Google
|
|
1343
1548
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Entity]
|
1344
1549
|
attr_accessor :entity
|
1345
1550
|
|
1346
|
-
# All video segments where the recognized logo appears. There might be
|
1347
|
-
#
|
1551
|
+
# All video segments where the recognized logo appears. There might be multiple
|
1552
|
+
# instances of the same logo class appearing in one VideoSegment.
|
1348
1553
|
# Corresponds to the JSON property `segments`
|
1349
1554
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
|
1350
1555
|
attr_accessor :segments
|
1351
1556
|
|
1352
|
-
# All logo tracks where the recognized logo appears. Each track corresponds
|
1353
|
-
#
|
1557
|
+
# All logo tracks where the recognized logo appears. Each track corresponds to
|
1558
|
+
# one logo instance appearing in consecutive frames.
|
1354
1559
|
# Corresponds to the JSON property `tracks`
|
1355
1560
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Track>]
|
1356
1561
|
attr_accessor :tracks
|
@@ -1367,9 +1572,8 @@ module Google
|
|
1367
1572
|
end
|
1368
1573
|
end
|
1369
1574
|
|
1370
|
-
# Normalized bounding box.
|
1371
|
-
#
|
1372
|
-
# Range: [0, 1].
|
1575
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
1576
|
+
# original image. Range: [0, 1].
|
1373
1577
|
class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
|
1374
1578
|
include Google::Apis::Core::Hashable
|
1375
1579
|
|
@@ -1407,20 +1611,12 @@ module Google
|
|
1407
1611
|
end
|
1408
1612
|
|
1409
1613
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
1410
|
-
# Contains list of the corner points in clockwise order starting from
|
1411
|
-
#
|
1412
|
-
#
|
1413
|
-
#
|
1414
|
-
#
|
1415
|
-
#
|
1416
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
1417
|
-
# becomes:
|
1418
|
-
# 2----3
|
1419
|
-
# | |
|
1420
|
-
# 1----0
|
1421
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
1422
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
1423
|
-
# the box.
|
1614
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
1615
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
1616
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
1617
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
1618
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
1619
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
1424
1620
|
class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
|
1425
1621
|
include Google::Apis::Core::Hashable
|
1426
1622
|
|
@@ -1439,9 +1635,8 @@ module Google
|
|
1439
1635
|
end
|
1440
1636
|
end
|
1441
1637
|
|
1442
|
-
# A vertex represents a 2D point in the image.
|
1443
|
-
#
|
1444
|
-
# and range from 0 to 1.
|
1638
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
1639
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
1445
1640
|
class GoogleCloudVideointelligenceV1beta2NormalizedVertex
|
1446
1641
|
include Google::Apis::Core::Hashable
|
1447
1642
|
|
@@ -1480,10 +1675,10 @@ module Google
|
|
1480
1675
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Entity]
|
1481
1676
|
attr_accessor :entity
|
1482
1677
|
|
1483
|
-
# Information corresponding to all frames where this object track appears.
|
1484
|
-
#
|
1485
|
-
#
|
1486
|
-
#
|
1678
|
+
# Information corresponding to all frames where this object track appears. Non-
|
1679
|
+
# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
|
1680
|
+
# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
|
1681
|
+
# frames.
|
1487
1682
|
# Corresponds to the JSON property `frames`
|
1488
1683
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
|
1489
1684
|
attr_accessor :frames
|
@@ -1493,12 +1688,11 @@ module Google
|
|
1493
1688
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
|
1494
1689
|
attr_accessor :segment
|
1495
1690
|
|
1496
|
-
# Streaming mode ONLY.
|
1497
|
-
#
|
1498
|
-
#
|
1499
|
-
#
|
1500
|
-
#
|
1501
|
-
# ObjectTrackAnnotation of the same track_id over time.
|
1691
|
+
# Streaming mode ONLY. In streaming mode, we do not know the end time of a
|
1692
|
+
# tracked object before it is completed. Hence, there is no VideoSegment info
|
1693
|
+
# returned. Instead, we provide a unique identifiable integer track_id so that
|
1694
|
+
# the customers can correlate the results of the ongoing ObjectTrackAnnotation
|
1695
|
+
# of the same track_id over time.
|
1502
1696
|
# Corresponds to the JSON property `trackId`
|
1503
1697
|
# @return [Fixnum]
|
1504
1698
|
attr_accessor :track_id
|
@@ -1528,9 +1722,8 @@ module Google
|
|
1528
1722
|
class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
|
1529
1723
|
include Google::Apis::Core::Hashable
|
1530
1724
|
|
1531
|
-
# Normalized bounding box.
|
1532
|
-
#
|
1533
|
-
# Range: [0, 1].
|
1725
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
1726
|
+
# original image. Range: [0, 1].
|
1534
1727
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
1535
1728
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
|
1536
1729
|
attr_accessor :normalized_bounding_box
|
@@ -1551,16 +1744,41 @@ module Google
|
|
1551
1744
|
end
|
1552
1745
|
end
|
1553
1746
|
|
1747
|
+
# Person detection annotation per video.
|
1748
|
+
class GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation
|
1749
|
+
include Google::Apis::Core::Hashable
|
1750
|
+
|
1751
|
+
# The detected tracks of a person.
|
1752
|
+
# Corresponds to the JSON property `tracks`
|
1753
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2Track>]
|
1754
|
+
attr_accessor :tracks
|
1755
|
+
|
1756
|
+
# Feature version.
|
1757
|
+
# Corresponds to the JSON property `version`
|
1758
|
+
# @return [String]
|
1759
|
+
attr_accessor :version
|
1760
|
+
|
1761
|
+
def initialize(**args)
|
1762
|
+
update!(**args)
|
1763
|
+
end
|
1764
|
+
|
1765
|
+
# Update properties of this object
|
1766
|
+
def update!(**args)
|
1767
|
+
@tracks = args[:tracks] if args.key?(:tracks)
|
1768
|
+
@version = args[:version] if args.key?(:version)
|
1769
|
+
end
|
1770
|
+
end
|
1771
|
+
|
1554
1772
|
# Alternative hypotheses (a.k.a. n-best list).
|
1555
1773
|
class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative
|
1556
1774
|
include Google::Apis::Core::Hashable
|
1557
1775
|
|
1558
1776
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
1559
1777
|
# indicates an estimated greater likelihood that the recognized words are
|
1560
|
-
# correct. This field is set only for the top alternative.
|
1561
|
-
#
|
1562
|
-
#
|
1563
|
-
#
|
1778
|
+
# correct. This field is set only for the top alternative. This field is not
|
1779
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
1780
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
1781
|
+
# not set.
|
1564
1782
|
# Corresponds to the JSON property `confidence`
|
1565
1783
|
# @return [Float]
|
1566
1784
|
attr_accessor :confidence
|
@@ -1571,8 +1789,8 @@ module Google
|
|
1571
1789
|
attr_accessor :transcript
|
1572
1790
|
|
1573
1791
|
# Output only. A list of word-specific information for each recognized word.
|
1574
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
1575
|
-
#
|
1792
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
1793
|
+
# words from the beginning of the audio.
|
1576
1794
|
# Corresponds to the JSON property `words`
|
1577
1795
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2WordInfo>]
|
1578
1796
|
attr_accessor :words
|
@@ -1593,18 +1811,17 @@ module Google
class GoogleCloudVideointelligenceV1beta2SpeechTranscription
include Google::Apis::Core::Hashable

- # May contain one or more recognition hypotheses (up to the maximum specified
- #
- #
- #
+ # May contain one or more recognition hypotheses (up to the maximum specified in
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+ # the top (first) alternative being the most probable, as ranked by the
+ # recognizer.
# Corresponds to the JSON property `alternatives`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
attr_accessor :alternatives

# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- # language tag of
- #
- # most likelihood of being spoken in the audio.
+ # language tag of the language in this result. This language code was detected
+ # to have the most likelihood of being spoken in the audio.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code
@@ -1653,27 +1870,19 @@ module Google
end
end

- # Video frame level annotation results for text annotation (OCR).
- #
- #
+ # Video frame level annotation results for text annotation (OCR). Contains
+ # information regarding timestamp and bounding box locations for the frames
+ # containing detected OCR text snippets.
class GoogleCloudVideointelligenceV1beta2TextFrame
include Google::Apis::Core::Hashable

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
# Corresponds to the JSON property `rotatedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
attr_accessor :rotated_bounding_box
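Note: the comment above warns that the rotated bounding polygon's normalized vertices may fall outside [0, 1]. A small, self-contained sketch of clamping such coordinates before drawing, using plain [x, y] pairs rather than the generated vertex class (the sample values are illustrative only):

    # Clamp normalized polygon corners to the [0, 1] range before rendering.
    # The vertex order (0, 1, 2, 3) reported by the API is preserved.
    def clamp_polygon(points)
      points.map { |x, y| [x.clamp(0.0, 1.0), y.clamp(0.0, 1.0)] }
    end

    # Example: a slightly out-of-range rotated box, in the clockwise order
    # described above (top-left corner first).
    clamp_polygon([[-0.02, 0.1], [0.4, 0.08], [0.42, 0.3], [0.0, 0.32]])
    # => [[0.0, 0.1], [0.4, 0.08], [0.42, 0.3], [0.0, 0.32]]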
@@ -1726,9 +1935,8 @@ module Google
end
end

- # For tracking related features.
- #
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
class GoogleCloudVideointelligenceV1beta2TimestampedObject
include Google::Apis::Core::Hashable

@@ -1742,15 +1950,14 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2DetectedLandmark>]
attr_accessor :landmarks

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset
@@ -1809,20 +2016,19 @@ module Google
class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- #
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
# Corresponds to the JSON property `feature`
# @return [String]
attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- #
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
# Corresponds to the JSON property `progressPercent`
# @return [Fixnum]
attr_accessor :progress_percent
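Note: since `progress_percent` is documented to reach 100 only when a video is fully processed, a caller polling a long-running operation might summarize per-video progress roughly as below. This is a sketch over already-fetched `VideoAnnotationProgress` objects; the polling call itself is not shown.

    # `progress_list` is assumed to be an array of
    # GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress objects.
    def overall_progress(progress_list)
      return 0 if progress_list.nil? || progress_list.empty?
      total = progress_list.sum { |p| p.progress_percent.to_i }
      total / progress_list.size
    end

    def finished?(progress_list)
      progress_list.all? { |p| p.progress_percent.to_i == 100 }
    end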
@@ -1861,31 +2067,40 @@ module Google
class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
include Google::Apis::Core::Hashable

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
# Corresponds to the JSON property `explicitAnnotation`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
attr_accessor :explicit_annotation

- #
- #
+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2FaceAnnotation>]
+ attr_accessor :face_annotations
+
+ # Face detection annotations.
+ # Corresponds to the JSON property `faceDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2FaceDetectionAnnotation>]
+ attr_accessor :face_detection_annotations
+
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
# Corresponds to the JSON property `frameLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri
@@ -1900,6 +2115,11 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation>]
attr_accessor :object_annotations

+ # Person detection annotations.
+ # Corresponds to the JSON property `personDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2PersonDetectionAnnotation>]
+ attr_accessor :person_detection_annotations
+
# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
@@ -1912,11 +2132,11 @@ module Google
attr_accessor :segment_label_annotations

# Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- #
- #
- #
- #
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
attr_accessor :segment_presence_label_annotations
@@ -1926,17 +2146,17 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- #
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
# Corresponds to the JSON property `shotLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
attr_accessor :shot_label_annotations

# Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- #
- #
- #
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
attr_accessor :shot_presence_label_annotations
@@ -1946,9 +2166,8 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- #
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
# Corresponds to the JSON property `textAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
attr_accessor :text_annotations
@@ -1961,10 +2180,13 @@ module Google
def update!(**args)
@error = args[:error] if args.key?(:error)
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
@input_uri = args[:input_uri] if args.key?(:input_uri)
@logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
@segment = args[:segment] if args.key?(:segment)
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
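Note: with this change `VideoAnnotationResults` gains `face_detection_annotations` and `person_detection_annotations`, while `face_annotations` is marked deprecated. A hedged sketch of reading the new fields while still tolerating older responses (`results` is assumed to be an already-populated result object):

    # `results` is assumed to be a populated
    # GoogleCloudVideointelligenceV1beta2VideoAnnotationResults.
    def count_detections(results)
      # Prefer the new field; the deprecated face_annotations is only a fallback.
      faces = results.face_detection_annotations || results.face_annotations || []
      people = results.person_detection_annotations || []
      { faces: faces.size, person_tracks: people.sum { |a| (a.tracks || []).size } }
    end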
@@ -1980,14 +2202,14 @@ module Google
class GoogleCloudVideointelligenceV1beta2VideoSegment
include Google::Apis::Core::Hashable

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
# Corresponds to the JSON property `endTimeOffset`
# @return [String]
attr_accessor :end_time_offset

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
# Corresponds to the JSON property `startTimeOffset`
# @return [String]
attr_accessor :start_time_offset
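Note: both segment offsets are typed as strings. In these generated classes a duration is typically rendered in the protobuf JSON style such as "12.5s"; that format is an assumption worth verifying against real responses. A small helper for turning such a value into seconds:

    # Parse a duration string like "12.5s" into a Float number of seconds.
    # Returns nil when the offset is missing. Assumes the "<seconds>s" form.
    def offset_seconds(offset)
      return nil if offset.nil? || offset.empty?
      offset.delete_suffix('s').to_f
    end

    # segment_length = offset_seconds(segment.end_time_offset).to_f -
    #                  offset_seconds(segment.start_time_offset).to_f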
@@ -2004,41 +2226,41 @@ module Google
end

# Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- #
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
class GoogleCloudVideointelligenceV1beta2WordInfo
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time

- # Output only. A distinct integer value is assigned for every speaker within
- #
- #
- #
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
# Corresponds to the JSON property `speakerTag`
# @return [Fixnum]
attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time
@@ -2062,9 +2284,9 @@ module Google
end
end

- # Video annotation progress. Included in the `metadata`
- #
- #
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
include Google::Apis::Core::Hashable

@@ -2083,9 +2305,9 @@ module Google
end
end

- # Video annotation response. Included in the `response`
- #
- #
+ # Video annotation response. Included in the `response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
include Google::Apis::Core::Hashable

@@ -2113,14 +2335,14 @@ module Google
# @return [Float]
attr_accessor :confidence

- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
- #
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+ # full list of supported type names will be provided in the document.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name

- # Text value of the detection result. For example, the value for "HairColor"
- #
+ # Text value of the detection result. For example, the value for "HairColor" can
+ # be "black", "blonde", etc.
# Corresponds to the JSON property `value`
# @return [String]
attr_accessor :value
@@ -2152,9 +2374,8 @@ module Google
# @return [String]
attr_accessor :name

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
# Corresponds to the JSON property `point`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex]
attr_accessor :point
@@ -2180,8 +2401,7 @@ module Google
# @return [String]
attr_accessor :description

- # Opaque entity ID. Some IDs may be available in
- # [Google Knowledge Graph Search
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
# API](https://developers.google.com/knowledge-graph/).
# Corresponds to the JSON property `entityId`
# @return [String]
@@ -2204,9 +2424,9 @@ module Google
end
end

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
include Google::Apis::Core::Hashable

@@ -2226,19 +2446,97 @@ module Google

# Update properties of this object
def update!(**args)
- @frames = args[:frames] if args.key?(:frames)
+ @frames = args[:frames] if args.key?(:frames)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
+ # Video frame level annotation results for explicit content.
+ class GoogleCloudVideointelligenceV1p1beta1ExplicitContentFrame
+ include Google::Apis::Core::Hashable
+
+ # Likelihood of the pornography content..
+ # Corresponds to the JSON property `pornographyLikelihood`
+ # @return [String]
+ attr_accessor :pornography_likelihood
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p1beta1FaceAnnotation
+ include Google::Apis::Core::Hashable
+
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
+ # Corresponds to the JSON property `thumbnail`
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
+ # @return [String]
+ attr_accessor :thumbnail
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
@version = args[:version] if args.key?(:version)
end
end

- #
- class
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p1beta1FaceFrame
include Google::Apis::Core::Hashable

- #
- #
- #
-
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox>]
+ attr_accessor :normalized_bounding_boxes

# Time-offset, relative to the beginning of the video, corresponding to the
# video frame for this location.
@@ -2252,19 +2550,37 @@ module Google

# Update properties of this object
def update!(**args)
- @
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
@time_offset = args[:time_offset] if args.key?(:time_offset)
end
end

+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1p1beta1FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ # Corresponds to the JSON property `segment`
+ # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
+ attr_accessor :segment
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @segment = args[:segment] if args.key?(:segment)
+ end
+ end
+
# Label annotation.
class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
include Google::Apis::Core::Hashable

- # Common categories for the detected entity.
- #
- #
- # also be a `pet`.
+ # Common categories for the detected entity. For example, when the label is `
+ # Terrier`, the category is likely `dog`. And in some cases there might be more
+ # than one categories e.g., `Terrier` could also be a `pet`.
# Corresponds to the JSON property `categoryEntities`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Entity>]
attr_accessor :category_entities
@@ -2363,14 +2679,14 @@ module Google
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
attr_accessor :entity

- # All video segments where the recognized logo appears. There might be
- #
+ # All video segments where the recognized logo appears. There might be multiple
+ # instances of the same logo class appearing in one VideoSegment.
# Corresponds to the JSON property `segments`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
attr_accessor :segments

- # All logo tracks where the recognized logo appears. Each track corresponds
- #
+ # All logo tracks where the recognized logo appears. Each track corresponds to
+ # one logo instance appearing in consecutive frames.
# Corresponds to the JSON property `tracks`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
attr_accessor :tracks
@@ -2387,9 +2703,8 @@ module Google
end
end

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
include Google::Apis::Core::Hashable

@@ -2427,20 +2742,12 @@ module Google
end

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
include Google::Apis::Core::Hashable

@@ -2459,9 +2766,8 @@ module Google
end
end

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
include Google::Apis::Core::Hashable

@@ -2500,10 +2806,10 @@ module Google
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
attr_accessor :entity

- # Information corresponding to all frames where this object track appears.
- #
- #
- #
+ # Information corresponding to all frames where this object track appears. Non-
+ # streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
+ # in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
+ # frames.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
attr_accessor :frames
@@ -2513,12 +2819,11 @@ module Google
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
attr_accessor :segment

- # Streaming mode ONLY.
- #
- #
- #
- #
- # ObjectTrackAnnotation of the same track_id over time.
+ # Streaming mode ONLY. In streaming mode, we do not know the end time of a
+ # tracked object before it is completed. Hence, there is no VideoSegment info
+ # returned. Instead, we provide a unique identifiable integer track_id so that
+ # the customers can correlate the results of the ongoing ObjectTrackAnnotation
+ # of the same track_id over time.
# Corresponds to the JSON property `trackId`
# @return [Fixnum]
attr_accessor :track_id
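Note: as the comment explains, streaming results carry no `VideoSegment`; the `track_id` is what stitches together successive `ObjectTrackingAnnotation` messages for the same object. A sketch of accumulating streamed annotations by track, assuming they arrive as an enumerable in arrival order:

    # `annotations` is assumed to be an enumerable of
    # GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation objects
    # received from a streaming session.
    def frames_by_track(annotations)
      annotations.each_with_object(Hash.new { |h, k| h[k] = [] }) do |ann, acc|
        next if ann.track_id.nil?
        acc[ann.track_id].concat(ann.frames || [])
      end
    end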
@@ -2548,9 +2853,8 @@ module Google
class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
include Google::Apis::Core::Hashable

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box
@@ -2571,16 +2875,41 @@ module Google
end
end

+ # Person detection annotation per video.
+ class GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # The detected tracks of a person.
+ # Corresponds to the JSON property `tracks`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1Track>]
+ attr_accessor :tracks
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @tracks = args[:tracks] if args.key?(:tracks)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
# Alternative hypotheses (a.k.a. n-best list).
class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence
@@ -2591,8 +2920,8 @@ module Google
attr_accessor :transcript

# Output only. A list of word-specific information for each recognized word.
- # Note: When `enable_speaker_diarization` is set to true, you will see all
- #
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
+ # words from the beginning of the audio.
# Corresponds to the JSON property `words`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
attr_accessor :words
@@ -2613,18 +2942,17 @@ module Google
class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
include Google::Apis::Core::Hashable

- # May contain one or more recognition hypotheses (up to the maximum specified
- #
- #
- #
+ # May contain one or more recognition hypotheses (up to the maximum specified in
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+ # the top (first) alternative being the most probable, as ranked by the
+ # recognizer.
# Corresponds to the JSON property `alternatives`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
attr_accessor :alternatives

# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- # language tag of
- #
- # most likelihood of being spoken in the audio.
+ # language tag of the language in this result. This language code was detected
+ # to have the most likelihood of being spoken in the audio.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code
@@ -2673,27 +3001,19 @@ module Google
end
end

- # Video frame level annotation results for text annotation (OCR).
- #
- #
+ # Video frame level annotation results for text annotation (OCR). Contains
+ # information regarding timestamp and bounding box locations for the frames
+ # containing detected OCR text snippets.
class GoogleCloudVideointelligenceV1p1beta1TextFrame
include Google::Apis::Core::Hashable

# Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
# Corresponds to the JSON property `rotatedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
attr_accessor :rotated_bounding_box
@@ -2746,9 +3066,8 @@ module Google
end
end

- # For tracking related features.
- #
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
class GoogleCloudVideointelligenceV1p1beta1TimestampedObject
include Google::Apis::Core::Hashable

@@ -2762,15 +3081,14 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1DetectedLandmark>]
attr_accessor :landmarks

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset
@@ -2829,20 +3147,19 @@ module Google
class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- #
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
# Corresponds to the JSON property `feature`
# @return [String]
attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- #
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
# Corresponds to the JSON property `progressPercent`
# @return [Fixnum]
attr_accessor :progress_percent
@@ -2881,31 +3198,40 @@ module Google
class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
include Google::Apis::Core::Hashable

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
# Corresponds to the JSON property `explicitAnnotation`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
attr_accessor :explicit_annotation

- #
- #
+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1FaceAnnotation>]
+ attr_accessor :face_annotations
+
+ # Face detection annotations.
+ # Corresponds to the JSON property `faceDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1FaceDetectionAnnotation>]
+ attr_accessor :face_detection_annotations
+
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
# Corresponds to the JSON property `frameLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri
@@ -2920,6 +3246,11 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation>]
attr_accessor :object_annotations

+ # Person detection annotations.
+ # Corresponds to the JSON property `personDetectionAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1PersonDetectionAnnotation>]
+ attr_accessor :person_detection_annotations
+
# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
@@ -2932,11 +3263,11 @@ module Google
attr_accessor :segment_label_annotations

# Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- #
- #
- #
- #
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
attr_accessor :segment_presence_label_annotations
@@ -2946,17 +3277,17 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- #
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
# Corresponds to the JSON property `shotLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
attr_accessor :shot_label_annotations

# Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- #
- #
- #
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
attr_accessor :shot_presence_label_annotations
@@ -2966,9 +3297,8 @@ module Google
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- #
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
# Corresponds to the JSON property `textAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
attr_accessor :text_annotations
@@ -2981,10 +3311,13 @@ module Google
def update!(**args)
@error = args[:error] if args.key?(:error)
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
+ @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
@input_uri = args[:input_uri] if args.key?(:input_uri)
@logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
+ @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
@segment = args[:segment] if args.key?(:segment)
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@@ -3000,14 +3333,14 @@ module Google
class GoogleCloudVideointelligenceV1p1beta1VideoSegment
include Google::Apis::Core::Hashable

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
# Corresponds to the JSON property `endTimeOffset`
# @return [String]
attr_accessor :end_time_offset

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
# Corresponds to the JSON property `startTimeOffset`
# @return [String]
attr_accessor :start_time_offset
@@ -3024,41 +3357,41 @@ module Google
end

# Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- #
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
class GoogleCloudVideointelligenceV1p1beta1WordInfo
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time

- # Output only. A distinct integer value is assigned for every speaker within
- #
- #
- #
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
# Corresponds to the JSON property `speakerTag`
# @return [Fixnum]
attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time
@@ -3082,9 +3415,9 @@ module Google
end
end

- # Video annotation progress. Included in the `metadata`
- #
- #
+ # Video annotation progress. Included in the `metadata` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
include Google::Apis::Core::Hashable

@@ -3103,9 +3436,9 @@ module Google
end
end

- # Video annotation response. Included in the `response`
- #
- #
+ # Video annotation response. Included in the `response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
include Google::Apis::Core::Hashable

@@ -3133,14 +3466,14 @@ module Google
# @return [Float]
attr_accessor :confidence

- # The name of the attribute, for example, glasses, dark_glasses, mouth_open.
- #
+ # The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
+ # full list of supported type names will be provided in the document.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name

- # Text value of the detection result. For example, the value for "HairColor"
- #
+ # Text value of the detection result. For example, the value for "HairColor" can
+ # be "black", "blonde", etc.
# Corresponds to the JSON property `value`
# @return [String]
attr_accessor :value
@@ -3172,9 +3505,8 @@ module Google
# @return [String]
attr_accessor :name

- # A vertex represents a 2D point in the image.
- #
- # and range from 0 to 1.
+ # A vertex represents a 2D point in the image. NOTE: the normalized vertex
+ # coordinates are relative to the original image and range from 0 to 1.
# Corresponds to the JSON property `point`
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex]
attr_accessor :point
@@ -3200,8 +3532,7 @@ module Google
# @return [String]
attr_accessor :description

- # Opaque entity ID. Some IDs may be available in
- # [Google Knowledge Graph Search
+ # Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
# API](https://developers.google.com/knowledge-graph/).
# Corresponds to the JSON property `entityId`
# @return [String]
@@ -3224,9 +3555,9 @@ module Google
end
end

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
include Google::Apis::Core::Hashable

@@ -3277,14 +3608,110 @@ module Google
end
end

+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p2beta1FaceAnnotation
+ include Google::Apis::Core::Hashable
+
+ # All video frames where a face was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1FaceFrame>]
+ attr_accessor :frames
+
+ # All video segments where a face was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1FaceSegment>]
+ attr_accessor :segments
+
+ # Thumbnail of a representative face view (in JPEG format).
+ # Corresponds to the JSON property `thumbnail`
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
+ # @return [String]
+ attr_accessor :thumbnail
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ @thumbnail = args[:thumbnail] if args.key?(:thumbnail)
+ end
+ end
+
+ # Face detection annotation.
+ class GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Feature version.
+ # Corresponds to the JSON property `version`
+ # @return [String]
+ attr_accessor :version
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @version = args[:version] if args.key?(:version)
+ end
+ end
+
+ # Deprecated. No effect.
+ class GoogleCloudVideointelligenceV1p2beta1FaceFrame
+ include Google::Apis::Core::Hashable
+
+ # Normalized Bounding boxes in a frame. There can be more than one boxes if the
+ # same face is detected in multiple locations within the current frame.
+ # Corresponds to the JSON property `normalizedBoundingBoxes`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox>]
+ attr_accessor :normalized_bounding_boxes
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Video segment level annotation results for face detection.
+ class GoogleCloudVideointelligenceV1p2beta1FaceSegment
+ include Google::Apis::Core::Hashable
+
+ # Video segment.
+ # Corresponds to the JSON property `segment`
|
3695
|
+
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
|
3696
|
+
attr_accessor :segment
|
3697
|
+
|
3698
|
+
def initialize(**args)
|
3699
|
+
update!(**args)
|
3700
|
+
end
|
3701
|
+
|
3702
|
+
# Update properties of this object
|
3703
|
+
def update!(**args)
|
3704
|
+
@segment = args[:segment] if args.key?(:segment)
|
3705
|
+
end
|
3706
|
+
end
|
3707
|
+
|
3280
3708
|
# Label annotation.
|
3281
3709
|
class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
|
3282
3710
|
include Google::Apis::Core::Hashable
|
3283
3711
|
|
3284
|
-
# Common categories for the detected entity.
|
3285
|
-
#
|
3286
|
-
#
|
3287
|
-
# also be a `pet`.
|
3712
|
+
# Common categories for the detected entity. For example, when the label is `
|
3713
|
+
# Terrier`, the category is likely `dog`. And in some cases there might be more
|
3714
|
+
# than one categories e.g., `Terrier` could also be a `pet`.
|
3288
3715
|
# Corresponds to the JSON property `categoryEntities`
|
3289
3716
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Entity>]
|
3290
3717
|
attr_accessor :category_entities
|
@@ -3383,14 +3810,14 @@ module Google
|
|
3383
3810
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
|
3384
3811
|
attr_accessor :entity
|
3385
3812
|
|
3386
|
-
# All video segments where the recognized logo appears. There might be
|
3387
|
-
#
|
3813
|
+
# All video segments where the recognized logo appears. There might be multiple
|
3814
|
+
# instances of the same logo class appearing in one VideoSegment.
|
3388
3815
|
# Corresponds to the JSON property `segments`
|
3389
3816
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
|
3390
3817
|
attr_accessor :segments
|
3391
3818
|
|
3392
|
-
# All logo tracks where the recognized logo appears. Each track corresponds
|
3393
|
-
#
|
3819
|
+
# All logo tracks where the recognized logo appears. Each track corresponds to
|
3820
|
+
# one logo instance appearing in consecutive frames.
|
3394
3821
|
# Corresponds to the JSON property `tracks`
|
3395
3822
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Track>]
|
3396
3823
|
attr_accessor :tracks
|
@@ -3407,9 +3834,8 @@ module Google
|
|
3407
3834
|
end
|
3408
3835
|
end
|
3409
3836
|
|
3410
|
-
# Normalized bounding box.
|
3411
|
-
#
|
3412
|
-
# Range: [0, 1].
|
3837
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
3838
|
+
# original image. Range: [0, 1].
|
3413
3839
|
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
|
3414
3840
|
include Google::Apis::Core::Hashable
|
3415
3841
|
|
@@ -3447,20 +3873,12 @@ module Google
|
|
3447
3873
|
end
|
3448
3874
|
|
3449
3875
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
3450
|
-
# Contains list of the corner points in clockwise order starting from
|
3451
|
-
#
|
3452
|
-
#
|
3453
|
-
#
|
3454
|
-
#
|
3455
|
-
#
|
3456
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
3457
|
-
# becomes:
|
3458
|
-
# 2----3
|
3459
|
-
# | |
|
3460
|
-
# 1----0
|
3461
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
3462
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
3463
|
-
# the box.
|
3876
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
3877
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
3878
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
3879
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
3880
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
3881
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
3464
3882
|
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
|
3465
3883
|
include Google::Apis::Core::Hashable
|
3466
3884
|
|
@@ -3479,9 +3897,8 @@ module Google
|
|
3479
3897
|
end
|
3480
3898
|
end
|
3481
3899
|
|
3482
|
-
# A vertex represents a 2D point in the image.
|
3483
|
-
#
|
3484
|
-
# and range from 0 to 1.
|
3900
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
3901
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
3485
3902
|
class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
|
3486
3903
|
include Google::Apis::Core::Hashable
|
3487
3904
|
|
@@ -3520,10 +3937,10 @@ module Google
|
|
3520
3937
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
|
3521
3938
|
attr_accessor :entity
|
3522
3939
|
|
3523
|
-
# Information corresponding to all frames where this object track appears.
|
3524
|
-
#
|
3525
|
-
#
|
3526
|
-
#
|
3940
|
+
# Information corresponding to all frames where this object track appears. Non-
|
3941
|
+
# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
|
3942
|
+
# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
|
3943
|
+
# frames.
|
3527
3944
|
# Corresponds to the JSON property `frames`
|
3528
3945
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
|
3529
3946
|
attr_accessor :frames
|
@@ -3533,12 +3950,11 @@ module Google
|
|
3533
3950
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
|
3534
3951
|
attr_accessor :segment
|
3535
3952
|
|
3536
|
-
# Streaming mode ONLY.
|
3537
|
-
#
|
3538
|
-
#
|
3539
|
-
#
|
3540
|
-
#
|
3541
|
-
# ObjectTrackAnnotation of the same track_id over time.
|
3953
|
+
# Streaming mode ONLY. In streaming mode, we do not know the end time of a
|
3954
|
+
# tracked object before it is completed. Hence, there is no VideoSegment info
|
3955
|
+
# returned. Instead, we provide a unique identifiable integer track_id so that
|
3956
|
+
# the customers can correlate the results of the ongoing ObjectTrackAnnotation
|
3957
|
+
# of the same track_id over time.
|
3542
3958
|
# Corresponds to the JSON property `trackId`
|
3543
3959
|
# @return [Fixnum]
|
3544
3960
|
attr_accessor :track_id
|
@@ -3568,9 +3984,8 @@ module Google
|
|
3568
3984
|
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
|
3569
3985
|
include Google::Apis::Core::Hashable
|
3570
3986
|
|
3571
|
-
# Normalized bounding box.
|
3572
|
-
#
|
3573
|
-
# Range: [0, 1].
|
3987
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
3988
|
+
# original image. Range: [0, 1].
|
3574
3989
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
3575
3990
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
|
3576
3991
|
attr_accessor :normalized_bounding_box
|
@@ -3591,16 +4006,41 @@ module Google
|
|
3591
4006
|
end
|
3592
4007
|
end
|
3593
4008
|
|
4009
|
+
# Person detection annotation per video.
|
4010
|
+
class GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation
|
4011
|
+
include Google::Apis::Core::Hashable
|
4012
|
+
|
4013
|
+
# The detected tracks of a person.
|
4014
|
+
# Corresponds to the JSON property `tracks`
|
4015
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1Track>]
|
4016
|
+
attr_accessor :tracks
|
4017
|
+
|
4018
|
+
# Feature version.
|
4019
|
+
# Corresponds to the JSON property `version`
|
4020
|
+
# @return [String]
|
4021
|
+
attr_accessor :version
|
4022
|
+
|
4023
|
+
def initialize(**args)
|
4024
|
+
update!(**args)
|
4025
|
+
end
|
4026
|
+
|
4027
|
+
# Update properties of this object
|
4028
|
+
def update!(**args)
|
4029
|
+
@tracks = args[:tracks] if args.key?(:tracks)
|
4030
|
+
@version = args[:version] if args.key?(:version)
|
4031
|
+
end
|
4032
|
+
end
|
4033
|
+
|
3594
4034
|
# Alternative hypotheses (a.k.a. n-best list).
|
3595
4035
|
class GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative
|
3596
4036
|
include Google::Apis::Core::Hashable
|
3597
4037
|
|
3598
4038
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
3599
4039
|
# indicates an estimated greater likelihood that the recognized words are
|
3600
|
-
# correct. This field is set only for the top alternative.
|
3601
|
-
#
|
3602
|
-
#
|
3603
|
-
#
|
4040
|
+
# correct. This field is set only for the top alternative. This field is not
|
4041
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
4042
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
4043
|
+
# not set.
|
3604
4044
|
# Corresponds to the JSON property `confidence`
|
3605
4045
|
# @return [Float]
|
3606
4046
|
attr_accessor :confidence
|
@@ -3611,8 +4051,8 @@ module Google
|
|
3611
4051
|
attr_accessor :transcript
|
3612
4052
|
|
3613
4053
|
# Output only. A list of word-specific information for each recognized word.
|
3614
|
-
# Note: When `enable_speaker_diarization` is set to true, you will see all
|
3615
|
-
#
|
4054
|
+
# Note: When `enable_speaker_diarization` is set to true, you will see all the
|
4055
|
+
# words from the beginning of the audio.
|
3616
4056
|
# Corresponds to the JSON property `words`
|
3617
4057
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
|
3618
4058
|
attr_accessor :words
|
@@ -3633,18 +4073,17 @@ module Google
|
|
3633
4073
|
class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
|
3634
4074
|
include Google::Apis::Core::Hashable
|
3635
4075
|
|
3636
|
-
# May contain one or more recognition hypotheses (up to the maximum specified
|
3637
|
-
#
|
3638
|
-
#
|
3639
|
-
#
|
4076
|
+
# May contain one or more recognition hypotheses (up to the maximum specified in
|
4077
|
+
# `max_alternatives`). These alternatives are ordered in terms of accuracy, with
|
4078
|
+
# the top (first) alternative being the most probable, as ranked by the
|
4079
|
+
# recognizer.
|
3640
4080
|
# Corresponds to the JSON property `alternatives`
|
3641
4081
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
|
3642
4082
|
attr_accessor :alternatives
|
3643
4083
|
|
3644
4084
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
3645
|
-
# language tag of
|
3646
|
-
#
|
3647
|
-
# most likelihood of being spoken in the audio.
|
4085
|
+
# language tag of the language in this result. This language code was detected
|
4086
|
+
# to have the most likelihood of being spoken in the audio.
|
3648
4087
|
# Corresponds to the JSON property `languageCode`
|
3649
4088
|
# @return [String]
|
3650
4089
|
attr_accessor :language_code
|
@@ -3693,27 +4132,19 @@ module Google
|
|
3693
4132
|
end
|
3694
4133
|
end
|
3695
4134
|
|
3696
|
-
# Video frame level annotation results for text annotation (OCR).
|
3697
|
-
#
|
3698
|
-
#
|
4135
|
+
# Video frame level annotation results for text annotation (OCR). Contains
|
4136
|
+
# information regarding timestamp and bounding box locations for the frames
|
4137
|
+
# containing detected OCR text snippets.
|
3699
4138
|
class GoogleCloudVideointelligenceV1p2beta1TextFrame
|
3700
4139
|
include Google::Apis::Core::Hashable
|
3701
4140
|
|
3702
4141
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
3703
|
-
# Contains list of the corner points in clockwise order starting from
|
3704
|
-
#
|
3705
|
-
#
|
3706
|
-
#
|
3707
|
-
#
|
3708
|
-
#
|
3709
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
3710
|
-
# becomes:
|
3711
|
-
# 2----3
|
3712
|
-
# | |
|
3713
|
-
# 1----0
|
3714
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
3715
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
3716
|
-
# the box.
|
4142
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
4143
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
4144
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
4145
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
4146
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
4147
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
3717
4148
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
3718
4149
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
|
3719
4150
|
attr_accessor :rotated_bounding_box
|
@@ -3766,9 +4197,8 @@ module Google
|
|
3766
4197
|
end
|
3767
4198
|
end
|
3768
4199
|
|
3769
|
-
# For tracking related features.
|
3770
|
-
#
|
3771
|
-
# normalized_bounding_box.
|
4200
|
+
# For tracking related features. An object at time_offset with attributes, and
|
4201
|
+
# located with normalized_bounding_box.
|
3772
4202
|
class GoogleCloudVideointelligenceV1p2beta1TimestampedObject
|
3773
4203
|
include Google::Apis::Core::Hashable
|
3774
4204
|
|
@@ -3782,15 +4212,14 @@ module Google
|
|
3782
4212
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1DetectedLandmark>]
|
3783
4213
|
attr_accessor :landmarks
|
3784
4214
|
|
3785
|
-
# Normalized bounding box.
|
3786
|
-
#
|
3787
|
-
# Range: [0, 1].
|
4215
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
4216
|
+
# original image. Range: [0, 1].
|
3788
4217
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
3789
4218
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
|
3790
4219
|
attr_accessor :normalized_bounding_box
|
3791
4220
|
|
3792
|
-
# Time-offset, relative to the beginning of the video,
|
3793
|
-
#
|
4221
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
4222
|
+
# video frame for this object.
|
3794
4223
|
# Corresponds to the JSON property `timeOffset`
|
3795
4224
|
# @return [String]
|
3796
4225
|
attr_accessor :time_offset
|
@@ -3849,20 +4278,19 @@ module Google
|
|
3849
4278
|
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
|
3850
4279
|
include Google::Apis::Core::Hashable
|
3851
4280
|
|
3852
|
-
# Specifies which feature is being tracked if the request contains more than
|
3853
|
-
#
|
4281
|
+
# Specifies which feature is being tracked if the request contains more than one
|
4282
|
+
# feature.
|
3854
4283
|
# Corresponds to the JSON property `feature`
|
3855
4284
|
# @return [String]
|
3856
4285
|
attr_accessor :feature
|
3857
4286
|
|
3858
|
-
# Video file location in
|
3859
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
4287
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
3860
4288
|
# Corresponds to the JSON property `inputUri`
|
3861
4289
|
# @return [String]
|
3862
4290
|
attr_accessor :input_uri
|
3863
4291
|
|
3864
|
-
# Approximate percentage processed thus far. Guaranteed to be
|
3865
|
-
#
|
4292
|
+
# Approximate percentage processed thus far. Guaranteed to be 100 when fully
|
4293
|
+
# processed.
|
3866
4294
|
# Corresponds to the JSON property `progressPercent`
|
3867
4295
|
# @return [Fixnum]
|
3868
4296
|
attr_accessor :progress_percent
|
@@ -3901,31 +4329,40 @@ module Google
|
|
3901
4329
|
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
|
3902
4330
|
include Google::Apis::Core::Hashable
|
3903
4331
|
|
3904
|
-
# The `Status` type defines a logical error model that is suitable for
|
3905
|
-
#
|
3906
|
-
#
|
3907
|
-
#
|
3908
|
-
#
|
3909
|
-
#
|
4332
|
+
# The `Status` type defines a logical error model that is suitable for different
|
4333
|
+
# programming environments, including REST APIs and RPC APIs. It is used by [
|
4334
|
+
# gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
|
4335
|
+
# data: error code, error message, and error details. You can find out more
|
4336
|
+
# about this error model and how to work with it in the [API Design Guide](https:
|
4337
|
+
# //cloud.google.com/apis/design/errors).
|
3910
4338
|
# Corresponds to the JSON property `error`
|
3911
4339
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
|
3912
4340
|
attr_accessor :error
|
3913
4341
|
|
3914
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
3915
|
-
#
|
3916
|
-
#
|
4342
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
4343
|
+
# explicit content has been detected in a frame, no annotations are present for
|
4344
|
+
# that frame.
|
3917
4345
|
# Corresponds to the JSON property `explicitAnnotation`
|
3918
4346
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
|
3919
4347
|
attr_accessor :explicit_annotation
|
3920
4348
|
|
3921
|
-
#
|
3922
|
-
#
|
4349
|
+
# Deprecated. Please use `face_detection_annotations` instead.
|
4350
|
+
# Corresponds to the JSON property `faceAnnotations`
|
4351
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1FaceAnnotation>]
|
4352
|
+
attr_accessor :face_annotations
|
4353
|
+
|
4354
|
+
# Face detection annotations.
|
4355
|
+
# Corresponds to the JSON property `faceDetectionAnnotations`
|
4356
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1FaceDetectionAnnotation>]
|
4357
|
+
attr_accessor :face_detection_annotations
|
4358
|
+
|
4359
|
+
# Label annotations on frame level. There is exactly one element for each unique
|
4360
|
+
# label.
|
3923
4361
|
# Corresponds to the JSON property `frameLabelAnnotations`
|
3924
4362
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
3925
4363
|
attr_accessor :frame_label_annotations
|
3926
4364
|
|
3927
|
-
# Video file location in
|
3928
|
-
# [Cloud Storage](https://cloud.google.com/storage/).
|
4365
|
+
# Video file location in [Cloud Storage](https://cloud.google.com/storage/).
|
3929
4366
|
# Corresponds to the JSON property `inputUri`
|
3930
4367
|
# @return [String]
|
3931
4368
|
attr_accessor :input_uri
|
@@ -3940,6 +4377,11 @@ module Google
|
|
3940
4377
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation>]
|
3941
4378
|
attr_accessor :object_annotations
|
3942
4379
|
|
4380
|
+
# Person detection annotations.
|
4381
|
+
# Corresponds to the JSON property `personDetectionAnnotations`
|
4382
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1PersonDetectionAnnotation>]
|
4383
|
+
attr_accessor :person_detection_annotations
|
4384
|
+
|
3943
4385
|
# Video segment.
|
3944
4386
|
# Corresponds to the JSON property `segment`
|
3945
4387
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
|
@@ -3952,11 +4394,11 @@ module Google
|
|
3952
4394
|
attr_accessor :segment_label_annotations
|
3953
4395
|
|
3954
4396
|
# Presence label annotations on video level or user-specified segment level.
|
3955
|
-
# There is exactly one element for each unique label. Compared to the
|
3956
|
-
#
|
3957
|
-
#
|
3958
|
-
#
|
3959
|
-
#
|
4397
|
+
# There is exactly one element for each unique label. Compared to the existing
|
4398
|
+
# topical `segment_label_annotations`, this field presents more fine-grained,
|
4399
|
+
# segment-level labels detected in video content and is made available only when
|
4400
|
+
# the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
|
4401
|
+
# request.
|
3960
4402
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
3961
4403
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
3962
4404
|
attr_accessor :segment_presence_label_annotations
|
@@ -3966,17 +4408,17 @@ module Google
|
|
3966
4408
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
|
3967
4409
|
attr_accessor :shot_annotations
|
3968
4410
|
|
3969
|
-
# Topical label annotations on shot level.
|
3970
|
-
#
|
4411
|
+
# Topical label annotations on shot level. There is exactly one element for each
|
4412
|
+
# unique label.
|
3971
4413
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
3972
4414
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
3973
4415
|
attr_accessor :shot_label_annotations
|
3974
4416
|
|
3975
4417
|
# Presence label annotations on shot level. There is exactly one element for
|
3976
|
-
# each unique label. Compared to the existing topical
|
3977
|
-
#
|
3978
|
-
#
|
3979
|
-
#
|
4418
|
+
# each unique label. Compared to the existing topical `shot_label_annotations`,
|
4419
|
+
# this field presents more fine-grained, shot-level labels detected in video
|
4420
|
+
# content and is made available only when the client sets `LabelDetectionConfig.
|
4421
|
+
# model` to "builtin/latest" in the request.
|
3980
4422
|
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
|
3981
4423
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
|
3982
4424
|
attr_accessor :shot_presence_label_annotations
|
@@ -3986,9 +4428,8 @@ module Google
|
|
3986
4428
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
|
3987
4429
|
attr_accessor :speech_transcriptions
|
3988
4430
|
|
3989
|
-
# OCR text detection and tracking.
|
3990
|
-
#
|
3991
|
-
# frame information associated with it.
|
4431
|
+
# OCR text detection and tracking. Annotations for list of detected text
|
4432
|
+
# snippets. Each will have list of frame information associated with it.
|
3992
4433
|
# Corresponds to the JSON property `textAnnotations`
|
3993
4434
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
|
3994
4435
|
attr_accessor :text_annotations
|
@@ -4001,10 +4442,13 @@ module Google
|
|
4001
4442
|
def update!(**args)
|
4002
4443
|
@error = args[:error] if args.key?(:error)
|
4003
4444
|
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
|
4445
|
+
@face_annotations = args[:face_annotations] if args.key?(:face_annotations)
|
4446
|
+
@face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
|
4004
4447
|
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
|
4005
4448
|
@input_uri = args[:input_uri] if args.key?(:input_uri)
|
4006
4449
|
@logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
|
4007
4450
|
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
|
4451
|
+
@person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
|
4008
4452
|
@segment = args[:segment] if args.key?(:segment)
|
4009
4453
|
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
|
4010
4454
|
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
|
@@ -4020,14 +4464,14 @@ module Google
|
|
4020
4464
|
class GoogleCloudVideointelligenceV1p2beta1VideoSegment
|
4021
4465
|
include Google::Apis::Core::Hashable
|
4022
4466
|
|
4023
|
-
# Time-offset, relative to the beginning of the video,
|
4024
|
-
#
|
4467
|
+
# Time-offset, relative to the beginning of the video, corresponding to the end
|
4468
|
+
# of the segment (inclusive).
|
4025
4469
|
# Corresponds to the JSON property `endTimeOffset`
|
4026
4470
|
# @return [String]
|
4027
4471
|
attr_accessor :end_time_offset
|
4028
4472
|
|
4029
|
-
# Time-offset, relative to the beginning of the video,
|
4030
|
-
#
|
4473
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
4474
|
+
# start of the segment (inclusive).
|
4031
4475
|
# Corresponds to the JSON property `startTimeOffset`
|
4032
4476
|
# @return [String]
|
4033
4477
|
attr_accessor :start_time_offset
|
@@ -4044,41 +4488,41 @@ module Google
|
|
4044
4488
|
end
|
4045
4489
|
|
4046
4490
|
# Word-specific information for recognized words. Word information is only
|
4047
|
-
# included in the response when certain request parameters are set, such
|
4048
|
-
#
|
4491
|
+
# included in the response when certain request parameters are set, such as `
|
4492
|
+
# enable_word_time_offsets`.
|
4049
4493
|
class GoogleCloudVideointelligenceV1p2beta1WordInfo
|
4050
4494
|
include Google::Apis::Core::Hashable
|
4051
4495
|
|
4052
4496
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
4053
4497
|
# indicates an estimated greater likelihood that the recognized words are
|
4054
|
-
# correct. This field is set only for the top alternative.
|
4055
|
-
#
|
4056
|
-
#
|
4057
|
-
#
|
4498
|
+
# correct. This field is set only for the top alternative. This field is not
|
4499
|
+
# guaranteed to be accurate and users should not rely on it to be always
|
4500
|
+
# provided. The default of 0.0 is a sentinel value indicating `confidence` was
|
4501
|
+
# not set.
|
4058
4502
|
# Corresponds to the JSON property `confidence`
|
4059
4503
|
# @return [Float]
|
4060
4504
|
attr_accessor :confidence
|
4061
4505
|
|
4062
|
-
# Time offset relative to the beginning of the audio, and
|
4063
|
-
#
|
4064
|
-
#
|
4065
|
-
#
|
4506
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
4507
|
+
# end of the spoken word. This field is only set if `enable_word_time_offsets=
|
4508
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
4509
|
+
# accuracy of the time offset can vary.
|
4066
4510
|
# Corresponds to the JSON property `endTime`
|
4067
4511
|
# @return [String]
|
4068
4512
|
attr_accessor :end_time
|
4069
4513
|
|
4070
|
-
# Output only. A distinct integer value is assigned for every speaker within
|
4071
|
-
#
|
4072
|
-
#
|
4073
|
-
#
|
4514
|
+
# Output only. A distinct integer value is assigned for every speaker within the
|
4515
|
+
# audio. This field specifies which one of those speakers was detected to have
|
4516
|
+
# spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
|
4517
|
+
# only set if speaker diarization is enabled.
|
4074
4518
|
# Corresponds to the JSON property `speakerTag`
|
4075
4519
|
# @return [Fixnum]
|
4076
4520
|
attr_accessor :speaker_tag
|
4077
4521
|
|
4078
|
-
# Time offset relative to the beginning of the audio, and
|
4079
|
-
#
|
4080
|
-
#
|
4081
|
-
#
|
4522
|
+
# Time offset relative to the beginning of the audio, and corresponding to the
|
4523
|
+
# start of the spoken word. This field is only set if `enable_word_time_offsets=
|
4524
|
+
# true` and only in the top hypothesis. This is an experimental feature and the
|
4525
|
+
# accuracy of the time offset can vary.
|
4082
4526
|
# Corresponds to the JSON property `startTime`
|
4083
4527
|
# @return [String]
|
4084
4528
|
attr_accessor :start_time
|
@@ -4102,9 +4546,9 @@ module Google
|
|
4102
4546
|
end
|
4103
4547
|
end
|
4104
4548
|
|
4105
|
-
# Video annotation progress. Included in the `metadata`
|
4106
|
-
#
|
4107
|
-
#
|
4549
|
+
# Video annotation progress. Included in the `metadata` field of the `Operation`
|
4550
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
4551
|
+
# service.
|
4108
4552
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
|
4109
4553
|
include Google::Apis::Core::Hashable
|
4110
4554
|
|
@@ -4132,24 +4576,22 @@ module Google
|
|
4132
4576
|
# @return [Array<String>]
|
4133
4577
|
attr_accessor :features
|
4134
4578
|
|
4135
|
-
# The video data bytes.
|
4136
|
-
# If
|
4137
|
-
# If set, `input_uri` must be unset.
|
4579
|
+
# The video data bytes. If unset, the input video(s) should be specified via the
|
4580
|
+
# `input_uri`. If set, `input_uri` must be unset.
|
4138
4581
|
# Corresponds to the JSON property `inputContent`
|
4139
4582
|
# NOTE: Values are automatically base64 encoded/decoded in the client library.
|
4140
4583
|
# @return [String]
|
4141
4584
|
attr_accessor :input_content
|
4142
4585
|
|
4143
|
-
# Input video location. Currently, only
|
4144
|
-
#
|
4145
|
-
#
|
4146
|
-
#
|
4147
|
-
# google.
|
4148
|
-
#
|
4149
|
-
#
|
4150
|
-
#
|
4151
|
-
#
|
4152
|
-
# in the request as `input_content`. If set, `input_content` must be unset.
|
4586
|
+
# Input video location. Currently, only [Cloud Storage](https://cloud.google.com/
|
4587
|
+
# storage/) URIs are supported. URIs must be specified in the following format: `
|
4588
|
+
# gs://bucket-id/object-id` (other URI formats return google.rpc.Code.
|
4589
|
+
# INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.
|
4590
|
+
# google.com/storage/docs/request-endpoints). To identify multiple videos, a
|
4591
|
+
# video URI may include wildcards in the `object-id`. Supported wildcards: '*'
|
4592
|
+
# to match 0 or more characters; '?' to match 1 character. If unset, the input
|
4593
|
+
# video should be embedded in the request as `input_content`. If set, `
|
4594
|
+
# input_content` must be unset.
|
4153
4595
|
# Corresponds to the JSON property `inputUri`
|
4154
4596
|
# @return [String]
|
4155
4597
|
attr_accessor :input_uri
|
@@ -4163,11 +4605,11 @@ module Google
|
|
4163
4605
|
attr_accessor :location_id
|
4164
4606
|
|
4165
4607
|
# Optional. Location where the output (in JSON format) should be stored.
|
4166
|
-
# Currently, only [Cloud Storage](https://cloud.google.com/storage/)
|
4167
|
-
#
|
4168
|
-
#
|
4169
|
-
#
|
4170
|
-
#
|
4608
|
+
# Currently, only [Cloud Storage](https://cloud.google.com/storage/) URIs are
|
4609
|
+
# supported. These must be specified in the following format: `gs://bucket-id/
|
4610
|
+
# object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For
|
4611
|
+
# more information, see [Request URIs](https://cloud.google.com/storage/docs/
|
4612
|
+
# request-endpoints).
|
4171
4613
|
# Corresponds to the JSON property `outputUri`
|
4172
4614
|
# @return [String]
|
4173
4615
|
attr_accessor :output_uri
|
@@ -4192,9 +4634,9 @@ module Google
|
|
4192
4634
|
end
|
4193
4635
|
end
|
4194
4636
|
|
4195
|
-
# Video annotation response. Included in the `response`
|
4196
|
-
#
|
4197
|
-
#
|
4637
|
+
# Video annotation response. Included in the `response` field of the `Operation`
|
4638
|
+
# returned by the `GetOperation` call of the `google::longrunning::Operations`
|
4639
|
+
# service.
|
4198
4640
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
|
4199
4641
|
include Google::Apis::Core::Hashable
|
4200
4642
|
|
@@ -4228,10 +4670,9 @@ module Google
|
|
4228
4670
|
# @return [String]
|
4229
4671
|
attr_accessor :display_name
|
4230
4672
|
|
4231
|
-
# The resource name of the celebrity. Have the format
|
4232
|
-
#
|
4233
|
-
#
|
4234
|
-
# celebrity.
|
4673
|
+
# The resource name of the celebrity. Have the format `video-intelligence/kg-mid`
|
4674
|
+
# indicates a celebrity from preloaded gallery. kg-mid is the id in Google
|
4675
|
+
# knowledge graph, which is unique for the celebrity.
|
4235
4676
|
# Corresponds to the JSON property `name`
|
4236
4677
|
# @return [String]
|
4237
4678
|
attr_accessor :name
|
@@ -4252,8 +4693,8 @@ module Google
|
|
4252
4693
|
class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
|
4253
4694
|
include Google::Apis::Core::Hashable
|
4254
4695
|
|
4255
|
-
# The tracks detected from the input video, including recognized celebrities
|
4256
|
-
#
|
4696
|
+
# The tracks detected from the input video, including recognized celebrities and
|
4697
|
+
# other detected faces in the video.
|
4257
4698
|
# Corresponds to the JSON property `celebrityTracks`
|
4258
4699
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
|
4259
4700
|
attr_accessor :celebrity_tracks
|
@@ -4309,14 +4750,14 @@ module Google
|
|
4309
4750
|
# @return [Float]
|
4310
4751
|
attr_accessor :confidence
|
4311
4752
|
|
4312
|
-
# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
|
4313
|
-
#
|
4753
|
+
# The name of the attribute, for example, glasses, dark_glasses, mouth_open. A
|
4754
|
+
# full list of supported type names will be provided in the document.
|
4314
4755
|
# Corresponds to the JSON property `name`
|
4315
4756
|
# @return [String]
|
4316
4757
|
attr_accessor :name
|
4317
4758
|
|
4318
|
-
# Text value of the detection result. For example, the value for "HairColor"
|
4319
|
-
#
|
4759
|
+
# Text value of the detection result. For example, the value for "HairColor" can
|
4760
|
+
# be "black", "blonde", etc.
|
4320
4761
|
# Corresponds to the JSON property `value`
|
4321
4762
|
# @return [String]
|
4322
4763
|
attr_accessor :value
|
@@ -4348,9 +4789,8 @@ module Google
|
|
4348
4789
|
# @return [String]
|
4349
4790
|
attr_accessor :name
|
4350
4791
|
|
4351
|
-
# A vertex represents a 2D point in the image.
|
4352
|
-
#
|
4353
|
-
# and range from 0 to 1.
|
4792
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
4793
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
4354
4794
|
# Corresponds to the JSON property `point`
|
4355
4795
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
|
4356
4796
|
attr_accessor :point
|
@@ -4376,8 +4816,7 @@ module Google
|
|
4376
4816
|
# @return [String]
|
4377
4817
|
attr_accessor :description
|
4378
4818
|
|
4379
|
-
# Opaque entity ID. Some IDs may be available in
|
4380
|
-
# [Google Knowledge Graph Search
|
4819
|
+
# Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
|
4381
4820
|
# API](https://developers.google.com/knowledge-graph/).
|
4382
4821
|
# Corresponds to the JSON property `entityId`
|
4383
4822
|
# @return [String]
|
@@ -4400,9 +4839,9 @@ module Google
|
|
4400
4839
|
end
|
4401
4840
|
end
|
4402
4841
|
|
4403
|
-
# Explicit content annotation (based on per-frame visual signals only).
|
4404
|
-
#
|
4405
|
-
#
|
4842
|
+
# Explicit content annotation (based on per-frame visual signals only). If no
|
4843
|
+
# explicit content has been detected in a frame, no annotations are present for
|
4844
|
+
# that frame.
|
4406
4845
|
class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
|
4407
4846
|
include Google::Apis::Core::Hashable
|
4408
4847
|
|
@@ -4431,9 +4870,8 @@ module Google
|
|
4431
4870
|
class GoogleCloudVideointelligenceV1p3beta1ExplicitContentDetectionConfig
|
4432
4871
|
include Google::Apis::Core::Hashable
|
4433
4872
|
|
4434
|
-
# Model to use for explicit content detection.
|
4435
|
-
#
|
4436
|
-
# "builtin/latest".
|
4873
|
+
# Model to use for explicit content detection. Supported values: "builtin/stable"
|
4874
|
+
# (the default if unset) and "builtin/latest".
|
4437
4875
|
# Corresponds to the JSON property `model`
|
4438
4876
|
# @return [String]
|
4439
4877
|
attr_accessor :model
|
@@ -4474,20 +4912,41 @@ module Google
|
|
4474
4912
|
end
|
4475
4913
|
end
|
4476
4914
|
|
4477
|
-
#
|
4478
|
-
class
|
4915
|
+
# Deprecated. No effect.
|
4916
|
+
class GoogleCloudVideointelligenceV1p3beta1FaceAnnotation
|
4479
4917
|
include Google::Apis::Core::Hashable
|
4480
4918
|
|
4481
|
-
#
|
4919
|
+
# All video frames where a face was detected.
|
4920
|
+
# Corresponds to the JSON property `frames`
|
4921
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1FaceFrame>]
|
4922
|
+
attr_accessor :frames
|
4923
|
+
|
4924
|
+
# All video segments where a face was detected.
|
4925
|
+
# Corresponds to the JSON property `segments`
|
4926
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1FaceSegment>]
|
4927
|
+
attr_accessor :segments
|
4928
|
+
|
4929
|
+
# Thumbnail of a representative face view (in JPEG format).
|
4482
4930
|
# Corresponds to the JSON property `thumbnail`
|
4483
4931
|
# NOTE: Values are automatically base64 encoded/decoded in the client library.
|
4484
4932
|
# @return [String]
|
4485
4933
|
attr_accessor :thumbnail
|
4486
4934
|
|
4487
|
-
|
4488
|
-
|
4489
|
-
|
4490
|
-
|
4935
|
+
def initialize(**args)
|
4936
|
+
update!(**args)
|
4937
|
+
end
|
4938
|
+
|
4939
|
+
# Update properties of this object
|
4940
|
+
def update!(**args)
|
4941
|
+
@frames = args[:frames] if args.key?(:frames)
|
4942
|
+
@segments = args[:segments] if args.key?(:segments)
|
4943
|
+
@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
|
4944
|
+
end
|
4945
|
+
end
|
4946
|
+
|
4947
|
+
# Face detection annotation.
|
4948
|
+
class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
|
4949
|
+
include Google::Apis::Core::Hashable
|
4491
4950
|
|
4492
4951
|
# Feature version.
|
4493
4952
|
# Corresponds to the JSON property `version`
|
@@ -4500,8 +4959,6 @@ module Google
|
|
4500
4959
|
|
4501
4960
|
# Update properties of this object
|
4502
4961
|
def update!(**args)
|
4503
|
-
@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
|
4504
|
-
@tracks = args[:tracks] if args.key?(:tracks)
|
4505
4962
|
@version = args[:version] if args.key?(:version)
|
4506
4963
|
end
|
4507
4964
|
end
|
@@ -4523,9 +4980,8 @@ module Google
|
|
4523
4980
|
attr_accessor :include_bounding_boxes
|
4524
4981
|
alias_method :include_bounding_boxes?, :include_bounding_boxes
|
4525
4982
|
|
4526
|
-
# Model to use for face detection.
|
4527
|
-
#
|
4528
|
-
# "builtin/latest".
|
4983
|
+
# Model to use for face detection. Supported values: "builtin/stable" (the
|
4984
|
+
# default if unset) and "builtin/latest".
|
4529
4985
|
# Corresponds to the JSON property `model`
|
4530
4986
|
# @return [String]
|
4531
4987
|
attr_accessor :model
|
@@ -4542,14 +4998,59 @@ module Google
|
|
4542
4998
|
end
|
4543
4999
|
end
|
4544
5000
|
|
5001
|
+
# Deprecated. No effect.
|
5002
|
+
class GoogleCloudVideointelligenceV1p3beta1FaceFrame
|
5003
|
+
include Google::Apis::Core::Hashable
|
5004
|
+
|
5005
|
+
# Normalized Bounding boxes in a frame. There can be more than one boxes if the
|
5006
|
+
# same face is detected in multiple locations within the current frame.
|
5007
|
+
# Corresponds to the JSON property `normalizedBoundingBoxes`
|
5008
|
+
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox>]
|
5009
|
+
attr_accessor :normalized_bounding_boxes
|
5010
|
+
|
5011
|
+
# Time-offset, relative to the beginning of the video, corresponding to the
|
5012
|
+
# video frame for this location.
|
5013
|
+
# Corresponds to the JSON property `timeOffset`
|
5014
|
+
# @return [String]
|
5015
|
+
attr_accessor :time_offset
|
5016
|
+
|
5017
|
+
def initialize(**args)
|
5018
|
+
update!(**args)
|
5019
|
+
end
|
5020
|
+
|
5021
|
+
# Update properties of this object
|
5022
|
+
def update!(**args)
|
5023
|
+
@normalized_bounding_boxes = args[:normalized_bounding_boxes] if args.key?(:normalized_bounding_boxes)
|
5024
|
+
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
5025
|
+
end
|
5026
|
+
end
|
5027
|
+
|
5028
|
+
# Video segment level annotation results for face detection.
|
5029
|
+
class GoogleCloudVideointelligenceV1p3beta1FaceSegment
|
5030
|
+
include Google::Apis::Core::Hashable
|
5031
|
+
|
5032
|
+
# Video segment.
|
5033
|
+
# Corresponds to the JSON property `segment`
|
5034
|
+
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
5035
|
+
attr_accessor :segment
|
5036
|
+
|
5037
|
+
def initialize(**args)
|
5038
|
+
update!(**args)
|
5039
|
+
end
|
5040
|
+
|
5041
|
+
# Update properties of this object
|
5042
|
+
def update!(**args)
|
5043
|
+
@segment = args[:segment] if args.key?(:segment)
|
5044
|
+
end
|
5045
|
+
end
|
5046
|
+
|
4545
5047
|
# Label annotation.
|
4546
5048
|
class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
|
4547
5049
|
include Google::Apis::Core::Hashable
|
4548
5050
|
|
4549
|
-
# Common categories for the detected entity.
|
4550
|
-
#
|
4551
|
-
#
|
4552
|
-
# also be a `pet`.
|
5051
|
+
# Common categories for the detected entity. For example, when the label is `
|
5052
|
+
# Terrier`, the category is likely `dog`. And in some cases there might be more
|
5053
|
+
# than one categories e.g., `Terrier` could also be a `pet`.
|
4553
5054
|
# Corresponds to the JSON property `categoryEntities`
|
4554
5055
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Entity>]
|
4555
5056
|
attr_accessor :category_entities
|
@@ -4592,44 +5093,40 @@ module Google
|
|
4592
5093
|
class GoogleCloudVideointelligenceV1p3beta1LabelDetectionConfig
|
4593
5094
|
include Google::Apis::Core::Hashable
|
4594
5095
|
|
4595
|
-
# The confidence threshold we perform filtering on the labels from
|
4596
|
-
#
|
4597
|
-
#
|
4598
|
-
#
|
4599
|
-
#
|
4600
|
-
# the default threshold everytime when we release a new model.
|
5096
|
+
# The confidence threshold we perform filtering on the labels from frame-level
|
5097
|
+
# detection. If not set, it is set to 0.4 by default. The valid range for this
|
5098
|
+
# threshold is [0.1, 0.9]. Any value set outside of this range will be clipped.
|
5099
|
+
# Note: For best results, follow the default threshold. We will update the
|
5100
|
+
# default threshold everytime when we release a new model.
|
4601
5101
|
# Corresponds to the JSON property `frameConfidenceThreshold`
|
4602
5102
|
# @return [Float]
|
4603
5103
|
attr_accessor :frame_confidence_threshold
|
4604
5104
|
|
4605
|
-
# What labels should be detected with LABEL_DETECTION, in addition to
|
4606
|
-
#
|
4607
|
-
# If unspecified, defaults to `SHOT_MODE`.
|
5105
|
+
# What labels should be detected with LABEL_DETECTION, in addition to video-
|
5106
|
+
# level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`.
|
4608
5107
|
# Corresponds to the JSON property `labelDetectionMode`
|
4609
5108
|
# @return [String]
|
4610
5109
|
attr_accessor :label_detection_mode
|
4611
5110
|
|
4612
|
-
# Model to use for label detection.
|
4613
|
-
#
|
4614
|
-
# "builtin/latest".
|
5111
|
+
# Model to use for label detection. Supported values: "builtin/stable" (the
|
5112
|
+
# default if unset) and "builtin/latest".
|
4615
5113
|
# Corresponds to the JSON property `model`
|
4616
5114
|
# @return [String]
|
4617
5115
|
attr_accessor :model
|
4618
5116
|
|
4619
|
-
# Whether the video has been shot from a stationary (i.e., non-moving)
|
4620
|
-
#
|
4621
|
-
#
|
5117
|
+
# Whether the video has been shot from a stationary (i.e., non-moving) camera.
|
5118
|
+
# When set to true, might improve detection accuracy for moving objects. Should
|
5119
|
+
# be used with `SHOT_AND_FRAME_MODE` enabled.
|
4622
5120
|
# Corresponds to the JSON property `stationaryCamera`
|
4623
5121
|
# @return [Boolean]
|
4624
5122
|
attr_accessor :stationary_camera
|
4625
5123
|
alias_method :stationary_camera?, :stationary_camera
|
4626
5124
|
|
4627
|
-
# The confidence threshold we perform filtering on the labels from
|
4628
|
-
#
|
4629
|
-
#
|
4630
|
-
#
|
4631
|
-
#
|
4632
|
-
# the default threshold everytime when we release a new model.
|
5125
|
+
# The confidence threshold we perform filtering on the labels from video-level
|
5126
|
+
# and shot-level detections. If not set, it's set to 0.3 by default. The valid
|
5127
|
+
# range for this threshold is [0.1, 0.9]. Any value set outside of this range
|
5128
|
+
# will be clipped. Note: For best results, follow the default threshold. We will
|
5129
|
+
# update the default threshold everytime when we release a new model.
|
4633
5130
|
# Corresponds to the JSON property `videoConfidenceThreshold`
|
4634
5131
|
# @return [Float]
|
4635
5132
|
attr_accessor :video_confidence_threshold
|
@@ -4708,14 +5205,14 @@ module Google
|
|
4708
5205
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
|
4709
5206
|
attr_accessor :entity
|
4710
5207
|
|
4711
|
-
# All video segments where the recognized logo appears. There might be
|
4712
|
-
#
|
5208
|
+
# All video segments where the recognized logo appears. There might be multiple
|
5209
|
+
# instances of the same logo class appearing in one VideoSegment.
|
4713
5210
|
# Corresponds to the JSON property `segments`
|
4714
5211
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
|
4715
5212
|
attr_accessor :segments
|
4716
5213
|
|
4717
|
-
# All logo tracks where the recognized logo appears. Each track corresponds
|
4718
|
-
#
|
5214
|
+
# All logo tracks where the recognized logo appears. Each track corresponds to
|
5215
|
+
# one logo instance appearing in consecutive frames.
|
4719
5216
|
# Corresponds to the JSON property `tracks`
|
4720
5217
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
|
4721
5218
|
attr_accessor :tracks
|
@@ -4732,9 +5229,8 @@ module Google
|
|
4732
5229
|
end
|
4733
5230
|
end
|
4734
5231
|
|
4735
|
-
# Normalized bounding box.
|
4736
|
-
#
|
4737
|
-
# Range: [0, 1].
|
5232
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
5233
|
+
# original image. Range: [0, 1].
|
4738
5234
|
class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
|
4739
5235
|
include Google::Apis::Core::Hashable
|
4740
5236
|
|
@@ -4772,20 +5268,12 @@ module Google
|
|
4772
5268
|
end
|
4773
5269
|
|
4774
5270
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
4775
|
-
# Contains list of the corner points in clockwise order starting from
|
4776
|
-
#
|
4777
|
-
#
|
4778
|
-
#
|
4779
|
-
#
|
4780
|
-
#
|
4781
|
-
# When it's clockwise rotated 180 degrees around the top-left corner it
|
4782
|
-
# becomes:
|
4783
|
-
# 2----3
|
4784
|
-
# | |
|
4785
|
-
# 1----0
|
4786
|
-
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
4787
|
-
# than 0, or greater than 1 due to trignometric calculations for location of
|
4788
|
-
# the box.
|
5271
|
+
# Contains list of the corner points in clockwise order starting from top-left
|
5272
|
+
# corner. For example, for a rectangular bounding box: When the text is
|
5273
|
+
# horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
|
5274
|
+
# 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
|
5275
|
+
# vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
|
5276
|
+
# or greater than 1 due to trignometric calculations for location of the box.
|
4789
5277
|
class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
|
4790
5278
|
include Google::Apis::Core::Hashable
|
4791
5279
|
|
@@ -4804,9 +5292,8 @@ module Google
|
|
4804
5292
|
end
|
4805
5293
|
end
|
4806
5294
|
|
4807
|
-
# A vertex represents a 2D point in the image.
|
4808
|
-
#
|
4809
|
-
# and range from 0 to 1.
|
5295
|
+
# A vertex represents a 2D point in the image. NOTE: the normalized vertex
|
5296
|
+
# coordinates are relative to the original image and range from 0 to 1.
|
4810
5297
|
class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
|
4811
5298
|
include Google::Apis::Core::Hashable
|
4812
5299
|
|
@@ -4845,10 +5332,10 @@ module Google
|
|
4845
5332
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
|
4846
5333
|
attr_accessor :entity
|
4847
5334
|
|
4848
|
-
# Information corresponding to all frames where this object track appears.
|
4849
|
-
#
|
4850
|
-
#
|
4851
|
-
#
|
5335
|
+
# Information corresponding to all frames where this object track appears. Non-
|
5336
|
+
# streaming batch mode: it may be one or multiple ObjectTrackingFrame messages
|
5337
|
+
# in frames. Streaming mode: it can only be one ObjectTrackingFrame message in
|
5338
|
+
# frames.
|
4852
5339
|
# Corresponds to the JSON property `frames`
|
4853
5340
|
# @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
|
4854
5341
|
attr_accessor :frames
|
@@ -4858,12 +5345,11 @@ module Google
|
|
4858
5345
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
4859
5346
|
attr_accessor :segment
|
4860
5347
|
|
4861
|
-
# Streaming mode ONLY.
|
4862
|
-
#
|
4863
|
-
#
|
4864
|
-
#
|
4865
|
-
#
|
4866
|
-
# ObjectTrackAnnotation of the same track_id over time.
|
5348
|
+
# Streaming mode ONLY. In streaming mode, we do not know the end time of a
|
5349
|
+
# tracked object before it is completed. Hence, there is no VideoSegment info
|
5350
|
+
# returned. Instead, we provide a unique identifiable integer track_id so that
|
5351
|
+
# the customers can correlate the results of the ongoing ObjectTrackAnnotation
|
5352
|
+
# of the same track_id over time.
|
4867
5353
|
# Corresponds to the JSON property `trackId`
|
4868
5354
|
# @return [Fixnum]
|
4869
5355
|
attr_accessor :track_id
|
@@ -4892,9 +5378,8 @@ module Google
|
|
4892
5378
|
class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingConfig
|
4893
5379
|
include Google::Apis::Core::Hashable
|
4894
5380
|
|
4895
|
-
# Model to use for object tracking.
|
4896
|
-
#
|
4897
|
-
# "builtin/latest".
|
5381
|
+
# Model to use for object tracking. Supported values: "builtin/stable" (the
|
5382
|
+
# default if unset) and "builtin/latest".
|
4898
5383
|
# Corresponds to the JSON property `model`
|
4899
5384
|
# @return [String]
|
4900
5385
|
attr_accessor :model
|
@@ -4914,9 +5399,8 @@ module Google
|
|
4914
5399
|
class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
|
4915
5400
|
include Google::Apis::Core::Hashable
|
4916
5401
|
|
4917
|
-
# Normalized bounding box.
|
4918
|
-
#
|
4919
|
-
# Range: [0, 1].
|
5402
|
+
# Normalized bounding box. The normalized vertex coordinates are relative to the
|
5403
|
+
# original image. Range: [0, 1].
|
4920
5404
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
4921
5405
|
# @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
|
4922
5406
|
attr_accessor :normalized_bounding_box
|
@@ -4967,23 +5451,21 @@ module Google
|
|
4967
5451
|
include Google::Apis::Core::Hashable
|
4968
5452
|
|
4969
5453
|
# Whether to enable person attributes detection, such as cloth color (black,
|
4970
|
-
# blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
|
4971
|
-
# etc.
|
5454
|
+
# blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, etc.
|
4972
5455
|
# Ignored if 'include_bounding_boxes' is set to false.
|
4973
5456
|
# Corresponds to the JSON property `includeAttributes`
|
4974
5457
|
# @return [Boolean]
|
4975
5458
|
attr_accessor :include_attributes
|
4976
5459
|
alias_method :include_attributes?, :include_attributes
|
4977
5460
|
|
4978
|
-
# Whether bounding boxes are included in the person detection annotation
|
4979
|
-
# output.
|
5461
|
+
# Whether bounding boxes are included in the person detection annotation output.
|
4980
5462
|
# Corresponds to the JSON property `includeBoundingBoxes`
|
4981
5463
|
# @return [Boolean]
|
4982
5464
|
attr_accessor :include_bounding_boxes
|
4983
5465
|
alias_method :include_bounding_boxes?, :include_bounding_boxes
|
4984
5466
|
|
4985
|
-
# Whether to enable pose landmarks detection. Ignored if
|
4986
|
-
#
|
5467
|
+
# Whether to enable pose landmarks detection. Ignored if 'include_bounding_boxes'
|
5468
|
+
# is set to false.
|
4987
5469
|
# Corresponds to the JSON property `includePoseLandmarks`
|
4988
5470
|
# @return [Boolean]
|
4989
5471
|
attr_accessor :include_pose_landmarks
|
@@ -5030,9 +5512,8 @@ module Google
|
|
5030
5512
|
class GoogleCloudVideointelligenceV1p3beta1ShotChangeDetectionConfig
|
5031
5513
|
include Google::Apis::Core::Hashable
|
5032
5514
|
|
5033
|
-
# Model to use for shot change detection.
|
5034
|
-
#
|
5035
|
-
# "builtin/latest".
|
5515
|
+
# Model to use for shot change detection. Supported values: "builtin/stable" (
|
5516
|
+
# the default if unset) and "builtin/latest".
|
5036
5517
|
# Corresponds to the JSON property `model`
|
5037
5518
|
# @return [String]
|
5038
5519
|
attr_accessor :model
|
@@ -5052,12 +5533,12 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1SpeechContext
  include Google::Apis::Core::Hashable

- # Optional. A list of strings containing words and phrases "hints" so that
- #
- #
- #
- #
- #
+ # Optional. A list of strings containing words and phrases "hints" so that the
+ # speech recognition is more likely to recognize them. This can be used to
+ # improve the accuracy for specific words and phrases, for example, if specific
+ # commands are typically spoken by the user. This can also be used to add
+ # additional words to the vocabulary of the recognizer. See [usage limits](https:
+ # //cloud.google.com/speech/limits#content).
  # Corresponds to the JSON property `phrases`
  # @return [Array<String>]
  attr_accessor :phrases
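A `SpeechContext` is just a list of phrase hints; a short sketch (the phrase values are illustrative only):

    speech_context =
      Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechContext.new(
        phrases: ['Cloud Video Intelligence', 'speaker diarization']
      )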
@@ -5078,10 +5559,10 @@ module Google

  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
  # Corresponds to the JSON property `confidence`
  # @return [Float]
  attr_accessor :confidence
@@ -5092,8 +5573,8 @@ module Google
  attr_accessor :transcript

  # Output only. A list of word-specific information for each recognized word.
- # Note: When `enable_speaker_diarization` is set to true, you will see all
- #
+ # Note: When `enable_speaker_diarization` is set to true, you will see all the
+ # words from the beginning of the audio.
  # Corresponds to the JSON property `words`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
  attr_accessor :words
@@ -5114,18 +5595,17 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
  include Google::Apis::Core::Hashable

- # May contain one or more recognition hypotheses (up to the maximum specified
- #
- #
- #
+ # May contain one or more recognition hypotheses (up to the maximum specified in
+ # `max_alternatives`). These alternatives are ordered in terms of accuracy, with
+ # the top (first) alternative being the most probable, as ranked by the
+ # recognizer.
  # Corresponds to the JSON property `alternatives`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
  attr_accessor :alternatives

  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
- # language tag of
- #
- # most likelihood of being spoken in the audio.
+ # language tag of the language in this result. This language code was detected
+ # to have the most likelihood of being spoken in the audio.
  # Corresponds to the JSON property `languageCode`
  # @return [String]
  attr_accessor :language_code
@@ -5152,66 +5632,62 @@ module Google
  attr_accessor :audio_tracks

  # Optional. If set, specifies the estimated number of speakers in the
- # conversation.
- #
- # Ignored unless enable_speaker_diarization is set to true.
+ # conversation. If not set, defaults to '2'. Ignored unless
+ # enable_speaker_diarization is set to true.
  # Corresponds to the JSON property `diarizationSpeakerCount`
  # @return [Fixnum]
  attr_accessor :diarization_speaker_count

- # Optional. If 'true', adds punctuation to recognition result hypotheses.
- #
- #
- #
- #
- #
+ # Optional. If 'true', adds punctuation to recognition result hypotheses. This
+ # feature is only available in select languages. Setting this for requests in
+ # other languages has no effect at all. The default 'false' value does not add
+ # punctuation to result hypotheses. NOTE: "This is currently offered as an
+ # experimental service, complimentary to all users. In the future this may be
+ # exclusively available as a premium feature."
  # Corresponds to the JSON property `enableAutomaticPunctuation`
  # @return [Boolean]
  attr_accessor :enable_automatic_punctuation
  alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation

- # Optional. If 'true', enables speaker detection for each recognized word in
- #
- #
- #
- #
- #
- # identify the speakers in the conversation over time.
+ # Optional. If 'true', enables speaker detection for each recognized word in the
+ # top alternative of the recognition result using a speaker_tag provided in the
+ # WordInfo. Note: When this is true, we send all the words from the beginning of
+ # the audio for the top alternative in every consecutive response. This is done
+ # in order to improve our speaker tags as our models learn to identify the
+ # speakers in the conversation over time.
  # Corresponds to the JSON property `enableSpeakerDiarization`
  # @return [Boolean]
  attr_accessor :enable_speaker_diarization
  alias_method :enable_speaker_diarization?, :enable_speaker_diarization

  # Optional. If `true`, the top result includes a list of words and the
- # confidence for those words. If `false`, no word-level confidence
- #
+ # confidence for those words. If `false`, no word-level confidence information
+ # is returned. The default is `false`.
  # Corresponds to the JSON property `enableWordConfidence`
  # @return [Boolean]
  attr_accessor :enable_word_confidence
  alias_method :enable_word_confidence?, :enable_word_confidence

- # Optional. If set to `true`, the server will attempt to filter out
- #
- #
- # won't be filtered out.
+ # Optional. If set to `true`, the server will attempt to filter out profanities,
+ # replacing all but the initial character in each filtered word with asterisks,
+ # e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
  # Corresponds to the JSON property `filterProfanity`
  # @return [Boolean]
  attr_accessor :filter_profanity
  alias_method :filter_profanity?, :filter_profanity

- # Required. *Required* The language of the supplied audio as a
- #
- #
- #
- # for a list of the currently supported language codes.
+ # Required. *Required* The language of the supplied audio as a [BCP-47](https://
+ # www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [
+ # Language Support](https://cloud.google.com/speech/docs/languages) for a list
+ # of the currently supported language codes.
  # Corresponds to the JSON property `languageCode`
  # @return [String]
  attr_accessor :language_code

  # Optional. Maximum number of recognition hypotheses to be returned.
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- # within each `SpeechTranscription`. The server may return fewer than
- #
+ # within each `SpeechTranscription`. The server may return fewer than `
+ # max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
  # return a maximum of one. If omitted, will return a maximum of one.
  # Corresponds to the JSON property `maxAlternatives`
  # @return [Fixnum]
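Putting the fields documented above together, a hedged sketch of a transcription config that enables diarization and word-level confidence (only `language_code` is required; the other values are illustrative):

    transcription_config =
      Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscriptionConfig.new(
        language_code: 'en-US',              # required, BCP-47
        max_alternatives: 2,                 # 0-30; 0 or 1 return a single hypothesis
        filter_profanity: true,
        enable_automatic_punctuation: true,
        enable_speaker_diarization: true,
        diarization_speaker_count: 2,        # ignored unless diarization is enabled
        enable_word_confidence: true
      )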
@@ -5240,32 +5716,32 @@ module Google
  end
  end

- # `StreamingAnnotateVideoResponse` is the only message returned to the client
- #
- #
+ # `StreamingAnnotateVideoResponse` is the only message returned to the client by
+ # `StreamingAnnotateVideo`. A series of zero or more `
+ # StreamingAnnotateVideoResponse` messages are streamed back to the client.
  class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
  include Google::Apis::Core::Hashable

- # Streaming annotation results corresponding to a portion of the video
- #
+ # Streaming annotation results corresponding to a portion of the video that is
+ # currently being processed. Only ONE type of annotation will be specified in
+ # the response.
  # Corresponds to the JSON property `annotationResults`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
  attr_accessor :annotation_results

- # Google Cloud Storage URI that stores annotation results of one
- #
- #
- # from the request followed by '/cloud_project_number-session_id'.
+ # Google Cloud Storage URI that stores annotation results of one streaming
+ # session in JSON format. It is the annotation_result_storage_directory from the
+ # request followed by '/cloud_project_number-session_id'.
  # Corresponds to the JSON property `annotationResultsUri`
  # @return [String]
  attr_accessor :annotation_results_uri

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
  attr_accessor :error
@@ -5282,18 +5758,24 @@ module Google
  end
  end

- # Streaming annotation results corresponding to a portion of the video
- #
+ # Streaming annotation results corresponding to a portion of the video that is
+ # currently being processed. Only ONE type of annotation will be specified in
+ # the response.
  class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
  include Google::Apis::Core::Hashable

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  # Corresponds to the JSON property `explicitAnnotation`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
  attr_accessor :explicit_annotation

+ # Timestamp of the processed frame in microseconds.
+ # Corresponds to the JSON property `frameTimestamp`
+ # @return [String]
+ attr_accessor :frame_timestamp
+
  # Label annotation results.
  # Corresponds to the JSON property `labelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
@@ -5316,6 +5798,7 @@ module Google
  # Update properties of this object
  def update!(**args)
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @frame_timestamp = args[:frame_timestamp] if args.key?(:frame_timestamp)
  @label_annotations = args[:label_annotations] if args.key?(:label_annotations)
  @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
  @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
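The new `frame_timestamp` field (with its matching `update!` clause above) lets each streamed result report which frame it belongs to. A hedged sketch of reading it, assuming `response` is a `GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse` already received from the streaming API:

    results = response.annotation_results
    if results && results.frame_timestamp
      # Timestamp of the processed frame, in microseconds, returned as a String.
      puts "processed frame at #{results.frame_timestamp} us"
    end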
@@ -5360,16 +5843,15 @@ module Google
  include Google::Apis::Core::Hashable

  # Language hint can be specified if the language to be detected is known a
- # priori. It can increase the accuracy of the detection. Language hint must
- #
- #
+ # priori. It can increase the accuracy of the detection. Language hint must be
+ # language code in BCP-47 format. Automatic language detection is performed if
+ # no hint is provided.
  # Corresponds to the JSON property `languageHints`
  # @return [Array<String>]
  attr_accessor :language_hints

- # Model to use for text detection.
- #
- # "builtin/latest".
+ # Model to use for text detection. Supported values: "builtin/stable" (the
+ # default if unset) and "builtin/latest".
  # Corresponds to the JSON property `model`
  # @return [String]
  attr_accessor :model
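As with the other `*Config` classes in this file, the text-detection config carries only a model name plus optional language hints. A hedged sketch; the `...TextDetectionConfig` class name is an assumption following the naming pattern of the config classes above, and the hint value is illustrative:

    text_config =
      Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1TextDetectionConfig.new(
        language_hints: ['en'],   # BCP-47; omit to use automatic language detection
        model: 'builtin/stable'   # the documented default when unset
      )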
@@ -5385,27 +5867,19 @@ module Google
  end
  end

- # Video frame level annotation results for text annotation (OCR).
- #
- #
+ # Video frame level annotation results for text annotation (OCR). Contains
+ # information regarding timestamp and bounding box locations for the frames
+ # containing detected OCR text snippets.
  class GoogleCloudVideointelligenceV1p3beta1TextFrame
  include Google::Apis::Core::Hashable

  # Normalized bounding polygon for text (that might not be aligned with axis).
- # Contains list of the corner points in clockwise order starting from
- #
- #
- #
- #
- #
- # When it's clockwise rotated 180 degrees around the top-left corner it
- # becomes:
- # 2----3
- # | |
- # 1----0
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
- # than 0, or greater than 1 due to trignometric calculations for location of
- # the box.
+ # Contains list of the corner points in clockwise order starting from top-left
+ # corner. For example, for a rectangular bounding box: When the text is
+ # horizontal it might look like: 0----1 | | 3----2 When it's clockwise rotated
+ # 180 degrees around the top-left corner it becomes: 2----3 | | 1----0 and the
+ # vertex order will still be (0, 1, 2, 3). Note that values can be less than 0,
+ # or greater than 1 due to trignometric calculations for location of the box.
  # Corresponds to the JSON property `rotatedBoundingBox`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
  attr_accessor :rotated_bounding_box
@@ -5458,9 +5932,8 @@ module Google
  end
  end

- # For tracking related features.
- #
- # normalized_bounding_box.
+ # For tracking related features. An object at time_offset with attributes, and
+ # located with normalized_bounding_box.
  class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
  include Google::Apis::Core::Hashable

@@ -5474,15 +5947,14 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
  attr_accessor :landmarks

- # Normalized bounding box.
- #
- # Range: [0, 1].
+ # Normalized bounding box. The normalized vertex coordinates are relative to the
+ # original image. Range: [0, 1].
  # Corresponds to the JSON property `normalizedBoundingBox`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
  attr_accessor :normalized_bounding_box

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this object.
  # Corresponds to the JSON property `timeOffset`
  # @return [String]
  attr_accessor :time_offset
@@ -5541,20 +6013,19 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
  include Google::Apis::Core::Hashable

- # Specifies which feature is being tracked if the request contains more than
- #
+ # Specifies which feature is being tracked if the request contains more than one
+ # feature.
  # Corresponds to the JSON property `feature`
  # @return [String]
  attr_accessor :feature

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri

- # Approximate percentage processed thus far. Guaranteed to be
- #
+ # Approximate percentage processed thus far. Guaranteed to be 100 when fully
+ # processed.
  # Corresponds to the JSON property `progressPercent`
  # @return [Fixnum]
  attr_accessor :progress_percent
@@ -5598,36 +6069,40 @@ module Google
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
  attr_accessor :celebrity_recognition_annotations

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
  attr_accessor :error

- # Explicit content annotation (based on per-frame visual signals only).
- #
- #
+ # Explicit content annotation (based on per-frame visual signals only). If no
+ # explicit content has been detected in a frame, no annotations are present for
+ # that frame.
  # Corresponds to the JSON property `explicitAnnotation`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
  attr_accessor :explicit_annotation

+ # Deprecated. Please use `face_detection_annotations` instead.
+ # Corresponds to the JSON property `faceAnnotations`
+ # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1FaceAnnotation>]
+ attr_accessor :face_annotations
+
  # Face detection annotations.
  # Corresponds to the JSON property `faceDetectionAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
  attr_accessor :face_detection_annotations

- # Label annotations on frame level.
- #
+ # Label annotations on frame level. There is exactly one element for each unique
+ # label.
  # Corresponds to the JSON property `frameLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :frame_label_annotations

- # Video file location in
- # [Cloud Storage](https://cloud.google.com/storage/).
+ # Video file location in [Cloud Storage](https://cloud.google.com/storage/).
  # Corresponds to the JSON property `inputUri`
  # @return [String]
  attr_accessor :input_uri
@@ -5659,11 +6134,11 @@ module Google
  attr_accessor :segment_label_annotations

  # Presence label annotations on video level or user-specified segment level.
- # There is exactly one element for each unique label. Compared to the
- #
- #
- #
- #
+ # There is exactly one element for each unique label. Compared to the existing
+ # topical `segment_label_annotations`, this field presents more fine-grained,
+ # segment-level labels detected in video content and is made available only when
+ # the client sets `LabelDetectionConfig.model` to "builtin/latest" in the
+ # request.
  # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :segment_presence_label_annotations
@@ -5673,17 +6148,17 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
  attr_accessor :shot_annotations

- # Topical label annotations on shot level.
- #
+ # Topical label annotations on shot level. There is exactly one element for each
+ # unique label.
  # Corresponds to the JSON property `shotLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :shot_label_annotations

  # Presence label annotations on shot level. There is exactly one element for
- # each unique label. Compared to the existing topical
- #
- #
- #
+ # each unique label. Compared to the existing topical `shot_label_annotations`,
+ # this field presents more fine-grained, shot-level labels detected in video
+ # content and is made available only when the client sets `LabelDetectionConfig.
+ # model` to "builtin/latest" in the request.
  # Corresponds to the JSON property `shotPresenceLabelAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
  attr_accessor :shot_presence_label_annotations
@@ -5693,9 +6168,8 @@ module Google
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
  attr_accessor :speech_transcriptions

- # OCR text detection and tracking.
- #
- # frame information associated with it.
+ # OCR text detection and tracking. Annotations for list of detected text
+ # snippets. Each will have list of frame information associated with it.
  # Corresponds to the JSON property `textAnnotations`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
  attr_accessor :text_annotations
@@ -5709,6 +6183,7 @@ module Google
  @celebrity_recognition_annotations = args[:celebrity_recognition_annotations] if args.key?(:celebrity_recognition_annotations)
  @error = args[:error] if args.key?(:error)
  @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
+ @face_annotations = args[:face_annotations] if args.key?(:face_annotations)
  @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
  @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
  @input_uri = args[:input_uri] if args.key?(:input_uri)
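0.48.0 re-adds a `face_annotations` accessor to the results class but documents it as deprecated in favour of `face_detection_annotations`. A hedged sketch of reading one result while preferring the newer field, assuming `annotation_results` is a single `GoogleCloudVideointelligenceV1p3beta1VideoAnnotationResults` taken from an annotate-video response:

    faces = annotation_results.face_detection_annotations ||
            annotation_results.face_annotations ||  # deprecated fallback
            []
    puts "#{faces.size} face annotation(s) for #{annotation_results.input_uri}"

    if annotation_results.error
      # Per-video failures are reported as a GoogleRpcStatus.
      warn "annotation failed: #{annotation_results.error.message}"
    end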
@@ -5755,9 +6230,9 @@ module Google
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1PersonDetectionConfig]
  attr_accessor :person_detection_config

- # Video segments to annotate. The segments may overlap and are not required
- #
- #
+ # Video segments to annotate. The segments may overlap and are not required to
+ # be contiguous or span the whole video. If unspecified, each video is treated
+ # as a single segment.
  # Corresponds to the JSON property `segments`
  # @return [Array<Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
  attr_accessor :segments
@@ -5799,14 +6274,14 @@ module Google
  class GoogleCloudVideointelligenceV1p3beta1VideoSegment
  include Google::Apis::Core::Hashable

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the end
+ # of the segment (inclusive).
  # Corresponds to the JSON property `endTimeOffset`
  # @return [String]
  attr_accessor :end_time_offset

- # Time-offset, relative to the beginning of the video,
- #
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # start of the segment (inclusive).
  # Corresponds to the JSON property `startTimeOffset`
  # @return [String]
  attr_accessor :start_time_offset
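Segments are plain start/end offsets relative to the beginning of the video, so restricting annotation to a couple of clips just means passing them in the video context. A hedged sketch: the `...VideoContext` class name and the "<seconds>s" duration strings are assumptions (the offsets are typed as String above), the rest comes from these hunks:

    segment_class =
      Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment
    video_context =
      Google::Apis::VideointelligenceV1p3beta1::GoogleCloudVideointelligenceV1p3beta1VideoContext.new(
        segments: [
          segment_class.new(start_time_offset: '0s', end_time_offset: '30s'),
          segment_class.new(start_time_offset: '60s', end_time_offset: '90.5s')
        ],
        person_detection_config: person_config  # e.g. the person-detection sketch shown earlier
      )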
@@ -5823,41 +6298,41 @@ module Google
  end

  # Word-specific information for recognized words. Word information is only
- # included in the response when certain request parameters are set, such
- #
+ # included in the response when certain request parameters are set, such as `
+ # enable_word_time_offsets`.
  class GoogleCloudVideointelligenceV1p3beta1WordInfo
  include Google::Apis::Core::Hashable

  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
- # correct. This field is set only for the top alternative.
- #
- #
- #
+ # correct. This field is set only for the top alternative. This field is not
+ # guaranteed to be accurate and users should not rely on it to be always
+ # provided. The default of 0.0 is a sentinel value indicating `confidence` was
+ # not set.
  # Corresponds to the JSON property `confidence`
  # @return [Float]
  attr_accessor :confidence

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # end of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `endTime`
  # @return [String]
  attr_accessor :end_time

- # Output only. A distinct integer value is assigned for every speaker within
- #
- #
- #
+ # Output only. A distinct integer value is assigned for every speaker within the
+ # audio. This field specifies which one of those speakers was detected to have
+ # spoken this word. Value ranges from 1 up to diarization_speaker_count, and is
+ # only set if speaker diarization is enabled.
  # Corresponds to the JSON property `speakerTag`
  # @return [Fixnum]
  attr_accessor :speaker_tag

- # Time offset relative to the beginning of the audio, and
- #
- #
- #
+ # Time offset relative to the beginning of the audio, and corresponding to the
+ # start of the spoken word. This field is only set if `enable_word_time_offsets=
+ # true` and only in the top hypothesis. This is an experimental feature and the
+ # accuracy of the time offset can vary.
  # Corresponds to the JSON property `startTime`
  # @return [String]
  attr_accessor :start_time
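With diarization enabled, each `WordInfo` in the top alternative carries a `speaker_tag`, so a simple per-speaker grouping looks like the sketch below. It assumes `alternative` is the top `SpeechRecognitionAlternative` of a transcription and that each word object also exposes a `word` string (that accessor is not part of these hunks):

    by_speaker = Hash.new { |hash, tag| hash[tag] = [] }
    (alternative.words || []).each do |info|
      # speaker_tag is only populated when speaker diarization is enabled.
      by_speaker[info.speaker_tag] << info.word if info.speaker_tag
    end
    by_speaker.each { |tag, words| puts "speaker #{tag}: #{words.join(' ')}" }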
@@ -5886,47 +6361,45 @@ module Google
  class GoogleLongrunningOperation
  include Google::Apis::Core::Hashable

- # If the value is `false`, it means the operation is still in progress.
- #
- # available.
+ # If the value is `false`, it means the operation is still in progress. If `true`
+ # , the operation is completed, and either `error` or `response` is available.
  # Corresponds to the JSON property `done`
  # @return [Boolean]
  attr_accessor :done
  alias_method :done?, :done

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::VideointelligenceV1p3beta1::GoogleRpcStatus]
  attr_accessor :error

- # Service-specific metadata associated with the operation.
- #
- #
- #
+ # Service-specific metadata associated with the operation. It typically contains
+ # progress information and common metadata such as create time. Some services
+ # might not provide such metadata. Any method that returns a long-running
+ # operation should document the metadata type, if any.
  # Corresponds to the JSON property `metadata`
  # @return [Hash<String,Object>]
  attr_accessor :metadata

  # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- #
+ # originally returns it. If you use the default HTTP mapping, the `name` should
+ # be a resource name ending with `operations/`unique_id``.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

- # The normal response of the operation in case of success.
- # method returns no data on success, such as `Delete`, the response is
- #
- #
- #
- #
- #
- # `TakeSnapshotResponse`.
+ # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is `google.
+ # protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`,
+ # the response should be the resource. For other methods, the response should
+ # have the type `XxxResponse`, where `Xxx` is the original method name. For
+ # example, if the original method name is `TakeSnapshot()`, the inferred
+ # response type is `TakeSnapshotResponse`.
  # Corresponds to the JSON property `response`
  # @return [Hash<String,Object>]
  attr_accessor :response
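Annotation requests come back as a `GoogleLongrunningOperation`, so callers check `done?` and then branch on `error` versus `response`. A hedged sketch of that branching, assuming `operation` was already obtained (and re-fetched while polling) through the service class, which is outside this file:

    if operation.done?
      if operation.error
        warn "operation #{operation.name} failed: #{operation.error.message}"
      else
        # `response` is a Hash<String,Object>; for annotate-video calls it carries
        # the AnnotateVideoResponse payload.
        puts operation.response.inspect
      end
    else
      # Still running; `metadata` typically carries progress information.
      puts "operation #{operation.name} is still in progress"
    end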
@@ -5945,12 +6418,12 @@ module Google
  end
  end

- # The `Status` type defines a logical error model that is suitable for
- #
- #
- #
- #
- #
+ # The `Status` type defines a logical error model that is suitable for different
+ # programming environments, including REST APIs and RPC APIs. It is used by [
+ # gRPC](https://github.com/grpc). Each `Status` message contains three pieces of
+ # data: error code, error message, and error details. You can find out more
+ # about this error model and how to work with it in the [API Design Guide](https:
+ # //cloud.google.com/apis/design/errors).
  class GoogleRpcStatus
  include Google::Apis::Core::Hashable

@@ -5959,15 +6432,15 @@ module Google
  # @return [Fixnum]
  attr_accessor :code

- # A list of messages that carry the error details.
+ # A list of messages that carry the error details. There is a common set of
  # message types for APIs to use.
  # Corresponds to the JSON property `details`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :details

- # A developer-facing error message, which should be in English. Any
- #
- #
+ # A developer-facing error message, which should be in English. Any user-facing
+ # error message should be localized and sent in the google.rpc.Status.details
+ # field, or localized by the client.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message
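`GoogleRpcStatus` is the same error shape everywhere it appears above (operation-level and per-video errors), so a single helper can log any of them. A short hedged sketch:

    def log_status(status)
      # `code` is a google.rpc error code (Fixnum), `message` is developer-facing
      # English, and `details` is a list of loosely typed hashes.
      warn "rpc error #{status.code}: #{status.message}"
      (status.details || []).each { |detail| warn "  detail: #{detail.inspect}" }
    end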