google-cloud-video_intelligence 2.1.1 → 3.0.0

Files changed (55)
  1. checksums.yaml +4 -4
  2. data/.yardopts +2 -1
  3. data/AUTHENTICATION.md +51 -54
  4. data/LICENSE.md +203 -0
  5. data/MIGRATING.md +274 -0
  6. data/README.md +36 -62
  7. data/lib/{google/cloud/video_intelligence/credentials.rb → google-cloud-video_intelligence.rb} +5 -13
  8. data/lib/google/cloud/video_intelligence.rb +87 -156
  9. data/lib/google/cloud/video_intelligence/version.rb +6 -2
  10. metadata +99 -83
  11. data/LICENSE +0 -201
  12. data/lib/google/cloud/video_intelligence/v1.rb +0 -186
  13. data/lib/google/cloud/video_intelligence/v1/credentials.rb +0 -41
  14. data/lib/google/cloud/video_intelligence/v1/doc/google/cloud/videointelligence/v1/video_intelligence.rb +0 -789
  15. data/lib/google/cloud/video_intelligence/v1/doc/google/longrunning/operations.rb +0 -51
  16. data/lib/google/cloud/video_intelligence/v1/doc/google/protobuf/any.rb +0 -131
  17. data/lib/google/cloud/video_intelligence/v1/doc/google/protobuf/duration.rb +0 -91
  18. data/lib/google/cloud/video_intelligence/v1/doc/google/rpc/status.rb +0 -39
  19. data/lib/google/cloud/video_intelligence/v1/video_intelligence_service_client.rb +0 -309
  20. data/lib/google/cloud/video_intelligence/v1/video_intelligence_service_client_config.json +0 -31
  21. data/lib/google/cloud/video_intelligence/v1beta2.rb +0 -149
  22. data/lib/google/cloud/video_intelligence/v1beta2/credentials.rb +0 -41
  23. data/lib/google/cloud/video_intelligence/v1beta2/doc/google/cloud/videointelligence/v1beta2/video_intelligence.rb +0 -372
  24. data/lib/google/cloud/video_intelligence/v1beta2/doc/google/longrunning/operations.rb +0 -51
  25. data/lib/google/cloud/video_intelligence/v1beta2/doc/google/protobuf/any.rb +0 -131
  26. data/lib/google/cloud/video_intelligence/v1beta2/doc/google/protobuf/duration.rb +0 -91
  27. data/lib/google/cloud/video_intelligence/v1beta2/doc/google/rpc/status.rb +0 -39
  28. data/lib/google/cloud/video_intelligence/v1beta2/video_intelligence_service_client.rb +0 -309
  29. data/lib/google/cloud/video_intelligence/v1beta2/video_intelligence_service_client_config.json +0 -31
  30. data/lib/google/cloud/video_intelligence/v1p1beta1.rb +0 -187
  31. data/lib/google/cloud/video_intelligence/v1p1beta1/credentials.rb +0 -41
  32. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/video_intelligence.rb +0 -410
  33. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/longrunning/operations.rb +0 -51
  34. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/protobuf/any.rb +0 -131
  35. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/protobuf/duration.rb +0 -91
  36. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/rpc/status.rb +0 -39
  37. data/lib/google/cloud/video_intelligence/v1p1beta1/video_intelligence_service_client.rb +0 -309
  38. data/lib/google/cloud/video_intelligence/v1p1beta1/video_intelligence_service_client_config.json +0 -31
  39. data/lib/google/cloud/video_intelligence/v1p2beta1.rb +0 -187
  40. data/lib/google/cloud/video_intelligence/v1p2beta1/credentials.rb +0 -41
  41. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/video_intelligence.rb +0 -442
  42. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/longrunning/operations.rb +0 -51
  43. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/protobuf/any.rb +0 -131
  44. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/protobuf/duration.rb +0 -91
  45. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/rpc/status.rb +0 -39
  46. data/lib/google/cloud/video_intelligence/v1p2beta1/video_intelligence_service_client.rb +0 -309
  47. data/lib/google/cloud/video_intelligence/v1p2beta1/video_intelligence_service_client_config.json +0 -31
  48. data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb +0 -304
  49. data/lib/google/cloud/videointelligence/v1/video_intelligence_services_pb.rb +0 -50
  50. data/lib/google/cloud/videointelligence/v1beta2/video_intelligence_pb.rb +0 -170
  51. data/lib/google/cloud/videointelligence/v1beta2/video_intelligence_services_pb.rb +0 -51
  52. data/lib/google/cloud/videointelligence/v1p1beta1/video_intelligence_pb.rb +0 -172
  53. data/lib/google/cloud/videointelligence/v1p1beta1/video_intelligence_services_pb.rb +0 -51
  54. data/lib/google/cloud/videointelligence/v1p2beta1/video_intelligence_pb.rb +0 -193
  55. data/lib/google/cloud/videointelligence/v1p2beta1/video_intelligence_services_pb.rb +0 -51
data/LICENSE DELETED
@@ -1,201 +0,0 @@
1
- Apache License
2
- Version 2.0, January 2004
3
- https://www.apache.org/licenses/
4
-
5
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
-
7
- 1. Definitions.
8
-
9
- "License" shall mean the terms and conditions for use, reproduction,
10
- and distribution as defined by Sections 1 through 9 of this document.
11
-
12
- "Licensor" shall mean the copyright owner or entity authorized by
13
- the copyright owner that is granting the License.
14
-
15
- "Legal Entity" shall mean the union of the acting entity and all
16
- other entities that control, are controlled by, or are under common
17
- control with that entity. For the purposes of this definition,
18
- "control" means (i) the power, direct or indirect, to cause the
19
- direction or management of such entity, whether by contract or
20
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
- outstanding shares, or (iii) beneficial ownership of such entity.
22
-
23
- "You" (or "Your") shall mean an individual or Legal Entity
24
- exercising permissions granted by this License.
25
-
26
- "Source" form shall mean the preferred form for making modifications,
27
- including but not limited to software source code, documentation
28
- source, and configuration files.
29
-
30
- "Object" form shall mean any form resulting from mechanical
31
- transformation or translation of a Source form, including but
32
- not limited to compiled object code, generated documentation,
33
- and conversions to other media types.
34
-
35
- "Work" shall mean the work of authorship, whether in Source or
36
- Object form, made available under the License, as indicated by a
37
- copyright notice that is included in or attached to the work
38
- (an example is provided in the Appendix below).
39
-
40
- "Derivative Works" shall mean any work, whether in Source or Object
41
- form, that is based on (or derived from) the Work and for which the
42
- editorial revisions, annotations, elaborations, or other modifications
43
- represent, as a whole, an original work of authorship. For the purposes
44
- of this License, Derivative Works shall not include works that remain
45
- separable from, or merely link (or bind by name) to the interfaces of,
46
- the Work and Derivative Works thereof.
47
-
48
- "Contribution" shall mean any work of authorship, including
49
- the original version of the Work and any modifications or additions
50
- to that Work or Derivative Works thereof, that is intentionally
51
- submitted to Licensor for inclusion in the Work by the copyright owner
52
- or by an individual or Legal Entity authorized to submit on behalf of
53
- the copyright owner. For the purposes of this definition, "submitted"
54
- means any form of electronic, verbal, or written communication sent
55
- to the Licensor or its representatives, including but not limited to
56
- communication on electronic mailing lists, source code control systems,
57
- and issue tracking systems that are managed by, or on behalf of, the
58
- Licensor for the purpose of discussing and improving the Work, but
59
- excluding communication that is conspicuously marked or otherwise
60
- designated in writing by the copyright owner as "Not a Contribution."
61
-
62
- "Contributor" shall mean Licensor and any individual or Legal Entity
63
- on behalf of whom a Contribution has been received by Licensor and
64
- subsequently incorporated within the Work.
65
-
66
- 2. Grant of Copyright License. Subject to the terms and conditions of
67
- this License, each Contributor hereby grants to You a perpetual,
68
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
- copyright license to reproduce, prepare Derivative Works of,
70
- publicly display, publicly perform, sublicense, and distribute the
71
- Work and such Derivative Works in Source or Object form.
72
-
73
- 3. Grant of Patent License. Subject to the terms and conditions of
74
- this License, each Contributor hereby grants to You a perpetual,
75
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
- (except as stated in this section) patent license to make, have made,
77
- use, offer to sell, sell, import, and otherwise transfer the Work,
78
- where such license applies only to those patent claims licensable
79
- by such Contributor that are necessarily infringed by their
80
- Contribution(s) alone or by combination of their Contribution(s)
81
- with the Work to which such Contribution(s) was submitted. If You
82
- institute patent litigation against any entity (including a
83
- cross-claim or counterclaim in a lawsuit) alleging that the Work
84
- or a Contribution incorporated within the Work constitutes direct
85
- or contributory patent infringement, then any patent licenses
86
- granted to You under this License for that Work shall terminate
87
- as of the date such litigation is filed.
88
-
89
- 4. Redistribution. You may reproduce and distribute copies of the
90
- Work or Derivative Works thereof in any medium, with or without
91
- modifications, and in Source or Object form, provided that You
92
- meet the following conditions:
93
-
94
- (a) You must give any other recipients of the Work or
95
- Derivative Works a copy of this License; and
96
-
97
- (b) You must cause any modified files to carry prominent notices
98
- stating that You changed the files; and
99
-
100
- (c) You must retain, in the Source form of any Derivative Works
101
- that You distribute, all copyright, patent, trademark, and
102
- attribution notices from the Source form of the Work,
103
- excluding those notices that do not pertain to any part of
104
- the Derivative Works; and
105
-
106
- (d) If the Work includes a "NOTICE" text file as part of its
107
- distribution, then any Derivative Works that You distribute must
108
- include a readable copy of the attribution notices contained
109
- within such NOTICE file, excluding those notices that do not
110
- pertain to any part of the Derivative Works, in at least one
111
- of the following places: within a NOTICE text file distributed
112
- as part of the Derivative Works; within the Source form or
113
- documentation, if provided along with the Derivative Works; or,
114
- within a display generated by the Derivative Works, if and
115
- wherever such third-party notices normally appear. The contents
116
- of the NOTICE file are for informational purposes only and
117
- do not modify the License. You may add Your own attribution
118
- notices within Derivative Works that You distribute, alongside
119
- or as an addendum to the NOTICE text from the Work, provided
120
- that such additional attribution notices cannot be construed
121
- as modifying the License.
122
-
123
- You may add Your own copyright statement to Your modifications and
124
- may provide additional or different license terms and conditions
125
- for use, reproduction, or distribution of Your modifications, or
126
- for any such Derivative Works as a whole, provided Your use,
127
- reproduction, and distribution of the Work otherwise complies with
128
- the conditions stated in this License.
129
-
130
- 5. Submission of Contributions. Unless You explicitly state otherwise,
131
- any Contribution intentionally submitted for inclusion in the Work
132
- by You to the Licensor shall be under the terms and conditions of
133
- this License, without any additional terms or conditions.
134
- Notwithstanding the above, nothing herein shall supersede or modify
135
- the terms of any separate license agreement you may have executed
136
- with Licensor regarding such Contributions.
137
-
138
- 6. Trademarks. This License does not grant permission to use the trade
139
- names, trademarks, service marks, or product names of the Licensor,
140
- except as required for reasonable and customary use in describing the
141
- origin of the Work and reproducing the content of the NOTICE file.
142
-
143
- 7. Disclaimer of Warranty. Unless required by applicable law or
144
- agreed to in writing, Licensor provides the Work (and each
145
- Contributor provides its Contributions) on an "AS IS" BASIS,
146
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
- implied, including, without limitation, any warranties or conditions
148
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
- PARTICULAR PURPOSE. You are solely responsible for determining the
150
- appropriateness of using or redistributing the Work and assume any
151
- risks associated with Your exercise of permissions under this License.
152
-
153
- 8. Limitation of Liability. In no event and under no legal theory,
154
- whether in tort (including negligence), contract, or otherwise,
155
- unless required by applicable law (such as deliberate and grossly
156
- negligent acts) or agreed to in writing, shall any Contributor be
157
- liable to You for damages, including any direct, indirect, special,
158
- incidental, or consequential damages of any character arising as a
159
- result of this License or out of the use or inability to use the
160
- Work (including but not limited to damages for loss of goodwill,
161
- work stoppage, computer failure or malfunction, or any and all
162
- other commercial damages or losses), even if such Contributor
163
- has been advised of the possibility of such damages.
164
-
165
- 9. Accepting Warranty or Additional Liability. While redistributing
166
- the Work or Derivative Works thereof, You may choose to offer,
167
- and charge a fee for, acceptance of support, warranty, indemnity,
168
- or other liability obligations and/or rights consistent with this
169
- License. However, in accepting such obligations, You may act only
170
- on Your own behalf and on Your sole responsibility, not on behalf
171
- of any other Contributor, and only if You agree to indemnify,
172
- defend, and hold each Contributor harmless for any liability
173
- incurred by, or claims asserted against, such Contributor by reason
174
- of your accepting any such warranty or additional liability.
175
-
176
- END OF TERMS AND CONDITIONS
177
-
178
- APPENDIX: How to apply the Apache License to your work.
179
-
180
- To apply the Apache License to your work, attach the following
181
- boilerplate notice, with the fields enclosed by brackets "[]"
182
- replaced with your own identifying information. (Don't include
183
- the brackets!) The text should be enclosed in the appropriate
184
- comment syntax for the file format. We also recommend that a
185
- file or class name and description of purpose be included on the
186
- same "printed page" as the copyright notice for easier
187
- identification within third-party archives.
188
-
189
- Copyright [yyyy] [name of copyright owner]
190
-
191
- Licensed under the Apache License, Version 2.0 (the "License");
192
- you may not use this file except in compliance with the License.
193
- You may obtain a copy of the License at
194
-
195
- https://www.apache.org/licenses/LICENSE-2.0
196
-
197
- Unless required by applicable law or agreed to in writing, software
198
- distributed under the License is distributed on an "AS IS" BASIS,
199
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
- See the License for the specific language governing permissions and
201
- limitations under the License.
data/lib/google/cloud/video_intelligence/v1.rb DELETED
@@ -1,186 +0,0 @@
1
- # Copyright 2020 Google LLC
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- require "google/cloud/video_intelligence/v1/video_intelligence_service_client"
17
- require "google/cloud/videointelligence/v1/video_intelligence_pb"
18
-
19
- module Google
20
- module Cloud
21
- module VideoIntelligence
22
- # rubocop:disable LineLength
23
-
24
- ##
25
- # # Ruby Client for Cloud Video Intelligence API
26
- #
27
- # [Cloud Video Intelligence API][Product Documentation]:
28
- # Detects objects, explicit content, and scene changes in videos. It also
29
- # specifies the region for annotation and transcribes speech to text.
30
- # Supports both asynchronous API and streaming API.
31
- # - [Product Documentation][]
32
- #
33
- # ## Quick Start
34
- # In order to use this library, you first need to go through the following
35
- # steps:
36
- #
37
- # 1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
38
- # 2. [Enable billing for your project.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project)
39
- # 3. [Enable the Cloud Video Intelligence API.](https://console.cloud.google.com/apis/library/videointelligence.googleapis.com)
40
- # 4. [Setup Authentication.](https://googleapis.dev/ruby/google-cloud-video_intelligence/latest/file.AUTHENTICATION.html)
41
- #
42
- # ### Installation
43
- # ```
44
- # $ gem install google-cloud-video_intelligence
45
- # ```
46
- #
47
- # ### Preview
48
- # #### VideoIntelligenceServiceClient
49
- # ```rb
50
- # require "google/cloud/video_intelligence"
51
- #
52
- # video_intelligence_client = Google::Cloud::VideoIntelligence.new(version: :v1)
53
- # input_uri = "gs://cloud-samples-data/video/cat.mp4"
54
- # features_element = :LABEL_DETECTION
55
- # features = [features_element]
56
- #
57
- # # Register a callback during the method call.
58
- # operation = video_intelligence_client.annotate_video(features, input_uri: input_uri) do |op|
59
- # raise op.results.message if op.error?
60
- # op_results = op.results
61
- # # Process the results.
62
- #
63
- # metadata = op.metadata
64
- # # Process the metadata.
65
- # end
66
- #
67
- # # Or use the return value to register a callback.
68
- # operation.on_done do |op|
69
- # raise op.results.message if op.error?
70
- # op_results = op.results
71
- # # Process the results.
72
- #
73
- # metadata = op.metadata
74
- # # Process the metadata.
75
- # end
76
- #
77
- # # Manually reload the operation.
78
- # operation.reload!
79
- #
80
- # # Or block until the operation completes, triggering callbacks on
81
- # # completion.
82
- # operation.wait_until_done!
83
- # ```
84
- #
85
- # ### Next Steps
86
- # - Read the [Cloud Video Intelligence API Product documentation][Product Documentation]
87
- # to learn more about the product and see How-to Guides.
88
- # - View this [repository's main README](https://github.com/googleapis/google-cloud-ruby/blob/master/README.md)
89
- # to see the full list of Cloud APIs that we cover.
90
- #
91
- # [Product Documentation]: https://cloud.google.com/video-intelligence
92
- #
93
- # ## Enabling Logging
94
- #
95
- # To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
96
- # The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/stdlib-2.5.0/libdoc/logger/rdoc/Logger.html) as shown below,
97
- # or a [`Google::Cloud::Logging::Logger`](https://googleapis.dev/ruby/google-cloud-logging/latest)
98
- # that will write logs to [Stackdriver Logging](https://cloud.google.com/logging/). See [grpc/logconfig.rb](https://github.com/grpc/grpc/blob/master/src/ruby/lib/grpc/logconfig.rb)
99
- # and the gRPC [spec_helper.rb](https://github.com/grpc/grpc/blob/master/src/ruby/spec/spec_helper.rb) for additional information.
100
- #
101
- # Configuring a Ruby stdlib logger:
102
- #
103
- # ```ruby
104
- # require "logger"
105
- #
106
- # module MyLogger
107
- # LOGGER = Logger.new $stderr, level: Logger::WARN
108
- # def logger
109
- # LOGGER
110
- # end
111
- # end
112
- #
113
- # # Define a gRPC module-level logger method before grpc/logconfig.rb loads.
114
- # module GRPC
115
- # extend MyLogger
116
- # end
117
- # ```
118
- #
119
- module V1
120
- # rubocop:enable LineLength
121
-
122
- ##
123
- # Service that implements Google Cloud Video Intelligence API.
124
- #
125
- # @param credentials [Google::Auth::Credentials, String, Hash, GRPC::Core::Channel, GRPC::Core::ChannelCredentials, Proc]
126
- # Provides the means for authenticating requests made by the client. This parameter can
127
- # be many types.
128
- # A `Google::Auth::Credentials` uses a the properties of its represented keyfile for
129
- # authenticating requests made by this client.
130
- # A `String` will be treated as the path to the keyfile to be used for the construction of
131
- # credentials for this client.
132
- # A `Hash` will be treated as the contents of a keyfile to be used for the construction of
133
- # credentials for this client.
134
- # A `GRPC::Core::Channel` will be used to make calls through.
135
- # A `GRPC::Core::ChannelCredentials` for the setting up the RPC client. The channel credentials
136
- # should already be composed with a `GRPC::Core::CallCredentials` object.
137
- # A `Proc` will be used as an updater_proc for the Grpc channel. The proc transforms the
138
- # metadata for requests, generally, to give OAuth credentials.
139
- # @param scopes [Array<String>]
140
- # The OAuth scopes for this service. This parameter is ignored if
141
- # an updater_proc is supplied.
142
- # @param client_config [Hash]
143
- # A Hash for call options for each method. See
144
- # Google::Gax#construct_settings for the structure of
145
- # this data. Falls back to the default config if not specified
146
- # or the specified config is missing data points.
147
- # @param timeout [Numeric]
148
- # The default timeout, in seconds, for calls made through this client.
149
- # @param metadata [Hash]
150
- # Default metadata to be sent with each request. This can be overridden on a per call basis.
151
- # @param service_address [String]
152
- # Override for the service hostname, or `nil` to leave as the default.
153
- # @param service_port [Integer]
154
- # Override for the service port, or `nil` to leave as the default.
155
- # @param exception_transformer [Proc]
156
- # An optional proc that intercepts any exceptions raised during an API call to inject
157
- # custom error handling.
158
- def self.new \
159
- credentials: nil,
160
- scopes: nil,
161
- client_config: nil,
162
- timeout: nil,
163
- metadata: nil,
164
- service_address: nil,
165
- service_port: nil,
166
- exception_transformer: nil,
167
- lib_name: nil,
168
- lib_version: nil
169
- kwargs = {
170
- credentials: credentials,
171
- scopes: scopes,
172
- client_config: client_config,
173
- timeout: timeout,
174
- metadata: metadata,
175
- exception_transformer: exception_transformer,
176
- lib_name: lib_name,
177
- service_address: service_address,
178
- service_port: service_port,
179
- lib_version: lib_version
180
- }.select { |_, v| v != nil }
181
- Google::Cloud::VideoIntelligence::V1::VideoIntelligenceServiceClient.new(**kwargs)
182
- end
183
- end
184
- end
185
- end
186
- end
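
The `Google::Cloud::VideoIntelligence.new version: :v1` entry point removed above is superseded in 3.0.0 by a generated service client; data/MIGRATING.md, added in this release, is the authoritative mapping. A minimal sketch of the equivalent label-detection call, assuming the standard `video_intelligence_service` constructor and the keyword-argument request fields used by the rewritten clients:

```rb
require "google/cloud/video_intelligence"

# Build a VideoIntelligenceService client (3.0.0-style entry point).
client = Google::Cloud::VideoIntelligence.video_intelligence_service

# Request label detection on the same sample video used in the old Quick Start.
operation = client.annotate_video(
  input_uri: "gs://cloud-samples-data/video/cat.mp4",
  features:  [:LABEL_DETECTION]
)

# Block until the long-running operation completes, then read the results.
operation.wait_until_done!
raise operation.results.message if operation.error?
operation.results.annotation_results.each do |result|
  result.segment_label_annotations.each do |label|
    puts label.entity.description
  end
end
```
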
data/lib/google/cloud/video_intelligence/v1/credentials.rb DELETED
@@ -1,41 +0,0 @@
1
- # Copyright 2020 Google LLC
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- require "googleauth"
17
-
18
- module Google
19
- module Cloud
20
- module VideoIntelligence
21
- module V1
22
- class Credentials < Google::Auth::Credentials
23
- SCOPE = [
24
- "https://www.googleapis.com/auth/cloud-platform"
25
- ].freeze
26
- PATH_ENV_VARS = %w(VIDEO_INTELLIGENCE_CREDENTIALS
27
- VIDEO_INTELLIGENCE_KEYFILE
28
- GOOGLE_CLOUD_CREDENTIALS
29
- GOOGLE_CLOUD_KEYFILE
30
- GCLOUD_KEYFILE)
31
- JSON_ENV_VARS = %w(VIDEO_INTELLIGENCE_CREDENTIALS_JSON
32
- VIDEO_INTELLIGENCE_KEYFILE_JSON
33
- GOOGLE_CLOUD_CREDENTIALS_JSON
34
- GOOGLE_CLOUD_KEYFILE_JSON
35
- GCLOUD_KEYFILE_JSON)
36
- DEFAULT_PATHS = ["~/.config/gcloud/application_default_credentials.json"]
37
- end
38
- end
39
- end
40
- end
41
- end
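
The removed `Credentials` class controlled where a 2.x client looked for a keyfile: first the environment variables listed above, then the gcloud application-default path. The same keyfile could also be supplied explicitly through the `credentials` parameter documented for `Google::Cloud::VideoIntelligence::V1.new`; a short sketch of both approaches (the keyfile path is a placeholder):

```rb
require "google/cloud/video_intelligence"

# Option 1: point one of the recognized environment variables at a keyfile.
ENV["VIDEO_INTELLIGENCE_CREDENTIALS"] = "/path/to/keyfile.json"
client = Google::Cloud::VideoIntelligence.new version: :v1

# Option 2: pass the keyfile path (or its parsed Hash contents) directly.
client = Google::Cloud::VideoIntelligence.new version: :v1,
                                               credentials: "/path/to/keyfile.json"
```
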
data/lib/google/cloud/video_intelligence/v1/doc/google/cloud/videointelligence/v1/video_intelligence.rb DELETED
@@ -1,789 +0,0 @@
1
- # Copyright 2020 Google LLC
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # https://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- module Google
17
- module Cloud
18
- module VideoIntelligence
19
- module V1
20
- # Video annotation request.
21
- # @!attribute [rw] input_uri
22
- # @return [String]
23
- # Input video location. Currently, only
24
- # [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
25
- # supported, which must be specified in the following format:
26
- # `gs://bucket-id/object-id` (other URI formats return
27
- # {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
28
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
29
- # A video URI may include wildcards in `object-id`, and thus identify
30
- # multiple videos. Supported wildcards: '*' to match 0 or more characters;
31
- # '?' to match 1 character. If unset, the input video should be embedded
32
- # in the request as `input_content`. If set, `input_content` should be unset.
33
- # @!attribute [rw] input_content
34
- # @return [String]
35
- # The video data bytes.
36
- # If unset, the input video(s) should be specified via `input_uri`.
37
- # If set, `input_uri` should be unset.
38
- # @!attribute [rw] features
39
- # @return [Array<Google::Cloud::VideoIntelligence::V1::Feature>]
40
- # Required. Requested video annotation features.
41
- # @!attribute [rw] video_context
42
- # @return [Google::Cloud::VideoIntelligence::V1::VideoContext]
43
- # Additional video context and/or feature-specific parameters.
44
- # @!attribute [rw] output_uri
45
- # @return [String]
46
- # Optional. Location where the output (in JSON format) should be stored.
47
- # Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
48
- # URIs are supported, which must be specified in the following format:
49
- # `gs://bucket-id/object-id` (other URI formats return
50
- # {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
51
- # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
52
- # @!attribute [rw] location_id
53
- # @return [String]
54
- # Optional. Cloud region where annotation should take place. Supported cloud
55
- # regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
56
- # is specified, a region will be determined based on video file location.
57
- class AnnotateVideoRequest; end
58
-
59
- # Video context and/or feature-specific parameters.
60
- # @!attribute [rw] segments
61
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
62
- # Video segments to annotate. The segments may overlap and are not required
63
- # to be contiguous or span the whole video. If unspecified, each video is
64
- # treated as a single segment.
65
- # @!attribute [rw] label_detection_config
66
- # @return [Google::Cloud::VideoIntelligence::V1::LabelDetectionConfig]
67
- # Config for LABEL_DETECTION.
68
- # @!attribute [rw] shot_change_detection_config
69
- # @return [Google::Cloud::VideoIntelligence::V1::ShotChangeDetectionConfig]
70
- # Config for SHOT_CHANGE_DETECTION.
71
- # @!attribute [rw] explicit_content_detection_config
72
- # @return [Google::Cloud::VideoIntelligence::V1::ExplicitContentDetectionConfig]
73
- # Config for EXPLICIT_CONTENT_DETECTION.
74
- # @!attribute [rw] face_detection_config
75
- # @return [Google::Cloud::VideoIntelligence::V1::FaceDetectionConfig]
76
- # Config for FACE_DETECTION.
77
- # @!attribute [rw] speech_transcription_config
78
- # @return [Google::Cloud::VideoIntelligence::V1::SpeechTranscriptionConfig]
79
- # Config for SPEECH_TRANSCRIPTION.
80
- # @!attribute [rw] text_detection_config
81
- # @return [Google::Cloud::VideoIntelligence::V1::TextDetectionConfig]
82
- # Config for TEXT_DETECTION.
83
- # @!attribute [rw] object_tracking_config
84
- # @return [Google::Cloud::VideoIntelligence::V1::ObjectTrackingConfig]
85
- # Config for OBJECT_TRACKING.
86
- class VideoContext; end
87
-
88
- # Config for LABEL_DETECTION.
89
- # @!attribute [rw] label_detection_mode
90
- # @return [Google::Cloud::VideoIntelligence::V1::LabelDetectionMode]
91
- # What labels should be detected with LABEL_DETECTION, in addition to
92
- # video-level labels or segment-level labels.
93
- # If unspecified, defaults to `SHOT_MODE`.
94
- # @!attribute [rw] stationary_camera
95
- # @return [true, false]
96
- # Whether the video has been shot from a stationary (i.e. non-moving) camera.
97
- # When set to true, might improve detection accuracy for moving objects.
98
- # Should be used with `SHOT_AND_FRAME_MODE` enabled.
99
- # @!attribute [rw] model
100
- # @return [String]
101
- # Model to use for label detection.
102
- # Supported values: "builtin/stable" (the default if unset) and
103
- # "builtin/latest".
104
- # @!attribute [rw] frame_confidence_threshold
105
- # @return [Float]
106
- # The confidence threshold we perform filtering on the labels from
107
- # frame-level detection. If not set, it is set to 0.4 by default. The valid
108
- # range for this threshold is [0.1, 0.9]. Any value set outside of this
109
- # range will be clipped.
110
- # Note: for best results please follow the default threshold. We will update
111
- # the default threshold everytime when we release a new model.
112
- # @!attribute [rw] video_confidence_threshold
113
- # @return [Float]
114
- # The confidence threshold we perform filtering on the labels from
115
- # video-level and shot-level detections. If not set, it is set to 0.3 by
116
- # default. The valid range for this threshold is [0.1, 0.9]. Any value set
117
- # outside of this range will be clipped.
118
- # Note: for best results please follow the default threshold. We will update
119
- # the default threshold everytime when we release a new model.
120
- class LabelDetectionConfig; end
121
-
122
- # Config for SHOT_CHANGE_DETECTION.
123
- # @!attribute [rw] model
124
- # @return [String]
125
- # Model to use for shot change detection.
126
- # Supported values: "builtin/stable" (the default if unset) and
127
- # "builtin/latest".
128
- class ShotChangeDetectionConfig; end
129
-
130
- # Config for OBJECT_TRACKING.
131
- # @!attribute [rw] model
132
- # @return [String]
133
- # Model to use for object tracking.
134
- # Supported values: "builtin/stable" (the default if unset) and
135
- # "builtin/latest".
136
- class ObjectTrackingConfig; end
137
-
138
- # Config for FACE_DETECTION.
139
- # @!attribute [rw] model
140
- # @return [String]
141
- # Model to use for face detection.
142
- # Supported values: "builtin/stable" (the default if unset) and
143
- # "builtin/latest".
144
- # @!attribute [rw] include_bounding_boxes
145
- # @return [true, false]
146
- # Whether bounding boxes be included in the face annotation output.
147
- class FaceDetectionConfig; end
148
-
149
- # Config for EXPLICIT_CONTENT_DETECTION.
150
- # @!attribute [rw] model
151
- # @return [String]
152
- # Model to use for explicit content detection.
153
- # Supported values: "builtin/stable" (the default if unset) and
154
- # "builtin/latest".
155
- class ExplicitContentDetectionConfig; end
156
-
157
- # Config for TEXT_DETECTION.
158
- # @!attribute [rw] language_hints
159
- # @return [Array<String>]
160
- # Language hint can be specified if the language to be detected is known a
161
- # priori. It can increase the accuracy of the detection. Language hint must
162
- # be language code in BCP-47 format.
163
- #
164
- # Automatic language detection is performed if no hint is provided.
165
- # @!attribute [rw] model
166
- # @return [String]
167
- # Model to use for text detection.
168
- # Supported values: "builtin/stable" (the default if unset) and
169
- # "builtin/latest".
170
- class TextDetectionConfig; end
171
-
172
- # Video segment.
173
- # @!attribute [rw] start_time_offset
174
- # @return [Google::Protobuf::Duration]
175
- # Time-offset, relative to the beginning of the video,
176
- # corresponding to the start of the segment (inclusive).
177
- # @!attribute [rw] end_time_offset
178
- # @return [Google::Protobuf::Duration]
179
- # Time-offset, relative to the beginning of the video,
180
- # corresponding to the end of the segment (inclusive).
181
- class VideoSegment; end
182
-
183
- # Video segment level annotation results for label detection.
184
- # @!attribute [rw] segment
185
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
186
- # Video segment where a label was detected.
187
- # @!attribute [rw] confidence
188
- # @return [Float]
189
- # Confidence that the label is accurate. Range: [0, 1].
190
- class LabelSegment; end
191
-
192
- # Video frame level annotation results for label detection.
193
- # @!attribute [rw] time_offset
194
- # @return [Google::Protobuf::Duration]
195
- # Time-offset, relative to the beginning of the video, corresponding to the
196
- # video frame for this location.
197
- # @!attribute [rw] confidence
198
- # @return [Float]
199
- # Confidence that the label is accurate. Range: [0, 1].
200
- class LabelFrame; end
201
-
202
- # Detected entity from video analysis.
203
- # @!attribute [rw] entity_id
204
- # @return [String]
205
- # Opaque entity ID. Some IDs may be available in
206
- # [Google Knowledge Graph Search
207
- # API](https://developers.google.com/knowledge-graph/).
208
- # @!attribute [rw] description
209
- # @return [String]
210
- # Textual description, e.g. `Fixed-gear bicycle`.
211
- # @!attribute [rw] language_code
212
- # @return [String]
213
- # Language code for `description` in BCP-47 format.
214
- class Entity; end
215
-
216
- # Label annotation.
217
- # @!attribute [rw] entity
218
- # @return [Google::Cloud::VideoIntelligence::V1::Entity]
219
- # Detected entity.
220
- # @!attribute [rw] category_entities
221
- # @return [Array<Google::Cloud::VideoIntelligence::V1::Entity>]
222
- # Common categories for the detected entity.
223
- # E.g. when the label is `Terrier` the category is likely `dog`. And in some
224
- # cases there might be more than one categories e.g. `Terrier` could also be
225
- # a `pet`.
226
- # @!attribute [rw] segments
227
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelSegment>]
228
- # All video segments where a label was detected.
229
- # @!attribute [rw] frames
230
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelFrame>]
231
- # All video frames where a label was detected.
232
- class LabelAnnotation; end
233
-
234
- # Video frame level annotation results for explicit content.
235
- # @!attribute [rw] time_offset
236
- # @return [Google::Protobuf::Duration]
237
- # Time-offset, relative to the beginning of the video, corresponding to the
238
- # video frame for this location.
239
- # @!attribute [rw] pornography_likelihood
240
- # @return [Google::Cloud::VideoIntelligence::V1::Likelihood]
241
- # Likelihood of the pornography content..
242
- class ExplicitContentFrame; end
243
-
244
- # Explicit content annotation (based on per-frame visual signals only).
245
- # If no explicit content has been detected in a frame, no annotations are
246
- # present for that frame.
247
- # @!attribute [rw] frames
248
- # @return [Array<Google::Cloud::VideoIntelligence::V1::ExplicitContentFrame>]
249
- # All video frames where explicit content was detected.
250
- class ExplicitContentAnnotation; end
251
-
252
- # Normalized bounding box.
253
- # The normalized vertex coordinates are relative to the original image.
254
- # Range: [0, 1].
255
- # @!attribute [rw] left
256
- # @return [Float]
257
- # Left X coordinate.
258
- # @!attribute [rw] top
259
- # @return [Float]
260
- # Top Y coordinate.
261
- # @!attribute [rw] right
262
- # @return [Float]
263
- # Right X coordinate.
264
- # @!attribute [rw] bottom
265
- # @return [Float]
266
- # Bottom Y coordinate.
267
- class NormalizedBoundingBox; end
268
-
269
- # Video segment level annotation results for face detection.
270
- # @!attribute [rw] segment
271
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
272
- # Video segment where a face was detected.
273
- class FaceSegment; end
274
-
275
- # Video frame level annotation results for face detection.
276
- # @!attribute [rw] normalized_bounding_boxes
277
- # @return [Array<Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox>]
278
- # Normalized Bounding boxes in a frame.
279
- # There can be more than one boxes if the same face is detected in multiple
280
- # locations within the current frame.
281
- # @!attribute [rw] time_offset
282
- # @return [Google::Protobuf::Duration]
283
- # Time-offset, relative to the beginning of the video,
284
- # corresponding to the video frame for this location.
285
- class FaceFrame; end
286
-
287
- # Face annotation.
288
- # @!attribute [rw] thumbnail
289
- # @return [String]
290
- # Thumbnail of a representative face view (in JPEG format).
291
- # @!attribute [rw] segments
292
- # @return [Array<Google::Cloud::VideoIntelligence::V1::FaceSegment>]
293
- # All video segments where a face was detected.
294
- # @!attribute [rw] frames
295
- # @return [Array<Google::Cloud::VideoIntelligence::V1::FaceFrame>]
296
- # All video frames where a face was detected.
297
- class FaceAnnotation; end
298
-
299
- # For tracking related features.
300
- # An object at time_offset with attributes, and located with
301
- # normalized_bounding_box.
302
- # @!attribute [rw] normalized_bounding_box
303
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
304
- # Normalized Bounding box in a frame, where the object is located.
305
- # @!attribute [rw] time_offset
306
- # @return [Google::Protobuf::Duration]
307
- # Time-offset, relative to the beginning of the video,
308
- # corresponding to the video frame for this object.
309
- # @!attribute [rw] attributes
310
- # @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
311
- # Optional. The attributes of the object in the bounding box.
312
- # @!attribute [rw] landmarks
313
- # @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedLandmark>]
314
- # Optional. The detected landmarks.
315
- class TimestampedObject; end
316
-
317
- # A track of an object instance.
318
- # @!attribute [rw] segment
319
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
320
- # Video segment of a track.
321
- # @!attribute [rw] timestamped_objects
322
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TimestampedObject>]
323
- # The object with timestamp and attributes per frame in the track.
324
- # @!attribute [rw] attributes
325
- # @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
326
- # Optional. Attributes in the track level.
327
- # @!attribute [rw] confidence
328
- # @return [Float]
329
- # Optional. The confidence score of the tracked object.
330
- class Track; end
331
-
332
- # A generic detected attribute represented by name in string format.
333
- # @!attribute [rw] name
334
- # @return [String]
335
- # The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
336
- # A full list of supported type names will be provided in the document.
337
- # @!attribute [rw] confidence
338
- # @return [Float]
339
- # Detected attribute confidence. Range [0, 1].
340
- # @!attribute [rw] value
341
- # @return [String]
342
- # Text value of the detection result. For example, the value for "HairColor"
343
- # can be "black", "blonde", etc.
344
- class DetectedAttribute; end
345
-
346
- # A generic detected landmark represented by name in string format and a 2D
347
- # location.
348
- # @!attribute [rw] name
349
- # @return [String]
350
- # The name of this landmark, i.e. left_hand, right_shoulder.
351
- # @!attribute [rw] point
352
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedVertex]
353
- # The 2D point of the detected landmark using the normalized image
354
- # coordindate system. The normalized coordinates have the range from 0 to 1.
355
- # @!attribute [rw] confidence
356
- # @return [Float]
357
- # The confidence score of the detected landmark. Range [0, 1].
358
- class DetectedLandmark; end
359
-
360
- # Annotation results for a single video.
361
- # @!attribute [rw] input_uri
362
- # @return [String]
363
- # Video file location in
364
- # [Google Cloud Storage](https://cloud.google.com/storage/).
365
- # @!attribute [rw] segment
366
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
367
- # Video segment on which the annotation is run.
368
- # @!attribute [rw] segment_label_annotations
369
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
370
- # Topical label annotations on video level or user specified segment level.
371
- # There is exactly one element for each unique label.
372
- # @!attribute [rw] segment_presence_label_annotations
373
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
374
- # Presence label annotations on video level or user specified segment level.
375
- # There is exactly one element for each unique label. Compared to the
376
- # existing topical `segment_label_annotations`, this field presents more
377
- # fine-grained, segment-level labels detected in video content and is made
378
- # available only when the client sets `LabelDetectionConfig.model` to
379
- # "builtin/latest" in the request.
380
- # @!attribute [rw] shot_label_annotations
381
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
382
- # Topical label annotations on shot level.
383
- # There is exactly one element for each unique label.
384
- # @!attribute [rw] shot_presence_label_annotations
385
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
386
- # Presence label annotations on shot level. There is exactly one element for
387
- # each unique label. Compared to the existing topical
388
- # `shot_label_annotations`, this field presents more fine-grained, shot-level
389
- # labels detected in video content and is made available only when the client
390
- # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
391
- # @!attribute [rw] frame_label_annotations
392
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
393
- # Label annotations on frame level.
394
- # There is exactly one element for each unique label.
395
- # @!attribute [rw] face_annotations
396
- # @return [Array<Google::Cloud::VideoIntelligence::V1::FaceAnnotation>]
397
- # Face annotations. There is exactly one element for each unique face.
398
- # @!attribute [rw] shot_annotations
399
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
400
- # Shot annotations. Each shot is represented as a video segment.
401
- # @!attribute [rw] explicit_annotation
402
- # @return [Google::Cloud::VideoIntelligence::V1::ExplicitContentAnnotation]
403
- # Explicit content annotation.
404
- # @!attribute [rw] speech_transcriptions
405
- # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechTranscription>]
406
- # Speech transcription.
407
- # @!attribute [rw] text_annotations
408
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TextAnnotation>]
409
- # OCR text detection and tracking.
410
- # Annotations for list of detected text snippets. Each will have list of
411
- # frame information associated with it.
412
- # @!attribute [rw] object_annotations
413
- # @return [Array<Google::Cloud::VideoIntelligence::V1::ObjectTrackingAnnotation>]
414
- # Annotations for list of objects detected and tracked in video.
415
- # @!attribute [rw] logo_recognition_annotations
416
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LogoRecognitionAnnotation>]
417
- # Annotations for list of logos detected, tracked and recognized in video.
418
- # @!attribute [rw] error
419
- # @return [Google::Rpc::Status]
420
- # If set, indicates an error. Note that for a single `AnnotateVideoRequest`
421
- # some videos may succeed and some may fail.
422
- class VideoAnnotationResults; end
423
-
424
- # Video annotation response. Included in the `response`
425
- # field of the `Operation` returned by the `GetOperation`
426
- # call of the `google::longrunning::Operations` service.
427
- # @!attribute [rw] annotation_results
428
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoAnnotationResults>]
429
- # Annotation results for all videos specified in `AnnotateVideoRequest`.
430
- class AnnotateVideoResponse; end
431
-
432
- # Annotation progress for a single video.
433
- # @!attribute [rw] input_uri
434
- # @return [String]
435
- # Video file location in
436
- # [Google Cloud Storage](https://cloud.google.com/storage/).
437
- # @!attribute [rw] progress_percent
438
- # @return [Integer]
439
- # Approximate percentage processed thus far. Guaranteed to be
440
- # 100 when fully processed.
441
- # @!attribute [rw] start_time
442
- # @return [Google::Protobuf::Timestamp]
443
- # Time when the request was received.
444
- # @!attribute [rw] update_time
445
- # @return [Google::Protobuf::Timestamp]
446
- # Time of the most recent update.
447
- # @!attribute [rw] feature
448
- # @return [Google::Cloud::VideoIntelligence::V1::Feature]
449
- # Specifies which feature is being tracked if the request contains more than
450
- # one features.
451
- # @!attribute [rw] segment
452
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
453
- # Specifies which segment is being tracked if the request contains more than
454
- # one segments.
455
- class VideoAnnotationProgress; end
456
-
457
- # Video annotation progress. Included in the `metadata`
458
- # field of the `Operation` returned by the `GetOperation`
459
- # call of the `google::longrunning::Operations` service.
460
- # @!attribute [rw] annotation_progress
461
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoAnnotationProgress>]
462
- # Progress metadata for all videos specified in `AnnotateVideoRequest`.
463
- class AnnotateVideoProgress; end
464
-
465
- # Config for SPEECH_TRANSCRIPTION.
466
- # @!attribute [rw] language_code
467
- # @return [String]
468
- # Required. *Required* The language of the supplied audio as a
469
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
470
- # Example: "en-US".
471
- # See [Language Support](https://cloud.google.com/speech/docs/languages)
472
- # for a list of the currently supported language codes.
473
- # @!attribute [rw] max_alternatives
474
- # @return [Integer]
475
- # Optional. Maximum number of recognition hypotheses to be returned.
476
- # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
477
- # within each `SpeechTranscription`. The server may return fewer than
478
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
479
- # return a maximum of one. If omitted, will return a maximum of one.
480
- # @!attribute [rw] filter_profanity
481
- # @return [true, false]
482
- # Optional. If set to `true`, the server will attempt to filter out
483
- # profanities, replacing all but the initial character in each filtered word
484
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
485
- # won't be filtered out.
486
- # @!attribute [rw] speech_contexts
487
- # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechContext>]
488
- # Optional. A means to provide context to assist the speech recognition.
489
- # @!attribute [rw] enable_automatic_punctuation
490
- # @return [true, false]
491
- # Optional. If 'true', adds punctuation to recognition result hypotheses.
492
- # This feature is only available in select languages. Setting this for
493
- # requests in other languages has no effect at all. The default 'false' value
494
- # does not add punctuation to result hypotheses. NOTE: "This is currently
495
- # offered as an experimental service, complimentary to all users. In the
496
- # future this may be exclusively available as a premium feature."
497
- # @!attribute [rw] audio_tracks
498
- # @return [Array<Integer>]
499
- # Optional. For file formats, such as MXF or MKV, supporting multiple audio
500
- # tracks, specify up to two tracks. Default: track 0.
501
- # @!attribute [rw] enable_speaker_diarization
502
- # @return [true, false]
503
- # Optional. If 'true', enables speaker detection for each recognized word in
504
- # the top alternative of the recognition result using a speaker_tag provided
505
- # in the WordInfo.
506
- # Note: When this is true, we send all the words from the beginning of the
507
- # audio for the top alternative in every consecutive responses.
508
- # This is done in order to improve our speaker tags as our models learn to
509
- # identify the speakers in the conversation over time.
510
- # @!attribute [rw] diarization_speaker_count
511
- # @return [Integer]
512
- # Optional. If set, specifies the estimated number of speakers in the conversation.
513
- # If not set, defaults to '2'.
514
- # Ignored unless enable_speaker_diarization is set to true.
515
- # @!attribute [rw] enable_word_confidence
516
- # @return [true, false]
517
- # Optional. If `true`, the top result includes a list of words and the
518
- # confidence for those words. If `false`, no word-level confidence
519
- # information is returned. The default is `false`.
520
- class SpeechTranscriptionConfig; end
521
-
522
- # Provides "hints" to the speech recognizer to favor specific words and phrases
523
- # in the results.
524
- # @!attribute [rw] phrases
525
- # @return [Array<String>]
526
- # Optional. A list of strings containing words and phrases "hints" so that
527
- # the speech recognition is more likely to recognize them. This can be used
528
- # to improve the accuracy for specific words and phrases, for example, if
529
- # specific commands are typically spoken by the user. This can also be used
530
- # to add additional words to the vocabulary of the recognizer. See
531
- # [usage limits](https://cloud.google.com/speech/limits#content).
532
- class SpeechContext; end
533
-
534
- # A speech recognition result corresponding to a portion of the audio.
535
- # @!attribute [rw] alternatives
536
- # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechRecognitionAlternative>]
537
- # May contain one or more recognition hypotheses (up to the maximum specified
538
- # in `max_alternatives`). These alternatives are ordered in terms of
539
- # accuracy, with the top (first) alternative being the most probable, as
540
- # ranked by the recognizer.
541
- # @!attribute [rw] language_code
542
- # @return [String]
543
- # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
544
- # the language in this result. This language code was detected to have the
545
- # most likelihood of being spoken in the audio.
546
- class SpeechTranscription; end
547
-
548
- # Alternative hypotheses (a.k.a. n-best list).
549
- # @!attribute [rw] transcript
550
- # @return [String]
551
- # Transcript text representing the words that the user spoke.
552
- # @!attribute [rw] confidence
553
- # @return [Float]
554
- # Output only. The confidence estimate between 0.0 and 1.0. A higher number
555
- # indicates an estimated greater likelihood that the recognized words are
556
- # correct. This field is set only for the top alternative.
557
- # This field is not guaranteed to be accurate and users should not rely on it
558
- # to be always provided.
559
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
560
- # @!attribute [rw] words
561
- # @return [Array<Google::Cloud::VideoIntelligence::V1::WordInfo>]
562
- # Output only. A list of word-specific information for each recognized word.
563
- # Note: When `enable_speaker_diarization` is true, you will see all the words
564
- # from the beginning of the audio.
565
- class SpeechRecognitionAlternative; end
566
-
567
- # Word-specific information for recognized words. Word information is only
568
- # included in the response when certain request parameters are set, such
569
- # as `enable_word_time_offsets`.
570
- # @!attribute [rw] start_time
571
- # @return [Google::Protobuf::Duration]
572
- # Time offset relative to the beginning of the audio, and
573
- # corresponding to the start of the spoken word. This field is only set if
574
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
575
- # experimental feature and the accuracy of the time offset can vary.
576
- # @!attribute [rw] end_time
577
- # @return [Google::Protobuf::Duration]
578
- # Time offset relative to the beginning of the audio, and
579
- # corresponding to the end of the spoken word. This field is only set if
580
- # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
581
- # experimental feature and the accuracy of the time offset can vary.
582
- # @!attribute [rw] word
583
- # @return [String]
584
- # The word corresponding to this set of information.
585
- # @!attribute [rw] confidence
586
- # @return [Float]
587
- # Output only. The confidence estimate between 0.0 and 1.0. A higher number
588
- # indicates an estimated greater likelihood that the recognized words are
589
- # correct. This field is set only for the top alternative.
590
- # This field is not guaranteed to be accurate and users should not rely on it
591
- # to be always provided.
592
- # The default of 0.0 is a sentinel value indicating `confidence` was not set.
593
- # @!attribute [rw] speaker_tag
594
- # @return [Integer]
595
- # Output only. A distinct integer value is assigned for every speaker within
596
- # the audio. This field specifies which one of those speakers was detected to
597
- # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
598
- # and is only set if speaker diarization is enabled.
599
- class WordInfo; end
600
-
601
- # A vertex represents a 2D point in the image.
602
- # NOTE: the normalized vertex coordinates are relative to the original image
603
- # and range from 0 to 1.
604
- # @!attribute [rw] x
605
- # @return [Float]
606
- # X coordinate.
607
- # @!attribute [rw] y
608
- # @return [Float]
609
- # Y coordinate.
610
- class NormalizedVertex; end
611
-
612
- # Normalized bounding polygon for text (that might not be aligned with axis).
613
- # Contains list of the corner points in clockwise order starting from
614
- # top-left corner. For example, for a rectangular bounding box:
615
- # When the text is horizontal it might look like:
616
- # 0----1
617
- # | |
618
- # 3----2
619
- #
620
- # When it's clockwise rotated 180 degrees around the top-left corner it
621
- # becomes:
622
- # 2----3
623
- # | |
624
- # 1----0
625
- #
626
- # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
627
- # than 0, or greater than 1 due to trignometric calculations for location of
628
- # the box.
629
- # @!attribute [rw] vertices
630
- # @return [Array<Google::Cloud::VideoIntelligence::V1::NormalizedVertex>]
631
- # Normalized vertices of the bounding polygon.
632
- class NormalizedBoundingPoly; end
633
-
634
- # Video segment level annotation results for text detection.
635
- # @!attribute [rw] segment
636
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
637
- # Video segment where a text snippet was detected.
638
- # @!attribute [rw] confidence
639
- # @return [Float]
640
- # Confidence for the track of detected text. It is calculated as the highest
641
- # over all frames where OCR detected text appears.
642
- # @!attribute [rw] frames
643
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TextFrame>]
644
- # Information related to the frames where OCR detected text appears.
645
- class TextSegment; end
646
-
647
- # Video frame level annotation results for text annotation (OCR).
648
- # Contains information regarding timestamp and bounding box locations for the
649
- # frames containing detected OCR text snippets.
650
- # @!attribute [rw] rotated_bounding_box
651
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingPoly]
652
- # Bounding polygon of the detected text for this frame.
653
- # @!attribute [rw] time_offset
654
- # @return [Google::Protobuf::Duration]
655
- # Timestamp of this frame.
656
- class TextFrame; end
657
-
658
- # Annotations related to one detected OCR text snippet. This will contain the
659
- # corresponding text, confidence value, and frame level information for each
660
- # detection.
661
- # @!attribute [rw] text
662
- # @return [String]
663
- # The detected text.
664
- # @!attribute [rw] segments
665
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TextSegment>]
666
- # All video segments where OCR detected text appears.
667
- class TextAnnotation; end
668
-
669
- # Video frame level annotations for object detection and tracking. This field
670
- # stores per frame location, time offset, and confidence.
671
- # @!attribute [rw] normalized_bounding_box
672
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
673
- # The normalized bounding box location of this object track for the frame.
674
- # @!attribute [rw] time_offset
675
- # @return [Google::Protobuf::Duration]
676
- # The timestamp of the frame in microseconds.
677
- class ObjectTrackingFrame; end
678
-
679
- # Annotations corresponding to one tracked object.
680
- # @!attribute [rw] segment
681
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
682
- # Non-streaming batch mode ONLY.
683
- # Each object track corresponds to one video segment where it appears.
684
- # @!attribute [rw] track_id
685
- # @return [Integer]
686
- # Streaming mode ONLY.
687
- # In streaming mode, we do not know the end time of a tracked object
688
- # before it is completed. Hence, there is no VideoSegment info returned.
689
- # Instead, we provide a unique identifiable integer track_id so that
690
- # the customers can correlate the results of the ongoing
691
- # ObjectTrackAnnotation of the same track_id over time.
692
- # @!attribute [rw] entity
693
- # @return [Google::Cloud::VideoIntelligence::V1::Entity]
694
- # Entity to specify the object category that this track is labeled as.
695
- # @!attribute [rw] confidence
696
- # @return [Float]
697
- # Object category's labeling confidence of this track.
698
- # @!attribute [rw] frames
699
- # @return [Array<Google::Cloud::VideoIntelligence::V1::ObjectTrackingFrame>]
700
- # Information corresponding to all frames where this object track appears.
701
- # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
702
- # messages in frames.
703
- # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
704
- class ObjectTrackingAnnotation; end
705
-
706
- # Annotation corresponding to one detected, tracked and recognized logo class.
707
- # @!attribute [rw] entity
708
- # @return [Google::Cloud::VideoIntelligence::V1::Entity]
709
- # Entity category information to specify the logo class that all the logo
710
- # tracks within this LogoRecognitionAnnotation are recognized as.
711
- # @!attribute [rw] tracks
712
- # @return [Array<Google::Cloud::VideoIntelligence::V1::Track>]
713
- # All logo tracks where the recognized logo appears. Each track corresponds
714
- # to one logo instance appearing in consecutive frames.
715
- # @!attribute [rw] segments
716
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
717
- # All video segments where the recognized logo appears. There might be
718
- # multiple instances of the same logo class appearing in one VideoSegment.
719
- class LogoRecognitionAnnotation; end
720
-
721
- # Video annotation feature.
722
- module Feature
723
- # Unspecified.
724
- FEATURE_UNSPECIFIED = 0
725
-
726
- # Label detection. Detect objects, such as dog or flower.
727
- LABEL_DETECTION = 1
728
-
729
- # Shot change detection.
730
- SHOT_CHANGE_DETECTION = 2
731
-
732
- # Explicit content detection.
733
- EXPLICIT_CONTENT_DETECTION = 3
734
-
735
- # Human face detection and tracking.
736
- FACE_DETECTION = 4
737
-
738
- # Speech transcription.
739
- SPEECH_TRANSCRIPTION = 6
740
-
741
- # OCR text detection and tracking.
742
- TEXT_DETECTION = 7
743
-
744
- # Object detection and tracking.
745
- OBJECT_TRACKING = 9
746
-
747
- # Logo detection, tracking, and recognition.
748
- LOGO_RECOGNITION = 12
749
- end
750
-
751
- # Label detection mode.
752
- module LabelDetectionMode
753
- # Unspecified.
754
- LABEL_DETECTION_MODE_UNSPECIFIED = 0
755
-
756
- # Detect shot-level labels.
757
- SHOT_MODE = 1
758
-
759
- # Detect frame-level labels.
760
- FRAME_MODE = 2
761
-
762
- # Detect both shot-level and frame-level labels.
763
- SHOT_AND_FRAME_MODE = 3
764
- end
765
-
766
- # Bucketized representation of likelihood.
767
- module Likelihood
768
- # Unspecified likelihood.
769
- LIKELIHOOD_UNSPECIFIED = 0
770
-
771
- # Very unlikely.
772
- VERY_UNLIKELY = 1
773
-
774
- # Unlikely.
775
- UNLIKELY = 2
776
-
777
- # Possible.
778
- POSSIBLE = 3
779
-
780
- # Likely.
781
- LIKELY = 4
782
-
783
- # Very likely.
784
- VERY_LIKELY = 5
785
- end
786
- end
787
- end
788
- end
789
- end
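
The message types documented above compose through the `video_context` field of an `AnnotateVideoRequest`. Against the 2.x client, request fields were passed as keyword arguments and plain hashes were generally accepted in place of the documented message classes; a sketch of a label-detection request that tunes `LabelDetectionConfig` (sample URI and option values are illustrative):

```rb
require "google/cloud/video_intelligence"

client = Google::Cloud::VideoIntelligence.new version: :v1

# Hash form of Google::Cloud::VideoIntelligence::V1::VideoContext, carrying a
# LabelDetectionConfig: shot- and frame-level labels from a stationary camera.
video_context = {
  label_detection_config: {
    label_detection_mode: :SHOT_AND_FRAME_MODE,
    stationary_camera:    true
  }
}

operation = client.annotate_video(
  [:LABEL_DETECTION],
  input_uri:     "gs://cloud-samples-data/video/cat.mp4",
  video_context: video_context
)

# Wait for the long-running annotation and print segment-level labels.
operation.wait_until_done!
raise operation.results.message if operation.error?
operation.results.annotation_results.each do |result|
  result.segment_label_annotations.each do |label|
    puts label.entity.description
  end
end
```
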