google-cloud-speech-v1 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Cloud
+ module Speech
+ module V1
+ VERSION = "0.1.0"
+ end
+ end
+ end
+ end
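
The constant above is the gem's reported version. As a minimal illustrative sketch (assuming the conventional require path for this generated file), it can be read at runtime:

    require "google/cloud/speech/v1/version"

    # Prints the version constant defined above, e.g. "0.1.0".
    puts Google::Cloud::Speech::V1::VERSION
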
@@ -0,0 +1,4 @@
+ # Cloud Speech-to-Text V1 Protocol Buffer Documentation
+
+ These files are for the YARD documentation of the generated protobuf files.
+ They are not intended to be required or loaded at runtime.
@@ -0,0 +1,59 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Api
+ # An indicator of the behavior of a given field (for example, that a field
+ # is required in requests, or given as output but ignored as input).
+ # This **does not** change the behavior in protocol buffers itself; it only
+ # denotes the behavior and may affect how API tooling handles the field.
+ #
+ # Note: This enum **may** receive new values in the future.
+ module FieldBehavior
+ # Conventional default for enums. Do not use this.
+ FIELD_BEHAVIOR_UNSPECIFIED = 0
+
+ # Specifically denotes a field as optional.
+ # While all fields in protocol buffers are optional, this may be specified
+ # for emphasis if appropriate.
+ OPTIONAL = 1
+
+ # Denotes a field as required.
+ # This indicates that the field **must** be provided as part of the request,
+ # and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
+ REQUIRED = 2
+
+ # Denotes a field as output only.
+ # This indicates that the field is provided in responses, but including the
+ # field in a request does nothing (the server *must* ignore it and
+ # *must not* throw an error as a result of the field's presence).
+ OUTPUT_ONLY = 3
+
+ # Denotes a field as input only.
+ # This indicates that the field is provided in requests, and the
+ # corresponding field is not included in output.
+ INPUT_ONLY = 4
+
+ # Denotes a field as immutable.
+ # This indicates that the field may be set once in a request to create a
+ # resource, but may not be changed thereafter.
+ IMMUTABLE = 5
+ end
+ end
+ end
@@ -0,0 +1,247 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Api
+ # A simple descriptor of a resource type.
+ #
+ # ResourceDescriptor annotates a resource message (either by means of a
+ # protobuf annotation or use in the service config), and associates the
+ # resource's schema, the resource type, and the pattern of the resource name.
+ #
+ # Example:
+ #
+ # message Topic {
+ # // Indicates this message defines a resource schema.
+ # // Declares the resource type in the format of {service}/{kind}.
+ # // For Kubernetes resources, the format is {api group}/{kind}.
+ # option (google.api.resource) = {
+ # type: "pubsub.googleapis.com/Topic"
+ # name_descriptor: {
+ # pattern: "projects/{project}/topics/{topic}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Project"
+ # parent_name_extractor: "projects/{project}"
+ # }
+ # };
+ # }
+ #
+ # The ResourceDescriptor Yaml config will look like:
+ #
+ # resources:
+ # - type: "pubsub.googleapis.com/Topic"
+ # name_descriptor:
+ # - pattern: "projects/\\{project}/topics/\\{topic}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Project"
+ # parent_name_extractor: "projects/\\{project}"
+ #
+ # Sometimes, resources have multiple patterns, typically because they can
+ # live under multiple parents.
+ #
+ # Example:
+ #
+ # message LogEntry {
+ # option (google.api.resource) = {
+ # type: "logging.googleapis.com/LogEntry"
+ # name_descriptor: {
+ # pattern: "projects/{project}/logs/{log}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Project"
+ # parent_name_extractor: "projects/{project}"
+ # }
+ # name_descriptor: {
+ # pattern: "folders/{folder}/logs/{log}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Folder"
+ # parent_name_extractor: "folders/{folder}"
+ # }
+ # name_descriptor: {
+ # pattern: "organizations/{organization}/logs/{log}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Organization"
+ # parent_name_extractor: "organizations/{organization}"
+ # }
+ # name_descriptor: {
+ # pattern: "billingAccounts/{billing_account}/logs/{log}"
+ # parent_type: "billing.googleapis.com/BillingAccount"
+ # parent_name_extractor: "billingAccounts/{billing_account}"
+ # }
+ # };
+ # }
+ #
+ # The ResourceDescriptor Yaml config will look like:
+ #
+ # resources:
+ # - type: 'logging.googleapis.com/LogEntry'
+ # name_descriptor:
+ # - pattern: "projects/{project}/logs/{log}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Project"
+ # parent_name_extractor: "projects/{project}"
+ # - pattern: "folders/{folder}/logs/{log}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Folder"
+ # parent_name_extractor: "folders/{folder}"
+ # - pattern: "organizations/{organization}/logs/{log}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Organization"
+ # parent_name_extractor: "organizations/{organization}"
+ # - pattern: "billingAccounts/{billing_account}/logs/{log}"
+ # parent_type: "billing.googleapis.com/BillingAccount"
+ # parent_name_extractor: "billingAccounts/{billing_account}"
+ #
+ # For flexible resources, the resource name doesn't contain parent names, but
+ # the resource itself has parents for policy evaluation.
+ #
+ # Example:
+ #
+ # message Shelf {
+ # option (google.api.resource) = {
+ # type: "library.googleapis.com/Shelf"
+ # name_descriptor: {
+ # pattern: "shelves/{shelf}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Project"
+ # }
+ # name_descriptor: {
+ # pattern: "shelves/{shelf}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Folder"
+ # }
+ # };
+ # }
+ #
+ # The ResourceDescriptor Yaml config will look like:
+ #
+ # resources:
+ # - type: 'library.googleapis.com/Shelf'
+ # name_descriptor:
+ # - pattern: "shelves/{shelf}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Project"
+ # - pattern: "shelves/{shelf}"
+ # parent_type: "cloudresourcemanager.googleapis.com/Folder"
+ # @!attribute [rw] type
+ # @return [String]
+ # The resource type. It must be in the format of
+ # \\{service_name}/\\{resource_type_kind}. The `resource_type_kind` must be
+ # singular and must not include version numbers.
+ #
+ # Example: `storage.googleapis.com/Bucket`
+ #
+ # The value of the resource_type_kind must follow the regular expression
+ # /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and
+ # should use PascalCase (UpperCamelCase). The maximum number of
+ # characters allowed for the `resource_type_kind` is 100.
+ # @!attribute [rw] pattern
+ # @return [Array<String>]
+ # Optional. The relative resource name pattern associated with this resource
+ # type. The DNS prefix of the full resource name shouldn't be specified here.
+ #
+ # The path pattern must follow the syntax, which aligns with HTTP binding
+ # syntax:
+ #
+ # Template = Segment { "/" Segment } ;
+ # Segment = LITERAL | Variable ;
+ # Variable = "{" LITERAL "}" ;
+ #
+ # Examples:
+ #
+ # - "projects/\\{project}/topics/\\{topic}"
+ # - "projects/\\{project}/knowledgeBases/\\{knowledge_base}"
+ #
+ # The components in braces correspond to the IDs for each resource in the
+ # hierarchy. It is expected that, if multiple patterns are provided,
+ # the same component name (e.g. "project") refers to IDs of the same
+ # type of resource.
+ # @!attribute [rw] name_field
+ # @return [String]
+ # Optional. The field on the resource that designates the resource name
+ # field. If omitted, this is assumed to be "name".
+ # @!attribute [rw] history
+ # @return [Google::Api::ResourceDescriptor::History]
+ # Optional. The historical or future-looking state of the resource pattern.
+ #
+ # Example:
+ #
+ # // The InspectTemplate message originally only supported resource
+ # // names with organization, and project was added later.
+ # message InspectTemplate {
+ # option (google.api.resource) = {
+ # type: "dlp.googleapis.com/InspectTemplate"
+ # pattern:
+ # "organizations/{organization}/inspectTemplates/{inspect_template}"
+ # pattern: "projects/{project}/inspectTemplates/{inspect_template}"
+ # history: ORIGINALLY_SINGLE_PATTERN
+ # };
+ # }
+ # @!attribute [rw] plural
+ # @return [String]
+ # The plural name used in the resource name, such as 'projects' for
+ # the name of 'projects/\\{project}'. It is the same concept of the `plural`
+ # field in k8s CRD spec
+ # https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+ # @!attribute [rw] singular
+ # @return [String]
+ # The same concept of the `singular` field in k8s CRD spec
+ # https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+ # Such as "project" for the `resourcemanager.googleapis.com/Project` type.
+ class ResourceDescriptor
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+
+ # A description of the historical or future-looking state of the
+ # resource pattern.
+ module History
+ # The "unset" value.
+ HISTORY_UNSPECIFIED = 0
+
+ # The resource originally had one pattern and launched as such, and
+ # additional patterns were added later.
+ ORIGINALLY_SINGLE_PATTERN = 1
+
+ # The resource has one pattern, but the API owner expects to add more
+ # later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
+ # that from being necessary once there are multiple patterns.)
+ FUTURE_MULTI_PATTERN = 2
+ end
+ end
+
+ # Defines a proto annotation that describes a string field that refers to
+ # an API resource.
+ # @!attribute [rw] type
+ # @return [String]
+ # The resource type that the annotated field references.
+ #
+ # Example:
+ #
+ # message Subscription {
+ # string topic = 2 [(google.api.resource_reference) = {
+ # type: "pubsub.googleapis.com/Topic"
+ # }];
+ # }
+ # @!attribute [rw] child_type
+ # @return [String]
+ # The resource type of a child collection that the annotated field
+ # references. This is useful for annotating the `parent` field that
+ # doesn't have a fixed resource type.
+ #
+ # Example:
+ #
+ # message ListLogEntriesRequest {
+ # string parent = 1 [(google.api.resource_reference) = {
+ # child_type: "logging.googleapis.com/LogEntry"
+ # };
+ # }
+ class ResourceReference
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+ end
@@ -0,0 +1,753 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Cloud
+ module Speech
+ module V1
+ # The top-level message sent by the client for the `Recognize` method.
+ # @!attribute [rw] config
+ # @return [Google::Cloud::Speech::V1::RecognitionConfig]
+ # Required. Provides information to the recognizer that specifies how to
+ # process the request.
+ # @!attribute [rw] audio
+ # @return [Google::Cloud::Speech::V1::RecognitionAudio]
+ # Required. The audio data to be recognized.
+ class RecognizeRequest
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
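
As an illustrative sketch (not part of the generated file, and assuming the gem's standard `google/cloud/speech/v1` entry point), a `RecognizeRequest` can be assembled from the documented fields; the encoding, sample rate, and `gs://` URI below are placeholder values:

    require "google/cloud/speech/v1"

    config = Google::Cloud::Speech::V1::RecognitionConfig.new(
      encoding:          :LINEAR16,   # see RecognitionConfig::AudioEncoding below
      sample_rate_hertz: 16_000,
      language_code:     "en-US"
    )
    audio = Google::Cloud::Speech::V1::RecognitionAudio.new(
      uri: "gs://example-bucket/example-audio.raw" # placeholder object
    )
    request = Google::Cloud::Speech::V1::RecognizeRequest.new(config: config, audio: audio)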
+
+ # The top-level message sent by the client for the `LongRunningRecognize`
+ # method.
+ # @!attribute [rw] config
+ # @return [Google::Cloud::Speech::V1::RecognitionConfig]
+ # Required. Provides information to the recognizer that specifies how to
+ # process the request.
+ # @!attribute [rw] audio
+ # @return [Google::Cloud::Speech::V1::RecognitionAudio]
+ # Required. The audio data to be recognized.
+ class LongRunningRecognizeRequest
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The top-level message sent by the client for the `StreamingRecognize` method.
+ # Multiple `StreamingRecognizeRequest` messages are sent. The first message
+ # must contain a `streaming_config` message and must not contain
+ # `audio_content`. All subsequent messages must contain `audio_content` and
+ # must not contain a `streaming_config` message.
+ # @!attribute [rw] streaming_config
+ # @return [Google::Cloud::Speech::V1::StreamingRecognitionConfig]
+ # Provides information to the recognizer that specifies how to process the
+ # request. The first `StreamingRecognizeRequest` message must contain a
+ # `streaming_config` message.
+ # @!attribute [rw] audio_content
+ # @return [String]
+ # The audio data to be recognized. Sequential chunks of audio data are sent
+ # in sequential `StreamingRecognizeRequest` messages. The first
+ # `StreamingRecognizeRequest` message must not contain `audio_content` data
+ # and all subsequent `StreamingRecognizeRequest` messages must contain
+ # `audio_content` data. The audio bytes must be encoded as specified in
+ # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
+ # pure binary representation (not base64). See
+ # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
+ class StreamingRecognizeRequest
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
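
A hedged sketch of the ordering rule described above: the first `StreamingRecognizeRequest` carries only `streaming_config`, and every later message carries only raw `audio_content` bytes. The `audio_chunks` enumerable is assumed to exist and yield binary strings:

    # First message: configuration only, no audio_content.
    config_request = Google::Cloud::Speech::V1::StreamingRecognizeRequest.new(
      streaming_config: Google::Cloud::Speech::V1::StreamingRecognitionConfig.new(
        config: Google::Cloud::Speech::V1::RecognitionConfig.new(
          encoding: :LINEAR16, sample_rate_hertz: 16_000, language_code: "en-US"
        ),
        interim_results: true
      )
    )

    # Subsequent messages: audio bytes only, in capture order.
    audio_requests = audio_chunks.map do |chunk|
      Google::Cloud::Speech::V1::StreamingRecognizeRequest.new(audio_content: chunk)
    end

    requests = [config_request] + audio_requests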
+
+ # Provides information to the recognizer that specifies how to process the
+ # request.
+ # @!attribute [rw] config
+ # @return [Google::Cloud::Speech::V1::RecognitionConfig]
+ # Required. Provides information to the recognizer that specifies how to
+ # process the request.
+ # @!attribute [rw] single_utterance
+ # @return [Boolean]
+ # If `false` or omitted, the recognizer will perform continuous
+ # recognition (continuing to wait for and process audio even if the user
+ # pauses speaking) until the client closes the input stream (gRPC API) or
+ # until the maximum time limit has been reached. May return multiple
+ # `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
+ #
+ # If `true`, the recognizer will detect a single spoken utterance. When it
+ # detects that the user has paused or stopped speaking, it will return an
+ # `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
+ # more than one `StreamingRecognitionResult` with the `is_final` flag set to
+ # `true`.
+ # @!attribute [rw] interim_results
+ # @return [Boolean]
+ # If `true`, interim results (tentative hypotheses) may be
+ # returned as they become available (these interim results are indicated with
+ # the `is_final=false` flag).
+ # If `false` or omitted, only `is_final=true` result(s) are returned.
+ class StreamingRecognitionConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Provides information to the recognizer that specifies how to process the
+ # request.
+ # @!attribute [rw] encoding
+ # @return [Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding]
+ # Encoding of audio data sent in all `RecognitionAudio` messages.
+ # This field is optional for `FLAC` and `WAV` audio files and required
+ # for all other audio formats. For details, see {Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
+ # @!attribute [rw] sample_rate_hertz
+ # @return [Integer]
+ # Sample rate in Hertz of the audio data sent in all
+ # `RecognitionAudio` messages. Valid values are: 8000-48000.
+ # 16000 is optimal. For best results, set the sampling rate of the audio
+ # source to 16000 Hz. If that's not possible, use the native sample rate of
+ # the audio source (instead of re-sampling).
+ # This field is optional for FLAC and WAV audio files, but is
+ # required for all other audio formats. For details, see {Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
+ # @!attribute [rw] audio_channel_count
+ # @return [Integer]
+ # The number of channels in the input audio data.
+ # ONLY set this for MULTI-CHANNEL recognition.
+ # Valid values for LINEAR16 and FLAC are `1`-`8`.
+ # Valid values for OGG_OPUS are '1'-'254'.
+ # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+ # If `0` or omitted, defaults to one channel (mono).
+ # Note: We only recognize the first channel by default.
+ # To perform independent recognition on each channel set
+ # `enable_separate_recognition_per_channel` to 'true'.
+ # @!attribute [rw] enable_separate_recognition_per_channel
+ # @return [Boolean]
+ # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+ # to get each channel recognized separately. The recognition result will
+ # contain a `channel_tag` field to state which channel that result belongs
+ # to. If this is not true, we will only recognize the first channel. The
+ # request is billed cumulatively for all channels recognized:
+ # `audio_channel_count` multiplied by the length of the audio.
+ # @!attribute [rw] language_code
+ # @return [String]
+ # Required. The language of the supplied audio as a
+ # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+ # Example: "en-US".
+ # See [Language
+ # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+ # of the currently supported language codes.
+ # @!attribute [rw] max_alternatives
+ # @return [Integer]
+ # Maximum number of recognition hypotheses to be returned.
+ # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+ # within each `SpeechRecognitionResult`.
+ # The server may return fewer than `max_alternatives`.
+ # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+ # one. If omitted, will return a maximum of one.
+ # @!attribute [rw] profanity_filter
+ # @return [Boolean]
+ # If set to `true`, the server will attempt to filter out
+ # profanities, replacing all but the initial character in each filtered word
+ # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ # won't be filtered out.
+ # @!attribute [rw] speech_contexts
+ # @return [Array<Google::Cloud::Speech::V1::SpeechContext>]
+ # Array of {Google::Cloud::Speech::V1::SpeechContext SpeechContext}.
+ # A means to provide context to assist the speech recognition. For more
+ # information, see
+ # [speech
+ # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+ # @!attribute [rw] enable_word_time_offsets
+ # @return [Boolean]
+ # If `true`, the top result includes a list of words and
+ # the start and end time offsets (timestamps) for those words. If
+ # `false`, no word-level time offset information is returned. The default is
+ # `false`.
+ # @!attribute [rw] enable_automatic_punctuation
+ # @return [Boolean]
+ # If 'true', adds punctuation to recognition result hypotheses.
+ # This feature is only available in select languages. Setting this for
+ # requests in other languages has no effect at all.
+ # The default 'false' value does not add punctuation to result hypotheses.
+ # Note: This is currently offered as an experimental service, complimentary
+ # to all users. In the future this may be exclusively available as a
+ # premium feature.
+ # @!attribute [rw] diarization_config
+ # @return [Google::Cloud::Speech::V1::SpeakerDiarizationConfig]
+ # Config to enable speaker diarization and set additional
+ # parameters to make diarization better suited for your application.
+ # Note: When this is enabled, we send all the words from the beginning of the
+ # audio for the top alternative in every consecutive STREAMING response.
+ # This is done in order to improve our speaker tags as our models learn to
+ # identify the speakers in the conversation over time.
+ # For non-streaming requests, the diarization results will be provided only
+ # in the top alternative of the FINAL SpeechRecognitionResult.
+ # @!attribute [rw] metadata
+ # @return [Google::Cloud::Speech::V1::RecognitionMetadata]
+ # Metadata regarding this request.
+ # @!attribute [rw] model
+ # @return [String]
+ # Which model to select for the given request. Select the model
+ # best suited to your domain to get best results. If a model is not
+ # explicitly specified, then we auto-select a model based on the parameters
+ # in the RecognitionConfig.
+ # <table>
+ # <tr>
+ # <td><b>Model</b></td>
+ # <td><b>Description</b></td>
+ # </tr>
+ # <tr>
+ # <td><code>command_and_search</code></td>
+ # <td>Best for short queries such as voice commands or voice search.</td>
+ # </tr>
+ # <tr>
+ # <td><code>phone_call</code></td>
+ # <td>Best for audio that originated from a phone call (typically
+ # recorded at an 8khz sampling rate).</td>
+ # </tr>
+ # <tr>
+ # <td><code>video</code></td>
+ # <td>Best for audio that originated from video or includes multiple
+ # speakers. Ideally the audio is recorded at a 16khz or greater
+ # sampling rate. This is a premium model that costs more than the
+ # standard rate.</td>
+ # </tr>
+ # <tr>
+ # <td><code>default</code></td>
+ # <td>Best for audio that is not one of the specific audio models.
+ # For example, long-form audio. Ideally the audio is high-fidelity,
+ # recorded at a 16khz or greater sampling rate.</td>
+ # </tr>
+ # </table>
+ # @!attribute [rw] use_enhanced
+ # @return [Boolean]
+ # Set to true to use an enhanced model for speech recognition.
+ # If `use_enhanced` is set to true and the `model` field is not set, then
+ # an appropriate enhanced model is chosen if an enhanced model exists for
+ # the audio.
+ #
+ # If `use_enhanced` is true and an enhanced version of the specified model
+ # does not exist, then the speech is recognized using the standard version
+ # of the specified model.
+ class RecognitionConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+
+ # The encoding of the audio data sent in the request.
+ #
+ # All encodings support only 1 channel (mono) audio, unless the
+ # `audio_channel_count` and `enable_separate_recognition_per_channel` fields
+ # are set.
+ #
+ # For best results, the audio source should be captured and transmitted using
+ # a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
+ # recognition can be reduced if lossy codecs are used to capture or transmit
+ # audio, particularly if background noise is present. Lossy codecs include
+ # `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
+ #
+ # The `FLAC` and `WAV` audio file formats include a header that describes the
+ # included audio content. You can request recognition for `WAV` files that
+ # contain either `LINEAR16` or `MULAW` encoded audio.
+ # If you send `FLAC` or `WAV` audio file format in
+ # your request, you do not need to specify an `AudioEncoding`; the audio
+ # encoding format is determined from the file header. If you specify
+ # an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
+ # encoding configuration must match the encoding described in the audio
+ # header; otherwise the request returns an
+ # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
+ module AudioEncoding
+ # Not specified.
+ ENCODING_UNSPECIFIED = 0
+
+ # Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ LINEAR16 = 1
+
+ # `FLAC` (Free Lossless Audio
+ # Codec) is the recommended encoding because it is
+ # lossless--therefore recognition is not compromised--and
+ # requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
+ # encoding supports 16-bit and 24-bit samples, however, not all fields in
+ # `STREAMINFO` are supported.
+ FLAC = 2
+
+ # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ MULAW = 3
+
+ # Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
+ AMR = 4
+
+ # Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
+ AMR_WB = 5
+
+ # Opus encoded audio frames in Ogg container
+ # ([OggOpus](https://wiki.xiph.org/OggOpus)).
+ # `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
+ OGG_OPUS = 6
+
+ # Although the use of lossy encodings is not recommended, if a very low
+ # bitrate encoding is required, `OGG_OPUS` is highly preferred over
+ # Speex encoding. The [Speex](https://speex.org/) encoding supported by
+ # Cloud Speech API has a header byte in each block, as in MIME type
+ # `audio/x-speex-with-header-byte`.
+ # It is a variant of the RTP Speex encoding defined in
+ # [RFC 5574](https://tools.ietf.org/html/rfc5574).
+ # The stream is a sequence of blocks, one block per RTP packet. Each block
+ # starts with a byte containing the length of the block, in bytes, followed
+ # by one or more frames of Speex data, padded to an integral number of
+ # bytes (octets) as specified in RFC 5574. In other words, each RTP header
+ # is replaced with a single byte containing the block length. Only Speex
+ # wideband is supported. `sample_rate_hertz` must be 16000.
+ SPEEX_WITH_HEADER_BYTE = 7
+ end
+ end
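
To illustrate the multi-channel fields documented above, a brief sketch of a config that asks for separate recognition of a two-channel LINEAR16 recording (recall that billing covers every recognized channel); the values are examples only:

    stereo_config = Google::Cloud::Speech::V1::RecognitionConfig.new(
      encoding:                                :LINEAR16,
      sample_rate_hertz:                       16_000,
      language_code:                           "en-US",
      audio_channel_count:                     2,
      enable_separate_recognition_per_channel: true
    )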
+
+ # Config to enable speaker diarization.
+ # @!attribute [rw] enable_speaker_diarization
+ # @return [Boolean]
+ # If 'true', enables speaker detection for each recognized word in
+ # the top alternative of the recognition result using a speaker_tag provided
+ # in the WordInfo.
+ # @!attribute [rw] min_speaker_count
+ # @return [Integer]
+ # Minimum number of speakers in the conversation. This range gives you more
+ # flexibility by allowing the system to automatically determine the correct
+ # number of speakers. If not set, the default value is 2.
+ # @!attribute [rw] max_speaker_count
+ # @return [Integer]
+ # Maximum number of speakers in the conversation. This range gives you more
+ # flexibility by allowing the system to automatically determine the correct
+ # number of speakers. If not set, the default value is 6.
+ # @!attribute [r] speaker_tag
+ # @return [Integer]
+ # Unused.
+ class SpeakerDiarizationConfig
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
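
An illustrative sketch of attaching the diarization settings above to a `RecognitionConfig` through its `diarization_config` field; the speaker counts are example values:

    diarization = Google::Cloud::Speech::V1::SpeakerDiarizationConfig.new(
      enable_speaker_diarization: true,
      min_speaker_count:          2,
      max_speaker_count:          4
    )
    config = Google::Cloud::Speech::V1::RecognitionConfig.new(
      language_code:      "en-US",
      diarization_config: diarization
    )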
+
+ # Description of audio data to be recognized.
+ # @!attribute [rw] interaction_type
+ # @return [Google::Cloud::Speech::V1::RecognitionMetadata::InteractionType]
+ # The use case most closely describing the audio content to be recognized.
+ # @!attribute [rw] industry_naics_code_of_audio
+ # @return [Integer]
+ # The industry vertical to which this speech recognition request most
+ # closely applies. This is most indicative of the topics contained
+ # in the audio. Use the 6-digit NAICS code to identify the industry
+ # vertical - see https://www.naics.com/search/.
+ # @!attribute [rw] microphone_distance
+ # @return [Google::Cloud::Speech::V1::RecognitionMetadata::MicrophoneDistance]
+ # The audio type that most closely describes the audio being recognized.
+ # @!attribute [rw] original_media_type
+ # @return [Google::Cloud::Speech::V1::RecognitionMetadata::OriginalMediaType]
+ # The original media the speech was recorded on.
+ # @!attribute [rw] recording_device_type
+ # @return [Google::Cloud::Speech::V1::RecognitionMetadata::RecordingDeviceType]
+ # The type of device the speech was recorded with.
+ # @!attribute [rw] recording_device_name
+ # @return [String]
+ # The device used to make the recording. Examples 'Nexus 5X' or
+ # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
+ # 'Cardioid Microphone'.
+ # @!attribute [rw] original_mime_type
+ # @return [String]
+ # Mime type of the original audio file. For example `audio/m4a`,
+ # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
+ # A list of possible audio mime types is maintained at
+ # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
+ # @!attribute [rw] audio_topic
+ # @return [String]
+ # Description of the content. Eg. "Recordings of federal supreme court
+ # hearings from 2012".
+ class RecognitionMetadata
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+
+ # Use case categories that the audio recognition request can be described
+ # by.
+ module InteractionType
+ # Use case is either unknown or is something other than one of the other
+ # values below.
+ INTERACTION_TYPE_UNSPECIFIED = 0
+
+ # Multiple people in a conversation or discussion. For example in a
+ # meeting with two or more people actively participating. Typically
+ # all the primary people speaking would be in the same room (if not,
+ # see PHONE_CALL)
+ DISCUSSION = 1
+
+ # One or more persons lecturing or presenting to others, mostly
+ # uninterrupted.
+ PRESENTATION = 2
+
+ # A phone-call or video-conference in which two or more people, who are
+ # not in the same room, are actively participating.
+ PHONE_CALL = 3
+
+ # A recorded message intended for another person to listen to.
+ VOICEMAIL = 4
+
+ # Professionally produced audio (eg. TV Show, Podcast).
+ PROFESSIONALLY_PRODUCED = 5
+
+ # Transcribe spoken questions and queries into text.
+ VOICE_SEARCH = 6
+
+ # Transcribe voice commands, such as for controlling a device.
+ VOICE_COMMAND = 7
+
+ # Transcribe speech to text to create a written document, such as a
+ # text-message, email or report.
+ DICTATION = 8
+ end
+
+ # Enumerates the types of capture settings describing an audio file.
+ module MicrophoneDistance
+ # Audio type is not known.
+ MICROPHONE_DISTANCE_UNSPECIFIED = 0
+
+ # The audio was captured from a closely placed microphone. Eg. phone,
+ # dictaphone, or handheld microphone. Generally the speaker is within
+ # 1 meter of the microphone.
+ NEARFIELD = 1
+
+ # The speaker is within 3 meters of the microphone.
+ MIDFIELD = 2
+
+ # The speaker is more than 3 meters away from the microphone.
+ FARFIELD = 3
+ end
+
+ # The original media the speech was recorded on.
+ module OriginalMediaType
+ # Unknown original media type.
+ ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0
+
+ # The speech data is an audio recording.
+ AUDIO = 1
+
+ # The speech data was originally recorded on a video.
+ VIDEO = 2
+ end
+
+ # The type of device the speech was recorded with.
+ module RecordingDeviceType
+ # The recording device is unknown.
+ RECORDING_DEVICE_TYPE_UNSPECIFIED = 0
+
+ # Speech was recorded on a smartphone.
+ SMARTPHONE = 1
+
+ # Speech was recorded using a personal computer or tablet.
+ PC = 2
+
+ # Speech was recorded over a phone line.
+ PHONE_LINE = 3
+
+ # Speech was recorded in a vehicle.
+ VEHICLE = 4
+
+ # Speech was recorded outdoors.
+ OTHER_OUTDOOR_DEVICE = 5
+
+ # Speech was recorded indoors.
+ OTHER_INDOOR_DEVICE = 6
+ end
+ end
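
A short sketch, with example values only, of populating the metadata enums above and attaching them to a `RecognitionConfig` through its `metadata` field:

    metadata = Google::Cloud::Speech::V1::RecognitionMetadata.new(
      interaction_type:      :PHONE_CALL,
      microphone_distance:   :NEARFIELD,
      original_media_type:   :AUDIO,
      recording_device_type: :PHONE_LINE,
      original_mime_type:    "audio/mp3"
    )
    config = Google::Cloud::Speech::V1::RecognitionConfig.new(
      language_code: "en-US",
      metadata:      metadata
    )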
+
+ # Provides "hints" to the speech recognizer to favor specific words and phrases
+ # in the results.
+ # @!attribute [rw] phrases
+ # @return [Array<String>]
+ # A list of strings containing words and phrases "hints" so that
+ # the speech recognition is more likely to recognize them. This can be used
+ # to improve the accuracy for specific words and phrases, for example, if
+ # specific commands are typically spoken by the user. This can also be used
+ # to add additional words to the vocabulary of the recognizer. See
+ # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+ #
+ # List items can also be set to classes for groups of words that represent
+ # common concepts that occur in natural language. For example, rather than
+ # providing phrase hints for every month of the year, using the $MONTH class
+ # improves the likelihood of correctly transcribing audio that includes
+ # months.
+ class SpeechContext
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
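
A minimal sketch of the phrase-hint mechanism described above; the phrases are placeholders for terms a particular application expects to hear, and `$MONTH` is the class token mentioned in the field docs:

    context = Google::Cloud::Speech::V1::SpeechContext.new(
      phrases: ["weather forecast", "$MONTH"]
    )
    config = Google::Cloud::Speech::V1::RecognitionConfig.new(
      language_code:   "en-US",
      speech_contexts: [context]
    )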
+
+ # Contains audio data in the encoding specified in the `RecognitionConfig`.
+ # Either `content` or `uri` must be supplied. Supplying both or neither
+ # returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+ # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
+ # @!attribute [rw] content
+ # @return [String]
+ # The audio data bytes encoded as specified in
+ # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
+ # pure binary representation, whereas JSON representations use base64.
+ # @!attribute [rw] uri
+ # @return [String]
+ # URI that points to a file that contains audio data bytes as specified in
+ # `RecognitionConfig`. The file must not be compressed (for example, gzip).
+ # Currently, only Google Cloud Storage URIs are
+ # supported, which must be specified in the following format:
+ # `gs://bucket_name/object_name` (other URI formats return
+ # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+ # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ class RecognitionAudio
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
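
To illustrate the either/or constraint above, two hedged sketches: one inlines raw bytes via `content`, the other points at an uncompressed Cloud Storage object via `uri`. The file path and bucket are placeholders, and supplying both fields in one message is an error:

    # Inline audio bytes (raw binary, not base64, when using the gRPC API).
    inline_audio = Google::Cloud::Speech::V1::RecognitionAudio.new(
      content: File.binread("path/to/audio.raw")
    )

    # Or reference a file stored in Google Cloud Storage.
    stored_audio = Google::Cloud::Speech::V1::RecognitionAudio.new(
      uri: "gs://example-bucket/audio.raw"
    )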
+
+ # The only message returned to the client by the `Recognize` method. It
+ # contains the result as zero or more sequential `SpeechRecognitionResult`
+ # messages.
+ # @!attribute [rw] results
+ # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionResult>]
+ # Sequential list of transcription results corresponding to
+ # sequential portions of audio.
+ class RecognizeResponse
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # The only message returned to the client by the `LongRunningRecognize` method.
+ # It contains the result as zero or more sequential `SpeechRecognitionResult`
+ # messages. It is included in the `result.response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
+ # @!attribute [rw] results
+ # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionResult>]
+ # Sequential list of transcription results corresponding to
+ # sequential portions of audio.
+ class LongRunningRecognizeResponse
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Describes the progress of a long-running `LongRunningRecognize` call. It is
+ # included in the `metadata` field of the `Operation` returned by the
+ # `GetOperation` call of the `google::longrunning::Operations` service.
+ # @!attribute [rw] progress_percent
+ # @return [Integer]
+ # Approximate percentage of audio processed thus far. Guaranteed to be 100
+ # when the audio is fully processed and the results are available.
+ # @!attribute [rw] start_time
+ # @return [Google::Protobuf::Timestamp]
+ # Time when the request was received.
+ # @!attribute [rw] last_update_time
+ # @return [Google::Protobuf::Timestamp]
+ # Time of the most recent processing update.
+ class LongRunningRecognizeMetadata
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # `StreamingRecognizeResponse` is the only message returned to the client by
+ # `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
+ # messages are streamed back to the client. If there is no recognizable
+ # audio, and `single_utterance` is set to false, then no messages are streamed
+ # back to the client.
+ #
+ # Here's an example of a series of seven `StreamingRecognizeResponse`s that might
+ # be returned while processing audio:
+ #
+ # 1. results { alternatives { transcript: "tube" } stability: 0.01 }
+ #
+ # 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
+ #
+ # 3. results { alternatives { transcript: "to be" } stability: 0.9 }
+ # results { alternatives { transcript: " or not to be" } stability: 0.01 }
+ #
+ # 4. results { alternatives { transcript: "to be or not to be"
+ # confidence: 0.92 }
+ # alternatives { transcript: "to bee or not to bee" }
+ # is_final: true }
+ #
+ # 5. results { alternatives { transcript: " that's" } stability: 0.01 }
+ #
+ # 6. results { alternatives { transcript: " that is" } stability: 0.9 }
+ # results { alternatives { transcript: " the question" } stability: 0.01 }
+ #
+ # 7. results { alternatives { transcript: " that is the question"
+ # confidence: 0.98 }
+ # alternatives { transcript: " that was the question" }
+ # is_final: true }
+ #
+ # Notes:
+ #
+ # - Only two of the above responses #4 and #7 contain final results; they are
+ # indicated by `is_final: true`. Concatenating these together generates the
+ # full transcript: "to be or not to be that is the question".
+ #
+ # - The others contain interim `results`. #3 and #6 contain two interim
+ # `results`: the first portion has a high stability and is less likely to
+ # change; the second portion has a low stability and is very likely to
+ # change. A UI designer might choose to show only high stability `results`.
+ #
+ # - The specific `stability` and `confidence` values shown above are only for
+ # illustrative purposes. Actual values may vary.
+ #
+ # - In each response, only one of these fields will be set:
+ # `error`,
+ # `speech_event_type`, or
+ # one or more (repeated) `results`.
+ # @!attribute [rw] error
+ # @return [Google::Rpc::Status]
+ # If set, returns a {Google::Rpc::Status google.rpc.Status} message that
+ # specifies the error for the operation.
+ # @!attribute [rw] results
+ # @return [Array<Google::Cloud::Speech::V1::StreamingRecognitionResult>]
+ # This repeated list contains zero or more results that
+ # correspond to consecutive portions of the audio currently being processed.
+ # It contains zero or one `is_final=true` result (the newly settled portion),
+ # followed by zero or more `is_final=false` results (the interim results).
+ # @!attribute [rw] speech_event_type
+ # @return [Google::Cloud::Speech::V1::StreamingRecognizeResponse::SpeechEventType]
+ # Indicates the type of speech event.
+ class StreamingRecognizeResponse
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+
+ # Indicates the type of speech event.
+ module SpeechEventType
+ # No speech event specified.
+ SPEECH_EVENT_UNSPECIFIED = 0
+
+ # This event indicates that the server has detected the end of the user's
+ # speech utterance and expects no additional speech. Therefore, the server
+ # will not process additional audio (although it may subsequently return
+ # additional results). The client should stop sending additional audio
+ # data, half-close the gRPC connection, and wait for any additional results
+ # until the server closes the gRPC connection. This event is only sent if
+ # `single_utterance` was set to `true`, and is not used otherwise.
+ END_OF_SINGLE_UTTERANCE = 1
+ end
+ end
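
A hedged sketch of consuming the stream described above, assuming `responses` is an enumerable of `StreamingRecognizeResponse` messages returned by a streaming call:

    responses.each do |response|
      # Only one of error, speech_event_type, or results is set per response.
      raise response.error.message unless response.error.nil?

      response.results.each do |result|
        transcript = result.alternatives.first&.transcript
        if result.is_final
          puts "final:   #{transcript}"
        else
          puts "interim: #{transcript} (stability #{result.stability})"
        end
      end
    end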
+
+ # A streaming speech recognition result corresponding to a portion of the audio
+ # that is currently being processed.
+ # @!attribute [rw] alternatives
+ # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
+ # May contain one or more recognition hypotheses (up to the
+ # maximum specified in `max_alternatives`).
+ # These alternatives are ordered in terms of accuracy, with the top (first)
+ # alternative being the most probable, as ranked by the recognizer.
+ # @!attribute [rw] is_final
+ # @return [Boolean]
+ # If `false`, this `StreamingRecognitionResult` represents an
+ # interim result that may change. If `true`, this is the final time the
+ # speech service will return this particular `StreamingRecognitionResult`;
+ # the recognizer will not return any further hypotheses for this portion of
+ # the transcript and corresponding audio.
+ # @!attribute [rw] stability
+ # @return [Float]
+ # An estimate of the likelihood that the recognizer will not
+ # change its guess about this interim result. Values range from 0.0
+ # (completely unstable) to 1.0 (completely stable).
+ # This field is only provided for interim results (`is_final=false`).
+ # The default of 0.0 is a sentinel value indicating `stability` was not set.
+ # @!attribute [rw] result_end_time
+ # @return [Google::Protobuf::Duration]
+ # Time offset of the end of this result relative to the
+ # beginning of the audio.
+ # @!attribute [rw] channel_tag
+ # @return [Integer]
+ # For multi-channel audio, this is the channel number corresponding to the
+ # recognized result for the audio from that channel.
+ # For audio_channel_count = N, its output values can range from '1' to 'N'.
+ # @!attribute [r] language_code
+ # @return [String]
+ # The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+ # the language in this result. This language code was detected to have the
+ # most likelihood of being spoken in the audio.
+ class StreamingRecognitionResult
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # A speech recognition result corresponding to a portion of the audio.
+ # @!attribute [rw] alternatives
+ # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
+ # May contain one or more recognition hypotheses (up to the
+ # maximum specified in `max_alternatives`).
+ # These alternatives are ordered in terms of accuracy, with the top (first)
+ # alternative being the most probable, as ranked by the recognizer.
+ # @!attribute [rw] channel_tag
+ # @return [Integer]
+ # For multi-channel audio, this is the channel number corresponding to the
+ # recognized result for the audio from that channel.
+ # For audio_channel_count = N, its output values can range from '1' to 'N'.
+ class SpeechRecognitionResult
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Alternative hypotheses (a.k.a. n-best list).
+ # @!attribute [rw] transcript
+ # @return [String]
+ # Transcript text representing the words that the user spoke.
+ # @!attribute [rw] confidence
+ # @return [Float]
+ # The confidence estimate between 0.0 and 1.0. A higher number
+ # indicates an estimated greater likelihood that the recognized words are
+ # correct. This field is set only for the top alternative of a non-streaming
+ # result or of a streaming result where `is_final=true`.
+ # This field is not guaranteed to be accurate and users should not rely on it
+ # to be always provided.
+ # The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ # @!attribute [rw] words
+ # @return [Array<Google::Cloud::Speech::V1::WordInfo>]
+ # A list of word-specific information for each recognized word.
+ # Note: When `enable_speaker_diarization` is true, you will see all the words
+ # from the beginning of the audio.
+ class SpeechRecognitionAlternative
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # Word-specific information for recognized words.
+ # @!attribute [rw] start_time
+ # @return [Google::Protobuf::Duration]
+ # Time offset relative to the beginning of the audio,
+ # and corresponding to the start of the spoken word.
+ # This field is only set if `enable_word_time_offsets=true` and only
+ # in the top hypothesis.
+ # This is an experimental feature and the accuracy of the time offset can
+ # vary.
+ # @!attribute [rw] end_time
+ # @return [Google::Protobuf::Duration]
+ # Time offset relative to the beginning of the audio,
+ # and corresponding to the end of the spoken word.
+ # This field is only set if `enable_word_time_offsets=true` and only
+ # in the top hypothesis.
+ # This is an experimental feature and the accuracy of the time offset can
+ # vary.
+ # @!attribute [rw] word
+ # @return [String]
+ # The word corresponding to this set of information.
+ # @!attribute [r] speaker_tag
+ # @return [Integer]
+ # A distinct integer value is assigned for every speaker within
+ # the audio. This field specifies which one of those speakers was detected to
+ # have spoken this word. Value ranges from '1' to diarization_speaker_count.
+ # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
+ # top alternative.
+ class WordInfo
+ include Google::Protobuf::MessageExts
+ extend Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+ end
+ end
+ end
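
Finally, an illustrative sketch of walking a non-streaming `RecognizeResponse` down through the result, alternative, and word-level messages documented above; `response` is assumed to come from a request with `enable_word_time_offsets` set to `true`:

    response.results.each do |result|
      best = result.alternatives.first
      next if best.nil?

      puts "#{best.transcript} (confidence #{best.confidence})"
      best.words.each do |word|
        puts "  #{word.word}: #{word.start_time.seconds}s to #{word.end_time.seconds}s"
      end
    end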