google-cloud-speech 0.32.0 → 1.2.1
- checksums.yaml +4 -4
- data/.yardopts +4 -1
- data/AUTHENTICATION.md +169 -0
- data/LICENSE.md +201 -0
- data/MIGRATING.md +367 -0
- data/README.md +97 -52
- data/lib/google/cloud/speech/version.rb +22 -0
- data/lib/google/cloud/speech.rb +88 -139
- data/lib/google-cloud-speech.rb +19 -0
- metadata +111 -49
- data/LICENSE +0 -201
- data/lib/google/cloud/speech/v1/cloud_speech_pb.rb +0 -129
- data/lib/google/cloud/speech/v1/cloud_speech_services_pb.rb +0 -56
- data/lib/google/cloud/speech/v1/credentials.rb +0 -41
- data/lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb +0 -482
- data/lib/google/cloud/speech/v1/doc/google/longrunning/operations.rb +0 -93
- data/lib/google/cloud/speech/v1/doc/google/protobuf/any.rb +0 -130
- data/lib/google/cloud/speech/v1/doc/google/protobuf/duration.rb +0 -91
- data/lib/google/cloud/speech/v1/doc/google/rpc/status.rb +0 -84
- data/lib/google/cloud/speech/v1/helpers.rb +0 -136
- data/lib/google/cloud/speech/v1/speech_client.rb +0 -331
- data/lib/google/cloud/speech/v1/speech_client_config.json +0 -41
- data/lib/google/cloud/speech/v1/stream.rb +0 -614
- data/lib/google/cloud/speech/v1.rb +0 -157
- data/lib/google/cloud/speech/v1p1beta1/cloud_speech_pb.rb +0 -190
- data/lib/google/cloud/speech/v1p1beta1/cloud_speech_services_pb.rb +0 -56
- data/lib/google/cloud/speech/v1p1beta1/credentials.rb +0 -41
- data/lib/google/cloud/speech/v1p1beta1/doc/google/cloud/speech/v1p1beta1/cloud_speech.rb +0 -719
- data/lib/google/cloud/speech/v1p1beta1/doc/google/longrunning/operations.rb +0 -93
- data/lib/google/cloud/speech/v1p1beta1/doc/google/protobuf/any.rb +0 -130
- data/lib/google/cloud/speech/v1p1beta1/doc/google/protobuf/duration.rb +0 -91
- data/lib/google/cloud/speech/v1p1beta1/doc/google/rpc/status.rb +0 -84
- data/lib/google/cloud/speech/v1p1beta1/helpers.rb +0 -136
- data/lib/google/cloud/speech/v1p1beta1/speech_client.rb +0 -331
- data/lib/google/cloud/speech/v1p1beta1/speech_client_config.json +0 -41
- data/lib/google/cloud/speech/v1p1beta1/stream.rb +0 -614
- data/lib/google/cloud/speech/v1p1beta1.rb +0 -157
data/lib/google/cloud/speech/v1p1beta1/doc/google/cloud/speech/v1p1beta1/cloud_speech.rb
@@ -1,719 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-module Google
-  module Cloud
-    module Speech
-      module V1p1beta1
-        # The top-level message sent by the client for the `Recognize` method.
-        # @!attribute [rw] config
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionConfig]
-        #     *Required* Provides information to the recognizer that specifies how to
-        #     process the request.
-        # @!attribute [rw] audio
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionAudio]
-        #     *Required* The audio data to be recognized.
-        class RecognizeRequest; end
-
-        # The top-level message sent by the client for the `LongRunningRecognize`
-        # method.
-        # @!attribute [rw] config
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionConfig]
-        #     *Required* Provides information to the recognizer that specifies how to
-        #     process the request.
-        # @!attribute [rw] audio
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionAudio]
-        #     *Required* The audio data to be recognized.
-        class LongRunningRecognizeRequest; end
-
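For reference, a minimal sketch of how these two request shapes were driven through the 0.32.x client that this diff removes (hash arguments were coerced into the proto messages; the bucket and object names are placeholders):

```ruby
require "google/cloud/speech"

# 0.x-era handle onto the v1p1beta1 surface removed in this diff.
speech = Google::Cloud::Speech.new version: :v1p1beta1

# Coerced into RecognitionConfig / RecognitionAudio.
config = { encoding: :LINEAR16, sample_rate_hertz: 16_000, language_code: "en-US" }
audio  = { uri: "gs://my-bucket/my-audio.raw" } # placeholder bucket/object

response = speech.recognize config, audio # synchronous Recognize call
```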
-        # The top-level message sent by the client for the `StreamingRecognize` method.
-        # Multiple `StreamingRecognizeRequest` messages are sent. The first message
-        # must contain a `streaming_config` message and must not contain `audio` data.
-        # All subsequent messages must contain `audio` data and must not contain a
-        # `streaming_config` message.
-        # @!attribute [rw] streaming_config
-        #   @return [Google::Cloud::Speech::V1p1beta1::StreamingRecognitionConfig]
-        #     Provides information to the recognizer that specifies how to process the
-        #     request. The first `StreamingRecognizeRequest` message must contain a
-        #     `streaming_config` message.
-        # @!attribute [rw] audio_content
-        #   @return [String]
-        #     The audio data to be recognized. Sequential chunks of audio data are sent
-        #     in sequential `StreamingRecognizeRequest` messages. The first
-        #     `StreamingRecognizeRequest` message must not contain `audio_content` data
-        #     and all subsequent `StreamingRecognizeRequest` messages must contain
-        #     `audio_content` data. The audio bytes must be encoded as specified in
-        #     `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
-        #     pure binary representation (not base64). See
-        #     [content limits](https://cloud.google.com/speech-to-text/quotas#content).
-        class StreamingRecognizeRequest; end
-
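The config-first, audio-after ordering required above can be sketched as a plain enumerable of request hashes; the chunk size and file name here are arbitrary placeholders:

```ruby
# Illustrative only: first message carries streaming_config, every
# subsequent message carries raw (not base64) audio_content bytes.
requests = Enumerator.new do |y|
  y << { streaming_config: { config: { encoding:          :LINEAR16,
                                       sample_rate_hertz: 16_000,
                                       language_code:     "en-US" } } }
  File.open "my-audio.raw", "rb" do |f|
    while (chunk = f.read(32 * 1024)) # nil at EOF ends the stream
      y << { audio_content: chunk }
    end
  end
end
```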
-        # Provides information to the recognizer that specifies how to process the
-        # request.
-        # @!attribute [rw] config
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionConfig]
-        #     *Required* Provides information to the recognizer that specifies how to
-        #     process the request.
-        # @!attribute [rw] single_utterance
-        #   @return [true, false]
-        #     *Optional* If `false` or omitted, the recognizer will perform continuous
-        #     recognition (continuing to wait for and process audio even if the user
-        #     pauses speaking) until the client closes the input stream (gRPC API) or
-        #     until the maximum time limit has been reached. May return multiple
-        #     `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
-        #
-        #     If `true`, the recognizer will detect a single spoken utterance. When it
-        #     detects that the user has paused or stopped speaking, it will return an
-        #     `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
-        #     more than one `StreamingRecognitionResult` with the `is_final` flag set to
-        #     `true`.
-        # @!attribute [rw] interim_results
-        #   @return [true, false]
-        #     *Optional* If `true`, interim results (tentative hypotheses) may be
-        #     returned as they become available (these interim results are indicated with
-        #     the `is_final=false` flag).
-        #     If `false` or omitted, only `is_final=true` result(s) are returned.
-        class StreamingRecognitionConfig; end
-
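A sketch of a streaming config for a voice-command style interaction, exercising both flags documented above:

```ruby
streaming_config = {
  config: {
    encoding:          :LINEAR16,
    sample_rate_hertz: 16_000,
    language_code:     "en-US"
  },
  single_utterance: true, # stop after END_OF_SINGLE_UTTERANCE
  interim_results:  true  # emit is_final=false hypotheses as they form
}
```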
-        # Provides information to the recognizer that specifies how to process the
-        # request.
-        # @!attribute [rw] encoding
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionConfig::AudioEncoding]
-        #     Encoding of audio data sent in all `RecognitionAudio` messages.
-        #     This field is optional for `FLAC` and `WAV` audio files and required
-        #     for all other audio formats. For details, see {Google::Cloud::Speech::V1p1beta1::RecognitionConfig::AudioEncoding AudioEncoding}.
-        # @!attribute [rw] sample_rate_hertz
-        #   @return [Integer]
-        #     Sample rate in Hertz of the audio data sent in all
-        #     `RecognitionAudio` messages. Valid values are: 8000-48000.
-        #     16000 is optimal. For best results, set the sampling rate of the audio
-        #     source to 16000 Hz. If that's not possible, use the native sample rate of
-        #     the audio source (instead of re-sampling).
-        #     This field is optional for `FLAC` and `WAV` audio files and required
-        #     for all other audio formats. For details, see {Google::Cloud::Speech::V1p1beta1::RecognitionConfig::AudioEncoding AudioEncoding}.
-        # @!attribute [rw] audio_channel_count
-        #   @return [Integer]
-        #     *Optional* The number of channels in the input audio data.
-        #     ONLY set this for MULTI-CHANNEL recognition.
-        #     Valid values for LINEAR16 and FLAC are `1`-`8`.
-        #     Valid values for OGG_OPUS are '1'-'254'.
-        #     Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
-        #     If `0` or omitted, defaults to one channel (mono).
-        #     Note: We only recognize the first channel by default.
-        #     To perform independent recognition on each channel set
-        #     `enable_separate_recognition_per_channel` to 'true'.
-        # @!attribute [rw] enable_separate_recognition_per_channel
-        #   @return [true, false]
-        #     This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
-        #     to get each channel recognized separately. The recognition result will
-        #     contain a `channel_tag` field to state which channel that result belongs
-        #     to. If this is not true, we will only recognize the first channel. The
-        #     request is billed cumulatively for all channels recognized:
-        #     `audio_channel_count` multiplied by the length of the audio.
-        # @!attribute [rw] language_code
-        #   @return [String]
-        #     *Required* The language of the supplied audio as a
-        #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
-        #     Example: "en-US".
-        #     See [Language Support](https://cloud.google.com/speech-to-text/docs/languages)
-        #     for a list of the currently supported language codes.
-        # @!attribute [rw] alternative_language_codes
-        #   @return [Array<String>]
-        #     *Optional* A list of up to 3 additional
-        #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
-        #     listing possible alternative languages of the supplied audio.
-        #     See [Language Support](https://cloud.google.com/speech-to-text/docs/languages)
-        #     for a list of the currently supported language codes.
-        #     If alternative languages are listed, recognition result will contain
-        #     recognition in the most likely language detected including the main
-        #     language_code. The recognition result will include the language tag
-        #     of the language detected in the audio.
-        #     Note: This feature is only supported for Voice Command and Voice Search
-        #     use cases and performance may vary for other use cases (e.g., phone call
-        #     transcription).
-        # @!attribute [rw] max_alternatives
-        #   @return [Integer]
-        #     *Optional* Maximum number of recognition hypotheses to be returned.
-        #     Specifically, the maximum number of `SpeechRecognitionAlternative` messages
-        #     within each `SpeechRecognitionResult`.
-        #     The server may return fewer than `max_alternatives`.
-        #     Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
-        #     one. If omitted, will return a maximum of one.
-        # @!attribute [rw] profanity_filter
-        #   @return [true, false]
-        #     *Optional* If set to `true`, the server will attempt to filter out
-        #     profanities, replacing all but the initial character in each filtered word
-        #     with asterisks, e.g. "f***". If set to `false` or omitted, profanities
-        #     won't be filtered out.
-        # @!attribute [rw] speech_contexts
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::SpeechContext>]
-        #     *Optional* array of {Google::Cloud::Speech::V1p1beta1::SpeechContext SpeechContext}.
-        #     A means to provide context to assist the speech recognition. For more
-        #     information, see [Phrase Hints](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints).
-        # @!attribute [rw] enable_word_time_offsets
-        #   @return [true, false]
-        #     *Optional* If `true`, the top result includes a list of words and
-        #     the start and end time offsets (timestamps) for those words. If
-        #     `false`, no word-level time offset information is returned. The default is
-        #     `false`.
-        # @!attribute [rw] enable_word_confidence
-        #   @return [true, false]
-        #     *Optional* If `true`, the top result includes a list of words and the
-        #     confidence for those words. If `false`, no word-level confidence
-        #     information is returned. The default is `false`.
-        # @!attribute [rw] enable_automatic_punctuation
-        #   @return [true, false]
-        #     *Optional* If 'true', adds punctuation to recognition result hypotheses.
-        #     This feature is only available in select languages. Setting this for
-        #     requests in other languages has no effect at all.
-        #     The default 'false' value does not add punctuation to result hypotheses.
-        #     Note: This is currently offered as an experimental service, complimentary
-        #     to all users. In the future this may be exclusively available as a
-        #     premium feature.
-        # @!attribute [rw] enable_speaker_diarization
-        #   @return [true, false]
-        #     *Optional* If 'true', enables speaker detection for each recognized word in
-        #     the top alternative of the recognition result using a speaker_tag provided
-        #     in the WordInfo.
-        #     Note: When this is true, we send all the words from the beginning of the
-        #     audio for the top alternative in every consecutive STREAMING responses.
-        #     This is done in order to improve our speaker tags as our models learn to
-        #     identify the speakers in the conversation over time.
-        #     For non-streaming requests, the diarization results will be provided only
-        #     in the top alternative of the FINAL SpeechRecognitionResult.
-        # @!attribute [rw] diarization_speaker_count
-        #   @return [Integer]
-        #     *Optional*
-        #     If set, specifies the estimated number of speakers in the conversation.
-        #     If not set, defaults to '2'.
-        #     Ignored unless enable_speaker_diarization is set to true.
-        # @!attribute [rw] metadata
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionMetadata]
-        #     *Optional* Metadata regarding this request.
-        # @!attribute [rw] model
-        #   @return [String]
-        #     *Optional* Which model to select for the given request. Select the model
-        #     best suited to your domain to get best results. If a model is not
-        #     explicitly specified, then we auto-select a model based on the parameters
-        #     in the RecognitionConfig.
-        #     <table>
-        #       <tr>
-        #         <td><b>Model</b></td>
-        #         <td><b>Description</b></td>
-        #       </tr>
-        #       <tr>
-        #         <td><code>command_and_search</code></td>
-        #         <td>Best for short queries such as voice commands or voice search.</td>
-        #       </tr>
-        #       <tr>
-        #         <td><code>phone_call</code></td>
-        #         <td>Best for audio that originated from a phone call (typically
-        #             recorded at an 8khz sampling rate).</td>
-        #       </tr>
-        #       <tr>
-        #         <td><code>video</code></td>
-        #         <td>Best for audio that originated from video or includes multiple
-        #             speakers. Ideally the audio is recorded at a 16khz or greater
-        #             sampling rate. This is a premium model that costs more than the
-        #             standard rate.</td>
-        #       </tr>
-        #       <tr>
-        #         <td><code>default</code></td>
-        #         <td>Best for audio that is not one of the specific audio models.
-        #             For example, long-form audio. Ideally the audio is high-fidelity,
-        #             recorded at a 16khz or greater sampling rate.</td>
-        #       </tr>
-        #     </table>
-        # @!attribute [rw] use_enhanced
-        #   @return [true, false]
-        #     *Optional* Set to true to use an enhanced model for speech recognition.
-        #     If `use_enhanced` is set to true and the `model` field is not set, then
-        #     an appropriate enhanced model is chosen if:
-        #     1. project is eligible for requesting enhanced models
-        #     2. an enhanced model exists for the audio
-        #
-        #     If `use_enhanced` is true and an enhanced version of the specified model
-        #     does not exist, then the speech is recognized using the standard version
-        #     of the specified model.
-        #
-        #     Enhanced speech models require that you opt-in to data logging using
-        #     instructions in the
-        #     [documentation](https://cloud.google.com/speech-to-text/docs/enable-data-logging). If you set
-        #     `use_enhanced` to true and you have not enabled audio logging, then you
-        #     will receive an error.
-        class RecognitionConfig
-          # The encoding of the audio data sent in the request.
-          #
-          # All encodings support only 1 channel (mono) audio.
-          #
-          # For best results, the audio source should be captured and transmitted using
-          # a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
-          # recognition can be reduced if lossy codecs are used to capture or transmit
-          # audio, particularly if background noise is present. Lossy codecs include
-          # `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
-          #
-          # The `FLAC` and `WAV` audio file formats include a header that describes the
-          # included audio content. You can request recognition for `WAV` files that
-          # contain either `LINEAR16` or `MULAW` encoded audio.
-          # If you send `FLAC` or `WAV` audio file format in
-          # your request, you do not need to specify an `AudioEncoding`; the audio
-          # encoding format is determined from the file header. If you specify
-          # an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
-          # encoding configuration must match the encoding described in the audio
-          # header; otherwise the request returns an
-          # {Google::Rpc::Code::INVALID_ARGUMENT} error code.
-          module AudioEncoding
-            # Not specified.
-            ENCODING_UNSPECIFIED = 0
-
-            # Uncompressed 16-bit signed little-endian samples (Linear PCM).
-            LINEAR16 = 1
-
-            # `FLAC` (Free Lossless Audio
-            # Codec) is the recommended encoding because it is
-            # lossless--therefore recognition is not compromised--and
-            # requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
-            # encoding supports 16-bit and 24-bit samples, however, not all fields in
-            # `STREAMINFO` are supported.
-            FLAC = 2
-
-            # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
-            MULAW = 3
-
-            # Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
-            AMR = 4
-
-            # Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
-            AMR_WB = 5
-
-            # Opus encoded audio frames in Ogg container
-            # ([OggOpus](https://wiki.xiph.org/OggOpus)).
-            # `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
-            OGG_OPUS = 6
-
-            # Although the use of lossy encodings is not recommended, if a very low
-            # bitrate encoding is required, `OGG_OPUS` is highly preferred over
-            # Speex encoding. The [Speex](https://speex.org/) encoding supported by
-            # Cloud Speech API has a header byte in each block, as in MIME type
-            # `audio/x-speex-with-header-byte`.
-            # It is a variant of the RTP Speex encoding defined in
-            # [RFC 5574](https://tools.ietf.org/html/rfc5574).
-            # The stream is a sequence of blocks, one block per RTP packet. Each block
-            # starts with a byte containing the length of the block, in bytes, followed
-            # by one or more frames of Speex data, padded to an integral number of
-            # bytes (octets) as specified in RFC 5574. In other words, each RTP header
-            # is replaced with a single byte containing the block length. Only Speex
-            # wideband is supported. `sample_rate_hertz` must be 16000.
-            SPEEX_WITH_HEADER_BYTE = 7
-          end
-        end
-
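Putting several of the optional fields together, a hypothetical config might look like this (values are illustrative, not recommendations; per the docs above, `use_enhanced` additionally requires the data-logging opt-in):

```ruby
config = {
  encoding:                     :FLAC,
  sample_rate_hertz:            16_000,
  language_code:                "en-US",
  max_alternatives:             3,
  profanity_filter:             true,
  enable_word_time_offsets:     true,
  enable_automatic_punctuation: true,
  model:                        "phone_call",
  use_enhanced:                 true
}
```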
-        # Description of audio data to be recognized.
-        # @!attribute [rw] interaction_type
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionMetadata::InteractionType]
-        #     The use case most closely describing the audio content to be recognized.
-        # @!attribute [rw] industry_naics_code_of_audio
-        #   @return [Integer]
-        #     The industry vertical to which this speech recognition request most
-        #     closely applies. This is most indicative of the topics contained
-        #     in the audio. Use the 6-digit NAICS code to identify the industry
-        #     vertical - see https://www.naics.com/search/.
-        # @!attribute [rw] microphone_distance
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionMetadata::MicrophoneDistance]
-        #     The audio type that most closely describes the audio being recognized.
-        # @!attribute [rw] original_media_type
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionMetadata::OriginalMediaType]
-        #     The original media the speech was recorded on.
-        # @!attribute [rw] recording_device_type
-        #   @return [Google::Cloud::Speech::V1p1beta1::RecognitionMetadata::RecordingDeviceType]
-        #     The type of device the speech was recorded with.
-        # @!attribute [rw] recording_device_name
-        #   @return [String]
-        #     The device used to make the recording. Examples 'Nexus 5X' or
-        #     'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
-        #     'Cardioid Microphone'.
-        # @!attribute [rw] original_mime_type
-        #   @return [String]
-        #     Mime type of the original audio file. For example `audio/m4a`,
-        #     `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
-        #     A list of possible audio mime types is maintained at
-        #     http://www.iana.org/assignments/media-types/media-types.xhtml#audio
-        # @!attribute [rw] obfuscated_id
-        #   @return [Integer]
-        #     Obfuscated (privacy-protected) ID of the user, to identify number of
-        #     unique users using the service.
-        # @!attribute [rw] audio_topic
-        #   @return [String]
-        #     Description of the content. Eg. "Recordings of federal supreme court
-        #     hearings from 2012".
-        class RecognitionMetadata
-          # Use case categories that the audio recognition request can be described
-          # by.
-          module InteractionType
-            # Use case is either unknown or is something other than one of the other
-            # values below.
-            INTERACTION_TYPE_UNSPECIFIED = 0
-
-            # Multiple people in a conversation or discussion. For example in a
-            # meeting with two or more people actively participating. Typically
-            # all the primary people speaking would be in the same room (if not,
-            # see PHONE_CALL)
-            DISCUSSION = 1
-
-            # One or more persons lecturing or presenting to others, mostly
-            # uninterrupted.
-            PRESENTATION = 2
-
-            # A phone-call or video-conference in which two or more people, who are
-            # not in the same room, are actively participating.
-            PHONE_CALL = 3
-
-            # A recorded message intended for another person to listen to.
-            VOICEMAIL = 4
-
-            # Professionally produced audio (eg. TV Show, Podcast).
-            PROFESSIONALLY_PRODUCED = 5
-
-            # Transcribe spoken questions and queries into text.
-            VOICE_SEARCH = 6
-
-            # Transcribe voice commands, such as for controlling a device.
-            VOICE_COMMAND = 7
-
-            # Transcribe speech to text to create a written document, such as a
-            # text-message, email or report.
-            DICTATION = 8
-          end
-
-          # Enumerates the types of capture settings describing an audio file.
-          module MicrophoneDistance
-            # Audio type is not known.
-            MICROPHONE_DISTANCE_UNSPECIFIED = 0
-
-            # The audio was captured from a closely placed microphone. Eg. phone,
-            # dictaphone, or handheld microphone. Generally if the speaker is within
-            # 1 meter of the microphone.
-            NEARFIELD = 1
-
-            # The speaker is within 3 meters of the microphone.
-            MIDFIELD = 2
-
-            # The speaker is more than 3 meters away from the microphone.
-            FARFIELD = 3
-          end
-
-          # The original media the speech was recorded on.
-          module OriginalMediaType
-            # Unknown original media type.
-            ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0
-
-            # The speech data is an audio recording.
-            AUDIO = 1
-
-            # The speech data originally recorded on a video.
-            VIDEO = 2
-          end
-
-          # The type of device the speech was recorded with.
-          module RecordingDeviceType
-            # The recording device is unknown.
-            RECORDING_DEVICE_TYPE_UNSPECIFIED = 0
-
-            # Speech was recorded on a smartphone.
-            SMARTPHONE = 1
-
-            # Speech was recorded using a personal computer or tablet.
-            PC = 2
-
-            # Speech was recorded over a phone line.
-            PHONE_LINE = 3
-
-            # Speech was recorded in a vehicle.
-            VEHICLE = 4
-
-            # Speech was recorded outdoors.
-            OTHER_OUTDOOR_DEVICE = 5
-
-            # Speech was recorded indoors.
-            OTHER_INDOOR_DEVICE = 6
-          end
-        end
-
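The metadata message slots into a config as one more hash; a sketch using the enums defined above:

```ruby
metadata = {
  interaction_type:      :PHONE_CALL,
  microphone_distance:   :NEARFIELD,
  original_media_type:   :AUDIO,
  recording_device_type: :PHONE_LINE,
  original_mime_type:    "audio/mp3"
}
config = { language_code: "en-US", metadata: metadata }
```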
-        # Provides "hints" to the speech recognizer to favor specific words and phrases
-        # in the results.
-        # @!attribute [rw] phrases
-        #   @return [Array<String>]
-        #     *Optional* A list of strings containing words and phrases "hints" so that
-        #     the speech recognition is more likely to recognize them. This can be used
-        #     to improve the accuracy for specific words and phrases, for example, if
-        #     specific commands are typically spoken by the user. This can also be used
-        #     to add additional words to the vocabulary of the recognizer. See
-        #     [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
-        class SpeechContext; end
-
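A sketch of phrase hints in a config; the phrases are placeholders for whatever vocabulary your users actually say:

```ruby
config = {
  language_code:   "en-US",
  speech_contexts: [{ phrases: ["weather forecast", "set a timer"] }]
}
```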
-        # Contains audio data in the encoding specified in the `RecognitionConfig`.
-        # Either `content` or `uri` must be supplied. Supplying both or neither
-        # returns {Google::Rpc::Code::INVALID_ARGUMENT}. See
-        # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
-        # @!attribute [rw] content
-        #   @return [String]
-        #     The audio data bytes encoded as specified in
-        #     `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
-        #     pure binary representation, whereas JSON representations use base64.
-        # @!attribute [rw] uri
-        #   @return [String]
-        #     URI that points to a file that contains audio data bytes as specified in
-        #     `RecognitionConfig`. The file must not be compressed (for example, gzip).
-        #     Currently, only Google Cloud Storage URIs are
-        #     supported, which must be specified in the following format:
-        #     `gs://bucket_name/object_name` (other URI formats return
-        #     {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
-        #     [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
-        class RecognitionAudio; end
-
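The either/or contract translates to exactly one key in the audio hash; both values here are placeholders:

```ruby
inline_audio = { content: File.binread("my-audio.raw") } # raw bytes, not base64
stored_audio = { uri: "gs://my-bucket/my-audio.raw" }    # Cloud Storage only
```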
-        # The only message returned to the client by the `Recognize` method. It
-        # contains the result as zero or more sequential `SpeechRecognitionResult`
-        # messages.
-        # @!attribute [rw] results
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::SpeechRecognitionResult>]
-        #     Output only. Sequential list of transcription results corresponding to
-        #     sequential portions of audio.
-        class RecognizeResponse; end
-
-        # The only message returned to the client by the `LongRunningRecognize` method.
-        # It contains the result as zero or more sequential `SpeechRecognitionResult`
-        # messages. It is included in the `result.response` field of the `Operation`
-        # returned by the `GetOperation` call of the `google::longrunning::Operations`
-        # service.
-        # @!attribute [rw] results
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::SpeechRecognitionResult>]
-        #     Output only. Sequential list of transcription results corresponding to
-        #     sequential portions of audio.
-        class LongRunningRecognizeResponse; end
-
-        # Describes the progress of a long-running `LongRunningRecognize` call. It is
-        # included in the `metadata` field of the `Operation` returned by the
-        # `GetOperation` call of the `google::longrunning::Operations` service.
-        # @!attribute [rw] progress_percent
-        #   @return [Integer]
-        #     Approximate percentage of audio processed thus far. Guaranteed to be 100
-        #     when the audio is fully processed and the results are available.
-        # @!attribute [rw] start_time
-        #   @return [Google::Protobuf::Timestamp]
-        #     Time when the request was received.
-        # @!attribute [rw] last_update_time
-        #   @return [Google::Protobuf::Timestamp]
-        #     Time of the most recent processing update.
-        class LongRunningRecognizeMetadata; end
-
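Continuing the earlier sketch, the 0.x `long_running_recognize` returned a `Google::Gax::Operation`, so the LRO plumbing described above looked roughly like this:

```ruby
operation = speech.long_running_recognize config, audio
operation.wait_until_done! # blocks, polling GetOperation
raise operation.results.message if operation.error?

# LongRunningRecognizeResponse, from the Operation's response field.
operation.response.results.each do |result|
  puts result.alternatives.first.transcript
end
```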
-        # `StreamingRecognizeResponse` is the only message returned to the client by
-        # `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
-        # messages are streamed back to the client. If there is no recognizable
-        # audio, and `single_utterance` is set to false, then no messages are streamed
-        # back to the client.
-        #
-        # Here's an example of a series of seven `StreamingRecognizeResponse`s that might
-        # be returned while processing audio:
-        #
-        # 1. results { alternatives { transcript: "tube" } stability: 0.01 }
-        #
-        # 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
-        #
-        # 3. results { alternatives { transcript: "to be" } stability: 0.9 }
-        #    results { alternatives { transcript: " or not to be" } stability: 0.01 }
-        #
-        # 4. results { alternatives { transcript: "to be or not to be"
-        #                             confidence: 0.92 }
-        #              alternatives { transcript: "to bee or not to bee" }
-        #    is_final: true }
-        #
-        # 5. results { alternatives { transcript: " that's" } stability: 0.01 }
-        #
-        # 6. results { alternatives { transcript: " that is" } stability: 0.9 }
-        #    results { alternatives { transcript: " the question" } stability: 0.01 }
-        #
-        # 7. results { alternatives { transcript: " that is the question"
-        #                             confidence: 0.98 }
-        #              alternatives { transcript: " that was the question" }
-        #    is_final: true }
-        #
-        # Notes:
-        #
-        # * Only two of the above responses #4 and #7 contain final results; they are
-        #   indicated by `is_final: true`. Concatenating these together generates the
-        #   full transcript: "to be or not to be that is the question".
-        #
-        # * The others contain interim `results`. #3 and #6 contain two interim
-        #   `results`: the first portion has a high stability and is less likely to
-        #   change; the second portion has a low stability and is very likely to
-        #   change. A UI designer might choose to show only high stability `results`.
-        #
-        # * The specific `stability` and `confidence` values shown above are only for
-        #   illustrative purposes. Actual values may vary.
-        #
-        # * In each response, only one of these fields will be set:
-        #   `error`,
-        #   `speech_event_type`, or
-        #   one or more (repeated) `results`.
-        # @!attribute [rw] error
-        #   @return [Google::Rpc::Status]
-        #     Output only. If set, returns a {Google::Rpc::Status} message that
-        #     specifies the error for the operation.
-        # @!attribute [rw] results
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::StreamingRecognitionResult>]
-        #     Output only. This repeated list contains zero or more results that
-        #     correspond to consecutive portions of the audio currently being processed.
-        #     It contains zero or one `is_final=true` result (the newly settled portion),
-        #     followed by zero or more `is_final=false` results (the interim results).
-        # @!attribute [rw] speech_event_type
-        #   @return [Google::Cloud::Speech::V1p1beta1::StreamingRecognizeResponse::SpeechEventType]
-        #     Output only. Indicates the type of speech event.
-        class StreamingRecognizeResponse
-          # Indicates the type of speech event.
-          module SpeechEventType
-            # No speech event specified.
-            SPEECH_EVENT_UNSPECIFIED = 0
-
-            # This event indicates that the server has detected the end of the user's
-            # speech utterance and expects no additional speech. Therefore, the server
-            # will not process additional audio (although it may subsequently return
-            # additional results). The client should stop sending additional audio
-            # data, half-close the gRPC connection, and wait for any additional results
-            # until the server closes the gRPC connection. This event is only sent if
-            # `single_utterance` was set to `true`, and is not used otherwise.
-            END_OF_SINGLE_UTTERANCE = 1
-          end
-        end
-
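The removed helpers.rb and stream.rb (see the file list above) wrapped this bidirectional protocol; as documented for the 0.x gem, usage was roughly the following, with the file name a placeholder:

```ruby
stream = speech.streaming_recognize streaming_config
stream.send File.binread("my-audio.raw") # audio chunks after the config
stream.stop                              # half-close: no more audio

stream.wait_until_complete!              # wait for the final responses
stream.results.each do |result|
  puts result.alternatives.first.transcript if result.is_final
end
```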
-        # A streaming speech recognition result corresponding to a portion of the audio
-        # that is currently being processed.
-        # @!attribute [rw] alternatives
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::SpeechRecognitionAlternative>]
-        #     Output only. May contain one or more recognition hypotheses (up to the
-        #     maximum specified in `max_alternatives`).
-        #     These alternatives are ordered in terms of accuracy, with the top (first)
-        #     alternative being the most probable, as ranked by the recognizer.
-        # @!attribute [rw] is_final
-        #   @return [true, false]
-        #     Output only. If `false`, this `StreamingRecognitionResult` represents an
-        #     interim result that may change. If `true`, this is the final time the
-        #     speech service will return this particular `StreamingRecognitionResult`,
-        #     the recognizer will not return any further hypotheses for this portion of
-        #     the transcript and corresponding audio.
-        # @!attribute [rw] stability
-        #   @return [Float]
-        #     Output only. An estimate of the likelihood that the recognizer will not
-        #     change its guess about this interim result. Values range from 0.0
-        #     (completely unstable) to 1.0 (completely stable).
-        #     This field is only provided for interim results (`is_final=false`).
-        #     The default of 0.0 is a sentinel value indicating `stability` was not set.
-        # @!attribute [rw] result_end_time
-        #   @return [Google::Protobuf::Duration]
-        #     Output only. Time offset of the end of this result relative to the
-        #     beginning of the audio.
-        # @!attribute [rw] channel_tag
-        #   @return [Integer]
-        #     For multi-channel audio, this is the channel number corresponding to the
-        #     recognized result for the audio from that channel.
-        #     For audio_channel_count = N, its output values can range from '1' to 'N'.
-        # @!attribute [rw] language_code
-        #   @return [String]
-        #     Output only. The
-        #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-        #     language in this result. This language code was detected to have the most
-        #     likelihood of being spoken in the audio.
-        class StreamingRecognitionResult; end
-
-        # A speech recognition result corresponding to a portion of the audio.
-        # @!attribute [rw] alternatives
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::SpeechRecognitionAlternative>]
-        #     Output only. May contain one or more recognition hypotheses (up to the
-        #     maximum specified in `max_alternatives`).
-        #     These alternatives are ordered in terms of accuracy, with the top (first)
-        #     alternative being the most probable, as ranked by the recognizer.
-        # @!attribute [rw] channel_tag
-        #   @return [Integer]
-        #     For multi-channel audio, this is the channel number corresponding to the
-        #     recognized result for the audio from that channel.
-        #     For audio_channel_count = N, its output values can range from '1' to 'N'.
-        # @!attribute [rw] language_code
-        #   @return [String]
-        #     Output only. The
-        #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-        #     language in this result. This language code was detected to have the most
-        #     likelihood of being spoken in the audio.
-        class SpeechRecognitionResult; end
-
-        # Alternative hypotheses (a.k.a. n-best list).
-        # @!attribute [rw] transcript
-        #   @return [String]
-        #     Output only. Transcript text representing the words that the user spoke.
-        # @!attribute [rw] confidence
-        #   @return [Float]
-        #     Output only. The confidence estimate between 0.0 and 1.0. A higher number
-        #     indicates an estimated greater likelihood that the recognized words are
-        #     correct. This field is set only for the top alternative of a non-streaming
-        #     result or of a streaming result where `is_final=true`.
-        #     This field is not guaranteed to be accurate and users should not rely on it
-        #     to be always provided.
-        #     The default of 0.0 is a sentinel value indicating `confidence` was not set.
-        # @!attribute [rw] words
-        #   @return [Array<Google::Cloud::Speech::V1p1beta1::WordInfo>]
-        #     Output only. A list of word-specific information for each recognized word.
-        #     Note: When `enable_speaker_diarization` is true, you will see all the words
-        #     from the beginning of the audio.
-        class SpeechRecognitionAlternative; end
-
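Walking a (LongRunning)RecognizeResponse per the structure above: results are sequential slices of the audio, and the alternatives within each are ranked best-first:

```ruby
response.results.each do |result|
  best = result.alternatives.first
  puts "#{best.transcript} (confidence: #{best.confidence})"
end
```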
-        # Word-specific information for recognized words.
-        # @!attribute [rw] start_time
-        #   @return [Google::Protobuf::Duration]
-        #     Output only. Time offset relative to the beginning of the audio,
-        #     and corresponding to the start of the spoken word.
-        #     This field is only set if `enable_word_time_offsets=true` and only
-        #     in the top hypothesis.
-        #     This is an experimental feature and the accuracy of the time offset can
-        #     vary.
-        # @!attribute [rw] end_time
-        #   @return [Google::Protobuf::Duration]
-        #     Output only. Time offset relative to the beginning of the audio,
-        #     and corresponding to the end of the spoken word.
-        #     This field is only set if `enable_word_time_offsets=true` and only
-        #     in the top hypothesis.
-        #     This is an experimental feature and the accuracy of the time offset can
-        #     vary.
-        # @!attribute [rw] word
-        #   @return [String]
-        #     Output only. The word corresponding to this set of information.
-        # @!attribute [rw] confidence
-        #   @return [Float]
-        #     Output only. The confidence estimate between 0.0 and 1.0. A higher number
-        #     indicates an estimated greater likelihood that the recognized words are
-        #     correct. This field is set only for the top alternative of a non-streaming
-        #     result or of a streaming result where `is_final=true`.
-        #     This field is not guaranteed to be accurate and users should not rely on it
-        #     to be always provided.
-        #     The default of 0.0 is a sentinel value indicating `confidence` was not set.
-        # @!attribute [rw] speaker_tag
-        #   @return [Integer]
-        #     Output only. A distinct integer value is assigned for every speaker within
-        #     the audio. This field specifies which one of those speakers was detected to
-        #     have spoken this word. Value ranges from '1' to diarization_speaker_count.
-        #     speaker_tag is set if enable_speaker_diarization = 'true' and only in the
-        #     top alternative.
-        class WordInfo; end
-      end
-    end
-  end
-end
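And word-level detail, assuming `enable_word_time_offsets` (and, for `speaker_tag`, `enable_speaker_diarization`) were set in the config:

```ruby
best = response.results.first.alternatives.first
best.words.each do |w|
  # Durations carry seconds and nanos separately; combine for display.
  start = w.start_time.seconds + w.start_time.nanos / 1e9
  puts "#{w.word} @ #{start.round(2)}s (speaker #{w.speaker_tag})"
end
```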