google-cloud-speech 0.41.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/.yardopts +2 -1
  3. data/AUTHENTICATION.md +51 -59
  4. data/LICENSE.md +203 -0
  5. data/MIGRATING.md +307 -0
  6. data/README.md +35 -49
  7. data/lib/google-cloud-speech.rb +19 -0
  8. data/lib/google/cloud/speech.rb +81 -142
  9. data/lib/google/cloud/speech/version.rb +1 -1
  10. metadata +103 -67
  11. data/LICENSE +0 -201
  12. data/lib/google/cloud/speech/v1.rb +0 -166
  13. data/lib/google/cloud/speech/v1/cloud_speech_pb.rb +0 -192
  14. data/lib/google/cloud/speech/v1/cloud_speech_services_pb.rb +0 -58
  15. data/lib/google/cloud/speech/v1/credentials.rb +0 -41
  16. data/lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb +0 -698
  17. data/lib/google/cloud/speech/v1/doc/google/longrunning/operations.rb +0 -51
  18. data/lib/google/cloud/speech/v1/doc/google/protobuf/any.rb +0 -131
  19. data/lib/google/cloud/speech/v1/doc/google/protobuf/duration.rb +0 -91
  20. data/lib/google/cloud/speech/v1/doc/google/rpc/status.rb +0 -39
  21. data/lib/google/cloud/speech/v1/helpers.rb +0 -136
  22. data/lib/google/cloud/speech/v1/speech_client.rb +0 -346
  23. data/lib/google/cloud/speech/v1/speech_client_config.json +0 -41
  24. data/lib/google/cloud/speech/v1/stream.rb +0 -615
  25. data/lib/google/cloud/speech/v1p1beta1.rb +0 -166
  26. data/lib/google/cloud/speech/v1p1beta1/cloud_speech_pb.rb +0 -200
  27. data/lib/google/cloud/speech/v1p1beta1/cloud_speech_services_pb.rb +0 -58
  28. data/lib/google/cloud/speech/v1p1beta1/credentials.rb +0 -41
  29. data/lib/google/cloud/speech/v1p1beta1/doc/google/cloud/speech/v1p1beta1/cloud_speech.rb +0 -758
  30. data/lib/google/cloud/speech/v1p1beta1/doc/google/longrunning/operations.rb +0 -51
  31. data/lib/google/cloud/speech/v1p1beta1/doc/google/protobuf/any.rb +0 -131
  32. data/lib/google/cloud/speech/v1p1beta1/doc/google/protobuf/duration.rb +0 -91
  33. data/lib/google/cloud/speech/v1p1beta1/doc/google/rpc/status.rb +0 -39
  34. data/lib/google/cloud/speech/v1p1beta1/helpers.rb +0 -136
  35. data/lib/google/cloud/speech/v1p1beta1/speech_client.rb +0 -346
  36. data/lib/google/cloud/speech/v1p1beta1/speech_client_config.json +0 -41
  37. data/lib/google/cloud/speech/v1p1beta1/stream.rb +0 -615
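Together with the new LICENSE.md, the added MIGRATING.md documents the move from the hand-written 0.x surface (the per-version `SpeechClient`, `Credentials`, and `Stream` files removed below) to the generated 1.0 clients. A minimal before/after sketch of that documented pattern, assuming the conventions of this generation of google-cloud gems (the bucket path is a placeholder; the bundled MIGRATING.md is the authoritative reference):

```ruby
require "google/cloud/speech"

config = { encoding: :LINEAR16, sample_rate_hertz: 16_000, language_code: "en-US" }
audio  = { uri: "gs://my-bucket/audio.raw" } # placeholder bucket/object

# 0.41.0: version-selecting factory, positional arguments.
old_client = Google::Cloud::Speech.new version: :v1
old_response = old_client.recognize config, audio

# 1.0.0: per-service factory method, keyword arguments throughout.
new_client = Google::Cloud::Speech.speech
new_response = new_client.recognize config: config, audio: audio
```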
data/lib/google/cloud/speech/v1/cloud_speech_services_pb.rb
@@ -1,58 +0,0 @@
- # Generated by the protocol buffer compiler. DO NOT EDIT!
- # Source: google/cloud/speech/v1/cloud_speech.proto for package 'google.cloud.speech.v1'
- # Original file comments:
- # Copyright 2019 Google LLC.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
- #
-
-
- require 'grpc'
- require 'google/cloud/speech/v1/cloud_speech_pb'
-
- module Google
-   module Cloud
-     module Speech
-       module V1
-         module Speech
-           # Service that implements Google Cloud Speech API.
-           class Service
-
-             include GRPC::GenericService
-
-             self.marshal_class_method = :encode
-             self.unmarshal_class_method = :decode
-             self.service_name = 'google.cloud.speech.v1.Speech'
-
-             # Performs synchronous speech recognition: receive results after all audio
-             # has been sent and processed.
-             rpc :Recognize, RecognizeRequest, RecognizeResponse
-             # Performs asynchronous speech recognition: receive results via the
-             # google.longrunning.Operations interface. Returns either an
-             # `Operation.error` or an `Operation.response` which contains
-             # a `LongRunningRecognizeResponse` message.
-             # For more information on asynchronous speech recognition, see the
-             # [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
-             rpc :LongRunningRecognize, LongRunningRecognizeRequest, Google::Longrunning::Operation
-             # Performs bidirectional streaming speech recognition: receive results while
-             # sending audio. This method is only available via the gRPC API (not REST).
-             rpc :StreamingRecognize, stream(StreamingRecognizeRequest), stream(StreamingRecognizeResponse)
-           end
-
-           Stub = Service.rpc_stub_class
-         end
-       end
-     end
-   end
- end
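The hunk above is plain protoc codegen declaring the three RPCs of `google.cloud.speech.v1.Speech`. As an illustrative sketch, the generated `Stub` could be driven directly in 0.41.0 without the hand-written wrapper; the endpoint is the public API host, and a real call would also need OAuth call credentials composed into the channel credentials:

```ruby
require "grpc"
require "google/cloud/speech/v1/cloud_speech_services_pb"

# TLS-only channel credentials; authentication is omitted in this sketch.
stub = Google::Cloud::Speech::V1::Speech::Stub.new(
  "speech.googleapis.com:443",
  GRPC::Core::ChannelCredentials.new
)

request = Google::Cloud::Speech::V1::RecognizeRequest.new(
  config: Google::Cloud::Speech::V1::RecognitionConfig.new(
    encoding:          :LINEAR16,
    sample_rate_hertz: 16_000,
    language_code:     "en-US"
  ),
  audio: Google::Cloud::Speech::V1::RecognitionAudio.new(
    uri: "gs://my-bucket/audio.raw" # placeholder
  )
)

# Generated rpc names become snake_case methods on the stub.
response = stub.recognize(request)
response.results.each { |result| puts result.alternatives.first.transcript }
```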
data/lib/google/cloud/speech/v1/credentials.rb
@@ -1,41 +0,0 @@
- # Copyright 2020 Google LLC
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     https://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- require "googleauth"
-
- module Google
-   module Cloud
-     module Speech
-       module V1
-         class Credentials < Google::Auth::Credentials
-           SCOPE = [
-             "https://www.googleapis.com/auth/cloud-platform"
-           ].freeze
-           PATH_ENV_VARS = %w(SPEECH_CREDENTIALS
-                              SPEECH_KEYFILE
-                              GOOGLE_CLOUD_CREDENTIALS
-                              GOOGLE_CLOUD_KEYFILE
-                              GCLOUD_KEYFILE)
-           JSON_ENV_VARS = %w(SPEECH_CREDENTIALS_JSON
-                              SPEECH_KEYFILE_JSON
-                              GOOGLE_CLOUD_CREDENTIALS_JSON
-                              GOOGLE_CLOUD_KEYFILE_JSON
-                              GCLOUD_KEYFILE_JSON)
-           DEFAULT_PATHS = ["~/.config/gcloud/application_default_credentials.json"]
-         end
-       end
-     end
-   end
- end
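The removed `Credentials` class drove keyfile discovery for the 0.x client: the first environment variable set in `PATH_ENV_VARS` wins, then the JSON-payload variables in `JSON_ENV_VARS`, then the gcloud application-default path in `DEFAULT_PATHS`. A short sketch of the implicit and explicit forms under 0.41.0 (the keyfile path is a placeholder):

```ruby
require "google/cloud/speech"

# Implicit discovery via the environment.
ENV["SPEECH_CREDENTIALS"] = "/path/to/keyfile.json"
client = Google::Cloud::Speech.new version: :v1

# Explicit override, bypassing the environment lookup entirely.
client = Google::Cloud::Speech.new version: :v1,
                                   credentials: "/path/to/keyfile.json"
```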
data/lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb
@@ -1,698 +0,0 @@
- # Copyright 2020 Google LLC
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     https://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- module Google
-   module Cloud
-     module Speech
-       module V1
-         # The top-level message sent by the client for the `Recognize` method.
-         # @!attribute [rw] config
-         #   @return [Google::Cloud::Speech::V1::RecognitionConfig]
-         #     Required. Provides information to the recognizer that specifies how to
-         #     process the request.
-         # @!attribute [rw] audio
-         #   @return [Google::Cloud::Speech::V1::RecognitionAudio]
-         #     Required. The audio data to be recognized.
-         class RecognizeRequest; end
-
-         # The top-level message sent by the client for the `LongRunningRecognize`
-         # method.
-         # @!attribute [rw] config
-         #   @return [Google::Cloud::Speech::V1::RecognitionConfig]
-         #     Required. Provides information to the recognizer that specifies how to
-         #     process the request.
-         # @!attribute [rw] audio
-         #   @return [Google::Cloud::Speech::V1::RecognitionAudio]
-         #     Required. The audio data to be recognized.
-         class LongRunningRecognizeRequest; end
-
-         # The top-level message sent by the client for the `StreamingRecognize` method.
-         # Multiple `StreamingRecognizeRequest` messages are sent. The first message
-         # must contain a `streaming_config` message and must not contain
-         # `audio_content`. All subsequent messages must contain `audio_content` and
-         # must not contain a `streaming_config` message.
-         # @!attribute [rw] streaming_config
-         #   @return [Google::Cloud::Speech::V1::StreamingRecognitionConfig]
-         #     Provides information to the recognizer that specifies how to process the
-         #     request. The first `StreamingRecognizeRequest` message must contain a
-         #     `streaming_config` message.
-         # @!attribute [rw] audio_content
-         #   @return [String]
-         #     The audio data to be recognized. Sequential chunks of audio data are sent
-         #     in sequential `StreamingRecognizeRequest` messages. The first
-         #     `StreamingRecognizeRequest` message must not contain `audio_content` data
-         #     and all subsequent `StreamingRecognizeRequest` messages must contain
-         #     `audio_content` data. The audio bytes must be encoded as specified in
-         #     `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
-         #     pure binary representation (not base64). See
-         #     [content limits](https://cloud.google.com/speech-to-text/quotas#content).
-         class StreamingRecognizeRequest; end
-
-         # Provides information to the recognizer that specifies how to process the
-         # request.
-         # @!attribute [rw] config
-         #   @return [Google::Cloud::Speech::V1::RecognitionConfig]
-         #     Required. Provides information to the recognizer that specifies how to
-         #     process the request.
-         # @!attribute [rw] single_utterance
-         #   @return [true, false]
-         #     If `false` or omitted, the recognizer will perform continuous
-         #     recognition (continuing to wait for and process audio even if the user
-         #     pauses speaking) until the client closes the input stream (gRPC API) or
-         #     until the maximum time limit has been reached. May return multiple
-         #     `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
-         #
-         #     If `true`, the recognizer will detect a single spoken utterance. When it
-         #     detects that the user has paused or stopped speaking, it will return an
-         #     `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
-         #     more than one `StreamingRecognitionResult` with the `is_final` flag set to
-         #     `true`.
-         # @!attribute [rw] interim_results
-         #   @return [true, false]
-         #     If `true`, interim results (tentative hypotheses) may be
-         #     returned as they become available (these interim results are indicated with
-         #     the `is_final=false` flag).
-         #     If `false` or omitted, only `is_final=true` result(s) are returned.
-         class StreamingRecognitionConfig; end
-
-         # Provides information to the recognizer that specifies how to process the
-         # request.
-         # @!attribute [rw] encoding
-         #   @return [Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding]
-         #     Encoding of audio data sent in all `RecognitionAudio` messages.
-         #     This field is optional for `FLAC` and `WAV` audio files and required
-         #     for all other audio formats. For details, see {Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
-         # @!attribute [rw] sample_rate_hertz
-         #   @return [Integer]
-         #     Sample rate in Hertz of the audio data sent in all
-         #     `RecognitionAudio` messages. Valid values are: 8000-48000.
-         #     16000 is optimal. For best results, set the sampling rate of the audio
-         #     source to 16000 Hz. If that's not possible, use the native sample rate of
-         #     the audio source (instead of re-sampling).
-         #     This field is optional for FLAC and WAV audio files, but is
-         #     required for all other audio formats. For details, see {Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
-         # @!attribute [rw] audio_channel_count
-         #   @return [Integer]
-         #     The number of channels in the input audio data.
-         #     ONLY set this for MULTI-CHANNEL recognition.
-         #     Valid values for LINEAR16 and FLAC are `1`-`8`.
-         #     Valid values for OGG_OPUS are '1'-'254'.
-         #     Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
-         #     If `0` or omitted, defaults to one channel (mono).
-         #     Note: We only recognize the first channel by default.
-         #     To perform independent recognition on each channel set
-         #     `enable_separate_recognition_per_channel` to 'true'.
-         # @!attribute [rw] enable_separate_recognition_per_channel
-         #   @return [true, false]
-         #     This needs to be set to `true` explicitly and `audio_channel_count` > 1
-         #     to get each channel recognized separately. The recognition result will
-         #     contain a `channel_tag` field to state which channel that result belongs
-         #     to. If this is not true, we will only recognize the first channel. The
-         #     request is billed cumulatively for all channels recognized:
-         #     `audio_channel_count` multiplied by the length of the audio.
-         # @!attribute [rw] language_code
-         #   @return [String]
-         #     Required. The language of the supplied audio as a
-         #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
-         #     Example: "en-US".
-         #     See [Language
-         #     Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
-         #     of the currently supported language codes.
-         # @!attribute [rw] max_alternatives
-         #   @return [Integer]
-         #     Maximum number of recognition hypotheses to be returned.
-         #     Specifically, the maximum number of `SpeechRecognitionAlternative` messages
-         #     within each `SpeechRecognitionResult`.
-         #     The server may return fewer than `max_alternatives`.
-         #     Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
-         #     one. If omitted, will return a maximum of one.
-         # @!attribute [rw] profanity_filter
-         #   @return [true, false]
-         #     If set to `true`, the server will attempt to filter out
-         #     profanities, replacing all but the initial character in each filtered word
-         #     with asterisks, e.g. "f***". If set to `false` or omitted, profanities
-         #     won't be filtered out.
-         # @!attribute [rw] speech_contexts
-         #   @return [Array<Google::Cloud::Speech::V1::SpeechContext>]
-         #     Array of {Google::Cloud::Speech::V1::SpeechContext SpeechContext}.
-         #     A means to provide context to assist the speech recognition. For more
-         #     information, see
-         #     [speech
-         #     adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
-         # @!attribute [rw] enable_word_time_offsets
-         #   @return [true, false]
-         #     If `true`, the top result includes a list of words and
-         #     the start and end time offsets (timestamps) for those words. If
-         #     `false`, no word-level time offset information is returned. The default is
-         #     `false`.
-         # @!attribute [rw] enable_automatic_punctuation
-         #   @return [true, false]
-         #     If 'true', adds punctuation to recognition result hypotheses.
-         #     This feature is only available in select languages. Setting this for
-         #     requests in other languages has no effect at all.
-         #     The default 'false' value does not add punctuation to result hypotheses.
-         #     Note: This is currently offered as an experimental service, complimentary
-         #     to all users. In the future this may be exclusively available as a
-         #     premium feature.
-         # @!attribute [rw] diarization_config
-         #   @return [Google::Cloud::Speech::V1::SpeakerDiarizationConfig]
-         #     Config to enable speaker diarization and set additional
-         #     parameters to make diarization better suited for your application.
-         #     Note: When this is enabled, we send all the words from the beginning of the
-         #     audio for the top alternative in every consecutive STREAMING responses.
-         #     This is done in order to improve our speaker tags as our models learn to
-         #     identify the speakers in the conversation over time.
-         #     For non-streaming requests, the diarization results will be provided only
-         #     in the top alternative of the FINAL SpeechRecognitionResult.
-         # @!attribute [rw] metadata
-         #   @return [Google::Cloud::Speech::V1::RecognitionMetadata]
-         #     Metadata regarding this request.
-         # @!attribute [rw] model
-         #   @return [String]
-         #     Which model to select for the given request. Select the model
-         #     best suited to your domain to get best results. If a model is not
-         #     explicitly specified, then we auto-select a model based on the parameters
-         #     in the RecognitionConfig.
-         #     <table>
-         #       <tr>
-         #         <td><b>Model</b></td>
-         #         <td><b>Description</b></td>
-         #       </tr>
-         #       <tr>
-         #         <td><code>command_and_search</code></td>
-         #         <td>Best for short queries such as voice commands or voice search.</td>
-         #       </tr>
-         #       <tr>
-         #         <td><code>phone_call</code></td>
-         #         <td>Best for audio that originated from a phone call (typically
-         #         recorded at an 8khz sampling rate).</td>
-         #       </tr>
-         #       <tr>
-         #         <td><code>video</code></td>
-         #         <td>Best for audio that originated from video or includes multiple
-         #         speakers. Ideally the audio is recorded at a 16khz or greater
-         #         sampling rate. This is a premium model that costs more than the
-         #         standard rate.</td>
-         #       </tr>
-         #       <tr>
-         #         <td><code>default</code></td>
-         #         <td>Best for audio that is not one of the specific audio models.
-         #         For example, long-form audio. Ideally the audio is high-fidelity,
-         #         recorded at a 16khz or greater sampling rate.</td>
-         #       </tr>
-         #     </table>
-         # @!attribute [rw] use_enhanced
-         #   @return [true, false]
-         #     Set to true to use an enhanced model for speech recognition.
-         #     If `use_enhanced` is set to true and the `model` field is not set, then
-         #     an appropriate enhanced model is chosen if an enhanced model exists for
-         #     the audio.
-         #
-         #     If `use_enhanced` is true and an enhanced version of the specified model
-         #     does not exist, then the speech is recognized using the standard version
-         #     of the specified model.
-         class RecognitionConfig
-           # The encoding of the audio data sent in the request.
-           #
-           # All encodings support only 1 channel (mono) audio, unless the
-           # `audio_channel_count` and `enable_separate_recognition_per_channel` fields
-           # are set.
-           #
-           # For best results, the audio source should be captured and transmitted using
-           # a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
-           # recognition can be reduced if lossy codecs are used to capture or transmit
-           # audio, particularly if background noise is present. Lossy codecs include
-           # `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
-           #
-           # The `FLAC` and `WAV` audio file formats include a header that describes the
-           # included audio content. You can request recognition for `WAV` files that
-           # contain either `LINEAR16` or `MULAW` encoded audio.
-           # If you send `FLAC` or `WAV` audio file format in
-           # your request, you do not need to specify an `AudioEncoding`; the audio
-           # encoding format is determined from the file header. If you specify
-           # an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
-           # encoding configuration must match the encoding described in the audio
-           # header; otherwise the request returns an
-           # {Google::Rpc::Code::INVALID_ARGUMENT} error code.
-           module AudioEncoding
-             # Not specified.
-             ENCODING_UNSPECIFIED = 0
-
-             # Uncompressed 16-bit signed little-endian samples (Linear PCM).
-             LINEAR16 = 1
-
-             # `FLAC` (Free Lossless Audio
-             # Codec) is the recommended encoding because it is
-             # lossless--therefore recognition is not compromised--and
-             # requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
-             # encoding supports 16-bit and 24-bit samples, however, not all fields in
-             # `STREAMINFO` are supported.
-             FLAC = 2
-
-             # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
-             MULAW = 3
-
-             # Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
-             AMR = 4
-
-             # Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
-             AMR_WB = 5
-
-             # Opus encoded audio frames in Ogg container
-             # ([OggOpus](https://wiki.xiph.org/OggOpus)).
-             # `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
-             OGG_OPUS = 6
-
-             # Although the use of lossy encodings is not recommended, if a very low
-             # bitrate encoding is required, `OGG_OPUS` is highly preferred over
-             # Speex encoding. The [Speex](https://speex.org/) encoding supported by
-             # Cloud Speech API has a header byte in each block, as in MIME type
-             # `audio/x-speex-with-header-byte`.
-             # It is a variant of the RTP Speex encoding defined in
-             # [RFC 5574](https://tools.ietf.org/html/rfc5574).
-             # The stream is a sequence of blocks, one block per RTP packet. Each block
-             # starts with a byte containing the length of the block, in bytes, followed
-             # by one or more frames of Speex data, padded to an integral number of
-             # bytes (octets) as specified in RFC 5574. In other words, each RTP header
-             # is replaced with a single byte containing the block length. Only Speex
-             # wideband is supported. `sample_rate_hertz` must be 16000.
-             SPEEX_WITH_HEADER_BYTE = 7
-           end
-         end
-
-         # Config to enable speaker diarization.
-         # @!attribute [rw] enable_speaker_diarization
-         #   @return [true, false]
-         #     If 'true', enables speaker detection for each recognized word in
-         #     the top alternative of the recognition result using a speaker_tag provided
-         #     in the WordInfo.
-         # @!attribute [rw] min_speaker_count
-         #   @return [Integer]
-         #     Minimum number of speakers in the conversation. This range gives you more
-         #     flexibility by allowing the system to automatically determine the correct
-         #     number of speakers. If not set, the default value is 2.
-         # @!attribute [rw] max_speaker_count
-         #   @return [Integer]
-         #     Maximum number of speakers in the conversation. This range gives you more
-         #     flexibility by allowing the system to automatically determine the correct
-         #     number of speakers. If not set, the default value is 6.
-         # @!attribute [rw] speaker_tag
-         #   @return [Integer]
-         #     Unused.
-         class SpeakerDiarizationConfig; end
-
-         # Description of audio data to be recognized.
-         # @!attribute [rw] interaction_type
-         #   @return [Google::Cloud::Speech::V1::RecognitionMetadata::InteractionType]
-         #     The use case most closely describing the audio content to be recognized.
-         # @!attribute [rw] industry_naics_code_of_audio
-         #   @return [Integer]
-         #     The industry vertical to which this speech recognition request most
-         #     closely applies. This is most indicative of the topics contained
-         #     in the audio. Use the 6-digit NAICS code to identify the industry
-         #     vertical - see https://www.naics.com/search/.
-         # @!attribute [rw] microphone_distance
-         #   @return [Google::Cloud::Speech::V1::RecognitionMetadata::MicrophoneDistance]
-         #     The audio type that most closely describes the audio being recognized.
-         # @!attribute [rw] original_media_type
-         #   @return [Google::Cloud::Speech::V1::RecognitionMetadata::OriginalMediaType]
-         #     The original media the speech was recorded on.
-         # @!attribute [rw] recording_device_type
-         #   @return [Google::Cloud::Speech::V1::RecognitionMetadata::RecordingDeviceType]
-         #     The type of device the speech was recorded with.
-         # @!attribute [rw] recording_device_name
-         #   @return [String]
-         #     The device used to make the recording. Examples 'Nexus 5X' or
-         #     'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
-         #     'Cardioid Microphone'.
-         # @!attribute [rw] original_mime_type
-         #   @return [String]
-         #     Mime type of the original audio file. For example `audio/m4a`,
-         #     `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
-         #     A list of possible audio mime types is maintained at
-         #     http://www.iana.org/assignments/media-types/media-types.xhtml#audio
-         # @!attribute [rw] audio_topic
-         #   @return [String]
-         #     Description of the content. Eg. "Recordings of federal supreme court
-         #     hearings from 2012".
-         class RecognitionMetadata
-           # Use case categories that the audio recognition request can be described
-           # by.
-           module InteractionType
-             # Use case is either unknown or is something other than one of the other
-             # values below.
-             INTERACTION_TYPE_UNSPECIFIED = 0
-
-             # Multiple people in a conversation or discussion. For example in a
-             # meeting with two or more people actively participating. Typically
-             # all the primary people speaking would be in the same room (if not,
-             # see PHONE_CALL)
-             DISCUSSION = 1
-
-             # One or more persons lecturing or presenting to others, mostly
-             # uninterrupted.
-             PRESENTATION = 2
-
-             # A phone-call or video-conference in which two or more people, who are
-             # not in the same room, are actively participating.
-             PHONE_CALL = 3
-
-             # A recorded message intended for another person to listen to.
-             VOICEMAIL = 4
-
-             # Professionally produced audio (eg. TV Show, Podcast).
-             PROFESSIONALLY_PRODUCED = 5
-
-             # Transcribe spoken questions and queries into text.
-             VOICE_SEARCH = 6
-
-             # Transcribe voice commands, such as for controlling a device.
-             VOICE_COMMAND = 7
-
-             # Transcribe speech to text to create a written document, such as a
-             # text-message, email or report.
-             DICTATION = 8
-           end
-
-           # Enumerates the types of capture settings describing an audio file.
-           module MicrophoneDistance
-             # Audio type is not known.
-             MICROPHONE_DISTANCE_UNSPECIFIED = 0
-
-             # The audio was captured from a closely placed microphone. Eg. phone,
-             # dictaphone, or handheld microphone. Generally if the speaker is within
-             # 1 meter of the microphone.
-             NEARFIELD = 1
-
-             # The speaker is within 3 meters of the microphone.
-             MIDFIELD = 2
-
-             # The speaker is more than 3 meters away from the microphone.
-             FARFIELD = 3
-           end
-
-           # The original media the speech was recorded on.
-           module OriginalMediaType
-             # Unknown original media type.
-             ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0
-
-             # The speech data is an audio recording.
-             AUDIO = 1
-
-             # The speech data originally recorded on a video.
-             VIDEO = 2
-           end
-
-           # The type of device the speech was recorded with.
-           module RecordingDeviceType
-             # The recording device is unknown.
-             RECORDING_DEVICE_TYPE_UNSPECIFIED = 0
-
-             # Speech was recorded on a smartphone.
-             SMARTPHONE = 1
-
-             # Speech was recorded using a personal computer or tablet.
-             PC = 2
-
-             # Speech was recorded over a phone line.
-             PHONE_LINE = 3
-
-             # Speech was recorded in a vehicle.
-             VEHICLE = 4
-
-             # Speech was recorded outdoors.
-             OTHER_OUTDOOR_DEVICE = 5
-
-             # Speech was recorded indoors.
-             OTHER_INDOOR_DEVICE = 6
-           end
-         end
-
-         # Provides "hints" to the speech recognizer to favor specific words and phrases
-         # in the results.
-         # @!attribute [rw] phrases
-         #   @return [Array<String>]
-         #     A list of strings containing words and phrases "hints" so that
-         #     the speech recognition is more likely to recognize them. This can be used
-         #     to improve the accuracy for specific words and phrases, for example, if
-         #     specific commands are typically spoken by the user. This can also be used
-         #     to add additional words to the vocabulary of the recognizer. See
-         #     [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
-         #
-         #     List items can also be set to classes for groups of words that represent
-         #     common concepts that occur in natural language. For example, rather than
-         #     providing phrase hints for every month of the year, using the $MONTH class
-         #     improves the likelihood of correctly transcribing audio that includes
-         #     months.
-         class SpeechContext; end
-
-         # Contains audio data in the encoding specified in the `RecognitionConfig`.
-         # Either `content` or `uri` must be supplied. Supplying both or neither
-         # returns {Google::Rpc::Code::INVALID_ARGUMENT}. See
-         # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
-         # @!attribute [rw] content
-         #   @return [String]
-         #     The audio data bytes encoded as specified in
-         #     `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
-         #     pure binary representation, whereas JSON representations use base64.
-         # @!attribute [rw] uri
-         #   @return [String]
-         #     URI that points to a file that contains audio data bytes as specified in
-         #     `RecognitionConfig`. The file must not be compressed (for example, gzip).
-         #     Currently, only Google Cloud Storage URIs are
-         #     supported, which must be specified in the following format:
-         #     `gs://bucket_name/object_name` (other URI formats return
-         #     {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
-         #     [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
-         class RecognitionAudio; end
-
-         # The only message returned to the client by the `Recognize` method. It
-         # contains the result as zero or more sequential `SpeechRecognitionResult`
-         # messages.
-         # @!attribute [rw] results
-         #   @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionResult>]
-         #     Sequential list of transcription results corresponding to
-         #     sequential portions of audio.
-         class RecognizeResponse; end
-
-         # The only message returned to the client by the `LongRunningRecognize` method.
-         # It contains the result as zero or more sequential `SpeechRecognitionResult`
-         # messages. It is included in the `result.response` field of the `Operation`
-         # returned by the `GetOperation` call of the `google::longrunning::Operations`
-         # service.
-         # @!attribute [rw] results
-         #   @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionResult>]
-         #     Sequential list of transcription results corresponding to
-         #     sequential portions of audio.
-         class LongRunningRecognizeResponse; end
-
-         # Describes the progress of a long-running `LongRunningRecognize` call. It is
-         # included in the `metadata` field of the `Operation` returned by the
-         # `GetOperation` call of the `google::longrunning::Operations` service.
-         # @!attribute [rw] progress_percent
-         #   @return [Integer]
-         #     Approximate percentage of audio processed thus far. Guaranteed to be 100
-         #     when the audio is fully processed and the results are available.
-         # @!attribute [rw] start_time
-         #   @return [Google::Protobuf::Timestamp]
-         #     Time when the request was received.
-         # @!attribute [rw] last_update_time
-         #   @return [Google::Protobuf::Timestamp]
-         #     Time of the most recent processing update.
-         class LongRunningRecognizeMetadata; end
-
-         # `StreamingRecognizeResponse` is the only message returned to the client by
-         # `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
-         # messages are streamed back to the client. If there is no recognizable
-         # audio, and `single_utterance` is set to false, then no messages are streamed
-         # back to the client.
-         #
-         # Here's an example of a series of ten `StreamingRecognizeResponse`s that might
-         # be returned while processing audio:
-         #
-         # 1. results { alternatives { transcript: "tube" } stability: 0.01 }
-         #
-         # 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
-         #
-         # 3. results { alternatives { transcript: "to be" } stability: 0.9 }
-         #    results { alternatives { transcript: " or not to be" } stability: 0.01 }
-         #
-         # 4. results { alternatives { transcript: "to be or not to be"
-         #                             confidence: 0.92 }
-         #              alternatives { transcript: "to bee or not to bee" }
-         #    is_final: true }
-         #
-         # 5. results { alternatives { transcript: " that's" } stability: 0.01 }
-         #
-         # 6. results { alternatives { transcript: " that is" } stability: 0.9 }
-         #    results { alternatives { transcript: " the question" } stability: 0.01 }
-         #
-         # 7. results { alternatives { transcript: " that is the question"
-         #                             confidence: 0.98 }
-         #              alternatives { transcript: " that was the question" }
-         #    is_final: true }
-         #
-         # Notes:
-         #
-         # * Only two of the above responses #4 and #7 contain final results; they are
-         #   indicated by `is_final: true`. Concatenating these together generates the
-         #   full transcript: "to be or not to be that is the question".
-         #
-         # * The others contain interim `results`. #3 and #6 contain two interim
-         #   `results`: the first portion has a high stability and is less likely to
-         #   change; the second portion has a low stability and is very likely to
-         #   change. A UI designer might choose to show only high stability `results`.
-         #
-         # * The specific `stability` and `confidence` values shown above are only for
-         #   illustrative purposes. Actual values may vary.
-         #
-         # * In each response, only one of these fields will be set:
-         #     `error`,
-         #     `speech_event_type`, or
-         #     one or more (repeated) `results`.
-         # @!attribute [rw] error
-         #   @return [Google::Rpc::Status]
-         #     If set, returns a {Google::Rpc::Status} message that
-         #     specifies the error for the operation.
-         # @!attribute [rw] results
-         #   @return [Array<Google::Cloud::Speech::V1::StreamingRecognitionResult>]
-         #     This repeated list contains zero or more results that
-         #     correspond to consecutive portions of the audio currently being processed.
-         #     It contains zero or one `is_final=true` result (the newly settled portion),
-         #     followed by zero or more `is_final=false` results (the interim results).
-         # @!attribute [rw] speech_event_type
-         #   @return [Google::Cloud::Speech::V1::StreamingRecognizeResponse::SpeechEventType]
-         #     Indicates the type of speech event.
-         class StreamingRecognizeResponse
-           # Indicates the type of speech event.
-           module SpeechEventType
-             # No speech event specified.
-             SPEECH_EVENT_UNSPECIFIED = 0
-
-             # This event indicates that the server has detected the end of the user's
-             # speech utterance and expects no additional speech. Therefore, the server
-             # will not process additional audio (although it may subsequently return
-             # additional results). The client should stop sending additional audio
-             # data, half-close the gRPC connection, and wait for any additional results
-             # until the server closes the gRPC connection. This event is only sent if
-             # `single_utterance` was set to `true`, and is not used otherwise.
-             END_OF_SINGLE_UTTERANCE = 1
-           end
-         end
-
-         # A streaming speech recognition result corresponding to a portion of the audio
-         # that is currently being processed.
-         # @!attribute [rw] alternatives
-         #   @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
-         #     May contain one or more recognition hypotheses (up to the
-         #     maximum specified in `max_alternatives`).
-         #     These alternatives are ordered in terms of accuracy, with the top (first)
-         #     alternative being the most probable, as ranked by the recognizer.
-         # @!attribute [rw] is_final
-         #   @return [true, false]
-         #     If `false`, this `StreamingRecognitionResult` represents an
-         #     interim result that may change. If `true`, this is the final time the
-         #     speech service will return this particular `StreamingRecognitionResult`,
-         #     the recognizer will not return any further hypotheses for this portion of
-         #     the transcript and corresponding audio.
-         # @!attribute [rw] stability
-         #   @return [Float]
-         #     An estimate of the likelihood that the recognizer will not
-         #     change its guess about this interim result. Values range from 0.0
-         #     (completely unstable) to 1.0 (completely stable).
-         #     This field is only provided for interim results (`is_final=false`).
-         #     The default of 0.0 is a sentinel value indicating `stability` was not set.
-         # @!attribute [rw] result_end_time
-         #   @return [Google::Protobuf::Duration]
-         #     Time offset of the end of this result relative to the
-         #     beginning of the audio.
-         # @!attribute [rw] channel_tag
-         #   @return [Integer]
-         #     For multi-channel audio, this is the channel number corresponding to the
-         #     recognized result for the audio from that channel.
-         #     For audio_channel_count = N, its output values can range from '1' to 'N'.
-         # @!attribute [rw] language_code
-         #   @return [String]
-         #     The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-         #     the language in this result. This language code was detected to have the
-         #     most likelihood of being spoken in the audio.
-         class StreamingRecognitionResult; end
-
-         # A speech recognition result corresponding to a portion of the audio.
-         # @!attribute [rw] alternatives
-         #   @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
-         #     May contain one or more recognition hypotheses (up to the
-         #     maximum specified in `max_alternatives`).
-         #     These alternatives are ordered in terms of accuracy, with the top (first)
-         #     alternative being the most probable, as ranked by the recognizer.
-         # @!attribute [rw] channel_tag
-         #   @return [Integer]
-         #     For multi-channel audio, this is the channel number corresponding to the
-         #     recognized result for the audio from that channel.
-         #     For audio_channel_count = N, its output values can range from '1' to 'N'.
-         class SpeechRecognitionResult; end
-
-         # Alternative hypotheses (a.k.a. n-best list).
-         # @!attribute [rw] transcript
-         #   @return [String]
-         #     Transcript text representing the words that the user spoke.
-         # @!attribute [rw] confidence
-         #   @return [Float]
-         #     The confidence estimate between 0.0 and 1.0. A higher number
-         #     indicates an estimated greater likelihood that the recognized words are
-         #     correct. This field is set only for the top alternative of a non-streaming
-         #     result or, of a streaming result where `is_final=true`.
-         #     This field is not guaranteed to be accurate and users should not rely on it
-         #     to be always provided.
-         #     The default of 0.0 is a sentinel value indicating `confidence` was not set.
-         # @!attribute [rw] words
-         #   @return [Array<Google::Cloud::Speech::V1::WordInfo>]
-         #     A list of word-specific information for each recognized word.
-         #     Note: When `enable_speaker_diarization` is true, you will see all the words
-         #     from the beginning of the audio.
-         class SpeechRecognitionAlternative; end
-
-         # Word-specific information for recognized words.
-         # @!attribute [rw] start_time
-         #   @return [Google::Protobuf::Duration]
-         #     Time offset relative to the beginning of the audio,
-         #     and corresponding to the start of the spoken word.
-         #     This field is only set if `enable_word_time_offsets=true` and only
-         #     in the top hypothesis.
-         #     This is an experimental feature and the accuracy of the time offset can
-         #     vary.
-         # @!attribute [rw] end_time
-         #   @return [Google::Protobuf::Duration]
-         #     Time offset relative to the beginning of the audio,
-         #     and corresponding to the end of the spoken word.
-         #     This field is only set if `enable_word_time_offsets=true` and only
-         #     in the top hypothesis.
-         #     This is an experimental feature and the accuracy of the time offset can
-         #     vary.
-         # @!attribute [rw] word
-         #   @return [String]
-         #     The word corresponding to this set of information.
-         # @!attribute [rw] speaker_tag
-         #   @return [Integer]
-         #     A distinct integer value is assigned for every speaker within
-         #     the audio. This field specifies which one of those speakers was detected to
-         #     have spoken this word. Value ranges from '1' to diarization_speaker_count.
-         #     speaker_tag is set if enable_speaker_diarization = 'true' and only in the
-         #     top alternative.
-         class WordInfo; end
-       end
-     end
-   end
- end
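The `StreamingRecognizeRequest` contract documented above (the first message carries only `streaming_config`, every later message carries only `audio_content`) maps naturally onto an enumerator of requests. A minimal sketch built from the 0.41.0 message classes; the file name and 32 KiB chunk size are arbitrary choices:

```ruby
require "google/cloud/speech/v1/cloud_speech_pb"

streaming_config = Google::Cloud::Speech::V1::StreamingRecognitionConfig.new(
  config: Google::Cloud::Speech::V1::RecognitionConfig.new(
    encoding:          :LINEAR16,
    sample_rate_hertz: 16_000,
    language_code:     "en-US"
  ),
  interim_results: true
)

requests = Enumerator.new do |yielder|
  # First request: the streaming configuration, no audio.
  yielder << Google::Cloud::Speech::V1::StreamingRecognizeRequest.new(
    streaming_config: streaming_config
  )
  # Subsequent requests: raw audio bytes only.
  File.open("audio.raw", "rb") do |file|
    while (chunk = file.read(32 * 1024))
      yielder << Google::Cloud::Speech::V1::StreamingRecognizeRequest.new(
        audio_content: chunk
      )
    end
  end
end

# Passing this enumerator to the generated stub's streaming_recognize yields
# StreamingRecognizeResponse messages (interim and final) as they arrive.
```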