google-cloud-speech-v1 0.1.3 → 0.1.4

@@ -23,29 +23,29 @@ module Google
  module V1
  # The top-level message sent by the client for the `Recognize` method.
  # @!attribute [rw] config
- # @return [Google::Cloud::Speech::V1::RecognitionConfig]
+ # @return [::Google::Cloud::Speech::V1::RecognitionConfig]
  # Required. Provides information to the recognizer that specifies how to
  # process the request.
  # @!attribute [rw] audio
- # @return [Google::Cloud::Speech::V1::RecognitionAudio]
+ # @return [::Google::Cloud::Speech::V1::RecognitionAudio]
  # Required. The audio data to be recognized.
  class RecognizeRequest
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
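Every substantive change in 0.1.4 is the same mechanical edit: constant references in the generated docs and mixins gain a leading `::` (`Google::Cloud::...` becomes `::Google::Cloud::...`, `String` becomes `::String`), so lookup always starts at the root namespace and cannot be captured by a similarly named constant nested in user code. A minimal Ruby sketch of the messages this hunk documents, written with the new fully qualified form; the require path and all field values are illustrative assumptions, not part of the diff:

    require "google/cloud/speech/v1"   # assumed require path for this gem

    # A RecognizeRequest is just a config plus the audio to transcribe.
    config = ::Google::Cloud::Speech::V1::RecognitionConfig.new(
      encoding:          :LINEAR16,
      sample_rate_hertz: 16_000,
      language_code:     "en-US"
    )
    audio = ::Google::Cloud::Speech::V1::RecognitionAudio.new(
      uri: "gs://my-bucket/my-audio.raw"             # hypothetical Cloud Storage object
    )
    request = ::Google::Cloud::Speech::V1::RecognizeRequest.new(config: config, audio: audio)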
 
  # The top-level message sent by the client for the `LongRunningRecognize`
  # method.
  # @!attribute [rw] config
- # @return [Google::Cloud::Speech::V1::RecognitionConfig]
+ # @return [::Google::Cloud::Speech::V1::RecognitionConfig]
  # Required. Provides information to the recognizer that specifies how to
  # process the request.
  # @!attribute [rw] audio
- # @return [Google::Cloud::Speech::V1::RecognitionAudio]
+ # @return [::Google::Cloud::Speech::V1::RecognitionAudio]
  # Required. The audio data to be recognized.
  class LongRunningRecognizeRequest
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # The top-level message sent by the client for the `StreamingRecognize` method.
@@ -54,12 +54,12 @@ module Google
  # `audio_content`. All subsequent messages must contain `audio_content` and
  # must not contain a `streaming_config` message.
  # @!attribute [rw] streaming_config
- # @return [Google::Cloud::Speech::V1::StreamingRecognitionConfig]
+ # @return [::Google::Cloud::Speech::V1::StreamingRecognitionConfig]
  # Provides information to the recognizer that specifies how to process the
  # request. The first `StreamingRecognizeRequest` message must contain a
  # `streaming_config` message.
  # @!attribute [rw] audio_content
- # @return [String]
+ # @return [::String]
  # The audio data to be recognized. Sequential chunks of audio data are sent
  # in sequential `StreamingRecognizeRequest` messages. The first
  # `StreamingRecognizeRequest` message must not contain `audio_content` data
@@ -69,18 +69,18 @@ module Google
  # pure binary representation (not base64). See
  # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
  class StreamingRecognizeRequest
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
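The comments above spell out the streaming protocol: the first `StreamingRecognizeRequest` carries only `streaming_config`, and every later message carries only `audio_content`. A sketch of producing that sequence and reading the responses; the `Speech::Client` class, the `streaming_recognize` call, the chunk size, and the file name are assumptions about the generated client rather than anything defined in this file:

    streaming_config = ::Google::Cloud::Speech::V1::StreamingRecognitionConfig.new(
      config: ::Google::Cloud::Speech::V1::RecognitionConfig.new(
        encoding: :LINEAR16, sample_rate_hertz: 16_000, language_code: "en-US"
      ),
      interim_results: true
    )

    requests = Enumerator.new do |yielder|
      # First message: streaming_config only, no audio_content.
      yielder << ::Google::Cloud::Speech::V1::StreamingRecognizeRequest.new(
        streaming_config: streaming_config
      )
      # Every later message: a chunk of raw audio bytes, never another streaming_config.
      File.open "audio.raw", "rb" do |file|          # hypothetical local file
        while (chunk = file.read(32 * 1024))
          yielder << ::Google::Cloud::Speech::V1::StreamingRecognizeRequest.new(audio_content: chunk)
        end
      end
    end

    client = ::Google::Cloud::Speech::V1::Speech::Client.new   # assumed generated client
    client.streaming_recognize(requests).each do |response|
      response.results.each { |result| puts result.alternatives.first&.transcript }
    end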
 
  # Provides information to the recognizer that specifies how to process the
  # request.
  # @!attribute [rw] config
- # @return [Google::Cloud::Speech::V1::RecognitionConfig]
+ # @return [::Google::Cloud::Speech::V1::RecognitionConfig]
  # Required. Provides information to the recognizer that specifies how to
  # process the request.
  # @!attribute [rw] single_utterance
- # @return [Boolean]
+ # @return [::Boolean]
  # If `false` or omitted, the recognizer will perform continuous
  # recognition (continuing to wait for and process audio even if the user
  # pauses speaking) until the client closes the input stream (gRPC API) or
@@ -93,34 +93,34 @@ module Google
  # more than one `StreamingRecognitionResult` with the `is_final` flag set to
  # `true`.
  # @!attribute [rw] interim_results
- # @return [Boolean]
+ # @return [::Boolean]
  # If `true`, interim results (tentative hypotheses) may be
  # returned as they become available (these interim results are indicated with
  # the `is_final=false` flag).
  # If `false` or omitted, only `is_final=true` result(s) are returned.
  class StreamingRecognitionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # Provides information to the recognizer that specifies how to process the
  # request.
  # @!attribute [rw] encoding
- # @return [Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding]
+ # @return [::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding]
  # Encoding of audio data sent in all `RecognitionAudio` messages.
  # This field is optional for `FLAC` and `WAV` audio files and required
- # for all other audio formats. For details, see {Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
+ # for all other audio formats. For details, see {::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
  # @!attribute [rw] sample_rate_hertz
- # @return [Integer]
+ # @return [::Integer]
  # Sample rate in Hertz of the audio data sent in all
  # `RecognitionAudio` messages. Valid values are: 8000-48000.
  # 16000 is optimal. For best results, set the sampling rate of the audio
  # source to 16000 Hz. If that's not possible, use the native sample rate of
  # the audio source (instead of re-sampling).
  # This field is optional for FLAC and WAV audio files, but is
- # required for all other audio formats. For details, see {Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
+ # required for all other audio formats. For details, see {::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}.
  # @!attribute [rw] audio_channel_count
- # @return [Integer]
+ # @return [::Integer]
  # The number of channels in the input audio data.
  # ONLY set this for MULTI-CHANNEL recognition.
  # Valid values for LINEAR16 and FLAC are `1`-`8`.
@@ -131,7 +131,7 @@ module Google
  # To perform independent recognition on each channel set
  # `enable_separate_recognition_per_channel` to 'true'.
  # @!attribute [rw] enable_separate_recognition_per_channel
- # @return [Boolean]
+ # @return [::Boolean]
  # This needs to be set to `true` explicitly and `audio_channel_count` > 1
  # to get each channel recognized separately. The recognition result will
  # contain a `channel_tag` field to state which channel that result belongs
@@ -139,7 +139,7 @@ module Google
  # request is billed cumulatively for all channels recognized:
  # `audio_channel_count` multiplied by the length of the audio.
  # @!attribute [rw] language_code
- # @return [String]
+ # @return [::String]
  # Required. The language of the supplied audio as a
  # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  # Example: "en-US".
@@ -147,7 +147,7 @@ module Google
  # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
  # of the currently supported language codes.
  # @!attribute [rw] max_alternatives
- # @return [Integer]
+ # @return [::Integer]
  # Maximum number of recognition hypotheses to be returned.
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
  # within each `SpeechRecognitionResult`.
@@ -155,26 +155,26 @@ module Google
  # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
  # one. If omitted, will return a maximum of one.
  # @!attribute [rw] profanity_filter
- # @return [Boolean]
+ # @return [::Boolean]
  # If set to `true`, the server will attempt to filter out
  # profanities, replacing all but the initial character in each filtered word
  # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
  # won't be filtered out.
  # @!attribute [rw] speech_contexts
- # @return [Array<Google::Cloud::Speech::V1::SpeechContext>]
- # Array of {Google::Cloud::Speech::V1::SpeechContext SpeechContext}.
+ # @return [::Array<::Google::Cloud::Speech::V1::SpeechContext>]
+ # Array of {::Google::Cloud::Speech::V1::SpeechContext SpeechContext}.
  # A means to provide context to assist the speech recognition. For more
  # information, see
  # [speech
  # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
  # @!attribute [rw] enable_word_time_offsets
- # @return [Boolean]
+ # @return [::Boolean]
  # If `true`, the top result includes a list of words and
  # the start and end time offsets (timestamps) for those words. If
  # `false`, no word-level time offset information is returned. The default is
  # `false`.
  # @!attribute [rw] enable_automatic_punctuation
- # @return [Boolean]
+ # @return [::Boolean]
  # If 'true', adds punctuation to recognition result hypotheses.
  # This feature is only available in select languages. Setting this for
  # requests in other languages has no effect at all.
@@ -183,7 +183,7 @@ module Google
  # to all users. In the future this may be exclusively available as a
  # premium feature.
  # @!attribute [rw] diarization_config
- # @return [Google::Cloud::Speech::V1::SpeakerDiarizationConfig]
+ # @return [::Google::Cloud::Speech::V1::SpeakerDiarizationConfig]
  # Config to enable speaker diarization and set additional
  # parameters to make diarization better suited for your application.
  # Note: When this is enabled, we send all the words from the beginning of the
@@ -193,10 +193,10 @@ module Google
  # For non-streaming requests, the diarization results will be provided only
  # in the top alternative of the FINAL SpeechRecognitionResult.
  # @!attribute [rw] metadata
- # @return [Google::Cloud::Speech::V1::RecognitionMetadata]
+ # @return [::Google::Cloud::Speech::V1::RecognitionMetadata]
  # Metadata regarding this request.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Which model to select for the given request. Select the model
  # best suited to your domain to get best results. If a model is not
  # explicitly specified, then we auto-select a model based on the parameters
@@ -230,7 +230,7 @@ module Google
  # </tr>
  # </table>
  # @!attribute [rw] use_enhanced
- # @return [Boolean]
+ # @return [::Boolean]
  # Set to true to use an enhanced model for speech recognition.
  # If `use_enhanced` is set to true and the `model` field is not set, then
  # an appropriate enhanced model is chosen if an enhanced model exists for
@@ -240,8 +240,8 @@ module Google
  # does not exist, then the speech is recognized using the standard version
  # of the specified model.
  class RecognitionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
 
  # The encoding of the audio data sent in the request.
  #
@@ -313,65 +313,65 @@ module Google
 
  # Config to enable speaker diarization.
  # @!attribute [rw] enable_speaker_diarization
- # @return [Boolean]
+ # @return [::Boolean]
  # If 'true', enables speaker detection for each recognized word in
  # the top alternative of the recognition result using a speaker_tag provided
  # in the WordInfo.
  # @!attribute [rw] min_speaker_count
- # @return [Integer]
+ # @return [::Integer]
  # Minimum number of speakers in the conversation. This range gives you more
  # flexibility by allowing the system to automatically determine the correct
  # number of speakers. If not set, the default value is 2.
  # @!attribute [rw] max_speaker_count
- # @return [Integer]
+ # @return [::Integer]
  # Maximum number of speakers in the conversation. This range gives you more
  # flexibility by allowing the system to automatically determine the correct
  # number of speakers. If not set, the default value is 6.
  # @!attribute [r] speaker_tag
- # @return [Integer]
+ # @return [::Integer]
  # Unused.
  class SpeakerDiarizationConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
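A short sketch of wiring the diarization settings documented above into a `RecognitionConfig`; the speaker counts simply restate the documented defaults and every value is illustrative:

    diarization = ::Google::Cloud::Speech::V1::SpeakerDiarizationConfig.new(
      enable_speaker_diarization: true,
      min_speaker_count: 2,   # documented default
      max_speaker_count: 6    # documented default
    )
    config = ::Google::Cloud::Speech::V1::RecognitionConfig.new(
      language_code:      "en-US",
      diarization_config: diarization
    )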
 
  # Description of audio data to be recognized.
  # @!attribute [rw] interaction_type
- # @return [Google::Cloud::Speech::V1::RecognitionMetadata::InteractionType]
+ # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::InteractionType]
  # The use case most closely describing the audio content to be recognized.
  # @!attribute [rw] industry_naics_code_of_audio
- # @return [Integer]
+ # @return [::Integer]
  # The industry vertical to which this speech recognition request most
  # closely applies. This is most indicative of the topics contained
  # in the audio. Use the 6-digit NAICS code to identify the industry
  # vertical - see https://www.naics.com/search/.
  # @!attribute [rw] microphone_distance
- # @return [Google::Cloud::Speech::V1::RecognitionMetadata::MicrophoneDistance]
+ # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::MicrophoneDistance]
  # The audio type that most closely describes the audio being recognized.
  # @!attribute [rw] original_media_type
- # @return [Google::Cloud::Speech::V1::RecognitionMetadata::OriginalMediaType]
+ # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::OriginalMediaType]
  # The original media the speech was recorded on.
  # @!attribute [rw] recording_device_type
- # @return [Google::Cloud::Speech::V1::RecognitionMetadata::RecordingDeviceType]
+ # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::RecordingDeviceType]
  # The type of device the speech was recorded with.
  # @!attribute [rw] recording_device_name
- # @return [String]
+ # @return [::String]
  # The device used to make the recording. Examples 'Nexus 5X' or
  # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
  # 'Cardioid Microphone'.
  # @!attribute [rw] original_mime_type
- # @return [String]
+ # @return [::String]
  # Mime type of the original audio file. For example `audio/m4a`,
  # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
  # A list of possible audio mime types is maintained at
  # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
  # @!attribute [rw] audio_topic
- # @return [String]
+ # @return [::String]
  # Description of the content. Eg. "Recordings of federal supreme court
  # hearings from 2012".
  class RecognitionMetadata
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
 
  # Use case categories that the audio recognition request can be described
  # by.
@@ -468,7 +468,7 @@ module Google
  # Provides "hints" to the speech recognizer to favor specific words and phrases
  # in the results.
  # @!attribute [rw] phrases
- # @return [Array<String>]
+ # @return [::Array<::String>]
  # A list of strings containing words and phrases "hints" so that
  # the speech recognition is more likely to recognize them. This can be used
  # to improve the accuracy for specific words and phrases, for example, if
@@ -482,8 +482,8 @@ module Google
  # improves the likelihood of correctly transcribing audio that includes
  # months.
  class SpeechContext
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # Contains audio data in the encoding specified in the `RecognitionConfig`.
@@ -491,12 +491,12 @@ module Google
  # returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
  # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
  # @!attribute [rw] content
- # @return [String]
+ # @return [::String]
  # The audio data bytes encoded as specified in
  # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
  # pure binary representation, whereas JSON representations use base64.
  # @!attribute [rw] uri
- # @return [String]
+ # @return [::String]
  # URI that points to a file that contains audio data bytes as specified in
  # `RecognitionConfig`. The file must not be compressed (for example, gzip).
  # Currently, only Google Cloud Storage URIs are
@@ -505,20 +505,20 @@ module Google
  # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
  # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
  class RecognitionAudio
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
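`RecognitionAudio` takes either inline bytes in `content` or a Cloud Storage reference in `uri`; supplying both, or neither, is rejected with `INVALID_ARGUMENT` as the comments above note. A sketch with made-up file and bucket names:

    # Inline bytes: a pure binary string when using the gRPC API.
    inline_audio = ::Google::Cloud::Speech::V1::RecognitionAudio.new(
      content: File.binread("short-clip.raw")        # hypothetical local file
    )

    # Or a Cloud Storage URI, typically used for longer recordings.
    gcs_audio = ::Google::Cloud::Speech::V1::RecognitionAudio.new(
      uri: "gs://my-bucket/long-recording.flac"      # hypothetical object
    )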
 
  # The only message returned to the client by the `Recognize` method. It
  # contains the result as zero or more sequential `SpeechRecognitionResult`
  # messages.
  # @!attribute [rw] results
- # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionResult>]
+ # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionResult>]
  # Sequential list of transcription results corresponding to
  # sequential portions of audio.
  class RecognizeResponse
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # The only message returned to the client by the `LongRunningRecognize` method.
@@ -527,30 +527,30 @@ module Google
  # returned by the `GetOperation` call of the `google::longrunning::Operations`
  # service.
  # @!attribute [rw] results
- # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionResult>]
+ # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionResult>]
  # Sequential list of transcription results corresponding to
  # sequential portions of audio.
  class LongRunningRecognizeResponse
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # Describes the progress of a long-running `LongRunningRecognize` call. It is
  # included in the `metadata` field of the `Operation` returned by the
  # `GetOperation` call of the `google::longrunning::Operations` service.
  # @!attribute [rw] progress_percent
- # @return [Integer]
+ # @return [::Integer]
  # Approximate percentage of audio processed thus far. Guaranteed to be 100
  # when the audio is fully processed and the results are available.
  # @!attribute [rw] start_time
- # @return [Google::Protobuf::Timestamp]
+ # @return [::Google::Protobuf::Timestamp]
  # Time when the request was received.
  # @!attribute [rw] last_update_time
- # @return [Google::Protobuf::Timestamp]
+ # @return [::Google::Protobuf::Timestamp]
  # Time of the most recent processing update.
  class LongRunningRecognizeMetadata
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
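For `LongRunningRecognize`, the metadata above travels on the returned long-running operation and the transcription arrives in its response. A sketch of that flow, assuming the gapic-generated `Speech::Client` and the `Gapic::Operation` wrapper it returns, and reusing the `config` and `audio` built in the first sketch:

    client    = ::Google::Cloud::Speech::V1::Speech::Client.new   # assumed generated client
    operation = client.long_running_recognize config: config, audio: audio

    operation.wait_until_done!    # polls GetOperation until the operation completes
    if operation.error?
      warn operation.error.message
    else
      operation.response.results.each do |result|    # a LongRunningRecognizeResponse
        puts result.alternatives.first&.transcript
      end
    end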
 
  # `StreamingRecognizeResponse` is the only message returned to the client by
@@ -603,21 +603,21 @@ module Google
  # `speech_event_type`, or
  # one or more (repeated) `results`.
  # @!attribute [rw] error
- # @return [Google::Rpc::Status]
- # If set, returns a {Google::Rpc::Status google.rpc.Status} message that
+ # @return [::Google::Rpc::Status]
+ # If set, returns a {::Google::Rpc::Status google.rpc.Status} message that
  # specifies the error for the operation.
  # @!attribute [rw] results
- # @return [Array<Google::Cloud::Speech::V1::StreamingRecognitionResult>]
+ # @return [::Array<::Google::Cloud::Speech::V1::StreamingRecognitionResult>]
  # This repeated list contains zero or more results that
  # correspond to consecutive portions of the audio currently being processed.
  # It contains zero or one `is_final=true` result (the newly settled portion),
  # followed by zero or more `is_final=false` results (the interim results).
  # @!attribute [rw] speech_event_type
- # @return [Google::Cloud::Speech::V1::StreamingRecognizeResponse::SpeechEventType]
+ # @return [::Google::Cloud::Speech::V1::StreamingRecognizeResponse::SpeechEventType]
  # Indicates the type of speech event.
  class StreamingRecognizeResponse
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
 
  # Indicates the type of speech event.
  module SpeechEventType
@@ -638,67 +638,67 @@ module Google
  # A streaming speech recognition result corresponding to a portion of the audio
  # that is currently being processed.
  # @!attribute [rw] alternatives
- # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
+ # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
  # May contain one or more recognition hypotheses (up to the
  # maximum specified in `max_alternatives`).
  # These alternatives are ordered in terms of accuracy, with the top (first)
  # alternative being the most probable, as ranked by the recognizer.
  # @!attribute [rw] is_final
- # @return [Boolean]
+ # @return [::Boolean]
  # If `false`, this `StreamingRecognitionResult` represents an
  # interim result that may change. If `true`, this is the final time the
  # speech service will return this particular `StreamingRecognitionResult`,
  # the recognizer will not return any further hypotheses for this portion of
  # the transcript and corresponding audio.
  # @!attribute [rw] stability
- # @return [Float]
+ # @return [::Float]
  # An estimate of the likelihood that the recognizer will not
  # change its guess about this interim result. Values range from 0.0
  # (completely unstable) to 1.0 (completely stable).
  # This field is only provided for interim results (`is_final=false`).
  # The default of 0.0 is a sentinel value indicating `stability` was not set.
  # @!attribute [rw] result_end_time
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time offset of the end of this result relative to the
  # beginning of the audio.
  # @!attribute [rw] channel_tag
- # @return [Integer]
+ # @return [::Integer]
  # For multi-channel audio, this is the channel number corresponding to the
  # recognized result for the audio from that channel.
  # For audio_channel_count = N, its output values can range from '1' to 'N'.
  # @!attribute [r] language_code
- # @return [String]
+ # @return [::String]
  # The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
  # the language in this result. This language code was detected to have the
  # most likelihood of being spoken in the audio.
  class StreamingRecognitionResult
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # A speech recognition result corresponding to a portion of the audio.
  # @!attribute [rw] alternatives
- # @return [Array<Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
+ # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionAlternative>]
  # May contain one or more recognition hypotheses (up to the
  # maximum specified in `max_alternatives`).
  # These alternatives are ordered in terms of accuracy, with the top (first)
  # alternative being the most probable, as ranked by the recognizer.
  # @!attribute [rw] channel_tag
- # @return [Integer]
+ # @return [::Integer]
  # For multi-channel audio, this is the channel number corresponding to the
  # recognized result for the audio from that channel.
  # For audio_channel_count = N, its output values can range from '1' to 'N'.
  class SpeechRecognitionResult
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # Alternative hypotheses (a.k.a. n-best list).
  # @!attribute [rw] transcript
- # @return [String]
+ # @return [::String]
  # Transcript text representing the words that the user spoke.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
  # correct. This field is set only for the top alternative of a non-streaming
@@ -707,18 +707,18 @@ module Google
  # to be always provided.
  # The default of 0.0 is a sentinel value indicating `confidence` was not set.
  # @!attribute [rw] words
- # @return [Array<Google::Cloud::Speech::V1::WordInfo>]
+ # @return [::Array<::Google::Cloud::Speech::V1::WordInfo>]
  # A list of word-specific information for each recognized word.
  # Note: When `enable_speaker_diarization` is true, you will see all the words
  # from the beginning of the audio.
  class SpeechRecognitionAlternative
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
  # Word-specific information for recognized words.
  # @!attribute [rw] start_time
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time offset relative to the beginning of the audio,
  # and corresponding to the start of the spoken word.
  # This field is only set if `enable_word_time_offsets=true` and only
@@ -726,7 +726,7 @@ module Google
  # This is an experimental feature and the accuracy of the time offset can
  # vary.
  # @!attribute [rw] end_time
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time offset relative to the beginning of the audio,
  # and corresponding to the end of the spoken word.
  # This field is only set if `enable_word_time_offsets=true` and only
@@ -734,18 +734,18 @@ module Google
  # This is an experimental feature and the accuracy of the time offset can
  # vary.
  # @!attribute [rw] word
- # @return [String]
+ # @return [::String]
  # The word corresponding to this set of information.
  # @!attribute [r] speaker_tag
- # @return [Integer]
+ # @return [::Integer]
  # A distinct integer value is assigned for every speaker within
  # the audio. This field specifies which one of those speakers was detected to
  # have spoken this word. Value ranges from '1' to diarization_speaker_count.
  # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
  # top alternative.
  class WordInfo
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
  end
  end
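Given a response from one of the recognize sketches above, made with `enable_word_time_offsets` (and optionally diarization) turned on, the per-word data documented in `WordInfo` reads back with plain field access:

    response.results.each do |result|
      top = result.alternatives.first
      next unless top
      top.words.each do |info|
        start_s = info.start_time.seconds + info.start_time.nanos / 1e9
        end_s   = info.end_time.seconds   + info.end_time.nanos   / 1e9
        puts format("%-20s %6.2fs to %6.2fs  speaker %d",
                    info.word, start_s, end_s, info.speaker_tag)
      end
    end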