google-cloud-speech-v1 0.13.1 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 6ebfda8adf3a8581db66e188089b6577002805411cd40c6e21792b97c13a2bc8
- data.tar.gz: c4546d08b8e32355d37440d546221411392f75797599da4a5f47628c94b163f6
+ metadata.gz: 06fbd18f76c6bb1319e13c2acedfd38bce39d84ea4100ee0a6b81ca8334713e0
+ data.tar.gz: 5a192f601581e3b12408fe82706d7baa349b74b13f1ecd4c90eb44e04c35bddb
  SHA512:
- metadata.gz: 2b952c2630d77df6805fde6cb5761d42921db744f781f59c4407981737c7b2e96c362b1f337a1f124c432095676322265cb3932c229fb58b61016bd0a7e8f084
- data.tar.gz: 134c687b57210458b4eda9104a83691459a7b100dc7a9d964f3d47a30d292c5fe158a4c3c5ed42855103125a24cd598ac4b99b1ef6ef2a9f6fb1b8efc3308026
+ metadata.gz: de18f2eb452d6201357dc4ef5f55b060a793b5b60ee3da384def0ad6c8a442b2ac219f7bb7d8c0740bf37a6624ac902e170f66ffc429d67dcf35a425ae694a11
+ data.tar.gz: 3050660ec37ce8577dacbdea96a8a722b671128802c8ca92634bc11e1fb8e350d6ae74ba0a1ba1b90968623d67f9bf6783c699d832bcfaba6835a1471baa13b6
@@ -138,7 +138,8 @@ module Google
  credentials: credentials,
  endpoint: @config.endpoint,
  channel_args: @config.channel_args,
- interceptors: @config.interceptors
+ interceptors: @config.interceptors,
+ channel_pool_config: @config.channel_pool
  )
  end

@@ -1243,6 +1244,14 @@ module Google
  end
  end

+ ##
+ # Configuration for the channel pool
+ # @return [::Gapic::ServiceStub::ChannelPool::Configuration]
+ #
+ def channel_pool
+ @channel_pool ||= ::Gapic::ServiceStub::ChannelPool::Configuration.new
+ end
+
  ##
  # Configuration RPC class for the Adaptation API.
  #
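The new `channel_pool` configuration object is handed to the gRPC service stub as `channel_pool_config:`, which lets one client spread traffic over several underlying channels. A minimal sketch of how this might be used, assuming the gapic-common 0.20+ `::Gapic::ServiceStub::ChannelPool::Configuration` exposes a `channel_count` attribute (not shown in this diff):

    require "google/cloud/speech/v1"

    # Sketch only: open several gRPC channels for a high-throughput workload.
    # `channel_count` is assumed to be the gapic-common setting; verify against
    # the gapic-common documentation before relying on it.
    client = Google::Cloud::Speech::V1::Adaptation::Client.new do |config|
      config.channel_pool.channel_count = 4
    end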
@@ -181,6 +181,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::PhraseSet]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::CreatePhraseSetRequest.new
+ #
+ # # Call the create_phrase_set method.
+ # result = client.create_phrase_set request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::PhraseSet.
+ # p result
+ #
  def create_phrase_set request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -251,6 +267,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::PhraseSet]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::GetPhraseSetRequest.new
+ #
+ # # Call the get_phrase_set method.
+ # result = client.get_phrase_set request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::PhraseSet.
+ # p result
+ #
  def get_phrase_set request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -332,6 +364,26 @@ module Google
  # @return [::Gapic::Rest::PagedEnumerable<::Google::Cloud::Speech::V1::PhraseSet>]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::ListPhraseSetRequest.new
+ #
+ # # Call the list_phrase_set method.
+ # result = client.list_phrase_set request
+ #
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
+ # # over elements, and API calls will be issued to fetch pages as needed.
+ # result.each do |item|
+ # # Each element is of type ::Google::Cloud::Speech::V1::PhraseSet.
+ # p item
+ # end
+ #
  def list_phrase_set request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -408,6 +460,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::PhraseSet]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::UpdatePhraseSetRequest.new
+ #
+ # # Call the update_phrase_set method.
+ # result = client.update_phrase_set request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::PhraseSet.
+ # p result
+ #
  def update_phrase_set request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -472,6 +540,22 @@ module Google
  # @return [::Google::Protobuf::Empty]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::DeletePhraseSetRequest.new
+ #
+ # # Call the delete_phrase_set method.
+ # result = client.delete_phrase_set request
+ #
+ # # The returned object is of type Google::Protobuf::Empty.
+ # p result
+ #
  def delete_phrase_set request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -551,6 +635,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::CustomClass]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::CreateCustomClassRequest.new
+ #
+ # # Call the create_custom_class method.
+ # result = client.create_custom_class request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::CustomClass.
+ # p result
+ #
  def create_custom_class request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -615,6 +715,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::CustomClass]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::GetCustomClassRequest.new
+ #
+ # # Call the get_custom_class method.
+ # result = client.get_custom_class request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::CustomClass.
+ # p result
+ #
  def get_custom_class request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -696,6 +812,26 @@ module Google
  # @return [::Gapic::Rest::PagedEnumerable<::Google::Cloud::Speech::V1::CustomClass>]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::ListCustomClassesRequest.new
+ #
+ # # Call the list_custom_classes method.
+ # result = client.list_custom_classes request
+ #
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
+ # # over elements, and API calls will be issued to fetch pages as needed.
+ # result.each do |item|
+ # # Each element is of type ::Google::Cloud::Speech::V1::CustomClass.
+ # p item
+ # end
+ #
  def list_custom_classes request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -772,6 +908,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::CustomClass]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::UpdateCustomClassRequest.new
+ #
+ # # Call the update_custom_class method.
+ # result = client.update_custom_class request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::CustomClass.
+ # p result
+ #
  def update_custom_class request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -842,6 +994,22 @@ module Google
  # @return [::Google::Protobuf::Empty]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Adaptation::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::DeleteCustomClassRequest.new
+ #
+ # # Call the delete_custom_class method.
+ # result = client.delete_custom_class request
+ #
+ # # The returned object is of type Google::Protobuf::Empty.
+ # p result
+ #
  def delete_custom_class request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -15,7 +15,7 @@ require 'google/protobuf/wrappers_pb'
  require 'google/rpc/status_pb'


- descriptor_data = "\n)google/cloud/speech/v1/cloud_speech.proto\x12\x16google.cloud.speech.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a%google/cloud/speech/v1/resource.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x90\x01\n\x10RecognizeRequest\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12<\n\x05\x61udio\x18\x02 \x01(\x0b\x32(.google.cloud.speech.v1.RecognitionAudioB\x03\xe0\x41\x02\"\xe7\x01\n\x1bLongRunningRecognizeRequest\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12<\n\x05\x61udio\x18\x02 \x01(\x0b\x32(.google.cloud.speech.v1.RecognitionAudioB\x03\xe0\x41\x02\x12J\n\routput_config\x18\x04 \x01(\x0b\x32..google.cloud.speech.v1.TranscriptOutputConfigB\x03\xe0\x41\x01\":\n\x16TranscriptOutputConfig\x12\x11\n\x07gcs_uri\x18\x01 \x01(\tH\x00\x42\r\n\x0boutput_type\"\x99\x01\n\x19StreamingRecognizeRequest\x12N\n\x10streaming_config\x18\x01 \x01(\x0b\x32\x32.google.cloud.speech.v1.StreamingRecognitionConfigH\x00\x12\x17\n\raudio_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request\"\xa7\x03\n\x1aStreamingRecognitionConfig\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12\x18\n\x10single_utterance\x18\x02 \x01(\x08\x12\x17\n\x0finterim_results\x18\x03 \x01(\x08\x12$\n\x1c\x65nable_voice_activity_events\x18\x05 \x01(\x08\x12g\n\x16voice_activity_timeout\x18\x06 \x01(\x0b\x32G.google.cloud.speech.v1.StreamingRecognitionConfig.VoiceActivityTimeout\x1a\x86\x01\n\x14VoiceActivityTimeout\x12\x37\n\x14speech_start_timeout\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x35\n\x12speech_end_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xe9\x07\n\x11RecognitionConfig\x12I\n\x08\x65ncoding\x18\x01 \x01(\x0e\x32\x37.google.cloud.speech.v1.RecognitionConfig.AudioEncoding\x12\x19\n\x11sample_rate_hertz\x18\x02 \x01(\x05\x12\x1b\n\x13\x61udio_channel_count\x18\x07 \x01(\x05\x12/\n\'enable_separate_recognition_per_channel\x18\x0c \x01(\x08\x12\x1a\n\rlanguage_code\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\"\n\x1a\x61lternative_language_codes\x18\x12 \x03(\t\x12\x18\n\x10max_alternatives\x18\x04 \x01(\x05\x12\x18\n\x10profanity_filter\x18\x05 \x01(\x08\x12<\n\nadaptation\x18\x14 \x01(\x0b\x32(.google.cloud.speech.v1.SpeechAdaptation\x12>\n\x0fspeech_contexts\x18\x06 \x03(\x0b\x32%.google.cloud.speech.v1.SpeechContext\x12 \n\x18\x65nable_word_time_offsets\x18\x08 \x01(\x08\x12\x1e\n\x16\x65nable_word_confidence\x18\x0f \x01(\x08\x12$\n\x1c\x65nable_automatic_punctuation\x18\x0b \x01(\x08\x12=\n\x19\x65nable_spoken_punctuation\x18\x16 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x14\x65nable_spoken_emojis\x18\x17 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12L\n\x12\x64iarization_config\x18\x13 \x01(\x0b\x32\x30.google.cloud.speech.v1.SpeakerDiarizationConfig\x12=\n\x08metadata\x18\t \x01(\x0b\x32+.google.cloud.speech.v1.RecognitionMetadata\x12\r\n\x05model\x18\r \x01(\t\x12\x14\n\x0cuse_enhanced\x18\x0e 
\x01(\x08\"\x9a\x01\n\rAudioEncoding\x12\x18\n\x14\x45NCODING_UNSPECIFIED\x10\x00\x12\x0c\n\x08LINEAR16\x10\x01\x12\x08\n\x04\x46LAC\x10\x02\x12\t\n\x05MULAW\x10\x03\x12\x07\n\x03\x41MR\x10\x04\x12\n\n\x06\x41MR_WB\x10\x05\x12\x0c\n\x08OGG_OPUS\x10\x06\x12\x1a\n\x16SPEEX_WITH_HEADER_BYTE\x10\x07\x12\r\n\tWEBM_OPUS\x10\t\"\x90\x01\n\x18SpeakerDiarizationConfig\x12\"\n\x1a\x65nable_speaker_diarization\x18\x01 \x01(\x08\x12\x19\n\x11min_speaker_count\x18\x02 \x01(\x05\x12\x19\n\x11max_speaker_count\x18\x03 \x01(\x05\x12\x1a\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x05\x18\x01\xe0\x41\x03\"\xa4\x08\n\x13RecognitionMetadata\x12U\n\x10interaction_type\x18\x01 \x01(\x0e\x32;.google.cloud.speech.v1.RecognitionMetadata.InteractionType\x12$\n\x1cindustry_naics_code_of_audio\x18\x03 \x01(\r\x12[\n\x13microphone_distance\x18\x04 \x01(\x0e\x32>.google.cloud.speech.v1.RecognitionMetadata.MicrophoneDistance\x12Z\n\x13original_media_type\x18\x05 \x01(\x0e\x32=.google.cloud.speech.v1.RecognitionMetadata.OriginalMediaType\x12^\n\x15recording_device_type\x18\x06 \x01(\x0e\x32?.google.cloud.speech.v1.RecognitionMetadata.RecordingDeviceType\x12\x1d\n\x15recording_device_name\x18\x07 \x01(\t\x12\x1a\n\x12original_mime_type\x18\x08 \x01(\t\x12\x13\n\x0b\x61udio_topic\x18\n \x01(\t\"\xc5\x01\n\x0fInteractionType\x12 \n\x1cINTERACTION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nDISCUSSION\x10\x01\x12\x10\n\x0cPRESENTATION\x10\x02\x12\x0e\n\nPHONE_CALL\x10\x03\x12\r\n\tVOICEMAIL\x10\x04\x12\x1b\n\x17PROFESSIONALLY_PRODUCED\x10\x05\x12\x10\n\x0cVOICE_SEARCH\x10\x06\x12\x11\n\rVOICE_COMMAND\x10\x07\x12\r\n\tDICTATION\x10\x08\"d\n\x12MicrophoneDistance\x12#\n\x1fMICROPHONE_DISTANCE_UNSPECIFIED\x10\x00\x12\r\n\tNEARFIELD\x10\x01\x12\x0c\n\x08MIDFIELD\x10\x02\x12\x0c\n\x08\x46\x41RFIELD\x10\x03\"N\n\x11OriginalMediaType\x12#\n\x1fORIGINAL_MEDIA_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41UDIO\x10\x01\x12\t\n\x05VIDEO\x10\x02\"\xa4\x01\n\x13RecordingDeviceType\x12%\n!RECORDING_DEVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nSMARTPHONE\x10\x01\x12\x06\n\x02PC\x10\x02\x12\x0e\n\nPHONE_LINE\x10\x03\x12\x0b\n\x07VEHICLE\x10\x04\x12\x18\n\x14OTHER_OUTDOOR_DEVICE\x10\x05\x12\x17\n\x13OTHER_INDOOR_DEVICE\x10\x06:\x02\x18\x01\"/\n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t\x12\r\n\x05\x62oost\x18\x04 \x01(\x02\"D\n\x10RecognitionAudio\x12\x11\n\x07\x63ontent\x18\x01 \x01(\x0cH\x00\x12\r\n\x03uri\x18\x02 \x01(\tH\x00\x42\x0e\n\x0c\x61udio_source\"\xed\x01\n\x11RecognizeResponse\x12@\n\x07results\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1.SpeechRecognitionResult\x12\x34\n\x11total_billed_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12L\n\x16speech_adaptation_info\x18\x07 \x01(\x0b\x32,.google.cloud.speech.v1.SpeechAdaptationInfo\x12\x12\n\nrequest_id\x18\x08 \x01(\x03\"\xe9\x02\n\x1cLongRunningRecognizeResponse\x12@\n\x07results\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1.SpeechRecognitionResult\x12\x34\n\x11total_billed_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x45\n\routput_config\x18\x06 \x01(\x0b\x32..google.cloud.speech.v1.TranscriptOutputConfig\x12(\n\x0coutput_error\x18\x07 \x01(\x0b\x32\x12.google.rpc.Status\x12L\n\x16speech_adaptation_info\x18\x08 \x01(\x0b\x32,.google.cloud.speech.v1.SpeechAdaptationInfo\x12\x12\n\nrequest_id\x18\t \x01(\x03\"\xb0\x01\n\x1cLongRunningRecognizeMetadata\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10last_update_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x03uri\x18\x04 \x01(\tB\x03\xe0\x41\x03\"\xd1\x04\n\x1aStreamingRecognizeResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x43\n\x07results\x18\x02 \x03(\x0b\x32\x32.google.cloud.speech.v1.StreamingRecognitionResult\x12]\n\x11speech_event_type\x18\x04 \x01(\x0e\x32\x42.google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType\x12\x34\n\x11speech_event_time\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x34\n\x11total_billed_time\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12L\n\x16speech_adaptation_info\x18\t \x01(\x0b\x32,.google.cloud.speech.v1.SpeechAdaptationInfo\x12\x12\n\nrequest_id\x18\n \x01(\x03\"\x9d\x01\n\x0fSpeechEventType\x12\x1c\n\x18SPEECH_EVENT_UNSPECIFIED\x10\x00\x12\x1b\n\x17\x45ND_OF_SINGLE_UTTERANCE\x10\x01\x12\x19\n\x15SPEECH_ACTIVITY_BEGIN\x10\x02\x12\x17\n\x13SPEECH_ACTIVITY_END\x10\x03\x12\x1b\n\x17SPEECH_ACTIVITY_TIMEOUT\x10\x04\"\xf2\x01\n\x1aStreamingRecognitionResult\x12J\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x34.google.cloud.speech.v1.SpeechRecognitionAlternative\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12\x11\n\tstability\x18\x03 \x01(\x02\x12\x32\n\x0fresult_end_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x63hannel_tag\x18\x05 \x01(\x05\x12\x1a\n\rlanguage_code\x18\x06 \x01(\tB\x03\xe0\x41\x03\"\xca\x01\n\x17SpeechRecognitionResult\x12J\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x34.google.cloud.speech.v1.SpeechRecognitionAlternative\x12\x13\n\x0b\x63hannel_tag\x18\x02 \x01(\x05\x12\x32\n\x0fresult_end_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x1a\n\rlanguage_code\x18\x05 \x01(\tB\x03\xe0\x41\x03\"w\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12/\n\x05words\x18\x03 \x03(\x0b\x32 .google.cloud.speech.v1.WordInfo\"\xa2\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03\"K\n\x14SpeechAdaptationInfo\x12\x1a\n\x12\x61\x64\x61ptation_timeout\x18\x01 \x01(\x08\x12\x17\n\x0ftimeout_message\x18\x04 \x01(\t2\xd1\x04\n\x06Speech\x12\x90\x01\n\tRecognize\x12(.google.cloud.speech.v1.RecognizeRequest\x1a).google.cloud.speech.v1.RecognizeResponse\".\x82\xd3\xe4\x93\x02\x19\"\x14/v1/speech:recognize:\x01*\xda\x41\x0c\x63onfig,audio\x12\xe4\x01\n\x14LongRunningRecognize\x12\x33.google.cloud.speech.v1.LongRunningRecognizeRequest\x1a\x1d.google.longrunning.Operation\"x\x82\xd3\xe4\x93\x02$\"\x1f/v1/speech:longrunningrecognize:\x01*\xda\x41\x0c\x63onfig,audio\xca\x41<\n\x1cLongRunningRecognizeResponse\x12\x1cLongRunningRecognizeMetadata\x12\x81\x01\n\x12StreamingRecognize\x12\x31.google.cloud.speech.v1.StreamingRecognizeRequest\x1a\x32.google.cloud.speech.v1.StreamingRecognizeResponse\"\x00(\x01\x30\x01\x1aI\xca\x41\x15speech.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBh\n\x1a\x63om.google.cloud.speech.v1B\x0bSpeechProtoP\x01Z2cloud.google.com/go/speech/apiv1/speechpb;speechpb\xf8\x01\x01\xa2\x02\x03GCSb\x06proto3"
+ descriptor_data = "\n)google/cloud/speech/v1/cloud_speech.proto\x12\x16google.cloud.speech.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a%google/cloud/speech/v1/resource.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x90\x01\n\x10RecognizeRequest\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12<\n\x05\x61udio\x18\x02 \x01(\x0b\x32(.google.cloud.speech.v1.RecognitionAudioB\x03\xe0\x41\x02\"\xe7\x01\n\x1bLongRunningRecognizeRequest\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12<\n\x05\x61udio\x18\x02 \x01(\x0b\x32(.google.cloud.speech.v1.RecognitionAudioB\x03\xe0\x41\x02\x12J\n\routput_config\x18\x04 \x01(\x0b\x32..google.cloud.speech.v1.TranscriptOutputConfigB\x03\xe0\x41\x01\":\n\x16TranscriptOutputConfig\x12\x11\n\x07gcs_uri\x18\x01 \x01(\tH\x00\x42\r\n\x0boutput_type\"\x99\x01\n\x19StreamingRecognizeRequest\x12N\n\x10streaming_config\x18\x01 \x01(\x0b\x32\x32.google.cloud.speech.v1.StreamingRecognitionConfigH\x00\x12\x17\n\raudio_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request\"\xa7\x03\n\x1aStreamingRecognitionConfig\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12\x18\n\x10single_utterance\x18\x02 \x01(\x08\x12\x17\n\x0finterim_results\x18\x03 \x01(\x08\x12$\n\x1c\x65nable_voice_activity_events\x18\x05 \x01(\x08\x12g\n\x16voice_activity_timeout\x18\x06 \x01(\x0b\x32G.google.cloud.speech.v1.StreamingRecognitionConfig.VoiceActivityTimeout\x1a\x86\x01\n\x14VoiceActivityTimeout\x12\x37\n\x14speech_start_timeout\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x35\n\x12speech_end_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xca\x08\n\x11RecognitionConfig\x12I\n\x08\x65ncoding\x18\x01 \x01(\x0e\x32\x37.google.cloud.speech.v1.RecognitionConfig.AudioEncoding\x12\x19\n\x11sample_rate_hertz\x18\x02 \x01(\x05\x12\x1b\n\x13\x61udio_channel_count\x18\x07 \x01(\x05\x12/\n\'enable_separate_recognition_per_channel\x18\x0c \x01(\x08\x12\x1a\n\rlanguage_code\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\"\n\x1a\x61lternative_language_codes\x18\x12 \x03(\t\x12\x18\n\x10max_alternatives\x18\x04 \x01(\x05\x12\x18\n\x10profanity_filter\x18\x05 \x01(\x08\x12<\n\nadaptation\x18\x14 \x01(\x0b\x32(.google.cloud.speech.v1.SpeechAdaptation\x12V\n\x18transcript_normalization\x18\x18 \x01(\x0b\x32/.google.cloud.speech.v1.TranscriptNormalizationB\x03\xe0\x41\x01\x12>\n\x0fspeech_contexts\x18\x06 \x03(\x0b\x32%.google.cloud.speech.v1.SpeechContext\x12 \n\x18\x65nable_word_time_offsets\x18\x08 \x01(\x08\x12\x1e\n\x16\x65nable_word_confidence\x18\x0f \x01(\x08\x12$\n\x1c\x65nable_automatic_punctuation\x18\x0b \x01(\x08\x12=\n\x19\x65nable_spoken_punctuation\x18\x16 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x14\x65nable_spoken_emojis\x18\x17 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12L\n\x12\x64iarization_config\x18\x13 \x01(\x0b\x32\x30.google.cloud.speech.v1.SpeakerDiarizationConfig\x12=\n\x08metadata\x18\t \x01(\x0b\x32+.google.cloud.speech.v1.RecognitionMetadata\x12\r\n\x05model\x18\r \x01(\t\x12\x14\n\x0cuse_enhanced\x18\x0e 
\x01(\x08\"\xa3\x01\n\rAudioEncoding\x12\x18\n\x14\x45NCODING_UNSPECIFIED\x10\x00\x12\x0c\n\x08LINEAR16\x10\x01\x12\x08\n\x04\x46LAC\x10\x02\x12\t\n\x05MULAW\x10\x03\x12\x07\n\x03\x41MR\x10\x04\x12\n\n\x06\x41MR_WB\x10\x05\x12\x0c\n\x08OGG_OPUS\x10\x06\x12\x1a\n\x16SPEEX_WITH_HEADER_BYTE\x10\x07\x12\x07\n\x03MP3\x10\x08\x12\r\n\tWEBM_OPUS\x10\t\"\x90\x01\n\x18SpeakerDiarizationConfig\x12\"\n\x1a\x65nable_speaker_diarization\x18\x01 \x01(\x08\x12\x19\n\x11min_speaker_count\x18\x02 \x01(\x05\x12\x19\n\x11max_speaker_count\x18\x03 \x01(\x05\x12\x1a\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x05\x18\x01\xe0\x41\x03\"\xa4\x08\n\x13RecognitionMetadata\x12U\n\x10interaction_type\x18\x01 \x01(\x0e\x32;.google.cloud.speech.v1.RecognitionMetadata.InteractionType\x12$\n\x1cindustry_naics_code_of_audio\x18\x03 \x01(\r\x12[\n\x13microphone_distance\x18\x04 \x01(\x0e\x32>.google.cloud.speech.v1.RecognitionMetadata.MicrophoneDistance\x12Z\n\x13original_media_type\x18\x05 \x01(\x0e\x32=.google.cloud.speech.v1.RecognitionMetadata.OriginalMediaType\x12^\n\x15recording_device_type\x18\x06 \x01(\x0e\x32?.google.cloud.speech.v1.RecognitionMetadata.RecordingDeviceType\x12\x1d\n\x15recording_device_name\x18\x07 \x01(\t\x12\x1a\n\x12original_mime_type\x18\x08 \x01(\t\x12\x13\n\x0b\x61udio_topic\x18\n \x01(\t\"\xc5\x01\n\x0fInteractionType\x12 \n\x1cINTERACTION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nDISCUSSION\x10\x01\x12\x10\n\x0cPRESENTATION\x10\x02\x12\x0e\n\nPHONE_CALL\x10\x03\x12\r\n\tVOICEMAIL\x10\x04\x12\x1b\n\x17PROFESSIONALLY_PRODUCED\x10\x05\x12\x10\n\x0cVOICE_SEARCH\x10\x06\x12\x11\n\rVOICE_COMMAND\x10\x07\x12\r\n\tDICTATION\x10\x08\"d\n\x12MicrophoneDistance\x12#\n\x1fMICROPHONE_DISTANCE_UNSPECIFIED\x10\x00\x12\r\n\tNEARFIELD\x10\x01\x12\x0c\n\x08MIDFIELD\x10\x02\x12\x0c\n\x08\x46\x41RFIELD\x10\x03\"N\n\x11OriginalMediaType\x12#\n\x1fORIGINAL_MEDIA_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41UDIO\x10\x01\x12\t\n\x05VIDEO\x10\x02\"\xa4\x01\n\x13RecordingDeviceType\x12%\n!RECORDING_DEVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nSMARTPHONE\x10\x01\x12\x06\n\x02PC\x10\x02\x12\x0e\n\nPHONE_LINE\x10\x03\x12\x0b\n\x07VEHICLE\x10\x04\x12\x18\n\x14OTHER_OUTDOOR_DEVICE\x10\x05\x12\x17\n\x13OTHER_INDOOR_DEVICE\x10\x06:\x02\x18\x01\"/\n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t\x12\r\n\x05\x62oost\x18\x04 \x01(\x02\"D\n\x10RecognitionAudio\x12\x11\n\x07\x63ontent\x18\x01 \x01(\x0cH\x00\x12\r\n\x03uri\x18\x02 \x01(\tH\x00\x42\x0e\n\x0c\x61udio_source\"\xed\x01\n\x11RecognizeResponse\x12@\n\x07results\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1.SpeechRecognitionResult\x12\x34\n\x11total_billed_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12L\n\x16speech_adaptation_info\x18\x07 \x01(\x0b\x32,.google.cloud.speech.v1.SpeechAdaptationInfo\x12\x12\n\nrequest_id\x18\x08 \x01(\x03\"\xe9\x02\n\x1cLongRunningRecognizeResponse\x12@\n\x07results\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1.SpeechRecognitionResult\x12\x34\n\x11total_billed_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x45\n\routput_config\x18\x06 \x01(\x0b\x32..google.cloud.speech.v1.TranscriptOutputConfig\x12(\n\x0coutput_error\x18\x07 \x01(\x0b\x32\x12.google.rpc.Status\x12L\n\x16speech_adaptation_info\x18\x08 \x01(\x0b\x32,.google.cloud.speech.v1.SpeechAdaptationInfo\x12\x12\n\nrequest_id\x18\t \x01(\x03\"\xb0\x01\n\x1cLongRunningRecognizeMetadata\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10last_update_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x03uri\x18\x04 \x01(\tB\x03\xe0\x41\x03\"\xd1\x04\n\x1aStreamingRecognizeResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x43\n\x07results\x18\x02 \x03(\x0b\x32\x32.google.cloud.speech.v1.StreamingRecognitionResult\x12]\n\x11speech_event_type\x18\x04 \x01(\x0e\x32\x42.google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType\x12\x34\n\x11speech_event_time\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x34\n\x11total_billed_time\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12L\n\x16speech_adaptation_info\x18\t \x01(\x0b\x32,.google.cloud.speech.v1.SpeechAdaptationInfo\x12\x12\n\nrequest_id\x18\n \x01(\x03\"\x9d\x01\n\x0fSpeechEventType\x12\x1c\n\x18SPEECH_EVENT_UNSPECIFIED\x10\x00\x12\x1b\n\x17\x45ND_OF_SINGLE_UTTERANCE\x10\x01\x12\x19\n\x15SPEECH_ACTIVITY_BEGIN\x10\x02\x12\x17\n\x13SPEECH_ACTIVITY_END\x10\x03\x12\x1b\n\x17SPEECH_ACTIVITY_TIMEOUT\x10\x04\"\xf2\x01\n\x1aStreamingRecognitionResult\x12J\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x34.google.cloud.speech.v1.SpeechRecognitionAlternative\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12\x11\n\tstability\x18\x03 \x01(\x02\x12\x32\n\x0fresult_end_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x63hannel_tag\x18\x05 \x01(\x05\x12\x1a\n\rlanguage_code\x18\x06 \x01(\tB\x03\xe0\x41\x03\"\xca\x01\n\x17SpeechRecognitionResult\x12J\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x34.google.cloud.speech.v1.SpeechRecognitionAlternative\x12\x13\n\x0b\x63hannel_tag\x18\x02 \x01(\x05\x12\x32\n\x0fresult_end_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x1a\n\rlanguage_code\x18\x05 \x01(\tB\x03\xe0\x41\x03\"w\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12/\n\x05words\x18\x03 \x03(\x0b\x32 .google.cloud.speech.v1.WordInfo\"\xc0\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x1a\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x05\x18\x01\xe0\x41\x03\x12\x1a\n\rspeaker_label\x18\x06 \x01(\tB\x03\xe0\x41\x03\"K\n\x14SpeechAdaptationInfo\x12\x1a\n\x12\x61\x64\x61ptation_timeout\x18\x01 \x01(\x08\x12\x17\n\x0ftimeout_message\x18\x04 \x01(\t2\xd1\x04\n\x06Speech\x12\x90\x01\n\tRecognize\x12(.google.cloud.speech.v1.RecognizeRequest\x1a).google.cloud.speech.v1.RecognizeResponse\".\x82\xd3\xe4\x93\x02\x19\"\x14/v1/speech:recognize:\x01*\xda\x41\x0c\x63onfig,audio\x12\xe4\x01\n\x14LongRunningRecognize\x12\x33.google.cloud.speech.v1.LongRunningRecognizeRequest\x1a\x1d.google.longrunning.Operation\"x\x82\xd3\xe4\x93\x02$\"\x1f/v1/speech:longrunningrecognize:\x01*\xda\x41\x0c\x63onfig,audio\xca\x41<\n\x1cLongRunningRecognizeResponse\x12\x1cLongRunningRecognizeMetadata\x12\x81\x01\n\x12StreamingRecognize\x12\x31.google.cloud.speech.v1.StreamingRecognizeRequest\x1a\x32.google.cloud.speech.v1.StreamingRecognizeResponse\"\x00(\x01\x30\x01\x1aI\xca\x41\x15speech.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBh\n\x1a\x63om.google.cloud.speech.v1B\x0bSpeechProtoP\x01Z2cloud.google.com/go/speech/apiv1/speechpb;speechpb\xf8\x01\x01\xa2\x02\x03GCSb\x06proto3"

  pool = Google::Protobuf::DescriptorPool.generated_pool

@@ -7,7 +7,7 @@ require 'google/protobuf'
  require 'google/api/resource_pb'


- descriptor_data = "\n%google/cloud/speech/v1/resource.proto\x12\x16google.cloud.speech.v1\x1a\x19google/api/resource.proto\"\xfc\x01\n\x0b\x43ustomClass\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ustom_class_id\x18\x02 \x01(\t\x12<\n\x05items\x18\x03 \x03(\x0b\x32-.google.cloud.speech.v1.CustomClass.ClassItem\x1a\x1a\n\tClassItem\x12\r\n\x05value\x18\x01 \x01(\t:l\xea\x41i\n!speech.googleapis.com/CustomClass\x12\x44projects/{project}/locations/{location}/customClasses/{custom_class}\"\xf2\x01\n\tPhraseSet\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x39\n\x07phrases\x18\x02 \x03(\x0b\x32(.google.cloud.speech.v1.PhraseSet.Phrase\x12\r\n\x05\x62oost\x18\x04 \x01(\x02\x1a&\n\x06Phrase\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x62oost\x18\x02 \x01(\x02:e\xea\x41\x62\n\x1fspeech.googleapis.com/PhraseSet\x12?projects/{project}/locations/{location}/phraseSets/{phrase_set}\"\xbd\x02\n\x10SpeechAdaptation\x12\x36\n\x0bphrase_sets\x18\x01 \x03(\x0b\x32!.google.cloud.speech.v1.PhraseSet\x12\x43\n\x15phrase_set_references\x18\x02 \x03(\tB$\xfa\x41!\n\x1fspeech.googleapis.com/PhraseSet\x12;\n\x0e\x63ustom_classes\x18\x03 \x03(\x0b\x32#.google.cloud.speech.v1.CustomClass\x12J\n\x0c\x61\x62nf_grammar\x18\x04 \x01(\x0b\x32\x34.google.cloud.speech.v1.SpeechAdaptation.ABNFGrammar\x1a#\n\x0b\x41\x42NFGrammar\x12\x14\n\x0c\x61\x62nf_strings\x18\x01 \x03(\tBp\n\x1a\x63om.google.cloud.speech.v1B\x13SpeechResourceProtoP\x01Z2cloud.google.com/go/speech/apiv1/speechpb;speechpb\xf8\x01\x01\xa2\x02\x03GCSb\x06proto3"
+ descriptor_data = "\n%google/cloud/speech/v1/resource.proto\x12\x16google.cloud.speech.v1\x1a\x19google/api/resource.proto\"\xfc\x01\n\x0b\x43ustomClass\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ustom_class_id\x18\x02 \x01(\t\x12<\n\x05items\x18\x03 \x03(\x0b\x32-.google.cloud.speech.v1.CustomClass.ClassItem\x1a\x1a\n\tClassItem\x12\r\n\x05value\x18\x01 \x01(\t:l\xea\x41i\n!speech.googleapis.com/CustomClass\x12\x44projects/{project}/locations/{location}/customClasses/{custom_class}\"\xf2\x01\n\tPhraseSet\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x39\n\x07phrases\x18\x02 \x03(\x0b\x32(.google.cloud.speech.v1.PhraseSet.Phrase\x12\r\n\x05\x62oost\x18\x04 \x01(\x02\x1a&\n\x06Phrase\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x62oost\x18\x02 \x01(\x02:e\xea\x41\x62\n\x1fspeech.googleapis.com/PhraseSet\x12?projects/{project}/locations/{location}/phraseSets/{phrase_set}\"\xbd\x02\n\x10SpeechAdaptation\x12\x36\n\x0bphrase_sets\x18\x01 \x03(\x0b\x32!.google.cloud.speech.v1.PhraseSet\x12\x43\n\x15phrase_set_references\x18\x02 \x03(\tB$\xfa\x41!\n\x1fspeech.googleapis.com/PhraseSet\x12;\n\x0e\x63ustom_classes\x18\x03 \x03(\x0b\x32#.google.cloud.speech.v1.CustomClass\x12J\n\x0c\x61\x62nf_grammar\x18\x04 \x01(\x0b\x32\x34.google.cloud.speech.v1.SpeechAdaptation.ABNFGrammar\x1a#\n\x0b\x41\x42NFGrammar\x12\x14\n\x0c\x61\x62nf_strings\x18\x01 \x03(\t\"\xa3\x01\n\x17TranscriptNormalization\x12\x46\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x35.google.cloud.speech.v1.TranscriptNormalization.Entry\x1a@\n\x05\x45ntry\x12\x0e\n\x06search\x18\x01 \x01(\t\x12\x0f\n\x07replace\x18\x02 \x01(\t\x12\x16\n\x0e\x63\x61se_sensitive\x18\x03 \x01(\x08\x42p\n\x1a\x63om.google.cloud.speech.v1B\x13SpeechResourceProtoP\x01Z2cloud.google.com/go/speech/apiv1/speechpb;speechpb\xf8\x01\x01\xa2\x02\x03GCSb\x06proto3"

  pool = Google::Protobuf::DescriptorPool.generated_pool

@@ -43,6 +43,8 @@ module Google
  PhraseSet::Phrase = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.speech.v1.PhraseSet.Phrase").msgclass
  SpeechAdaptation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.speech.v1.SpeechAdaptation").msgclass
  SpeechAdaptation::ABNFGrammar = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.speech.v1.SpeechAdaptation.ABNFGrammar").msgclass
+ TranscriptNormalization = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.speech.v1.TranscriptNormalization").msgclass
+ TranscriptNormalization::Entry = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.speech.v1.TranscriptNormalization.Entry").msgclass
  end
  end
  end
@@ -156,7 +156,8 @@ module Google
  credentials: credentials,
  endpoint: @config.endpoint,
  channel_args: @config.channel_args,
- interceptors: @config.interceptors
+ interceptors: @config.interceptors,
+ channel_pool_config: @config.channel_pool
  )
  end

@@ -543,6 +544,14 @@ module Google
  end
  end

+ ##
+ # Configuration for the channel pool
+ # @return [::Gapic::ServiceStub::ChannelPool::Configuration]
+ #
+ def channel_pool
+ @channel_pool ||= ::Gapic::ServiceStub::ChannelPool::Configuration.new
+ end
+
  ##
  # Configuration RPC class for the Speech API.
  #
@@ -93,7 +93,8 @@ module Google
  credentials: credentials,
  endpoint: @config.endpoint,
  channel_args: @config.channel_args,
- interceptors: @config.interceptors
+ interceptors: @config.interceptors,
+ channel_pool_config: @config.channel_pool
  )

  # Used by an LRO wrapper for some methods of this service
@@ -693,6 +694,14 @@ module Google
  end
  end

+ ##
+ # Configuration for the channel pool
+ # @return [::Gapic::ServiceStub::ChannelPool::Configuration]
+ #
+ def channel_pool
+ @channel_pool ||= ::Gapic::ServiceStub::ChannelPool::Configuration.new
+ end
+
  ##
  # Configuration RPC class for the Operations API.
  #
@@ -186,6 +186,22 @@ module Google
  # @return [::Google::Cloud::Speech::V1::RecognizeResponse]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Speech::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::RecognizeRequest.new
+ #
+ # # Call the recognize method.
+ # result = client.recognize request
+ #
+ # # The returned object is of type Google::Cloud::Speech::V1::RecognizeResponse.
+ # p result
+ #
  def recognize request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -258,6 +274,29 @@ module Google
  # @return [::Gapic::Operation]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/speech/v1"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Speech::V1::Speech::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Speech::V1::LongRunningRecognizeRequest.new
+ #
+ # # Call the long_running_recognize method.
+ # result = client.long_running_recognize request
+ #
+ # # The returned object is of type Gapic::Operation. You can use it to
+ # # check the status of an operation, cancel it, or wait for results.
+ # # Here is how to wait for a response.
+ # result.wait_until_done! timeout: 60
+ # if result.response?
+ # p result.response
+ # else
+ # puts "No response received."
+ # end
+ #
  def long_running_recognize request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -136,6 +136,26 @@ module Google
  # @return [::Gapic::Operation]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/longrunning"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Longrunning::Operations::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Longrunning::ListOperationsRequest.new
+ #
+ # # Call the list_operations method.
+ # result = client.list_operations request
+ #
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
+ # # over elements, and API calls will be issued to fetch pages as needed.
+ # result.each do |item|
+ # # Each element is of type ::Google::Longrunning::Operation.
+ # p item
+ # end
+ #
  def list_operations request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -201,6 +221,29 @@ module Google
  # @return [::Gapic::Operation]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/longrunning"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Longrunning::Operations::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Longrunning::GetOperationRequest.new
+ #
+ # # Call the get_operation method.
+ # result = client.get_operation request
+ #
+ # # The returned object is of type Gapic::Operation. You can use it to
+ # # check the status of an operation, cancel it, or wait for results.
+ # # Here is how to wait for a response.
+ # result.wait_until_done! timeout: 60
+ # if result.response?
+ # p result.response
+ # else
+ # puts "No response received."
+ # end
+ #
  def get_operation request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -267,6 +310,22 @@ module Google
  # @return [::Google::Protobuf::Empty]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/longrunning"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Longrunning::Operations::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Longrunning::DeleteOperationRequest.new
+ #
+ # # Call the delete_operation method.
+ # result = client.delete_operation request
+ #
+ # # The returned object is of type Google::Protobuf::Empty.
+ # p result
+ #
  def delete_operation request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -338,6 +397,22 @@ module Google
  # @return [::Google::Protobuf::Empty]
  #
  # @raise [::Google::Cloud::Error] if the REST call is aborted.
+ #
+ # @example Basic example
+ # require "google/longrunning"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Longrunning::Operations::Rest::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Longrunning::CancelOperationRequest.new
+ #
+ # # Call the cancel_operation method.
+ # result = client.cancel_operation request
+ #
+ # # The returned object is of type Google::Protobuf::Empty.
+ # p result
+ #
  def cancel_operation request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

@@ -21,7 +21,7 @@ module Google
  module Cloud
  module Speech
  module V1
- VERSION = "0.13.1"
+ VERSION = "0.15.0"
  end
  end
  end
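The bump from 0.13.1 to 0.15.0 is a minor-version update; an application tracking this gem might pick it up with an entry along these lines (the constraint shown is only a suggestion):

    # Gemfile
    gem "google-cloud-speech-v1", "~> 0.15"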
@@ -66,6 +66,20 @@ module Google
  # a non-empty value will be returned. The user will not be aware of what
  # non-empty value to expect.
  NON_EMPTY_DEFAULT = 7
+
+ # Denotes that the field in a resource (a message annotated with
+ # google.api.resource) is used in the resource name to uniquely identify the
+ # resource. For AIP-compliant APIs, this should only be applied to the
+ # `name` field on the resource.
+ #
+ # This behavior should not be applied to references to other resources within
+ # the message.
+ #
+ # The identifier field of resources often have different field behavior
+ # depending on the request it is embedded in (e.g. for Create methods name
+ # is optional and unused, while for Update methods it is required). Instead
+ # of method-specific annotations, only `IDENTIFIER` is required.
+ IDENTIFIER = 8
  end
  end
  end
@@ -227,6 +227,12 @@ module Google
  # adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
  # documentation.
  # When speech adaptation is set it supersedes the `speech_contexts` field.
+ # @!attribute [rw] transcript_normalization
+ # @return [::Google::Cloud::Speech::V1::TranscriptNormalization]
+ # Optional. Use transcription normalization to automatically replace parts of
+ # the transcript with phrases of your choosing. For StreamingRecognize, this
+ # normalization only applies to stable partial transcripts (stability > 0.8)
+ # and final transcripts.
  # @!attribute [rw] speech_contexts
  # @return [::Array<::Google::Cloud::Speech::V1::SpeechContext>]
  # Array of {::Google::Cloud::Speech::V1::SpeechContext SpeechContext}.
@@ -415,6 +421,12 @@ module Google
  # wideband is supported. `sample_rate_hertz` must be 16000.
  SPEEX_WITH_HEADER_BYTE = 7

+ # MP3 audio. MP3 encoding is a Beta feature and only available in
+ # v1p1beta1. Support all standard MP3 bitrates (which range from 32-320
+ # kbps). When using this encoding, `sample_rate_hertz` has to match the
+ # sample rate of the file being used.
+ MP3 = 8
+
  # Opus encoded audio frames in WebM container
  # ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
  # one of 8000, 12000, 16000, 24000, or 48000.
@@ -426,8 +438,8 @@ module Google
  # @!attribute [rw] enable_speaker_diarization
  # @return [::Boolean]
  # If 'true', enables speaker detection for each recognized word in
- # the top alternative of the recognition result using a speaker_tag provided
- # in the WordInfo.
+ # the top alternative of the recognition result using a speaker_label
+ # provided in the WordInfo.
  # @!attribute [rw] min_speaker_count
  # @return [::Integer]
  # Minimum number of speakers in the conversation. This range gives you more
@@ -945,8 +957,17 @@ module Google
  # Output only. A distinct integer value is assigned for every speaker within
  # the audio. This field specifies which one of those speakers was detected to
  # have spoken this word. Value ranges from '1' to diarization_speaker_count.
- # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
+ # speaker_tag is set if enable_speaker_diarization = 'true' and only for the
  # top alternative.
+ # Note: Use speaker_label instead.
+ # @!attribute [r] speaker_label
+ # @return [::String]
+ # Output only. A label value assigned for every unique speaker within the
+ # audio. This field specifies which speaker was detected to have spoken this
+ # word. For some models, like medical_conversation this can be actual speaker
+ # role, for example "patient" or "provider", but generally this would be a
+ # number identifying a speaker. This field is only set if
+ # enable_speaker_diarization = 'true' and only for the top alternative.
  class WordInfo
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
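The new read-only `speaker_label` on `WordInfo` supersedes the deprecated `speaker_tag`. A minimal sketch of reading it, assuming a recognize call with speaker diarization enabled (the GCS URI and speaker counts are placeholders, not part of this diff):

    require "google/cloud/speech/v1"

    speech = Google::Cloud::Speech::V1::Speech::Client.new

    config = {
      encoding:          :LINEAR16,
      sample_rate_hertz: 16_000,
      language_code:     "en-US",
      diarization_config: {
        enable_speaker_diarization: true,
        min_speaker_count: 2, # placeholder values
        max_speaker_count: 2
      }
    }
    audio = { uri: "gs://my-bucket/conversation.wav" } # placeholder GCS path

    response = speech.recognize config: config, audio: audio

    # speaker_label is populated only for words in the top alternative.
    words = response.results.last.alternatives.first.words
    words.each { |word| puts "#{word.speaker_label}: #{word.word}" }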
@@ -149,6 +149,36 @@ module Google
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end
  end
+
+ # Transcription normalization configuration. Use transcription normalization
+ # to automatically replace parts of the transcript with phrases of your
+ # choosing. For StreamingRecognize, this normalization only applies to stable
+ # partial transcripts (stability > 0.8) and final transcripts.
+ # @!attribute [rw] entries
+ # @return [::Array<::Google::Cloud::Speech::V1::TranscriptNormalization::Entry>]
+ # A list of replacement entries. We will perform replacement with one entry
+ # at a time. For example, the second entry in ["cat" => "dog", "mountain cat"
+ # => "mountain dog"] will never be applied because we will always process the
+ # first entry before it. At most 100 entries.
+ class TranscriptNormalization
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # A single replacement configuration.
+ # @!attribute [rw] search
+ # @return [::String]
+ # What to replace. Max length is 100 characters.
+ # @!attribute [rw] replace
+ # @return [::String]
+ # What to replace with. Max length is 100 characters.
+ # @!attribute [rw] case_sensitive
+ # @return [::Boolean]
+ # Whether the search is case sensitive.
+ class Entry
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
  end
  end
  end
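The `TranscriptNormalization` message added above plugs into the new `transcript_normalization` field on `RecognitionConfig`. A minimal sketch of supplying replacement entries on a recognize request (the audio URI and replacement strings are placeholders):

    require "google/cloud/speech/v1"

    speech = Google::Cloud::Speech::V1::Speech::Client.new

    config = {
      encoding:          :LINEAR16,
      sample_rate_hertz: 16_000,
      language_code:     "en-US",
      transcript_normalization: {
        # Entries are applied one at a time, in order; at most 100 entries.
        entries: [
          { search: "cya", replace: "see you", case_sensitive: false }
        ]
      }
    }
    audio = { uri: "gs://my-bucket/audio.wav" } # placeholder GCS path

    response = speech.recognize config: config, audio: audio
    response.results.each { |result| puts result.alternatives.first.transcript }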
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-speech-v1
  version: !ruby/object:Gem::Version
- version: 0.13.1
+ version: 0.15.0
  platform: ruby
  authors:
  - Google LLC
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-06-06 00:00:00.000000000 Z
+ date: 2023-11-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 0.19.1
+ version: 0.20.0
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.a
@@ -26,7 +26,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 0.19.1
+ version: 0.20.0
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.a
@@ -233,7 +233,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.4.2
+ rubygems_version: 3.4.19
  signing_key:
  specification_version: 4
  summary: Converts audio to text by applying powerful neural network models.