aws-sdk-transcribestreamingservice 1.42.0 → 1.44.0

@@ -27,7 +27,7 @@ require 'aws-sdk-core/plugins/checksum_algorithm.rb'
  require 'aws-sdk-core/plugins/defaults_mode.rb'
  require 'aws-sdk-core/plugins/recursion_detection.rb'
  require 'aws-sdk-core/plugins/invocation_id.rb'
- require 'aws-sdk-core/plugins/signature_v4.rb'
+ require 'aws-sdk-core/plugins/sign.rb'
  require 'aws-sdk-core/plugins/protocols/rest_json.rb'
  require 'aws-sdk-core/plugins/event_stream_configuration.rb'

@@ -59,9 +59,10 @@ module Aws::TranscribeStreamingService
  add_plugin(Aws::Plugins::DefaultsMode)
  add_plugin(Aws::Plugins::RecursionDetection)
  add_plugin(Aws::Plugins::InvocationId)
- add_plugin(Aws::Plugins::SignatureV4)
+ add_plugin(Aws::Plugins::Sign)
  add_plugin(Aws::Plugins::Protocols::RestJson)
  add_plugin(Aws::Plugins::EventStreamConfiguration)
+ add_plugin(Aws::TranscribeStreamingService::Plugins::Endpoints)

  # @option options [required, Aws::CredentialProvider] :credentials
  # Your AWS credentials. This can be an instance of any one of the
@@ -232,6 +233,19 @@ module Aws::TranscribeStreamingService
  # ** Please note ** When response stubbing is enabled, no HTTP
  # requests are made, and retries are disabled.
  #
+ # @option options [Aws::TokenProvider] :token_provider
+ # A Bearer Token Provider. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
+ # tokens.
+ #
+ # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
+ # access token generated from `aws login`.
+ #
+ # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
+ # will be used to search for tokens configured for your profile in shared configuration files.
+ #
  # @option options [Boolean] :use_dualstack_endpoint
  # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
  # will be used if available.
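
The new `:token_provider` option documented above can be passed when constructing the client. A minimal sketch, assuming a placeholder region and token value; `Aws::StaticTokenProvider` is the class named in the option text:

    require 'aws-sdk-transcribestreamingservice'

    # 'example-bearer-token' is a placeholder, not a real credential.
    async_client = Aws::TranscribeStreamingService::AsyncClient.new(
      region: 'us-east-1',
      token_provider: Aws::StaticTokenProvider.new('example-bearer-token')
    )
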
@@ -245,6 +259,9 @@ module Aws::TranscribeStreamingService
  # When `true`, request parameters are validated before
  # sending the request.
  #
+ # @option options [Aws::TranscribeStreamingService::EndpointProvider] :endpoint_provider
+ # The endpoint provider used to resolve endpoints. Any object that responds to `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to `Aws::TranscribeStreamingService::EndpointParameters`
+ #
  def initialize(*args)
  unless Kernel.const_defined?("HTTP2")
  raise "Must include http/2 gem to use AsyncClient instances."
@@ -254,61 +271,109 @@ module Aws::TranscribeStreamingService

  # @!group API Operations

- # Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon
- # Transcribe Medical and the transcription results are streamed to your
- # application.
+ # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
+ # streamed to Amazon Transcribe Medical and the transcription results
+ # are streamed to your application.
+ #
+ # For more information on streaming with Amazon Transcribe Medical, see
+ # [Transcribing streaming audio][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
  #
  # @option params [required, String] :language_code
- # Indicates the source language used in the input audio stream. For
- # Amazon Transcribe Medical, this is US English (en-US).
+ # Specify the language code that represents the language spoken in your
+ # audio.
+ #
+ # Amazon Transcribe Medical only supports US English (`en-US`).
  #
  # @option params [required, Integer] :media_sample_rate_hertz
- # The sample rate of the input audio (in Hertz). Amazon Transcribe
- # medical supports a range from 16,000 Hz to 48,000 Hz. Note that the
+ # The sample rate of the input audio (in hertz). Amazon Transcribe
+ # Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the
  # sample rate you specify must match that of your audio.
  #
  # @option params [required, String] :media_encoding
- # The encoding used for the input audio.
+ # Specify the encoding used for the input audio. Supported formats are:
+ #
+ # * FLAC
+ #
+ # * OPUS-encoded audio in an Ogg container
+ #
+ # * PCM (only signed 16-bit little-endian audio formats, which does not
+ # include WAV)
+ #
+ # For more information, see [Media formats][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
  #
  # @option params [String] :vocabulary_name
- # The name of the medical custom vocabulary to use when processing the
- # real-time stream.
+ # Specify the name of the custom vocabulary that you want to use when
+ # processing your transcription. Note that vocabulary names are case
+ # sensitive.
  #
  # @option params [required, String] :specialty
- # The medical specialty of the clinician or provider.
+ # Specify the medical specialty contained in your audio.
  #
  # @option params [required, String] :type
- # The type of input audio. Choose `DICTATION` for a provider dictating
- # patient notes. Choose `CONVERSATION` for a dialogue between a patient
- # and one or more medical professionanls.
+ # Specify the type of input audio. For example, choose `DICTATION` for a
+ # provider dictating patient notes and `CONVERSATION` for a dialogue
+ # between a patient and a medical professional.
  #
  # @option params [Boolean] :show_speaker_label
- # When `true`, enables speaker identification in your real-time stream.
+ # Enables speaker partitioning (diarization) in your transcription
+ # output. Speaker partitioning labels the speech from individual
+ # speakers in your media file.
+ #
+ # For more information, see [Partitioning speakers (diarization)][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html
  #
  # @option params [String] :session_id
- # Optional. An identifier for the transcription session. If you don't
- # provide a session ID, Amazon Transcribe generates one for you and
- # returns it in the response.
+ # Specify a name for your transcription session. If you don't include
+ # this parameter in your request, Amazon Transcribe Medical generates an
+ # ID and returns it in the response.
+ #
+ # You can use a session ID to retry a streaming session.
  #
  # @option params [Boolean] :enable_channel_identification
- # When `true`, instructs Amazon Transcribe Medical to process each audio
- # channel separately and then merge the transcription output of each
- # channel into a single transcription.
+ # Enables channel identification in multi-channel audio.
+ #
+ # Channel identification transcribes the audio on each channel
+ # independently, then appends the output for each channel into one
+ # transcript.
+ #
+ # If you have multi-channel audio and do not enable channel
+ # identification, your audio is transcribed in a continuous manner and
+ # your transcript is not separated by channel.
+ #
+ # For more information, see [Transcribing multi-channel audio][1].
  #
- # Amazon Transcribe Medical also produces a transcription of each item.
- # An item includes the start time, end time, and any alternative
- # transcriptions.
  #
- # You can't set both `ShowSpeakerLabel` and
- # `EnableChannelIdentification` in the same request. If you set both,
- # your request returns a `BadRequestException`.
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html
  #
  # @option params [Integer] :number_of_channels
- # The number of channels that are in your audio stream.
+ # Specify the number of channels in your audio stream. Up to two
+ # channels are supported.
  #
  # @option params [String] :content_identification_type
- # Set this field to `PHI` to identify personal health information in the
- # transcription output.
+ # Labels all personal health information (PHI) identified in your
+ # transcript.
+ #
+ # Content identification is performed at the segment level; PHI is
+ # flagged upon complete transcription of an audio segment.
+ #
+ # For more information, see [Identifying personal health information
+ # (PHI) in a transcription][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html
  #
  # @return [Types::StartMedicalStreamTranscriptionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
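
Because the operation documented above is a bidirectional stream, audio is pushed through an input event stream handler while transcripts arrive on an output stream. A hedged usage sketch in the spirit of the gem's own examples; parameter values and the `pcm_chunk` variable are placeholders:

    input_stream = Aws::TranscribeStreamingService::EventStreams::AudioStream.new

    async_resp = async_client.start_medical_stream_transcription(
      language_code: 'en-US',
      media_sample_rate_hertz: 16_000,
      media_encoding: 'pcm',
      specialty: 'PRIMARYCARE',
      type: 'DICTATION',
      input_event_stream_handler: input_stream
    ) do |out_stream|
      # Handle transcription results as they arrive on the output stream.
      out_stream.on_transcript_event_event do |event|
        event.transcript.results.each { |result| p result }
      end
    end

    input_stream.signal_audio_event_event(audio_chunk: pcm_chunk) # placeholder audio bytes
    input_stream.signal_end_stream
    async_resp.wait
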
@@ -421,7 +486,7 @@ module Aws::TranscribeStreamingService
  # @example Request syntax with placeholder values
  #
  # async_resp = async_client.start_medical_stream_transcription({
- # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
+ # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
  # media_sample_rate_hertz: 1, # required
  # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
  # vocabulary_name: "VocabularyName",
@@ -442,7 +507,7 @@ module Aws::TranscribeStreamingService
  # @example Response structure
  #
  # resp.request_id #=> String
- # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
+ # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
  # resp.media_sample_rate_hertz #=> Integer
  # resp.media_encoding #=> String, one of "pcm", "ogg-opus", "flac"
  # resp.vocabulary_name #=> String
@@ -518,186 +583,338 @@ module Aws::TranscribeStreamingService
  req = build_request(:start_medical_stream_transcription, params)

  req.context[:input_event_stream_handler] = input_event_stream_handler
- req.handlers.add(Aws::Binary::EncodeHandler, priority: 95)
+ req.handlers.add(Aws::Binary::EncodeHandler, priority: 55)
  req.context[:output_event_stream_handler] = output_event_stream_handler
- req.handlers.add(Aws::Binary::DecodeHandler, priority: 95)
+ req.handlers.add(Aws::Binary::DecodeHandler, priority: 55)

  req.send_request(options, &block)
  end

- # Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon
- # Transcribe and the transcription results are streamed to your
- # application.
+ # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
+ # streamed to Amazon Transcribe and the transcription results are
+ # streamed to your application.
  #
- # The following are encoded as HTTP/2 headers:
+ # The following are encoded as headers:
  #
- # * x-amzn-transcribe-language-code
+ # * language-code
  #
- # * x-amzn-transcribe-media-encoding
+ # * media-encoding
  #
- # * x-amzn-transcribe-sample-rate
+ # * sample-rate
  #
- # * x-amzn-transcribe-session-id
+ # * session-id
  #
- # See the [ SDK for Go API Reference][1] for more detail.
+ # For more information on streaming with Amazon Transcribe, see
+ # [Transcribing streaming audio][1].
  #
  #
  #
- # [1]: https://docs.aws.amazon.com/sdk-for-go/api/service/transcribestreamingservice/#TranscribeStreamingService.StartStreamTranscription
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
  #
  # @option params [String] :language_code
- # The language code of the input audio stream.
+ # Specify the language code that represents the language spoken in your
+ # audio.
+ #
+ # If you're unsure of the language spoken in your audio, consider using
+ # `IdentifyLanguage` to enable automatic language identification.
+ #
+ # For a list of languages supported with Amazon Transcribe streaming,
+ # refer to the [Supported languages][1] table.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
  #
  # @option params [required, Integer] :media_sample_rate_hertz
- # The sample rate of the input audio (in Hertz). Low-quality audio, such
+ # The sample rate of the input audio (in hertz). Low-quality audio, such
  # as telephone audio, is typically around 8,000 Hz. High-quality audio
  # typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample
  # rate you specify must match that of your audio.
  #
  # @option params [required, String] :media_encoding
- # The encoding used for the input audio.
+ # Specify the encoding used for the input audio. Supported formats are:
+ #
+ # * FLAC
+ #
+ # * OPUS-encoded audio in an Ogg container
+ #
+ # * PCM (only signed 16-bit little-endian audio formats, which does not
+ # include WAV)
+ #
+ # For more information, see [Media formats][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
  #
  # @option params [String] :vocabulary_name
- # The name of the custom vocabulary you want to use with your
- # transcription.
+ # Specify the name of the custom vocabulary that you want to use when
+ # processing your transcription. Note that vocabulary names are case
+ # sensitive.
+ #
+ # If the language of the specified custom vocabulary doesn't match the
+ # language identified in your media, your job fails.
+ #
+ # This parameter is **not** intended for use with the `IdentifyLanguage`
+ # parameter. If you're including `IdentifyLanguage` in your request and
+ # want to use one or more custom vocabularies with your transcription,
+ # use the `VocabularyNames` parameter instead.
  #
- # This operation is not intended for use in conjunction with the
- # `IdentifyLanguage` operation. If you're using `IdentifyLanguage` in
- # your request and want to use one or more custom vocabularies with your
- # transcription, use the `VocabularyNames` operation instead.
+ # For more information, see [Custom vocabularies][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
  #
  # @option params [String] :session_id
- # A identifier for the transcription session. Use this parameter when
- # you want to retry a session. If you don't provide a session ID,
- # Amazon Transcribe will generate one for you and return it in the
- # response.
+ # Specify a name for your transcription session. If you don't include
+ # this parameter in your request, Amazon Transcribe generates an ID and
+ # returns it in the response.
+ #
+ # You can use a session ID to retry a streaming session.
  #
  # @option params [String] :vocabulary_filter_name
- # The name of the vocabulary filter you want to use with your
- # transcription.
+ # Specify the name of the custom vocabulary filter that you want to use
+ # when processing your transcription. Note that vocabulary filter names
+ # are case sensitive.
+ #
+ # If the language of the specified custom vocabulary filter doesn't
+ # match the language identified in your media, your job fails.
+ #
+ # This parameter is **not** intended for use with the `IdentifyLanguage`
+ # parameter. If you're including `IdentifyLanguage` in your request and
+ # want to use one or more vocabulary filters with your transcription,
+ # use the `VocabularyFilterNames` parameter instead.
  #
- # This operation is not intended for use in conjunction with the
- # `IdentifyLanguage` operation. If you're using `IdentifyLanguage` in
- # your request and want to use one or more vocabulary filters with your
- # transcription, use the `VocabularyFilterNames` operation instead.
+ # For more information, see [Using vocabulary filtering with unwanted
+ # words][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
  #
  # @option params [String] :vocabulary_filter_method
- # The manner in which you use your vocabulary filter to filter words in
- # your transcript. `Remove` removes filtered words from your
- # transcription results. `Mask` masks filtered words with a `***` in
- # your transcription results. `Tag` keeps the filtered words in your
- # transcription results and tags them. The tag appears as
- # `VocabularyFilterMatch` equal to `True`.
+ # Specify how you want your vocabulary filter applied to your
+ # transcript.
+ #
+ # To replace words with `***`, choose `mask`.
+ #
+ # To delete words, choose `remove`.
+ #
+ # To flag words without changing them, choose `tag`.
  #
  # @option params [Boolean] :show_speaker_label
- # When `true`, enables speaker identification in your media stream.
+ # Enables speaker partitioning (diarization) in your transcription
+ # output. Speaker partitioning labels the speech from individual
+ # speakers in your media file.
+ #
+ # For more information, see [Partitioning speakers (diarization)][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html
  #
  # @option params [Boolean] :enable_channel_identification
- # When `true`, instructs Amazon Transcribe to process each audio channel
- # separately, then merges the transcription output of each channel into
- # a single transcription.
+ # Enables channel identification in multi-channel audio.
+ #
+ # Channel identification transcribes the audio on each channel
+ # independently, then appends the output for each channel into one
+ # transcript.
+ #
+ # If you have multi-channel audio and do not enable channel
+ # identification, your audio is transcribed in a continuous manner and
+ # your transcript is not separated by channel.
+ #
+ # For more information, see [Transcribing multi-channel audio][1].
+ #
+ #
  #
- # Amazon Transcribe also produces a transcription of each item. An item
- # includes the start time, end time, and any alternative transcriptions.
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html
  #
  # @option params [Integer] :number_of_channels
- # The number of channels that are in your audio stream.
+ # Specify the number of channels in your audio stream. Up to two
+ # channels are supported.
  #
  # @option params [Boolean] :enable_partial_results_stabilization
- # When `true`, instructs Amazon Transcribe to present transcription
- # results that have the partial results stabilized. Normally, any word
- # or phrase from one partial result can change in a subsequent partial
- # result. With partial results stabilization enabled, only the last few
- # words of one partial result can change in another partial result.
+ # Enables partial result stabilization for your transcription. Partial
+ # result stabilization can reduce latency in your output, but may impact
+ # accuracy. For more information, see [Partial-result stabilization][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
  #
  # @option params [String] :partial_results_stability
- # You can use this field to set the stability level of the transcription
- # results. A higher stability level means that the transcription results
- # are less likely to change. Higher stability levels can come with lower
- # overall transcription accuracy.
+ # Specify the level of stability to use when you enable partial results
+ # stabilization (`EnablePartialResultsStabilization`).
+ #
+ # Low stability provides the highest accuracy. High stability
+ # transcribes faster, but with slightly lower accuracy.
+ #
+ # For more information, see [Partial-result stabilization][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
  #
  # @option params [String] :content_identification_type
- # Set this field to PII to identify personally identifiable information
- # (PII) in the transcription output. Content identification is performed
- # only upon complete transcription of the audio segments.
+ # Labels all personally identifiable information (PII) identified in
+ # your transcript.
+ #
+ # Content identification is performed at the segment level; PII
+ # specified in `PiiEntityTypes` is flagged upon complete transcription
+ # of an audio segment.
+ #
+ # You can’t set `ContentIdentificationType` and `ContentRedactionType`
+ # in the same request. If you set both, your request returns a
+ # `BadRequestException`.
+ #
+ # For more information, see [Redacting or identifying personally
+ # identifiable information][1].
  #
- # You can’t set both `ContentIdentificationType` and
- # `ContentRedactionType` in the same request. If you set both, your
- # request returns a `BadRequestException`.
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
  #
  # @option params [String] :content_redaction_type
- # Set this field to PII to redact personally identifiable information
- # (PII) in the transcription output. Content redaction is performed only
- # upon complete transcription of the audio segments.
+ # Redacts all personally identifiable information (PII) identified in
+ # your transcript.
+ #
+ # Content redaction is performed at the segment level; PII specified in
+ # `PiiEntityTypes` is redacted upon complete transcription of an audio
+ # segment.
  #
- # You can’t set both `ContentRedactionType` and
- # `ContentIdentificationType` in the same request. If you set both, your
- # request returns a `BadRequestException`.
+ # You can’t set `ContentRedactionType` and `ContentIdentificationType`
+ # in the same request. If you set both, your request returns a
+ # `BadRequestException`.
+ #
+ # For more information, see [Redacting or identifying personally
+ # identifiable information][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
  #
  # @option params [String] :pii_entity_types
- # List the PII entity types you want to identify or redact. In order to
- # specify entity types, you must have either `ContentIdentificationType`
- # or `ContentRedactionType` enabled.
+ # Specify which types of personally identifiable information (PII) you
+ # want to redact in your transcript. You can include as many types as
+ # you'd like, or you can select `ALL`.
  #
- # `PIIEntityTypes` must be comma-separated; the available values are:
- # `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`,
- # `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`, `ADDRESS`,
- # `NAME`, `PHONE`, `SSN`, and `ALL`.
+ # To include `PiiEntityTypes` in your request, you must also include
+ # either `ContentIdentificationType` or `ContentRedactionType`.
  #
- # `PiiEntityTypes` is an optional parameter with a default value of
- # `ALL`.
+ # Values must be comma-separated and can include: `BANK_ACCOUNT_NUMBER`,
+ # `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`, `CREDIT_DEBIT_CVV`,
+ # `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`, `ADDRESS`, `NAME`, `PHONE`,
+ # `SSN`, or `ALL`.
  #
  # @option params [String] :language_model_name
- # The name of the language model you want to use.
+ # Specify the name of the custom language model that you want to use
+ # when processing your transcription. Note that language model names are
+ # case sensitive.
+ #
+ # The language of the specified language model must match the language
+ # code you specify in your transcription request. If the languages
+ # don't match, the language model isn't applied. There are no errors
+ # or warnings associated with a language mismatch.
+ #
+ # For more information, see [Custom language models][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
  #
  # @option params [Boolean] :identify_language
- # Optional. Set this value to `true` to enable language identification
- # for your media stream.
+ # Enables automatic language identification for your transcription.
+ #
+ # If you include `IdentifyLanguage`, you can optionally include a list
+ # of language codes, using `LanguageOptions`, that you think may be
+ # present in your audio stream. Including language options can improve
+ # transcription accuracy.
+ #
+ # You can also include a preferred language using `PreferredLanguage`.
+ # Adding a preferred language can help Amazon Transcribe identify the
+ # language faster than if you omit this parameter.
+ #
+ # If you have multi-channel audio that contains different languages on
+ # each channel, and you've enabled channel identification, automatic
+ # language identification identifies the dominant language on each audio
+ # channel.
+ #
+ # Note that you must include either `LanguageCode` or `IdentifyLanguage`
+ # in your request. If you include both parameters, your request fails.
+ #
+ # Streaming language identification can't be combined with custom
+ # language models or redaction.
  #
  # @option params [String] :language_options
- # An object containing a list of languages that might be present in your
- # audio.
+ # Specify two or more language codes that represent the languages you
+ # think may be present in your media; including more than five is not
+ # recommended. If you're unsure what languages are present, do not
+ # include this parameter.
+ #
+ # Including language options can improve the accuracy of language
+ # identification.
+ #
+ # If you include `LanguageOptions` in your request, you must also
+ # include `IdentifyLanguage`.
+ #
+ # For a list of languages supported with Amazon Transcribe streaming,
+ # refer to the [Supported languages][1] table.
  #
- # You must provide two or more language codes to help Amazon Transcribe
- # identify the correct language of your media stream with the highest
- # possible accuracy. You can only select one variant per language; for
- # example, you can't include both `en-US` and `en-UK` in the same
- # request.
+ # You can only include one language dialect per language per stream. For
+ # example, you cannot include `en-US` and `en-AU` in the same request.
  #
- # You can only use this parameter if you've set `IdentifyLanguage` to
- # `true`in your request.
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
  #
  # @option params [String] :preferred_language
- # Optional. From the subset of languages codes you provided for
- # `LanguageOptions`, you can select one preferred language for your
- # transcription.
+ # Specify a preferred language from the subset of languages codes you
+ # specified in `LanguageOptions`.
  #
- # You can only use this parameter if you've set `IdentifyLanguage` to
- # `true`in your request.
+ # You can only use this parameter if you've included `IdentifyLanguage`
+ # and `LanguageOptions` in your request.
  #
  # @option params [String] :vocabulary_names
- # The names of the custom vocabularies you want to use with your
- # transcription.
+ # Specify the names of the custom vocabularies that you want to use when
+ # processing your transcription. Note that vocabulary names are case
+ # sensitive.
+ #
+ # If none of the languages of the specified custom vocabularies match
+ # the language identified in your media, your job fails.
  #
- # Note that if the custom vocabularies you specify are in languages that
- # don't match the language identified in your media, your job fails.
+ # This parameter is only intended for use **with** the
+ # `IdentifyLanguage` parameter. If you're **not** including
+ # `IdentifyLanguage` in your request and want to use a custom vocabulary
+ # with your transcription, use the `VocabularyName` parameter instead.
  #
- # This operation is only intended for use in conjunction with the
- # `IdentifyLanguage` operation. If you're not using `IdentifyLanguage`
- # in your request and want to use a custom vocabulary with your
- # transcription, use the `VocabularyName` operation instead.
+ # For more information, see [Custom vocabularies][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
  #
  # @option params [String] :vocabulary_filter_names
- # The names of the vocabulary filters you want to use with your
- # transcription.
+ # Specify the names of the custom vocabulary filters that you want to
+ # use when processing your transcription. Note that vocabulary filter
+ # names are case sensitive.
+ #
+ # If none of the languages of the specified custom vocabulary filters
+ # match the language identified in your media, your job fails.
+ #
+ # This parameter is only intended for use **with** the
+ # `IdentifyLanguage` parameter. If you're **not** including
+ # `IdentifyLanguage` in your request and want to use a custom vocabulary
+ # filter with your transcription, use the `VocabularyFilterName`
+ # parameter instead.
+ #
+ # For more information, see [Using vocabulary filtering with unwanted
+ # words][1].
+ #
  #
- # Note that if the vocabulary filters you specify are in languages that
- # don't match the language identified in your media, your job fails.
  #
- # This operation is only intended for use in conjunction with the
- # `IdentifyLanguage` operation. If you're not using `IdentifyLanguage`
- # in your request and want to use a vocabulary filter with your
- # transcription, use the `VocabularyFilterName` operation instead.
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
  #
  # @return [Types::StartStreamTranscriptionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
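
Pulling the language-identification options above together, a hedged sketch of a request follows; values are placeholders, and `input_stream` is assumed to be an `EventStreams::AudioStream` as in the earlier medical example:

    # Either :language_code or :identify_language must be set, never both.
    async_resp = async_client.start_stream_transcription(
      identify_language: true,
      language_options: 'en-US,es-US',   # two or more candidate languages
      preferred_language: 'en-US',       # optional hint drawn from language_options
      media_sample_rate_hertz: 16_000,
      media_encoding: 'pcm',
      input_event_stream_handler: input_stream
    ) do |out_stream|
      out_stream.on_transcript_event_event do |event|
        # Each result reports the language code that was identified.
        event.transcript.results.each { |result| p result.language_code }
      end
    end
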
@@ -820,7 +1037,7 @@ module Aws::TranscribeStreamingService
  # @example Request syntax with placeholder values
  #
  # async_resp = async_client.start_stream_transcription({
- # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
+ # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
  # media_sample_rate_hertz: 1, # required
  # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
  # vocabulary_name: "VocabularyName",
@@ -839,7 +1056,7 @@ module Aws::TranscribeStreamingService
  # language_model_name: "ModelName",
  # identify_language: false,
  # language_options: "LanguageOptions",
- # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
+ # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
  # vocabulary_names: "VocabularyNames",
  # vocabulary_filter_names: "VocabularyFilterNames",
  # })
@@ -851,7 +1068,7 @@ module Aws::TranscribeStreamingService
  # @example Response structure
  #
  # resp.request_id #=> String
- # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
+ # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
  # resp.media_sample_rate_hertz #=> Integer
  # resp.media_encoding #=> String, one of "pcm", "ogg-opus", "flac"
  # resp.vocabulary_name #=> String
@@ -885,9 +1102,9 @@ module Aws::TranscribeStreamingService
  # event.transcript.results[0].alternatives[0].entities[0].content #=> String
  # event.transcript.results[0].alternatives[0].entities[0].confidence #=> Float
  # event.transcript.results[0].channel_id #=> String
- # event.transcript.results[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
+ # event.transcript.results[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
  # event.transcript.results[0].language_identification #=> Array
- # event.transcript.results[0].language_identification[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
+ # event.transcript.results[0].language_identification[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
  # event.transcript.results[0].language_identification[0].score #=> Float
  #
  # For :bad_request_exception event available at #on_bad_request_exception_event callback and response eventstream enumerator:
@@ -918,7 +1135,7 @@ module Aws::TranscribeStreamingService
  # resp.language_model_name #=> String
  # resp.identify_language #=> Boolean
  # resp.language_options #=> String
- # resp.preferred_language #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
+ # resp.preferred_language #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
  # resp.vocabulary_names #=> String
  # resp.vocabulary_filter_names #=> String
  #
@@ -944,9 +1161,9 @@ module Aws::TranscribeStreamingService
  req = build_request(:start_stream_transcription, params)

  req.context[:input_event_stream_handler] = input_event_stream_handler
- req.handlers.add(Aws::Binary::EncodeHandler, priority: 95)
+ req.handlers.add(Aws::Binary::EncodeHandler, priority: 55)
  req.context[:output_event_stream_handler] = output_event_stream_handler
- req.handlers.add(Aws::Binary::DecodeHandler, priority: 95)
+ req.handlers.add(Aws::Binary::DecodeHandler, priority: 55)

  req.send_request(options, &block)
  end
@@ -965,7 +1182,7 @@ module Aws::TranscribeStreamingService
  http_response: Seahorse::Client::Http::AsyncResponse.new,
  config: config)
  context[:gem_name] = 'aws-sdk-transcribestreamingservice'
- context[:gem_version] = '1.42.0'
+ context[:gem_version] = '1.44.0'
  Seahorse::Client::Request.new(handlers, context)
  end