aws-sdk-transcribestreamingservice 1.43.0 → 1.45.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -271,61 +271,552 @@ module Aws::TranscribeStreamingService
271
271
 
272
272
  # @!group API Operations
273
273
 
274
- # Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon
275
- # Transcribe Medical and the transcription results are streamed to your
276
- # application.
274
+ # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
275
+ # streamed to Amazon Transcribe and the transcription results are
276
+ # streamed to your application. Use this operation for [Call
277
+ # Analytics][1] transcriptions.
278
+ #
279
+ # The following parameters are required:
280
+ #
281
+ # * `language-code`
282
+ #
283
+ # * `media-encoding`
284
+ #
285
+ # * `sample-rate`
286
+ #
287
+ # For more information on streaming with Amazon Transcribe, see
288
+ # [Transcribing streaming audio][2].
289
+ #
290
+ #
291
+ #
292
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics.html
293
+ # [2]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
294
+ #
295
+ # @option params [required, String] :language_code
296
+ # Specify the language code that represents the language spoken in your
297
+ # audio.
298
+ #
299
+ # If you're unsure of the language spoken in your audio, consider using
300
+ # `IdentifyLanguage` to enable automatic language identification.
301
+ #
302
+ # For a list of languages supported with streaming Call Analytics, refer
303
+ # to the [Supported languages][1] table.
304
+ #
305
+ #
306
+ #
307
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
308
+ #
309
+ # @option params [required, Integer] :media_sample_rate_hertz
310
+ # The sample rate of the input audio (in hertz). Low-quality audio, such
311
+ # as telephone audio, is typically around 8,000 Hz. High-quality audio
312
+ # typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample
313
+ # rate you specify must match that of your audio.
314
+ #
315
+ # @option params [required, String] :media_encoding
316
+ # Specify the encoding of your input audio. Supported formats are:
317
+ #
318
+ # * FLAC
319
+ #
320
+ # * OPUS-encoded audio in an Ogg container
321
+ #
322
+ # * PCM (only signed 16-bit little-endian audio formats, which does not
323
+ # include WAV)
324
+ #
325
+ # For more information, see [Media formats][1].
326
+ #
327
+ #
328
+ #
329
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
330
+ #
331
+ # @option params [String] :vocabulary_name
332
+ # Specify the name of the custom vocabulary that you want to use when
333
+ # processing your transcription. Note that vocabulary names are case
334
+ # sensitive.
335
+ #
336
+ # If the language of the specified custom vocabulary doesn't match the
337
+ # language identified in your media, the custom vocabulary is not
338
+ # applied to your transcription.
339
+ #
340
+ # For more information, see [Custom vocabularies][1].
341
+ #
342
+ #
343
+ #
344
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
345
+ #
346
+ # @option params [String] :session_id
347
+ # Specify a name for your Call Analytics transcription session. If you
348
+ # don't include this parameter in your request, Amazon Transcribe
349
+ # generates an ID and returns it in the response.
350
+ #
351
+ # You can use a session ID to retry a streaming session.
352
+ #
353
+ # @option params [String] :vocabulary_filter_name
354
+ # Specify the name of the custom vocabulary filter that you want to use
355
+ # when processing your transcription. Note that vocabulary filter names
356
+ # are case sensitive.
357
+ #
358
+ # If the language of the specified custom vocabulary filter doesn't
359
+ # match the language identified in your media, the vocabulary filter is
360
+ # not applied to your transcription.
361
+ #
362
+ # For more information, see [Using vocabulary filtering with unwanted
363
+ # words][1].
364
+ #
365
+ #
366
+ #
367
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
368
+ #
369
+ # @option params [String] :vocabulary_filter_method
370
+ # Specify how you want your vocabulary filter applied to your
371
+ # transcript.
372
+ #
373
+ # To replace words with `***`, choose `mask`.
374
+ #
375
+ # To delete words, choose `remove`.
376
+ #
377
+ # To flag words without changing them, choose `tag`.
378
+ #
379
+ # @option params [String] :language_model_name
380
+ # Specify the name of the custom language model that you want to use
381
+ # when processing your transcription. Note that language model names are
382
+ # case sensitive.
383
+ #
384
+ # The language of the specified language model must match the language
385
+ # code you specify in your transcription request. If the languages
386
+ # don't match, the custom language model isn't applied. There are no
387
+ # errors or warnings associated with a language mismatch.
388
+ #
389
+ # For more information, see [Custom language models][1].
390
+ #
391
+ #
392
+ #
393
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
394
+ #
395
+ # @option params [Boolean] :enable_partial_results_stabilization
396
+ # Enables partial result stabilization for your transcription. Partial
397
+ # result stabilization can reduce latency in your output, but may impact
398
+ # accuracy. For more information, see [Partial-result stabilization][1].
399
+ #
400
+ #
401
+ #
402
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
403
+ #
404
+ # @option params [String] :partial_results_stability
405
+ # Specify the level of stability to use when you enable partial results
406
+ # stabilization (`EnablePartialResultsStabilization`).
407
+ #
408
+ # Low stability provides the highest accuracy. High stability
409
+ # transcribes faster, but with slightly lower accuracy.
410
+ #
411
+ # For more information, see [Partial-result stabilization][1].
412
+ #
413
+ #
414
+ #
415
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
416
+ #
417
+ # @option params [String] :content_identification_type
418
+ # Labels all personally identifiable information (PII) identified in
419
+ # your transcript.
420
+ #
421
+ # Content identification is performed at the segment level; PII
422
+ # specified in `PiiEntityTypes` is flagged upon complete transcription
423
+ # of an audio segment.
424
+ #
425
+ # You can’t set `ContentIdentificationType` and `ContentRedactionType`
426
+ # in the same request. If you set both, your request returns a
427
+ # `BadRequestException`.
428
+ #
429
+ # For more information, see [Redacting or identifying personally
430
+ # identifiable information][1].
431
+ #
432
+ #
433
+ #
434
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
435
+ #
436
+ # @option params [String] :content_redaction_type
437
+ # Redacts all personally identifiable information (PII) identified in
438
+ # your transcript.
439
+ #
440
+ # Content redaction is performed at the segment level; PII specified in
441
+ # `PiiEntityTypes` is redacted upon complete transcription of an audio
442
+ # segment.
443
+ #
444
+ # You can’t set `ContentRedactionType` and `ContentIdentificationType`
445
+ # in the same request. If you set both, your request returns a
446
+ # `BadRequestException`.
447
+ #
448
+ # For more information, see [Redacting or identifying personally
449
+ # identifiable information][1].
450
+ #
451
+ #
452
+ #
453
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
454
+ #
455
+ # @option params [String] :pii_entity_types
456
+ # Specify which types of personally identifiable information (PII) you
457
+ # want to redact in your transcript. You can include as many types as
458
+ # you'd like, or you can select `ALL`.
459
+ #
460
+ # To include `PiiEntityTypes` in your Call Analytics request, you must
461
+ # also include either `ContentIdentificationType` or
462
+ # `ContentRedactionType`.
463
+ #
464
+ # Values must be comma-separated and can include: `BANK_ACCOUNT_NUMBER`,
465
+ # `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`, `CREDIT_DEBIT_CVV`,
466
+ # `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`, `ADDRESS`, `NAME`, `PHONE`,
467
+ # `SSN`, or `ALL`.
468
+ #
469
+ # @return [Types::StartCallAnalyticsStreamTranscriptionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
470
+ #
471
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#request_id #request_id} => String
472
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#language_code #language_code} => String
473
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#media_sample_rate_hertz #media_sample_rate_hertz} => Integer
474
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#media_encoding #media_encoding} => String
475
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#vocabulary_name #vocabulary_name} => String
476
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#session_id #session_id} => String
477
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#call_analytics_transcript_result_stream #call_analytics_transcript_result_stream} => Types::CallAnalyticsTranscriptResultStream
478
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#vocabulary_filter_name #vocabulary_filter_name} => String
479
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#vocabulary_filter_method #vocabulary_filter_method} => String
480
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#language_model_name #language_model_name} => String
481
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#enable_partial_results_stabilization #enable_partial_results_stabilization} => Boolean
482
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#partial_results_stability #partial_results_stability} => String
483
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#content_identification_type #content_identification_type} => String
484
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#content_redaction_type #content_redaction_type} => String
485
+ # * {Types::StartCallAnalyticsStreamTranscriptionResponse#pii_entity_types #pii_entity_types} => String
486
+ #
487
+ # @example Bi-directional EventStream Operation Example
488
+ #
489
+ # You can signal input events after the initial request has been
490
+ # established; events will be sent to the stream
491
+ # immediately (once the stream connection is established successfully).
492
+ #
493
+ # To signal events, you can call #signal methods from an Aws::TranscribeStreamingService::EventStreams::AudioStream object.
494
+ # Make sure to signal events before calling #wait or #join! on the async response.
495
+ #
496
+ # input_stream = Aws::TranscribeStreamingService::EventStreams::AudioStream.new
497
+ #
498
+ # async_resp = client.start_call_analytics_stream_transcription( # params input,
499
+ # input_event_stream_handler: input_stream) do |out_stream|
500
+ #
501
+ # # register callbacks for events arrival
502
+ # out_stream.on_utterance_event_event do |event|
503
+ # event # => Aws::TranscribeStreamingService::Types::UtteranceEvent
504
+ # end
505
+ # out_stream.on_category_event_event do |event|
506
+ # event # => Aws::TranscribeStreamingService::Types::CategoryEvent
507
+ # end
508
+ # out_stream.on_bad_request_exception_event do |event|
509
+ # event # => Aws::TranscribeStreamingService::Types::BadRequestException
510
+ # end
511
+ # out_stream.on_limit_exceeded_exception_event do |event|
512
+ # event # => Aws::TranscribeStreamingService::Types::LimitExceededException
513
+ # end
514
+ # out_stream.on_internal_failure_exception_event do |event|
515
+ # event # => Aws::TranscribeStreamingService::Types::InternalFailureException
516
+ # end
517
+ # out_stream.on_conflict_exception_event do |event|
518
+ # event # => Aws::TranscribeStreamingService::Types::ConflictException
519
+ # end
520
+ # out_stream.on_service_unavailable_exception_event do |event|
521
+ # event # => Aws::TranscribeStreamingService::Types::ServiceUnavailableException
522
+ # end
523
+ #
524
+ # end
525
+ # # => returns Aws::Seahorse::Client::AsyncResponse
526
+ #
527
+ # # signal events
528
+ # input_stream.signal_audio_event_event( ... )
529
+ # input_stream.signal_configuration_event_event( ... )
530
+ #
531
+ # # make sure to signal :end_stream at the end
532
+ # input_stream.signal_end_stream
533
+ #
534
+ # # wait until stream is closed before finalizing sync response
535
+ # resp = async_resp.wait
536
+ # # Or close stream and finalizing sync response immediately
537
+ # # resp = async_resp.join!
538
+ #
539
+ # In order to process events in a streaming manner as they are received, you can also provide an Aws::TranscribeStreamingService::EventStreams::CallAnalyticsTranscriptResultStream
540
+ # object to register callbacks before initializing the request, instead of processing them from the request block
541
+ #
542
+ # output_stream = Aws::TranscribeStreamingService::EventStreams::CallAnalyticsTranscriptResultStream.new
543
+ # # register callbacks for events arrival
544
+ # output_stream.on_utterance_event_event do |event|
545
+ # event # => Aws::TranscribeStreamingService::Types::UtteranceEvent
546
+ # end
547
+ # output_stream.on_category_event_event do |event|
548
+ # event # => Aws::TranscribeStreamingService::Types::CategoryEvent
549
+ # end
550
+ # output_stream.on_bad_request_exception_event do |event|
551
+ # event # => Aws::TranscribeStreamingService::Types::BadRequestException
552
+ # end
553
+ # output_stream.on_limit_exceeded_exception_event do |event|
554
+ # event # => Aws::TranscribeStreamingService::Types::LimitExceededException
555
+ # end
556
+ # output_stream.on_internal_failure_exception_event do |event|
557
+ # event # => Aws::TranscribeStreamingService::Types::InternalFailureException
558
+ # end
559
+ # output_stream.on_conflict_exception_event do |event|
560
+ # event # => Aws::TranscribeStreamingService::Types::ConflictException
561
+ # end
562
+ # output_stream.on_service_unavailable_exception_event do |event|
563
+ # event # => Aws::TranscribeStreamingService::Types::ServiceUnavailableException
564
+ # end
565
+ # output_stream.on_error_event do |event|
566
+ # # catch unmodeled error event in the stream
567
+ # raise event
568
+ # # => Aws::Errors::EventError
569
+ # # event.event_type => :error
570
+ # # event.error_code => String
571
+ # # event.error_message => String
572
+ # end
573
+ #
574
+ # async_resp = client.start_call_analytics_stream_transcription ( #params input,
575
+ # input_event_stream_handler: input_stream,
576
+ # output_event_stream_handler: output_stream
577
+ # )
578
+ #
579
+ # resp = async_resp.wait!
580
+ #
581
+ # Besides the above usage patterns for processing events as they arrive, you can also
582
+ # iterate through events after response complete.
583
+ #
584
+ # Events are available at resp.call_analytics_transcript_result_stream # => Enumerator
585
+ #
586
+ # @example Request syntax with placeholder values
587
+ #
588
+ # async_resp = async_client.start_call_analytics_stream_transcription({
589
+ # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR
590
+ # media_sample_rate_hertz: 1, # required
591
+ # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
592
+ # vocabulary_name: "VocabularyName",
593
+ # session_id: "SessionId",
594
+ # input_event_stream_hander: EventStreams::AudioStream.new,
595
+ # vocabulary_filter_name: "VocabularyFilterName",
596
+ # vocabulary_filter_method: "remove", # accepts remove, mask, tag
597
+ # language_model_name: "ModelName",
598
+ # enable_partial_results_stabilization: false,
599
+ # partial_results_stability: "high", # accepts high, medium, low
600
+ # content_identification_type: "PII", # accepts PII
601
+ # content_redaction_type: "PII", # accepts PII
602
+ # pii_entity_types: "PiiEntityTypes",
603
+ # })
604
+ # # => Seahorse::Client::AsyncResponse
605
+ # async_resp.wait
606
+ # # => Seahorse::Client::Response
607
+ # # Or use async_resp.join!
608
+ #
609
+ # @example Response structure
610
+ #
611
+ # resp.request_id #=> String
612
+ # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR"
613
+ # resp.media_sample_rate_hertz #=> Integer
614
+ # resp.media_encoding #=> String, one of "pcm", "ogg-opus", "flac"
615
+ # resp.vocabulary_name #=> String
616
+ # resp.session_id #=> String
617
+ # All events are available at resp.call_analytics_transcript_result_stream:
618
+ # resp.call_analytics_transcript_result_stream #=> Enumerator
619
+ # resp.call_analytics_transcript_result_stream.event_types #=> [:utterance_event, :category_event, :bad_request_exception, :limit_exceeded_exception, :internal_failure_exception, :conflict_exception, :service_unavailable_exception]
620
+ #
621
+ # For :utterance_event event available at #on_utterance_event_event callback and response eventstream enumerator:
622
+ # event.utterance_id #=> String
623
+ # event.is_partial #=> Boolean
624
+ # event.participant_role #=> String, one of "AGENT", "CUSTOMER"
625
+ # event.begin_offset_millis #=> Integer
626
+ # event.end_offset_millis #=> Integer
627
+ # event.transcript #=> String
628
+ # event.items #=> Array
629
+ # event.items[0].begin_offset_millis #=> Integer
630
+ # event.items[0].end_offset_millis #=> Integer
631
+ # event.items[0].type #=> String, one of "pronunciation", "punctuation"
632
+ # event.items[0].content #=> String
633
+ # event.items[0].confidence #=> Float
634
+ # event.items[0].vocabulary_filter_match #=> Boolean
635
+ # event.items[0].stable #=> Boolean
636
+ # event.entities #=> Array
637
+ # event.entities[0].begin_offset_millis #=> Integer
638
+ # event.entities[0].end_offset_millis #=> Integer
639
+ # event.entities[0].category #=> String
640
+ # event.entities[0].type #=> String
641
+ # event.entities[0].content #=> String
642
+ # event.entities[0].confidence #=> Float
643
+ # event.sentiment #=> String, one of "POSITIVE", "NEGATIVE", "MIXED", "NEUTRAL"
644
+ # event.issues_detected #=> Array
645
+ # event.issues_detected[0].character_offsets.begin #=> Integer
646
+ # event.issues_detected[0].character_offsets.end #=> Integer
647
+ #
648
+ # For :category_event event available at #on_category_event_event callback and response eventstream enumerator:
649
+ # event.matched_categories #=> Array
650
+ # event.matched_categories[0] #=> String
651
+ # event.matched_details #=> Hash
652
+ # event.matched_details["String"].timestamp_ranges #=> Array
653
+ # event.matched_details["String"].timestamp_ranges[0].begin_offset_millis #=> Integer
654
+ # event.matched_details["String"].timestamp_ranges[0].end_offset_millis #=> Integer
655
+ #
656
+ # For :bad_request_exception event available at #on_bad_request_exception_event callback and response eventstream enumerator:
657
+ # event.message #=> String
658
+ #
659
+ # For :limit_exceeded_exception event available at #on_limit_exceeded_exception_event callback and response eventstream enumerator:
660
+ # event.message #=> String
661
+ #
662
+ # For :internal_failure_exception event available at #on_internal_failure_exception_event callback and response eventstream enumerator:
663
+ # event.message #=> String
664
+ #
665
+ # For :conflict_exception event available at #on_conflict_exception_event callback and response eventstream enumerator:
666
+ # event.message #=> String
667
+ #
668
+ # For :service_unavailable_exception event available at #on_service_unavailable_exception_event callback and response eventstream enumerator:
669
+ # event.message #=> String
670
+ #
671
+ # resp.vocabulary_filter_name #=> String
672
+ # resp.vocabulary_filter_method #=> String, one of "remove", "mask", "tag"
673
+ # resp.language_model_name #=> String
674
+ # resp.enable_partial_results_stabilization #=> Boolean
675
+ # resp.partial_results_stability #=> String, one of "high", "medium", "low"
676
+ # resp.content_identification_type #=> String, one of "PII"
677
+ # resp.content_redaction_type #=> String, one of "PII"
678
+ # resp.pii_entity_types #=> String
679
+ #
680
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartCallAnalyticsStreamTranscription AWS API Documentation
681
+ #
682
+ # @overload start_call_analytics_stream_transcription(params = {})
683
+ # @param [Hash] params ({})
684
+ def start_call_analytics_stream_transcription(params = {}, options = {}, &block)
685
+ params = params.dup
686
+ input_event_stream_handler = _event_stream_handler(
687
+ :input,
688
+ params.delete(:input_event_stream_handler),
689
+ EventStreams::AudioStream
690
+ )
691
+ output_event_stream_handler = _event_stream_handler(
692
+ :output,
693
+ params.delete(:output_event_stream_handler) || params.delete(:event_stream_handler),
694
+ EventStreams::CallAnalyticsTranscriptResultStream
695
+ )
696
+
697
+ yield(output_event_stream_handler) if block_given?
698
+
699
+ req = build_request(:start_call_analytics_stream_transcription, params)
700
+
701
+ req.context[:input_event_stream_handler] = input_event_stream_handler
702
+ req.handlers.add(Aws::Binary::EncodeHandler, priority: 55)
703
+ req.context[:output_event_stream_handler] = output_event_stream_handler
704
+ req.handlers.add(Aws::Binary::DecodeHandler, priority: 55)
705
+
706
+ req.send_request(options, &block)
707
+ end
708
+
709
+ # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
710
+ # streamed to Amazon Transcribe Medical and the transcription results
711
+ # are streamed to your application.
712
+ #
713
+ # The following parameters are required:
714
+ #
715
+ # * `language-code`
716
+ #
717
+ # * `media-encoding`
718
+ #
719
+ # * `sample-rate`
720
+ #
721
+ # For more information on streaming with Amazon Transcribe Medical, see
722
+ # [Transcribing streaming audio][1].
723
+ #
724
+ #
725
+ #
726
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
277
727
  #
278
728
  # @option params [required, String] :language_code
279
- # Indicates the source language used in the input audio stream. For
280
- # Amazon Transcribe Medical, this is US English (en-US).
729
+ # Specify the language code that represents the language spoken in your
730
+ # audio.
731
+ #
732
+ # Amazon Transcribe Medical only supports US English (`en-US`).
281
733
  #
282
734
  # @option params [required, Integer] :media_sample_rate_hertz
283
- # The sample rate of the input audio (in Hertz). Amazon Transcribe
284
- # medical supports a range from 16,000 Hz to 48,000 Hz. Note that the
735
+ # The sample rate of the input audio (in hertz). Amazon Transcribe
736
+ # Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the
285
737
  # sample rate you specify must match that of your audio.
286
738
  #
287
739
  # @option params [required, String] :media_encoding
288
- # The encoding used for the input audio.
740
+ # Specify the encoding used for the input audio. Supported formats are:
741
+ #
742
+ # * FLAC
743
+ #
744
+ # * OPUS-encoded audio in an Ogg container
745
+ #
746
+ # * PCM (only signed 16-bit little-endian audio formats, which does not
747
+ # include WAV)
748
+ #
749
+ # For more information, see [Media formats][1].
750
+ #
751
+ #
752
+ #
753
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
289
754
  #
290
755
  # @option params [String] :vocabulary_name
291
- # The name of the medical custom vocabulary to use when processing the
292
- # real-time stream.
756
+ # Specify the name of the custom vocabulary that you want to use when
757
+ # processing your transcription. Note that vocabulary names are case
758
+ # sensitive.
293
759
  #
294
760
  # @option params [required, String] :specialty
295
- # The medical specialty of the clinician or provider.
761
+ # Specify the medical specialty contained in your audio.
296
762
  #
297
763
  # @option params [required, String] :type
298
- # The type of input audio. Choose `DICTATION` for a provider dictating
299
- # patient notes. Choose `CONVERSATION` for a dialogue between a patient
300
- # and one or more medical professionanls.
764
+ # Specify the type of input audio. For example, choose `DICTATION` for a
765
+ # provider dictating patient notes and `CONVERSATION` for a dialogue
766
+ # between a patient and a medical professional.
301
767
  #
302
768
  # @option params [Boolean] :show_speaker_label
303
- # When `true`, enables speaker identification in your real-time stream.
769
+ # Enables speaker partitioning (diarization) in your transcription
770
+ # output. Speaker partitioning labels the speech from individual
771
+ # speakers in your media file.
772
+ #
773
+ # For more information, see [Partitioning speakers (diarization)][1].
774
+ #
775
+ #
776
+ #
777
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html
304
778
  #
305
779
  # @option params [String] :session_id
306
- # Optional. An identifier for the transcription session. If you don't
307
- # provide a session ID, Amazon Transcribe generates one for you and
308
- # returns it in the response.
780
+ # Specify a name for your transcription session. If you don't include
781
+ # this parameter in your request, Amazon Transcribe Medical generates an
782
+ # ID and returns it in the response.
783
+ #
784
+ # You can use a session ID to retry a streaming session.
309
785
  #
310
786
  # @option params [Boolean] :enable_channel_identification
311
- # When `true`, instructs Amazon Transcribe Medical to process each audio
312
- # channel separately and then merge the transcription output of each
313
- # channel into a single transcription.
787
+ # Enables channel identification in multi-channel audio.
788
+ #
789
+ # Channel identification transcribes the audio on each channel
790
+ # independently, then appends the output for each channel into one
791
+ # transcript.
792
+ #
793
+ # If you have multi-channel audio and do not enable channel
794
+ # identification, your audio is transcribed in a continuous manner and
795
+ # your transcript is not separated by channel.
796
+ #
797
+ # For more information, see [Transcribing multi-channel audio][1].
798
+ #
314
799
  #
315
- # Amazon Transcribe Medical also produces a transcription of each item.
316
- # An item includes the start time, end time, and any alternative
317
- # transcriptions.
318
800
  #
319
- # You can't set both `ShowSpeakerLabel` and
320
- # `EnableChannelIdentification` in the same request. If you set both,
321
- # your request returns a `BadRequestException`.
801
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html
322
802
  #
323
803
  # @option params [Integer] :number_of_channels
324
- # The number of channels that are in your audio stream.
804
+ # Specify the number of channels in your audio stream. Up to two
805
+ # channels are supported.
325
806
  #
326
807
  # @option params [String] :content_identification_type
327
- # Set this field to `PHI` to identify personal health information in the
328
- # transcription output.
808
+ # Labels all personal health information (PHI) identified in your
809
+ # transcript.
810
+ #
811
+ # Content identification is performed at the segment level; PHI is
812
+ # flagged upon complete transcription of an audio segment.
813
+ #
814
+ # For more information, see [Identifying personal health information
815
+ # (PHI) in a transcription][1].
816
+ #
817
+ #
818
+ #
819
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html
329
820
  #
330
821
  # @return [Types::StartMedicalStreamTranscriptionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
331
822
  #
@@ -382,6 +873,7 @@ module Aws::TranscribeStreamingService
382
873
  #
383
874
  # # signal events
384
875
  # input_stream.signal_audio_event_event( ... )
876
+ # input_stream.signal_configuration_event_event( ... )
385
877
  #
386
878
  # # make sure signaling :end_stream in the end
387
879
  # input_stream.signal_end_stream
@@ -438,7 +930,7 @@ module Aws::TranscribeStreamingService
438
930
  # @example Request syntax with placeholder values
439
931
  #
440
932
  # async_resp = async_client.start_medical_stream_transcription({
441
- # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
933
+ # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
442
934
  # media_sample_rate_hertz: 1, # required
443
935
  # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
444
936
  # vocabulary_name: "VocabularyName",
@@ -459,7 +951,7 @@ module Aws::TranscribeStreamingService
459
951
  # @example Response structure
460
952
  #
461
953
  # resp.request_id #=> String
462
- # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
954
+ # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
463
955
  # resp.media_sample_rate_hertz #=> Integer
464
956
  # resp.media_encoding #=> String, one of "pcm", "ogg-opus", "flac"
465
957
  # resp.vocabulary_name #=> String
@@ -542,179 +1034,331 @@ module Aws::TranscribeStreamingService
542
1034
  req.send_request(options, &block)
543
1035
  end
544
1036
 
545
- # Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon
546
- # Transcribe and the transcription results are streamed to your
547
- # application.
548
- #
549
- # The following are encoded as HTTP/2 headers:
1037
+ # Starts a bidirectional HTTP/2 or WebSocket stream where audio is
1038
+ # streamed to Amazon Transcribe and the transcription results are
1039
+ # streamed to your application.
550
1040
  #
551
- # * x-amzn-transcribe-language-code
1041
+ # The following parameters are required:
552
1042
  #
553
- # * x-amzn-transcribe-media-encoding
1043
+ # * `language-code` or `identify-language`
554
1044
  #
555
- # * x-amzn-transcribe-sample-rate
1045
+ # * `media-encoding`
556
1046
  #
557
- # * x-amzn-transcribe-session-id
1047
+ # * `sample-rate`
558
1048
  #
559
- # See the [ SDK for Go API Reference][1] for more detail.
1049
+ # For more information on streaming with Amazon Transcribe, see
1050
+ # [Transcribing streaming audio][1].
560
1051
  #
561
1052
  #
562
1053
  #
563
- # [1]: https://docs.aws.amazon.com/sdk-for-go/api/service/transcribestreamingservice/#TranscribeStreamingService.StartStreamTranscription
1054
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
564
1055
  #
565
1056
  # @option params [String] :language_code
566
- # The language code of the input audio stream.
1057
+ # Specify the language code that represents the language spoken in your
1058
+ # audio.
1059
+ #
1060
+ # If you're unsure of the language spoken in your audio, consider using
1061
+ # `IdentifyLanguage` to enable automatic language identification.
1062
+ #
1063
+ # For a list of languages supported with Amazon Transcribe streaming,
1064
+ # refer to the [Supported languages][1] table.
1065
+ #
1066
+ #
1067
+ #
1068
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
567
1069
  #
568
1070
  # @option params [required, Integer] :media_sample_rate_hertz
569
- # The sample rate of the input audio (in Hertz). Low-quality audio, such
1071
+ # The sample rate of the input audio (in hertz). Low-quality audio, such
570
1072
  # as telephone audio, is typically around 8,000 Hz. High-quality audio
571
1073
  # typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample
572
1074
  # rate you specify must match that of your audio.
573
1075
  #
574
1076
  # @option params [required, String] :media_encoding
575
- # The encoding used for the input audio.
1077
+ # Specify the encoding of your input audio. Supported formats are:
1078
+ #
1079
+ # * FLAC
1080
+ #
1081
+ # * OPUS-encoded audio in an Ogg container
1082
+ #
1083
+ # * PCM (only signed 16-bit little-endian audio formats, which does not
1084
+ # include WAV)
1085
+ #
1086
+ # For more information, see [Media formats][1].
1087
+ #
1088
+ #
1089
+ #
1090
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
576
1091
  #
577
1092
  # @option params [String] :vocabulary_name
578
- # The name of the custom vocabulary you want to use with your
579
- # transcription.
1093
+ # Specify the name of the custom vocabulary that you want to use when
1094
+ # processing your transcription. Note that vocabulary names are case
1095
+ # sensitive.
1096
+ #
1097
+ # If the language of the specified custom vocabulary doesn't match the
1098
+ # language identified in your media, the custom vocabulary is not
1099
+ # applied to your transcription.
1100
+ #
1101
+ # This parameter is **not** intended for use with the `IdentifyLanguage`
1102
+ # parameter. If you're including `IdentifyLanguage` in your request and
1103
+ # want to use one or more custom vocabularies with your transcription,
1104
+ # use the `VocabularyNames` parameter instead.
1105
+ #
1106
+ # For more information, see [Custom vocabularies][1].
1107
+ #
580
1108
  #
581
- # This operation is not intended for use in conjunction with the
582
- # `IdentifyLanguage` operation. If you're using `IdentifyLanguage` in
583
- # your request and want to use one or more custom vocabularies with your
584
- # transcription, use the `VocabularyNames` operation instead.
1109
+ #
1110
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
585
1111
  #
586
1112
  # @option params [String] :session_id
587
- # A identifier for the transcription session. Use this parameter when
588
- # you want to retry a session. If you don't provide a session ID,
589
- # Amazon Transcribe will generate one for you and return it in the
590
- # response.
1113
+ # Specify a name for your transcription session. If you don't include
1114
+ # this parameter in your request, Amazon Transcribe generates an ID and
1115
+ # returns it in the response.
1116
+ #
1117
+ # You can use a session ID to retry a streaming session.
591
1118
  #
592
1119
  # @option params [String] :vocabulary_filter_name
593
- # The name of the vocabulary filter you want to use with your
594
- # transcription.
1120
+ # Specify the name of the custom vocabulary filter that you want to use
1121
+ # when processing your transcription. Note that vocabulary filter names
1122
+ # are case sensitive.
1123
+ #
1124
+ # If the language of the specified custom vocabulary filter doesn't
1125
+ # match the language identified in your media, the vocabulary filter is
1126
+ # not applied to your transcription.
1127
+ #
1128
+ # This parameter is **not** intended for use with the `IdentifyLanguage`
1129
+ # parameter. If you're including `IdentifyLanguage` in your request and
1130
+ # want to use one or more vocabulary filters with your transcription,
1131
+ # use the `VocabularyFilterNames` parameter instead.
1132
+ #
1133
+ # For more information, see [Using vocabulary filtering with unwanted
1134
+ # words][1].
595
1135
  #
596
- # This operation is not intended for use in conjunction with the
597
- # `IdentifyLanguage` operation. If you're using `IdentifyLanguage` in
598
- # your request and want to use one or more vocabulary filters with your
599
- # transcription, use the `VocabularyFilterNames` operation instead.
1136
+ #
1137
+ #
1138
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
600
1139
  #
601
1140
  # @option params [String] :vocabulary_filter_method
602
- # The manner in which you use your vocabulary filter to filter words in
603
- # your transcript. `Remove` removes filtered words from your
604
- # transcription results. `Mask` masks filtered words with a `***` in
605
- # your transcription results. `Tag` keeps the filtered words in your
606
- # transcription results and tags them. The tag appears as
607
- # `VocabularyFilterMatch` equal to `True`.
1141
+ # Specify how you want your vocabulary filter applied to your
1142
+ # transcript.
1143
+ #
1144
+ # To replace words with `***`, choose `mask`.
1145
+ #
1146
+ # To delete words, choose `remove`.
1147
+ #
1148
+ # To flag words without changing them, choose `tag`.
608
1149
  #
609
1150
  # @option params [Boolean] :show_speaker_label
610
- # When `true`, enables speaker identification in your media stream.
1151
+ # Enables speaker partitioning (diarization) in your transcription
1152
+ # output. Speaker partitioning labels the speech from individual
1153
+ # speakers in your media file.
1154
+ #
1155
+ # For more information, see [Partitioning speakers (diarization)][1].
1156
+ #
1157
+ #
1158
+ #
1159
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html
611
1160
  #
612
1161
  # @option params [Boolean] :enable_channel_identification
613
- # When `true`, instructs Amazon Transcribe to process each audio channel
614
- # separately, then merges the transcription output of each channel into
615
- # a single transcription.
1162
+ # Enables channel identification in multi-channel audio.
1163
+ #
1164
+ # Channel identification transcribes the audio on each channel
1165
+ # independently, then appends the output for each channel into one
1166
+ # transcript.
1167
+ #
1168
+ # If you have multi-channel audio and do not enable channel
1169
+ # identification, your audio is transcribed in a continuous manner and
1170
+ # your transcript is not separated by channel.
616
1171
  #
617
- # Amazon Transcribe also produces a transcription of each item. An item
618
- # includes the start time, end time, and any alternative transcriptions.
1172
+ # For more information, see [Transcribing multi-channel audio][1].
1173
+ #
1174
+ #
1175
+ #
1176
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html
619
1177
  #
620
1178
  # @option params [Integer] :number_of_channels
621
- # The number of channels that are in your audio stream.
1179
+ # Specify the number of channels in your audio stream. Up to two
1180
+ # channels are supported.
622
1181
  #
623
1182
  # @option params [Boolean] :enable_partial_results_stabilization
624
- # When `true`, instructs Amazon Transcribe to present transcription
625
- # results that have the partial results stabilized. Normally, any word
626
- # or phrase from one partial result can change in a subsequent partial
627
- # result. With partial results stabilization enabled, only the last few
628
- # words of one partial result can change in another partial result.
1183
+ # Enables partial result stabilization for your transcription. Partial
1184
+ # result stabilization can reduce latency in your output, but may impact
1185
+ # accuracy. For more information, see [Partial-result stabilization][1].
1186
+ #
1187
+ #
1188
+ #
1189
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
629
1190
  #
630
1191
  # @option params [String] :partial_results_stability
631
- # You can use this field to set the stability level of the transcription
632
- # results. A higher stability level means that the transcription results
633
- # are less likely to change. Higher stability levels can come with lower
634
- # overall transcription accuracy.
1192
+ # Specify the level of stability to use when you enable partial results
1193
+ # stabilization (`EnablePartialResultsStabilization`).
1194
+ #
1195
+ # Low stability provides the highest accuracy. High stability
1196
+ # transcribes faster, but with slightly lower accuracy.
1197
+ #
1198
+ # For more information, see [Partial-result stabilization][1].
1199
+ #
1200
+ #
1201
+ #
1202
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
635
1203
  #
636
1204
  # @option params [String] :content_identification_type
637
- # Set this field to PII to identify personally identifiable information
638
- # (PII) in the transcription output. Content identification is performed
639
- # only upon complete transcription of the audio segments.
1205
+ # Labels all personally identifiable information (PII) identified in
1206
+ # your transcript.
1207
+ #
1208
+ # Content identification is performed at the segment level; PII
1209
+ # specified in `PiiEntityTypes` is flagged upon complete transcription
1210
+ # of an audio segment.
640
1211
  #
641
- # You can’t set both `ContentIdentificationType` and
642
- # `ContentRedactionType` in the same request. If you set both, your
643
- # request returns a `BadRequestException`.
1212
+ # You can’t set `ContentIdentificationType` and `ContentRedactionType`
1213
+ # in the same request. If you set both, your request returns a
1214
+ # `BadRequestException`.
1215
+ #
1216
+ # For more information, see [Redacting or identifying personally
1217
+ # identifiable information][1].
1218
+ #
1219
+ #
1220
+ #
1221
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
644
1222
  #
645
1223
  # @option params [String] :content_redaction_type
646
- # Set this field to PII to redact personally identifiable information
647
- # (PII) in the transcription output. Content redaction is performed only
648
- # upon complete transcription of the audio segments.
1224
+ # Redacts all personally identifiable information (PII) identified in
1225
+ # your transcript.
1226
+ #
1227
+ # Content redaction is performed at the segment level; PII specified in
1228
+ # `PiiEntityTypes` is redacted upon complete transcription of an audio
1229
+ # segment.
1230
+ #
1231
+ # You can’t set `ContentRedactionType` and `ContentIdentificationType`
1232
+ # in the same request. If you set both, your request returns a
1233
+ # `BadRequestException`.
649
1234
  #
650
- # You can’t set both `ContentRedactionType` and
651
- # `ContentIdentificationType` in the same request. If you set both, your
652
- # request returns a `BadRequestException`.
1235
+ # For more information, see [Redacting or identifying personally
1236
+ # identifiable information][1].
1237
+ #
1238
+ #
1239
+ #
1240
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
653
1241
  #
654
1242
  # @option params [String] :pii_entity_types
655
- # List the PII entity types you want to identify or redact. In order to
656
- # specify entity types, you must have either `ContentIdentificationType`
657
- # or `ContentRedactionType` enabled.
1243
+ # Specify which types of personally identifiable information (PII) you
1244
+ # want to redact in your transcript. You can include as many types as
1245
+ # you'd like, or you can select `ALL`.
658
1246
  #
659
- # `PIIEntityTypes` must be comma-separated; the available values are:
660
- # `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`,
661
- # `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`, `ADDRESS`,
662
- # `NAME`, `PHONE`, `SSN`, and `ALL`.
1247
+ # To include `PiiEntityTypes` in your request, you must also include
1248
+ # either `ContentIdentificationType` or `ContentRedactionType`.
663
1249
  #
664
- # `PiiEntityTypes` is an optional parameter with a default value of
665
- # `ALL`.
1250
+ # Values must be comma-separated and can include: `BANK_ACCOUNT_NUMBER`,
1251
+ # `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`, `CREDIT_DEBIT_CVV`,
1252
+ # `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`, `ADDRESS`, `NAME`, `PHONE`,
1253
+ # `SSN`, or `ALL`.
666
1254
  #
667
1255
  # @option params [String] :language_model_name
668
- # The name of the language model you want to use.
1256
+ # Specify the name of the custom language model that you want to use
1257
+ # when processing your transcription. Note that language model names are
1258
+ # case sensitive.
1259
+ #
1260
+ # The language of the specified language model must match the language
1261
+ # code you specify in your transcription request. If the languages
1262
+ # don't match, the custom language model isn't applied. There are no
1263
+ # errors or warnings associated with a language mismatch.
1264
+ #
1265
+ # For more information, see [Custom language models][1].
1266
+ #
1267
+ #
1268
+ #
1269
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
669
1270
  #
670
1271
  # @option params [Boolean] :identify_language
671
- # Optional. Set this value to `true` to enable language identification
672
- # for your media stream.
1272
+ # Enables automatic language identification for your transcription.
1273
+ #
1274
+ # If you include `IdentifyLanguage`, you can optionally include a list
1275
+ # of language codes, using `LanguageOptions`, that you think may be
1276
+ # present in your audio stream. Including language options can improve
1277
+ # transcription accuracy.
1278
+ #
1279
+ # You can also include a preferred language using `PreferredLanguage`.
1280
+ # Adding a preferred language can help Amazon Transcribe identify the
1281
+ # language faster than if you omit this parameter.
1282
+ #
1283
+ # If you have multi-channel audio that contains different languages on
1284
+ # each channel, and you've enabled channel identification, automatic
1285
+ # language identification identifies the dominant language on each audio
1286
+ # channel.
1287
+ #
1288
+ # Note that you must include either `LanguageCode` or `IdentifyLanguage`
1289
+ # in your request. If you include both parameters, your request fails.
1290
+ #
1291
+ # Streaming language identification can't be combined with custom
1292
+ # language models or redaction.
673
1293
  #
674
1294
  # @option params [String] :language_options
675
- # An object containing a list of languages that might be present in your
676
- # audio.
1295
+ # Specify two or more language codes that represent the languages you
1296
+ # think may be present in your media; including more than five is not
1297
+ # recommended. If you're unsure what languages are present, do not
1298
+ # include this parameter.
1299
+ #
1300
+ # Including language options can improve the accuracy of language
1301
+ # identification.
1302
+ #
1303
+ # If you include `LanguageOptions` in your request, you must also
1304
+ # include `IdentifyLanguage`.
677
1305
  #
678
- # You must provide two or more language codes to help Amazon Transcribe
679
- # identify the correct language of your media stream with the highest
680
- # possible accuracy. You can only select one variant per language; for
681
- # example, you can't include both `en-US` and `en-UK` in the same
682
- # request.
1306
+ # For a list of languages supported with Amazon Transcribe streaming,
1307
+ # refer to the [Supported languages][1] table.
683
1308
  #
684
- # You can only use this parameter if you've set `IdentifyLanguage` to
685
- # `true`in your request.
1309
+ # You can only include one language dialect per language per stream. For
1310
+ # example, you cannot include `en-US` and `en-AU` in the same request.
1311
+ #
1312
+ #
1313
+ #
1314
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
686
1315
  #
687
1316
  # @option params [String] :preferred_language
688
- # Optional. From the subset of languages codes you provided for
689
- # `LanguageOptions`, you can select one preferred language for your
690
- # transcription.
1317
+ # Specify a preferred language from the subset of languages codes you
1318
+ # specified in `LanguageOptions`.
691
1319
  #
692
- # You can only use this parameter if you've set `IdentifyLanguage` to
693
- # `true`in your request.
1320
+ # You can only use this parameter if you've included `IdentifyLanguage`
1321
+ # and `LanguageOptions` in your request.
694
1322
  #
695
1323
  # @option params [String] :vocabulary_names
696
- # The names of the custom vocabularies you want to use with your
697
- # transcription.
1324
+ # Specify the names of the custom vocabularies that you want to use when
1325
+ # processing your transcription. Note that vocabulary names are case
1326
+ # sensitive.
1327
+ #
1328
+ # If none of the languages of the specified custom vocabularies match
1329
+ # the language identified in your media, your job fails.
1330
+ #
1331
+ # This parameter is only intended for use **with** the
1332
+ # `IdentifyLanguage` parameter. If you're **not** including
1333
+ # `IdentifyLanguage` in your request and want to use a custom vocabulary
1334
+ # with your transcription, use the `VocabularyName` parameter instead.
1335
+ #
1336
+ # For more information, see [Custom vocabularies][1].
698
1337
  #
699
- # Note that if the custom vocabularies you specify are in languages that
700
- # don't match the language identified in your media, your job fails.
701
1338
  #
702
- # This operation is only intended for use in conjunction with the
703
- # `IdentifyLanguage` operation. If you're not using `IdentifyLanguage`
704
- # in your request and want to use a custom vocabulary with your
705
- # transcription, use the `VocabularyName` operation instead.
1339
+ #
1340
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
706
1341
  #
707
1342
  # @option params [String] :vocabulary_filter_names
708
- # The names of the vocabulary filters you want to use with your
709
- # transcription.
1343
+ # Specify the names of the custom vocabulary filters that you want to
1344
+ # use when processing your transcription. Note that vocabulary filter
1345
+ # names are case sensitive.
1346
+ #
1347
+ # If none of the languages of the specified custom vocabulary filters
1348
+ # match the language identified in your media, your job fails.
1349
+ #
1350
+ # This parameter is only intended for use **with** the
1351
+ # `IdentifyLanguage` parameter. If you're **not** including
1352
+ # `IdentifyLanguage` in your request and want to use a custom vocabulary
1353
+ # filter with your transcription, use the `VocabularyFilterName`
1354
+ # parameter instead.
1355
+ #
1356
+ # For more information, see [Using vocabulary filtering with unwanted
1357
+ # words][1].
1358
+ #
710
1359
  #
711
- # Note that if the vocabulary filters you specify are in languages that
712
- # don't match the language identified in your media, your job fails.
713
1360
  #
714
- # This operation is only intended for use in conjunction with the
715
- # `IdentifyLanguage` operation. If you're not using `IdentifyLanguage`
716
- # in your request and want to use a vocabulary filter with your
717
- # transcription, use the `VocabularyFilterName` operation instead.
1361
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
718
1362
  #
719
1363
  # @return [Types::StartStreamTranscriptionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
720
1364
  #
@@ -781,6 +1425,7 @@ module Aws::TranscribeStreamingService
781
1425
  #
782
1426
  # # signal events
783
1427
  # input_stream.signal_audio_event_event( ... )
1428
+ # input_stream.signal_configuration_event_event( ... )
784
1429
  #
785
1430
  # # make sure signaling :end_stream in the end
786
1431
  # input_stream.signal_end_stream
@@ -837,7 +1482,7 @@ module Aws::TranscribeStreamingService
837
1482
  # @example Request syntax with placeholder values
838
1483
  #
839
1484
  # async_resp = async_client.start_stream_transcription({
840
- # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
1485
+ # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
841
1486
  # media_sample_rate_hertz: 1, # required
842
1487
  # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
843
1488
  # vocabulary_name: "VocabularyName",
@@ -856,7 +1501,7 @@ module Aws::TranscribeStreamingService
856
1501
  # language_model_name: "ModelName",
857
1502
  # identify_language: false,
858
1503
  # language_options: "LanguageOptions",
859
- # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
1504
+ # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
860
1505
  # vocabulary_names: "VocabularyNames",
861
1506
  # vocabulary_filter_names: "VocabularyFilterNames",
862
1507
  # })
@@ -868,7 +1513,7 @@ module Aws::TranscribeStreamingService
868
1513
  # @example Response structure
869
1514
  #
870
1515
  # resp.request_id #=> String
871
- # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
1516
+ # resp.language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
872
1517
  # resp.media_sample_rate_hertz #=> Integer
873
1518
  # resp.media_encoding #=> String, one of "pcm", "ogg-opus", "flac"
874
1519
  # resp.vocabulary_name #=> String
@@ -902,9 +1547,9 @@ module Aws::TranscribeStreamingService
902
1547
  # event.transcript.results[0].alternatives[0].entities[0].content #=> String
903
1548
  # event.transcript.results[0].alternatives[0].entities[0].confidence #=> Float
904
1549
  # event.transcript.results[0].channel_id #=> String
905
- # event.transcript.results[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
1550
+ # event.transcript.results[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
906
1551
  # event.transcript.results[0].language_identification #=> Array
907
- # event.transcript.results[0].language_identification[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
1552
+ # event.transcript.results[0].language_identification[0].language_code #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
908
1553
  # event.transcript.results[0].language_identification[0].score #=> Float
909
1554
  #
910
1555
  # For :bad_request_exception event available at #on_bad_request_exception_event callback and response eventstream enumerator:
@@ -935,7 +1580,7 @@ module Aws::TranscribeStreamingService
935
1580
  # resp.language_model_name #=> String
936
1581
  # resp.identify_language #=> Boolean
937
1582
  # resp.language_options #=> String
938
- # resp.preferred_language #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN"
1583
+ # resp.preferred_language #=> String, one of "en-US", "en-GB", "es-US", "fr-CA", "fr-FR", "en-AU", "it-IT", "de-DE", "pt-BR", "ja-JP", "ko-KR", "zh-CN", "hi-IN", "th-TH"
939
1584
  # resp.vocabulary_names #=> String
940
1585
  # resp.vocabulary_filter_names #=> String
941
1586
  #
@@ -982,7 +1627,7 @@ module Aws::TranscribeStreamingService
982
1627
  http_response: Seahorse::Client::Http::AsyncResponse.new,
983
1628
  config: config)
984
1629
  context[:gem_name] = 'aws-sdk-transcribestreamingservice'
985
- context[:gem_version] = '1.43.0'
1630
+ context[:gem_version] = '1.45.0'
986
1631
  Seahorse::Client::Request.new(handlers, context)
987
1632
  end
988
1633