aws-sdk-transcribestreamingservice 1.43.0 → 1.45.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -10,19 +10,22 @@
10
10
  module Aws::TranscribeStreamingService
11
11
  module Types
12
12
 
13
- # A list of possible transcriptions for the audio.
13
+ # A list of possible alternative transcriptions for the input audio.
14
+ # Each alternative may contain one or more of `Items`, `Entities`, or
15
+ # `Transcript`.
14
16
  #
15
17
  # @!attribute [rw] transcript
16
- # The text that was transcribed from the audio.
18
+ # Contains transcribed text.
17
19
  # @return [String]
18
20
  #
19
21
  # @!attribute [rw] items
20
- # One or more alternative interpretations of the input audio.
22
+ # Contains words, phrases, or punctuation marks in your transcription
23
+ # output.
21
24
  # @return [Array<Types::Item>]
22
25
  #
23
26
  # @!attribute [rw] entities
24
- # Contains the entities identified as personally identifiable
25
- # information (PII) in the transcription output.
27
+ # Contains entities identified as personally identifiable information
28
+ # (PII) in your transcription output.
26
29
  # @return [Array<Types::Entity>]
27
30
  #
28
31
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/Alternative AWS API Documentation
@@ -35,16 +38,14 @@ module Aws::TranscribeStreamingService
35
38
  include Aws::Structure
36
39
  end
37
40
 
38
- # Provides a wrapper for the audio chunks that you are sending.
41
+ # A wrapper for your audio chunks. Your audio stream consists of one or
42
+ # more audio events, which consist of one or more audio chunks.
39
43
  #
40
- # For information on audio encoding in Amazon Transcribe, see [Speech
41
- # input][1]. For information on audio encoding formats in Amazon
42
- # Transcribe Medical, see [Speech input][2].
44
+ # For more information, see [Event stream encoding][1].
43
45
  #
44
46
  #
45
47
  #
46
- # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/input.html
47
- # [2]: https://docs.aws.amazon.com/transcribe/latest/dg/input-med.html
48
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/event-stream.html
48
49
  #
49
50
  # @note When making an API call, you may pass AudioEvent
50
51
  # data as a hash:
@@ -67,11 +68,11 @@ module Aws::TranscribeStreamingService
67
68
  include Aws::Structure
68
69
  end
69
70
 
70
- # One or more arguments to the `StartStreamTranscription` or
71
- # `StartMedicalStreamTranscription` operation was invalid. For example,
72
- # `MediaEncoding` was not set to a valid encoding, or `LanguageCode` was
73
- # not set to a valid code. Check the parameters and try your request
74
- # again.
71
+ # One or more arguments to the `StartStreamTranscription`,
72
+ # `StartMedicalStreamTranscription`, or
73
+ # `StartCallAnalyticsStreamTranscription` operation was not valid. For
74
+ # example, `MediaEncoding` or `LanguageCode` used a value that is not valid.
75
+ # Check the specified parameters and try your request again.
75
76
  #
76
77
  # @!attribute [rw] message
77
78
  # @return [String]
@@ -85,6 +86,235 @@ module Aws::TranscribeStreamingService
85
86
  include Aws::Structure
86
87
  end
87
88
 
89
+ # Contains entities identified as personally identifiable information
90
+ # (PII) in your transcription output, along with various associated
91
+ # attributes. Examples include category, confidence score, content,
92
+ # type, and start and end times.
93
+ #
94
+ # @!attribute [rw] begin_offset_millis
95
+ # The time, in milliseconds, from the beginning of the audio stream to
96
+ # the start of the identified entity.
97
+ # @return [Integer]
98
+ #
99
+ # @!attribute [rw] end_offset_millis
100
+ # The time, in milliseconds, from the beginning of the audio stream to
101
+ # the end of the identified entity.
102
+ # @return [Integer]
103
+ #
104
+ # @!attribute [rw] category
105
+ # The category of information identified. For example, `PII`.
106
+ # @return [String]
107
+ #
108
+ # @!attribute [rw] type
109
+ # The type of PII identified. For example, `NAME` or
110
+ # `CREDIT_DEBIT_NUMBER`.
111
+ # @return [String]
112
+ #
113
+ # @!attribute [rw] content
114
+ # The word or words that represent the identified entity.
115
+ # @return [String]
116
+ #
117
+ # @!attribute [rw] confidence
118
+ # The confidence score associated with the identification of an entity
119
+ # in your transcript.
120
+ #
121
+ # Confidence scores are values between 0 and 1. A larger value
122
+ # indicates a higher probability that the identified entity correctly
123
+ # matches the entity spoken in your media.
124
+ # @return [Float]
125
+ #
126
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CallAnalyticsEntity AWS API Documentation
127
+ #
128
+ class CallAnalyticsEntity < Struct.new(
129
+ :begin_offset_millis,
130
+ :end_offset_millis,
131
+ :category,
132
+ :type,
133
+ :content,
134
+ :confidence)
135
+ SENSITIVE = []
136
+ include Aws::Structure
137
+ end
138
+
139
+ # A word, phrase, or punctuation mark in your Call Analytics
140
+ # transcription output, along with various associated attributes, such
141
+ # as confidence score, type, and start and end times.
142
+ #
143
+ # @!attribute [rw] begin_offset_millis
144
+ # The time, in milliseconds, from the beginning of the audio stream to
145
+ # the start of the identified item.
146
+ # @return [Integer]
147
+ #
148
+ # @!attribute [rw] end_offset_millis
149
+ # The time, in milliseconds, from the beginning of the audio stream to
150
+ # the end of the identified item.
151
+ # @return [Integer]
152
+ #
153
+ # @!attribute [rw] type
154
+ # The type of item identified. Options are: `PRONUNCIATION` (spoken
155
+ # words) and `PUNCTUATION`.
156
+ # @return [String]
157
+ #
158
+ # @!attribute [rw] content
159
+ # The word or punctuation that was transcribed.
160
+ # @return [String]
161
+ #
162
+ # @!attribute [rw] confidence
163
+ # The confidence score associated with a word or phrase in your
164
+ # transcript.
165
+ #
166
+ # Confidence scores are values between 0 and 1. A larger value
167
+ # indicates a higher probability that the identified item correctly
168
+ # matches the item spoken in your media.
169
+ # @return [Float]
170
+ #
171
+ # @!attribute [rw] vocabulary_filter_match
172
+ # Indicates whether the specified item matches a word in the
173
+ # vocabulary filter included in your Call Analytics request. If
174
+ # `true`, there is a vocabulary filter match.
175
+ # @return [Boolean]
176
+ #
177
+ # @!attribute [rw] stable
178
+ # If partial result stabilization is enabled, `Stable` indicates
179
+ # whether the specified item is stable (`true`) or if it may change
180
+ # when the segment is complete (`false`).
181
+ # @return [Boolean]
182
+ #
183
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CallAnalyticsItem AWS API Documentation
184
+ #
185
+ class CallAnalyticsItem < Struct.new(
186
+ :begin_offset_millis,
187
+ :end_offset_millis,
188
+ :type,
189
+ :content,
190
+ :confidence,
191
+ :vocabulary_filter_match,
192
+ :stable)
193
+ SENSITIVE = []
194
+ include Aws::Structure
195
+ end
196
+
197
+ # Provides information on any `TranscriptFilterType` categories that
198
+ # matched your transcription output. Matches are identified for each
199
+ # segment upon completion of that segment.
200
+ #
201
+ # @!attribute [rw] matched_categories
202
+ # Lists the categories that were matched in your audio segment.
203
+ # @return [Array<String>]
204
+ #
205
+ # @!attribute [rw] matched_details
206
+ # Contains information about the matched categories, including
207
+ # category names and timestamps.
208
+ # @return [Hash<String,Types::PointsOfInterest>]
209
+ #
210
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CategoryEvent AWS API Documentation
211
+ #
212
+ class CategoryEvent < Struct.new(
213
+ :matched_categories,
214
+ :matched_details,
215
+ :event_type)
216
+ SENSITIVE = []
217
+ include Aws::Structure
218
+ end
219
+
220
+ # Makes it possible to specify which speaker is on which audio channel.
221
+ # For example, if your agent is the first participant to speak, you
222
+ # would set `ChannelId` to `0` (to indicate the first channel) and
223
+ # `ParticipantRole` to `AGENT` (to indicate that it's the agent
224
+ # speaking).
225
+ #
226
+ # @note When making an API call, you may pass ChannelDefinition
227
+ # data as a hash:
228
+ #
229
+ # {
230
+ # channel_id: 1, # required
231
+ # participant_role: "AGENT", # required, accepts AGENT, CUSTOMER
232
+ # }
233
+ #
234
+ # @!attribute [rw] channel_id
235
+ # Specify the audio channel you want to define.
236
+ # @return [Integer]
237
+ #
238
+ # @!attribute [rw] participant_role
239
+ # Specify the speaker you want to define. Omitting this parameter is
240
+ # equivalent to specifying both participants.
241
+ # @return [String]
242
+ #
243
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/ChannelDefinition AWS API Documentation
244
+ #
245
+ class ChannelDefinition < Struct.new(
246
+ :channel_id,
247
+ :participant_role)
248
+ SENSITIVE = []
249
+ include Aws::Structure
250
+ end
251
+
252
+ # Provides the location, using character count, in your transcript where
253
+ # a match is identified. For example, the location of an issue or a
254
+ # category match within a segment.
255
+ #
256
+ # @!attribute [rw] begin
257
+ # Provides the character count of the first character where a match is
258
+ # identified. For example, the first character associated with an
259
+ # issue or a category match in a segment transcript.
260
+ # @return [Integer]
261
+ #
262
+ # @!attribute [rw] end
263
+ # Provides the character count of the last character where a match is
264
+ # identified. For example, the last character associated with an issue
265
+ # or a category match in a segment transcript.
266
+ # @return [Integer]
267
+ #
268
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CharacterOffsets AWS API Documentation
269
+ #
270
+ class CharacterOffsets < Struct.new(
271
+ :begin,
272
+ :end)
273
+ SENSITIVE = []
274
+ include Aws::Structure
275
+ end
276
+
277
+ # Allows you to set audio channel definitions and post-call analytics
278
+ # settings.
279
+ #
280
+ # @note When making an API call, you may pass ConfigurationEvent
281
+ # data as a hash:
282
+ #
283
+ # {
284
+ # channel_definitions: [
285
+ # {
286
+ # channel_id: 1, # required
287
+ # participant_role: "AGENT", # required, accepts AGENT, CUSTOMER
288
+ # },
289
+ # ],
290
+ # post_call_analytics_settings: {
291
+ # output_location: "String", # required
292
+ # data_access_role_arn: "String", # required
293
+ # content_redaction_output: "redacted", # accepts redacted, redacted_and_unredacted
294
+ # output_encryption_kms_key_id: "String",
295
+ # },
296
+ # }
297
+ #
298
+ # @!attribute [rw] channel_definitions
299
+ # Indicates which speaker is on which audio channel.
300
+ # @return [Array<Types::ChannelDefinition>]
301
+ #
302
+ # @!attribute [rw] post_call_analytics_settings
303
+ # Provides additional optional settings for your Call Analytics
304
+ # post-call request, including encryption and output locations for
305
+ # your redacted and unredacted transcript.
306
+ # @return [Types::PostCallAnalyticsSettings]
307
+ #
308
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/ConfigurationEvent AWS API Documentation
309
+ #
310
+ class ConfigurationEvent < Struct.new(
311
+ :channel_definitions,
312
+ :post_call_analytics_settings,
313
+ :event_type)
314
+ SENSITIVE = []
315
+ include Aws::Structure
316
+ end
317
+
88
318
  # A new stream started with the same session ID. The current stream has
89
319
  # been terminated.
90
320
  #
@@ -100,35 +330,41 @@ module Aws::TranscribeStreamingService
100
330
  include Aws::Structure
101
331
  end
102
332
 
103
- # The entity identified as personally identifiable information (PII).
333
+ # Contains entities identified as personally identifiable information
334
+ # (PII) in your transcription output, along with various associated
335
+ # attributes. Examples include category, confidence score, type,
336
+ # stability score, and start and end times.
104
337
  #
105
338
  # @!attribute [rw] start_time
106
- # The start time of speech that was identified as PII.
339
+ # The start time, in milliseconds, of the utterance that was
340
+ # identified as PII.
107
341
  # @return [Float]
108
342
  #
109
343
  # @!attribute [rw] end_time
110
- # The end time of speech that was identified as PII.
344
+ # The end time, in milliseconds, of the utterance that was identified
345
+ # as PII.
111
346
  # @return [Float]
112
347
  #
113
348
  # @!attribute [rw] category
114
- # The category of information identified in this entity; for example,
115
- # PII.
349
+ # The category of information identified. The only category is `PII`.
116
350
  # @return [String]
117
351
  #
118
352
  # @!attribute [rw] type
119
- # The type of PII identified in this entity; for example, name or
120
- # credit card number.
353
+ # The type of PII identified. For example, `NAME` or
354
+ # `CREDIT_DEBIT_NUMBER`.
121
355
  # @return [String]
122
356
  #
123
357
  # @!attribute [rw] content
124
- # The words in the transcription output that have been identified as a
125
- # PII entity.
358
+ # The word or words identified as PII.
126
359
  # @return [String]
127
360
  #
128
361
  # @!attribute [rw] confidence
129
- # A value between zero and one that Amazon Transcribe assigns to PII
130
- # identified in the source audio. Larger values indicate a higher
131
- # confidence in PII identification.
362
+ # The confidence score associated with the identified PII entity in
363
+ # your audio.
364
+ #
365
+ # Confidence scores are values between 0 and 1. A larger value
366
+ # indicates a higher probability that the identified entity correctly
367
+ # matches the entity spoken in your media.
132
368
  # @return [Float]
133
369
  #
134
370
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/Entity AWS API Documentation
@@ -144,9 +380,8 @@ module Aws::TranscribeStreamingService
144
380
  include Aws::Structure
145
381
  end
146
382
 
147
- # A problem occurred while processing the audio. Amazon Transcribe or
148
- # Amazon Transcribe Medical terminated processing. Try your request
149
- # again.
383
+ # A problem occurred while processing the audio. Amazon Transcribe
384
+ # terminated processing.
150
385
  #
151
386
  # @!attribute [rw] message
152
387
  # @return [String]
@@ -160,50 +395,66 @@ module Aws::TranscribeStreamingService
160
395
  include Aws::Structure
161
396
  end
162
397
 
163
- # A word, phrase, or punctuation mark that is transcribed from the input
164
- # audio.
398
+ # Lists the issues that were identified in your audio segment.
399
+ #
400
+ # @!attribute [rw] character_offsets
401
+ # Provides the timestamps that identify when in an audio segment the
402
+ # specified issue occurs.
403
+ # @return [Types::CharacterOffsets]
404
+ #
405
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/IssueDetected AWS API Documentation
406
+ #
407
+ class IssueDetected < Struct.new(
408
+ :character_offsets)
409
+ SENSITIVE = []
410
+ include Aws::Structure
411
+ end
412
+
413
+ # A word, phrase, or punctuation mark in your transcription output,
414
+ # along with various associated attributes, such as confidence score,
415
+ # type, and start and end times.
165
416
  #
166
417
  # @!attribute [rw] start_time
167
- # The offset from the beginning of the audio stream to the beginning
168
- # of the audio that resulted in the item.
418
+ # The start time, in milliseconds, of the transcribed item.
169
419
  # @return [Float]
170
420
  #
171
421
  # @!attribute [rw] end_time
172
- # The offset from the beginning of the audio stream to the end of the
173
- # audio that resulted in the item.
422
+ # The end time, in milliseconds, of the transcribed item.
174
423
  # @return [Float]
175
424
  #
176
425
  # @!attribute [rw] type
177
- # The type of the item. `PRONUNCIATION` indicates that the item is a
178
- # word that was recognized in the input audio. `PUNCTUATION` indicates
179
- # that the item was interpreted as a pause in the input audio.
426
+ # The type of item identified. Options are: `PRONUNCIATION` (spoken
427
+ # words) and `PUNCTUATION`.
180
428
  # @return [String]
181
429
  #
182
430
  # @!attribute [rw] content
183
- # The word or punctuation that was recognized in the input audio.
431
+ # The word or punctuation that was transcribed.
184
432
  # @return [String]
185
433
  #
186
434
  # @!attribute [rw] vocabulary_filter_match
187
- # Indicates whether a word in the item matches a word in the
188
- # vocabulary filter you've chosen for your media stream. If `true`
189
- # then a word in the item matches your vocabulary filter.
435
+ # Indicates whether the specified item matches a word in the
436
+ # vocabulary filter included in your request. If `true`, there is a
437
+ # vocabulary filter match.
190
438
  # @return [Boolean]
191
439
  #
192
440
  # @!attribute [rw] speaker
193
- # If speaker identification is enabled, shows the speakers identified
194
- # in the media stream.
441
+ # If speaker partitioning is enabled, `Speaker` labels the speaker of
442
+ # the specified item.
195
443
  # @return [String]
196
444
  #
197
445
  # @!attribute [rw] confidence
198
- # A value between zero and one for an item that is a confidence score
199
- # that Amazon Transcribe assigns to each word or phrase that it
200
- # transcribes.
446
+ # The confidence score associated with a word or phrase in your
447
+ # transcript.
448
+ #
449
+ # Confidence scores are values between 0 and 1. A larger value
450
+ # indicates a higher probability that the identified item correctly
451
+ # matches the item spoken in your media.
201
452
  # @return [Float]
202
453
  #
203
454
  # @!attribute [rw] stable
204
- # If partial result stabilization has been enabled, indicates whether
205
- # the word or phrase in the item is stable. If `Stable` is `true`, the
206
- # result is stable.
455
+ # If partial result stabilization is enabled, `Stable` indicates
456
+ # whether the specified item is stable (`true`) or if it may change
457
+ # when the segment is complete (`false`).
207
458
  # @return [Boolean]
208
459
  #
209
460
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/Item AWS API Documentation
@@ -221,19 +472,20 @@ module Aws::TranscribeStreamingService
221
472
  include Aws::Structure
222
473
  end
223
474
 
224
- # The language codes of the identified languages and their associated
225
- # confidence scores. The confidence score is a value between zero and
226
- # one; a larger value indicates a higher confidence in the identified
227
- # language.
475
+ # The language code that represents the language identified in your
476
+ # audio, including the associated confidence score. If you enabled
477
+ # channel identification in your request and each channel contained a
478
+ # different language, you will have more than one `LanguageWithScore`
479
+ # result.
228
480
  #
229
481
  # @!attribute [rw] language_code
230
- # The language code of the language identified by Amazon Transcribe.
482
+ # The language code of the identified language.
231
483
  # @return [String]
232
484
  #
233
485
  # @!attribute [rw] score
234
- # The confidence score for the associated language code. Confidence
235
- # scores are values between zero and one; larger values indicate a
236
- # higher confidence in the identified language.
486
+ # The confidence score associated with the identified language code.
487
+ # Confidence scores are values between zero and one; larger values
488
+ # indicate a higher confidence in the identified language.
237
489
  # @return [Float]
238
490
  #
239
491
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/LanguageWithScore AWS API Documentation
@@ -245,11 +497,9 @@ module Aws::TranscribeStreamingService
245
497
  include Aws::Structure
246
498
  end
247
499
 
248
- # You have exceeded the maximum number of concurrent transcription
249
- # streams, are starting transcription streams too quickly, or the
250
- # maximum audio length of 4 hours. Wait until a stream has finished
251
- # processing, or break your audio stream into smaller chunks and try
252
- # your request again.
500
+ # Your client has exceeded one of the Amazon Transcribe limits. This is
501
+ # typically the audio length limit. Break your audio stream into smaller
502
+ # chunks and try your request again.
253
503
  #
254
504
  # @!attribute [rw] message
255
505
  # @return [String]
@@ -263,20 +513,22 @@ module Aws::TranscribeStreamingService
263
513
  include Aws::Structure
264
514
  end
265
515
 
266
- # A list of possible transcriptions for the audio.
516
+ # A list of possible alternative transcriptions for the input audio.
517
+ # Each alternative may contain one or more of `Items`, `Entities`, or
518
+ # `Transcript`.
267
519
  #
268
520
  # @!attribute [rw] transcript
269
- # The text that was transcribed from the audio.
521
+ # Contains transcribed text.
270
522
  # @return [String]
271
523
  #
272
524
  # @!attribute [rw] items
273
- # A list of objects that contains words and punctuation marks that
274
- # represents one or more interpretations of the input audio.
525
+ # Contains words, phrases, or punctuation marks in your transcription
526
+ # output.
275
527
  # @return [Array<Types::MedicalItem>]
276
528
  #
277
529
  # @!attribute [rw] entities
278
- # Contains the medical entities identified as personal health
279
- # information in the transcription output.
530
+ # Contains entities identified as personal health information (PHI) in
531
+ # your transcription output.
280
532
  # @return [Array<Types::MedicalEntity>]
281
533
  #
282
534
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/MedicalAlternative AWS API Documentation
@@ -289,32 +541,36 @@ module Aws::TranscribeStreamingService
289
541
  include Aws::Structure
290
542
  end
291
543
 
292
- # The medical entity identified as personal health information.
544
+ # Contains entities identified as personal health information (PHI) in
545
+ # your transcription output, along with various associated attributes.
546
+ # Examples include category, confidence score, type, stability score,
547
+ # and start and end times.
293
548
  #
294
549
  # @!attribute [rw] start_time
295
- # The start time of the speech that was identified as a medical
296
- # entity.
550
+ # The start time, in milliseconds, of the utterance that was
551
+ # identified as PHI.
297
552
  # @return [Float]
298
553
  #
299
554
  # @!attribute [rw] end_time
300
- # The end time of the speech that was identified as a medical entity.
555
+ # The end time, in milliseconds, of the utterance that was identified
556
+ # as PHI.
301
557
  # @return [Float]
302
558
  #
303
559
  # @!attribute [rw] category
304
- # The type of personal health information of the medical entity.
560
+ # The category of information identified. The only category is `PHI`.
305
561
  # @return [String]
306
562
  #
307
563
  # @!attribute [rw] content
308
- # The word or words in the transcription output that have been
309
- # identified as a medical entity.
564
+ # The word or words identified as PHI.
310
565
  # @return [String]
311
566
  #
312
567
  # @!attribute [rw] confidence
313
- # A value between zero and one that Amazon Transcribe Medical assigned
314
- # to the personal health information that it identified in the source
315
- # audio. Larger values indicate that Amazon Transcribe Medical has
316
- # higher confidence in the personal health information that it
317
- # identified.
568
+ # The confidence score associated with the identified PHI entity in
569
+ # your audio.
570
+ #
571
+ # Confidence scores are values between 0 and 1. A larger value
572
+ # indicates a higher probability that the identified entity correctly
573
+ # matches the entity spoken in your media.
318
574
  # @return [Float]
319
575
  #
320
576
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/MedicalEntity AWS API Documentation
@@ -329,42 +585,39 @@ module Aws::TranscribeStreamingService
329
585
  include Aws::Structure
330
586
  end
331
587
 
332
- # A word, phrase, or punctuation mark that is transcribed from the input
333
- # audio.
588
+ # A word, phrase, or punctuation mark in your transcription output,
589
+ # along with various associated attributes, such as confidence score,
590
+ # type, and start and end times.
334
591
  #
335
592
  # @!attribute [rw] start_time
336
- # The number of seconds into an audio stream that indicates the
337
- # creation time of an item.
593
+ # The start time, in milliseconds, of the transcribed item.
338
594
  # @return [Float]
339
595
  #
340
596
  # @!attribute [rw] end_time
341
- # The number of seconds into an audio stream that indicates the
342
- # creation time of an item.
597
+ # The end time, in milliseconds, of the transcribed item.
343
598
  # @return [Float]
344
599
  #
345
600
  # @!attribute [rw] type
346
- # The type of the item. `PRONUNCIATION` indicates that the item is a
347
- # word that was recognized in the input audio. `PUNCTUATION` indicates
348
- # that the item was interpreted as a pause in the input audio, such as
349
- # a period to indicate the end of a sentence.
601
+ # The type of item identified. Options are: `PRONUNCIATION` (spoken
602
+ # words) and `PUNCTUATION`.
350
603
  # @return [String]
351
604
  #
352
605
  # @!attribute [rw] content
353
- # The word or punctuation mark that was recognized in the input audio.
606
+ # The word or punctuation that was transcribed.
354
607
  # @return [String]
355
608
  #
356
609
  # @!attribute [rw] confidence
357
- # A value between 0 and 1 for an item that is a confidence score that
358
- # Amazon Transcribe Medical assigns to each word that it transcribes.
610
+ # The confidence score associated with a word or phrase in your
611
+ # transcript.
612
+ #
613
+ # Confidence scores are values between 0 and 1. A larger value
614
+ # indicates a higher probability that the identified item correctly
615
+ # matches the item spoken in your media.
359
616
  # @return [Float]
360
617
  #
361
618
  # @!attribute [rw] speaker
362
- # If speaker identification is enabled, shows the integer values that
363
- # correspond to the different speakers identified in the stream. For
364
- # example, if the value of `Speaker` in the stream is either a `0` or
365
- # a `1`, that indicates that Amazon Transcribe Medical has identified
366
- # two speakers in the stream. The value of `0` corresponds to one
367
- # speaker and the value of `1` corresponds to the other speaker.
619
+ # If speaker partitioning is enabled, `Speaker` labels the speaker of
620
+ # the specified item.
368
621
  # @return [String]
369
622
  #
370
623
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/MedicalItem AWS API Documentation
@@ -380,45 +633,41 @@ module Aws::TranscribeStreamingService
380
633
  include Aws::Structure
381
634
  end
382
635
 
383
- # The results of transcribing a portion of the input audio stream.
636
+ # The `Result` associated with a `MedicalTranscript`.
637
+ #
638
+ # Contains a set of transcription results from one or more audio
639
+ # segments, along with additional information per your request
640
+ # parameters. This can include information relating to alternative
641
+ # transcriptions, channel identification, partial result stabilization,
642
+ # language identification, and other transcription-related data.
384
643
  #
385
644
  # @!attribute [rw] result_id
386
- # A unique identifier for the result.
645
+ # Provides a unique identifier for the `Result`.
387
646
  # @return [String]
388
647
  #
389
648
  # @!attribute [rw] start_time
390
- # The time, in seconds, from the beginning of the audio stream to the
391
- # beginning of the result.
649
+ # The start time, in milliseconds, of the `Result`.
392
650
  # @return [Float]
393
651
  #
394
652
  # @!attribute [rw] end_time
395
- # The time, in seconds, from the beginning of the audio stream to the
396
- # end of the result.
653
+ # The end time, in milliseconds, of the `Result`.
397
654
  # @return [Float]
398
655
  #
399
656
  # @!attribute [rw] is_partial
400
- # Amazon Transcribe Medical divides the incoming audio stream into
401
- # segments at natural points in the audio. Transcription results are
402
- # returned based on these segments.
403
- #
404
- # The `IsPartial` field is `true` to indicate that Amazon Transcribe
405
- # Medical has additional transcription data to send. The `IsPartial`
406
- # field is `false` to indicate that this is the last transcription
407
- # result for the segment.
657
+ # Indicates if the segment is complete.
658
+ #
659
+ # If `IsPartial` is `true`, the segment is not complete. If
660
+ # `IsPartial` is `false`, the segment is complete.
408
661
  # @return [Boolean]
409
662
  #
410
663
  # @!attribute [rw] alternatives
411
- # A list of possible transcriptions of the audio. Each alternative
412
- # typically contains one `Item` that contains the result of the
413
- # transcription.
664
+ # A list of possible alternative transcriptions for the input audio.
665
+ # Each alternative may contain one or more of `Items`, `Entities`, or
666
+ # `Transcript`.
414
667
  # @return [Array<Types::MedicalAlternative>]
415
668
  #
416
669
  # @!attribute [rw] channel_id
417
- # When channel identification is enabled, Amazon Transcribe Medical
418
- # transcribes the speech from each audio channel separately.
419
- #
420
- # You can use `ChannelId` to retrieve the transcription results for a
421
- # single channel in your audio stream.
670
+ # Indicates the channel identified for the `Result`.
422
671
  # @return [String]
423
672
  #
424
673
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/MedicalResult AWS API Documentation
@@ -434,11 +683,19 @@ module Aws::TranscribeStreamingService
434
683
  include Aws::Structure
435
684
  end
436
685
 
437
- # The medical transcript in a MedicalTranscriptEvent.
686
+ # The `MedicalTranscript` associated with a `MedicalTranscriptEvent`.
687
+ # `MedicalTranscript` contains `Results`, which contains a set of
688
+ # transcription results from one or more audio segments, along with
689
+ # additional information per your request parameters.
690
+ #
438
691
  #
439
692
  # @!attribute [rw] results
440
- # MedicalResult objects that contain the results of transcribing a
441
- # portion of the input audio stream. The array can be empty.
693
+ # Contains a set of transcription results from one or more audio
694
+ # segments, along with additional information per your request
695
+ # parameters. This can include information relating to alternative
696
+ # transcriptions, channel identification, partial result
697
+ # stabilization, language identification, and other
698
+ # transcription-related data.
442
699
  # @return [Array<Types::MedicalResult>]
443
700
  #
444
701
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/MedicalTranscript AWS API Documentation
@@ -449,12 +706,20 @@ module Aws::TranscribeStreamingService
449
706
  include Aws::Structure
450
707
  end
451
708
 
452
- # Represents a set of transcription results from the server to the
453
- # client. It contains one or more segments of the transcription.
709
+ # The `MedicalTranscriptEvent` associated with a
710
+ # `MedicalTranscriptResultStream`.
711
+ #
712
+ # Contains a set of transcription results from one or more audio
713
+ # segments, along with additional information per your request
714
+ # parameters.
454
715
  #
455
716
  # @!attribute [rw] transcript
456
- # The transcription of the audio stream. The transcription is composed
457
- # of all of the items in the results list.
717
+ # Contains `Results`, which contains a set of transcription results
718
+ # from one or more audio segments, along with additional information
719
+ # per your request parameters. This can include information relating
720
+ # to alternative transcriptions, channel identification, partial
721
+ # result stabilization, language identification, and other
722
+ # transcription-related data.
458
723
  # @return [Types::MedicalTranscript]
459
724
  #
460
725
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/MedicalTranscriptEvent AWS API Documentation
@@ -466,79 +731,533 @@ module Aws::TranscribeStreamingService
466
731
  include Aws::Structure
467
732
  end
468
733
 
469
- # The result of transcribing a portion of the input audio stream.
734
+ # Contains the timestamps of matched categories.
735
+ #
736
+ # @!attribute [rw] timestamp_ranges
737
+ # Contains the timestamp ranges (start time through end time) of
738
+ # matched categories and rules.
739
+ # @return [Array<Types::TimestampRange>]
740
+ #
741
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/PointsOfInterest AWS API Documentation
742
+ #
743
+ class PointsOfInterest < Struct.new(
744
+ :timestamp_ranges)
745
+ SENSITIVE = []
746
+ include Aws::Structure
747
+ end
748
+
749
+ # Allows you to specify additional settings for your streaming Call
750
+ # Analytics post-call request, including output locations for your
751
+ # redacted and unredacted transcript, which IAM role to use, and,
752
+ # optionally, which encryption key to use.
753
+ #
754
+ # `ContentRedactionOutput`, `DataAccessRoleArn`, and `OutputLocation`
755
+ # are required fields.
756
+ #
757
+ # @note When making an API call, you may pass PostCallAnalyticsSettings
758
+ # data as a hash:
759
+ #
760
+ # {
761
+ # output_location: "String", # required
762
+ # data_access_role_arn: "String", # required
763
+ # content_redaction_output: "redacted", # accepts redacted, redacted_and_unredacted
764
+ # output_encryption_kms_key_id: "String",
765
+ # }
766
+ #
767
+ # @!attribute [rw] output_location
768
+ # The Amazon S3 location where you want your Call Analytics post-call
769
+ # transcription output stored. You can use any of the following
770
+ # formats to specify the output location:
771
+ #
772
+ # 1. s3://DOC-EXAMPLE-BUCKET
773
+ #
774
+ # 2. s3://DOC-EXAMPLE-BUCKET/my-output-folder/
775
+ #
776
+ # 3. s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json
777
+ # @return [String]
778
+ #
779
+ # @!attribute [rw] data_access_role_arn
780
+ # The Amazon Resource Name (ARN) of an IAM role that has permissions
781
+ # to access the Amazon S3 bucket that contains your input files. If
782
+ # the role that you specify doesn’t have the appropriate permissions
783
+ # to access the specified Amazon S3 location, your request fails.
784
+ #
785
+ # IAM role ARNs have the format
786
+ # `arn:partition:iam::account:role/role-name-with-path`. For example:
787
+ # `arn:aws:iam::111122223333:role/Admin`. For more information, see
788
+ # [IAM ARNs][1].
789
+ #
790
+ #
791
+ #
792
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
793
+ # @return [String]
794
+ #
795
+ # @!attribute [rw] content_redaction_output
796
+ # Specify whether you want only a redacted transcript or both a
797
+ # redacted and an unredacted transcript. If you choose redacted and
798
+ # unredacted, two JSON files are generated and stored in the Amazon S3
799
+ # output location you specify.
800
+ #
801
+ # Note that to include `ContentRedactionOutput` in your request, you
802
+ # must enable content redaction (`ContentRedactionType`).
803
+ # @return [String]
804
+ #
805
+ # @!attribute [rw] output_encryption_kms_key_id
806
+ # The KMS key you want to use to encrypt your Call Analytics post-call
807
+ # output.
808
+ #
809
+ # If using a key located in the **current** Amazon Web Services
810
+ # account, you can specify your KMS key in one of four ways:
811
+ #
812
+ # 1. Use the KMS key ID itself. For example,
813
+ # `1234abcd-12ab-34cd-56ef-1234567890ab`.
814
+ #
815
+ # 2. Use an alias for the KMS key ID. For example,
816
+ # `alias/ExampleAlias`.
817
+ #
818
+ # 3. Use the Amazon Resource Name (ARN) for the KMS key ID. For
819
+ # example,
820
+ # `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
821
+ #
822
+ # 4. Use the ARN for the KMS key alias. For example,
823
+ # `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
824
+ #
825
+ # If using a key located in a **different** Amazon Web Services
826
+ # account than the current Amazon Web Services account, you can
827
+ # specify your KMS key in one of two ways:
828
+ #
829
+ # 1. Use the ARN for the KMS key ID. For example,
830
+ # `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
831
+ #
832
+ # 2. Use the ARN for the KMS key alias. For example,
833
+ # `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
834
+ #
835
+ # Note that the user making the request must have permission to use
836
+ # the specified KMS key.
837
+ # @return [String]
838
+ #
839
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/PostCallAnalyticsSettings AWS API Documentation
840
+ #
841
+ class PostCallAnalyticsSettings < Struct.new(
842
+ :output_location,
843
+ :data_access_role_arn,
844
+ :content_redaction_output,
845
+ :output_encryption_kms_key_id)
846
+ SENSITIVE = []
847
+ include Aws::Structure
848
+ end
849
+
850
+ # The `Result` associated with a ``.
851
+ #
852
+ # Contains a set of transcription results from one or more audio
853
+ # segments, along with additional information per your request
854
+ # parameters. This can include information relating to alternative
855
+ # transcriptions, channel identification, partial result stabilization,
856
+ # language identification, and other transcription-related data.
857
+ #
858
+ # @!attribute [rw] result_id
859
+ # Provides a unique identifier for the `Result`.
860
+ # @return [String]
861
+ #
862
+ # @!attribute [rw] start_time
863
+ # The start time, in milliseconds, of the `Result`.
864
+ # @return [Float]
865
+ #
866
+ # @!attribute [rw] end_time
867
+ # The end time, in milliseconds, of the `Result`.
868
+ # @return [Float]
869
+ #
870
+ # @!attribute [rw] is_partial
871
+ # Indicates if the segment is complete.
872
+ #
873
+ # If `IsPartial` is `true`, the segment is not complete. If
874
+ # `IsPartial` is `false`, the segment is complete.
875
+ # @return [Boolean]
876
+ #
877
+ # @!attribute [rw] alternatives
878
+ # A list of possible alternative transcriptions for the input audio.
879
+ # Each alternative may contain one or more of `Items`, `Entities`, or
880
+ # `Transcript`.
881
+ # @return [Array<Types::Alternative>]
882
+ #
883
+ # @!attribute [rw] channel_id
884
+ # Indicates which audio channel is associated with the `Result`.
885
+ # @return [String]
886
+ #
887
+ # @!attribute [rw] language_code
888
+ # The language code that represents the language spoken in your audio
889
+ # stream.
890
+ # @return [String]
891
+ #
892
+ # @!attribute [rw] language_identification
893
+ # The language code of the dominant language identified in your
894
+ # stream.
895
+ #
896
+ # If you enabled channel identification and each channel of your audio
897
+ # contains a different language, you may have more than one result.
898
+ # @return [Array<Types::LanguageWithScore>]
899
+ #
900
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/Result AWS API Documentation
901
+ #
902
+ class Result < Struct.new(
903
+ :result_id,
904
+ :start_time,
905
+ :end_time,
906
+ :is_partial,
907
+ :alternatives,
908
+ :channel_id,
909
+ :language_code,
910
+ :language_identification)
911
+ SENSITIVE = []
912
+ include Aws::Structure
913
+ end
914
+
915
+ # The service is currently unavailable. Try your request later.
916
+ #
917
+ # @!attribute [rw] message
918
+ # @return [String]
919
+ #
920
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/ServiceUnavailableException AWS API Documentation
921
+ #
922
+ class ServiceUnavailableException < Struct.new(
923
+ :message,
924
+ :event_type)
925
+ SENSITIVE = []
926
+ include Aws::Structure
927
+ end
928
+
929
+ # @note When making an API call, you may pass StartCallAnalyticsStreamTranscriptionRequest
930
+ # data as a hash:
931
+ #
932
+ # {
933
+ # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR
934
+ # media_sample_rate_hertz: 1, # required
935
+ # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
936
+ # vocabulary_name: "VocabularyName",
937
+ # session_id: "SessionId",
938
+ # input_event_stream_hander: EventStreams::AudioStream.new,
939
+ # vocabulary_filter_name: "VocabularyFilterName",
940
+ # vocabulary_filter_method: "remove", # accepts remove, mask, tag
941
+ # language_model_name: "ModelName",
942
+ # enable_partial_results_stabilization: false,
943
+ # partial_results_stability: "high", # accepts high, medium, low
944
+ # content_identification_type: "PII", # accepts PII
945
+ # content_redaction_type: "PII", # accepts PII
946
+ # pii_entity_types: "PiiEntityTypes",
947
+ # }
948
+ #
949
+ # @!attribute [rw] language_code
950
+ # Specify the language code that represents the language spoken in
951
+ # your audio.
952
+ #
953
+ # If you're unsure of the language spoken in your audio, consider
954
+ # using `IdentifyLanguage` to enable automatic language
955
+ # identification.
956
+ #
957
+ # For a list of languages supported with streaming Call Analytics,
958
+ # refer to the [Supported languages][1] table.
959
+ #
960
+ #
961
+ #
962
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
963
+ # @return [String]
964
+ #
965
+ # @!attribute [rw] media_sample_rate_hertz
966
+ # The sample rate of the input audio (in hertz). Low-quality audio,
967
+ # such as telephone audio, is typically around 8,000 Hz. High-quality
968
+ # audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the
969
+ # sample rate you specify must match that of your audio.
970
+ # @return [Integer]
971
+ #
972
+ # @!attribute [rw] media_encoding
973
+ # Specify the encoding of your input audio. Supported formats are:
974
+ #
975
+ # * FLAC
976
+ #
977
+ # * OPUS-encoded audio in an Ogg container
978
+ #
979
+ # * PCM (only signed 16-bit little-endian audio formats, which does
980
+ # not include WAV)
981
+ #
982
+ # For more information, see [Media formats][1].
983
+ #
984
+ #
985
+ #
986
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
987
+ # @return [String]
988
+ #
989
+ # @!attribute [rw] vocabulary_name
990
+ # Specify the name of the custom vocabulary that you want to use when
991
+ # processing your transcription. Note that vocabulary names are case
992
+ # sensitive.
993
+ #
994
+ # If the language of the specified custom vocabulary doesn't match
995
+ # the language identified in your media, the custom vocabulary is not
996
+ # applied to your transcription.
997
+ #
998
+ # For more information, see [Custom vocabularies][1].
999
+ #
1000
+ #
1001
+ #
1002
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
1003
+ # @return [String]
1004
+ #
1005
+ # @!attribute [rw] session_id
1006
+ # Specify a name for your Call Analytics transcription session. If you
1007
+ # don't include this parameter in your request, Amazon Transcribe
1008
+ # generates an ID and returns it in the response.
1009
+ #
1010
+ # You can use a session ID to retry a streaming session.
1011
+ # @return [String]
1012
+ #
1013
+ # @!attribute [rw] audio_stream
1014
+ # An encoded stream of audio blobs. Audio streams are encoded as
1015
+ # either HTTP/2 or WebSocket data frames.
1016
+ #
1017
+ # For more information, see [Transcribing streaming audio][1].
1018
+ #
1019
+ #
1020
+ #
1021
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
1022
+ # @return [Types::AudioStream]
1023
+ #
1024
+ # @!attribute [rw] vocabulary_filter_name
1025
+ # Specify the name of the custom vocabulary filter that you want to
1026
+ # use when processing your transcription. Note that vocabulary filter
1027
+ # names are case sensitive.
1028
+ #
1029
+ # If the language of the specified custom vocabulary filter doesn't
1030
+ # match the language identified in your media, the vocabulary filter
1031
+ # is not applied to your transcription.
1032
+ #
1033
+ # For more information, see [Using vocabulary filtering with unwanted
1034
+ # words][1].
1035
+ #
1036
+ #
1037
+ #
1038
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
1039
+ # @return [String]
1040
+ #
1041
+ # @!attribute [rw] vocabulary_filter_method
1042
+ # Specify how you want your vocabulary filter applied to your
1043
+ # transcript.
1044
+ #
1045
+ # To replace words with `***`, choose `mask`.
1046
+ #
1047
+ # To delete words, choose `remove`.
1048
+ #
1049
+ # To flag words without changing them, choose `tag`.
1050
+ # @return [String]
1051
+ #
1052
+ # @!attribute [rw] language_model_name
1053
+ # Specify the name of the custom language model that you want to use
1054
+ # when processing your transcription. Note that language model names
1055
+ # are case sensitive.
1056
+ #
1057
+ # The language of the specified language model must match the language
1058
+ # code you specify in your transcription request. If the languages
1059
+ # don't match, the custom language model isn't applied. There are no
1060
+ # errors or warnings associated with a language mismatch.
1061
+ #
1062
+ # For more information, see [Custom language models][1].
1063
+ #
1064
+ #
1065
+ #
1066
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
1067
+ # @return [String]
1068
+ #
1069
+ # @!attribute [rw] enable_partial_results_stabilization
1070
+ # Enables partial result stabilization for your transcription. Partial
1071
+ # result stabilization can reduce latency in your output, but may
1072
+ # impact accuracy. For more information, see [Partial-result
1073
+ # stabilization][1].
1074
+ #
1075
+ #
1076
+ #
1077
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
1078
+ # @return [Boolean]
1079
+ #
1080
+ # @!attribute [rw] partial_results_stability
1081
+ # Specify the level of stability to use when you enable partial
1082
+ # results stabilization (`EnablePartialResultsStabilization`).
1083
+ #
1084
+ # Low stability provides the highest accuracy. High stability
1085
+ # transcribes faster, but with slightly lower accuracy.
1086
+ #
1087
+ # For more information, see [Partial-result stabilization][1].
1088
+ #
1089
+ #
1090
+ #
1091
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
1092
+ # @return [String]
1093
+ #
1094
+ # @!attribute [rw] content_identification_type
1095
+ # Labels all personally identifiable information (PII) identified in
1096
+ # your transcript.
1097
+ #
1098
+ # Content identification is performed at the segment level; PII
1099
+ # specified in `PiiEntityTypes` is flagged upon complete transcription
1100
+ # of an audio segment.
1101
+ #
1102
+ # You can’t set `ContentIdentificationType` and `ContentRedactionType`
1103
+ # in the same request. If you set both, your request returns a
1104
+ # `BadRequestException`.
1105
+ #
1106
+ # For more information, see [Redacting or identifying personally
1107
+ # identifiable information][1].
1108
+ #
1109
+ #
1110
+ #
1111
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
1112
+ # @return [String]
1113
+ #
1114
+ # @!attribute [rw] content_redaction_type
1115
+ # Redacts all personally identifiable information (PII) identified in
1116
+ # your transcript.
1117
+ #
1118
+ # Content redaction is performed at the segment level; PII specified
1119
+ # in `PiiEntityTypes` is redacted upon complete transcription of an
1120
+ # audio segment.
1121
+ #
1122
+ # You can’t set `ContentRedactionType` and `ContentIdentificationType`
1123
+ # in the same request. If you set both, your request returns a
1124
+ # `BadRequestException`.
1125
+ #
1126
+ # For more information, see [Redacting or identifying personally
1127
+ # identifiable information][1].
1128
+ #
1129
+ #
1130
+ #
1131
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
1132
+ # @return [String]
1133
+ #
1134
+ # @!attribute [rw] pii_entity_types
1135
+ # Specify which types of personally identifiable information (PII) you
1136
+ # want to redact in your transcript. You can include as many types as
1137
+ # you'd like, or you can select `ALL`.
1138
+ #
1139
+ # To include `PiiEntityTypes` in your Call Analytics request, you must
1140
+ # also include either `ContentIdentificationType` or
1141
+ # `ContentRedactionType`.
1142
+ #
1143
+ # Values must be comma-separated and can include:
1144
+ # `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`,
1145
+ # `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`,
1146
+ # `ADDRESS`, `NAME`, `PHONE`, `SSN`, or `ALL`.
1147
+ # @return [String]
1148
+ #
1149
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartCallAnalyticsStreamTranscriptionRequest AWS API Documentation
1150
+ #
1151
+ class StartCallAnalyticsStreamTranscriptionRequest < Struct.new(
1152
+ :language_code,
1153
+ :media_sample_rate_hertz,
1154
+ :media_encoding,
1155
+ :vocabulary_name,
1156
+ :session_id,
1157
+ :audio_stream,
1158
+ :vocabulary_filter_name,
1159
+ :vocabulary_filter_method,
1160
+ :language_model_name,
1161
+ :enable_partial_results_stabilization,
1162
+ :partial_results_stability,
1163
+ :content_identification_type,
1164
+ :content_redaction_type,
1165
+ :pii_entity_types)
1166
+ SENSITIVE = []
1167
+ include Aws::Structure
1168
+ end
1169
+
1170
+ # @!attribute [rw] request_id
1171
+ # Provides the identifier for your Call Analytics streaming request.
1172
+ # @return [String]
1173
+ #
1174
+ # @!attribute [rw] language_code
1175
+ # Provides the language code that you specified in your Call Analytics
1176
+ # request.
1177
+ # @return [String]
1178
+ #
1179
+ # @!attribute [rw] media_sample_rate_hertz
1180
+ # Provides the sample rate that you specified in your Call Analytics
1181
+ # request.
1182
+ # @return [Integer]
470
1183
  #
471
- # @!attribute [rw] result_id
472
- # A unique identifier for the result.
1184
+ # @!attribute [rw] media_encoding
1185
+ # Provides the media encoding you specified in your Call Analytics
1186
+ # request.
473
1187
  # @return [String]
474
1188
  #
475
- # @!attribute [rw] start_time
476
- # The offset in seconds from the beginning of the audio stream to the
477
- # beginning of the result.
478
- # @return [Float]
1189
+ # @!attribute [rw] vocabulary_name
1190
+ # Provides the name of the custom vocabulary that you specified in
1191
+ # your Call Analytics request.
1192
+ # @return [String]
479
1193
  #
480
- # @!attribute [rw] end_time
481
- # The offset in seconds from the beginning of the audio stream to the
482
- # end of the result.
483
- # @return [Float]
1194
+ # @!attribute [rw] session_id
1195
+ # Provides the identifier for your Call Analytics transcription
1196
+ # session.
1197
+ # @return [String]
484
1198
  #
485
- # @!attribute [rw] is_partial
486
- # Amazon Transcribe divides the incoming audio stream into segments at
487
- # natural points in the audio. Transcription results are returned
488
- # based on these segments.
1199
+ # @!attribute [rw] call_analytics_transcript_result_stream
1200
+ # Provides detailed information about your Call Analytics streaming
1201
+ # session.
1202
+ # @return [Types::CallAnalyticsTranscriptResultStream]
489
1203
  #
490
- # The `IsPartial` field is `true` to indicate that Amazon Transcribe
491
- # has additional transcription data to send, `false` to indicate that
492
- # this is the last transcription result for the segment.
493
- # @return [Boolean]
1204
+ # @!attribute [rw] vocabulary_filter_name
1205
+ # Provides the name of the custom vocabulary filter that you specified
1206
+ # in your Call Analytics request.
1207
+ # @return [String]
494
1208
  #
495
- # @!attribute [rw] alternatives
496
- # A list of possible transcriptions for the audio. Each alternative
497
- # typically contains one `item` that contains the result of the
1209
+ # @!attribute [rw] vocabulary_filter_method
1210
+ # Provides the vocabulary filtering method used in your Call Analytics
498
1211
  # transcription.
499
- # @return [Array<Types::Alternative>]
500
- #
501
- # @!attribute [rw] channel_id
502
- # When channel identification is enabled, Amazon Transcribe
503
- # transcribes the speech from each audio channel separately.
504
- #
505
- # You can use `ChannelId` to retrieve the transcription results for a
506
- # single channel in your audio stream.
507
1212
  # @return [String]
508
1213
  #
509
- # @!attribute [rw] language_code
510
- # The language code of the identified language in your media stream.
1214
+ # @!attribute [rw] language_model_name
1215
+ # Provides the name of the custom language model that you specified in
1216
+ # your Call Analytics request.
511
1217
  # @return [String]
512
1218
  #
513
- # @!attribute [rw] language_identification
514
- # The language code of the dominant language identified in your media.
515
- # @return [Array<Types::LanguageWithScore>]
1219
+ # @!attribute [rw] enable_partial_results_stabilization
1220
+ # Shows whether partial results stabilization was enabled for your
1221
+ # Call Analytics transcription.
1222
+ # @return [Boolean]
516
1223
  #
517
- # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/Result AWS API Documentation
1224
+ # @!attribute [rw] partial_results_stability
1225
+ # Provides the stabilization level used for your transcription.
1226
+ # @return [String]
518
1227
  #
519
- class Result < Struct.new(
520
- :result_id,
521
- :start_time,
522
- :end_time,
523
- :is_partial,
524
- :alternatives,
525
- :channel_id,
526
- :language_code,
527
- :language_identification)
528
- SENSITIVE = []
529
- include Aws::Structure
530
- end
531
-
532
- # Service is currently unavailable. Try your request later.
1228
+ # @!attribute [rw] content_identification_type
1229
+ # Shows whether content identification was enabled for your Call
1230
+ # Analytics transcription.
1231
+ # @return [String]
533
1232
  #
534
- # @!attribute [rw] message
1233
+ # @!attribute [rw] content_redaction_type
1234
+ # Shows whether content redaction was enabled for your Call Analytics
1235
+ # transcription.
535
1236
  # @return [String]
536
1237
  #
537
- # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/ServiceUnavailableException AWS API Documentation
1238
+ # @!attribute [rw] pii_entity_types
1239
+ # Lists the PII entity types you specified in your Call Analytics
1240
+ # request.
1241
+ # @return [String]
538
1242
  #
539
- class ServiceUnavailableException < Struct.new(
540
- :message,
541
- :event_type)
1243
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartCallAnalyticsStreamTranscriptionResponse AWS API Documentation
1244
+ #
1245
+ class StartCallAnalyticsStreamTranscriptionResponse < Struct.new(
1246
+ :request_id,
1247
+ :language_code,
1248
+ :media_sample_rate_hertz,
1249
+ :media_encoding,
1250
+ :vocabulary_name,
1251
+ :session_id,
1252
+ :call_analytics_transcript_result_stream,
1253
+ :vocabulary_filter_name,
1254
+ :vocabulary_filter_method,
1255
+ :language_model_name,
1256
+ :enable_partial_results_stabilization,
1257
+ :partial_results_stability,
1258
+ :content_identification_type,
1259
+ :content_redaction_type,
1260
+ :pii_entity_types)
542
1261
  SENSITIVE = []
543
1262
  include Aws::Structure
544
1263
  end
@@ -547,7 +1266,7 @@ module Aws::TranscribeStreamingService
547
1266
  # data as a hash:
548
1267
  #
549
1268
  # {
550
- # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
1269
+ # language_code: "en-US", # required, accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
551
1270
  # media_sample_rate_hertz: 1, # required
552
1271
  # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
553
1272
  # vocabulary_name: "VocabularyName",
@@ -562,72 +1281,119 @@ module Aws::TranscribeStreamingService
562
1281
  # }
563
1282
  #
564
1283
  # @!attribute [rw] language_code
565
- # Indicates the source language used in the input audio stream. For
566
- # Amazon Transcribe Medical, this is US English (en-US).
1284
+ # Specify the language code that represents the language spoken in
1285
+ # your audio.
1286
+ #
1287
+ # Amazon Transcribe Medical only supports US English (`en-US`).
567
1288
  # @return [String]
568
1289
  #
569
1290
  # @!attribute [rw] media_sample_rate_hertz
570
- # The sample rate of the input audio (in Hertz). Amazon Transcribe
571
- # medical supports a range from 16,000 Hz to 48,000 Hz. Note that the
1291
+ # The sample rate of the input audio (in hertz). Amazon Transcribe
1292
+ # Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the
572
1293
  # sample rate you specify must match that of your audio.
573
1294
  # @return [Integer]
574
1295
  #
575
1296
  # @!attribute [rw] media_encoding
576
- # The encoding used for the input audio.
1297
+ # Specify the encoding used for the input audio. Supported formats
1298
+ # are:
1299
+ #
1300
+ # * FLAC
1301
+ #
1302
+ # * OPUS-encoded audio in an Ogg container
1303
+ #
1304
+ # * PCM (only signed 16-bit little-endian audio formats, which does
1305
+ # not include WAV)
1306
+ #
1307
+ # For more information, see [Media formats][1].
1308
+ #
1309
+ #
1310
+ #
1311
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
577
1312
  # @return [String]
578
1313
  #
579
1314
  # @!attribute [rw] vocabulary_name
580
- # The name of the medical custom vocabulary to use when processing the
581
- # real-time stream.
1315
+ # Specify the name of the custom vocabulary that you want to use when
1316
+ # processing your transcription. Note that vocabulary names are case
1317
+ # sensitive.
582
1318
  # @return [String]
583
1319
  #
584
1320
  # @!attribute [rw] specialty
585
- # The medical specialty of the clinician or provider.
1321
+ # Specify the medical specialty contained in your audio.
586
1322
  # @return [String]
587
1323
  #
588
1324
  # @!attribute [rw] type
589
- # The type of input audio. Choose `DICTATION` for a provider dictating
590
- # patient notes. Choose `CONVERSATION` for a dialogue between a
591
- # patient and one or more medical professionanls.
1325
+ # Specify the type of input audio. For example, choose `DICTATION` for
1326
+ # a provider dictating patient notes and `CONVERSATION` for a dialogue
1327
+ # between a patient and a medical professional.
592
1328
  # @return [String]
593
1329
  #
594
1330
  # @!attribute [rw] show_speaker_label
595
- # When `true`, enables speaker identification in your real-time
596
- # stream.
1331
+ # Enables speaker partitioning (diarization) in your transcription
1332
+ # output. Speaker partitioning labels the speech from individual
1333
+ # speakers in your media file.
1334
+ #
1335
+ # For more information, see [Partitioning speakers (diarization)][1].
1336
+ #
1337
+ #
1338
+ #
1339
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html
597
1340
  # @return [Boolean]
598
1341
  #
599
1342
  # @!attribute [rw] session_id
600
- # Optional. An identifier for the transcription session. If you don't
601
- # provide a session ID, Amazon Transcribe generates one for you and
602
- # returns it in the response.
1343
+ # Specify a name for your transcription session. If you don't include
1344
+ # this parameter in your request, Amazon Transcribe Medical generates
1345
+ # an ID and returns it in the response.
1346
+ #
1347
+ # You can use a session ID to retry a streaming session.
603
1348
  # @return [String]
604
1349
  #
605
1350
  # @!attribute [rw] audio_stream
606
- # Represents the audio stream from your application to Amazon
607
- # Transcribe.
1351
+ # An encoded stream of audio blobs. Audio streams are encoded as
1352
+ # either HTTP/2 or WebSocket data frames.
1353
+ #
1354
+ # For more information, see [Transcribing streaming audio][1].
1355
+ #
1356
+ #
1357
+ #
1358
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
608
1359
  # @return [Types::AudioStream]
609
1360
  #
610
1361
  # @!attribute [rw] enable_channel_identification
611
- # When `true`, instructs Amazon Transcribe Medical to process each
612
- # audio channel separately and then merge the transcription output of
613
- # each channel into a single transcription.
1362
+ # Enables channel identification in multi-channel audio.
1363
+ #
1364
+ # Channel identification transcribes the audio on each channel
1365
+ # independently, then appends the output for each channel into one
1366
+ # transcript.
1367
+ #
1368
+ # If you have multi-channel audio and do not enable channel
1369
+ # identification, your audio is transcribed in a continuous manner and
1370
+ # your transcript is not separated by channel.
614
1371
  #
615
- # Amazon Transcribe Medical also produces a transcription of each
616
- # item. An item includes the start time, end time, and any alternative
617
- # transcriptions.
1372
+ # For more information, see [Transcribing multi-channel audio][1].
618
1373
  #
619
- # You can't set both `ShowSpeakerLabel` and
620
- # `EnableChannelIdentification` in the same request. If you set both,
621
- # your request returns a `BadRequestException`.
1374
+ #
1375
+ #
1376
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html
622
1377
  # @return [Boolean]
623
1378
  #
624
1379
  # @!attribute [rw] number_of_channels
625
- # The number of channels that are in your audio stream.
1380
+ # Specify the number of channels in your audio stream. Up to two
1381
+ # channels are supported.
626
1382
  # @return [Integer]
627
1383
  #
628
1384
  # @!attribute [rw] content_identification_type
629
- # Set this field to `PHI` to identify personal health information in
630
- # the transcription output.
1385
+ # Labels all personal health information (PHI) identified in your
1386
+ # transcript.
1387
+ #
1388
+ # Content identification is performed at the segment level; PHI is
1389
+ # flagged upon complete transcription of an audio segment.
1390
+ #
1391
+ # For more information, see [Identifying personal health information
1392
+ # (PHI) in a transcription][1].
1393
+ #
1394
+ #
1395
+ #
1396
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html
631
1397
  # @return [String]
632
1398
  #
633
1399
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartMedicalStreamTranscriptionRequest AWS API Documentation
@@ -650,60 +1416,60 @@ module Aws::TranscribeStreamingService
650
1416
  end
651
1417
 
652
1418
  # @!attribute [rw] request_id
653
- # An identifier for the streaming transcription.
1419
+ # Provides the identifier for your streaming request.
654
1420
  # @return [String]
655
1421
  #
656
1422
  # @!attribute [rw] language_code
657
- # The language code for the response transcript. For Amazon Transcribe
658
- # Medical, this is US English (en-US).
1423
+ # Provides the language code that you specified in your request. This
1424
+ # must be `en-US`.
659
1425
  # @return [String]
660
1426
  #
661
1427
  # @!attribute [rw] media_sample_rate_hertz
662
- # The sample rate of the input audio, in Hertz (Hz).
1428
+ # Provides the sample rate that you specified in your request.
663
1429
  # @return [Integer]
664
1430
  #
665
1431
  # @!attribute [rw] media_encoding
666
- # The encoding used for the input audio stream.
1432
+ # Provides the media encoding you specified in your request.
667
1433
  # @return [String]
668
1434
  #
669
1435
  # @!attribute [rw] vocabulary_name
670
- # The name of the vocabulary used when processing the stream.
1436
+ # Provides the name of the custom vocabulary that you specified in
1437
+ # your request.
671
1438
  # @return [String]
672
1439
  #
673
1440
  # @!attribute [rw] specialty
674
- # The specialty in the medical domain.
1441
+ # Provides the medical specialty that you specified in your request.
675
1442
  # @return [String]
676
1443
  #
677
1444
  # @!attribute [rw] type
678
- # The type of audio that was transcribed.
1445
+ # Provides the type of audio you specified in your request.
679
1446
  # @return [String]
680
1447
  #
681
1448
  # @!attribute [rw] show_speaker_label
682
- # Shows whether speaker identification was enabled in the stream.
1449
+ # Shows whether speaker partitioning was enabled for your
1450
+ # transcription.
683
1451
  # @return [Boolean]
684
1452
  #
685
1453
  # @!attribute [rw] session_id
686
- # Optional. An identifier for the transcription session. If you don't
687
- # provide a session ID, Amazon Transcribe generates one for you and
688
- # returns it in the response.
1454
+ # Provides the identifier for your transcription session.
689
1455
  # @return [String]
690
1456
  #
691
1457
  # @!attribute [rw] transcript_result_stream
692
- # Represents the stream of transcription events from Amazon Transcribe
693
- # Medical to your application.
1458
+ # Provides detailed information about your streaming session.
694
1459
  # @return [Types::MedicalTranscriptResultStream]
695
1460
  #
696
1461
  # @!attribute [rw] enable_channel_identification
697
- # Shows whether channel identification has been enabled in the stream.
1462
+ # Shows whether channel identification was enabled for your
1463
+ # transcription.
698
1464
  # @return [Boolean]
699
1465
  #
700
1466
  # @!attribute [rw] number_of_channels
701
- # The number of channels identified in the stream.
1467
+ # Provides the number of channels that you specified in your request.
702
1468
  # @return [Integer]
703
1469
  #
704
1470
  # @!attribute [rw] content_identification_type
705
- # If the value is `PHI`, indicates that you've configured your stream
706
- # to identify personal health information.
1471
+ # Shows whether content identification was enabled for your
1472
+ # transcription.
707
1473
  # @return [String]
708
1474
  #
709
1475
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartMedicalStreamTranscriptionResponse AWS API Documentation
@@ -730,7 +1496,7 @@ module Aws::TranscribeStreamingService
730
1496
  # data as a hash:
731
1497
  #
732
1498
  # {
733
- # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
1499
+ # language_code: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
734
1500
  # media_sample_rate_hertz: 1, # required
735
1501
  # media_encoding: "pcm", # required, accepts pcm, ogg-opus, flac
736
1502
  # vocabulary_name: "VocabularyName",
@@ -749,197 +1515,355 @@ module Aws::TranscribeStreamingService
749
1515
  # language_model_name: "ModelName",
750
1516
  # identify_language: false,
751
1517
  # language_options: "LanguageOptions",
752
- # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN
1518
+ # preferred_language: "en-US", # accepts en-US, en-GB, es-US, fr-CA, fr-FR, en-AU, it-IT, de-DE, pt-BR, ja-JP, ko-KR, zh-CN, hi-IN, th-TH
753
1519
  # vocabulary_names: "VocabularyNames",
754
1520
  # vocabulary_filter_names: "VocabularyFilterNames",
755
1521
  # }
756
1522
  #
757
1523
  # @!attribute [rw] language_code
758
- # The language code of the input audio stream.
1524
+ # Specify the language code that represents the language spoken in
1525
+ # your audio.
1526
+ #
1527
+ # If you're unsure of the language spoken in your audio, consider
1528
+ # using `IdentifyLanguage` to enable automatic language
1529
+ # identification.
1530
+ #
1531
+ # For a list of languages supported with Amazon Transcribe streaming,
1532
+ # refer to the [Supported languages][1] table.
1533
+ #
1534
+ #
1535
+ #
1536
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
759
1537
  # @return [String]
760
1538
  #
761
1539
  # @!attribute [rw] media_sample_rate_hertz
762
- # The sample rate of the input audio (in Hertz). Low-quality audio,
1540
+ # The sample rate of the input audio (in hertz). Low-quality audio,
763
1541
  # such as telephone audio, is typically around 8,000 Hz. High-quality
764
1542
  # audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the
765
1543
  # sample rate you specify must match that of your audio.
766
1544
  # @return [Integer]
767
1545
  #
768
1546
  # @!attribute [rw] media_encoding
769
- # The encoding used for the input audio.
1547
+ # Specify the encoding of your input audio. Supported formats are:
1548
+ #
1549
+ # * FLAC
1550
+ #
1551
+ # * OPUS-encoded audio in an Ogg container
1552
+ #
1553
+ # * PCM (only signed 16-bit little-endian audio formats, which does
1554
+ # not include WAV)
1555
+ #
1556
+ # For more information, see [Media formats][1].
1557
+ #
1558
+ #
1559
+ #
1560
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio
770
1561
  # @return [String]
771
1562
  #
772
1563
  # @!attribute [rw] vocabulary_name
773
- # The name of the custom vocabulary you want to use with your
774
- # transcription.
1564
+ # Specify the name of the custom vocabulary that you want to use when
1565
+ # processing your transcription. Note that vocabulary names are case
1566
+ # sensitive.
775
1567
  #
776
- # This operation is not intended for use in conjunction with the
777
- # `IdentifyLanguage` operation. If you're using `IdentifyLanguage` in
778
- # your request and want to use one or more custom vocabularies with
779
- # your transcription, use the `VocabularyNames` operation instead.
1568
+ # If the language of the specified custom vocabulary doesn't match
1569
+ # the language identified in your media, the custom vocabulary is not
1570
+ # applied to your transcription.
1571
+ #
1572
+ # This parameter is **not** intended for use with the
1573
+ # `IdentifyLanguage` parameter. If you're including
1574
+ # `IdentifyLanguage` in your request and want to use one or more
1575
+ # custom vocabularies with your transcription, use the
1576
+ # `VocabularyNames` parameter instead.
1577
+ #
1578
+ # For more information, see [Custom vocabularies][1].
1579
+ #
1580
+ #
1581
+ #
1582
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
780
1583
  # @return [String]
781
1584
  #
782
1585
  # @!attribute [rw] session_id
783
- # A identifier for the transcription session. Use this parameter when
784
- # you want to retry a session. If you don't provide a session ID,
785
- # Amazon Transcribe will generate one for you and return it in the
786
- # response.
1586
+ # Specify a name for your transcription session. If you don't include
1587
+ # this parameter in your request, Amazon Transcribe generates an ID
1588
+ # and returns it in the response.
1589
+ #
1590
+ # You can use a session ID to retry a streaming session.
787
1591
  # @return [String]
788
1592
  #
789
1593
  # @!attribute [rw] audio_stream
790
- # PCM-encoded stream of audio blobs. The audio stream is encoded as an
791
- # HTTP/2 data frame.
1594
+ # An encoded stream of audio blobs. Audio streams are encoded as
1595
+ # either HTTP/2 or WebSocket data frames.
1596
+ #
1597
+ # For more information, see [Transcribing streaming audio][1].
1598
+ #
1599
+ #
1600
+ #
1601
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
792
1602
  # @return [Types::AudioStream]
793
1603
  #
794
1604
  # @!attribute [rw] vocabulary_filter_name
795
- # The name of the vocabulary filter you want to use with your
796
- # transcription.
1605
+ # Specify the name of the custom vocabulary filter that you want to
1606
+ # use when processing your transcription. Note that vocabulary filter
1607
+ # names are case sensitive.
1608
+ #
1609
+ # If the language of the specified custom vocabulary filter doesn't
1610
+ # match the language identified in your media, the vocabulary filter
1611
+ # is not applied to your transcription.
1612
+ #
1613
+ # This parameter is **not** intended for use with the
1614
+ # `IdentifyLanguage` parameter. If you're including
1615
+ # `IdentifyLanguage` in your request and want to use one or more
1616
+ # vocabulary filters with your transcription, use the
1617
+ # `VocabularyFilterNames` parameter instead.
1618
+ #
1619
+ # For more information, see [Using vocabulary filtering with unwanted
1620
+ # words][1].
1621
+ #
1622
+ #
797
1623
  #
798
- # This operation is not intended for use in conjunction with the
799
- # `IdentifyLanguage` operation. If you're using `IdentifyLanguage` in
800
- # your request and want to use one or more vocabulary filters with
801
- # your transcription, use the `VocabularyFilterNames` operation
802
- # instead.
1624
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
803
1625
  # @return [String]
804
1626
  #
805
1627
  # @!attribute [rw] vocabulary_filter_method
806
- # The manner in which you use your vocabulary filter to filter words
807
- # in your transcript. `Remove` removes filtered words from your
808
- # transcription results. `Mask` masks filtered words with a `***` in
809
- # your transcription results. `Tag` keeps the filtered words in your
810
- # transcription results and tags them. The tag appears as
811
- # `VocabularyFilterMatch` equal to `True`.
1628
+ # Specify how you want your vocabulary filter applied to your
1629
+ # transcript.
1630
+ #
1631
+ # To replace words with `***`, choose `mask`.
1632
+ #
1633
+ # To delete words, choose `remove`.
1634
+ #
1635
+ # To flag words without changing them, choose `tag`.
812
1636
  # @return [String]
813
1637
  #
814
1638
  # @!attribute [rw] show_speaker_label
815
- # When `true`, enables speaker identification in your media stream.
1639
+ # Enables speaker partitioning (diarization) in your transcription
1640
+ # output. Speaker partitioning labels the speech from individual
1641
+ # speakers in your media file.
1642
+ #
1643
+ # For more information, see [Partitioning speakers (diarization)][1].
1644
+ #
1645
+ #
1646
+ #
1647
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html
816
1648
  # @return [Boolean]
817
1649
  #
818
1650
  # @!attribute [rw] enable_channel_identification
819
- # When `true`, instructs Amazon Transcribe to process each audio
820
- # channel separately, then merges the transcription output of each
821
- # channel into a single transcription.
1651
+ # Enables channel identification in multi-channel audio.
1652
+ #
1653
+ # Channel identification transcribes the audio on each channel
1654
+ # independently, then appends the output for each channel into one
1655
+ # transcript.
822
1656
  #
823
- # Amazon Transcribe also produces a transcription of each item. An
824
- # item includes the start time, end time, and any alternative
825
- # transcriptions.
1657
+ # If you have multi-channel audio and do not enable channel
1658
+ # identification, your audio is transcribed in a continuous manner and
1659
+ # your transcript is not separated by channel.
1660
+ #
1661
+ # For more information, see [Transcribing multi-channel audio][1].
1662
+ #
1663
+ #
1664
+ #
1665
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html
826
1666
  # @return [Boolean]
827
1667
  #
828
1668
  # @!attribute [rw] number_of_channels
829
- # The number of channels that are in your audio stream.
1669
+ # Specify the number of channels in your audio stream. Up to two
1670
+ # channels are supported.
830
1671
  # @return [Integer]
831
1672
  #
832
1673
  # @!attribute [rw] enable_partial_results_stabilization
833
- # When `true`, instructs Amazon Transcribe to present transcription
834
- # results that have the partial results stabilized. Normally, any word
835
- # or phrase from one partial result can change in a subsequent partial
836
- # result. With partial results stabilization enabled, only the last
837
- # few words of one partial result can change in another partial
838
- # result.
1674
+ # Enables partial result stabilization for your transcription. Partial
1675
+ # result stabilization can reduce latency in your output, but may
1676
+ # impact accuracy. For more information, see [Partial-result
1677
+ # stabilization][1].
1678
+ #
1679
+ #
1680
+ #
1681
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
839
1682
  # @return [Boolean]
840
1683
  #
841
1684
  # @!attribute [rw] partial_results_stability
842
- # You can use this field to set the stability level of the
843
- # transcription results. A higher stability level means that the
844
- # transcription results are less likely to change. Higher stability
845
- # levels can come with lower overall transcription accuracy.
1685
+ # Specify the level of stability to use when you enable partial
1686
+ # results stabilization (`EnablePartialResultsStabilization`).
1687
+ #
1688
+ # Low stability provides the highest accuracy. High stability
1689
+ # transcribes faster, but with slightly lower accuracy.
1690
+ #
1691
+ # For more information, see [Partial-result stabilization][1].
1692
+ #
1693
+ #
1694
+ #
1695
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization
846
1696
  # @return [String]
847
1697
  #
848
1698
  # @!attribute [rw] content_identification_type
849
- # Set this field to PII to identify personally identifiable
850
- # information (PII) in the transcription output. Content
851
- # identification is performed only upon complete transcription of the
852
- # audio segments.
1699
+ # Labels all personally identifiable information (PII) identified in
1700
+ # your transcript.
1701
+ #
1702
+ # Content identification is performed at the segment level; PII
1703
+ # specified in `PiiEntityTypes` is flagged upon complete transcription
1704
+ # of an audio segment.
1705
+ #
1706
+ # You can’t set `ContentIdentificationType` and `ContentRedactionType`
1707
+ # in the same request. If you set both, your request returns a
1708
+ # `BadRequestException`.
1709
+ #
1710
+ # For more information, see [Redacting or identifying personally
1711
+ # identifiable information][1].
1712
+ #
853
1713
  #
854
- # You can’t set both `ContentIdentificationType` and
855
- # `ContentRedactionType` in the same request. If you set both, your
856
- # request returns a `BadRequestException`.
1714
+ #
1715
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
857
1716
  # @return [String]
858
1717
  #
859
1718
  # @!attribute [rw] content_redaction_type
860
- # Set this field to PII to redact personally identifiable information
861
- # (PII) in the transcription output. Content redaction is performed
862
- # only upon complete transcription of the audio segments.
1719
+ # Redacts all personally identifiable information (PII) identified in
1720
+ # your transcript.
1721
+ #
1722
+ # Content redaction is performed at the segment level; PII specified
1723
+ # in `PiiEntityTypes` is redacted upon complete transcription of an
1724
+ # audio segment.
1725
+ #
1726
+ # You can’t set `ContentRedactionType` and `ContentIdentificationType`
1727
+ # in the same request. If you set both, your request returns a
1728
+ # `BadRequestException`.
1729
+ #
1730
+ # For more information, see [Redacting or identifying personally
1731
+ # identifiable information][1].
863
1732
  #
864
- # You can’t set both `ContentRedactionType` and
865
- # `ContentIdentificationType` in the same request. If you set both,
866
- # your request returns a `BadRequestException`.
1733
+ #
1734
+ #
1735
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html
867
1736
  # @return [String]
868
1737
  #
869
1738
  # @!attribute [rw] pii_entity_types
870
- # List the PII entity types you want to identify or redact. In order
871
- # to specify entity types, you must have either
872
- # `ContentIdentificationType` or `ContentRedactionType` enabled.
1739
+ # Specify which types of personally identifiable information (PII) you
1740
+ # want to redact in your transcript. You can include as many types as
1741
+ # you'd like, or you can select `ALL`.
873
1742
  #
874
- # `PIIEntityTypes` must be comma-separated; the available values are:
1743
+ # To include `PiiEntityTypes` in your request, you must also include
1744
+ # either `ContentIdentificationType` or `ContentRedactionType`.
1745
+ #
1746
+ # Values must be comma-separated and can include:
875
1747
  # `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_NUMBER`,
876
1748
  # `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `PIN`, `EMAIL`,
877
- # `ADDRESS`, `NAME`, `PHONE`, `SSN`, and `ALL`.
878
- #
879
- # `PiiEntityTypes` is an optional parameter with a default value of
880
- # `ALL`.
1749
+ # `ADDRESS`, `NAME`, `PHONE`, `SSN`, or `ALL`.
881
1750
  # @return [String]
882
1751
  #
883
1752
  # @!attribute [rw] language_model_name
884
- # The name of the language model you want to use.
1753
+ # Specify the name of the custom language model that you want to use
1754
+ # when processing your transcription. Note that language model names
1755
+ # are case sensitive.
1756
+ #
1757
+ # The language of the specified language model must match the language
1758
+ # code you specify in your transcription request. If the languages
1759
+ # don't match, the custom language model isn't applied. There are no
1760
+ # errors or warnings associated with a language mismatch.
1761
+ #
1762
+ # For more information, see [Custom language models][1].
1763
+ #
1764
+ #
1765
+ #
1766
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html
885
1767
  # @return [String]
886
1768
  #
887
1769
  # @!attribute [rw] identify_language
888
- # Optional. Set this value to `true` to enable language identification
889
- # for your media stream.
1770
+ # Enables automatic language identification for your transcription.
1771
+ #
1772
+ # If you include `IdentifyLanguage`, you can optionally include a list
1773
+ # of language codes, using `LanguageOptions`, that you think may be
1774
+ # present in your audio stream. Including language options can improve
1775
+ # transcription accuracy.
1776
+ #
1777
+ # You can also include a preferred language using `PreferredLanguage`.
1778
+ # Adding a preferred language can help Amazon Transcribe identify the
1779
+ # language faster than if you omit this parameter.
1780
+ #
1781
+ # If you have multi-channel audio that contains different languages on
1782
+ # each channel, and you've enabled channel identification, automatic
1783
+ # language identification identifies the dominant language on each
1784
+ # audio channel.
1785
+ #
1786
+ # Note that you must include either `LanguageCode` or
1787
+ # `IdentifyLanguage` in your request. If you include both parameters,
1788
+ # your request fails.
1789
+ #
1790
+ # Streaming language identification can't be combined with custom
1791
+ # language models or redaction.
890
1792
  # @return [Boolean]
891
1793
  #
892
1794
  # @!attribute [rw] language_options
893
- # An object containing a list of languages that might be present in
894
- # your audio.
1795
+ # Specify two or more language codes that represent the languages you
1796
+ # think may be present in your media; including more than five is not
1797
+ # recommended. If you're unsure what languages are present, do not
1798
+ # include this parameter.
1799
+ #
1800
+ # Including language options can improve the accuracy of language
1801
+ # identification.
1802
+ #
1803
+ # If you include `LanguageOptions` in your request, you must also
1804
+ # include `IdentifyLanguage`.
1805
+ #
1806
+ # For a list of languages supported with Amazon Transcribe streaming,
1807
+ # refer to the [Supported languages][1] table.
1808
+ #
1809
+ # You can only include one language dialect per language per stream.
1810
+ # For example, you cannot include `en-US` and `en-AU` in the same
1811
+ # request.
1812
+ #
895
1813
  #
896
- # You must provide two or more language codes to help Amazon
897
- # Transcribe identify the correct language of your media stream with
898
- # the highest possible accuracy. You can only select one variant per
899
- # language; for example, you can't include both `en-US` and `en-UK`
900
- # in the same request.
901
1814
  #
902
- # You can only use this parameter if you've set `IdentifyLanguage` to
903
- # `true`in your request.
1815
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
904
1816
  # @return [String]
905
1817
  #
906
1818
  # @!attribute [rw] preferred_language
907
- # Optional. From the subset of languages codes you provided for
908
- # `LanguageOptions`, you can select one preferred language for your
909
- # transcription.
1819
+ # Specify a preferred language from the subset of languages codes you
1820
+ # specified in `LanguageOptions`.
910
1821
  #
911
- # You can only use this parameter if you've set `IdentifyLanguage` to
912
- # `true`in your request.
1822
+ # You can only use this parameter if you've included
1823
+ # `IdentifyLanguage` and `LanguageOptions` in your request.
913
1824
  # @return [String]
914
1825
  #
915
1826
  # @!attribute [rw] vocabulary_names
916
- # The names of the custom vocabularies you want to use with your
917
- # transcription.
1827
+ # Specify the names of the custom vocabularies that you want to use
1828
+ # when processing your transcription. Note that vocabulary names are
1829
+ # case sensitive.
918
1830
  #
919
- # Note that if the custom vocabularies you specify are in languages
920
- # that don't match the language identified in your media, your job
921
- # fails.
1831
+ # If none of the languages of the specified custom vocabularies match
1832
+ # the language identified in your media, your job fails.
922
1833
  #
923
- # This operation is only intended for use in conjunction with the
924
- # `IdentifyLanguage` operation. If you're not using
1834
+ # This parameter is only intended for use **with** the
1835
+ # `IdentifyLanguage` parameter. If you're **not** including
925
1836
  # `IdentifyLanguage` in your request and want to use a custom
926
1837
  # vocabulary with your transcription, use the `VocabularyName`
927
- # operation instead.
1838
+ # parameter instead.
1839
+ #
1840
+ # For more information, see [Custom vocabularies][1].
1841
+ #
1842
+ #
1843
+ #
1844
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html
928
1845
  # @return [String]
929
1846
  #
930
1847
  # @!attribute [rw] vocabulary_filter_names
931
- # The names of the vocabulary filters you want to use with your
932
- # transcription.
1848
+ # Specify the names of the custom vocabulary filters that you want to
1849
+ # use when processing your transcription. Note that vocabulary filter
1850
+ # names are case sensitive.
1851
+ #
1852
+ # If none of the languages of the specified custom vocabulary filters
1853
+ # match the language identified in your media, your job fails.
1854
+ #
1855
+ # This parameter is only intended for use **with** the
1856
+ # `IdentifyLanguage` parameter. If you're **not** including
1857
+ # `IdentifyLanguage` in your request and want to use a custom
1858
+ # vocabulary filter with your transcription, use the
1859
+ # `VocabularyFilterName` parameter instead.
1860
+ #
1861
+ # For more information, see [Using vocabulary filtering with unwanted
1862
+ # words][1].
1863
+ #
933
1864
  #
934
- # Note that if the vocabulary filters you specify are in languages
935
- # that don't match the language identified in your media, your job
936
- # fails.
937
1865
  #
938
- # This operation is only intended for use in conjunction with the
939
- # `IdentifyLanguage` operation. If you're not using
940
- # `IdentifyLanguage` in your request and want to use a vocabulary
941
- # filter with your transcription, use the `VocabularyFilterName`
942
- # operation instead.
1866
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html
943
1867
  # @return [String]
944
1868
  #
945
1869
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartStreamTranscriptionRequest AWS API Documentation
@@ -972,71 +1896,73 @@ module Aws::TranscribeStreamingService
972
1896
  end
973
1897
 
974
1898
  # @!attribute [rw] request_id
975
- # An identifier for the transcription.
1899
+ # Provides the identifier for your streaming request.
976
1900
  # @return [String]
977
1901
  #
978
1902
  # @!attribute [rw] language_code
979
- # The language code of the input audio stream.
1903
+ # Provides the language code that you specified in your request.
980
1904
  # @return [String]
981
1905
  #
982
1906
  # @!attribute [rw] media_sample_rate_hertz
983
- # The sample rate, in Hertz (Hz), for the input audio stream.
1907
+ # Provides the sample rate that you specified in your request.
984
1908
  # @return [Integer]
985
1909
  #
986
1910
  # @!attribute [rw] media_encoding
987
- # The encoding used for the input audio stream.
1911
+ # Provides the media encoding you specified in your request.
988
1912
  # @return [String]
989
1913
  #
990
1914
  # @!attribute [rw] vocabulary_name
991
- # The name of the custom vocabulary used when processing the stream.
1915
+ # Provides the name of the custom vocabulary that you specified in
1916
+ # your request.
992
1917
  # @return [String]
993
1918
  #
994
1919
  # @!attribute [rw] session_id
995
- # An identifier for a specific transcription session.
1920
+ # Provides the identifier for your transcription session.
996
1921
  # @return [String]
997
1922
  #
998
1923
  # @!attribute [rw] transcript_result_stream
999
- # Represents the stream of transcription events from Amazon Transcribe
1000
- # to your application.
1924
+ # Provides detailed information about your streaming session.
1001
1925
  # @return [Types::TranscriptResultStream]
1002
1926
  #
1003
1927
  # @!attribute [rw] vocabulary_filter_name
1004
- # The name of the vocabulary filter used when processing the stream.
1928
+ # Provides the name of the custom vocabulary filter that you specified
1929
+ # in your request.
1005
1930
  # @return [String]
1006
1931
  #
1007
1932
  # @!attribute [rw] vocabulary_filter_method
1008
- # The vocabulary filtering method used when processing the stream.
1933
+ # Provides the vocabulary filtering method used in your transcription.
1009
1934
  # @return [String]
1010
1935
  #
1011
1936
  # @!attribute [rw] show_speaker_label
1012
- # Shows whether speaker identification was enabled in the
1937
+ # Shows whether speaker partitioning was enabled for your
1013
1938
  # transcription.
1014
1939
  # @return [Boolean]
1015
1940
  #
1016
1941
  # @!attribute [rw] enable_channel_identification
1017
- # Shows whether channel identification was enabled in the stream.
1942
+ # Shows whether channel identification was enabled for your
1943
+ # transcription.
1018
1944
  # @return [Boolean]
1019
1945
  #
1020
1946
  # @!attribute [rw] number_of_channels
1021
- # The number of channels identified in the stream.
1947
+ # Provides the number of channels that you specified in your request.
1022
1948
  # @return [Integer]
1023
1949
  #
1024
1950
  # @!attribute [rw] enable_partial_results_stabilization
1025
- # Shows whether partial results stabilization was enabled in the
1951
+ # Shows whether partial results stabilization was enabled for your
1026
1952
  # transcription.
1027
1953
  # @return [Boolean]
1028
1954
  #
1029
1955
  # @!attribute [rw] partial_results_stability
1030
- # If partial results stabilization has been enabled in the stream,
1031
- # shows the stability level.
1956
+ # Provides the stabilization level used for your transcription.
1032
1957
  # @return [String]
1033
1958
  #
1034
1959
  # @!attribute [rw] content_identification_type
1035
- # Shows whether content identification was enabled in this stream.
1960
+ # Shows whether content identification was enabled for your
1961
+ # transcription.
1036
1962
  # @return [String]
1037
1963
  #
1038
1964
  # @!attribute [rw] content_redaction_type
1039
- # Shows whether content redaction was enabled in this stream.
1965
+ # Shows whether content redaction was enabled for your transcription.
1040
1966
  # @return [String]
1041
1967
  #
1042
1968
  # @!attribute [rw] pii_entity_types
@@ -1044,28 +1970,31 @@ module Aws::TranscribeStreamingService
1044
1970
  # @return [String]
1045
1971
  #
1046
1972
  # @!attribute [rw] language_model_name
1047
- # The name of the custom language model used in the transcription.
1973
+ # Provides the name of the custom language model that you specified in
1974
+ # your request.
1048
1975
  # @return [String]
1049
1976
  #
1050
1977
  # @!attribute [rw] identify_language
1051
- # The language code of the language identified in your media stream.
1978
+ # Shows whether automatic language identification was enabled for your
1979
+ # transcription.
1052
1980
  # @return [Boolean]
1053
1981
  #
1054
1982
  # @!attribute [rw] language_options
1055
- # The language codes used in the identification of your media
1056
- # stream's predominant language.
1983
+ # Provides the language codes that you specified in your request.
1057
1984
  # @return [String]
1058
1985
  #
1059
1986
  # @!attribute [rw] preferred_language
1060
- # The preferred language you specified in your request.
1987
+ # Provides the preferred language that you specified in your request.
1061
1988
  # @return [String]
1062
1989
  #
1063
1990
  # @!attribute [rw] vocabulary_names
1064
- # The name of the custom vocabulary used when processing the stream.
1991
+ # Provides the names of the custom vocabularies that you specified in
1992
+ # your request.
1065
1993
  # @return [String]
1066
1994
  #
1067
1995
  # @!attribute [rw] vocabulary_filter_names
1068
- # The name of the vocabulary filter used when processing the stream.
1996
+ # Provides the names of the custom vocabulary filters that you
1997
+ # specified in your request.
1069
1998
  # @return [String]
1070
1999
  #
1071
2000
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartStreamTranscriptionResponse AWS API Documentation
@@ -1098,11 +2027,41 @@ module Aws::TranscribeStreamingService
1098
2027
  include Aws::Structure
1099
2028
  end
1100
2029
 
1101
- # The transcription in a TranscriptEvent.
2030
+ # Contains the timestamp range (start time through end time) of a
2031
+ # matched category.
2032
+ #
2033
+ # @!attribute [rw] begin_offset_millis
2034
+ # The time, in milliseconds, from the beginning of the audio stream to
2035
+ # the start of the category match.
2036
+ # @return [Integer]
2037
+ #
2038
+ # @!attribute [rw] end_offset_millis
2039
+ # The time, in milliseconds, from the beginning of the audio stream to
2040
+ # the end of the category match.
2041
+ # @return [Integer]
2042
+ #
2043
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/TimestampRange AWS API Documentation
2044
+ #
2045
+ class TimestampRange < Struct.new(
2046
+ :begin_offset_millis,
2047
+ :end_offset_millis)
2048
+ SENSITIVE = []
2049
+ include Aws::Structure
2050
+ end
2051
+
2052
+ # The `Transcript` associated with a `TranscriptEvent`. `Transcript` contains
2053
+ # Results, which contains a set of transcription results from one or
2054
+ # more audio segments, along with additional information per your
2055
+ # request parameters.
2056
+ #
1102
2057
  #
1103
2058
  # @!attribute [rw] results
1104
- # Result objects that contain the results of transcribing a portion of
1105
- # the input audio stream. The array can be empty.
2059
+ # Contains a set of transcription results from one or more audio
2060
+ # segments, along with additional information per your request
2061
+ # parameters. This can include information relating to alternative
2062
+ # transcriptions, channel identification, partial result
2063
+ # stabilization, language identification, and other
2064
+ # transcription-related data.
1106
2065
  # @return [Array<Types::Result>]
1107
2066
  #
1108
2067
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/Transcript AWS API Documentation
@@ -1113,12 +2072,19 @@ module Aws::TranscribeStreamingService
1113
2072
  include Aws::Structure
1114
2073
  end
1115
2074
 
1116
- # Represents a set of transcription results from the server to the
1117
- # client. It contains one or more segments of the transcription.
2075
+ # The `TranscriptEvent` associated with a `TranscriptResultStream`.
2076
+ #
2077
+ # Contains a set of transcription results from one or more audio
2078
+ # segments, along with additional information per your request
2079
+ # parameters.
1118
2080
  #
1119
2081
  # @!attribute [rw] transcript
1120
- # The transcription of the audio stream. The transcription is composed
1121
- # of all of the items in the results list.
2082
+ # Contains `Results`, which contains a set of transcription results
2083
+ # from one or more audio segments, along with additional information
2084
+ # per your request parameters. This can include information relating
2085
+ # to alternative transcriptions, channel identification, partial
2086
+ # result stabilization, language identification, and other
2087
+ # transcription-related data.
1122
2088
  # @return [Types::Transcript]
1123
2089
  #
1124
2090
  # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/TranscriptEvent AWS API Documentation
@@ -1130,8 +2096,84 @@ module Aws::TranscribeStreamingService
1130
2096
  include Aws::Structure
1131
2097
  end
1132
2098
 
1133
- # Represents the audio stream from your application to Amazon
1134
- # Transcribe.
2099
+ # Contains a set of transcription results from one or more audio segments,
2100
+ # along with additional information about the parameters included in
2101
+ # your request. For example, channel definitions, partial result
2102
+ # stabilization, sentiment, and issue detection.
2103
+ #
2104
+ # @!attribute [rw] utterance_id
2105
+ # The unique identifier that is associated with the specified
2106
+ # `UtteranceEvent`.
2107
+ # @return [String]
2108
+ #
2109
+ # @!attribute [rw] is_partial
2110
+ # Indicates whether the segment in the `UtteranceEvent` is complete
2111
+ # (`FALSE`) or partial (`TRUE`).
2112
+ # @return [Boolean]
2113
+ #
2114
+ # @!attribute [rw] participant_role
2115
+ # Provides the role of the speaker for each audio channel, either
2116
+ # `CUSTOMER` or `AGENT`.
2117
+ # @return [String]
2118
+ #
2119
+ # @!attribute [rw] begin_offset_millis
2120
+ # The time, in milliseconds, from the beginning of the audio stream to
2121
+ # the start of the `UtteranceEvent`.
2122
+ # @return [Integer]
2123
+ #
2124
+ # @!attribute [rw] end_offset_millis
2125
+ # The time, in milliseconds, from the beginning of the audio stream to
2126
+ # the end of the `UtteranceEvent`.
2127
+ # @return [Integer]
2128
+ #
2129
+ # @!attribute [rw] transcript
2130
+ # Contains transcribed text.
2131
+ # @return [String]
2132
+ #
2133
+ # @!attribute [rw] items
2134
+ # Contains words, phrases, or punctuation marks that are associated
2135
+ # with the specified `UtteranceEvent`.
2136
+ # @return [Array<Types::CallAnalyticsItem>]
2137
+ #
2138
+ # @!attribute [rw] entities
2139
+ # Contains entities identified as personally identifiable information
2140
+ # (PII) in your transcription output.
2141
+ # @return [Array<Types::CallAnalyticsEntity>]
2142
+ #
2143
+ # @!attribute [rw] sentiment
2144
+ # Provides the sentiment that was detected in the specified segment.
2145
+ # @return [String]
2146
+ #
2147
+ # @!attribute [rw] issues_detected
2148
+ # Provides the issue that was detected in the specified segment.
2149
+ # @return [Array<Types::IssueDetected>]
2150
+ #
2151
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/UtteranceEvent AWS API Documentation
2152
+ #
2153
+ class UtteranceEvent < Struct.new(
2154
+ :utterance_id,
2155
+ :is_partial,
2156
+ :participant_role,
2157
+ :begin_offset_millis,
2158
+ :end_offset_millis,
2159
+ :transcript,
2160
+ :items,
2161
+ :entities,
2162
+ :sentiment,
2163
+ :issues_detected,
2164
+ :event_type)
2165
+ SENSITIVE = []
2166
+ include Aws::Structure
2167
+ end
2168
+
2169
+ # An encoded stream of audio blobs. Audio streams are encoded as either
2170
+ # HTTP/2 or WebSocket data frames.
2171
+ #
2172
+ # For more information, see [Transcribing streaming audio][1].
2173
+ #
2174
+ #
2175
+ #
2176
+ # [1]: https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
1135
2177
  #
1136
2178
  # @note When making an API call, you may pass AudioStream
1137
2179
  # data as a hash:
@@ -1140,6 +2182,20 @@ module Aws::TranscribeStreamingService
1140
2182
  # audio_event: {
1141
2183
  # audio_chunk: "data",
1142
2184
  # },
2185
+ # configuration_event: {
2186
+ # channel_definitions: [
2187
+ # {
2188
+ # channel_id: 1, # required
2189
+ # participant_role: "AGENT", # required, accepts AGENT, CUSTOMER
2190
+ # },
2191
+ # ],
2192
+ # post_call_analytics_settings: {
2193
+ # output_location: "String", # required
2194
+ # data_access_role_arn: "String", # required
2195
+ # content_redaction_output: "redacted", # accepts redacted, redacted_and_unredacted
2196
+ # output_encryption_kms_key_id: "String",
2197
+ # },
2198
+ # },
1143
2199
  # }
1144
2200
  #
1145
2201
  # EventStream is an Enumerator of Events.
@@ -1151,14 +2207,39 @@ module Aws::TranscribeStreamingService
1151
2207
 
1152
2208
  def event_types
1153
2209
  [
1154
- :audio_event
2210
+ :audio_event,
2211
+ :configuration_event
2212
+ ]
2213
+ end
2214
+
2215
+ end
2216
+
2217
+ # Contains detailed information about your Call Analytics streaming
2218
+ # session. These details are provided in the `UtteranceEvent` and
2219
+ # `CategoryEvent` objects.
2220
+ #
2221
+ # EventStream is an Enumerator of Events.
2222
+ # #event_types #=> Array, returns all modeled event types in the stream
2223
+ #
2224
+ # @see http://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/CallAnalyticsTranscriptResultStream AWS API Documentation
2225
+ #
2226
+ class CallAnalyticsTranscriptResultStream < Enumerator
2227
+
2228
+ def event_types
2229
+ [
2230
+ :utterance_event,
2231
+ :category_event,
2232
+ :bad_request_exception,
2233
+ :limit_exceeded_exception,
2234
+ :internal_failure_exception,
2235
+ :conflict_exception,
2236
+ :service_unavailable_exception
1155
2237
  ]
1156
2238
  end
1157
2239
 
1158
2240
  end
1159
2241
 
1160
- # Represents the transcription result stream from Amazon Transcribe
1161
- # Medical to your application.
2242
+ # Contains detailed information about your streaming session.
1162
2243
  #
1163
2244
  # EventStream is an Enumerator of Events.
1164
2245
  # #event_types #=> Array, returns all modeled event types in the stream
@@ -1180,8 +2261,7 @@ module Aws::TranscribeStreamingService
1180
2261
 
1181
2262
  end
1182
2263
 
1183
- # Represents the transcription result stream from Amazon Transcribe to
1184
- # your application.
2264
+ # Contains detailed information about your streaming session.
1185
2265
  #
1186
2266
  # EventStream is an Enumerator of Events.
1187
2267
  # #event_types #=> Array, returns all modeled event types in the stream