assemblyai 1.0.2 → 1.2.0
This diff reflects the contents of publicly released package versions as they appear in their public registry and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/assemblyai/lemur/types/lemur_model.rb +1 -0
- data/lib/assemblyai/transcripts/client.rb +349 -153
- data/lib/assemblyai/transcripts/polling_client.rb +148 -70
- data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +2 -2
- data/lib/assemblyai/transcripts/types/paragraphs_response.rb +8 -8
- data/lib/assemblyai/transcripts/types/sentences_response.rb +8 -8
- data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +12 -1
- data/lib/assemblyai/transcripts/types/topic_detection_result.rb +2 -2
- data/lib/assemblyai/transcripts/types/transcript.rb +98 -56
- data/lib/assemblyai/transcripts/types/transcript_boost_param.rb +1 -1
- data/lib/assemblyai/transcripts/types/transcript_list.rb +4 -4
- data/lib/assemblyai/transcripts/types/transcript_list_item.rb +15 -15
- data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +64 -38
- data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +12 -32
- data/lib/assemblyai/transcripts/types/transcript_sentence.rb +22 -11
- data/lib/assemblyai/transcripts/types/transcript_utterance.rb +14 -2
- data/lib/assemblyai/transcripts/types/transcript_word.rb +21 -11
- data/lib/gemconfig.rb +1 -1
- metadata +2 -2
@@ -26,54 +26,93 @@ module AssemblyAI
 # Create a transcript from an audio or video file that is accessible via a URL.
 # .transcribe polls for completion of the transcription, while the .submit function does not.
 #
-# @param
-# @param
+# @param language_code [AssemblyAI::Transcripts::TranscriptLanguageCode]
+# @param language_detection [Boolean] Enable [Automatic language
+# www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection),
+# either true or false.
+# @param language_confidence_threshold [Float] The confidence threshold for the automatically detected language.
+# An error will be returned if the language confidence is below this threshold.
+# Defaults to 0.
+# @param speech_model [AssemblyAI::Transcripts::SpeechModel]
 # @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
 # @param format_text [Boolean] Enable Text Formatting, can be true or false
-# @param
-# @param
-#
-#
-# @param
+# @param disfluencies [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
+# @param multichannel [Boolean] Enable
+# ://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription)
+# transcription, can be true or false.
+# @param dual_channel [Boolean] Enable [Dual
+# ://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription)
+# transcription, can be true or false.
+# @param webhook_url [String] The URL to which we send webhook requests.
+# We sends two different types of webhook requests.
+# One request when a transcript is completed or failed, and one request when the
+# redacted audio is ready if redact_pii_audio is enabled.
+# @param webhook_auth_header_name [String] The header name to be sent with the transcript completed or failed webhook
+# requests
+# @param webhook_auth_header_value [String] The header value to send back with the transcript completed or failed webhook
+# requests for added security
+# @param auto_highlights [Boolean] Enable Key Phrases, either true or false
 # @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
 # @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
 # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
-# @param boost_param [Transcripts::TranscriptBoostParam]
+# @param boost_param [AssemblyAI::Transcripts::TranscriptBoostParam] How much to boost specified words
 # @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
-# @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or
-#
-# @param
-#
-#
-#
-# @param
-#
-#
-#
-# @param
-#
+# @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or
+# false
+# @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be
+# true or false. See [PII
+# redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
+# details.
+# @param redact_pii_audio_quality [AssemblyAI::Transcripts::RedactPiiAudioQuality] Controls the filetype of the audio created by redact_pii_audio. Currently
+# supports mp3 (default) and wav. See [PII
+# redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
+# details.
+# @param redact_pii_policies [Array<AssemblyAI::Transcripts::PiiPolicy>] The list of PII Redaction policies to enable. See [PII
+# redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
+# details.
+# @param redact_pii_sub [AssemblyAI::Transcripts::SubstitutionPolicy]
+# @param speaker_labels [Boolean] Enable [Speaker
+# diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be
+# true or false
+# @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify,
+# up to 10. See [Speaker
+# diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for
+# more details.
+# @param content_safety [Boolean] Enable [Content
+# Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be
+# true or false
+# @param content_safety_confidence [Integer] The confidence threshold for the Content Moderation model. Values must be
+# between 25 and 100.
+# @param iab_categories [Boolean] Enable [Topic
+# Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true
+# or false
+# @param custom_spelling [Array<Hash>] Customize how words are spelled and formatted using to and from valuesRequest of type Array<AssemblyAI::Transcripts::TranscriptCustomSpelling>, as a Hash
 # * :from (Array<String>)
 # * :to (String)
-# @param
-#
-#
-# @param
+# @param sentiment_analysis [Boolean] Enable [Sentiment
+# Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be
+# true or false
+# @param auto_chapters [Boolean] Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters),
+# can be true or false
+# @param entity_detection [Boolean] Enable [Entity
+# Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true
+# or false
 # @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
-#
-# @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization),
-#
-# @param
-# @param
-# @param
-# @param
+# Valid values are in the range [0, 1] inclusive.
+# @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization),
+# can be true or false
+# @param summary_model [AssemblyAI::Transcripts::SummaryModel] The model to summarize the transcript
+# @param summary_type [AssemblyAI::Transcripts::SummaryType] The type of summary
+# @param custom_topics [Boolean] Enable custom topics, either true or false
+# @param topics [Array<String>] The list of custom topics
 # @param audio_url [String] The URL of the audio or video file to transcribe.
-# @param request_options [RequestOptions]
+# @param request_options [AssemblyAI::RequestOptions]
 # @param polling_options [Transcripts::PollingOptions] Configuration options for polling requests.
 # @return [Transcripts::Transcript]
-def transcribe(audio_url:,
-webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil,
+def transcribe(audio_url:, language_code: nil, language_detection: nil, language_confidence_threshold: nil, speech_model: nil,
+punctuate: nil, format_text: nil, disfluencies: nil, multichannel: nil, dual_channel: nil, webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, custom_spelling: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
 transcript = submit(audio_url: audio_url, speech_model: speech_model, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel,
-webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics,
+webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, language_confidence_threshold: language_confidence_threshold, custom_spelling: custom_spelling, disfluencies: disfluencies, multichannel: multichannel, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, request_options: request_options)
 wait_until_ready(transcript_id: transcript.id, polling_options: polling_options)
 end
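
The hunk above widens the synchronous `transcribe` helper: language detection, the new `language_confidence_threshold`, `multichannel`, and `request_options` are now explicit keyword arguments that get forwarded to `submit` before polling. A minimal usage sketch against the new signature; the client construction and the audio URL below are illustrative assumptions, not part of this diff:

```ruby
require "assemblyai"

# Assumed client setup; substitute a real API key.
client = AssemblyAI::Client.new(api_key: ENV.fetch("ASSEMBLYAI_API_KEY"))

# .transcribe submits the job and then polls until it completes or fails.
transcript = client.transcripts.transcribe(
  audio_url: "https://example.com/meeting.mp3",  # placeholder URL
  language_detection: true,                      # new keyword in this signature
  language_confidence_threshold: 0.4,            # new: error out on low-confidence detection
  multichannel: true,                            # new: per-channel transcription
  sentiment_analysis: true
)

puts transcript.status
puts transcript.text
```

If the job is submitted with `.submit` instead, the same polling can be done explicitly with `wait_until_ready(transcript_id: ...)`, as the method body above shows.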
|
79
118
|
|
@@ -103,55 +142,94 @@ module AssemblyAI
|
|
103
142
|
# Create a transcript from an audio or video file that is accessible via a URL.
|
104
143
|
# .transcribe polls for completion of the transcription, while the .submit function does not.
|
105
144
|
#
|
106
|
-
# @param
|
107
|
-
# @param
|
145
|
+
# @param language_code [AssemblyAI::Transcripts::TranscriptLanguageCode]
|
146
|
+
# @param language_detection [Boolean] Enable [Automatic language
|
147
|
+
# www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection),
|
148
|
+
# either true or false.
|
149
|
+
# @param language_confidence_threshold [Float] The confidence threshold for the automatically detected language.
|
150
|
+
# An error will be returned if the language confidence is below this threshold.
|
151
|
+
# Defaults to 0.
|
152
|
+
# @param speech_model [AssemblyAI::Transcripts::SpeechModel]
|
108
153
|
# @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
|
109
154
|
# @param format_text [Boolean] Enable Text Formatting, can be true or false
|
110
|
-
# @param
|
111
|
-
# @param
|
112
|
-
#
|
113
|
-
#
|
114
|
-
# @param
|
155
|
+
# @param disfluencies [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
|
156
|
+
# @param multichannel [Boolean] Enable
|
157
|
+
# ://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription)
|
158
|
+
# transcription, can be true or false.
|
159
|
+
# @param dual_channel [Boolean] Enable [Dual
|
160
|
+
# ://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription)
|
161
|
+
# transcription, can be true or false.
|
162
|
+
# @param webhook_url [String] The URL to which we send webhook requests.
|
163
|
+
# We sends two different types of webhook requests.
|
164
|
+
# One request when a transcript is completed or failed, and one request when the
|
165
|
+
# redacted audio is ready if redact_pii_audio is enabled.
|
166
|
+
# @param webhook_auth_header_name [String] The header name to be sent with the transcript completed or failed webhook
|
167
|
+
# requests
|
168
|
+
# @param webhook_auth_header_value [String] The header value to send back with the transcript completed or failed webhook
|
169
|
+
# requests for added security
|
170
|
+
# @param auto_highlights [Boolean] Enable Key Phrases, either true or false
|
115
171
|
# @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
|
116
172
|
# @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
|
117
173
|
# @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
|
118
|
-
# @param boost_param [Transcripts::TranscriptBoostParam]
|
174
|
+
# @param boost_param [AssemblyAI::Transcripts::TranscriptBoostParam] How much to boost specified words
|
119
175
|
# @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
|
120
|
-
# @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or
|
121
|
-
#
|
122
|
-
# @param
|
123
|
-
#
|
124
|
-
#
|
125
|
-
#
|
126
|
-
# @param
|
127
|
-
#
|
128
|
-
#
|
129
|
-
#
|
130
|
-
# @param
|
131
|
-
#
|
176
|
+
# @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or
|
177
|
+
# false
|
178
|
+
# @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be
|
179
|
+
# true or false. See [PII
|
180
|
+
# redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
|
181
|
+
# details.
|
182
|
+
# @param redact_pii_audio_quality [AssemblyAI::Transcripts::RedactPiiAudioQuality] Controls the filetype of the audio created by redact_pii_audio. Currently
|
183
|
+
# supports mp3 (default) and wav. See [PII
|
184
|
+
# redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
|
185
|
+
# details.
|
186
|
+
# @param redact_pii_policies [Array<AssemblyAI::Transcripts::PiiPolicy>] The list of PII Redaction policies to enable. See [PII
|
187
|
+
# redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
|
188
|
+
# details.
|
189
|
+
# @param redact_pii_sub [AssemblyAI::Transcripts::SubstitutionPolicy]
|
190
|
+
# @param speaker_labels [Boolean] Enable [Speaker
|
191
|
+
# diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be
|
192
|
+
# true or false
|
193
|
+
# @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify,
|
194
|
+
# up to 10. See [Speaker
|
195
|
+
# diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for
|
196
|
+
# more details.
|
197
|
+
# @param content_safety [Boolean] Enable [Content
|
198
|
+
# Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be
|
199
|
+
# true or false
|
200
|
+
# @param content_safety_confidence [Integer] The confidence threshold for the Content Moderation model. Values must be
|
201
|
+
# between 25 and 100.
|
202
|
+
# @param iab_categories [Boolean] Enable [Topic
|
203
|
+
# Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true
|
204
|
+
# or false
|
205
|
+
# @param custom_spelling [Array<Hash>] Customize how words are spelled and formatted using to and from valuesRequest of type Array<AssemblyAI::Transcripts::TranscriptCustomSpelling>, as a Hash
|
132
206
|
# * :from (Array<String>)
|
133
207
|
# * :to (String)
|
134
|
-
# @param
|
135
|
-
#
|
136
|
-
#
|
137
|
-
# @param
|
208
|
+
# @param sentiment_analysis [Boolean] Enable [Sentiment
|
209
|
+
# Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be
|
210
|
+
# true or false
|
211
|
+
# @param auto_chapters [Boolean] Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters),
|
212
|
+
# can be true or false
|
213
|
+
# @param entity_detection [Boolean] Enable [Entity
|
214
|
+
# Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true
|
215
|
+
# or false
|
138
216
|
# @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
|
139
|
-
#
|
140
|
-
# @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization),
|
141
|
-
#
|
142
|
-
# @param
|
143
|
-
# @param
|
144
|
-
# @param
|
145
|
-
# @param
|
217
|
+
# Valid values are in the range [0, 1] inclusive.
|
218
|
+
# @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization),
|
219
|
+
# can be true or false
|
220
|
+
# @param summary_model [AssemblyAI::Transcripts::SummaryModel] The model to summarize the transcript
|
221
|
+
# @param summary_type [AssemblyAI::Transcripts::SummaryType] The type of summary
|
222
|
+
# @param custom_topics [Boolean] Enable custom topics, either true or false
|
223
|
+
# @param topics [Array<String>] The list of custom topics
|
146
224
|
# @param audio_url [String] The URL of the audio or video file to transcribe.
|
147
|
-
# @param request_options [RequestOptions]
|
225
|
+
# @param request_options [AssemblyAI::RequestOptions]
|
148
226
|
# @param polling_options [Transcripts::PollingOptions] Configuration options for polling requests.
|
149
227
|
# @return [Transcripts::Transcript]
|
150
|
-
def transcribe(audio_url:,
|
151
|
-
webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil,
|
228
|
+
def transcribe(audio_url:, language_code: nil, language_detection: nil, language_confidence_threshold: nil, speech_model: nil,
|
229
|
+
punctuate: nil, format_text: nil, disfluencies: nil, multichannel: nil, dual_channel: nil, webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, custom_spelling: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
|
152
230
|
Async do
|
153
231
|
transcript = submit(audio_url: audio_url, speech_model: speech_model, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel,
|
154
|
-
webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics,
|
232
|
+
webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, language_confidence_threshold: language_confidence_threshold, custom_spelling: custom_spelling, disfluencies: disfluencies, multichannel: multichannel, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, request_options: request_options)
|
155
233
|
wait_until_ready(transcript_id: transcript.id, polling_options: polling_options).wait
|
156
234
|
end
|
157
235
|
end
|
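
The second hunk makes the same signature change to the async variant, whose body wraps submission and polling in an `Async do ... end` block and calls `.wait` on the polling step. A rough sketch of driving it from the async gem's reactor; the `AssemblyAI::AsyncClient` constructor shown here is an assumption, since client construction is outside this diff:

```ruby
require "assemblyai"
require "async"

# Assumed async client setup; the constructor is not part of this diff.
async_client = AssemblyAI::AsyncClient.new(api_key: ENV.fetch("ASSEMBLYAI_API_KEY"))

Async do
  # transcribe returns an Async task; .wait yields the finished transcript.
  task = async_client.transcripts.transcribe(
    audio_url: "https://example.com/interview.wav",  # placeholder URL
    language_detection: true,
    multichannel: true
  )
  puts task.wait.text
end
```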
@@ -15,7 +15,7 @@ module AssemblyAI
 # @return [AssemblyAI::Transcripts::AudioIntelligenceModelStatus] The status of the Content Moderation model. Either success, or unavailable in
 # the rare case that the model failed.
 attr_reader :status
-# @return [Array<AssemblyAI::Transcripts::ContentSafetyLabelResult>]
+# @return [Array<AssemblyAI::Transcripts::ContentSafetyLabelResult>] An array of results for the Content Moderation model
 attr_reader :results
 # @return [Hash{String => Float}] A summary of the Content Moderation confidence results for the entire audio file
 attr_reader :summary

@@ -31,7 +31,7 @@ module AssemblyAI

 # @param status [AssemblyAI::Transcripts::AudioIntelligenceModelStatus] The status of the Content Moderation model. Either success, or unavailable in
 # the rare case that the model failed.
-# @param results [Array<AssemblyAI::Transcripts::ContentSafetyLabelResult>]
+# @param results [Array<AssemblyAI::Transcripts::ContentSafetyLabelResult>] An array of results for the Content Moderation model
 # @param summary [Hash{String => Float}] A summary of the Content Moderation confidence results for the entire audio file
 # @param severity_score_summary [Hash{String => AssemblyAI::Transcripts::SeverityScoreSummary}] A summary of the Content Moderation severity results for the entire audio file
 # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
@@ -7,13 +7,13 @@ require "json"
 module AssemblyAI
 class Transcripts
 class ParagraphsResponse
-# @return [String]
+# @return [String] The unique identifier of your transcript
 attr_reader :id
-# @return [Float]
+# @return [Float] The confidence score for the transcript
 attr_reader :confidence
-# @return [Float]
+# @return [Float] The duration of the audio file in seconds
 attr_reader :audio_duration
-# @return [Array<AssemblyAI::Transcripts::TranscriptParagraph>]
+# @return [Array<AssemblyAI::Transcripts::TranscriptParagraph>] An array of paragraphs in the transcript
 attr_reader :paragraphs
 # @return [OpenStruct] Additional properties unmapped to the current class definition
 attr_reader :additional_properties

@@ -23,10 +23,10 @@ module AssemblyAI

 OMIT = Object.new

-# @param id [String]
-# @param confidence [Float]
-# @param audio_duration [Float]
-# @param paragraphs [Array<AssemblyAI::Transcripts::TranscriptParagraph>]
+# @param id [String] The unique identifier of your transcript
+# @param confidence [Float] The confidence score for the transcript
+# @param audio_duration [Float] The duration of the audio file in seconds
+# @param paragraphs [Array<AssemblyAI::Transcripts::TranscriptParagraph>] An array of paragraphs in the transcript
 # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
 # @return [AssemblyAI::Transcripts::ParagraphsResponse]
 def initialize(id:, confidence:, audio_duration:, paragraphs:, additional_properties: nil)
@@ -7,13 +7,13 @@ require "json"
 module AssemblyAI
 class Transcripts
 class SentencesResponse
-# @return [String]
+# @return [String] The unique identifier for the transcript
 attr_reader :id
-# @return [Float]
+# @return [Float] The confidence score for the transcript
 attr_reader :confidence
-# @return [Float]
+# @return [Float] The duration of the audio file in seconds
 attr_reader :audio_duration
-# @return [Array<AssemblyAI::Transcripts::TranscriptSentence>]
+# @return [Array<AssemblyAI::Transcripts::TranscriptSentence>] An array of sentences in the transcript
 attr_reader :sentences
 # @return [OpenStruct] Additional properties unmapped to the current class definition
 attr_reader :additional_properties

@@ -23,10 +23,10 @@ module AssemblyAI

 OMIT = Object.new

-# @param id [String]
-# @param confidence [Float]
-# @param audio_duration [Float]
-# @param sentences [Array<AssemblyAI::Transcripts::TranscriptSentence>]
+# @param id [String] The unique identifier for the transcript
+# @param confidence [Float] The confidence score for the transcript
+# @param audio_duration [Float] The duration of the audio file in seconds
+# @param sentences [Array<AssemblyAI::Transcripts::TranscriptSentence>] An array of sentences in the transcript
 # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
 # @return [AssemblyAI::Transcripts::SentencesResponse]
 def initialize(id:, confidence:, audio_duration:, sentences:, additional_properties: nil)
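
The ParagraphsResponse and SentencesResponse changes above are documentation-only: the previously bare `id`, `confidence`, `audio_duration`, and `paragraphs`/`sentences` fields now carry descriptions. A hedged sketch of reading them; the `get_paragraphs`/`get_sentences` calls are assumed helper methods on the transcripts client and are not shown in this diff:

```ruby
# Assumed retrieval helpers; only the fields documented above are used here.
sentences = client.transcripts.get_sentences(transcript_id: transcript.id)
puts "#{sentences.id}: #{sentences.audio_duration}s of audio, confidence #{sentences.confidence}"
sentences.sentences.each { |sentence| puts sentence.text }

paragraphs = client.transcripts.get_paragraphs(transcript_id: transcript.id)
paragraphs.paragraphs.each { |paragraph| puts paragraph.text }
```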
@@ -18,6 +18,9 @@ module AssemblyAI
 attr_reader :sentiment
 # @return [Float] The confidence score for the detected sentiment of the sentence, from 0 to 1
 attr_reader :confidence
+# @return [String] The channel of this utterance. The left and right channels are channels 1 and 2.
+# Additional channels increment the channel number sequentially.
+attr_reader :channel
 # @return [String] The speaker of the sentence if [Speaker
 # Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is
 # enabled, else null

@@ -35,17 +38,21 @@ module AssemblyAI
 # @param end_ [Integer] The ending time, in milliseconds, of the sentence
 # @param sentiment [AssemblyAI::Transcripts::Sentiment] The detected sentiment for the sentence, one of POSITIVE, NEUTRAL, NEGATIVE
 # @param confidence [Float] The confidence score for the detected sentiment of the sentence, from 0 to 1
+# @param channel [String] The channel of this utterance. The left and right channels are channels 1 and 2.
+# Additional channels increment the channel number sequentially.
 # @param speaker [String] The speaker of the sentence if [Speaker
 # Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is
 # enabled, else null
 # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
 # @return [AssemblyAI::Transcripts::SentimentAnalysisResult]
-def initialize(text:, start:, end_:, sentiment:, confidence:,
+def initialize(text:, start:, end_:, sentiment:, confidence:, channel: OMIT, speaker: OMIT,
+additional_properties: nil)
 @text = text
 @start = start
 @end_ = end_
 @sentiment = sentiment
 @confidence = confidence
+@channel = channel if channel != OMIT
 @speaker = speaker if speaker != OMIT
 @additional_properties = additional_properties
 @_field_set = {

@@ -54,6 +61,7 @@ module AssemblyAI
 "end": end_,
 "sentiment": sentiment,
 "confidence": confidence,
+"channel": channel,
 "speaker": speaker
 }.reject do |_k, v|
 v == OMIT

@@ -71,6 +79,7 @@ module AssemblyAI
 end_ = struct["end"]
 sentiment = struct["sentiment"]
 confidence = struct["confidence"]
+channel = struct["channel"]
 speaker = struct["speaker"]
 new(
 text: text,

@@ -78,6 +87,7 @@ module AssemblyAI
 end_: end_,
 sentiment: sentiment,
 confidence: confidence,
+channel: channel,
 speaker: speaker,
 additional_properties: struct
 )

@@ -102,6 +112,7 @@ module AssemblyAI
 obj.end_.is_a?(Integer) != false || raise("Passed value for field obj.end_ is not the expected type, validation failed.")
 obj.sentiment.is_a?(AssemblyAI::Transcripts::Sentiment) != false || raise("Passed value for field obj.sentiment is not the expected type, validation failed.")
 obj.confidence.is_a?(Float) != false || raise("Passed value for field obj.confidence is not the expected type, validation failed.")
+obj.channel&.is_a?(String) != false || raise("Passed value for field obj.channel is not the expected type, validation failed.")
 obj.speaker&.is_a?(String) != false || raise("Passed value for field obj.speaker is not the expected type, validation failed.")
 end
 end
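
The SentimentAnalysisResult hunks above thread an optional `channel` field through the class end to end (reader, constructor, serialization, and validation), matching the new multichannel option. A short sketch of consuming it; the `sentiment_analysis_results` attribute name follows the API's response field and is assumed here rather than shown in this diff:

```ruby
# Assumes a transcript requested with sentiment_analysis: true (and
# multichannel: true if per-channel attribution is wanted).
transcript.sentiment_analysis_results&.each do |result|
  # channel is omitted (nil) when multichannel transcription is not enabled.
  channel_label = result.channel || "-"
  puts "[ch #{channel_label}] #{result.sentiment}: #{result.text}"
end
```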
@@ -11,7 +11,7 @@ module AssemblyAI
 class TopicDetectionResult
 # @return [String] The text in the transcript in which a detected topic occurs
 attr_reader :text
-# @return [Array<AssemblyAI::Transcripts::TopicDetectionResultLabelsItem>]
+# @return [Array<AssemblyAI::Transcripts::TopicDetectionResultLabelsItem>] An array of detected topics in the text
 attr_reader :labels
 # @return [AssemblyAI::Transcripts::Timestamp]
 attr_reader :timestamp

@@ -24,7 +24,7 @@ module AssemblyAI
 OMIT = Object.new

 # @param text [String] The text in the transcript in which a detected topic occurs
-# @param labels [Array<AssemblyAI::Transcripts::TopicDetectionResultLabelsItem>]
+# @param labels [Array<AssemblyAI::Transcripts::TopicDetectionResultLabelsItem>] An array of detected topics in the text
 # @param timestamp [AssemblyAI::Transcripts::Timestamp]
 # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
 # @return [AssemblyAI::Transcripts::TopicDetectionResult]