assemblyai 1.0.0.pre.beta.6 → 1.0.0.pre.beta.8

Sign up to get free protection for your applications and to get access to all the features.
Files changed (79) hide show
  1. checksums.yaml +4 -4
  2. data/lib/assemblyai/files/client.rb +4 -2
  3. data/lib/assemblyai/files/types/uploaded_file.rb +20 -11
  4. data/lib/assemblyai/lemur/client.rb +256 -150
  5. data/lib/assemblyai/lemur/types/lemur_action_items_response.rb +28 -14
  6. data/lib/assemblyai/lemur/types/lemur_base_params.rb +83 -54
  7. data/lib/assemblyai/lemur/types/lemur_base_params_context.rb +11 -5
  8. data/lib/assemblyai/lemur/types/lemur_base_response.rb +20 -11
  9. data/lib/assemblyai/lemur/types/lemur_question.rb +57 -31
  10. data/lib/assemblyai/lemur/types/lemur_question_answer.rb +28 -14
  11. data/lib/assemblyai/lemur/types/lemur_question_answer_response.rb +29 -14
  12. data/lib/assemblyai/lemur/types/lemur_question_context.rb +13 -6
  13. data/lib/assemblyai/lemur/types/lemur_summary_response.rb +28 -14
  14. data/lib/assemblyai/lemur/types/lemur_task_response.rb +28 -14
  15. data/lib/assemblyai/lemur/types/purge_lemur_request_data_response.rb +32 -17
  16. data/lib/assemblyai/realtime/client.rb +32 -16
  17. data/lib/assemblyai/realtime/types/configure_end_utterance_silence_threshold.rb +24 -13
  18. data/lib/assemblyai/realtime/types/final_transcript.rb +72 -41
  19. data/lib/assemblyai/realtime/types/force_end_utterance.rb +20 -11
  20. data/lib/assemblyai/realtime/types/message_type.rb +1 -0
  21. data/lib/assemblyai/realtime/types/partial_transcript.rb +62 -34
  22. data/lib/assemblyai/realtime/types/realtime_base_message.rb +22 -13
  23. data/lib/assemblyai/realtime/types/realtime_base_transcript.rb +57 -31
  24. data/lib/assemblyai/realtime/types/realtime_error.rb +20 -11
  25. data/lib/assemblyai/realtime/types/realtime_message.rb +44 -18
  26. data/lib/assemblyai/realtime/types/realtime_temporary_token_response.rb +21 -12
  27. data/lib/assemblyai/realtime/types/realtime_transcript.rb +15 -9
  28. data/lib/assemblyai/realtime/types/session_begins.rb +31 -14
  29. data/lib/assemblyai/realtime/types/session_information.rb +69 -0
  30. data/lib/assemblyai/realtime/types/session_terminated.rb +20 -11
  31. data/lib/assemblyai/realtime/types/terminate_session.rb +21 -12
  32. data/lib/assemblyai/realtime/types/word.rb +36 -18
  33. data/lib/assemblyai/streaming/types/receive_message.rb +113 -0
  34. data/lib/assemblyai/streaming/types/send_message.rb +86 -0
  35. data/lib/assemblyai/streaming/types/streaming.rb +11 -0
  36. data/lib/assemblyai/transcripts/client.rb +54 -20
  37. data/lib/assemblyai/transcripts/list_by_url_client.rb +6 -4
  38. data/lib/assemblyai/transcripts/polling_client.rb +12 -2
  39. data/lib/assemblyai/transcripts/types/auto_highlight_result.rb +40 -19
  40. data/lib/assemblyai/transcripts/types/auto_highlights_result.rb +38 -14
  41. data/lib/assemblyai/transcripts/types/chapter.rb +40 -20
  42. data/lib/assemblyai/transcripts/types/content_safety_label.rb +32 -16
  43. data/lib/assemblyai/transcripts/types/content_safety_label_result.rb +51 -29
  44. data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +51 -29
  45. data/lib/assemblyai/transcripts/types/entity.rb +41 -21
  46. data/lib/assemblyai/transcripts/types/page_details.rb +60 -33
  47. data/lib/assemblyai/transcripts/types/paragraphs_response.rb +37 -19
  48. data/lib/assemblyai/transcripts/types/redact_pii_audio_quality.rb +4 -1
  49. data/lib/assemblyai/transcripts/types/redacted_audio_response.rb +29 -15
  50. data/lib/assemblyai/transcripts/types/sentences_response.rb +37 -19
  51. data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +62 -35
  52. data/lib/assemblyai/transcripts/types/severity_score_summary.rb +32 -16
  53. data/lib/assemblyai/transcripts/types/speech_model.rb +2 -0
  54. data/lib/assemblyai/transcripts/types/substitution_policy.rb +4 -2
  55. data/lib/assemblyai/transcripts/types/timestamp.rb +28 -14
  56. data/lib/assemblyai/transcripts/types/topic_detection_model_result.rb +39 -19
  57. data/lib/assemblyai/transcripts/types/topic_detection_result.rb +40 -21
  58. data/lib/assemblyai/transcripts/types/topic_detection_result_labels_item.rb +31 -15
  59. data/lib/assemblyai/transcripts/types/transcript.rb +512 -293
  60. data/lib/assemblyai/transcripts/types/transcript_custom_spelling.rb +30 -15
  61. data/lib/assemblyai/transcripts/types/transcript_language_code.rb +3 -2
  62. data/lib/assemblyai/transcripts/types/transcript_list.rb +33 -16
  63. data/lib/assemblyai/transcripts/types/transcript_list_item.rb +63 -30
  64. data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +334 -191
  65. data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +61 -33
  66. data/lib/assemblyai/transcripts/types/transcript_ready_notification.rb +30 -16
  67. data/lib/assemblyai/transcripts/types/transcript_sentence.rb +61 -33
  68. data/lib/assemblyai/transcripts/types/transcript_status.rb +2 -1
  69. data/lib/assemblyai/transcripts/types/transcript_utterance.rb +55 -31
  70. data/lib/assemblyai/transcripts/types/transcript_word.rb +55 -24
  71. data/lib/assemblyai/transcripts/types/word_search_match.rb +40 -20
  72. data/lib/assemblyai/transcripts/types/word_search_response.rb +36 -17
  73. data/lib/assemblyai/types/error.rb +32 -16
  74. data/lib/requests.rb +80 -34
  75. data/lib/types_export.rb +9 -8
  76. metadata +6 -5
  77. data/lib/assemblyai/realtime/types/audio_data.rb +0 -7
  78. data/lib/assemblyai/realtime/types/receive_message.rb +0 -87
  79. data/lib/assemblyai/realtime/types/send_message.rb +0 -74
@@ -1,10 +1,10 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require_relative "speech_model"
4
3
  require_relative "transcript_status"
5
4
  require_relative "transcript_language_code"
6
5
  require_relative "transcript_word"
7
6
  require_relative "transcript_utterance"
7
+ require_relative "speech_model"
8
8
  require_relative "auto_highlights_result"
9
9
  require_relative "redact_pii_audio_quality"
10
10
  require_relative "pii_policy"
@@ -15,386 +15,604 @@ require_relative "transcript_custom_spelling"
15
15
  require_relative "chapter"
16
16
  require_relative "sentiment_analysis_result"
17
17
  require_relative "entity"
18
+ require "ostruct"
18
19
  require "json"
19
20
 
20
21
  module AssemblyAI
21
22
  class Transcripts
22
23
  # A transcript object
23
24
  class Transcript
24
- attr_reader :id, :speech_model, :language_model, :acoustic_model, :status, :language_code, :audio_url, :text,
25
- :words, :utterances, :confidence, :audio_duration, :punctuate, :format_text, :dual_channel, :webhook_url, :webhook_status_code, :webhook_auth, :webhook_auth_header_name, :speed_boost, :auto_highlights, :auto_highlights_result, :audio_start_from, :audio_end_at, :word_boost, :boost_param, :filter_profanity, :redact_pii, :redact_pii_audio, :redact_pii_audio_quality, :redact_pii_policies, :redact_pii_sub, :speaker_labels, :speakers_expected, :content_safety, :content_safety_labels, :iab_categories, :iab_categories_result, :language_detection, :custom_spelling, :auto_chapters, :chapters, :summarization, :summary_type, :summary_model, :summary, :custom_topics, :topics, :disfluencies, :sentiment_analysis, :sentiment_analysis_results, :entity_detection, :entities, :speech_threshold, :throttled, :error, :additional_properties
25
+ # @return [String] The unique identifier of your transcript
26
+ attr_reader :id
27
+ # @return [String] The language model that was used for the transcript
28
+ attr_reader :language_model
29
+ # @return [String] The acoustic model that was used for the transcript
30
+ attr_reader :acoustic_model
31
+ # @return [AssemblyAI::Transcripts::TranscriptStatus] The status of your transcript. Possible values are queued, processing,
32
+ # completed, or error.
33
+ attr_reader :status
34
+ # @return [AssemblyAI::Transcripts::TranscriptLanguageCode] The language of your audio file.
35
+ # Possible values are found in [Supported
36
+ # Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
37
+ # The default value is 'en_us'.
38
+ attr_reader :language_code
39
+ # @return [String] The URL of the media that was transcribed
40
+ attr_reader :audio_url
41
+ # @return [String] The textual transcript of your media file
42
+ attr_reader :text
43
+ # @return [Array<AssemblyAI::Transcripts::TranscriptWord>] An array of temporally-sequential word objects, one for each word in the
44
+ # transcript.
45
+ # See [Speech
46
+ # recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more
47
+ # information.
48
+ attr_reader :words
49
+ # @return [Array<AssemblyAI::Transcripts::TranscriptUtterance>] When dual_channel or speaker_labels is enabled, a list of turn-by-turn utterance
50
+ # objects.
51
+ # See [Speaker
52
+ # diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for
53
+ # more information.
54
+ attr_reader :utterances
55
+ # @return [Float] The confidence score for the transcript, between 0.0 (low confidence) and 1.0
56
+ # (high confidence)
57
+ attr_reader :confidence
58
+ # @return [Float] The duration of this transcript object's media file, in seconds
59
+ attr_reader :audio_duration
60
+ # @return [Boolean] Whether Automatic Punctuation is enabled, either true or false
61
+ attr_reader :punctuate
62
+ # @return [Boolean] Whether Text Formatting is enabled, either true or false
63
+ attr_reader :format_text
64
+ # @return [Boolean] Whether [Dual channel
65
+ # transcription](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription)
66
+ # was enabled in the transcription request, either true or false
67
+ attr_reader :dual_channel
68
+ # @return [AssemblyAI::Transcripts::SpeechModel]
69
+ attr_reader :speech_model
70
+ # @return [String] The URL to which we send webhooks upon transcription completion
71
+ attr_reader :webhook_url
72
+ # @return [Integer] The status code we received from your server when delivering your webhook, if a
73
+ # webhook URL was provided
74
+ attr_reader :webhook_status_code
75
+ # @return [Boolean] Whether webhook authentication details were provided
76
+ attr_reader :webhook_auth
77
+ # @return [String] The header name which should be sent back with webhook calls
78
+ attr_reader :webhook_auth_header_name
79
+ # @return [Boolean] Whether speed boost is enabled
80
+ attr_reader :speed_boost
81
+ # @return [Boolean] Whether Key Phrases is enabled, either true or false
82
+ attr_reader :auto_highlights
83
+ # @return [AssemblyAI::Transcripts::AutoHighlightsResult]
84
+ attr_reader :auto_highlights_result
85
+ # @return [Integer] The point in time, in milliseconds, in the file at which the transcription was
86
+ # started
87
+ attr_reader :audio_start_from
88
+ # @return [Integer] The point in time, in milliseconds, in the file at which the transcription was
89
+ # terminated
90
+ attr_reader :audio_end_at
91
+ # @return [Array<String>] The list of custom vocabulary to boost transcription probability for
92
+ attr_reader :word_boost
93
+ # @return [String] The word boost parameter value
94
+ attr_reader :boost_param
95
+ # @return [Boolean] Whether [Profanity
96
+ # Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering)
97
+ # is enabled, either true or false
98
+ attr_reader :filter_profanity
99
+ # @return [Boolean] Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is
100
+ # enabled, either true or false
101
+ attr_reader :redact_pii
102
+ # @return [Boolean] Whether a redacted version of the audio file was generated,
103
+ # either true or false. See [PII
104
+ # redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
105
+ # information.
106
+ attr_reader :redact_pii_audio
107
+ # @return [AssemblyAI::Transcripts::RedactPiiAudioQuality]
108
+ attr_reader :redact_pii_audio_quality
109
+ # @return [Array<AssemblyAI::Transcripts::PiiPolicy>] The list of PII Redaction policies that were enabled, if PII Redaction is
110
+ # enabled.
111
+ # See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for
112
+ # more information.
113
+ attr_reader :redact_pii_policies
114
+ # @return [AssemblyAI::Transcripts::SubstitutionPolicy] The replacement logic for detected PII, can be "entity_type" or "hash". See [PII
115
+ # redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
116
+ # details.
117
+ attr_reader :redact_pii_sub
118
+ # @return [Boolean] Whether [Speaker
119
+ # diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is
120
+ # enabled, can be true or false
121
+ attr_reader :speaker_labels
122
+ # @return [Integer] Tell the speaker label model how many speakers it should attempt to identify, up
123
+ # to 10. See [Speaker
124
+ # diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for
125
+ # more details.
126
+ attr_reader :speakers_expected
127
+ # @return [Boolean] Whether [Content
128
+ # Moderation](https://www.assemblyai.com/docs/models/content-moderation) is
129
+ # enabled, can be true or false
130
+ attr_reader :content_safety
131
+ # @return [AssemblyAI::Transcripts::ContentSafetyLabelsResult]
132
+ attr_reader :content_safety_labels
133
+ # @return [Boolean] Whether [Topic
134
+ # Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled,
135
+ # can be true or false
136
+ attr_reader :iab_categories
137
+ # @return [AssemblyAI::Transcripts::TopicDetectionModelResult]
138
+ attr_reader :iab_categories_result
139
+ # @return [Boolean] Whether [Automatic language
140
+ # detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection)
141
+ # is enabled, either true or false
142
+ attr_reader :language_detection
143
+ # @return [Array<AssemblyAI::Transcripts::TranscriptCustomSpelling>] Customize how words are spelled and formatted using to and from values
144
+ attr_reader :custom_spelling
145
+ # @return [Boolean] Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is
146
+ # enabled, can be true or false
147
+ attr_reader :auto_chapters
148
+ # @return [Array<AssemblyAI::Transcripts::Chapter>] An array of temporally sequential chapters for the audio file
149
+ attr_reader :chapters
150
+ # @return [Boolean] Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is
151
+ # enabled, either true or false
152
+ attr_reader :summarization
153
+ # @return [String] The type of summary generated, if
154
+ # [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
155
+ attr_reader :summary_type
156
+ # @return [String] The Summarization model used to generate the summary,
157
+ # if [Summarization](https://www.assemblyai.com/docs/models/summarization) is
158
+ # enabled
159
+ attr_reader :summary_model
160
+ # @return [String] The generated summary of the media file, if
161
+ # [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
162
+ attr_reader :summary
163
+ # @return [Boolean] Whether custom topics is enabled, either true or false
164
+ attr_reader :custom_topics
165
+ # @return [Array<String>] The list of custom topics provided if custom topics is enabled
166
+ attr_reader :topics
167
+ # @return [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
168
+ attr_reader :disfluencies
169
+ # @return [Boolean] Whether [Sentiment
170
+ # Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled,
171
+ # can be true or false
172
+ attr_reader :sentiment_analysis
173
+ # @return [Array<AssemblyAI::Transcripts::SentimentAnalysisResult>] An array of results for the Sentiment Analysis model, if it is enabled.
174
+ # See [Sentiment
175
+ # analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more
176
+ # information.
177
+ attr_reader :sentiment_analysis_results
178
+ # @return [Boolean] Whether [Entity
179
+ # Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled,
180
+ # can be true or false
181
+ attr_reader :entity_detection
182
+ # @return [Array<AssemblyAI::Transcripts::Entity>] An array of results for the Entity Detection model, if it is enabled.
183
+ # See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection)
184
+ # for more information.
185
+ attr_reader :entities
186
+ # @return [Float] Defaults to null. Reject audio files that contain less than this fraction of
187
+ # speech.
188
+ # Valid values are in the range [0, 1] inclusive.
189
+ attr_reader :speech_threshold
190
+ # @return [Boolean] True while a request is throttled and false when a request is no longer
191
+ # throttled
192
+ attr_reader :throttled
193
+ # @return [String] Error message of why the transcript failed
194
+ attr_reader :error
195
+ # @return [OpenStruct] Additional properties unmapped to the current class definition
196
+ attr_reader :additional_properties
197
+ # @return [Object]
198
+ attr_reader :_field_set
199
+ protected :_field_set
200
+
201
+ OMIT = Object.new
26
202
 
27
203
  # @param id [String] The unique identifier of your transcript
28
- # @param speech_model [Transcripts::SpeechModel]
29
204
  # @param language_model [String] The language model that was used for the transcript
30
205
  # @param acoustic_model [String] The acoustic model that was used for the transcript
31
- # @param status [Transcripts::TranscriptStatus] The status of your transcript. Possible values are queued, processing, completed, or error.
32
- # @param language_code [Transcripts::TranscriptLanguageCode] The language of your audio file.
33
- # Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
34
- # The default value is 'en_us'.
206
+ # @param status [AssemblyAI::Transcripts::TranscriptStatus] The status of your transcript. Possible values are queued, processing,
207
+ # completed, or error.
208
+ # @param language_code [AssemblyAI::Transcripts::TranscriptLanguageCode] The language of your audio file.
209
+ # Possible values are found in [Supported
210
+ # Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
211
+ # The default value is 'en_us'.
35
212
  # @param audio_url [String] The URL of the media that was transcribed
36
213
  # @param text [String] The textual transcript of your media file
37
- # @param words [Array<Transcripts::TranscriptWord>] An array of temporally-sequential word objects, one for each word in the transcript.
38
- # See [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.
39
- # @param utterances [Array<Transcripts::TranscriptUtterance>] When dual_channel or speaker_labels is enabled, a list of turn-by-turn utterance objects.
40
- # See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more information.
41
- # @param confidence [Float] The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)
214
+ # @param words [Array<AssemblyAI::Transcripts::TranscriptWord>] An array of temporally-sequential word objects, one for each word in the
215
+ # transcript.
216
+ # See [Speech
217
+ # recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more
218
+ # information.
219
+ # @param utterances [Array<AssemblyAI::Transcripts::TranscriptUtterance>] When dual_channel or speaker_labels is enabled, a list of turn-by-turn utterance
220
+ # objects.
221
+ # See [Speaker
222
+ # diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for
223
+ # more information.
224
+ # @param confidence [Float] The confidence score for the transcript, between 0.0 (low confidence) and 1.0
225
+ # (high confidence)
42
226
  # @param audio_duration [Float] The duration of this transcript object's media file, in seconds
43
227
  # @param punctuate [Boolean] Whether Automatic Punctuation is enabled, either true or false
44
228
  # @param format_text [Boolean] Whether Text Formatting is enabled, either true or false
45
- # @param dual_channel [Boolean] Whether [Dual channel transcription](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) was enabled in the transcription request, either true or false
229
+ # @param dual_channel [Boolean] Whether [Dual channel
230
+ # transcription](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription)
231
+ # was enabled in the transcription request, either true or false
232
+ # @param speech_model [AssemblyAI::Transcripts::SpeechModel]
46
233
# @param webhook_url [String] The URL to which we send webhooks upon transcription completion
47
- # @param webhook_status_code [Integer] The status code we received from your server when delivering your webhook, if a webhook URL was provided
234
+ # @param webhook_status_code [Integer] The status code we received from your server when delivering your webhook, if a
235
+ # webhook URL was provided
48
236
  # @param webhook_auth [Boolean] Whether webhook authentication details were provided
49
237
  # @param webhook_auth_header_name [String] The header name which should be sent back with webhook calls
50
238
  # @param speed_boost [Boolean] Whether speed boost is enabled
51
239
  # @param auto_highlights [Boolean] Whether Key Phrases is enabled, either true or false
52
- # @param auto_highlights_result [Transcripts::AutoHighlightsResult]
53
- # @param audio_start_from [Integer] The point in time, in milliseconds, in the file at which the transcription was started
54
- # @param audio_end_at [Integer] The point in time, in milliseconds, in the file at which the transcription was terminated
240
+ # @param auto_highlights_result [AssemblyAI::Transcripts::AutoHighlightsResult]
241
+ # @param audio_start_from [Integer] The point in time, in milliseconds, in the file at which the transcription was
242
+ # started
243
+ # @param audio_end_at [Integer] The point in time, in milliseconds, in the file at which the transcription was
244
+ # terminated
55
245
  # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
56
246
  # @param boost_param [String] The word boost parameter value
57
- # @param filter_profanity [Boolean] Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false
58
- # @param redact_pii [Boolean] Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false
247
+ # @param filter_profanity [Boolean] Whether [Profanity
248
+ # Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering)
249
+ # is enabled, either true or false
250
+ # @param redact_pii [Boolean] Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is
251
+ # enabled, either true or false
59
252
  # @param redact_pii_audio [Boolean] Whether a redacted version of the audio file was generated,
60
- # either true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
61
- # @param redact_pii_audio_quality [Transcripts::RedactPiiAudioQuality]
62
- # @param redact_pii_policies [Array<Transcripts::PiiPolicy>] The list of PII Redaction policies that were enabled, if PII Redaction is enabled.
63
- # See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
64
- # @param redact_pii_sub [Transcripts::SubstitutionPolicy] The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
65
- # @param speaker_labels [Boolean] Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false
66
- # @param speakers_expected [Integer] Tell the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
67
- # @param content_safety [Boolean] Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false
68
- # @param content_safety_labels [Transcripts::ContentSafetyLabelsResult]
69
- # @param iab_categories [Boolean] Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false
70
- # @param iab_categories_result [Transcripts::TopicDetectionModelResult]
71
- # @param language_detection [Boolean] Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false
72
- # @param custom_spelling [Array<Transcripts::TranscriptCustomSpelling>] Customize how words are spelled and formatted using to and from values
73
- # @param auto_chapters [Boolean] Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false
74
- # @param chapters [Array<Transcripts::Chapter>] An array of temporally sequential chapters for the audio file
75
- # @param summarization [Boolean] Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false
76
- # @param summary_type [String] The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
253
+ # either true or false. See [PII
254
+ # redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
255
+ # information.
256
+ # @param redact_pii_audio_quality [AssemblyAI::Transcripts::RedactPiiAudioQuality]
257
+ # @param redact_pii_policies [Array<AssemblyAI::Transcripts::PiiPolicy>] The list of PII Redaction policies that were enabled, if PII Redaction is
258
+ # enabled.
259
+ # See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for
260
+ # more information.
261
+ # @param redact_pii_sub [AssemblyAI::Transcripts::SubstitutionPolicy] The replacement logic for detected PII, can be "entity_type" or "hash". See [PII
262
+ # redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more
263
+ # details.
264
+ # @param speaker_labels [Boolean] Whether [Speaker
265
+ # diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is
266
+ # enabled, can be true or false
267
+ # @param speakers_expected [Integer] Tell the speaker label model how many speakers it should attempt to identify, up
268
+ # to 10. See [Speaker
269
+ # diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for
270
+ # more details.
271
+ # @param content_safety [Boolean] Whether [Content
272
+ # Moderation](https://www.assemblyai.com/docs/models/content-moderation) is
273
+ # enabled, can be true or false
274
+ # @param content_safety_labels [AssemblyAI::Transcripts::ContentSafetyLabelsResult]
275
+ # @param iab_categories [Boolean] Whether [Topic
276
+ # Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled,
277
+ # can be true or false
278
+ # @param iab_categories_result [AssemblyAI::Transcripts::TopicDetectionModelResult]
279
+ # @param language_detection [Boolean] Whether [Automatic language
280
+ # detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection)
281
+ # is enabled, either true or false
282
+ # @param custom_spelling [Array<AssemblyAI::Transcripts::TranscriptCustomSpelling>] Customize how words are spelled and formatted using to and from values
283
+ # @param auto_chapters [Boolean] Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is
284
+ # enabled, can be true or false
285
+ # @param chapters [Array<AssemblyAI::Transcripts::Chapter>] An array of temporally sequential chapters for the audio file
286
+ # @param summarization [Boolean] Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is
287
+ # enabled, either true or false
288
+ # @param summary_type [String] The type of summary generated, if
289
+ # [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
77
290
  # @param summary_model [String] The Summarization model used to generate the summary,
78
- # if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
79
- # @param summary [String] The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
291
+ # if [Summarization](https://www.assemblyai.com/docs/models/summarization) is
292
+ # enabled
293
+ # @param summary [String] The generated summary of the media file, if
294
+ # [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
80
295
  # @param custom_topics [Boolean] Whether custom topics is enabled, either true or false
81
296
  # @param topics [Array<String>] The list of custom topics provided if custom topics is enabled
82
297
  # @param disfluencies [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
83
- # @param sentiment_analysis [Boolean] Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false
84
- # @param sentiment_analysis_results [Array<Transcripts::SentimentAnalysisResult>] An array of results for the Sentiment Analysis model, if it is enabled.
85
- # See [Sentiment analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.
86
- # @param entity_detection [Boolean] Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false
87
- # @param entities [Array<Transcripts::Entity>] An array of results for the Entity Detection model, if it is enabled.
88
- # See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.
89
- # @param speech_threshold [Float] Defaults to null. Reject audio files that contain less than this fraction of speech.
90
- # Valid values are in the range [0, 1] inclusive.
91
- # @param throttled [Boolean] True while a request is throttled and false when a request is no longer throttled
298
+ # @param sentiment_analysis [Boolean] Whether [Sentiment
299
+ # Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled,
300
+ # can be true or false
301
+ # @param sentiment_analysis_results [Array<AssemblyAI::Transcripts::SentimentAnalysisResult>] An array of results for the Sentiment Analysis model, if it is enabled.
302
+ # See [Sentiment
303
+ # analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more
304
+ # information.
305
+ # @param entity_detection [Boolean] Whether [Entity
306
+ # Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled,
307
+ # can be true or false
308
+ # @param entities [Array<AssemblyAI::Transcripts::Entity>] An array of results for the Entity Detection model, if it is enabled.
309
+ # See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection)
310
+ # for more information.
311
+ # @param speech_threshold [Float] Defaults to null. Reject audio files that contain less than this fraction of
312
+ # speech.
313
+ # Valid values are in the range [0, 1] inclusive.
314
+ # @param throttled [Boolean] True while a request is throttled and false when a request is no longer
315
+ # throttled
92
316
  # @param error [String] Error message of why the transcript failed
93
317
  # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
94
- # @return [Transcripts::Transcript]
95
- def initialize(id:, language_model:, acoustic_model:, status:, audio_url:, webhook_auth:, auto_highlights:, redact_pii:, summarization:, speech_model: nil, language_code: nil,
96
- text: nil, words: nil, utterances: nil, confidence: nil, audio_duration: nil, punctuate: nil, format_text: nil, dual_channel: nil, webhook_url: nil, webhook_status_code: nil, webhook_auth_header_name: nil, speed_boost: nil, auto_highlights_result: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_labels: nil, iab_categories: nil, iab_categories_result: nil, language_detection: nil, custom_spelling: nil, auto_chapters: nil, chapters: nil, summary_type: nil, summary_model: nil, summary: nil, custom_topics: nil, topics: nil, disfluencies: nil, sentiment_analysis: nil, sentiment_analysis_results: nil, entity_detection: nil, entities: nil, speech_threshold: nil, throttled: nil, error: nil, additional_properties: nil)
97
- # @type [String] The unique identifier of your transcript
318
+ # @return [AssemblyAI::Transcripts::Transcript]
319
+ def initialize(id:, language_model:, acoustic_model:, status:, audio_url:, webhook_auth:, auto_highlights:, redact_pii:, summarization:, language_code: OMIT, text: OMIT,
320
+ words: OMIT, utterances: OMIT, confidence: OMIT, audio_duration: OMIT, punctuate: OMIT, format_text: OMIT, dual_channel: OMIT, speech_model: OMIT, webhook_url: OMIT, webhook_status_code: OMIT, webhook_auth_header_name: OMIT, speed_boost: OMIT, auto_highlights_result: OMIT, audio_start_from: OMIT, audio_end_at: OMIT, word_boost: OMIT, boost_param: OMIT, filter_profanity: OMIT, redact_pii_audio: OMIT, redact_pii_audio_quality: OMIT, redact_pii_policies: OMIT, redact_pii_sub: OMIT, speaker_labels: OMIT, speakers_expected: OMIT, content_safety: OMIT, content_safety_labels: OMIT, iab_categories: OMIT, iab_categories_result: OMIT, language_detection: OMIT, custom_spelling: OMIT, auto_chapters: OMIT, chapters: OMIT, summary_type: OMIT, summary_model: OMIT, summary: OMIT, custom_topics: OMIT, topics: OMIT, disfluencies: OMIT, sentiment_analysis: OMIT, sentiment_analysis_results: OMIT, entity_detection: OMIT, entities: OMIT, speech_threshold: OMIT, throttled: OMIT, error: OMIT, additional_properties: nil)
98
321
  @id = id
99
- # @type [Transcripts::SpeechModel]
100
- @speech_model = speech_model
101
- # @type [String] The language model that was used for the transcript
102
322
  @language_model = language_model
103
- # @type [String] The acoustic model that was used for the transcript
104
323
  @acoustic_model = acoustic_model
105
- # @type [Transcripts::TranscriptStatus] The status of your transcript. Possible values are queued, processing, completed, or error.
106
324
  @status = status
107
- # @type [Transcripts::TranscriptLanguageCode] The language of your audio file.
108
- # Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
109
- # The default value is 'en_us'.
110
- @language_code = language_code
111
- # @type [String] The URL of the media that was transcribed
325
+ @language_code = language_code if language_code != OMIT
112
326
  @audio_url = audio_url
113
- # @type [String] The textual transcript of your media file
114
- @text = text
115
- # @type [Array<Transcripts::TranscriptWord>] An array of temporally-sequential word objects, one for each word in the transcript.
116
- # See [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.
117
- @words = words
118
- # @type [Array<Transcripts::TranscriptUtterance>] When dual_channel or speaker_labels is enabled, a list of turn-by-turn utterance objects.
119
- # See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more information.
120
- @utterances = utterances
121
- # @type [Float] The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)
122
- @confidence = confidence
123
- # @type [Float] The duration of this transcript object's media file, in seconds
124
- @audio_duration = audio_duration
125
- # @type [Boolean] Whether Automatic Punctuation is enabled, either true or false
126
- @punctuate = punctuate
127
- # @type [Boolean] Whether Text Formatting is enabled, either true or false
128
- @format_text = format_text
129
- # @type [Boolean] Whether [Dual channel transcription](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) was enabled in the transcription request, either true or false
130
- @dual_channel = dual_channel
131
- # @type [String] The URL to which we send webhooks upon trancription completion
132
- @webhook_url = webhook_url
133
- # @type [Integer] The status code we received from your server when delivering your webhook, if a webhook URL was provided
134
- @webhook_status_code = webhook_status_code
135
- # @type [Boolean] Whether webhook authentication details were provided
327
+ @text = text if text != OMIT
328
+ @words = words if words != OMIT
329
+ @utterances = utterances if utterances != OMIT
330
+ @confidence = confidence if confidence != OMIT
331
+ @audio_duration = audio_duration if audio_duration != OMIT
332
+ @punctuate = punctuate if punctuate != OMIT
333
+ @format_text = format_text if format_text != OMIT
334
+ @dual_channel = dual_channel if dual_channel != OMIT
335
+ @speech_model = speech_model if speech_model != OMIT
336
+ @webhook_url = webhook_url if webhook_url != OMIT
337
+ @webhook_status_code = webhook_status_code if webhook_status_code != OMIT
136
338
  @webhook_auth = webhook_auth
137
- # @type [String] The header name which should be sent back with webhook calls
138
- @webhook_auth_header_name = webhook_auth_header_name
139
- # @type [Boolean] Whether speed boost is enabled
140
- @speed_boost = speed_boost
141
- # @type [Boolean] Whether Key Phrases is enabled, either true or false
339
+ @webhook_auth_header_name = webhook_auth_header_name if webhook_auth_header_name != OMIT
340
+ @speed_boost = speed_boost if speed_boost != OMIT
142
341
  @auto_highlights = auto_highlights
143
- # @type [Transcripts::AutoHighlightsResult]
144
- @auto_highlights_result = auto_highlights_result
145
- # @type [Integer] The point in time, in milliseconds, in the file at which the transcription was started
146
- @audio_start_from = audio_start_from
147
- # @type [Integer] The point in time, in milliseconds, in the file at which the transcription was terminated
148
- @audio_end_at = audio_end_at
149
- # @type [Array<String>] The list of custom vocabulary to boost transcription probability for
150
- @word_boost = word_boost
151
- # @type [String] The word boost parameter value
152
- @boost_param = boost_param
153
- # @type [Boolean] Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false
154
- @filter_profanity = filter_profanity
155
- # @type [Boolean] Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false
342
+ @auto_highlights_result = auto_highlights_result if auto_highlights_result != OMIT
343
+ @audio_start_from = audio_start_from if audio_start_from != OMIT
344
+ @audio_end_at = audio_end_at if audio_end_at != OMIT
345
+ @word_boost = word_boost if word_boost != OMIT
346
+ @boost_param = boost_param if boost_param != OMIT
347
+ @filter_profanity = filter_profanity if filter_profanity != OMIT
156
348
  @redact_pii = redact_pii
157
- # @type [Boolean] Whether a redacted version of the audio file was generated,
158
- # either true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
159
- @redact_pii_audio = redact_pii_audio
160
- # @type [Transcripts::RedactPiiAudioQuality]
161
- @redact_pii_audio_quality = redact_pii_audio_quality
162
- # @type [Array<Transcripts::PiiPolicy>] The list of PII Redaction policies that were enabled, if PII Redaction is enabled.
163
- # See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
164
- @redact_pii_policies = redact_pii_policies
165
- # @type [Transcripts::SubstitutionPolicy] The replacement logic for detected PII, can be "entity_type" or "hash". See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
166
- @redact_pii_sub = redact_pii_sub
167
- # @type [Boolean] Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false
168
- @speaker_labels = speaker_labels
169
- # @type [Integer] Tell the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
170
- @speakers_expected = speakers_expected
171
- # @type [Boolean] Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false
172
- @content_safety = content_safety
173
- # @type [Transcripts::ContentSafetyLabelsResult]
174
- @content_safety_labels = content_safety_labels
175
- # @type [Boolean] Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false
176
- @iab_categories = iab_categories
177
- # @type [Transcripts::TopicDetectionModelResult]
178
- @iab_categories_result = iab_categories_result
179
- # @type [Boolean] Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) is enabled, either true or false
180
- @language_detection = language_detection
181
- # @type [Array<Transcripts::TranscriptCustomSpelling>] Customize how words are spelled and formatted using to and from values
182
- @custom_spelling = custom_spelling
183
- # @type [Boolean] Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false
184
- @auto_chapters = auto_chapters
185
- # @type [Array<Transcripts::Chapter>] An array of temporally sequential chapters for the audio file
186
- @chapters = chapters
187
- # @type [Boolean] Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false
349
+ @redact_pii_audio = redact_pii_audio if redact_pii_audio != OMIT
350
+ @redact_pii_audio_quality = redact_pii_audio_quality if redact_pii_audio_quality != OMIT
351
+ @redact_pii_policies = redact_pii_policies if redact_pii_policies != OMIT
352
+ @redact_pii_sub = redact_pii_sub if redact_pii_sub != OMIT
353
+ @speaker_labels = speaker_labels if speaker_labels != OMIT
354
+ @speakers_expected = speakers_expected if speakers_expected != OMIT
355
+ @content_safety = content_safety if content_safety != OMIT
356
+ @content_safety_labels = content_safety_labels if content_safety_labels != OMIT
357
+ @iab_categories = iab_categories if iab_categories != OMIT
358
+ @iab_categories_result = iab_categories_result if iab_categories_result != OMIT
359
+ @language_detection = language_detection if language_detection != OMIT
360
+ @custom_spelling = custom_spelling if custom_spelling != OMIT
361
+ @auto_chapters = auto_chapters if auto_chapters != OMIT
362
+ @chapters = chapters if chapters != OMIT
188
363
  @summarization = summarization
189
- # @type [String] The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
190
- @summary_type = summary_type
191
- # @type [String] The Summarization model used to generate the summary,
192
- # if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
193
- @summary_model = summary_model
194
- # @type [String] The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
195
- @summary = summary
196
- # @type [Boolean] Whether custom topics is enabled, either true or false
197
- @custom_topics = custom_topics
198
- # @type [Array<String>] The list of custom topics provided if custom topics is enabled
199
- @topics = topics
200
- # @type [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
201
- @disfluencies = disfluencies
202
- # @type [Boolean] Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false
203
- @sentiment_analysis = sentiment_analysis
204
- # @type [Array<Transcripts::SentimentAnalysisResult>] An array of results for the Sentiment Analysis model, if it is enabled.
205
- # See [Sentiment analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.
206
- @sentiment_analysis_results = sentiment_analysis_results
207
- # @type [Boolean] Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false
208
- @entity_detection = entity_detection
209
- # @type [Array<Transcripts::Entity>] An array of results for the Entity Detection model, if it is enabled.
210
- # See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.
211
- @entities = entities
212
- # @type [Float] Defaults to null. Reject audio files that contain less than this fraction of speech.
213
- # Valid values are in the range [0, 1] inclusive.
214
- @speech_threshold = speech_threshold
215
- # @type [Boolean] True while a request is throttled and false when a request is no longer throttled
216
- @throttled = throttled
217
- # @type [String] Error message of why the transcript failed
218
- @error = error
219
- # @type [OpenStruct] Additional properties unmapped to the current class definition
364
+ @summary_type = summary_type if summary_type != OMIT
365
+ @summary_model = summary_model if summary_model != OMIT
366
+ @summary = summary if summary != OMIT
367
+ @custom_topics = custom_topics if custom_topics != OMIT
368
+ @topics = topics if topics != OMIT
369
+ @disfluencies = disfluencies if disfluencies != OMIT
370
+ @sentiment_analysis = sentiment_analysis if sentiment_analysis != OMIT
371
+ @sentiment_analysis_results = sentiment_analysis_results if sentiment_analysis_results != OMIT
372
+ @entity_detection = entity_detection if entity_detection != OMIT
373
+ @entities = entities if entities != OMIT
374
+ @speech_threshold = speech_threshold if speech_threshold != OMIT
375
+ @throttled = throttled if throttled != OMIT
376
+ @error = error if error != OMIT
220
377
  @additional_properties = additional_properties
378
+ @_field_set = {
379
+ "id": id,
380
+ "language_model": language_model,
381
+ "acoustic_model": acoustic_model,
382
+ "status": status,
383
+ "language_code": language_code,
384
+ "audio_url": audio_url,
385
+ "text": text,
386
+ "words": words,
387
+ "utterances": utterances,
388
+ "confidence": confidence,
389
+ "audio_duration": audio_duration,
390
+ "punctuate": punctuate,
391
+ "format_text": format_text,
392
+ "dual_channel": dual_channel,
393
+ "speech_model": speech_model,
394
+ "webhook_url": webhook_url,
395
+ "webhook_status_code": webhook_status_code,
396
+ "webhook_auth": webhook_auth,
397
+ "webhook_auth_header_name": webhook_auth_header_name,
398
+ "speed_boost": speed_boost,
399
+ "auto_highlights": auto_highlights,
400
+ "auto_highlights_result": auto_highlights_result,
401
+ "audio_start_from": audio_start_from,
402
+ "audio_end_at": audio_end_at,
403
+ "word_boost": word_boost,
404
+ "boost_param": boost_param,
405
+ "filter_profanity": filter_profanity,
406
+ "redact_pii": redact_pii,
407
+ "redact_pii_audio": redact_pii_audio,
408
+ "redact_pii_audio_quality": redact_pii_audio_quality,
409
+ "redact_pii_policies": redact_pii_policies,
410
+ "redact_pii_sub": redact_pii_sub,
411
+ "speaker_labels": speaker_labels,
412
+ "speakers_expected": speakers_expected,
413
+ "content_safety": content_safety,
414
+ "content_safety_labels": content_safety_labels,
415
+ "iab_categories": iab_categories,
416
+ "iab_categories_result": iab_categories_result,
417
+ "language_detection": language_detection,
418
+ "custom_spelling": custom_spelling,
419
+ "auto_chapters": auto_chapters,
420
+ "chapters": chapters,
421
+ "summarization": summarization,
422
+ "summary_type": summary_type,
423
+ "summary_model": summary_model,
424
+ "summary": summary,
425
+ "custom_topics": custom_topics,
426
+ "topics": topics,
427
+ "disfluencies": disfluencies,
428
+ "sentiment_analysis": sentiment_analysis,
429
+ "sentiment_analysis_results": sentiment_analysis_results,
430
+ "entity_detection": entity_detection,
431
+ "entities": entities,
432
+ "speech_threshold": speech_threshold,
433
+ "throttled": throttled,
434
+ "error": error
435
+ }.reject do |_k, v|
436
+ v == OMIT
437
+ end
221
438
  end
222
439
 
223
440
  # Deserialize a JSON object to an instance of Transcript
224
441
  #
225
- # @param json_object [JSON]
226
- # @return [Transcripts::Transcript]
442
+ # @param json_object [String]
443
+ # @return [AssemblyAI::Transcripts::Transcript]
227
444
  def self.from_json(json_object:)
228
445
  struct = JSON.parse(json_object, object_class: OpenStruct)
229
446
  parsed_json = JSON.parse(json_object)
230
- id = struct.id
231
- speech_model = struct.speech_model
232
- language_model = struct.language_model
233
- acoustic_model = struct.acoustic_model
234
- status = struct.status
235
- language_code = struct.language_code
236
- audio_url = struct.audio_url
237
- text = struct.text
447
+ id = struct["id"]
448
+ language_model = struct["language_model"]
449
+ acoustic_model = struct["acoustic_model"]
450
+ status = struct["status"]
451
+ language_code = struct["language_code"]
452
+ audio_url = struct["audio_url"]
453
+ text = struct["text"]
238
454
  words = parsed_json["words"]&.map do |v|
239
455
  v = v.to_json
240
- Transcripts::TranscriptWord.from_json(json_object: v)
456
+ AssemblyAI::Transcripts::TranscriptWord.from_json(json_object: v)
241
457
  end
242
458
  utterances = parsed_json["utterances"]&.map do |v|
243
459
  v = v.to_json
244
- Transcripts::TranscriptUtterance.from_json(json_object: v)
460
+ AssemblyAI::Transcripts::TranscriptUtterance.from_json(json_object: v)
245
461
  end
246
- confidence = struct.confidence
247
- audio_duration = struct.audio_duration
248
- punctuate = struct.punctuate
249
- format_text = struct.format_text
250
- dual_channel = struct.dual_channel
251
- webhook_url = struct.webhook_url
252
- webhook_status_code = struct.webhook_status_code
253
- webhook_auth = struct.webhook_auth
254
- webhook_auth_header_name = struct.webhook_auth_header_name
255
- speed_boost = struct.speed_boost
256
- auto_highlights = struct.auto_highlights
462
+ confidence = struct["confidence"]
463
+ audio_duration = struct["audio_duration"]
464
+ punctuate = struct["punctuate"]
465
+ format_text = struct["format_text"]
466
+ dual_channel = struct["dual_channel"]
467
+ speech_model = struct["speech_model"]
468
+ webhook_url = struct["webhook_url"]
469
+ webhook_status_code = struct["webhook_status_code"]
470
+ webhook_auth = struct["webhook_auth"]
471
+ webhook_auth_header_name = struct["webhook_auth_header_name"]
472
+ speed_boost = struct["speed_boost"]
473
+ auto_highlights = struct["auto_highlights"]
257
474
  if parsed_json["auto_highlights_result"].nil?
258
475
  auto_highlights_result = nil
259
476
  else
260
477
  auto_highlights_result = parsed_json["auto_highlights_result"].to_json
261
- auto_highlights_result = Transcripts::AutoHighlightsResult.from_json(json_object: auto_highlights_result)
478
+ auto_highlights_result = AssemblyAI::Transcripts::AutoHighlightsResult.from_json(json_object: auto_highlights_result)
262
479
  end
263
- audio_start_from = struct.audio_start_from
264
- audio_end_at = struct.audio_end_at
265
- word_boost = struct.word_boost
266
- boost_param = struct.boost_param
267
- filter_profanity = struct.filter_profanity
268
- redact_pii = struct.redact_pii
269
- redact_pii_audio = struct.redact_pii_audio
270
- redact_pii_audio_quality = struct.redact_pii_audio_quality
271
- redact_pii_policies = struct.redact_pii_policies
272
- redact_pii_sub = struct.redact_pii_sub
273
- speaker_labels = struct.speaker_labels
274
- speakers_expected = struct.speakers_expected
275
- content_safety = struct.content_safety
480
+ audio_start_from = struct["audio_start_from"]
481
+ audio_end_at = struct["audio_end_at"]
482
+ word_boost = struct["word_boost"]
483
+ boost_param = struct["boost_param"]
484
+ filter_profanity = struct["filter_profanity"]
485
+ redact_pii = struct["redact_pii"]
486
+ redact_pii_audio = struct["redact_pii_audio"]
487
+ redact_pii_audio_quality = struct["redact_pii_audio_quality"]
488
+ redact_pii_policies = struct["redact_pii_policies"]
489
+ redact_pii_sub = struct["redact_pii_sub"]
490
+ speaker_labels = struct["speaker_labels"]
491
+ speakers_expected = struct["speakers_expected"]
492
+ content_safety = struct["content_safety"]
276
493
  if parsed_json["content_safety_labels"].nil?
277
494
  content_safety_labels = nil
278
495
  else
279
496
  content_safety_labels = parsed_json["content_safety_labels"].to_json
280
- content_safety_labels = Transcripts::ContentSafetyLabelsResult.from_json(json_object: content_safety_labels)
497
+ content_safety_labels = AssemblyAI::Transcripts::ContentSafetyLabelsResult.from_json(json_object: content_safety_labels)
281
498
  end
282
- iab_categories = struct.iab_categories
499
+ iab_categories = struct["iab_categories"]
283
500
  if parsed_json["iab_categories_result"].nil?
284
501
  iab_categories_result = nil
285
502
  else
286
503
  iab_categories_result = parsed_json["iab_categories_result"].to_json
287
- iab_categories_result = Transcripts::TopicDetectionModelResult.from_json(json_object: iab_categories_result)
504
+ iab_categories_result = AssemblyAI::Transcripts::TopicDetectionModelResult.from_json(json_object: iab_categories_result)
288
505
  end
289
- language_detection = struct.language_detection
506
+ language_detection = struct["language_detection"]
290
507
  custom_spelling = parsed_json["custom_spelling"]&.map do |v|
291
508
  v = v.to_json
292
- Transcripts::TranscriptCustomSpelling.from_json(json_object: v)
509
+ AssemblyAI::Transcripts::TranscriptCustomSpelling.from_json(json_object: v)
293
510
  end
294
- auto_chapters = struct.auto_chapters
511
+ auto_chapters = struct["auto_chapters"]
295
512
  chapters = parsed_json["chapters"]&.map do |v|
296
513
  v = v.to_json
297
- Transcripts::Chapter.from_json(json_object: v)
514
+ AssemblyAI::Transcripts::Chapter.from_json(json_object: v)
298
515
  end
299
- summarization = struct.summarization
300
- summary_type = struct.summary_type
301
- summary_model = struct.summary_model
302
- summary = struct.summary
303
- custom_topics = struct.custom_topics
304
- topics = struct.topics
305
- disfluencies = struct.disfluencies
306
- sentiment_analysis = struct.sentiment_analysis
516
+ summarization = struct["summarization"]
517
+ summary_type = struct["summary_type"]
518
+ summary_model = struct["summary_model"]
519
+ summary = struct["summary"]
520
+ custom_topics = struct["custom_topics"]
521
+ topics = struct["topics"]
522
+ disfluencies = struct["disfluencies"]
523
+ sentiment_analysis = struct["sentiment_analysis"]
307
524
  sentiment_analysis_results = parsed_json["sentiment_analysis_results"]&.map do |v|
308
525
  v = v.to_json
309
- Transcripts::SentimentAnalysisResult.from_json(json_object: v)
526
+ AssemblyAI::Transcripts::SentimentAnalysisResult.from_json(json_object: v)
310
527
  end
311
- entity_detection = struct.entity_detection
528
+ entity_detection = struct["entity_detection"]
312
529
  entities = parsed_json["entities"]&.map do |v|
313
530
  v = v.to_json
314
- Transcripts::Entity.from_json(json_object: v)
531
+ AssemblyAI::Transcripts::Entity.from_json(json_object: v)
315
532
  end
316
- speech_threshold = struct.speech_threshold
317
- throttled = struct.throttled
318
- error = struct.error
319
- new(id: id, speech_model: speech_model, language_model: language_model, acoustic_model: acoustic_model,
320
- status: status, language_code: language_code, audio_url: audio_url, text: text, words: words, utterances: utterances, confidence: confidence, audio_duration: audio_duration, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel, webhook_url: webhook_url, webhook_status_code: webhook_status_code, webhook_auth: webhook_auth, webhook_auth_header_name: webhook_auth_header_name, speed_boost: speed_boost, auto_highlights: auto_highlights, auto_highlights_result: auto_highlights_result, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_labels: content_safety_labels, iab_categories: iab_categories, iab_categories_result: iab_categories_result, language_detection: language_detection, custom_spelling: custom_spelling, auto_chapters: auto_chapters, chapters: chapters, summarization: summarization, summary_type: summary_type, summary_model: summary_model, summary: summary, custom_topics: custom_topics, topics: topics, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, sentiment_analysis_results: sentiment_analysis_results, entity_detection: entity_detection, entities: entities, speech_threshold: speech_threshold, throttled: throttled, error: error, additional_properties: struct)
533
+ speech_threshold = struct["speech_threshold"]
534
+ throttled = struct["throttled"]
535
+ error = struct["error"]
536
+ new(
537
+ id: id,
538
+ language_model: language_model,
539
+ acoustic_model: acoustic_model,
540
+ status: status,
541
+ language_code: language_code,
542
+ audio_url: audio_url,
543
+ text: text,
544
+ words: words,
545
+ utterances: utterances,
546
+ confidence: confidence,
547
+ audio_duration: audio_duration,
548
+ punctuate: punctuate,
549
+ format_text: format_text,
550
+ dual_channel: dual_channel,
551
+ speech_model: speech_model,
552
+ webhook_url: webhook_url,
553
+ webhook_status_code: webhook_status_code,
554
+ webhook_auth: webhook_auth,
555
+ webhook_auth_header_name: webhook_auth_header_name,
556
+ speed_boost: speed_boost,
557
+ auto_highlights: auto_highlights,
558
+ auto_highlights_result: auto_highlights_result,
559
+ audio_start_from: audio_start_from,
560
+ audio_end_at: audio_end_at,
561
+ word_boost: word_boost,
562
+ boost_param: boost_param,
563
+ filter_profanity: filter_profanity,
564
+ redact_pii: redact_pii,
565
+ redact_pii_audio: redact_pii_audio,
566
+ redact_pii_audio_quality: redact_pii_audio_quality,
567
+ redact_pii_policies: redact_pii_policies,
568
+ redact_pii_sub: redact_pii_sub,
569
+ speaker_labels: speaker_labels,
570
+ speakers_expected: speakers_expected,
571
+ content_safety: content_safety,
572
+ content_safety_labels: content_safety_labels,
573
+ iab_categories: iab_categories,
574
+ iab_categories_result: iab_categories_result,
575
+ language_detection: language_detection,
576
+ custom_spelling: custom_spelling,
577
+ auto_chapters: auto_chapters,
578
+ chapters: chapters,
579
+ summarization: summarization,
580
+ summary_type: summary_type,
581
+ summary_model: summary_model,
582
+ summary: summary,
583
+ custom_topics: custom_topics,
584
+ topics: topics,
585
+ disfluencies: disfluencies,
586
+ sentiment_analysis: sentiment_analysis,
587
+ sentiment_analysis_results: sentiment_analysis_results,
588
+ entity_detection: entity_detection,
589
+ entities: entities,
590
+ speech_threshold: speech_threshold,
591
+ throttled: throttled,
592
+ error: error,
593
+ additional_properties: struct
594
+ )
321
595
  end
322
596
 
323
597
  # Serialize an instance of Transcript to a JSON object
324
598
  #
325
- # @return [JSON]
599
+ # @return [String]
326
600
  def to_json(*_args)
327
- {
328
- "id": @id,
329
- "speech_model": @speech_model,
330
- "language_model": @language_model,
331
- "acoustic_model": @acoustic_model,
332
- "status": @status,
333
- "language_code": @language_code,
334
- "audio_url": @audio_url,
335
- "text": @text,
336
- "words": @words,
337
- "utterances": @utterances,
338
- "confidence": @confidence,
339
- "audio_duration": @audio_duration,
340
- "punctuate": @punctuate,
341
- "format_text": @format_text,
342
- "dual_channel": @dual_channel,
343
- "webhook_url": @webhook_url,
344
- "webhook_status_code": @webhook_status_code,
345
- "webhook_auth": @webhook_auth,
346
- "webhook_auth_header_name": @webhook_auth_header_name,
347
- "speed_boost": @speed_boost,
348
- "auto_highlights": @auto_highlights,
349
- "auto_highlights_result": @auto_highlights_result,
350
- "audio_start_from": @audio_start_from,
351
- "audio_end_at": @audio_end_at,
352
- "word_boost": @word_boost,
353
- "boost_param": @boost_param,
354
- "filter_profanity": @filter_profanity,
355
- "redact_pii": @redact_pii,
356
- "redact_pii_audio": @redact_pii_audio,
357
- "redact_pii_audio_quality": @redact_pii_audio_quality,
358
- "redact_pii_policies": @redact_pii_policies,
359
- "redact_pii_sub": @redact_pii_sub,
360
- "speaker_labels": @speaker_labels,
361
- "speakers_expected": @speakers_expected,
362
- "content_safety": @content_safety,
363
- "content_safety_labels": @content_safety_labels,
364
- "iab_categories": @iab_categories,
365
- "iab_categories_result": @iab_categories_result,
366
- "language_detection": @language_detection,
367
- "custom_spelling": @custom_spelling,
368
- "auto_chapters": @auto_chapters,
369
- "chapters": @chapters,
370
- "summarization": @summarization,
371
- "summary_type": @summary_type,
372
- "summary_model": @summary_model,
373
- "summary": @summary,
374
- "custom_topics": @custom_topics,
375
- "topics": @topics,
376
- "disfluencies": @disfluencies,
377
- "sentiment_analysis": @sentiment_analysis,
378
- "sentiment_analysis_results": @sentiment_analysis_results,
379
- "entity_detection": @entity_detection,
380
- "entities": @entities,
381
- "speech_threshold": @speech_threshold,
382
- "throttled": @throttled,
383
- "error": @error
384
- }.to_json
601
+ @_field_set&.to_json
385
602
  end
386
603
 
387
- # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
604
+ # Leveraged for Union-type generation, validate_raw attempts to parse the given
605
+ # hash and check each fields type against the current object's property
606
+ # definitions.
388
607
  #
389
608
  # @param obj [Object]
390
609
  # @return [Void]
391
610
  def self.validate_raw(obj:)
392
611
  obj.id.is_a?(String) != false || raise("Passed value for field obj.id is not the expected type, validation failed.")
393
- obj.speech_model&.is_a?(Transcripts::SpeechModel) != false || raise("Passed value for field obj.speech_model is not the expected type, validation failed.")
394
612
  obj.language_model.is_a?(String) != false || raise("Passed value for field obj.language_model is not the expected type, validation failed.")
395
613
  obj.acoustic_model.is_a?(String) != false || raise("Passed value for field obj.acoustic_model is not the expected type, validation failed.")
396
- obj.status.is_a?(Transcripts::TranscriptStatus) != false || raise("Passed value for field obj.status is not the expected type, validation failed.")
397
- obj.language_code&.is_a?(Transcripts::TranscriptLanguageCode) != false || raise("Passed value for field obj.language_code is not the expected type, validation failed.")
614
+ obj.status.is_a?(AssemblyAI::Transcripts::TranscriptStatus) != false || raise("Passed value for field obj.status is not the expected type, validation failed.")
615
+ obj.language_code&.is_a?(AssemblyAI::Transcripts::TranscriptLanguageCode) != false || raise("Passed value for field obj.language_code is not the expected type, validation failed.")
398
616
  obj.audio_url.is_a?(String) != false || raise("Passed value for field obj.audio_url is not the expected type, validation failed.")
399
617
  obj.text&.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
400
618
  obj.words&.is_a?(Array) != false || raise("Passed value for field obj.words is not the expected type, validation failed.")
@@ -404,13 +622,14 @@ module AssemblyAI
404
622
  obj.punctuate&.is_a?(Boolean) != false || raise("Passed value for field obj.punctuate is not the expected type, validation failed.")
405
623
  obj.format_text&.is_a?(Boolean) != false || raise("Passed value for field obj.format_text is not the expected type, validation failed.")
406
624
  obj.dual_channel&.is_a?(Boolean) != false || raise("Passed value for field obj.dual_channel is not the expected type, validation failed.")
625
+ obj.speech_model&.is_a?(AssemblyAI::Transcripts::SpeechModel) != false || raise("Passed value for field obj.speech_model is not the expected type, validation failed.")
407
626
  obj.webhook_url&.is_a?(String) != false || raise("Passed value for field obj.webhook_url is not the expected type, validation failed.")
408
627
  obj.webhook_status_code&.is_a?(Integer) != false || raise("Passed value for field obj.webhook_status_code is not the expected type, validation failed.")
409
628
  obj.webhook_auth.is_a?(Boolean) != false || raise("Passed value for field obj.webhook_auth is not the expected type, validation failed.")
410
629
  obj.webhook_auth_header_name&.is_a?(String) != false || raise("Passed value for field obj.webhook_auth_header_name is not the expected type, validation failed.")
411
630
  obj.speed_boost&.is_a?(Boolean) != false || raise("Passed value for field obj.speed_boost is not the expected type, validation failed.")
412
631
  obj.auto_highlights.is_a?(Boolean) != false || raise("Passed value for field obj.auto_highlights is not the expected type, validation failed.")
413
- obj.auto_highlights_result.nil? || Transcripts::AutoHighlightsResult.validate_raw(obj: obj.auto_highlights_result)
632
+ obj.auto_highlights_result.nil? || AssemblyAI::Transcripts::AutoHighlightsResult.validate_raw(obj: obj.auto_highlights_result)
414
633
  obj.audio_start_from&.is_a?(Integer) != false || raise("Passed value for field obj.audio_start_from is not the expected type, validation failed.")
415
634
  obj.audio_end_at&.is_a?(Integer) != false || raise("Passed value for field obj.audio_end_at is not the expected type, validation failed.")
416
635
  obj.word_boost&.is_a?(Array) != false || raise("Passed value for field obj.word_boost is not the expected type, validation failed.")
@@ -418,15 +637,15 @@ module AssemblyAI
418
637
  obj.filter_profanity&.is_a?(Boolean) != false || raise("Passed value for field obj.filter_profanity is not the expected type, validation failed.")
419
638
  obj.redact_pii.is_a?(Boolean) != false || raise("Passed value for field obj.redact_pii is not the expected type, validation failed.")
420
639
  obj.redact_pii_audio&.is_a?(Boolean) != false || raise("Passed value for field obj.redact_pii_audio is not the expected type, validation failed.")
421
- obj.redact_pii_audio_quality&.is_a?(Transcripts::RedactPiiAudioQuality) != false || raise("Passed value for field obj.redact_pii_audio_quality is not the expected type, validation failed.")
640
+ obj.redact_pii_audio_quality&.is_a?(AssemblyAI::Transcripts::RedactPiiAudioQuality) != false || raise("Passed value for field obj.redact_pii_audio_quality is not the expected type, validation failed.")
422
641
  obj.redact_pii_policies&.is_a?(Array) != false || raise("Passed value for field obj.redact_pii_policies is not the expected type, validation failed.")
423
- obj.redact_pii_sub&.is_a?(Transcripts::SubstitutionPolicy) != false || raise("Passed value for field obj.redact_pii_sub is not the expected type, validation failed.")
642
+ obj.redact_pii_sub&.is_a?(AssemblyAI::Transcripts::SubstitutionPolicy) != false || raise("Passed value for field obj.redact_pii_sub is not the expected type, validation failed.")
424
643
  obj.speaker_labels&.is_a?(Boolean) != false || raise("Passed value for field obj.speaker_labels is not the expected type, validation failed.")
425
644
  obj.speakers_expected&.is_a?(Integer) != false || raise("Passed value for field obj.speakers_expected is not the expected type, validation failed.")
426
645
  obj.content_safety&.is_a?(Boolean) != false || raise("Passed value for field obj.content_safety is not the expected type, validation failed.")
427
- obj.content_safety_labels.nil? || Transcripts::ContentSafetyLabelsResult.validate_raw(obj: obj.content_safety_labels)
646
+ obj.content_safety_labels.nil? || AssemblyAI::Transcripts::ContentSafetyLabelsResult.validate_raw(obj: obj.content_safety_labels)
428
647
  obj.iab_categories&.is_a?(Boolean) != false || raise("Passed value for field obj.iab_categories is not the expected type, validation failed.")
429
- obj.iab_categories_result.nil? || Transcripts::TopicDetectionModelResult.validate_raw(obj: obj.iab_categories_result)
648
+ obj.iab_categories_result.nil? || AssemblyAI::Transcripts::TopicDetectionModelResult.validate_raw(obj: obj.iab_categories_result)
430
649
  obj.language_detection&.is_a?(Boolean) != false || raise("Passed value for field obj.language_detection is not the expected type, validation failed.")
431
650
  obj.custom_spelling&.is_a?(Array) != false || raise("Passed value for field obj.custom_spelling is not the expected type, validation failed.")
432
651
  obj.auto_chapters&.is_a?(Boolean) != false || raise("Passed value for field obj.auto_chapters is not the expected type, validation failed.")