assemblyai 1.0.0.pre.beta

Sign up to get free protection for your applications and to get access to all the features.
Files changed (86) hide show
  1. checksums.yaml +7 -0
  2. data/lib/assemblyai/files/client.rb +63 -0
  3. data/lib/assemblyai/files/types/uploaded_file.rb +47 -0
  4. data/lib/assemblyai/lemur/client.rb +390 -0
  5. data/lib/assemblyai/lemur/types/lemur_action_items_response.rb +52 -0
  6. data/lib/assemblyai/lemur/types/lemur_base_params.rb +99 -0
  7. data/lib/assemblyai/lemur/types/lemur_base_params_context.rb +75 -0
  8. data/lib/assemblyai/lemur/types/lemur_base_response.rb +47 -0
  9. data/lib/assemblyai/lemur/types/lemur_model.rb +13 -0
  10. data/lib/assemblyai/lemur/types/lemur_question.rb +74 -0
  11. data/lib/assemblyai/lemur/types/lemur_question_answer.rb +53 -0
  12. data/lib/assemblyai/lemur/types/lemur_question_answer_response.rb +56 -0
  13. data/lib/assemblyai/lemur/types/lemur_question_context.rb +75 -0
  14. data/lib/assemblyai/lemur/types/lemur_summary_response.rb +52 -0
  15. data/lib/assemblyai/lemur/types/lemur_task_response.rb +52 -0
  16. data/lib/assemblyai/lemur/types/purge_lemur_request_data_response.rb +58 -0
  17. data/lib/assemblyai/realtime/client.rb +61 -0
  18. data/lib/assemblyai/realtime/types/audio_data.rb +7 -0
  19. data/lib/assemblyai/realtime/types/audio_encoding.rb +8 -0
  20. data/lib/assemblyai/realtime/types/final_transcript.rb +107 -0
  21. data/lib/assemblyai/realtime/types/message_type.rb +13 -0
  22. data/lib/assemblyai/realtime/types/partial_transcript.rb +94 -0
  23. data/lib/assemblyai/realtime/types/realtime_base_message.rb +48 -0
  24. data/lib/assemblyai/realtime/types/realtime_base_transcript.rb +87 -0
  25. data/lib/assemblyai/realtime/types/realtime_error.rb +47 -0
  26. data/lib/assemblyai/realtime/types/realtime_message.rb +115 -0
  27. data/lib/assemblyai/realtime/types/realtime_temporary_token_response.rb +47 -0
  28. data/lib/assemblyai/realtime/types/realtime_transcript.rb +76 -0
  29. data/lib/assemblyai/realtime/types/realtime_transcript_type.rb +8 -0
  30. data/lib/assemblyai/realtime/types/session_begins.rb +58 -0
  31. data/lib/assemblyai/realtime/types/session_terminated.rb +47 -0
  32. data/lib/assemblyai/realtime/types/terminate_session.rb +56 -0
  33. data/lib/assemblyai/realtime/types/word.rb +62 -0
  34. data/lib/assemblyai/transcripts/client.rb +525 -0
  35. data/lib/assemblyai/transcripts/polling_client.rb +173 -0
  36. data/lib/assemblyai/transcripts/types/audio_intelligence_model_status.rb +8 -0
  37. data/lib/assemblyai/transcripts/types/auto_highlight_result.rb +66 -0
  38. data/lib/assemblyai/transcripts/types/auto_highlights_result.rb +53 -0
  39. data/lib/assemblyai/transcripts/types/chapter.rb +68 -0
  40. data/lib/assemblyai/transcripts/types/content_safety_label.rb +57 -0
  41. data/lib/assemblyai/transcripts/types/content_safety_label_result.rb +84 -0
  42. data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +75 -0
  43. data/lib/assemblyai/transcripts/types/entity.rb +69 -0
  44. data/lib/assemblyai/transcripts/types/entity_type.rb +38 -0
  45. data/lib/assemblyai/transcripts/types/page_details.rb +74 -0
  46. data/lib/assemblyai/transcripts/types/paragraphs_response.rb +67 -0
  47. data/lib/assemblyai/transcripts/types/pii_policy.rb +36 -0
  48. data/lib/assemblyai/transcripts/types/polling_options.rb +21 -0
  49. data/lib/assemblyai/transcripts/types/redact_pii_audio_quality.rb +8 -0
  50. data/lib/assemblyai/transcripts/types/redacted_audio_response.rb +53 -0
  51. data/lib/assemblyai/transcripts/types/redacted_audio_status.rb +7 -0
  52. data/lib/assemblyai/transcripts/types/sentences_response.rb +67 -0
  53. data/lib/assemblyai/transcripts/types/sentiment.rb +8 -0
  54. data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +82 -0
  55. data/lib/assemblyai/transcripts/types/severity_score_summary.rb +57 -0
  56. data/lib/assemblyai/transcripts/types/speech_model.rb +7 -0
  57. data/lib/assemblyai/transcripts/types/substitution_policy.rb +8 -0
  58. data/lib/assemblyai/transcripts/types/subtitle_format.rb +8 -0
  59. data/lib/assemblyai/transcripts/types/summary_model.rb +8 -0
  60. data/lib/assemblyai/transcripts/types/summary_type.rb +14 -0
  61. data/lib/assemblyai/transcripts/types/timestamp.rb +53 -0
  62. data/lib/assemblyai/transcripts/types/topic_detection_model_result.rb +68 -0
  63. data/lib/assemblyai/transcripts/types/topic_detection_result.rb +68 -0
  64. data/lib/assemblyai/transcripts/types/topic_detection_result_labels_item.rb +52 -0
  65. data/lib/assemblyai/transcripts/types/transcript.rb +454 -0
  66. data/lib/assemblyai/transcripts/types/transcript_boost_param.rb +8 -0
  67. data/lib/assemblyai/transcripts/types/transcript_custom_spelling.rb +53 -0
  68. data/lib/assemblyai/transcripts/types/transcript_language_code.rb +29 -0
  69. data/lib/assemblyai/transcripts/types/transcript_list.rb +62 -0
  70. data/lib/assemblyai/transcripts/types/transcript_list_item.rb +82 -0
  71. data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +280 -0
  72. data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +84 -0
  73. data/lib/assemblyai/transcripts/types/transcript_sentence.rb +84 -0
  74. data/lib/assemblyai/transcripts/types/transcript_status.rb +8 -0
  75. data/lib/assemblyai/transcripts/types/transcript_utterance.rb +84 -0
  76. data/lib/assemblyai/transcripts/types/transcript_word.rb +68 -0
  77. data/lib/assemblyai/transcripts/types/word_search_match.rb +63 -0
  78. data/lib/assemblyai/transcripts/types/word_search_response.rb +61 -0
  79. data/lib/assemblyai/transcripts/types/word_search_timestamp.rb +7 -0
  80. data/lib/assemblyai/types/error.rb +50 -0
  81. data/lib/assemblyai.rb +48 -0
  82. data/lib/environment.rb +7 -0
  83. data/lib/gemconfig.rb +14 -0
  84. data/lib/requests.rb +87 -0
  85. data/lib/types_export.rb +75 -0
  86. metadata +170 -0
@@ -0,0 +1,454 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "speech_model"
4
+ require_relative "transcript_status"
5
+ require_relative "transcript_language_code"
6
+ require_relative "transcript_word"
7
+ require_relative "transcript_utterance"
8
+ require_relative "auto_highlights_result"
9
+ require_relative "redact_pii_audio_quality"
10
+ require_relative "pii_policy"
11
+ require_relative "substitution_policy"
12
+ require_relative "content_safety_labels_result"
13
+ require_relative "topic_detection_model_result"
14
+ require_relative "transcript_custom_spelling"
15
+ require_relative "chapter"
16
+ require_relative "sentiment_analysis_result"
17
+ require_relative "entity"
18
+ require "json"
19
+
20
+ module AssemblyAI
21
+ class Transcripts
22
+ # A transcript object
23
+ class Transcript
24
# Read-only accessors for every field of the transcript payload.
attr_reader :id, :speech_model, :language_model, :acoustic_model, :status,
            :language_code, :audio_url, :text, :words, :utterances,
            :confidence, :audio_duration, :punctuate, :format_text,
            :dual_channel, :webhook_url, :webhook_status_code, :webhook_auth,
            :webhook_auth_header_name, :speed_boost, :auto_highlights,
            :auto_highlights_result, :audio_start_from, :audio_end_at,
            :word_boost, :boost_param, :filter_profanity, :redact_pii,
            :redact_pii_audio, :redact_pii_audio_quality, :redact_pii_policies,
            :redact_pii_sub, :speaker_labels, :speakers_expected,
            :content_safety, :content_safety_labels, :iab_categories,
            :iab_categories_result, :language_detection, :custom_spelling,
            :auto_chapters, :chapters, :summarization, :summary_type,
            :summary_model, :summary, :custom_topics, :topics, :disfluencies,
            :sentiment_analysis, :sentiment_analysis_results,
            :entity_detection, :entities, :speech_threshold, :throttled,
            :error, :additional_properties
26
+
27
# Build a Transcript from its individual fields. Only the fields the API
# always returns are required; everything else defaults to nil.
#
# @param id [String] The unique identifier of your transcript
# @param language_model [String] The language model that was used for the transcript
# @param acoustic_model [String] The acoustic model that was used for the transcript
# @param status [TRANSCRIPT_STATUS] The status of your transcript: queued, processing, completed, or error
# @param audio_url [String] The URL of the media that was transcribed
# @param webhook_auth [Boolean] Whether webhook authentication details were provided
# @param auto_highlights [Boolean] Whether Key Phrases is enabled, either true or false
# @param redact_pii [Boolean] Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled
# @param summarization [Boolean] Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
# @param speech_model [Transcripts::SPEECH_MODEL]
# @param language_code [TRANSCRIPT_LANGUAGE_CODE] The language of your audio file; defaults to 'en_us'.
#   See [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
# @param text [String] The textual transcript of your media file
# @param words [Array<Transcripts::TranscriptWord>] Temporally-sequential word objects, one per word
# @param utterances [Array<Transcripts::TranscriptUtterance>] Turn-by-turn utterances, present when
#   dual_channel or speaker_labels is enabled
# @param confidence [Float] Transcript confidence, between 0.0 (low) and 1.0 (high)
# @param audio_duration [Float] Duration of the media file, in seconds
# @param punctuate [Boolean] Whether Automatic Punctuation is enabled
# @param format_text [Boolean] Whether Text Formatting is enabled
# @param dual_channel [Boolean] Whether dual-channel transcription was requested
# @param webhook_url [String] The URL to which we send webhooks upon transcription completion
# @param webhook_status_code [Integer] Status code received from your server when delivering the webhook
# @param webhook_auth_header_name [String] Header name sent back with webhook calls
# @param speed_boost [Boolean] Whether speed boost is enabled
# @param auto_highlights_result [Transcripts::AutoHighlightsResult]
# @param audio_start_from [Integer] Point in the file, in milliseconds, where transcription started
# @param audio_end_at [Integer] Point in the file, in milliseconds, where transcription ended
# @param word_boost [Array<String>] Custom vocabulary boosted during transcription
# @param boost_param [String] The word boost parameter value
# @param filter_profanity [Boolean] Whether Profanity Filtering is enabled
# @param redact_pii_audio [Boolean] Whether a redacted version of the audio file was generated
# @param redact_pii_audio_quality [REDACT_PII_AUDIO_QUALITY]
# @param redact_pii_policies [Array<Transcripts::PII_POLICY>] PII Redaction policies that were enabled
# @param redact_pii_sub [SUBSTITUTION_POLICY] Replacement logic for detected PII: "entity_type" or "hash"
# @param speaker_labels [Boolean] Whether Speaker diarization is enabled
# @param speakers_expected [Integer] Expected number of speakers, up to 10
# @param content_safety [Boolean] Whether Content Moderation is enabled
# @param content_safety_labels [Transcripts::ContentSafetyLabelsResult]
# @param iab_categories [Boolean] Whether Topic Detection is enabled
# @param iab_categories_result [Transcripts::TopicDetectionModelResult]
# @param language_detection [Boolean] Whether Automatic language detection is enabled
# @param custom_spelling [Array<Transcripts::TranscriptCustomSpelling>] Custom to/from spellings
# @param auto_chapters [Boolean] Whether Auto Chapters is enabled
# @param chapters [Array<Transcripts::Chapter>] Temporally sequential chapters for the audio file
# @param summary_type [String] The type of summary generated, if Summarization is enabled
# @param summary_model [String] The Summarization model used to generate the summary
# @param summary [String] The generated summary of the media file
# @param custom_topics [Boolean] Whether custom topics is enabled
# @param topics [Array<String>] Custom topics provided, if custom topics is enabled
# @param disfluencies [Boolean] Whether filler words, like "umm", are transcribed
# @param sentiment_analysis [Boolean] Whether Sentiment Analysis is enabled
# @param sentiment_analysis_results [Array<Transcripts::SentimentAnalysisResult>] Sentiment Analysis results
# @param entity_detection [Boolean] Whether Entity Detection is enabled
# @param entities [Array<Transcripts::Entity>] Entity Detection results
# @param speech_threshold [Float] Reject audio containing less than this fraction of speech,
#   in [0, 1] inclusive; defaults to null
# @param throttled [Boolean] True while a request is throttled, false once it no longer is
# @param error [String] Error message of why the transcript failed
# @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
# @return [Transcripts::Transcript]
def initialize(id:, language_model:, acoustic_model:, status:, audio_url:, webhook_auth:, auto_highlights:, redact_pii:, summarization:, speech_model: nil, language_code: nil,
               text: nil, words: nil, utterances: nil, confidence: nil, audio_duration: nil, punctuate: nil, format_text: nil, dual_channel: nil, webhook_url: nil,
               webhook_status_code: nil, webhook_auth_header_name: nil, speed_boost: nil, auto_highlights_result: nil, audio_start_from: nil, audio_end_at: nil,
               word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil,
               redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_labels: nil, iab_categories: nil,
               iab_categories_result: nil, language_detection: nil, custom_spelling: nil, auto_chapters: nil, chapters: nil, summary_type: nil, summary_model: nil,
               summary: nil, custom_topics: nil, topics: nil, disfluencies: nil, sentiment_analysis: nil, sentiment_analysis_results: nil, entity_detection: nil,
               entities: nil, speech_threshold: nil, throttled: nil, error: nil, additional_properties: nil)
  # Core identification and processing metadata
  @id = id
  @speech_model = speech_model
  @language_model = language_model
  @acoustic_model = acoustic_model
  @status = status
  @language_code = language_code
  # Transcription payload
  @audio_url = audio_url
  @text = text
  @words = words
  @utterances = utterances
  @confidence = confidence
  @audio_duration = audio_duration
  # Formatting options
  @punctuate = punctuate
  @format_text = format_text
  @dual_channel = dual_channel
  # Webhook delivery details
  @webhook_url = webhook_url
  @webhook_status_code = webhook_status_code
  @webhook_auth = webhook_auth
  @webhook_auth_header_name = webhook_auth_header_name
  @speed_boost = speed_boost
  # Key Phrases
  @auto_highlights = auto_highlights
  @auto_highlights_result = auto_highlights_result
  # Audio window and vocabulary boosting
  @audio_start_from = audio_start_from
  @audio_end_at = audio_end_at
  @word_boost = word_boost
  @boost_param = boost_param
  @filter_profanity = filter_profanity
  # PII Redaction
  @redact_pii = redact_pii
  @redact_pii_audio = redact_pii_audio
  @redact_pii_audio_quality = redact_pii_audio_quality
  @redact_pii_policies = redact_pii_policies
  @redact_pii_sub = redact_pii_sub
  # Speaker diarization
  @speaker_labels = speaker_labels
  @speakers_expected = speakers_expected
  # Content Moderation and Topic Detection
  @content_safety = content_safety
  @content_safety_labels = content_safety_labels
  @iab_categories = iab_categories
  @iab_categories_result = iab_categories_result
  @language_detection = language_detection
  @custom_spelling = custom_spelling
  # Auto Chapters
  @auto_chapters = auto_chapters
  @chapters = chapters
  # Summarization
  @summarization = summarization
  @summary_type = summary_type
  @summary_model = summary_model
  @summary = summary
  # Custom topics
  @custom_topics = custom_topics
  @topics = topics
  @disfluencies = disfluencies
  # Sentiment Analysis and Entity Detection
  @sentiment_analysis = sentiment_analysis
  @sentiment_analysis_results = sentiment_analysis_results
  @entity_detection = entity_detection
  @entities = entities
  # Misc
  @speech_threshold = speech_threshold
  @throttled = throttled
  @error = error
  # Catch-all for fields not mapped above
  @additional_properties = additional_properties
end
222
+
223
# Deserialize a JSON object to an instance of Transcript
#
# @param json_object [JSON] raw JSON string as returned by the API
# @return [Transcripts::Transcript]
def self.from_json(json_object:)
  # Parse twice: the OpenStruct gives cheap accessors for scalar fields and is
  # kept as additional_properties; the plain Hash drives nested-object parsing.
  struct = JSON.parse(json_object, object_class: OpenStruct)
  parsed_json = JSON.parse(json_object)
  id = struct.id
  speech_model = struct.speech_model
  language_model = struct.language_model
  acoustic_model = struct.acoustic_model
  # Enums are stored as {ruby_key => wire_value}; map the wire value back to
  # its key, falling back to the raw value for unknown entries.
  status = Transcripts::TRANSCRIPT_STATUS.key(parsed_json["status"]) || parsed_json["status"]
  language_code = Transcripts::TRANSCRIPT_LANGUAGE_CODE.key(parsed_json["language_code"]) || parsed_json["language_code"]
  audio_url = struct.audio_url
  text = struct.text
  # Nested objects are re-serialized element-by-element and handed to each
  # type's own from_json.
  words = parsed_json["words"]&.map do |v|
    Transcripts::TranscriptWord.from_json(json_object: v.to_json)
  end
  utterances = parsed_json["utterances"]&.map do |v|
    Transcripts::TranscriptUtterance.from_json(json_object: v.to_json)
  end
  confidence = struct.confidence
  audio_duration = struct.audio_duration
  punctuate = struct.punctuate
  format_text = struct.format_text
  dual_channel = struct.dual_channel
  webhook_url = struct.webhook_url
  webhook_status_code = struct.webhook_status_code
  webhook_auth = struct.webhook_auth
  webhook_auth_header_name = struct.webhook_auth_header_name
  speed_boost = struct.speed_boost
  auto_highlights = struct.auto_highlights
  if parsed_json["auto_highlights_result"].nil?
    auto_highlights_result = nil
  else
    auto_highlights_result = parsed_json["auto_highlights_result"].to_json
    auto_highlights_result = Transcripts::AutoHighlightsResult.from_json(json_object: auto_highlights_result)
  end
  audio_start_from = struct.audio_start_from
  audio_end_at = struct.audio_end_at
  word_boost = struct.word_boost
  boost_param = struct.boost_param
  filter_profanity = struct.filter_profanity
  redact_pii = struct.redact_pii
  redact_pii_audio = struct.redact_pii_audio
  redact_pii_audio_quality = Transcripts::REDACT_PII_AUDIO_QUALITY.key(parsed_json["redact_pii_audio_quality"]) || parsed_json["redact_pii_audio_quality"]
  # BUGFIX: each policy is already a plain string from JSON.parse; the previous
  # code serialized it with `v.to_json` first (adding literal quotes), so the
  # PII_POLICY reverse lookup could never match and a JSON-quoted string leaked
  # through. Look up the raw value, as done for status/redact_pii_sub above.
  redact_pii_policies = parsed_json["redact_pii_policies"]&.map do |v|
    Transcripts::PII_POLICY.key(v) || v
  end
  redact_pii_sub = Transcripts::SUBSTITUTION_POLICY.key(parsed_json["redact_pii_sub"]) || parsed_json["redact_pii_sub"]
  speaker_labels = struct.speaker_labels
  speakers_expected = struct.speakers_expected
  content_safety = struct.content_safety
  if parsed_json["content_safety_labels"].nil?
    content_safety_labels = nil
  else
    content_safety_labels = parsed_json["content_safety_labels"].to_json
    content_safety_labels = Transcripts::ContentSafetyLabelsResult.from_json(json_object: content_safety_labels)
  end
  iab_categories = struct.iab_categories
  if parsed_json["iab_categories_result"].nil?
    iab_categories_result = nil
  else
    iab_categories_result = parsed_json["iab_categories_result"].to_json
    iab_categories_result = Transcripts::TopicDetectionModelResult.from_json(json_object: iab_categories_result)
  end
  language_detection = struct.language_detection
  custom_spelling = parsed_json["custom_spelling"]&.map do |v|
    Transcripts::TranscriptCustomSpelling.from_json(json_object: v.to_json)
  end
  auto_chapters = struct.auto_chapters
  chapters = parsed_json["chapters"]&.map do |v|
    Transcripts::Chapter.from_json(json_object: v.to_json)
  end
  summarization = struct.summarization
  summary_type = struct.summary_type
  summary_model = struct.summary_model
  summary = struct.summary
  custom_topics = struct.custom_topics
  topics = struct.topics
  disfluencies = struct.disfluencies
  sentiment_analysis = struct.sentiment_analysis
  sentiment_analysis_results = parsed_json["sentiment_analysis_results"]&.map do |v|
    Transcripts::SentimentAnalysisResult.from_json(json_object: v.to_json)
  end
  entity_detection = struct.entity_detection
  entities = parsed_json["entities"]&.map do |v|
    Transcripts::Entity.from_json(json_object: v.to_json)
  end
  speech_threshold = struct.speech_threshold
  throttled = struct.throttled
  error = struct.error
  new(id: id, speech_model: speech_model, language_model: language_model, acoustic_model: acoustic_model,
      status: status, language_code: language_code, audio_url: audio_url, text: text, words: words, utterances: utterances, confidence: confidence, audio_duration: audio_duration, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel, webhook_url: webhook_url, webhook_status_code: webhook_status_code, webhook_auth: webhook_auth, webhook_auth_header_name: webhook_auth_header_name, speed_boost: speed_boost, auto_highlights: auto_highlights, auto_highlights_result: auto_highlights_result, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_labels: content_safety_labels, iab_categories: iab_categories, iab_categories_result: iab_categories_result, language_detection: language_detection, custom_spelling: custom_spelling, auto_chapters: auto_chapters, chapters: chapters, summarization: summarization, summary_type: summary_type, summary_model: summary_model, summary: summary, custom_topics: custom_topics, topics: topics, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, sentiment_analysis_results: sentiment_analysis_results, entity_detection: entity_detection, entities: entities, speech_threshold: speech_threshold, throttled: throttled, error: error, additional_properties: struct)
end
325
+
326
# Serialize an instance of Transcript to a JSON object
#
# Every attribute is emitted, nil ones included, so the payload mirrors the
# full transcript schema. Enum-backed fields (status, language_code,
# redact_pii_audio_quality, redact_pii_sub) are looked up in their enum Hash
# (symbol key -> wire string); a value that is not a key of the Hash — e.g.
# one already in wire-string form — falls through unchanged via `|| @field`.
#
# @return [JSON] the transcript serialized as a JSON string
def to_json(*_args)
  {
    "id": @id,
    "speech_model": @speech_model,
    "language_model": @language_model,
    "acoustic_model": @acoustic_model,
    # enum lookup: maps a symbolic value to its wire string, else passes through
    "status": Transcripts::TRANSCRIPT_STATUS[@status] || @status,
    "language_code": Transcripts::TRANSCRIPT_LANGUAGE_CODE[@language_code] || @language_code,
    "audio_url": @audio_url,
    "text": @text,
    "words": @words,
    "utterances": @utterances,
    "confidence": @confidence,
    "audio_duration": @audio_duration,
    "punctuate": @punctuate,
    "format_text": @format_text,
    "dual_channel": @dual_channel,
    "webhook_url": @webhook_url,
    "webhook_status_code": @webhook_status_code,
    "webhook_auth": @webhook_auth,
    "webhook_auth_header_name": @webhook_auth_header_name,
    "speed_boost": @speed_boost,
    "auto_highlights": @auto_highlights,
    "auto_highlights_result": @auto_highlights_result,
    "audio_start_from": @audio_start_from,
    "audio_end_at": @audio_end_at,
    "word_boost": @word_boost,
    "boost_param": @boost_param,
    "filter_profanity": @filter_profanity,
    "redact_pii": @redact_pii,
    "redact_pii_audio": @redact_pii_audio,
    "redact_pii_audio_quality": Transcripts::REDACT_PII_AUDIO_QUALITY[@redact_pii_audio_quality] || @redact_pii_audio_quality,
    "redact_pii_policies": @redact_pii_policies,
    "redact_pii_sub": Transcripts::SUBSTITUTION_POLICY[@redact_pii_sub] || @redact_pii_sub,
    "speaker_labels": @speaker_labels,
    "speakers_expected": @speakers_expected,
    "content_safety": @content_safety,
    "content_safety_labels": @content_safety_labels,
    "iab_categories": @iab_categories,
    "iab_categories_result": @iab_categories_result,
    "language_detection": @language_detection,
    "custom_spelling": @custom_spelling,
    "auto_chapters": @auto_chapters,
    "chapters": @chapters,
    "summarization": @summarization,
    "summary_type": @summary_type,
    "summary_model": @summary_model,
    "summary": @summary,
    "custom_topics": @custom_topics,
    "topics": @topics,
    "disfluencies": @disfluencies,
    "sentiment_analysis": @sentiment_analysis,
    "sentiment_analysis_results": @sentiment_analysis_results,
    "entity_detection": @entity_detection,
    "entities": @entities,
    "speech_threshold": @speech_threshold,
    "throttled": @throttled,
    "error": @error
  }.to_json
end
389
+
390
# Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
#
# Fixes over the generated code: enum fields were checked with
# `is_a?(EnumHash)`, which raises TypeError because `is_a?` requires a class
# or module argument, and boolean fields were checked with `is_a?(Boolean)`,
# a constant Ruby core does not define (NameError). Enum fields are now
# validated by membership in the enum Hash's values, and booleans against
# true/false (plus nil for optional fields). Error messages are unchanged.
#
# @param obj [Object] parsed object (typically an OpenStruct) to validate
# @return [Void]
# @raise [RuntimeError] when a field fails its type check
def self.validate_raw(obj:)
  obj.id.is_a?(String) != false || raise("Passed value for field obj.id is not the expected type, validation failed.")
  obj.speech_model&.is_a?(String) != false || raise("Passed value for field obj.speech_model is not the expected type, validation failed.")
  obj.language_model.is_a?(String) != false || raise("Passed value for field obj.language_model is not the expected type, validation failed.")
  obj.acoustic_model.is_a?(String) != false || raise("Passed value for field obj.acoustic_model is not the expected type, validation failed.")
  # required enum: the wire value must be one of the enum Hash's values
  Transcripts::TRANSCRIPT_STATUS.value?(obj.status) || raise("Passed value for field obj.status is not the expected type, validation failed.")
  # optional enum: nil is allowed, otherwise must be an enum value
  (obj.language_code.nil? || Transcripts::TRANSCRIPT_LANGUAGE_CODE.value?(obj.language_code)) || raise("Passed value for field obj.language_code is not the expected type, validation failed.")
  obj.audio_url.is_a?(String) != false || raise("Passed value for field obj.audio_url is not the expected type, validation failed.")
  obj.text&.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
  obj.words&.is_a?(Array) != false || raise("Passed value for field obj.words is not the expected type, validation failed.")
  obj.utterances&.is_a?(Array) != false || raise("Passed value for field obj.utterances is not the expected type, validation failed.")
  obj.confidence&.is_a?(Float) != false || raise("Passed value for field obj.confidence is not the expected type, validation failed.")
  obj.audio_duration&.is_a?(Float) != false || raise("Passed value for field obj.audio_duration is not the expected type, validation failed.")
  # optional booleans: nil, true or false
  [true, false, nil].include?(obj.punctuate) || raise("Passed value for field obj.punctuate is not the expected type, validation failed.")
  [true, false, nil].include?(obj.format_text) || raise("Passed value for field obj.format_text is not the expected type, validation failed.")
  [true, false, nil].include?(obj.dual_channel) || raise("Passed value for field obj.dual_channel is not the expected type, validation failed.")
  obj.webhook_url&.is_a?(String) != false || raise("Passed value for field obj.webhook_url is not the expected type, validation failed.")
  obj.webhook_status_code&.is_a?(Integer) != false || raise("Passed value for field obj.webhook_status_code is not the expected type, validation failed.")
  # required boolean: must be exactly true or false
  [true, false].include?(obj.webhook_auth) || raise("Passed value for field obj.webhook_auth is not the expected type, validation failed.")
  obj.webhook_auth_header_name&.is_a?(String) != false || raise("Passed value for field obj.webhook_auth_header_name is not the expected type, validation failed.")
  [true, false, nil].include?(obj.speed_boost) || raise("Passed value for field obj.speed_boost is not the expected type, validation failed.")
  [true, false].include?(obj.auto_highlights) || raise("Passed value for field obj.auto_highlights is not the expected type, validation failed.")
  obj.auto_highlights_result.nil? || Transcripts::AutoHighlightsResult.validate_raw(obj: obj.auto_highlights_result)
  obj.audio_start_from&.is_a?(Integer) != false || raise("Passed value for field obj.audio_start_from is not the expected type, validation failed.")
  obj.audio_end_at&.is_a?(Integer) != false || raise("Passed value for field obj.audio_end_at is not the expected type, validation failed.")
  obj.word_boost&.is_a?(Array) != false || raise("Passed value for field obj.word_boost is not the expected type, validation failed.")
  obj.boost_param&.is_a?(String) != false || raise("Passed value for field obj.boost_param is not the expected type, validation failed.")
  [true, false, nil].include?(obj.filter_profanity) || raise("Passed value for field obj.filter_profanity is not the expected type, validation failed.")
  [true, false].include?(obj.redact_pii) || raise("Passed value for field obj.redact_pii is not the expected type, validation failed.")
  [true, false, nil].include?(obj.redact_pii_audio) || raise("Passed value for field obj.redact_pii_audio is not the expected type, validation failed.")
  (obj.redact_pii_audio_quality.nil? || Transcripts::REDACT_PII_AUDIO_QUALITY.value?(obj.redact_pii_audio_quality)) || raise("Passed value for field obj.redact_pii_audio_quality is not the expected type, validation failed.")
  obj.redact_pii_policies&.is_a?(Array) != false || raise("Passed value for field obj.redact_pii_policies is not the expected type, validation failed.")
  (obj.redact_pii_sub.nil? || Transcripts::SUBSTITUTION_POLICY.value?(obj.redact_pii_sub)) || raise("Passed value for field obj.redact_pii_sub is not the expected type, validation failed.")
  [true, false, nil].include?(obj.speaker_labels) || raise("Passed value for field obj.speaker_labels is not the expected type, validation failed.")
  obj.speakers_expected&.is_a?(Integer) != false || raise("Passed value for field obj.speakers_expected is not the expected type, validation failed.")
  [true, false, nil].include?(obj.content_safety) || raise("Passed value for field obj.content_safety is not the expected type, validation failed.")
  obj.content_safety_labels.nil? || Transcripts::ContentSafetyLabelsResult.validate_raw(obj: obj.content_safety_labels)
  [true, false, nil].include?(obj.iab_categories) || raise("Passed value for field obj.iab_categories is not the expected type, validation failed.")
  obj.iab_categories_result.nil? || Transcripts::TopicDetectionModelResult.validate_raw(obj: obj.iab_categories_result)
  [true, false, nil].include?(obj.language_detection) || raise("Passed value for field obj.language_detection is not the expected type, validation failed.")
  obj.custom_spelling&.is_a?(Array) != false || raise("Passed value for field obj.custom_spelling is not the expected type, validation failed.")
  [true, false, nil].include?(obj.auto_chapters) || raise("Passed value for field obj.auto_chapters is not the expected type, validation failed.")
  obj.chapters&.is_a?(Array) != false || raise("Passed value for field obj.chapters is not the expected type, validation failed.")
  [true, false].include?(obj.summarization) || raise("Passed value for field obj.summarization is not the expected type, validation failed.")
  obj.summary_type&.is_a?(String) != false || raise("Passed value for field obj.summary_type is not the expected type, validation failed.")
  obj.summary_model&.is_a?(String) != false || raise("Passed value for field obj.summary_model is not the expected type, validation failed.")
  obj.summary&.is_a?(String) != false || raise("Passed value for field obj.summary is not the expected type, validation failed.")
  [true, false, nil].include?(obj.custom_topics) || raise("Passed value for field obj.custom_topics is not the expected type, validation failed.")
  obj.topics&.is_a?(Array) != false || raise("Passed value for field obj.topics is not the expected type, validation failed.")
  [true, false, nil].include?(obj.disfluencies) || raise("Passed value for field obj.disfluencies is not the expected type, validation failed.")
  [true, false, nil].include?(obj.sentiment_analysis) || raise("Passed value for field obj.sentiment_analysis is not the expected type, validation failed.")
  obj.sentiment_analysis_results&.is_a?(Array) != false || raise("Passed value for field obj.sentiment_analysis_results is not the expected type, validation failed.")
  [true, false, nil].include?(obj.entity_detection) || raise("Passed value for field obj.entity_detection is not the expected type, validation failed.")
  obj.entities&.is_a?(Array) != false || raise("Passed value for field obj.entities is not the expected type, validation failed.")
  obj.speech_threshold&.is_a?(Float) != false || raise("Passed value for field obj.speech_threshold is not the expected type, validation failed.")
  [true, false, nil].include?(obj.throttled) || raise("Passed value for field obj.throttled is not the expected type, validation failed.")
  obj.error&.is_a?(String) != false || raise("Passed value for field obj.error is not the expected type, validation failed.")
end
452
+ end
453
+ end
454
+ end
@@ -0,0 +1,8 @@
1
# frozen_string_literal: true

module AssemblyAI
  class Transcripts
    # Enum of word-boost intensity levels, keyed by symbol with the wire
    # string as the value.
    # @type [TRANSCRIPT_BOOST_PARAM]
    TRANSCRIPT_BOOST_PARAM = %w[low default high].each_with_object({}) do |level, mapping|
      mapping[level.to_sym] = level
    end.freeze
  end
end
@@ -0,0 +1,53 @@
1
# frozen_string_literal: true

require "json"

module AssemblyAI
  class Transcripts
    # Object containing words or phrases to replace, and the word or phrase to replace with
    class TranscriptCustomSpelling
      attr_reader :from, :to, :additional_properties

      # @param from [Array<String>] Words or phrases to replace
      # @param to [String] Word or phrase to replace with
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptCustomSpelling]
      def initialize(from:, to:, additional_properties: nil)
        # @type [Array<String>] Words or phrases to replace
        @from = from
        # @type [String] Word or phrase to replace with
        @to = to
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptCustomSpelling
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptCustomSpelling]
      def self.from_json(json_object:)
        # Parse into an OpenStruct so unmapped fields survive in additional_properties.
        # (Fixed: the generated code also ran a second JSON.parse of the same
        # payload and discarded the result — dead work, removed.)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        new(from: struct.from, to: struct.to, additional_properties: struct)
      end

      # Serialize an instance of TranscriptCustomSpelling to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        { "from": @from, "to": @to }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      # @raise [RuntimeError] when a field fails its type check
      def self.validate_raw(obj:)
        obj.from.is_a?(Array) != false || raise("Passed value for field obj.from is not the expected type, validation failed.")
        obj.to.is_a?(String) != false || raise("Passed value for field obj.to is not the expected type, validation failed.")
      end
    end
  end
end
@@ -0,0 +1,29 @@
1
# frozen_string_literal: true

module AssemblyAI
  class Transcripts
    # Enum of supported transcription language codes, keyed by symbol with
    # the wire string as the value.
    # @type [TRANSCRIPT_LANGUAGE_CODE]
    TRANSCRIPT_LANGUAGE_CODE = %w[
      en en_au en_uk en_us es fr de it pt nl hi ja zh fi ko pl ru tr uk vi
    ].to_h { |code| [code.to_sym, code] }.freeze
  end
end
@@ -0,0 +1,62 @@
1
# frozen_string_literal: true

require_relative "page_details"
require_relative "transcript_list_item"
require "json"

module AssemblyAI
  class Transcripts
    # A page of transcripts together with its pagination details.
    class TranscriptList
      attr_reader :page_details, :transcripts, :additional_properties

      # @param page_details [Transcripts::PageDetails]
      # @param transcripts [Array<Transcripts::TranscriptListItem>]
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptList]
      def initialize(page_details:, transcripts:, additional_properties: nil)
        # @type [Transcripts::PageDetails]
        @page_details = page_details
        # @type [Array<Transcripts::TranscriptListItem>]
        @transcripts = transcripts
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptList
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptList]
      def self.from_json(json_object:)
        # Keep the OpenStruct view so unmapped fields survive in additional_properties.
        raw = JSON.parse(json_object, object_class: OpenStruct)
        fields = JSON.parse(json_object)
        details = fields["page_details"]
        page_details = details.nil? ? nil : Transcripts::PageDetails.from_json(json_object: details.to_json)
        transcripts = fields["transcripts"]&.map do |item|
          Transcripts::TranscriptListItem.from_json(json_object: item.to_json)
        end
        new(page_details: page_details, transcripts: transcripts, additional_properties: raw)
      end

      # Serialize an instance of TranscriptList to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        { "page_details": @page_details, "transcripts": @transcripts }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      # @raise [RuntimeError] when a field fails its type check
      def self.validate_raw(obj:)
        Transcripts::PageDetails.validate_raw(obj: obj.page_details)
        obj.transcripts.is_a?(Array) != false || raise("Passed value for field obj.transcripts is not the expected type, validation failed.")
      end
    end
  end
end