assemblyai 1.0.0.pre.beta
- checksums.yaml +7 -0
- data/lib/assemblyai/files/client.rb +63 -0
- data/lib/assemblyai/files/types/uploaded_file.rb +47 -0
- data/lib/assemblyai/lemur/client.rb +390 -0
- data/lib/assemblyai/lemur/types/lemur_action_items_response.rb +52 -0
- data/lib/assemblyai/lemur/types/lemur_base_params.rb +99 -0
- data/lib/assemblyai/lemur/types/lemur_base_params_context.rb +75 -0
- data/lib/assemblyai/lemur/types/lemur_base_response.rb +47 -0
- data/lib/assemblyai/lemur/types/lemur_model.rb +13 -0
- data/lib/assemblyai/lemur/types/lemur_question.rb +74 -0
- data/lib/assemblyai/lemur/types/lemur_question_answer.rb +53 -0
- data/lib/assemblyai/lemur/types/lemur_question_answer_response.rb +56 -0
- data/lib/assemblyai/lemur/types/lemur_question_context.rb +75 -0
- data/lib/assemblyai/lemur/types/lemur_summary_response.rb +52 -0
- data/lib/assemblyai/lemur/types/lemur_task_response.rb +52 -0
- data/lib/assemblyai/lemur/types/purge_lemur_request_data_response.rb +58 -0
- data/lib/assemblyai/realtime/client.rb +61 -0
- data/lib/assemblyai/realtime/types/audio_data.rb +7 -0
- data/lib/assemblyai/realtime/types/audio_encoding.rb +8 -0
- data/lib/assemblyai/realtime/types/final_transcript.rb +107 -0
- data/lib/assemblyai/realtime/types/message_type.rb +13 -0
- data/lib/assemblyai/realtime/types/partial_transcript.rb +94 -0
- data/lib/assemblyai/realtime/types/realtime_base_message.rb +48 -0
- data/lib/assemblyai/realtime/types/realtime_base_transcript.rb +87 -0
- data/lib/assemblyai/realtime/types/realtime_error.rb +47 -0
- data/lib/assemblyai/realtime/types/realtime_message.rb +115 -0
- data/lib/assemblyai/realtime/types/realtime_temporary_token_response.rb +47 -0
- data/lib/assemblyai/realtime/types/realtime_transcript.rb +76 -0
- data/lib/assemblyai/realtime/types/realtime_transcript_type.rb +8 -0
- data/lib/assemblyai/realtime/types/session_begins.rb +58 -0
- data/lib/assemblyai/realtime/types/session_terminated.rb +47 -0
- data/lib/assemblyai/realtime/types/terminate_session.rb +56 -0
- data/lib/assemblyai/realtime/types/word.rb +62 -0
- data/lib/assemblyai/transcripts/client.rb +525 -0
- data/lib/assemblyai/transcripts/polling_client.rb +173 -0
- data/lib/assemblyai/transcripts/types/audio_intelligence_model_status.rb +8 -0
- data/lib/assemblyai/transcripts/types/auto_highlight_result.rb +66 -0
- data/lib/assemblyai/transcripts/types/auto_highlights_result.rb +53 -0
- data/lib/assemblyai/transcripts/types/chapter.rb +68 -0
- data/lib/assemblyai/transcripts/types/content_safety_label.rb +57 -0
- data/lib/assemblyai/transcripts/types/content_safety_label_result.rb +84 -0
- data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +75 -0
- data/lib/assemblyai/transcripts/types/entity.rb +69 -0
- data/lib/assemblyai/transcripts/types/entity_type.rb +38 -0
- data/lib/assemblyai/transcripts/types/page_details.rb +74 -0
- data/lib/assemblyai/transcripts/types/paragraphs_response.rb +67 -0
- data/lib/assemblyai/transcripts/types/pii_policy.rb +36 -0
- data/lib/assemblyai/transcripts/types/polling_options.rb +21 -0
- data/lib/assemblyai/transcripts/types/redact_pii_audio_quality.rb +8 -0
- data/lib/assemblyai/transcripts/types/redacted_audio_response.rb +53 -0
- data/lib/assemblyai/transcripts/types/redacted_audio_status.rb +7 -0
- data/lib/assemblyai/transcripts/types/sentences_response.rb +67 -0
- data/lib/assemblyai/transcripts/types/sentiment.rb +8 -0
- data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +82 -0
- data/lib/assemblyai/transcripts/types/severity_score_summary.rb +57 -0
- data/lib/assemblyai/transcripts/types/speech_model.rb +7 -0
- data/lib/assemblyai/transcripts/types/substitution_policy.rb +8 -0
- data/lib/assemblyai/transcripts/types/subtitle_format.rb +8 -0
- data/lib/assemblyai/transcripts/types/summary_model.rb +8 -0
- data/lib/assemblyai/transcripts/types/summary_type.rb +14 -0
- data/lib/assemblyai/transcripts/types/timestamp.rb +53 -0
- data/lib/assemblyai/transcripts/types/topic_detection_model_result.rb +68 -0
- data/lib/assemblyai/transcripts/types/topic_detection_result.rb +68 -0
- data/lib/assemblyai/transcripts/types/topic_detection_result_labels_item.rb +52 -0
- data/lib/assemblyai/transcripts/types/transcript.rb +454 -0
- data/lib/assemblyai/transcripts/types/transcript_boost_param.rb +8 -0
- data/lib/assemblyai/transcripts/types/transcript_custom_spelling.rb +53 -0
- data/lib/assemblyai/transcripts/types/transcript_language_code.rb +29 -0
- data/lib/assemblyai/transcripts/types/transcript_list.rb +62 -0
- data/lib/assemblyai/transcripts/types/transcript_list_item.rb +82 -0
- data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +280 -0
- data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_sentence.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_status.rb +8 -0
- data/lib/assemblyai/transcripts/types/transcript_utterance.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_word.rb +68 -0
- data/lib/assemblyai/transcripts/types/word_search_match.rb +63 -0
- data/lib/assemblyai/transcripts/types/word_search_response.rb +61 -0
- data/lib/assemblyai/transcripts/types/word_search_timestamp.rb +7 -0
- data/lib/assemblyai/types/error.rb +50 -0
- data/lib/assemblyai.rb +48 -0
- data/lib/environment.rb +7 -0
- data/lib/gemconfig.rb +14 -0
- data/lib/requests.rb +87 -0
- data/lib/types_export.rb +75 -0
- metadata +170 -0

data/lib/assemblyai/transcripts/polling_client.rb
@@ -0,0 +1,173 @@
# frozen_string_literal: true

require_relative "../../requests"
require_relative "types/transcript_status"
require_relative "types/transcript_list"
require_relative "types/transcript_language_code"
require_relative "types/transcript_boost_param"
require_relative "types/redact_pii_audio_quality"
require_relative "types/pii_policy"
require_relative "types/substitution_policy"
require_relative "types/transcript_custom_spelling"
require_relative "types/summary_model"
require_relative "types/summary_type"
require_relative "types/transcript"
require_relative "types/subtitle_format"
require_relative "types/sentences_response"
require_relative "types/paragraphs_response"
require_relative "types/word_search_response"
require_relative "types/redacted_audio_response"
require_relative "types/polling_options"
require "async"

module AssemblyAI
  # :nodoc:
  class TranscriptsClient
    # Create a transcript from an audio or video file that is accessible via a URL.
    # .transcribe polls for completion of the transcription, while the .submit function does not.
    #
    # @param language_code [TRANSCRIPT_LANGUAGE_CODE]
    # @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
    # @param format_text [Boolean] Enable Text Formatting, can be true or false
    # @param dual_channel [Boolean] Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.
    # @param webhook_url [String] The URL to which AssemblyAI sends webhooks upon transcription completion
    # @param webhook_auth_header_name [String] The header name which should be sent back with webhook calls
    # @param webhook_auth_header_value [String] Specify a header name and value to send back with a webhook call for added security
    # @param auto_highlights [Boolean] Whether Key Phrases is enabled, either true or false
    # @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
    # @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
    # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
    # @param boost_param [TRANSCRIPT_BOOST_PARAM] The word boost parameter value
    # @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
    # @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or false
    # @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
    # @param redact_pii_audio_quality [REDACT_PII_AUDIO_QUALITY] Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
    # @param redact_pii_policies [Array<Transcripts::PII_POLICY>] The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
    # @param redact_pii_sub [SUBSTITUTION_POLICY]
    # @param speaker_labels [Boolean] Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
    # @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
    # @param content_safety [Boolean] Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
    # @param content_safety_confidence [Integer] The confidence threshold for content moderation. Values must be between 25 and 100.
    # @param iab_categories [Boolean] Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
    # @param language_detection [Boolean] Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) was enabled in the transcription request, either true or false.
    # @param custom_spelling [Array<Hash>] Customize how words are spelled and formatted using to and from values. Request of type Array<Transcripts::TranscriptCustomSpelling>, as a Hash
    #   * :from (Array<String>)
    #   * :to (String)
    # @param disfluencies [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
    # @param sentiment_analysis [Boolean] Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
    # @param auto_chapters [Boolean] Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
    # @param entity_detection [Boolean] Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
    # @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
    #   Valid values are in the range [0, 1] inclusive.
    # @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
    # @param summary_model [SUMMARY_MODEL] The model to summarize the transcript
    # @param summary_type [SUMMARY_TYPE] The type of summary
    # @param custom_topics [Boolean] Whether custom topics is enabled, either true or false
    # @param topics [Array<String>] The list of custom topics provided, if custom topics is enabled
    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
    # @param audio_url [String] The URL of the audio or video file to transcribe.
    # @param request_options [RequestOptions]
    # @param polling_options [Transcripts::PollingOptions] Configuration options for polling requests.
    # @return [Transcripts::Transcript]
    def transcribe(audio_url:, language_code: nil, punctuate: nil, format_text: nil, dual_channel: nil, webhook_url: nil,
                   webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
      transcript = submit(audio_url: audio_url, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel, webhook_url: webhook_url,
                          webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: additional_properties, request_options: request_options)
      poll_transcript(transcript_id: transcript.id, polling_options: polling_options)
    end

    def poll_transcript(transcript_id:, polling_options:)
      start_time = Time.now
      timeout_in_seconds = polling_options.timeout / 1000 if polling_options.timeout.positive?
      loop do
        transcript = get(transcript_id: transcript_id)
        if transcript.status == :completed || transcript.status == :error
          return transcript
        elsif polling_options.timeout.positive? && Time.now - start_time > timeout_in_seconds
          raise StandardError, "Polling timeout"
        end

        sleep polling_options.interval / 1000
      end
    end

    private :poll_transcript
  end

  # :nodoc:
  class AsyncTranscriptsClient
    # Create a transcript from an audio or video file that is accessible via a URL.
    # .transcribe polls for completion of the transcription, while the .submit function does not.
    #
    # @param language_code [TRANSCRIPT_LANGUAGE_CODE]
    # @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
    # @param format_text [Boolean] Enable Text Formatting, can be true or false
    # @param dual_channel [Boolean] Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.
    # @param webhook_url [String] The URL to which AssemblyAI sends webhooks upon transcription completion
    # @param webhook_auth_header_name [String] The header name which should be sent back with webhook calls
    # @param webhook_auth_header_value [String] Specify a header name and value to send back with a webhook call for added security
    # @param auto_highlights [Boolean] Whether Key Phrases is enabled, either true or false
    # @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
    # @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
    # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
    # @param boost_param [TRANSCRIPT_BOOST_PARAM] The word boost parameter value
    # @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
    # @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or false
    # @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
    # @param redact_pii_audio_quality [REDACT_PII_AUDIO_QUALITY] Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
    # @param redact_pii_policies [Array<Transcripts::PII_POLICY>] The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
    # @param redact_pii_sub [SUBSTITUTION_POLICY]
    # @param speaker_labels [Boolean] Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
    # @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
    # @param content_safety [Boolean] Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
    # @param content_safety_confidence [Integer] The confidence threshold for content moderation. Values must be between 25 and 100.
    # @param iab_categories [Boolean] Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
    # @param language_detection [Boolean] Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) was enabled in the transcription request, either true or false.
    # @param custom_spelling [Array<Hash>] Customize how words are spelled and formatted using to and from values. Request of type Array<Transcripts::TranscriptCustomSpelling>, as a Hash
    #   * :from (Array<String>)
    #   * :to (String)
    # @param disfluencies [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
    # @param sentiment_analysis [Boolean] Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
    # @param auto_chapters [Boolean] Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
    # @param entity_detection [Boolean] Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
    # @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
    #   Valid values are in the range [0, 1] inclusive.
    # @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
    # @param summary_model [SUMMARY_MODEL] The model to summarize the transcript
    # @param summary_type [SUMMARY_TYPE] The type of summary
    # @param custom_topics [Boolean] Whether custom topics is enabled, either true or false
    # @param topics [Array<String>] The list of custom topics provided, if custom topics is enabled
    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
    # @param audio_url [String] The URL of the audio or video file to transcribe.
    # @param request_options [RequestOptions]
    # @param polling_options [PollingOptions] Configuration options for polling requests.
    # @return [Transcripts::Transcript]
    def transcribe(audio_url:, language_code: nil, punctuate: nil, format_text: nil, dual_channel: nil, webhook_url: nil,
                   webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
      Async do
        transcript = submit(audio_url: audio_url, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel, webhook_url: webhook_url,
                            webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: additional_properties, request_options: request_options).wait
        poll_transcript(transcript_id: transcript.id, polling_options: polling_options).wait
      end
    end

    def poll_transcript(transcript_id:, polling_options:)
      Async do
        start_time = Time.now
        timeout_in_seconds = polling_options.timeout / 1000 if polling_options.timeout.positive?
        loop do
          transcript = get(transcript_id: transcript_id).wait
          if transcript.status == :completed || transcript.status == :error
            break transcript
          elsif polling_options.timeout.positive? && Time.now - start_time > timeout_in_seconds
            raise StandardError, "Polling timeout"
          end

          sleep polling_options.interval / 1000
        end
      end
    end

    private :poll_transcript
  end
end
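
The polling client above is the synchronous entry point most applications use. For context only (not part of this diff), here is a minimal usage sketch. It assumes the top-level AssemblyAI::Client defined in data/lib/assemblyai.rb exposes a `transcripts` accessor returning this TranscriptsClient, and that Transcripts::PollingOptions accepts `interval:` and `timeout:` keyword arguments in milliseconds; neither of those definitions appears in this excerpt.

require "assemblyai"

# Assumed client construction; the Client class itself is not shown in this hunk.
client = AssemblyAI::Client.new(api_key: "YOUR_API_KEY")

# .transcribe submits the job, then polls every `interval` ms until the
# transcript status is :completed or :error, raising "Polling timeout" after `timeout` ms.
transcript = client.transcripts.transcribe(
  audio_url: "https://example.com/meeting.mp3",
  polling_options: AssemblyAI::Transcripts::PollingOptions.new(interval: 5000, timeout: 300_000)
)

puts transcript.status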

data/lib/assemblyai/transcripts/types/auto_highlight_result.rb
@@ -0,0 +1,66 @@
# frozen_string_literal: true

require_relative "timestamp"
require "json"

module AssemblyAI
  class Transcripts
    class AutoHighlightResult
      attr_reader :count, :rank, :text, :timestamps, :additional_properties

      # @param count [Integer] The total number of times the key phrase appears in the audio file
      # @param rank [Float] The total relevancy to the overall audio file of this key phrase - a greater number means more relevant
      # @param text [String] The text itself of the key phrase
      # @param timestamps [Array<Transcripts::Timestamp>] The timestamp of the key phrase
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::AutoHighlightResult]
      def initialize(count:, rank:, text:, timestamps:, additional_properties: nil)
        # @type [Integer] The total number of times the key phrase appears in the audio file
        @count = count
        # @type [Float] The total relevancy to the overall audio file of this key phrase - a greater number means more relevant
        @rank = rank
        # @type [String] The text itself of the key phrase
        @text = text
        # @type [Array<Transcripts::Timestamp>] The timestamp of the key phrase
        @timestamps = timestamps
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of AutoHighlightResult
      #
      # @param json_object [JSON]
      # @return [Transcripts::AutoHighlightResult]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        count = struct.count
        rank = struct.rank
        text = struct.text
        timestamps = parsed_json["timestamps"]&.map do |v|
          v = v.to_json
          Transcripts::Timestamp.from_json(json_object: v)
        end
        new(count: count, rank: rank, text: text, timestamps: timestamps, additional_properties: struct)
      end

      # Serialize an instance of AutoHighlightResult to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        { "count": @count, "rank": @rank, "text": @text, "timestamps": @timestamps }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.count.is_a?(Integer) != false || raise("Passed value for field obj.count is not the expected type, validation failed.")
        obj.rank.is_a?(Float) != false || raise("Passed value for field obj.rank is not the expected type, validation failed.")
        obj.text.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
        obj.timestamps.is_a?(Array) != false || raise("Passed value for field obj.timestamps is not the expected type, validation failed.")
      end
    end
  end
end
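
The same from_json/to_json pattern repeats across all of the generated types below, so one illustrative round trip suffices. The payload here is hypothetical, and the nested timestamp shape ({"start", "end"} in milliseconds) is assumed from timestamp.rb, which is listed in the file summary but not shown in this excerpt.

json = '{"count": 2, "rank": 0.91, "text": "speech recognition", "timestamps": [{"start": 1200, "end": 1850}]}'

highlight = AssemblyAI::Transcripts::AutoHighlightResult.from_json(json_object: json)
highlight.text              # => "speech recognition"
highlight.timestamps.first  # a Transcripts::Timestamp built from the nested hash
highlight.to_json           # serializes the known fields back to a JSON string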

data/lib/assemblyai/transcripts/types/auto_highlights_result.rb
@@ -0,0 +1,53 @@
# frozen_string_literal: true

require_relative "auto_highlight_result"
require "json"

module AssemblyAI
  class Transcripts
    # An array of results for the Key Phrases model, if it is enabled.
    # See [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.
    class AutoHighlightsResult
      attr_reader :results, :additional_properties

      # @param results [Array<Transcripts::AutoHighlightResult>] A temporally-sequential array of Key Phrases
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::AutoHighlightsResult]
      def initialize(results:, additional_properties: nil)
        # @type [Array<Transcripts::AutoHighlightResult>] A temporally-sequential array of Key Phrases
        @results = results
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of AutoHighlightsResult
      #
      # @param json_object [JSON]
      # @return [Transcripts::AutoHighlightsResult]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        results = parsed_json["results"]&.map do |v|
          v = v.to_json
          Transcripts::AutoHighlightResult.from_json(json_object: v)
        end
        new(results: results, additional_properties: struct)
      end

      # Serialize an instance of AutoHighlightsResult to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        { "results": @results }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.results.is_a?(Array) != false || raise("Passed value for field obj.results is not the expected type, validation failed.")
      end
    end
  end
end

data/lib/assemblyai/transcripts/types/chapter.rb
@@ -0,0 +1,68 @@
# frozen_string_literal: true

require "json"

module AssemblyAI
  class Transcripts
    # Chapter of the audio file
    class Chapter
      attr_reader :gist, :headline, :summary, :start, :end_, :additional_properties

      # @param gist [String] An ultra-short summary (just a few words) of the content spoken in the chapter
      # @param headline [String] A single sentence summary of the content spoken during the chapter
      # @param summary [String] A one paragraph summary of the content spoken during the chapter
      # @param start [Integer] The starting time, in milliseconds, for the chapter
      # @param end_ [Integer] The ending time, in milliseconds, for the chapter
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::Chapter]
      def initialize(gist:, headline:, summary:, start:, end_:, additional_properties: nil)
        # @type [String] An ultra-short summary (just a few words) of the content spoken in the chapter
        @gist = gist
        # @type [String] A single sentence summary of the content spoken during the chapter
        @headline = headline
        # @type [String] A one paragraph summary of the content spoken during the chapter
        @summary = summary
        # @type [Integer] The starting time, in milliseconds, for the chapter
        @start = start
        # @type [Integer] The ending time, in milliseconds, for the chapter
        @end_ = end_
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of Chapter
      #
      # @param json_object [JSON]
      # @return [Transcripts::Chapter]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        JSON.parse(json_object)
        gist = struct.gist
        headline = struct.headline
        summary = struct.summary
        start = struct.start
        end_ = struct.end
        new(gist: gist, headline: headline, summary: summary, start: start, end_: end_, additional_properties: struct)
      end

      # Serialize an instance of Chapter to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        { "gist": @gist, "headline": @headline, "summary": @summary, "start": @start, "end": @end_ }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.gist.is_a?(String) != false || raise("Passed value for field obj.gist is not the expected type, validation failed.")
        obj.headline.is_a?(String) != false || raise("Passed value for field obj.headline is not the expected type, validation failed.")
        obj.summary.is_a?(String) != false || raise("Passed value for field obj.summary is not the expected type, validation failed.")
        obj.start.is_a?(Integer) != false || raise("Passed value for field obj.start is not the expected type, validation failed.")
        obj.end_.is_a?(Integer) != false || raise("Passed value for field obj.end_ is not the expected type, validation failed.")
      end
    end
  end
end
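
One detail worth calling out: because `end` is a Ruby keyword, the generated class exposes the JSON "end" field as `end_` and maps it back to "end" on serialization. A hypothetical payload illustrates the mapping:

json = '{"gist": "Intro", "headline": "The hosts introduce the topic", "summary": "A short overview of the episode.", "start": 0, "end": 45210}'

chapter = AssemblyAI::Transcripts::Chapter.from_json(json_object: json)
chapter.start    # => 0
chapter.end_     # => 45210 (read from the "end" key)
chapter.to_json  # writes the value back out under "end"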

data/lib/assemblyai/transcripts/types/content_safety_label.rb
@@ -0,0 +1,57 @@
# frozen_string_literal: true

require "json"

module AssemblyAI
  class Transcripts
    class ContentSafetyLabel
      attr_reader :label, :confidence, :severity, :additional_properties

      # @param label [String] The label of the sensitive topic
      # @param confidence [Float] The confidence score for the topic being discussed, from 0 to 1
      # @param severity [Float] How severely the topic is discussed in the section, from 0 to 1
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::ContentSafetyLabel]
      def initialize(label:, confidence:, severity:, additional_properties: nil)
        # @type [String] The label of the sensitive topic
        @label = label
        # @type [Float] The confidence score for the topic being discussed, from 0 to 1
        @confidence = confidence
        # @type [Float] How severely the topic is discussed in the section, from 0 to 1
        @severity = severity
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of ContentSafetyLabel
      #
      # @param json_object [JSON]
      # @return [Transcripts::ContentSafetyLabel]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        JSON.parse(json_object)
        label = struct.label
        confidence = struct.confidence
        severity = struct.severity
        new(label: label, confidence: confidence, severity: severity, additional_properties: struct)
      end

      # Serialize an instance of ContentSafetyLabel to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        { "label": @label, "confidence": @confidence, "severity": @severity }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.label.is_a?(String) != false || raise("Passed value for field obj.label is not the expected type, validation failed.")
        obj.confidence.is_a?(Float) != false || raise("Passed value for field obj.confidence is not the expected type, validation failed.")
        obj.severity.is_a?(Float) != false || raise("Passed value for field obj.severity is not the expected type, validation failed.")
      end
    end
  end
end

data/lib/assemblyai/transcripts/types/content_safety_label_result.rb
@@ -0,0 +1,84 @@
# frozen_string_literal: true

require_relative "content_safety_label"
require_relative "timestamp"
require "json"

module AssemblyAI
  class Transcripts
    class ContentSafetyLabelResult
      attr_reader :text, :labels, :sentences_idx_start, :sentences_idx_end, :timestamp, :additional_properties

      # @param text [String] The transcript of the section flagged by the Content Moderation model
      # @param labels [Array<Transcripts::ContentSafetyLabel>] An array of safety labels, one per sensitive topic that was detected in the section
      # @param sentences_idx_start [Integer] The sentence index at which the section begins
      # @param sentences_idx_end [Integer] The sentence index at which the section ends
      # @param timestamp [Transcripts::Timestamp] Timestamp information for the section
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::ContentSafetyLabelResult]
      def initialize(text:, labels:, sentences_idx_start:, sentences_idx_end:, timestamp:, additional_properties: nil)
        # @type [String] The transcript of the section flagged by the Content Moderation model
        @text = text
        # @type [Array<Transcripts::ContentSafetyLabel>] An array of safety labels, one per sensitive topic that was detected in the section
        @labels = labels
        # @type [Integer] The sentence index at which the section begins
        @sentences_idx_start = sentences_idx_start
        # @type [Integer] The sentence index at which the section ends
        @sentences_idx_end = sentences_idx_end
        # @type [Transcripts::Timestamp] Timestamp information for the section
        @timestamp = timestamp
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of ContentSafetyLabelResult
      #
      # @param json_object [JSON]
      # @return [Transcripts::ContentSafetyLabelResult]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        text = struct.text
        labels = parsed_json["labels"]&.map do |v|
          v = v.to_json
          Transcripts::ContentSafetyLabel.from_json(json_object: v)
        end
        sentences_idx_start = struct.sentences_idx_start
        sentences_idx_end = struct.sentences_idx_end
        if parsed_json["timestamp"].nil?
          timestamp = nil
        else
          timestamp = parsed_json["timestamp"].to_json
          timestamp = Transcripts::Timestamp.from_json(json_object: timestamp)
        end
        new(text: text, labels: labels, sentences_idx_start: sentences_idx_start, sentences_idx_end: sentences_idx_end,
            timestamp: timestamp, additional_properties: struct)
      end

      # Serialize an instance of ContentSafetyLabelResult to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "text": @text,
          "labels": @labels,
          "sentences_idx_start": @sentences_idx_start,
          "sentences_idx_end": @sentences_idx_end,
          "timestamp": @timestamp
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.text.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
        obj.labels.is_a?(Array) != false || raise("Passed value for field obj.labels is not the expected type, validation failed.")
        obj.sentences_idx_start.is_a?(Integer) != false || raise("Passed value for field obj.sentences_idx_start is not the expected type, validation failed.")
        obj.sentences_idx_end.is_a?(Integer) != false || raise("Passed value for field obj.sentences_idx_end is not the expected type, validation failed.")
        Transcripts::Timestamp.validate_raw(obj: obj.timestamp)
      end
    end
  end
end

data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb
@@ -0,0 +1,75 @@
# frozen_string_literal: true

require_relative "audio_intelligence_model_status"
require_relative "content_safety_label_result"
require "json"

module AssemblyAI
  class Transcripts
    # An array of results for the Content Moderation model, if it is enabled.
    # See [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.
    class ContentSafetyLabelsResult
      attr_reader :status, :results, :summary, :severity_score_summary, :additional_properties

      # @param status [AUDIO_INTELLIGENCE_MODEL_STATUS] The status of the Content Moderation model. Either success, or unavailable in the rare case that the model failed.
      # @param results [Array<Transcripts::ContentSafetyLabelResult>]
      # @param summary [Hash{String => String}] A summary of the Content Moderation confidence results for the entire audio file
      # @param severity_score_summary [Hash{String => String}] A summary of the Content Moderation severity results for the entire audio file
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::ContentSafetyLabelsResult]
      def initialize(status:, results:, summary:, severity_score_summary:, additional_properties: nil)
        # @type [AUDIO_INTELLIGENCE_MODEL_STATUS] The status of the Content Moderation model. Either success, or unavailable in the rare case that the model failed.
        @status = status
        # @type [Array<Transcripts::ContentSafetyLabelResult>]
        @results = results
        # @type [Hash{String => String}] A summary of the Content Moderation confidence results for the entire audio file
        @summary = summary
        # @type [Hash{String => String}] A summary of the Content Moderation severity results for the entire audio file
        @severity_score_summary = severity_score_summary
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of ContentSafetyLabelsResult
      #
      # @param json_object [JSON]
      # @return [Transcripts::ContentSafetyLabelsResult]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        status = Transcripts::AUDIO_INTELLIGENCE_MODEL_STATUS.key(parsed_json["status"]) || parsed_json["status"]
        results = parsed_json["results"]&.map do |v|
          v = v.to_json
          Transcripts::ContentSafetyLabelResult.from_json(json_object: v)
        end
        summary = struct.summary
        severity_score_summary = struct.severity_score_summary
        new(status: status, results: results, summary: summary, severity_score_summary: severity_score_summary,
            additional_properties: struct)
      end

      # Serialize an instance of ContentSafetyLabelsResult to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "status": Transcripts::AUDIO_INTELLIGENCE_MODEL_STATUS[@status] || @status,
          "results": @results,
          "summary": @summary,
          "severity_score_summary": @severity_score_summary
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.status.is_a?(Transcripts::AUDIO_INTELLIGENCE_MODEL_STATUS) != false || raise("Passed value for field obj.status is not the expected type, validation failed.")
        obj.results.is_a?(Array) != false || raise("Passed value for field obj.results is not the expected type, validation failed.")
        obj.summary.is_a?(Hash) != false || raise("Passed value for field obj.summary is not the expected type, validation failed.")
        obj.severity_score_summary.is_a?(Hash) != false || raise("Passed value for field obj.severity_score_summary is not the expected type, validation failed.")
      end
    end
  end
end
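
Taken together, these types describe the Content Moderation section of a finished transcript. A short reading sketch follows, with two assumptions not shown in this excerpt: that Transcripts::Transcript (transcript.rb in the file list above) exposes a `content_safety_labels` reader, and that AUDIO_INTELLIGENCE_MODEL_STATUS is a Hash mapping symbols to wire strings (e.g. { success: "success", unavailable: "unavailable" }), as the .key / [] lookups above imply.

result = transcript.content_safety_labels # assumed reader on Transcripts::Transcript

if result.status == :success
  result.results.each do |section|
    section.labels.each do |label|
      puts "#{label.label}: confidence=#{label.confidence}, severity=#{label.severity}"
    end
  end
end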