assemblyai 1.0.0.pre.beta
- checksums.yaml +7 -0
- data/lib/assemblyai/files/client.rb +63 -0
- data/lib/assemblyai/files/types/uploaded_file.rb +47 -0
- data/lib/assemblyai/lemur/client.rb +390 -0
- data/lib/assemblyai/lemur/types/lemur_action_items_response.rb +52 -0
- data/lib/assemblyai/lemur/types/lemur_base_params.rb +99 -0
- data/lib/assemblyai/lemur/types/lemur_base_params_context.rb +75 -0
- data/lib/assemblyai/lemur/types/lemur_base_response.rb +47 -0
- data/lib/assemblyai/lemur/types/lemur_model.rb +13 -0
- data/lib/assemblyai/lemur/types/lemur_question.rb +74 -0
- data/lib/assemblyai/lemur/types/lemur_question_answer.rb +53 -0
- data/lib/assemblyai/lemur/types/lemur_question_answer_response.rb +56 -0
- data/lib/assemblyai/lemur/types/lemur_question_context.rb +75 -0
- data/lib/assemblyai/lemur/types/lemur_summary_response.rb +52 -0
- data/lib/assemblyai/lemur/types/lemur_task_response.rb +52 -0
- data/lib/assemblyai/lemur/types/purge_lemur_request_data_response.rb +58 -0
- data/lib/assemblyai/realtime/client.rb +61 -0
- data/lib/assemblyai/realtime/types/audio_data.rb +7 -0
- data/lib/assemblyai/realtime/types/audio_encoding.rb +8 -0
- data/lib/assemblyai/realtime/types/final_transcript.rb +107 -0
- data/lib/assemblyai/realtime/types/message_type.rb +13 -0
- data/lib/assemblyai/realtime/types/partial_transcript.rb +94 -0
- data/lib/assemblyai/realtime/types/realtime_base_message.rb +48 -0
- data/lib/assemblyai/realtime/types/realtime_base_transcript.rb +87 -0
- data/lib/assemblyai/realtime/types/realtime_error.rb +47 -0
- data/lib/assemblyai/realtime/types/realtime_message.rb +115 -0
- data/lib/assemblyai/realtime/types/realtime_temporary_token_response.rb +47 -0
- data/lib/assemblyai/realtime/types/realtime_transcript.rb +76 -0
- data/lib/assemblyai/realtime/types/realtime_transcript_type.rb +8 -0
- data/lib/assemblyai/realtime/types/session_begins.rb +58 -0
- data/lib/assemblyai/realtime/types/session_terminated.rb +47 -0
- data/lib/assemblyai/realtime/types/terminate_session.rb +56 -0
- data/lib/assemblyai/realtime/types/word.rb +62 -0
- data/lib/assemblyai/transcripts/client.rb +525 -0
- data/lib/assemblyai/transcripts/polling_client.rb +173 -0
- data/lib/assemblyai/transcripts/types/audio_intelligence_model_status.rb +8 -0
- data/lib/assemblyai/transcripts/types/auto_highlight_result.rb +66 -0
- data/lib/assemblyai/transcripts/types/auto_highlights_result.rb +53 -0
- data/lib/assemblyai/transcripts/types/chapter.rb +68 -0
- data/lib/assemblyai/transcripts/types/content_safety_label.rb +57 -0
- data/lib/assemblyai/transcripts/types/content_safety_label_result.rb +84 -0
- data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +75 -0
- data/lib/assemblyai/transcripts/types/entity.rb +69 -0
- data/lib/assemblyai/transcripts/types/entity_type.rb +38 -0
- data/lib/assemblyai/transcripts/types/page_details.rb +74 -0
- data/lib/assemblyai/transcripts/types/paragraphs_response.rb +67 -0
- data/lib/assemblyai/transcripts/types/pii_policy.rb +36 -0
- data/lib/assemblyai/transcripts/types/polling_options.rb +21 -0
- data/lib/assemblyai/transcripts/types/redact_pii_audio_quality.rb +8 -0
- data/lib/assemblyai/transcripts/types/redacted_audio_response.rb +53 -0
- data/lib/assemblyai/transcripts/types/redacted_audio_status.rb +7 -0
- data/lib/assemblyai/transcripts/types/sentences_response.rb +67 -0
- data/lib/assemblyai/transcripts/types/sentiment.rb +8 -0
- data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +82 -0
- data/lib/assemblyai/transcripts/types/severity_score_summary.rb +57 -0
- data/lib/assemblyai/transcripts/types/speech_model.rb +7 -0
- data/lib/assemblyai/transcripts/types/substitution_policy.rb +8 -0
- data/lib/assemblyai/transcripts/types/subtitle_format.rb +8 -0
- data/lib/assemblyai/transcripts/types/summary_model.rb +8 -0
- data/lib/assemblyai/transcripts/types/summary_type.rb +14 -0
- data/lib/assemblyai/transcripts/types/timestamp.rb +53 -0
- data/lib/assemblyai/transcripts/types/topic_detection_model_result.rb +68 -0
- data/lib/assemblyai/transcripts/types/topic_detection_result.rb +68 -0
- data/lib/assemblyai/transcripts/types/topic_detection_result_labels_item.rb +52 -0
- data/lib/assemblyai/transcripts/types/transcript.rb +454 -0
- data/lib/assemblyai/transcripts/types/transcript_boost_param.rb +8 -0
- data/lib/assemblyai/transcripts/types/transcript_custom_spelling.rb +53 -0
- data/lib/assemblyai/transcripts/types/transcript_language_code.rb +29 -0
- data/lib/assemblyai/transcripts/types/transcript_list.rb +62 -0
- data/lib/assemblyai/transcripts/types/transcript_list_item.rb +82 -0
- data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +280 -0
- data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_sentence.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_status.rb +8 -0
- data/lib/assemblyai/transcripts/types/transcript_utterance.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_word.rb +68 -0
- data/lib/assemblyai/transcripts/types/word_search_match.rb +63 -0
- data/lib/assemblyai/transcripts/types/word_search_response.rb +61 -0
- data/lib/assemblyai/transcripts/types/word_search_timestamp.rb +7 -0
- data/lib/assemblyai/types/error.rb +50 -0
- data/lib/assemblyai.rb +48 -0
- data/lib/environment.rb +7 -0
- data/lib/gemconfig.rb +14 -0
- data/lib/requests.rb +87 -0
- data/lib/types_export.rb +75 -0
- metadata +170 -0
New file data/lib/assemblyai/transcripts/types/transcript_list_item.rb (@@ -0,0 +1,82 @@):
# frozen_string_literal: true

require_relative "transcript_status"
require "date"
require "json"

module AssemblyAI
  class Transcripts
    class TranscriptListItem
      attr_reader :id, :resource_url, :status, :created, :completed, :audio_url, :additional_properties

      # @param id [String]
      # @param resource_url [String]
      # @param status [TRANSCRIPT_STATUS]
      # @param created [DateTime]
      # @param completed [DateTime]
      # @param audio_url [String]
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptListItem]
      def initialize(id:, resource_url:, status:, created:, completed:, audio_url:, additional_properties: nil)
        # @type [String]
        @id = id
        # @type [String]
        @resource_url = resource_url
        # @type [TRANSCRIPT_STATUS]
        @status = status
        # @type [DateTime]
        @created = created
        # @type [DateTime]
        @completed = completed
        # @type [String]
        @audio_url = audio_url
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptListItem
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptListItem]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        id = struct.id
        resource_url = struct.resource_url
        status = Transcripts::TRANSCRIPT_STATUS.key(parsed_json["status"]) || parsed_json["status"]
        created = (DateTime.parse(parsed_json["created"]) unless parsed_json["created"].nil?)
        completed = (DateTime.parse(parsed_json["completed"]) unless parsed_json["completed"].nil?)
        audio_url = struct.audio_url
        new(id: id, resource_url: resource_url, status: status, created: created, completed: completed,
            audio_url: audio_url, additional_properties: struct)
      end

      # Serialize an instance of TranscriptListItem to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "id": @id,
          "resource_url": @resource_url,
          "status": Transcripts::TRANSCRIPT_STATUS[@status] || @status,
          "created": @created,
          "completed": @completed,
          "audio_url": @audio_url
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.id.is_a?(String) != false || raise("Passed value for field obj.id is not the expected type, validation failed.")
        obj.resource_url.is_a?(String) != false || raise("Passed value for field obj.resource_url is not the expected type, validation failed.")
        obj.status.is_a?(Transcripts::TRANSCRIPT_STATUS) != false || raise("Passed value for field obj.status is not the expected type, validation failed.")
        obj.created.is_a?(DateTime) != false || raise("Passed value for field obj.created is not the expected type, validation failed.")
        obj.completed.is_a?(DateTime) != false || raise("Passed value for field obj.completed is not the expected type, validation failed.")
        obj.audio_url.is_a?(String) != false || raise("Passed value for field obj.audio_url is not the expected type, validation failed.")
      end
    end
  end
end
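For context, a minimal usage sketch (not part of the diff). It assumes the gem is loaded with require "assemblyai", that the example JSON mirrors a list item returned by GET /v2/transcript, and that TRANSCRIPT_STATUS is a symbol-to-string hash, so from_json maps a known status string back to its symbol key:

require "assemblyai"
require "json"

# Hypothetical response body for a single transcript list item
raw_item = {
  id: "9ea68fd3-f953-42c1-9742-976c447fb463",
  resource_url: "https://api.assemblyai.com/v2/transcript/9ea68fd3-f953-42c1-9742-976c447fb463",
  status: "completed",
  created: "2024-01-15T10:00:00Z",
  completed: "2024-01-15T10:02:30Z",
  audio_url: "https://example.com/meeting.mp3"
}.to_json

item = AssemblyAI::Transcripts::TranscriptListItem.from_json(json_object: raw_item)
item.created.class  # => DateTime, parsed from the ISO 8601 string
item.status         # => :completed if TRANSCRIPT_STATUS recognizes the value, else the raw string
puts item.to_json   # round-trips back to a JSON string with snake_case keys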
New file data/lib/assemblyai/transcripts/types/transcript_optional_params.rb (@@ -0,0 +1,280 @@):
# frozen_string_literal: true

require_relative "speech_model"
require_relative "transcript_language_code"
require_relative "transcript_boost_param"
require_relative "redact_pii_audio_quality"
require_relative "pii_policy"
require_relative "substitution_policy"
require_relative "transcript_custom_spelling"
require_relative "summary_model"
require_relative "summary_type"
require "json"

module AssemblyAI
  class Transcripts
    # The parameters for creating a transcript
    class TranscriptOptionalParams
      attr_reader :speech_model, :language_code, :punctuate, :format_text, :dual_channel, :webhook_url,
                  :webhook_auth_header_name, :webhook_auth_header_value, :auto_highlights, :audio_start_from, :audio_end_at, :word_boost, :boost_param, :filter_profanity, :redact_pii, :redact_pii_audio, :redact_pii_audio_quality, :redact_pii_policies, :redact_pii_sub, :speaker_labels, :speakers_expected, :content_safety, :content_safety_confidence, :iab_categories, :language_detection, :custom_spelling, :disfluencies, :sentiment_analysis, :auto_chapters, :entity_detection, :speech_threshold, :summarization, :summary_model, :summary_type, :custom_topics, :topics, :additional_properties

      # @param speech_model [Transcripts::SPEECH_MODEL]
      # @param language_code [TRANSCRIPT_LANGUAGE_CODE]
      # @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
      # @param format_text [Boolean] Enable Text Formatting, can be true or false
      # @param dual_channel [Boolean] Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.
      # @param webhook_url [String] The URL to which AssemblyAI sends webhooks upon transcription completion
      # @param webhook_auth_header_name [String] The header name which should be sent back with webhook calls
      # @param webhook_auth_header_value [String] Specify a header name and value to send back with a webhook call for added security
      # @param auto_highlights [Boolean] Whether Key Phrases is enabled, either true or false
      # @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
      # @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
      # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
      # @param boost_param [TRANSCRIPT_BOOST_PARAM] The word boost parameter value
      # @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
      # @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or false
      # @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
      # @param redact_pii_audio_quality [REDACT_PII_AUDIO_QUALITY] Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
      # @param redact_pii_policies [Array<Transcripts::PII_POLICY>] The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
      # @param redact_pii_sub [SUBSTITUTION_POLICY]
      # @param speaker_labels [Boolean] Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
      # @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
      # @param content_safety [Boolean] Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
      # @param content_safety_confidence [Integer] The confidence threshold for content moderation. Values must be between 25 and 100.
      # @param iab_categories [Boolean] Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
      # @param language_detection [Boolean] Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) was enabled in the transcription request, either true or false.
      # @param custom_spelling [Array<Transcripts::TranscriptCustomSpelling>] Customize how words are spelled and formatted using to and from values
      # @param disfluencies [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
      # @param sentiment_analysis [Boolean] Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
      # @param auto_chapters [Boolean] Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
      # @param entity_detection [Boolean] Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
      # @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
      #   Valid values are in the range [0, 1] inclusive.
      # @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
      # @param summary_model [SUMMARY_MODEL] The model to summarize the transcript
      # @param summary_type [SUMMARY_TYPE] The type of summary
      # @param custom_topics [Boolean] Whether custom topics is enabled, either true or false
      # @param topics [Array<String>] The list of custom topics provided, if custom topics is enabled
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptOptionalParams]
      def initialize(speech_model: nil, language_code: nil, punctuate: nil, format_text: nil, dual_channel: nil,
                     webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil)
        # @type [Transcripts::SPEECH_MODEL]
        @speech_model = speech_model
        # @type [TRANSCRIPT_LANGUAGE_CODE]
        @language_code = language_code
        # @type [Boolean] Enable Automatic Punctuation, can be true or false
        @punctuate = punctuate
        # @type [Boolean] Enable Text Formatting, can be true or false
        @format_text = format_text
        # @type [Boolean] Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.
        @dual_channel = dual_channel
        # @type [String] The URL to which AssemblyAI sends webhooks upon transcription completion
        @webhook_url = webhook_url
        # @type [String] The header name which should be sent back with webhook calls
        @webhook_auth_header_name = webhook_auth_header_name
        # @type [String] Specify a header name and value to send back with a webhook call for added security
        @webhook_auth_header_value = webhook_auth_header_value
        # @type [Boolean] Whether Key Phrases is enabled, either true or false
        @auto_highlights = auto_highlights
        # @type [Integer] The point in time, in milliseconds, to begin transcribing in your media file
        @audio_start_from = audio_start_from
        # @type [Integer] The point in time, in milliseconds, to stop transcribing in your media file
        @audio_end_at = audio_end_at
        # @type [Array<String>] The list of custom vocabulary to boost transcription probability for
        @word_boost = word_boost
        # @type [TRANSCRIPT_BOOST_PARAM] The word boost parameter value
        @boost_param = boost_param
        # @type [Boolean] Filter profanity from the transcribed text, can be true or false
        @filter_profanity = filter_profanity
        # @type [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or false
        @redact_pii = redact_pii
        # @type [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
        @redact_pii_audio = redact_pii_audio
        # @type [REDACT_PII_AUDIO_QUALITY] Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
        @redact_pii_audio_quality = redact_pii_audio_quality
        # @type [Array<Transcripts::PII_POLICY>] The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
        @redact_pii_policies = redact_pii_policies
        # @type [SUBSTITUTION_POLICY]
        @redact_pii_sub = redact_pii_sub
        # @type [Boolean] Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
        @speaker_labels = speaker_labels
        # @type [Integer] Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
        @speakers_expected = speakers_expected
        # @type [Boolean] Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
        @content_safety = content_safety
        # @type [Integer] The confidence threshold for content moderation. Values must be between 25 and 100.
        @content_safety_confidence = content_safety_confidence
        # @type [Boolean] Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
        @iab_categories = iab_categories
        # @type [Boolean] Whether [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection) was enabled in the transcription request, either true or false.
        @language_detection = language_detection
        # @type [Array<Transcripts::TranscriptCustomSpelling>] Customize how words are spelled and formatted using to and from values
        @custom_spelling = custom_spelling
        # @type [Boolean] Transcribe Filler Words, like "umm", in your media file; can be true or false
        @disfluencies = disfluencies
        # @type [Boolean] Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
        @sentiment_analysis = sentiment_analysis
        # @type [Boolean] Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
        @auto_chapters = auto_chapters
        # @type [Boolean] Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
        @entity_detection = entity_detection
        # @type [Float] Reject audio files that contain less than this fraction of speech.
        #   Valid values are in the range [0, 1] inclusive.
        @speech_threshold = speech_threshold
        # @type [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
        @summarization = summarization
        # @type [SUMMARY_MODEL] The model to summarize the transcript
        @summary_model = summary_model
        # @type [SUMMARY_TYPE] The type of summary
        @summary_type = summary_type
        # @type [Boolean] Whether custom topics is enabled, either true or false
        @custom_topics = custom_topics
        # @type [Array<String>] The list of custom topics provided, if custom topics is enabled
        @topics = topics
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptOptionalParams
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptOptionalParams]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        speech_model = struct.speech_model
        language_code = Transcripts::TRANSCRIPT_LANGUAGE_CODE.key(parsed_json["language_code"]) || parsed_json["language_code"]
        punctuate = struct.punctuate
        format_text = struct.format_text
        dual_channel = struct.dual_channel
        webhook_url = struct.webhook_url
        webhook_auth_header_name = struct.webhook_auth_header_name
        webhook_auth_header_value = struct.webhook_auth_header_value
        auto_highlights = struct.auto_highlights
        audio_start_from = struct.audio_start_from
        audio_end_at = struct.audio_end_at
        word_boost = struct.word_boost
        boost_param = Transcripts::TRANSCRIPT_BOOST_PARAM.key(parsed_json["boost_param"]) || parsed_json["boost_param"]
        filter_profanity = struct.filter_profanity
        redact_pii = struct.redact_pii
        redact_pii_audio = struct.redact_pii_audio
        redact_pii_audio_quality = Transcripts::REDACT_PII_AUDIO_QUALITY.key(parsed_json["redact_pii_audio_quality"]) || parsed_json["redact_pii_audio_quality"]
        redact_pii_policies = parsed_json["redact_pii_policies"]&.map do |v|
          v = v.to_json
          Transcripts::PII_POLICY.key(v) || v
        end
        redact_pii_sub = Transcripts::SUBSTITUTION_POLICY.key(parsed_json["redact_pii_sub"]) || parsed_json["redact_pii_sub"]
        speaker_labels = struct.speaker_labels
        speakers_expected = struct.speakers_expected
        content_safety = struct.content_safety
        content_safety_confidence = struct.content_safety_confidence
        iab_categories = struct.iab_categories
        language_detection = struct.language_detection
        custom_spelling = parsed_json["custom_spelling"]&.map do |v|
          v = v.to_json
          Transcripts::TranscriptCustomSpelling.from_json(json_object: v)
        end
        disfluencies = struct.disfluencies
        sentiment_analysis = struct.sentiment_analysis
        auto_chapters = struct.auto_chapters
        entity_detection = struct.entity_detection
        speech_threshold = struct.speech_threshold
        summarization = struct.summarization
        summary_model = Transcripts::SUMMARY_MODEL.key(parsed_json["summary_model"]) || parsed_json["summary_model"]
        summary_type = Transcripts::SUMMARY_TYPE.key(parsed_json["summary_type"]) || parsed_json["summary_type"]
        custom_topics = struct.custom_topics
        topics = struct.topics
        new(speech_model: speech_model, language_code: language_code, punctuate: punctuate, format_text: format_text,
            dual_channel: dual_channel, webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: struct)
      end

      # Serialize an instance of TranscriptOptionalParams to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "speech_model": @speech_model,
          "language_code": Transcripts::TRANSCRIPT_LANGUAGE_CODE[@language_code] || @language_code,
          "punctuate": @punctuate,
          "format_text": @format_text,
          "dual_channel": @dual_channel,
          "webhook_url": @webhook_url,
          "webhook_auth_header_name": @webhook_auth_header_name,
          "webhook_auth_header_value": @webhook_auth_header_value,
          "auto_highlights": @auto_highlights,
          "audio_start_from": @audio_start_from,
          "audio_end_at": @audio_end_at,
          "word_boost": @word_boost,
          "boost_param": Transcripts::TRANSCRIPT_BOOST_PARAM[@boost_param] || @boost_param,
          "filter_profanity": @filter_profanity,
          "redact_pii": @redact_pii,
          "redact_pii_audio": @redact_pii_audio,
          "redact_pii_audio_quality": Transcripts::REDACT_PII_AUDIO_QUALITY[@redact_pii_audio_quality] || @redact_pii_audio_quality,
          "redact_pii_policies": @redact_pii_policies,
          "redact_pii_sub": Transcripts::SUBSTITUTION_POLICY[@redact_pii_sub] || @redact_pii_sub,
          "speaker_labels": @speaker_labels,
          "speakers_expected": @speakers_expected,
          "content_safety": @content_safety,
          "content_safety_confidence": @content_safety_confidence,
          "iab_categories": @iab_categories,
          "language_detection": @language_detection,
          "custom_spelling": @custom_spelling,
          "disfluencies": @disfluencies,
          "sentiment_analysis": @sentiment_analysis,
          "auto_chapters": @auto_chapters,
          "entity_detection": @entity_detection,
          "speech_threshold": @speech_threshold,
          "summarization": @summarization,
          "summary_model": Transcripts::SUMMARY_MODEL[@summary_model] || @summary_model,
          "summary_type": Transcripts::SUMMARY_TYPE[@summary_type] || @summary_type,
          "custom_topics": @custom_topics,
          "topics": @topics
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.speech_model&.is_a?(String) != false || raise("Passed value for field obj.speech_model is not the expected type, validation failed.")
        obj.language_code&.is_a?(Transcripts::TRANSCRIPT_LANGUAGE_CODE) != false || raise("Passed value for field obj.language_code is not the expected type, validation failed.")
        obj.punctuate&.is_a?(Boolean) != false || raise("Passed value for field obj.punctuate is not the expected type, validation failed.")
        obj.format_text&.is_a?(Boolean) != false || raise("Passed value for field obj.format_text is not the expected type, validation failed.")
        obj.dual_channel&.is_a?(Boolean) != false || raise("Passed value for field obj.dual_channel is not the expected type, validation failed.")
        obj.webhook_url&.is_a?(String) != false || raise("Passed value for field obj.webhook_url is not the expected type, validation failed.")
        obj.webhook_auth_header_name&.is_a?(String) != false || raise("Passed value for field obj.webhook_auth_header_name is not the expected type, validation failed.")
        obj.webhook_auth_header_value&.is_a?(String) != false || raise("Passed value for field obj.webhook_auth_header_value is not the expected type, validation failed.")
        obj.auto_highlights&.is_a?(Boolean) != false || raise("Passed value for field obj.auto_highlights is not the expected type, validation failed.")
        obj.audio_start_from&.is_a?(Integer) != false || raise("Passed value for field obj.audio_start_from is not the expected type, validation failed.")
        obj.audio_end_at&.is_a?(Integer) != false || raise("Passed value for field obj.audio_end_at is not the expected type, validation failed.")
        obj.word_boost&.is_a?(Array) != false || raise("Passed value for field obj.word_boost is not the expected type, validation failed.")
        obj.boost_param&.is_a?(Transcripts::TRANSCRIPT_BOOST_PARAM) != false || raise("Passed value for field obj.boost_param is not the expected type, validation failed.")
        obj.filter_profanity&.is_a?(Boolean) != false || raise("Passed value for field obj.filter_profanity is not the expected type, validation failed.")
        obj.redact_pii&.is_a?(Boolean) != false || raise("Passed value for field obj.redact_pii is not the expected type, validation failed.")
        obj.redact_pii_audio&.is_a?(Boolean) != false || raise("Passed value for field obj.redact_pii_audio is not the expected type, validation failed.")
        obj.redact_pii_audio_quality&.is_a?(Transcripts::REDACT_PII_AUDIO_QUALITY) != false || raise("Passed value for field obj.redact_pii_audio_quality is not the expected type, validation failed.")
        obj.redact_pii_policies&.is_a?(Array) != false || raise("Passed value for field obj.redact_pii_policies is not the expected type, validation failed.")
        obj.redact_pii_sub&.is_a?(Transcripts::SUBSTITUTION_POLICY) != false || raise("Passed value for field obj.redact_pii_sub is not the expected type, validation failed.")
        obj.speaker_labels&.is_a?(Boolean) != false || raise("Passed value for field obj.speaker_labels is not the expected type, validation failed.")
        obj.speakers_expected&.is_a?(Integer) != false || raise("Passed value for field obj.speakers_expected is not the expected type, validation failed.")
        obj.content_safety&.is_a?(Boolean) != false || raise("Passed value for field obj.content_safety is not the expected type, validation failed.")
        obj.content_safety_confidence&.is_a?(Integer) != false || raise("Passed value for field obj.content_safety_confidence is not the expected type, validation failed.")
        obj.iab_categories&.is_a?(Boolean) != false || raise("Passed value for field obj.iab_categories is not the expected type, validation failed.")
        obj.language_detection&.is_a?(Boolean) != false || raise("Passed value for field obj.language_detection is not the expected type, validation failed.")
        obj.custom_spelling&.is_a?(Array) != false || raise("Passed value for field obj.custom_spelling is not the expected type, validation failed.")
        obj.disfluencies&.is_a?(Boolean) != false || raise("Passed value for field obj.disfluencies is not the expected type, validation failed.")
        obj.sentiment_analysis&.is_a?(Boolean) != false || raise("Passed value for field obj.sentiment_analysis is not the expected type, validation failed.")
        obj.auto_chapters&.is_a?(Boolean) != false || raise("Passed value for field obj.auto_chapters is not the expected type, validation failed.")
        obj.entity_detection&.is_a?(Boolean) != false || raise("Passed value for field obj.entity_detection is not the expected type, validation failed.")
        obj.speech_threshold&.is_a?(Float) != false || raise("Passed value for field obj.speech_threshold is not the expected type, validation failed.")
        obj.summarization&.is_a?(Boolean) != false || raise("Passed value for field obj.summarization is not the expected type, validation failed.")
        obj.summary_model&.is_a?(Transcripts::SUMMARY_MODEL) != false || raise("Passed value for field obj.summary_model is not the expected type, validation failed.")
        obj.summary_type&.is_a?(Transcripts::SUMMARY_TYPE) != false || raise("Passed value for field obj.summary_type is not the expected type, validation failed.")
        obj.custom_topics&.is_a?(Boolean) != false || raise("Passed value for field obj.custom_topics is not the expected type, validation failed.")
        obj.topics&.is_a?(Array) != false || raise("Passed value for field obj.topics is not the expected type, validation failed.")
      end
    end
  end
end
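A minimal sketch of how these request parameters might be built and serialized (assumed usage, not part of the diff; the example values such as "en_us", "person_name" and "bullets" are illustrative, and enum fields pass plain strings through unchanged because to_json falls back to the raw value when the symbol lookup misses):

require "assemblyai"
require "json"

params = AssemblyAI::Transcripts::TranscriptOptionalParams.new(
  language_code: "en_us",          # passed through by the TRANSCRIPT_LANGUAGE_CODE fallback
  punctuate: true,
  speaker_labels: true,
  speakers_expected: 2,
  redact_pii: true,
  redact_pii_policies: ["person_name", "phone_number"],
  summarization: true,
  summary_type: "bullets"
)

# Every field is optional; unset attributes serialize as null, so they can be
# stripped before sending the request body.
body = JSON.parse(params.to_json).compact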
New file data/lib/assemblyai/transcripts/types/transcript_paragraph.rb (@@ -0,0 +1,84 @@):
# frozen_string_literal: true

require_relative "transcript_word"
require "json"

module AssemblyAI
  class Transcripts
    class TranscriptParagraph
      attr_reader :text, :start, :end_, :confidence, :words, :speaker, :additional_properties

      # @param text [String]
      # @param start [Integer]
      # @param end_ [Integer]
      # @param confidence [Float]
      # @param words [Array<Transcripts::TranscriptWord>]
      # @param speaker [String] The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptParagraph]
      def initialize(text:, start:, end_:, confidence:, words:, speaker: nil, additional_properties: nil)
        # @type [String]
        @text = text
        # @type [Integer]
        @start = start
        # @type [Integer]
        @end_ = end_
        # @type [Float]
        @confidence = confidence
        # @type [Array<Transcripts::TranscriptWord>]
        @words = words
        # @type [String] The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
        @speaker = speaker
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptParagraph
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptParagraph]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        text = struct.text
        start = struct.start
        end_ = struct.end
        confidence = struct.confidence
        words = parsed_json["words"]&.map do |v|
          v = v.to_json
          Transcripts::TranscriptWord.from_json(json_object: v)
        end
        speaker = struct.speaker
        new(text: text, start: start, end_: end_, confidence: confidence, words: words, speaker: speaker,
            additional_properties: struct)
      end

      # Serialize an instance of TranscriptParagraph to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "text": @text,
          "start": @start,
          "end": @end_,
          "confidence": @confidence,
          "words": @words,
          "speaker": @speaker
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.text.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
        obj.start.is_a?(Integer) != false || raise("Passed value for field obj.start is not the expected type, validation failed.")
        obj.end_.is_a?(Integer) != false || raise("Passed value for field obj.end_ is not the expected type, validation failed.")
        obj.confidence.is_a?(Float) != false || raise("Passed value for field obj.confidence is not the expected type, validation failed.")
        obj.words.is_a?(Array) != false || raise("Passed value for field obj.words is not the expected type, validation failed.")
        obj.speaker&.is_a?(String) != false || raise("Passed value for field obj.speaker is not the expected type, validation failed.")
      end
    end
  end
end
New file data/lib/assemblyai/transcripts/types/transcript_sentence.rb (@@ -0,0 +1,84 @@):
# frozen_string_literal: true

require_relative "transcript_word"
require "json"

module AssemblyAI
  class Transcripts
    class TranscriptSentence
      attr_reader :text, :start, :end_, :confidence, :words, :speaker, :additional_properties

      # @param text [String]
      # @param start [Integer]
      # @param end_ [Integer]
      # @param confidence [Float]
      # @param words [Array<Transcripts::TranscriptWord>]
      # @param speaker [String] The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptSentence]
      def initialize(text:, start:, end_:, confidence:, words:, speaker: nil, additional_properties: nil)
        # @type [String]
        @text = text
        # @type [Integer]
        @start = start
        # @type [Integer]
        @end_ = end_
        # @type [Float]
        @confidence = confidence
        # @type [Array<Transcripts::TranscriptWord>]
        @words = words
        # @type [String] The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
        @speaker = speaker
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptSentence
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptSentence]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        text = struct.text
        start = struct.start
        end_ = struct.end
        confidence = struct.confidence
        words = parsed_json["words"]&.map do |v|
          v = v.to_json
          Transcripts::TranscriptWord.from_json(json_object: v)
        end
        speaker = struct.speaker
        new(text: text, start: start, end_: end_, confidence: confidence, words: words, speaker: speaker,
            additional_properties: struct)
      end

      # Serialize an instance of TranscriptSentence to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "text": @text,
          "start": @start,
          "end": @end_,
          "confidence": @confidence,
          "words": @words,
          "speaker": @speaker
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.text.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
        obj.start.is_a?(Integer) != false || raise("Passed value for field obj.start is not the expected type, validation failed.")
        obj.end_.is_a?(Integer) != false || raise("Passed value for field obj.end_ is not the expected type, validation failed.")
        obj.confidence.is_a?(Float) != false || raise("Passed value for field obj.confidence is not the expected type, validation failed.")
        obj.words.is_a?(Array) != false || raise("Passed value for field obj.words is not the expected type, validation failed.")
        obj.speaker&.is_a?(String) != false || raise("Passed value for field obj.speaker is not the expected type, validation failed.")
      end
    end
  end
end
New file data/lib/assemblyai/transcripts/types/transcript_utterance.rb (@@ -0,0 +1,84 @@):
# frozen_string_literal: true

require_relative "transcript_word"
require "json"

module AssemblyAI
  class Transcripts
    class TranscriptUtterance
      attr_reader :confidence, :start, :end_, :text, :words, :speaker, :additional_properties

      # @param confidence [Float] The confidence score for the transcript of this utterance
      # @param start [Integer] The starting time, in milliseconds, of the utterance in the audio file
      # @param end_ [Integer] The ending time, in milliseconds, of the utterance in the audio file
      # @param text [String] The text for this utterance
      # @param words [Array<Transcripts::TranscriptWord>] The words in the utterance.
      # @param speaker [String] The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.
      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
      # @return [Transcripts::TranscriptUtterance]
      def initialize(confidence:, start:, end_:, text:, words:, speaker:, additional_properties: nil)
        # @type [Float] The confidence score for the transcript of this utterance
        @confidence = confidence
        # @type [Integer] The starting time, in milliseconds, of the utterance in the audio file
        @start = start
        # @type [Integer] The ending time, in milliseconds, of the utterance in the audio file
        @end_ = end_
        # @type [String] The text for this utterance
        @text = text
        # @type [Array<Transcripts::TranscriptWord>] The words in the utterance.
        @words = words
        # @type [String] The speaker of this utterance, where each speaker is assigned a sequential capital letter - e.g. "A" for Speaker A, "B" for Speaker B, etc.
        @speaker = speaker
        # @type [OpenStruct] Additional properties unmapped to the current class definition
        @additional_properties = additional_properties
      end

      # Deserialize a JSON object to an instance of TranscriptUtterance
      #
      # @param json_object [JSON]
      # @return [Transcripts::TranscriptUtterance]
      def self.from_json(json_object:)
        struct = JSON.parse(json_object, object_class: OpenStruct)
        parsed_json = JSON.parse(json_object)
        confidence = struct.confidence
        start = struct.start
        end_ = struct.end
        text = struct.text
        words = parsed_json["words"]&.map do |v|
          v = v.to_json
          Transcripts::TranscriptWord.from_json(json_object: v)
        end
        speaker = struct.speaker
        new(confidence: confidence, start: start, end_: end_, text: text, words: words, speaker: speaker,
            additional_properties: struct)
      end

      # Serialize an instance of TranscriptUtterance to a JSON object
      #
      # @return [JSON]
      def to_json(*_args)
        {
          "confidence": @confidence,
          "start": @start,
          "end": @end_,
          "text": @text,
          "words": @words,
          "speaker": @speaker
        }.to_json
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      def self.validate_raw(obj:)
        obj.confidence.is_a?(Float) != false || raise("Passed value for field obj.confidence is not the expected type, validation failed.")
        obj.start.is_a?(Integer) != false || raise("Passed value for field obj.start is not the expected type, validation failed.")
        obj.end_.is_a?(Integer) != false || raise("Passed value for field obj.end_ is not the expected type, validation failed.")
        obj.text.is_a?(String) != false || raise("Passed value for field obj.text is not the expected type, validation failed.")
        obj.words.is_a?(Array) != false || raise("Passed value for field obj.words is not the expected type, validation failed.")
        obj.speaker.is_a?(String) != false || raise("Passed value for field obj.speaker is not the expected type, validation failed.")
      end
    end
  end
end
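The paragraph, sentence, and utterance types above all follow the same shape. A hedged sketch of deserializing an utterance (the JSON payload is illustrative, and it assumes TranscriptWord exposes readers such as text, matching the pattern of the classes in this diff):

require "assemblyai"
require "json"

raw_utterance = {
  confidence: 0.97,
  start: 0,
  end: 2800,
  text: "Hello world",
  speaker: "A",
  words: [
    { text: "Hello", start: 0, end: 1200, confidence: 0.98 },
    { text: "world", start: 1350, end: 2800, confidence: 0.96 }
  ]
}.to_json

utterance = AssemblyAI::Transcripts::TranscriptUtterance.from_json(json_object: raw_utterance)
utterance.end_              # the JSON "end" key is exposed as end_ to avoid clashing with the Ruby keyword
utterance.words.map(&:text) # => ["Hello", "world"], each element a Transcripts::TranscriptWord
utterance.speaker           # => "A"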