assemblyai 1.0.0.pre.beta.2 → 1.0.0.pre.beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/assemblyai/transcripts/list_by_url_client.rb +57 -0
- data/lib/assemblyai/transcripts/polling_client.rb +27 -25
- data/lib/assemblyai/transcripts/types/polling_options.rb +14 -15
- data/lib/assemblyai/transcripts/types/transcript_ready_notification.rb +54 -0
- data/lib/assemblyai/transcripts/types/transcript_ready_status.rb +11 -0
- data/lib/assemblyai.rb +1 -0
- data/lib/requests.rb +2 -2
- data/lib/types_export.rb +2 -0
- metadata +5 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 82e3b0e81020edc6f6bd8589a23b2d30c78c2138f6b5a9ea1762371c92eacf37
+  data.tar.gz: c7a833cd083ddc051a45cb3a7f27ef5e8eb3b7915808360e5cfe60aa23a5b308
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b064a231af8d12c62c817e8bc9708fc6657b725f13b4608869c4f323f5a3f88e88bfa57c32a4b1b91d3436f42c0f514d7f464cdd125718bf4c4b331033bb26dc
+  data.tar.gz: f462ce60f2ca15536c09cdab1fa4f0a81033bb80ac745e9dbf8dc1268b3129465ddebec0ebdb37b07c1b328b8051f6c7fdfb4bccfcb9097cf4e30e685b8c513e
data/lib/assemblyai/transcripts/list_by_url_client.rb
ADDED
@@ -0,0 +1,57 @@
+# frozen_string_literal: true
+
+require_relative "../../requests"
+require_relative "types/transcript_list"
+require "async"
+
+module AssemblyAI
+  # :nodoc:
+  class TranscriptsClient
+    # Retrieve a list of transcripts you created, this is used for pagination to easily retrieve the next page of transcripts
+    #
+    # @param url [String] The URL to retrieve the transcript list from
+    # @param request_options [RequestOptions]
+    # @return [Transcripts::TranscriptList]
+    #
+    # @example Retrieve the next page of results
+    #  client = AssemblyAI::Client.new(api_key: "YOUR_API_KEY")
+    #  transcript_list = client.transcripts.list(limit: 1)
+    #  client.transcripts.list_by_url(url: transcript_list.page_details.next_url)
+    def list_by_url(url: nil, request_options: nil)
+      url = "/v2/transcript" if url.nil?
+      response = @request_client.conn.get(url) do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+      end
+      Transcripts::TranscriptList.from_json(json_object: response.body)
+    end
+  end
+
+  # :nodoc:
+  class AsyncTranscriptsClient
+    # Retrieve a list of transcripts you created
+    #
+    # @param url [String] The URL to retrieve the transcript list from
+    # @param request_options [RequestOptions]
+    # @return [Transcripts::TranscriptList]
+    #
+    # @example Retrieve the next page of results
+    #  client = AssemblyAI::AsyncClient.new(api_key: "YOUR_API_KEY")
+    #  Sync do
+    #    transcript_list = client.transcripts.list(limit: 1).wait
+    #    client.transcripts.list_by_url(url: transcript_list.page_details.next_url)
+    #  end
+    def list_by_url(url: nil, request_options: nil)
+      Async do
+        url = "/v2/transcript" if url.nil?
+        response = @request_client.conn.get(url) do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        end
+        Transcripts::TranscriptList.from_json(json_object: response.body)
+      end
+    end
+  end
+end
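The new list_by_url call is the pagination companion to list: each page's page_details.next_url points at the following page, and passing no url falls back to /v2/transcript, so it can also serve as a plain first-page request. A rough pagination sketch based on the @example comments above; it assumes TranscriptList exposes the page's items through a transcripts reader and that page_details.next_url is nil once the last page is reached:

client = AssemblyAI::Client.new(api_key: "YOUR_API_KEY")
page = client.transcripts.list(limit: 100)    # first page
all_items = page.transcripts.dup              # assumed reader for the transcripts on the current page
while page.page_details.next_url              # assumed to be nil on the last page
  page = client.transcripts.list_by_url(url: page.page_details.next_url)
  all_items.concat(page.transcripts)
end
puts "fetched #{all_items.length} transcripts"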
data/lib/assemblyai/transcripts/polling_client.rb
CHANGED
@@ -26,7 +26,8 @@ module AssemblyAI
     # Create a transcript from an audio or video file that is accessible via a URL.
     # .transcribe polls for completion of the transcription, while the .submit function does not.
     #
-    # @param
+    # @param speech_model [Transcripts::SpeechModel]
+    # @param language_code [Transcripts::TranscriptLanguageCode]
     # @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
     # @param format_text [Boolean] Enable Text Formatting, can be true or false
     # @param dual_channel [Boolean] Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.
@@ -37,13 +38,13 @@ module AssemblyAI
     # @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
     # @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
     # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
-    # @param boost_param [
+    # @param boost_param [Transcripts::TranscriptBoostParam] The word boost parameter value
     # @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
     # @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or false
     # @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
-    # @param redact_pii_audio_quality [
-    # @param redact_pii_policies [Array<Transcripts::
-    # @param redact_pii_sub [
+    # @param redact_pii_audio_quality [Transcripts::RedactPiiAudioQuality] Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+    # @param redact_pii_policies [Array<Transcripts::PiiPolicy>] The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+    # @param redact_pii_sub [Transcripts::SubstitutionPolicy]
     # @param speaker_labels [Boolean] Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
     # @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
     # @param content_safety [Boolean] Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
@@ -60,8 +61,8 @@ module AssemblyAI
     # @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
     # Valid values are in the range [0, 1] inclusive.
     # @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
-    # @param summary_model [
-    # @param summary_type [
+    # @param summary_model [Transcripts::SummaryModel] The model to summarize the transcript
+    # @param summary_type [Transcripts::SummaryType] The type of summary
     # @param custom_topics [Boolean] Whether custom topics is enabled, either true or false
     # @param topics [Array<String>] The list of custom topics provided, if custom topics is enabled
     # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
@@ -69,10 +70,10 @@ module AssemblyAI
     # @param request_options [RequestOptions]
     # @param polling_options [Transcripts::PollingOptions] Configuration options for polling requests.
     # @return [Transcripts::Transcript]
-    def transcribe(audio_url:,
-                   webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
-      transcript = submit(audio_url: audio_url, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel,
-                          webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: additional_properties, request_options: request_options)
+    def transcribe(audio_url:, speech_model: nil, language_code: nil, punctuate: nil, format_text: nil, dual_channel: nil,
+                   webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
+      transcript = submit(audio_url: audio_url, speech_model: speech_model, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel,
+                          webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: additional_properties, request_options: request_options)
       poll_transcript(transcript_id: transcript.id, polling_options: polling_options)
     end
 
@@ -81,7 +82,7 @@ module AssemblyAI
       timeout_in_seconds = polling_options.timeout / 1000 if polling_options.timeout.positive?
       loop do
         transcript = get(transcript_id: transcript_id)
-        if transcript.status ==
+        if transcript.status == Transcripts::TranscriptStatus::COMPLETED || transcript.status == Transcripts::TranscriptStatus::ERROR
           return transcript
         elsif polling_options.timeout.positive? && Time.now - start_time > timeout_in_seconds
           raise StandardError, "Polling timeout"
@@ -99,7 +100,8 @@ module AssemblyAI
     # Create a transcript from an audio or video file that is accessible via a URL.
     # .transcribe polls for completion of the transcription, while the .submit function does not.
     #
-    # @param
+    # @param speech_model [Transcripts::SpeechModel]
+    # @param language_code [Transcripts::TranscriptLanguageCode]
     # @param punctuate [Boolean] Enable Automatic Punctuation, can be true or false
     # @param format_text [Boolean] Enable Text Formatting, can be true or false
     # @param dual_channel [Boolean] Enable [Dual Channel](https://www.assemblyai.com/docs/models/speech-recognition#dual-channel-transcription) transcription, can be true or false.
@@ -110,13 +112,13 @@ module AssemblyAI
     # @param audio_start_from [Integer] The point in time, in milliseconds, to begin transcribing in your media file
     # @param audio_end_at [Integer] The point in time, in milliseconds, to stop transcribing in your media file
     # @param word_boost [Array<String>] The list of custom vocabulary to boost transcription probability for
-    # @param boost_param [
+    # @param boost_param [Transcripts::TranscriptBoostParam] The word boost parameter value
     # @param filter_profanity [Boolean] Filter profanity from the transcribed text, can be true or false
     # @param redact_pii [Boolean] Redact PII from the transcribed text using the Redact PII model, can be true or false
     # @param redact_pii_audio [Boolean] Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
-    # @param redact_pii_audio_quality [
-    # @param redact_pii_policies [Array<Transcripts::
-    # @param redact_pii_sub [
+    # @param redact_pii_audio_quality [Transcripts::RedactPiiAudioQuality] Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+    # @param redact_pii_policies [Array<Transcripts::PiiPolicy>] The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+    # @param redact_pii_sub [Transcripts::SubstitutionPolicy]
     # @param speaker_labels [Boolean] Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
     # @param speakers_expected [Integer] Tells the speaker label model how many speakers it should attempt to identify, up to 10. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
     # @param content_safety [Boolean] Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
@@ -133,20 +135,20 @@ module AssemblyAI
     # @param speech_threshold [Float] Reject audio files that contain less than this fraction of speech.
     # Valid values are in the range [0, 1] inclusive.
     # @param summarization [Boolean] Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
-    # @param summary_model [
-    # @param summary_type [
+    # @param summary_model [Transcripts::SummaryModel] The model to summarize the transcript
+    # @param summary_type [Transcripts::SummaryType] The type of summary
     # @param custom_topics [Boolean] Whether custom topics is enabled, either true or false
     # @param topics [Array<String>] The list of custom topics provided, if custom topics is enabled
     # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
     # @param audio_url [String] The URL of the audio or video file to transcribe.
     # @param request_options [RequestOptions]
-    # @param polling_options [PollingOptions] Configuration options for polling requests.
+    # @param polling_options [Transcripts::PollingOptions] Configuration options for polling requests.
     # @return [Transcripts::Transcript]
-    def transcribe(audio_url:,
-                   webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
+    def transcribe(audio_url:, speech_model: nil, language_code: nil, punctuate: nil, format_text: nil, dual_channel: nil,
+                   webhook_url: nil, webhook_auth_header_name: nil, webhook_auth_header_value: nil, auto_highlights: nil, audio_start_from: nil, audio_end_at: nil, word_boost: nil, boost_param: nil, filter_profanity: nil, redact_pii: nil, redact_pii_audio: nil, redact_pii_audio_quality: nil, redact_pii_policies: nil, redact_pii_sub: nil, speaker_labels: nil, speakers_expected: nil, content_safety: nil, content_safety_confidence: nil, iab_categories: nil, language_detection: nil, custom_spelling: nil, disfluencies: nil, sentiment_analysis: nil, auto_chapters: nil, entity_detection: nil, speech_threshold: nil, summarization: nil, summary_model: nil, summary_type: nil, custom_topics: nil, topics: nil, additional_properties: nil, request_options: nil, polling_options: Transcripts::PollingOptions.new)
       Async do
-        transcript = submit(audio_url: audio_url, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel,
-                            webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: additional_properties, request_options: request_options).wait
+        transcript = submit(audio_url: audio_url, speech_model: speech_model, language_code: language_code, punctuate: punctuate, format_text: format_text, dual_channel: dual_channel,
+                            webhook_url: webhook_url, webhook_auth_header_name: webhook_auth_header_name, webhook_auth_header_value: webhook_auth_header_value, auto_highlights: auto_highlights, audio_start_from: audio_start_from, audio_end_at: audio_end_at, word_boost: word_boost, boost_param: boost_param, filter_profanity: filter_profanity, redact_pii: redact_pii, redact_pii_audio: redact_pii_audio, redact_pii_audio_quality: redact_pii_audio_quality, redact_pii_policies: redact_pii_policies, redact_pii_sub: redact_pii_sub, speaker_labels: speaker_labels, speakers_expected: speakers_expected, content_safety: content_safety, content_safety_confidence: content_safety_confidence, iab_categories: iab_categories, language_detection: language_detection, custom_spelling: custom_spelling, disfluencies: disfluencies, sentiment_analysis: sentiment_analysis, auto_chapters: auto_chapters, entity_detection: entity_detection, speech_threshold: speech_threshold, summarization: summarization, summary_model: summary_model, summary_type: summary_type, custom_topics: custom_topics, topics: topics, additional_properties: additional_properties, request_options: request_options).wait
         poll_transcript(transcript_id: transcript.id, polling_options: polling_options).wait
       end
     end
@@ -157,7 +159,7 @@ module AssemblyAI
       timeout_in_seconds = polling_options.timeout / 1000 if polling_options.timeout.positive?
       loop do
         transcript = get(transcript_id: transcript_id).wait
-        if transcript.status ==
+        if transcript.status == Transcripts::TranscriptStatus::COMPLETED || transcript.status == Transcripts::TranscriptStatus::ERROR
           break transcript
         elsif polling_options.timeout.positive? && Time.now - start_time > timeout_in_seconds
           raise StandardError, "Polling timeout"
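The widened transcribe signature now forwards speech_model, language_code, and webhook_url through submit before polling, and poll_transcript only returns once the status comparison above sees completed or error (or the polling timeout is exceeded). A minimal sketch of the blocking path, with placeholder URLs; speech_model and language_code take values from the Transcripts::SpeechModel and Transcripts::TranscriptLanguageCode enums and are omitted here:

client = AssemblyAI::Client.new(api_key: "YOUR_API_KEY")
transcript = client.transcripts.transcribe(
  audio_url: "https://example.com/meeting.mp3",          # placeholder media URL
  webhook_url: "https://example.com/transcript-webhook"  # placeholder endpoint; see TranscriptReadyNotification below
)
puts transcript.status  # completed or error by the time transcribe returns

Callers that do not want to block can keep using submit and rely on the webhook notification instead.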
data/lib/assemblyai/transcripts/types/polling_options.rb
CHANGED
@@ -1,21 +1,20 @@
 # frozen_string_literal: true
 
 module AssemblyAI
-
-
-
-
-
-
-
-
-
-
-
-
-
-end
+  class Transcripts
+    # Configuration options for polling requests.
+    class PollingOptions
+      attr_reader :interval, :timeout
+
+      # @param interval [Integer] The amount of time to wait between polling requests, in milliseconds. Defaults to 3000.
+      # @param timeout [Integer] The maximum amount of time to wait for the transcript to be ready, in milliseconds. Defaults to -1, which means poll forever.
+      # @return [Transcripts::PollingOptions]
+      def initialize(interval: 3000, timeout: -1)
+        # @type [Integer] The amount of time to wait between polling requests, in milliseconds.
+        @interval = interval
+        # @type [Integer] The maximum amount of time to wait for the transcript to be ready, in milliseconds.
+        @timeout = timeout
       end
     end
   end
-
+end
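PollingOptions is what the transcribe loop above consults: interval is the wait between polls in milliseconds (default 3000) and timeout is the overall budget in milliseconds (default -1, meaning poll forever); once the budget is exceeded the loop raises a StandardError with the message "Polling timeout". A small sketch of overriding the defaults, with a placeholder audio URL:

options = AssemblyAI::Transcripts::PollingOptions.new(
  interval: 1000,    # poll once per second instead of every 3 seconds
  timeout: 180_000   # give up after 3 minutes; -1 would wait indefinitely
)
transcript = client.transcripts.transcribe(
  audio_url: "https://example.com/interview.wav",  # placeholder media URL
  polling_options: options
)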
data/lib/assemblyai/transcripts/types/transcript_ready_notification.rb
ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+require_relative "transcript_ready_status"
+require "json"
+
+module AssemblyAI
+  class Transcripts
+    # The notification when the transcript status is completed or error.
+    class TranscriptReadyNotification
+      attr_reader :transcript_id, :status, :additional_properties
+
+      # @param transcript_id [String] The ID of the transcript
+      # @param status [Transcripts::TranscriptReadyStatus] The status of the transcript. Either completed or error.
+      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+      # @return [Transcripts::TranscriptReadyNotification]
+      def initialize(transcript_id:, status:, additional_properties: nil)
+        # @type [String] The ID of the transcript
+        @transcript_id = transcript_id
+        # @type [Transcripts::TranscriptReadyStatus] The status of the transcript. Either completed or error.
+        @status = status
+        # @type [OpenStruct] Additional properties unmapped to the current class definition
+        @additional_properties = additional_properties
+      end
+
+      # Deserialize a JSON object to an instance of TranscriptReadyNotification
+      #
+      # @param json_object [JSON]
+      # @return [Transcripts::TranscriptReadyNotification]
+      def self.from_json(json_object:)
+        struct = JSON.parse(json_object, object_class: OpenStruct)
+        JSON.parse(json_object)
+        transcript_id = struct.transcript_id
+        status = struct.status
+        new(transcript_id: transcript_id, status: status, additional_properties: struct)
+      end
+
+      # Serialize an instance of TranscriptReadyNotification to a JSON object
+      #
+      # @return [JSON]
+      def to_json(*_args)
+        { "transcript_id": @transcript_id, "status": @status }.to_json
+      end
+
+      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
+      #
+      # @param obj [Object]
+      # @return [Void]
+      def self.validate_raw(obj:)
+        obj.transcript_id.is_a?(String) != false || raise("Passed value for field obj.transcript_id is not the expected type, validation failed.")
+        obj.status.is_a?(Transcripts::TranscriptReadyStatus) != false || raise("Passed value for field obj.status is not the expected type, validation failed.")
+      end
+    end
+  end
+end
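TranscriptReadyNotification models the JSON body AssemblyAI posts to a configured webhook_url once a transcript reaches completed or error. A rough webhook-handler sketch; request_body stands in for the raw JSON string your web framework hands you, and the "completed" comparison assumes the wire values named in the docstrings above (the companion TranscriptReadyStatus enum, also added in this release, defines error as the other value):

notification = AssemblyAI::Transcripts::TranscriptReadyNotification.from_json(json_object: request_body)
if notification.status.to_s == "completed"  # assumed wire value; "error" is the other documented status
  transcript = client.transcripts.get(transcript_id: notification.transcript_id)
  puts transcript.text
else
  warn "transcript #{notification.transcript_id} finished with an error"
end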
data/lib/assemblyai.rb
CHANGED
@@ -6,6 +6,7 @@ require_relative "requests"
 require_relative "assemblyai/files/client"
 require_relative "assemblyai/transcripts/client"
 require_relative "assemblyai/transcripts/polling_client"
+require_relative "assemblyai/transcripts/list_by_url_client"
 require_relative "assemblyai/realtime/client"
 require_relative "assemblyai/lemur/client"
 
data/lib/requests.rb
CHANGED
@@ -20,7 +20,7 @@ module AssemblyAI
      @headers = {
        "X-Fern-Language": "Ruby",
        "X-Fern-SDK-Name": "AssemblyAI",
-       "X-Fern-SDK-Version": "1.0.0-beta.
+       "X-Fern-SDK-Version": "1.0.0-beta.3",
        "Authorization": api_key.to_s
      }
      @conn = Faraday.new(@base_url, headers: @headers) do |faraday|
@@ -46,7 +46,7 @@ module AssemblyAI
      @headers = {
        "X-Fern-Language": "Ruby",
        "X-Fern-SDK-Name": "AssemblyAI",
-       "X-Fern-SDK-Version": "1.0.0-beta.
+       "X-Fern-SDK-Version": "1.0.0-beta.3",
        "Authorization": api_key.to_s
      }
      @conn = Faraday.new(@base_url, headers: @headers) do |faraday|
data/lib/types_export.rb
CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 require_relative "assemblyai/files/types/uploaded_file"
+require_relative "assemblyai/transcripts/types/transcript_ready_notification"
 require_relative "assemblyai/transcripts/types/redacted_audio_response"
 require_relative "assemblyai/transcripts/types/redacted_audio_status"
 require_relative "assemblyai/transcripts/types/subtitle_format"
@@ -20,6 +21,7 @@ require_relative "assemblyai/transcripts/types/pii_policy"
 require_relative "assemblyai/transcripts/types/speech_model"
 require_relative "assemblyai/transcripts/types/transcript_language_code"
 require_relative "assemblyai/transcripts/types/transcript_status"
+require_relative "assemblyai/transcripts/types/transcript_ready_status"
 require_relative "assemblyai/transcripts/types/transcript"
 require_relative "assemblyai/transcripts/types/topic_detection_model_result"
 require_relative "assemblyai/transcripts/types/content_safety_labels_result"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: assemblyai
 version: !ruby/object:Gem::Version
-  version: 1.0.0.pre.beta.
+  version: 1.0.0.pre.beta.3
 platform: ruby
 authors:
 - ''
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-03-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: async-http-faraday
@@ -132,6 +132,7 @@ files:
 - lib/assemblyai/realtime/types/terminate_session.rb
 - lib/assemblyai/realtime/types/word.rb
 - lib/assemblyai/transcripts/client.rb
+- lib/assemblyai/transcripts/list_by_url_client.rb
 - lib/assemblyai/transcripts/polling_client.rb
 - lib/assemblyai/transcripts/types/audio_intelligence_model_status.rb
 - lib/assemblyai/transcripts/types/auto_highlight_result.rb
@@ -170,6 +171,8 @@ files:
 - lib/assemblyai/transcripts/types/transcript_list_item.rb
 - lib/assemblyai/transcripts/types/transcript_optional_params.rb
 - lib/assemblyai/transcripts/types/transcript_paragraph.rb
+- lib/assemblyai/transcripts/types/transcript_ready_notification.rb
+- lib/assemblyai/transcripts/types/transcript_ready_status.rb
 - lib/assemblyai/transcripts/types/transcript_sentence.rb
 - lib/assemblyai/transcripts/types/transcript_status.rb
 - lib/assemblyai/transcripts/types/transcript_utterance.rb