assemblyai 1.0.0.pre.beta.7 → 1.0.0.pre.beta.9

Sign up to get free protection for your applications and to get access to all the features.
Files changed (79) hide show
  1. checksums.yaml +4 -4
  2. data/lib/assemblyai/files/client.rb +4 -2
  3. data/lib/assemblyai/files/types/uploaded_file.rb +20 -11
  4. data/lib/assemblyai/lemur/client.rb +260 -150
  5. data/lib/assemblyai/lemur/types/lemur_action_items_response.rb +28 -14
  6. data/lib/assemblyai/lemur/types/lemur_base_params.rb +83 -55
  7. data/lib/assemblyai/lemur/types/lemur_base_params_context.rb +11 -5
  8. data/lib/assemblyai/lemur/types/lemur_base_response.rb +20 -11
  9. data/lib/assemblyai/lemur/types/lemur_question.rb +57 -32
  10. data/lib/assemblyai/lemur/types/lemur_question_answer.rb +28 -14
  11. data/lib/assemblyai/lemur/types/lemur_question_answer_response.rb +29 -15
  12. data/lib/assemblyai/lemur/types/lemur_question_context.rb +13 -6
  13. data/lib/assemblyai/lemur/types/lemur_summary_response.rb +28 -14
  14. data/lib/assemblyai/lemur/types/lemur_task_response.rb +28 -14
  15. data/lib/assemblyai/lemur/types/purge_lemur_request_data_response.rb +32 -17
  16. data/lib/assemblyai/realtime/client.rb +30 -14
  17. data/lib/assemblyai/realtime/types/configure_end_utterance_silence_threshold.rb +24 -13
  18. data/lib/assemblyai/realtime/types/final_transcript.rb +73 -42
  19. data/lib/assemblyai/realtime/types/force_end_utterance.rb +20 -11
  20. data/lib/assemblyai/realtime/types/message_type.rb +1 -0
  21. data/lib/assemblyai/realtime/types/partial_transcript.rb +63 -35
  22. data/lib/assemblyai/realtime/types/realtime_base_message.rb +22 -13
  23. data/lib/assemblyai/realtime/types/realtime_base_transcript.rb +57 -31
  24. data/lib/assemblyai/realtime/types/realtime_error.rb +20 -11
  25. data/lib/assemblyai/realtime/types/realtime_message.rb +44 -18
  26. data/lib/assemblyai/realtime/types/realtime_temporary_token_response.rb +20 -11
  27. data/lib/assemblyai/realtime/types/realtime_transcript.rb +15 -9
  28. data/lib/assemblyai/realtime/types/session_begins.rb +32 -15
  29. data/lib/assemblyai/realtime/types/session_information.rb +69 -0
  30. data/lib/assemblyai/realtime/types/session_terminated.rb +21 -12
  31. data/lib/assemblyai/realtime/types/terminate_session.rb +20 -11
  32. data/lib/assemblyai/realtime/types/word.rb +36 -18
  33. data/lib/assemblyai/streaming/types/receive_message.rb +113 -0
  34. data/lib/assemblyai/{realtime → streaming}/types/send_message.rb +26 -14
  35. data/lib/assemblyai/streaming/types/streaming.rb +11 -0
  36. data/lib/assemblyai/transcripts/client.rb +50 -18
  37. data/lib/assemblyai/transcripts/list_by_url_client.rb +6 -4
  38. data/lib/assemblyai/transcripts/polling_client.rb +12 -2
  39. data/lib/assemblyai/transcripts/types/auto_highlight_result.rb +40 -19
  40. data/lib/assemblyai/transcripts/types/auto_highlights_result.rb +38 -14
  41. data/lib/assemblyai/transcripts/types/chapter.rb +40 -20
  42. data/lib/assemblyai/transcripts/types/content_safety_label.rb +32 -16
  43. data/lib/assemblyai/transcripts/types/content_safety_label_result.rb +51 -29
  44. data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +51 -29
  45. data/lib/assemblyai/transcripts/types/entity.rb +41 -21
  46. data/lib/assemblyai/transcripts/types/entity_type.rb +15 -0
  47. data/lib/assemblyai/transcripts/types/page_details.rb +62 -36
  48. data/lib/assemblyai/transcripts/types/paragraphs_response.rb +37 -19
  49. data/lib/assemblyai/transcripts/types/redact_pii_audio_quality.rb +4 -1
  50. data/lib/assemblyai/transcripts/types/redacted_audio_response.rb +29 -15
  51. data/lib/assemblyai/transcripts/types/sentences_response.rb +37 -19
  52. data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +63 -36
  53. data/lib/assemblyai/transcripts/types/severity_score_summary.rb +32 -16
  54. data/lib/assemblyai/transcripts/types/speech_model.rb +1 -0
  55. data/lib/assemblyai/transcripts/types/substitution_policy.rb +3 -1
  56. data/lib/assemblyai/transcripts/types/timestamp.rb +28 -14
  57. data/lib/assemblyai/transcripts/types/topic_detection_model_result.rb +39 -20
  58. data/lib/assemblyai/transcripts/types/topic_detection_result.rb +40 -21
  59. data/lib/assemblyai/transcripts/types/topic_detection_result_labels_item.rb +31 -15
  60. data/lib/assemblyai/transcripts/types/transcript.rb +513 -295
  61. data/lib/assemblyai/transcripts/types/transcript_custom_spelling.rb +30 -15
  62. data/lib/assemblyai/transcripts/types/transcript_language_code.rb +87 -4
  63. data/lib/assemblyai/transcripts/types/transcript_list.rb +33 -17
  64. data/lib/assemblyai/transcripts/types/transcript_list_item.rb +60 -35
  65. data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +338 -196
  66. data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +61 -34
  67. data/lib/assemblyai/transcripts/types/transcript_ready_notification.rb +30 -16
  68. data/lib/assemblyai/transcripts/types/transcript_sentence.rb +61 -34
  69. data/lib/assemblyai/transcripts/types/transcript_status.rb +2 -1
  70. data/lib/assemblyai/transcripts/types/transcript_utterance.rb +55 -32
  71. data/lib/assemblyai/transcripts/types/transcript_word.rb +55 -24
  72. data/lib/assemblyai/transcripts/types/word_search_match.rb +40 -20
  73. data/lib/assemblyai/transcripts/types/word_search_response.rb +36 -17
  74. data/lib/assemblyai/types/error.rb +32 -16
  75. data/lib/requests.rb +80 -34
  76. data/lib/types_export.rb +4 -3
  77. metadata +6 -5
  78. data/lib/assemblyai/realtime/types/audio_data.rb +0 -7
  79. data/lib/assemblyai/realtime/types/receive_message.rb +0 -87
@@ -13,35 +13,45 @@ require "async"
13
13
 
14
14
  module AssemblyAI
15
15
  class LemurClient
16
+ # @return [AssemblyAI::RequestClient]
16
17
  attr_reader :request_client
17
18
 
18
- # @param request_client [RequestClient]
19
- # @return [LemurClient]
19
+ # @param request_client [AssemblyAI::RequestClient]
20
+ # @return [AssemblyAI::LemurClient]
20
21
  def initialize(request_client:)
21
- # @type [RequestClient]
22
22
  @request_client = request_client
23
23
  end
24
24
 
25
25
  # Use the LeMUR task endpoint to input your own LLM prompt.
26
26
  #
27
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
28
- # Use either transcript_ids or input_text as input into LeMUR.
29
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
30
- # Use either transcript_ids or input_text as input into LeMUR.
27
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
28
+ # hours, whichever is lower.
29
+ # Use either transcript_ids or input_text as input into LeMUR.
30
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
31
+ # selected model, which defaults to 100000.
32
+ # Use either transcript_ids or input_text as input into LeMUR.
31
33
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
32
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
33
- # Defaults to "default".
34
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
35
+ # Defaults to "default".
34
36
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
35
37
  # @param temperature [Float] The temperature to use for the model.
36
- # Higher values result in answers that are more creative, lower values are more conservative.
37
- # Can be any value between 0.0 and 1.0 inclusive.
38
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
39
- # @param prompt [String] Your text to prompt the model to produce a desired output, including any context you want to pass into the model.
40
- # @param request_options [RequestOptions]
41
- # @return [Lemur::LemurTaskResponse]
38
+ # Higher values result in answers that are more creative, lower values are more
39
+ # conservative.
40
+ # Can be any value between 0.0 and 1.0 inclusive.
41
+ # @param prompt [String] Your text to prompt the model to produce a desired output, including any context
42
+ # you want to pass into the model.
43
+ # @param request_options [AssemblyAI::RequestOptions]
44
+ # @return [AssemblyAI::Lemur::LemurTaskResponse]
45
+ # @example
46
+ # api = AssemblyAI::Client.new(
47
+ # environment: Environment::DEFAULT,
48
+ # base_url: "https://api.example.com",
49
+ # api_key: "YOUR_API_KEY"
50
+ # )
51
+ # api.task(prompt: "List all the locations affected by wildfires.")
42
52
  def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
43
- temperature: nil, additional_properties: nil, request_options: nil)
44
- response = @request_client.conn.post("/lemur/v3/generate/task") do |req|
53
+ temperature: nil, request_options: nil)
54
+ response = @request_client.conn.post do |req|
45
55
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
46
56
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
47
57
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -53,33 +63,46 @@ module AssemblyAI
53
63
  final_model: final_model,
54
64
  max_output_size: max_output_size,
55
65
  temperature: temperature,
56
- additional_properties: additional_properties,
57
66
  prompt: prompt
58
67
  }.compact
68
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/task"
59
69
  end
60
- Lemur::LemurTaskResponse.from_json(json_object: response.body)
70
+ AssemblyAI::Lemur::LemurTaskResponse.from_json(json_object: response.body)
61
71
  end
62
72
 
63
- # Custom Summary allows you to distill a piece of audio into a few impactful sentences. You can give the model context to obtain more targeted results while outputting the results in a variety of formats described in human language.
73
+ # Custom Summary allows you to distill a piece of audio into a few impactful
74
+ # sentences.
75
+ # You can give the model context to obtain more targeted results while outputting
76
+ # the results in a variety of formats described in human language.
64
77
  #
65
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
66
- # Use either transcript_ids or input_text as input into LeMUR.
67
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
68
- # Use either transcript_ids or input_text as input into LeMUR.
78
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
79
+ # hours, whichever is lower.
80
+ # Use either transcript_ids or input_text as input into LeMUR.
81
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
82
+ # selected model, which defaults to 100000.
83
+ # Use either transcript_ids or input_text as input into LeMUR.
69
84
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
70
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
71
- # Defaults to "default".
85
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
86
+ # Defaults to "default".
72
87
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
73
88
  # @param temperature [Float] The temperature to use for the model.
74
- # Higher values result in answers that are more creative, lower values are more conservative.
75
- # Can be any value between 0.0 and 1.0 inclusive.
76
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
77
- # @param answer_format [String] How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"
78
- # @param request_options [RequestOptions]
79
- # @return [Lemur::LemurSummaryResponse]
89
+ # Higher values result in answers that are more creative, lower values are more
90
+ # conservative.
91
+ # Can be any value between 0.0 and 1.0 inclusive.
92
+ # @param answer_format [String] How you want the summary to be returned. This can be any text. Examples: "TLDR",
93
+ # "bullet points"
94
+ # @param request_options [AssemblyAI::RequestOptions]
95
+ # @return [AssemblyAI::Lemur::LemurSummaryResponse]
96
+ # @example
97
+ # api = AssemblyAI::Client.new(
98
+ # environment: Environment::DEFAULT,
99
+ # base_url: "https://api.example.com",
100
+ # api_key: "YOUR_API_KEY"
101
+ # )
102
+ # api.summary
80
103
  def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
81
- temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
82
- response = @request_client.conn.post("/lemur/v3/generate/summary") do |req|
104
+ temperature: nil, answer_format: nil, request_options: nil)
105
+ response = @request_client.conn.post do |req|
83
106
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
84
107
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
85
108
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -91,37 +114,50 @@ module AssemblyAI
91
114
  final_model: final_model,
92
115
  max_output_size: max_output_size,
93
116
  temperature: temperature,
94
- additional_properties: additional_properties,
95
117
  answer_format: answer_format
96
118
  }.compact
119
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/summary"
97
120
  end
98
- Lemur::LemurSummaryResponse.from_json(json_object: response.body)
121
+ AssemblyAI::Lemur::LemurSummaryResponse.from_json(json_object: response.body)
99
122
  end
100
123
 
101
- # Question & Answer allows you to ask free-form questions about a single transcript or a group of transcripts. The questions can be any whose answers you find useful, such as judging whether a caller is likely to become a customer or whether all items on a meeting's agenda were covered.
124
+ # Question & Answer allows you to ask free-form questions about a single
125
+ # transcript or a group of transcripts.
126
+ # The questions can be any whose answers you find useful, such as judging whether
127
+ # a caller is likely to become a customer or whether all items on a meeting's
128
+ # agenda were covered.
102
129
  #
103
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
104
- # Use either transcript_ids or input_text as input into LeMUR.
105
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
106
- # Use either transcript_ids or input_text as input into LeMUR.
130
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
131
+ # hours, whichever is lower.
132
+ # Use either transcript_ids or input_text as input into LeMUR.
133
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
134
+ # selected model, which defaults to 100000.
135
+ # Use either transcript_ids or input_text as input into LeMUR.
107
136
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
108
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
109
- # Defaults to "default".
137
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
138
+ # Defaults to "default".
110
139
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
111
140
  # @param temperature [Float] The temperature to use for the model.
112
- # Higher values result in answers that are more creative, lower values are more conservative.
113
- # Can be any value between 0.0 and 1.0 inclusive.
114
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
115
- # @param questions [Array<Hash>] A list of questions to askRequest of type Array<Lemur::LemurQuestion>, as a Hash
141
+ # Higher values result in answers that are more creative, lower values are more
142
+ # conservative.
143
+ # Can be any value between 0.0 and 1.0 inclusive.
144
+ # @param questions [Array<Hash>] A list of questions to askRequest of type Array<AssemblyAI::Lemur::LemurQuestion>, as a Hash
116
145
  # * :question (String)
117
146
  # * :context (Hash)
118
147
  # * :answer_format (String)
119
148
  # * :answer_options (Array<String>)
120
- # @param request_options [RequestOptions]
121
- # @return [Lemur::LemurQuestionAnswerResponse]
149
+ # @param request_options [AssemblyAI::RequestOptions]
150
+ # @return [AssemblyAI::Lemur::LemurQuestionAnswerResponse]
151
+ # @example
152
+ # api = AssemblyAI::Client.new(
153
+ # environment: Environment::DEFAULT,
154
+ # base_url: "https://api.example.com",
155
+ # api_key: "YOUR_API_KEY"
156
+ # )
157
+ # api.question_answer(questions: [{ question: "Where are there wildfires?", answer_format: "List of countries in ISO 3166-1 alpha-2 format", answer_options: ["US", "CA"] }, { question: "Is global warming affecting wildfires?", answer_options: ["yes", "no"] }])
122
158
  def question_answer(questions:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
123
- temperature: nil, additional_properties: nil, request_options: nil)
124
- response = @request_client.conn.post("/lemur/v3/generate/question-answer") do |req|
159
+ temperature: nil, request_options: nil)
160
+ response = @request_client.conn.post do |req|
125
161
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
126
162
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
127
163
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -133,34 +169,43 @@ module AssemblyAI
133
169
  final_model: final_model,
134
170
  max_output_size: max_output_size,
135
171
  temperature: temperature,
136
- additional_properties: additional_properties,
137
172
  questions: questions
138
173
  }.compact
174
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/question-answer"
139
175
  end
140
- Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
176
+ AssemblyAI::Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
141
177
  end
142
178
 
143
179
  # Use LeMUR to generate a list of action items from a transcript
144
180
  #
145
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
146
- # Use either transcript_ids or input_text as input into LeMUR.
147
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
148
- # Use either transcript_ids or input_text as input into LeMUR.
181
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
182
+ # hours, whichever is lower.
183
+ # Use either transcript_ids or input_text as input into LeMUR.
184
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
185
+ # selected model, which defaults to 100000.
186
+ # Use either transcript_ids or input_text as input into LeMUR.
149
187
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
150
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
151
- # Defaults to "default".
188
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
189
+ # Defaults to "default".
152
190
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
153
191
  # @param temperature [Float] The temperature to use for the model.
154
- # Higher values result in answers that are more creative, lower values are more conservative.
155
- # Can be any value between 0.0 and 1.0 inclusive.
156
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
192
+ # Higher values result in answers that are more creative, lower values are more
193
+ # conservative.
194
+ # Can be any value between 0.0 and 1.0 inclusive.
157
195
  # @param answer_format [String] How you want the action items to be returned. This can be any text.
158
- # Defaults to "Bullet Points".
159
- # @param request_options [RequestOptions]
160
- # @return [Lemur::LemurActionItemsResponse]
196
+ # Defaults to "Bullet Points".
197
+ # @param request_options [AssemblyAI::RequestOptions]
198
+ # @return [AssemblyAI::Lemur::LemurActionItemsResponse]
199
+ # @example
200
+ # api = AssemblyAI::Client.new(
201
+ # environment: Environment::DEFAULT,
202
+ # base_url: "https://api.example.com",
203
+ # api_key: "YOUR_API_KEY"
204
+ # )
205
+ # api.action_items(answer_format: "Bullet Points")
161
206
  def action_items(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
162
- temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
163
- response = @request_client.conn.post("/lemur/v3/generate/action-items") do |req|
207
+ temperature: nil, answer_format: nil, request_options: nil)
208
+ response = @request_client.conn.post do |req|
164
209
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
165
210
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
166
211
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -172,60 +217,80 @@ module AssemblyAI
172
217
  final_model: final_model,
173
218
  max_output_size: max_output_size,
174
219
  temperature: temperature,
175
- additional_properties: additional_properties,
176
220
  answer_format: answer_format
177
221
  }.compact
222
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/action-items"
178
223
  end
179
- Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
224
+ AssemblyAI::Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
180
225
  end
181
226
 
182
227
  # Delete the data for a previously submitted LeMUR request.
183
- # The LLM response data, as well as any context provided in the original request will be removed.
228
+ # The LLM response data, as well as any context provided in the original request
229
+ # will be removed.
184
230
  #
185
- # @param request_id [String] The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.
186
- # @param request_options [RequestOptions]
187
- # @return [Lemur::PurgeLemurRequestDataResponse]
231
+ # @param request_id [String] The ID of the LeMUR request whose data you want to delete. This would be found
232
+ # in the response of the original request.
233
+ # @param request_options [AssemblyAI::RequestOptions]
234
+ # @return [AssemblyAI::Lemur::PurgeLemurRequestDataResponse]
235
+ # @example
236
+ # api = AssemblyAI::Client.new(
237
+ # environment: Environment::DEFAULT,
238
+ # base_url: "https://api.example.com",
239
+ # api_key: "YOUR_API_KEY"
240
+ # )
241
+ # api.purge_request_data(request_id: "request_id")
188
242
  def purge_request_data(request_id:, request_options: nil)
189
- response = @request_client.conn.delete("/lemur/v3/#{request_id}") do |req|
243
+ response = @request_client.conn.delete do |req|
190
244
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
191
245
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
192
246
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
247
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/#{request_id}"
193
248
  end
194
- Lemur::PurgeLemurRequestDataResponse.from_json(json_object: response.body)
249
+ AssemblyAI::Lemur::PurgeLemurRequestDataResponse.from_json(json_object: response.body)
195
250
  end
196
251
  end
197
252
 
198
253
  class AsyncLemurClient
254
+ # @return [AssemblyAI::AsyncRequestClient]
199
255
  attr_reader :request_client
200
256
 
201
- # @param request_client [AsyncRequestClient]
202
- # @return [AsyncLemurClient]
257
+ # @param request_client [AssemblyAI::AsyncRequestClient]
258
+ # @return [AssemblyAI::AsyncLemurClient]
203
259
  def initialize(request_client:)
204
- # @type [AsyncRequestClient]
205
260
  @request_client = request_client
206
261
  end
207
262
 
208
263
  # Use the LeMUR task endpoint to input your own LLM prompt.
209
264
  #
210
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
211
- # Use either transcript_ids or input_text as input into LeMUR.
212
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
213
- # Use either transcript_ids or input_text as input into LeMUR.
265
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
266
+ # hours, whichever is lower.
267
+ # Use either transcript_ids or input_text as input into LeMUR.
268
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
269
+ # selected model, which defaults to 100000.
270
+ # Use either transcript_ids or input_text as input into LeMUR.
214
271
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
215
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
216
- # Defaults to "default".
272
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
273
+ # Defaults to "default".
217
274
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
218
275
  # @param temperature [Float] The temperature to use for the model.
219
- # Higher values result in answers that are more creative, lower values are more conservative.
220
- # Can be any value between 0.0 and 1.0 inclusive.
221
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
222
- # @param prompt [String] Your text to prompt the model to produce a desired output, including any context you want to pass into the model.
223
- # @param request_options [RequestOptions]
224
- # @return [Lemur::LemurTaskResponse]
276
+ # Higher values result in answers that are more creative, lower values are more
277
+ # conservative.
278
+ # Can be any value between 0.0 and 1.0 inclusive.
279
+ # @param prompt [String] Your text to prompt the model to produce a desired output, including any context
280
+ # you want to pass into the model.
281
+ # @param request_options [AssemblyAI::RequestOptions]
282
+ # @return [AssemblyAI::Lemur::LemurTaskResponse]
283
+ # @example
284
+ # api = AssemblyAI::Client.new(
285
+ # environment: Environment::DEFAULT,
286
+ # base_url: "https://api.example.com",
287
+ # api_key: "YOUR_API_KEY"
288
+ # )
289
+ # api.task(prompt: "List all the locations affected by wildfires.")
225
290
  def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
226
- temperature: nil, additional_properties: nil, request_options: nil)
291
+ temperature: nil, request_options: nil)
227
292
  Async do
228
- response = @request_client.conn.post("/lemur/v3/generate/task") do |req|
293
+ response = @request_client.conn.post do |req|
229
294
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
230
295
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
231
296
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -237,35 +302,48 @@ module AssemblyAI
237
302
  final_model: final_model,
238
303
  max_output_size: max_output_size,
239
304
  temperature: temperature,
240
- additional_properties: additional_properties,
241
305
  prompt: prompt
242
306
  }.compact
307
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/task"
243
308
  end
244
- Lemur::LemurTaskResponse.from_json(json_object: response.body)
309
+ AssemblyAI::Lemur::LemurTaskResponse.from_json(json_object: response.body)
245
310
  end
246
311
  end
247
312
 
248
- # Custom Summary allows you to distill a piece of audio into a few impactful sentences. You can give the model context to obtain more targeted results while outputting the results in a variety of formats described in human language.
313
+ # Custom Summary allows you to distill a piece of audio into a few impactful
314
+ # sentences.
315
+ # You can give the model context to obtain more targeted results while outputting
316
+ # the results in a variety of formats described in human language.
249
317
  #
250
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
251
- # Use either transcript_ids or input_text as input into LeMUR.
252
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
253
- # Use either transcript_ids or input_text as input into LeMUR.
318
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
319
+ # hours, whichever is lower.
320
+ # Use either transcript_ids or input_text as input into LeMUR.
321
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
322
+ # selected model, which defaults to 100000.
323
+ # Use either transcript_ids or input_text as input into LeMUR.
254
324
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
255
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
256
- # Defaults to "default".
325
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
326
+ # Defaults to "default".
257
327
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
258
328
  # @param temperature [Float] The temperature to use for the model.
259
- # Higher values result in answers that are more creative, lower values are more conservative.
260
- # Can be any value between 0.0 and 1.0 inclusive.
261
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
262
- # @param answer_format [String] How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"
263
- # @param request_options [RequestOptions]
264
- # @return [Lemur::LemurSummaryResponse]
329
+ # Higher values result in answers that are more creative, lower values are more
330
+ # conservative.
331
+ # Can be any value between 0.0 and 1.0 inclusive.
332
+ # @param answer_format [String] How you want the summary to be returned. This can be any text. Examples: "TLDR",
333
+ # "bullet points"
334
+ # @param request_options [AssemblyAI::RequestOptions]
335
+ # @return [AssemblyAI::Lemur::LemurSummaryResponse]
336
+ # @example
337
+ # api = AssemblyAI::Client.new(
338
+ # environment: Environment::DEFAULT,
339
+ # base_url: "https://api.example.com",
340
+ # api_key: "YOUR_API_KEY"
341
+ # )
342
+ # api.summary
265
343
  def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
266
- temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
344
+ temperature: nil, answer_format: nil, request_options: nil)
267
345
  Async do
268
- response = @request_client.conn.post("/lemur/v3/generate/summary") do |req|
346
+ response = @request_client.conn.post do |req|
269
347
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
270
348
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
271
349
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -277,39 +355,52 @@ module AssemblyAI
277
355
  final_model: final_model,
278
356
  max_output_size: max_output_size,
279
357
  temperature: temperature,
280
- additional_properties: additional_properties,
281
358
  answer_format: answer_format
282
359
  }.compact
360
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/summary"
283
361
  end
284
- Lemur::LemurSummaryResponse.from_json(json_object: response.body)
362
+ AssemblyAI::Lemur::LemurSummaryResponse.from_json(json_object: response.body)
285
363
  end
286
364
  end
287
365
 
288
- # Question & Answer allows you to ask free-form questions about a single transcript or a group of transcripts. The questions can be any whose answers you find useful, such as judging whether a caller is likely to become a customer or whether all items on a meeting's agenda were covered.
366
+ # Question & Answer allows you to ask free-form questions about a single
367
+ # transcript or a group of transcripts.
368
+ # The questions can be any whose answers you find useful, such as judging whether
369
+ # a caller is likely to become a customer or whether all items on a meeting's
370
+ # agenda were covered.
289
371
  #
290
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
291
- # Use either transcript_ids or input_text as input into LeMUR.
292
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
293
- # Use either transcript_ids or input_text as input into LeMUR.
372
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
373
+ # hours, whichever is lower.
374
+ # Use either transcript_ids or input_text as input into LeMUR.
375
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
376
+ # selected model, which defaults to 100000.
377
+ # Use either transcript_ids or input_text as input into LeMUR.
294
378
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
295
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
296
- # Defaults to "default".
379
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
380
+ # Defaults to "default".
297
381
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
298
382
  # @param temperature [Float] The temperature to use for the model.
299
- # Higher values result in answers that are more creative, lower values are more conservative.
300
- # Can be any value between 0.0 and 1.0 inclusive.
301
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
302
- # @param questions [Array<Hash>] A list of questions to askRequest of type Array<Lemur::LemurQuestion>, as a Hash
383
+ # Higher values result in answers that are more creative, lower values are more
384
+ # conservative.
385
+ # Can be any value between 0.0 and 1.0 inclusive.
386
+ # @param questions [Array<Hash>] A list of questions to ask. Request of type Array<AssemblyAI::Lemur::LemurQuestion>, as a Hash
303
387
  # * :question (String)
304
388
  # * :context (Hash)
305
389
  # * :answer_format (String)
306
390
  # * :answer_options (Array<String>)
307
- # @param request_options [RequestOptions]
308
- # @return [Lemur::LemurQuestionAnswerResponse]
391
+ # @param request_options [AssemblyAI::RequestOptions]
392
+ # @return [AssemblyAI::Lemur::LemurQuestionAnswerResponse]
393
+ # @example
394
+ # api = AssemblyAI::Client.new(
395
+ # environment: Environment::DEFAULT,
396
+ # base_url: "https://api.example.com",
397
+ # api_key: "YOUR_API_KEY"
398
+ # )
399
+ # api.question_answer(questions: [{ question: "Where are there wildfires?", answer_format: "List of countries in ISO 3166-1 alpha-2 format", answer_options: ["US", "CA"] }, { question: "Is global warming affecting wildfires?", answer_options: ["yes", "no"] }])
309
400
  def question_answer(questions:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
310
- temperature: nil, additional_properties: nil, request_options: nil)
401
+ temperature: nil, request_options: nil)
311
402
  Async do
312
- response = @request_client.conn.post("/lemur/v3/generate/question-answer") do |req|
403
+ response = @request_client.conn.post do |req|
313
404
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
314
405
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
315
406
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -321,36 +412,45 @@ module AssemblyAI
321
412
  final_model: final_model,
322
413
  max_output_size: max_output_size,
323
414
  temperature: temperature,
324
- additional_properties: additional_properties,
325
415
  questions: questions
326
416
  }.compact
417
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/question-answer"
327
418
  end
328
- Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
419
+ AssemblyAI::Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
329
420
  end
330
421
  end
331
422
 
332
423
  # Use LeMUR to generate a list of action items from a transcript
333
424
  #
334
- # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
335
- # Use either transcript_ids or input_text as input into LeMUR.
336
- # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
337
- # Use either transcript_ids or input_text as input into LeMUR.
425
+ # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100
426
+ # hours, whichever is lower.
427
+ # Use either transcript_ids or input_text as input into LeMUR.
428
+ # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the
429
+ # selected model, which defaults to 100000.
430
+ # Use either transcript_ids or input_text as input into LeMUR.
338
431
  # @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
339
- # @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
340
- # Defaults to "default".
432
+ # @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
433
+ # Defaults to "default".
341
434
  # @param max_output_size [Integer] Max output size in tokens, up to 4000
342
435
  # @param temperature [Float] The temperature to use for the model.
343
- # Higher values result in answers that are more creative, lower values are more conservative.
344
- # Can be any value between 0.0 and 1.0 inclusive.
345
- # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
436
+ # Higher values result in answers that are more creative, lower values are more
437
+ # conservative.
438
+ # Can be any value between 0.0 and 1.0 inclusive.
346
439
  # @param answer_format [String] How you want the action items to be returned. This can be any text.
347
- # Defaults to "Bullet Points".
348
- # @param request_options [RequestOptions]
349
- # @return [Lemur::LemurActionItemsResponse]
440
+ # Defaults to "Bullet Points".
441
+ # @param request_options [AssemblyAI::RequestOptions]
442
+ # @return [AssemblyAI::Lemur::LemurActionItemsResponse]
443
+ # @example
444
+ # api = AssemblyAI::Client.new(
445
+ # environment: Environment::DEFAULT,
446
+ # base_url: "https://api.example.com",
447
+ # api_key: "YOUR_API_KEY"
448
+ # )
449
+ # api.action_items(answer_format: "Bullet Points")
350
450
  def action_items(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
351
- temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
451
+ temperature: nil, answer_format: nil, request_options: nil)
352
452
  Async do
353
- response = @request_client.conn.post("/lemur/v3/generate/action-items") do |req|
453
+ response = @request_client.conn.post do |req|
354
454
  req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
355
455
  req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
356
456
  req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
@@ -362,28 +462,38 @@ module AssemblyAI
362
462
  final_model: final_model,
363
463
  max_output_size: max_output_size,
364
464
  temperature: temperature,
365
- additional_properties: additional_properties,
366
465
  answer_format: answer_format
367
466
  }.compact
467
+ req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/generate/action-items"
368
468
  end
369
- Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
469
+ AssemblyAI::Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
370
470
  end
371
471
  end
372
472
 
373
473
    # Delete the data for a previously submitted LeMUR request.
    # The LLM response data, as well as any context provided in the original request
    # will be removed.
    #
    # @param request_id [String] The ID of the LeMUR request whose data you want to delete. This would be found
    #  in the response of the original request.
    # @param request_options [AssemblyAI::RequestOptions]
    # @return [AssemblyAI::Lemur::PurgeLemurRequestDataResponse]
    # @example
    #  api = AssemblyAI::Client.new(
    #    environment: Environment::DEFAULT,
    #    base_url: "https://api.example.com",
    #    api_key: "YOUR_API_KEY"
    #  )
    #  api.purge_request_data(request_id: "request_id")
    def purge_request_data(request_id:, request_options: nil)
      Async do
        # request_options, when given, may override the timeout, API key, and extra headers.
        response = @request_client.conn.delete do |req|
          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
          # URL is resolved per-request so request_options can redirect the base URL.
          req.url "#{@request_client.get_url(request_options: request_options)}/lemur/v3/#{request_id}"
        end
        AssemblyAI::Lemur::PurgeLemurRequestDataResponse.from_json(json_object: response.body)
      end
    end
389
499
  end