assemblyai 1.0.0.pre.beta
This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package exactly as it appears in its public registry.
- checksums.yaml +7 -0
- data/lib/assemblyai/files/client.rb +63 -0
- data/lib/assemblyai/files/types/uploaded_file.rb +47 -0
- data/lib/assemblyai/lemur/client.rb +390 -0
- data/lib/assemblyai/lemur/types/lemur_action_items_response.rb +52 -0
- data/lib/assemblyai/lemur/types/lemur_base_params.rb +99 -0
- data/lib/assemblyai/lemur/types/lemur_base_params_context.rb +75 -0
- data/lib/assemblyai/lemur/types/lemur_base_response.rb +47 -0
- data/lib/assemblyai/lemur/types/lemur_model.rb +13 -0
- data/lib/assemblyai/lemur/types/lemur_question.rb +74 -0
- data/lib/assemblyai/lemur/types/lemur_question_answer.rb +53 -0
- data/lib/assemblyai/lemur/types/lemur_question_answer_response.rb +56 -0
- data/lib/assemblyai/lemur/types/lemur_question_context.rb +75 -0
- data/lib/assemblyai/lemur/types/lemur_summary_response.rb +52 -0
- data/lib/assemblyai/lemur/types/lemur_task_response.rb +52 -0
- data/lib/assemblyai/lemur/types/purge_lemur_request_data_response.rb +58 -0
- data/lib/assemblyai/realtime/client.rb +61 -0
- data/lib/assemblyai/realtime/types/audio_data.rb +7 -0
- data/lib/assemblyai/realtime/types/audio_encoding.rb +8 -0
- data/lib/assemblyai/realtime/types/final_transcript.rb +107 -0
- data/lib/assemblyai/realtime/types/message_type.rb +13 -0
- data/lib/assemblyai/realtime/types/partial_transcript.rb +94 -0
- data/lib/assemblyai/realtime/types/realtime_base_message.rb +48 -0
- data/lib/assemblyai/realtime/types/realtime_base_transcript.rb +87 -0
- data/lib/assemblyai/realtime/types/realtime_error.rb +47 -0
- data/lib/assemblyai/realtime/types/realtime_message.rb +115 -0
- data/lib/assemblyai/realtime/types/realtime_temporary_token_response.rb +47 -0
- data/lib/assemblyai/realtime/types/realtime_transcript.rb +76 -0
- data/lib/assemblyai/realtime/types/realtime_transcript_type.rb +8 -0
- data/lib/assemblyai/realtime/types/session_begins.rb +58 -0
- data/lib/assemblyai/realtime/types/session_terminated.rb +47 -0
- data/lib/assemblyai/realtime/types/terminate_session.rb +56 -0
- data/lib/assemblyai/realtime/types/word.rb +62 -0
- data/lib/assemblyai/transcripts/client.rb +525 -0
- data/lib/assemblyai/transcripts/polling_client.rb +173 -0
- data/lib/assemblyai/transcripts/types/audio_intelligence_model_status.rb +8 -0
- data/lib/assemblyai/transcripts/types/auto_highlight_result.rb +66 -0
- data/lib/assemblyai/transcripts/types/auto_highlights_result.rb +53 -0
- data/lib/assemblyai/transcripts/types/chapter.rb +68 -0
- data/lib/assemblyai/transcripts/types/content_safety_label.rb +57 -0
- data/lib/assemblyai/transcripts/types/content_safety_label_result.rb +84 -0
- data/lib/assemblyai/transcripts/types/content_safety_labels_result.rb +75 -0
- data/lib/assemblyai/transcripts/types/entity.rb +69 -0
- data/lib/assemblyai/transcripts/types/entity_type.rb +38 -0
- data/lib/assemblyai/transcripts/types/page_details.rb +74 -0
- data/lib/assemblyai/transcripts/types/paragraphs_response.rb +67 -0
- data/lib/assemblyai/transcripts/types/pii_policy.rb +36 -0
- data/lib/assemblyai/transcripts/types/polling_options.rb +21 -0
- data/lib/assemblyai/transcripts/types/redact_pii_audio_quality.rb +8 -0
- data/lib/assemblyai/transcripts/types/redacted_audio_response.rb +53 -0
- data/lib/assemblyai/transcripts/types/redacted_audio_status.rb +7 -0
- data/lib/assemblyai/transcripts/types/sentences_response.rb +67 -0
- data/lib/assemblyai/transcripts/types/sentiment.rb +8 -0
- data/lib/assemblyai/transcripts/types/sentiment_analysis_result.rb +82 -0
- data/lib/assemblyai/transcripts/types/severity_score_summary.rb +57 -0
- data/lib/assemblyai/transcripts/types/speech_model.rb +7 -0
- data/lib/assemblyai/transcripts/types/substitution_policy.rb +8 -0
- data/lib/assemblyai/transcripts/types/subtitle_format.rb +8 -0
- data/lib/assemblyai/transcripts/types/summary_model.rb +8 -0
- data/lib/assemblyai/transcripts/types/summary_type.rb +14 -0
- data/lib/assemblyai/transcripts/types/timestamp.rb +53 -0
- data/lib/assemblyai/transcripts/types/topic_detection_model_result.rb +68 -0
- data/lib/assemblyai/transcripts/types/topic_detection_result.rb +68 -0
- data/lib/assemblyai/transcripts/types/topic_detection_result_labels_item.rb +52 -0
- data/lib/assemblyai/transcripts/types/transcript.rb +454 -0
- data/lib/assemblyai/transcripts/types/transcript_boost_param.rb +8 -0
- data/lib/assemblyai/transcripts/types/transcript_custom_spelling.rb +53 -0
- data/lib/assemblyai/transcripts/types/transcript_language_code.rb +29 -0
- data/lib/assemblyai/transcripts/types/transcript_list.rb +62 -0
- data/lib/assemblyai/transcripts/types/transcript_list_item.rb +82 -0
- data/lib/assemblyai/transcripts/types/transcript_optional_params.rb +280 -0
- data/lib/assemblyai/transcripts/types/transcript_paragraph.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_sentence.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_status.rb +8 -0
- data/lib/assemblyai/transcripts/types/transcript_utterance.rb +84 -0
- data/lib/assemblyai/transcripts/types/transcript_word.rb +68 -0
- data/lib/assemblyai/transcripts/types/word_search_match.rb +63 -0
- data/lib/assemblyai/transcripts/types/word_search_response.rb +61 -0
- data/lib/assemblyai/transcripts/types/word_search_timestamp.rb +7 -0
- data/lib/assemblyai/types/error.rb +50 -0
- data/lib/assemblyai.rb +48 -0
- data/lib/environment.rb +7 -0
- data/lib/gemconfig.rb +14 -0
- data/lib/requests.rb +87 -0
- data/lib/types_export.rb +75 -0
- metadata +170 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: bc0caf7b84497b13453af130bf9ed562274853b8a40c27c84dbe0e485271f099
+  data.tar.gz: fa25a137746aeb43fd13427e7e8c7de382b0a6684744fa7c805f32b2c9ffcc45
+SHA512:
+  metadata.gz: 3e6c4c159194b38ed20d04228f253892ec24a4a4550a6ad1f29601a790689d833d5b608a5d8ba19a0fa8cf6585a8af1c5039a62f32d3b3c8664f2aceba68c2cf
+  data.tar.gz: 5bffb7383c0b3e0e7ff9c9128c3022db428746beed340222157c49cf6b993f95d1a7cd60e93843cbfd3cd17ff6c4e9e3029b10fb4746a2d5adbe099077eda58f
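The checksums.yaml above records SHA-256 and SHA-512 digests for the two archives that make up the gem (a .gem file is a plain tar containing metadata.gz and data.tar.gz). A minimal verification sketch using Ruby's standard library, assuming the .gem archive has been untarred into the current directory:

    require "digest"

    # Expected digests copied from the checksums.yaml shown above.
    expected = {
      "metadata.gz" => "bc0caf7b84497b13453af130bf9ed562274853b8a40c27c84dbe0e485271f099",
      "data.tar.gz" => "fa25a137746aeb43fd13427e7e8c7de382b0a6684744fa7c805f32b2c9ffcc45"
    }
    expected.each do |name, sha256|
      actual = Digest::SHA256.file(name).hexdigest
      raise "#{name}: checksum mismatch" unless actual == sha256
    end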
data/lib/assemblyai/files/client.rb
ADDED
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+require_relative "../../requests"
+require_relative "types/uploaded_file"
+require "async"
+
+module AssemblyAI
+  class FilesClient
+    attr_reader :request_client
+
+    # @param request_client [RequestClient]
+    # @return [FilesClient]
+    def initialize(request_client:)
+      # @type [RequestClient]
+      @request_client = request_client
+    end
+
+    # Upload your media file directly to the AssemblyAI API if it isn't accessible via a URL already.
+    #
+    # @param request [String] Base64 encoded bytes
+    # @param request_options [RequestOptions]
+    # @return [Files::UploadedFile]
+    def upload(request:, request_options: nil)
+      response = @request_client.conn.post("/v2/upload") do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        req.headers["Content-Type"] = "application/octet-stream"
+        req.body = { **(request || {}), **(request_options&.additional_body_parameters || {}) }.compact
+      end
+      Files::UploadedFile.from_json(json_object: response.body)
+    end
+  end
+
+  class AsyncFilesClient
+    attr_reader :request_client
+
+    # @param request_client [AsyncRequestClient]
+    # @return [AsyncFilesClient]
+    def initialize(request_client:)
+      # @type [AsyncRequestClient]
+      @request_client = request_client
+    end
+
+    # Upload your media file directly to the AssemblyAI API if it isn't accessible via a URL already.
+    #
+    # @param request [String] Base64 encoded bytes
+    # @param request_options [RequestOptions]
+    # @return [Files::UploadedFile]
+    def upload(request:, request_options: nil)
+      Async do
+        response = @request_client.conn.post("/v2/upload") do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+          req.headers["Content-Type"] = "application/octet-stream"
+          req.body = { **(request || {}), **(request_options&.additional_body_parameters || {}) }.compact
+        end
+        Files::UploadedFile.from_json(json_object: response.body)
+      end
+    end
+  end
+end
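FilesClient and AsyncFilesClient are thin wrappers around a shared HTTP connection (`@request_client.conn`); the async variant differs only by running the same request inside an `Async` block. A hypothetical usage sketch; the root client lives in data/lib/assemblyai.rb, which is listed in the manifest above but not shown here, so its constructor and the `files` accessor are assumptions:

    require "assemblyai"

    # Assumed entry point; see data/lib/assemblyai.rb in the full package.
    client = AssemblyAI::Client.new(api_key: ENV["ASSEMBLYAI_API_KEY"])
    uploaded = client.files.upload(request: File.binread("meeting.mp3"))
    uploaded.upload_url  # readable only by AssemblyAI's servers, per the doc comment above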
data/lib/assemblyai/files/types/uploaded_file.rb
ADDED
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+require "json"
+
+module AssemblyAI
+  class Files
+    class UploadedFile
+      attr_reader :upload_url, :additional_properties
+
+      # @param upload_url [String] A URL that points to your audio file, accessible only by AssemblyAI's servers
+      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+      # @return [Files::UploadedFile]
+      def initialize(upload_url:, additional_properties: nil)
+        # @type [String] A URL that points to your audio file, accessible only by AssemblyAI's servers
+        @upload_url = upload_url
+        # @type [OpenStruct] Additional properties unmapped to the current class definition
+        @additional_properties = additional_properties
+      end
+
+      # Deserialize a JSON object to an instance of UploadedFile
+      #
+      # @param json_object [JSON]
+      # @return [Files::UploadedFile]
+      def self.from_json(json_object:)
+        struct = JSON.parse(json_object, object_class: OpenStruct)
+        JSON.parse(json_object)
+        upload_url = struct.upload_url
+        new(upload_url: upload_url, additional_properties: struct)
+      end
+
+      # Serialize an instance of UploadedFile to a JSON object
+      #
+      # @return [JSON]
+      def to_json(*_args)
+        { "upload_url": @upload_url }.to_json
+      end
+
+      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
+      #
+      # @param obj [Object]
+      # @return [Void]
+      def self.validate_raw(obj:)
+        obj.upload_url.is_a?(String) != false || raise("Passed value for field obj.upload_url is not the expected type, validation failed.")
+      end
+    end
+  end
+end
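Because from_json keeps the entire parsed OpenStruct in additional_properties, JSON fields not yet mapped onto the class still survive in memory. A quick round-trip sketch grounded in the class above (the URL is illustrative):

    json = '{"upload_url": "https://cdn.assemblyai.com/upload/abc123"}'  # illustrative value
    file = AssemblyAI::Files::UploadedFile.from_json(json_object: json)
    file.upload_url  # => "https://cdn.assemblyai.com/upload/abc123"
    file.to_json     # => "{\"upload_url\":\"https://cdn.assemblyai.com/upload/abc123\"}"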
data/lib/assemblyai/lemur/client.rb
ADDED
@@ -0,0 +1,390 @@
+# frozen_string_literal: true
+
+require_relative "../../requests"
+require_relative "types/lemur_base_params_context"
+require_relative "types/lemur_model"
+require_relative "types/lemur_task_response"
+require_relative "types/lemur_summary_response"
+require_relative "types/lemur_question"
+require_relative "types/lemur_question_answer_response"
+require_relative "types/lemur_action_items_response"
+require_relative "types/purge_lemur_request_data_response"
+require "async"
+
+module AssemblyAI
+  class LemurClient
+    attr_reader :request_client
+
+    # @param request_client [RequestClient]
+    # @return [LemurClient]
+    def initialize(request_client:)
+      # @type [RequestClient]
+      @request_client = request_client
+    end
+
+    # Use the LeMUR task endpoint to input your own LLM prompt.
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param prompt [String] Your text to prompt the model to produce a desired output, including any context you want to pass into the model.
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurTaskResponse]
+    def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+             temperature: nil, additional_properties: nil, request_options: nil)
+      response = @request_client.conn.post("/lemur/v3/generate/task") do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        req.body = {
+          **(request_options&.additional_body_parameters || {}),
+          transcript_ids: transcript_ids,
+          input_text: input_text,
+          context: context,
+          final_model: final_model,
+          max_output_size: max_output_size,
+          temperature: temperature,
+          additional_properties: additional_properties,
+          prompt: prompt
+        }.compact
+      end
+      Lemur::LemurTaskResponse.from_json(json_object: response.body)
+    end
+
+    # Custom Summary allows you to distill a piece of audio into a few impactful sentences. You can give the model context to obtain more targeted results while outputting the results in a variety of formats described in human language.
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param answer_format [String] How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurSummaryResponse]
+    def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
+      response = @request_client.conn.post("/lemur/v3/generate/summary") do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        req.body = {
+          **(request_options&.additional_body_parameters || {}),
+          transcript_ids: transcript_ids,
+          input_text: input_text,
+          context: context,
+          final_model: final_model,
+          max_output_size: max_output_size,
+          temperature: temperature,
+          additional_properties: additional_properties,
+          answer_format: answer_format
+        }.compact
+      end
+      Lemur::LemurSummaryResponse.from_json(json_object: response.body)
+    end
+
+    # Question & Answer allows you to ask free-form questions about a single transcript or a group of transcripts. The questions can be any whose answers you find useful, such as judging whether a caller is likely to become a customer or whether all items on a meeting's agenda were covered.
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param questions [Array<Hash>] A list of questions to askRequest of type Array<Lemur::LemurQuestion>, as a Hash
+    #   * :question (String)
+    #   * :context (Hash)
+    #   * :answer_format (String)
+    #   * :answer_options (Array<String>)
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurQuestionAnswerResponse]
+    def question_answer(questions:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                        temperature: nil, additional_properties: nil, request_options: nil)
+      response = @request_client.conn.post("/lemur/v3/generate/question-answer") do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        req.body = {
+          **(request_options&.additional_body_parameters || {}),
+          transcript_ids: transcript_ids,
+          input_text: input_text,
+          context: context,
+          final_model: final_model,
+          max_output_size: max_output_size,
+          temperature: temperature,
+          additional_properties: additional_properties,
+          questions: questions
+        }.compact
+      end
+      Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
+    end
+
+    # Use LeMUR to generate a list of action items from a transcript
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param answer_format [String] How you want the action items to be returned. This can be any text.
+    #   Defaults to "Bullet Points".
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurActionItemsResponse]
+    def action_items(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                     temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
+      response = @request_client.conn.post("/lemur/v3/generate/action-items") do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        req.body = {
+          **(request_options&.additional_body_parameters || {}),
+          transcript_ids: transcript_ids,
+          input_text: input_text,
+          context: context,
+          final_model: final_model,
+          max_output_size: max_output_size,
+          temperature: temperature,
+          additional_properties: additional_properties,
+          answer_format: answer_format
+        }.compact
+      end
+      Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
+    end
+
+    # Delete the data for a previously submitted LeMUR request.
+    # The LLM response data, as well as any context provided in the original request will be removed.
+    #
+    # @param request_id [String] The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.
+    # @param request_options [RequestOptions]
+    # @return [Lemur::PurgeLemurRequestDataResponse]
+    def purge_request_data(request_id:, request_options: nil)
+      response = @request_client.conn.delete("/lemur/v3/#{request_id}") do |req|
+        req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+        req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+        req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+      end
+      Lemur::PurgeLemurRequestDataResponse.from_json(json_object: response.body)
+    end
+  end
+
+  class AsyncLemurClient
+    attr_reader :request_client
+
+    # @param request_client [AsyncRequestClient]
+    # @return [AsyncLemurClient]
+    def initialize(request_client:)
+      # @type [AsyncRequestClient]
+      @request_client = request_client
+    end
+
+    # Use the LeMUR task endpoint to input your own LLM prompt.
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param prompt [String] Your text to prompt the model to produce a desired output, including any context you want to pass into the model.
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurTaskResponse]
+    def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+             temperature: nil, additional_properties: nil, request_options: nil)
+      Async do
+        response = @request_client.conn.post("/lemur/v3/generate/task") do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+          req.body = {
+            **(request_options&.additional_body_parameters || {}),
+            transcript_ids: transcript_ids,
+            input_text: input_text,
+            context: context,
+            final_model: final_model,
+            max_output_size: max_output_size,
+            temperature: temperature,
+            additional_properties: additional_properties,
+            prompt: prompt
+          }.compact
+        end
+        Lemur::LemurTaskResponse.from_json(json_object: response.body)
+      end
+    end
+
+    # Custom Summary allows you to distill a piece of audio into a few impactful sentences. You can give the model context to obtain more targeted results while outputting the results in a variety of formats described in human language.
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param answer_format [String] How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurSummaryResponse]
+    def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
+      Async do
+        response = @request_client.conn.post("/lemur/v3/generate/summary") do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+          req.body = {
+            **(request_options&.additional_body_parameters || {}),
+            transcript_ids: transcript_ids,
+            input_text: input_text,
+            context: context,
+            final_model: final_model,
+            max_output_size: max_output_size,
+            temperature: temperature,
+            additional_properties: additional_properties,
+            answer_format: answer_format
+          }.compact
+        end
+        Lemur::LemurSummaryResponse.from_json(json_object: response.body)
+      end
+    end
+
+    # Question & Answer allows you to ask free-form questions about a single transcript or a group of transcripts. The questions can be any whose answers you find useful, such as judging whether a caller is likely to become a customer or whether all items on a meeting's agenda were covered.
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param questions [Array<Hash>] A list of questions to askRequest of type Array<Lemur::LemurQuestion>, as a Hash
+    #   * :question (String)
+    #   * :context (Hash)
+    #   * :answer_format (String)
+    #   * :answer_options (Array<String>)
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurQuestionAnswerResponse]
+    def question_answer(questions:, transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                        temperature: nil, additional_properties: nil, request_options: nil)
+      Async do
+        response = @request_client.conn.post("/lemur/v3/generate/question-answer") do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+          req.body = {
+            **(request_options&.additional_body_parameters || {}),
+            transcript_ids: transcript_ids,
+            input_text: input_text,
+            context: context,
+            final_model: final_model,
+            max_output_size: max_output_size,
+            temperature: temperature,
+            additional_properties: additional_properties,
+            questions: questions
+          }.compact
+        end
+        Lemur::LemurQuestionAnswerResponse.from_json(json_object: response.body)
+      end
+    end
+
+    # Use LeMUR to generate a list of action items from a transcript
+    #
+    # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+    #   Use either transcript_ids or input_text as input into LeMUR.
+    # @param context [Hash] Context to provide the model. This can be a string or a free-form JSON value.Request of type Lemur::LemurBaseParamsContext, as a Hash
+    # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+    #   Defaults to "default".
+    # @param max_output_size [Integer] Max output size in tokens, up to 4000
+    # @param temperature [Float] The temperature to use for the model.
+    #   Higher values result in answers that are more creative, lower values are more conservative.
+    #   Can be any value between 0.0 and 1.0 inclusive.
+    # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+    # @param answer_format [String] How you want the action items to be returned. This can be any text.
+    #   Defaults to "Bullet Points".
+    # @param request_options [RequestOptions]
+    # @return [Lemur::LemurActionItemsResponse]
+    def action_items(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                     temperature: nil, additional_properties: nil, answer_format: nil, request_options: nil)
+      Async do
+        response = @request_client.conn.post("/lemur/v3/generate/action-items") do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+          req.body = {
+            **(request_options&.additional_body_parameters || {}),
+            transcript_ids: transcript_ids,
+            input_text: input_text,
+            context: context,
+            final_model: final_model,
+            max_output_size: max_output_size,
+            temperature: temperature,
+            additional_properties: additional_properties,
+            answer_format: answer_format
+          }.compact
+        end
+        Lemur::LemurActionItemsResponse.from_json(json_object: response.body)
+      end
+    end
+
+    # Delete the data for a previously submitted LeMUR request.
+    # The LLM response data, as well as any context provided in the original request will be removed.
+    #
+    # @param request_id [String] The ID of the LeMUR request whose data you want to delete. This would be found in the response of the original request.
+    # @param request_options [RequestOptions]
+    # @return [Lemur::PurgeLemurRequestDataResponse]
+    def purge_request_data(request_id:, request_options: nil)
+      Async do
+        response = @request_client.conn.delete("/lemur/v3/#{request_id}") do |req|
+          req.options.timeout = request_options.timeout_in_seconds unless request_options&.timeout_in_seconds.nil?
+          req.headers["Authorization"] = request_options.api_key unless request_options&.api_key.nil?
+          req.headers = { **req.headers, **(request_options&.additional_headers || {}) }.compact
+        end
+        Lemur::PurgeLemurRequestDataResponse.from_json(json_object: response.body)
+      end
+    end
+  end
+end
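All four generate endpoints share one request-building pattern: merge any request_options extras first, add the endpoint-specific fields, then `.compact` away nils so unset optional parameters are omitted from the JSON body. A hypothetical call sketch, reusing the assumed root client from the upload example above and a placeholder transcript ID; the questions hash keys follow the doc comment (:question, :context, :answer_format, :answer_options):

    answers = client.lemur.question_answer(
      transcript_ids: ["<transcript-id>"],  # placeholder
      final_model: "default",
      questions: [
        {
          question: "Were all agenda items covered?",
          answer_format: "short sentence",
          answer_options: ["Yes", "No"]
        }
      ]
    )
    # The returned Lemur::LemurQuestionAnswerResponse (+56 lines, listed in the
    # manifest above) carries the answers plus the request_id needed later for
    # lemur.purge_request_data.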
data/lib/assemblyai/lemur/types/lemur_action_items_response.rb
ADDED
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+require "json"
+
+module AssemblyAI
+  class Lemur
+    class LemurActionItemsResponse
+      attr_reader :response, :request_id, :additional_properties
+
+      # @param response [String] The response generated by LeMUR
+      # @param request_id [String] The ID of the LeMUR request
+      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+      # @return [Lemur::LemurActionItemsResponse]
+      def initialize(response:, request_id:, additional_properties: nil)
+        # @type [String] The response generated by LeMUR
+        @response = response
+        # @type [String] The ID of the LeMUR request
+        @request_id = request_id
+        # @type [OpenStruct] Additional properties unmapped to the current class definition
+        @additional_properties = additional_properties
+      end
+
+      # Deserialize a JSON object to an instance of LemurActionItemsResponse
+      #
+      # @param json_object [JSON]
+      # @return [Lemur::LemurActionItemsResponse]
+      def self.from_json(json_object:)
+        struct = JSON.parse(json_object, object_class: OpenStruct)
+        JSON.parse(json_object)
+        response = struct.response
+        request_id = struct.request_id
+        new(response: response, request_id: request_id, additional_properties: struct)
+      end
+
+      # Serialize an instance of LemurActionItemsResponse to a JSON object
+      #
+      # @return [JSON]
+      def to_json(*_args)
+        { "response": @response, "request_id": @request_id }.to_json
+      end
+
+      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
+      #
+      # @param obj [Object]
+      # @return [Void]
+      def self.validate_raw(obj:)
+        obj.response.is_a?(String) != false || raise("Passed value for field obj.response is not the expected type, validation failed.")
+        obj.request_id.is_a?(String) != false || raise("Passed value for field obj.request_id is not the expected type, validation failed.")
+      end
+    end
+  end
+end
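A round-trip sketch grounded in the class above; to_json emits only the two mapped fields, while any extra keys in the source JSON remain reachable through additional_properties:

    json = '{"response": "- Follow up with the client", "request_id": "<request-id>"}'  # placeholder ID
    items = AssemblyAI::Lemur::LemurActionItemsResponse.from_json(json_object: json)
    items.response    # => "- Follow up with the client"
    items.request_id  # can be handed to lemur.purge_request_data later
    items.to_json     # => "{\"response\":\"- Follow up with the client\",\"request_id\":\"<request-id>\"}"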
data/lib/assemblyai/lemur/types/lemur_base_params.rb
ADDED
@@ -0,0 +1,99 @@
+# frozen_string_literal: true
+
+require_relative "lemur_base_params_context"
+require_relative "lemur_model"
+require "json"
+
+module AssemblyAI
+  class Lemur
+    class LemurBaseParams
+      attr_reader :transcript_ids, :input_text, :context, :final_model, :max_output_size, :temperature,
+                  :additional_properties
+
+      # @param transcript_ids [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+      #   Use either transcript_ids or input_text as input into LeMUR.
+      # @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+      #   Use either transcript_ids or input_text as input into LeMUR.
+      # @param context [Lemur::LemurBaseParamsContext] Context to provide the model. This can be a string or a free-form JSON value.
+      # @param final_model [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+      #   Defaults to "default".
+      # @param max_output_size [Integer] Max output size in tokens, up to 4000
+      # @param temperature [Float] The temperature to use for the model.
+      #   Higher values result in answers that are more creative, lower values are more conservative.
+      #   Can be any value between 0.0 and 1.0 inclusive.
+      # @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
+      # @return [Lemur::LemurBaseParams]
+      def initialize(transcript_ids: nil, input_text: nil, context: nil, final_model: nil, max_output_size: nil,
+                     temperature: nil, additional_properties: nil)
+        # @type [Array<String>] A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower.
+        #   Use either transcript_ids or input_text as input into LeMUR.
+        @transcript_ids = transcript_ids
+        # @type [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+        #   Use either transcript_ids or input_text as input into LeMUR.
+        @input_text = input_text
+        # @type [Lemur::LemurBaseParamsContext] Context to provide the model. This can be a string or a free-form JSON value.
+        @context = context
+        # @type [LEMUR_MODEL] The model that is used for the final prompt after compression is performed.
+        #   Defaults to "default".
+        @final_model = final_model
+        # @type [Integer] Max output size in tokens, up to 4000
+        @max_output_size = max_output_size
+        # @type [Float] The temperature to use for the model.
+        #   Higher values result in answers that are more creative, lower values are more conservative.
+        #   Can be any value between 0.0 and 1.0 inclusive.
+        @temperature = temperature
+        # @type [OpenStruct] Additional properties unmapped to the current class definition
+        @additional_properties = additional_properties
+      end
+
+      # Deserialize a JSON object to an instance of LemurBaseParams
+      #
+      # @param json_object [JSON]
+      # @return [Lemur::LemurBaseParams]
+      def self.from_json(json_object:)
+        struct = JSON.parse(json_object, object_class: OpenStruct)
+        parsed_json = JSON.parse(json_object)
+        transcript_ids = struct.transcript_ids
+        input_text = struct.input_text
+        if parsed_json["context"].nil?
+          context = nil
+        else
+          context = parsed_json["context"].to_json
+          context = Lemur::LemurBaseParamsContext.from_json(json_object: context)
+        end
+        final_model = Lemur::LEMUR_MODEL.key(parsed_json["final_model"]) || parsed_json["final_model"]
+        max_output_size = struct.max_output_size
+        temperature = struct.temperature
+        new(transcript_ids: transcript_ids, input_text: input_text, context: context, final_model: final_model,
+            max_output_size: max_output_size, temperature: temperature, additional_properties: struct)
+      end
+
+      # Serialize an instance of LemurBaseParams to a JSON object
+      #
+      # @return [JSON]
+      def to_json(*_args)
+        {
+          "transcript_ids": @transcript_ids,
+          "input_text": @input_text,
+          "context": @context,
+          "final_model": Lemur::LEMUR_MODEL[@final_model] || @final_model,
+          "max_output_size": @max_output_size,
+          "temperature": @temperature
+        }.to_json
+      end
+
+      # Leveraged for Union-type generation, validate_raw attempts to parse the given hash and check each fields type against the current object's property definitions.
+      #
+      # @param obj [Object]
+      # @return [Void]
+      def self.validate_raw(obj:)
+        obj.transcript_ids&.is_a?(Array) != false || raise("Passed value for field obj.transcript_ids is not the expected type, validation failed.")
+        obj.input_text&.is_a?(String) != false || raise("Passed value for field obj.input_text is not the expected type, validation failed.")
+        obj.context.nil? || Lemur::LemurBaseParamsContext.validate_raw(obj: obj.context)
+        obj.final_model&.is_a?(Lemur::LEMUR_MODEL) != false || raise("Passed value for field obj.final_model is not the expected type, validation failed.")
+        obj.max_output_size&.is_a?(Integer) != false || raise("Passed value for field obj.max_output_size is not the expected type, validation failed.")
+        obj.temperature&.is_a?(Float) != false || raise("Passed value for field obj.temperature is not the expected type, validation failed.")
+      end
+    end
+  end
+end
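The guard pattern `field&.is_a?(Type) != false || raise(...)` used throughout validate_raw treats nil as valid (the safe-navigation call returns nil, and `nil != false` is true) and raises only when a present value has the wrong type. A small sketch of that behavior, using OpenStruct stand-ins for the raw objects:

    require "ostruct"

    ok = OpenStruct.new(temperature: 0.7, max_output_size: 2000)
    AssemblyAI::Lemur::LemurBaseParams.validate_raw(obj: ok)  # passes; nil fields are skipped

    bad = OpenStruct.new(temperature: "hot")
    AssemblyAI::Lemur::LemurBaseParams.validate_raw(obj: bad)
    # => raises "Passed value for field obj.temperature is not the expected type, validation failed."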