assemblyai 1.0.0.pre.beta.14 → 1.0.0.pre.beta.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 5ae87cdf281a98b3212969920386369475357ec4d885884dec3801fcb7365cee
|
|
4
|
+
data.tar.gz: 8b887200cc676f11d6f33468bfb3b0b019d256e9305cd2fe656412cbcc39b068
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: da259645f0d5a208bfbb99a7e28505e7d19fbabcdf53b0f34467cb7f1b598188cfb74010e68a1250a7b687924e787ccae99e8b6b0553d583398fba37cf73e9d7
|
|
7
|
+
data.tar.gz: 9c100ade246d673db73c9cefb7c110a4ab80b29f4901dfdcba172d41af787643f27f38d979916e9f597135dc35f72b61251337099c339d5afd4f7da36d1628d9
|
|
@@ -33,7 +33,6 @@ module AssemblyAI
|
|
|
33
33
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
34
34
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
35
35
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
36
|
-
# Defaults to "default".
|
|
37
36
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
38
37
|
# @param temperature [Float] The temperature to use for the model.
|
|
39
38
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -84,7 +83,6 @@ module AssemblyAI
|
|
|
84
83
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
85
84
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
86
85
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
87
|
-
# Defaults to "default".
|
|
88
86
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
89
87
|
# @param temperature [Float] The temperature to use for the model.
|
|
90
88
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -136,7 +134,6 @@ module AssemblyAI
|
|
|
136
134
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
137
135
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
138
136
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
139
|
-
# Defaults to "default".
|
|
140
137
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
141
138
|
# @param temperature [Float] The temperature to use for the model.
|
|
142
139
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -187,7 +184,6 @@ module AssemblyAI
|
|
|
187
184
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
188
185
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
189
186
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
190
|
-
# Defaults to "default".
|
|
191
187
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
192
188
|
# @param temperature [Float] The temperature to use for the model.
|
|
193
189
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -294,7 +290,6 @@ module AssemblyAI
|
|
|
294
290
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
295
291
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
296
292
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
297
|
-
# Defaults to "default".
|
|
298
293
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
299
294
|
# @param temperature [Float] The temperature to use for the model.
|
|
300
295
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -347,7 +342,6 @@ module AssemblyAI
|
|
|
347
342
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
348
343
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
349
344
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
350
|
-
# Defaults to "default".
|
|
351
345
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
352
346
|
# @param temperature [Float] The temperature to use for the model.
|
|
353
347
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -401,7 +395,6 @@ module AssemblyAI
|
|
|
401
395
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
402
396
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
403
397
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
404
|
-
# Defaults to "default".
|
|
405
398
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
406
399
|
# @param temperature [Float] The temperature to use for the model.
|
|
407
400
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -454,7 +447,6 @@ module AssemblyAI
|
|
|
454
447
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
455
448
|
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
|
|
456
449
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
457
|
-
# Defaults to "default".
|
|
458
450
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
459
451
|
# @param temperature [Float] The temperature to use for the model.
|
|
460
452
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -19,7 +19,6 @@ module AssemblyAI
|
|
|
19
19
|
# @return [AssemblyAI::Lemur::LemurBaseParamsContext] Context to provide the model. This can be a string or a free-form JSON value.
|
|
20
20
|
attr_reader :context
|
|
21
21
|
# @return [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
22
|
-
# Defaults to "default".
|
|
23
22
|
attr_reader :final_model
|
|
24
23
|
# @return [Integer] Max output size in tokens, up to 4000
|
|
25
24
|
attr_reader :max_output_size
|
|
@@ -44,7 +43,6 @@ module AssemblyAI
|
|
|
44
43
|
# Use either transcript_ids or input_text as input into LeMUR.
|
|
45
44
|
# @param context [AssemblyAI::Lemur::LemurBaseParamsContext] Context to provide the model. This can be a string or a free-form JSON value.
|
|
46
45
|
# @param final_model [AssemblyAI::Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
|
|
47
|
-
# Defaults to "default".
|
|
48
46
|
# @param max_output_size [Integer] Max output size in tokens, up to 4000
|
|
49
47
|
# @param temperature [Float] The temperature to use for the model.
|
|
50
48
|
# Higher values result in answers that are more creative, lower values are more
|
|
@@ -4,10 +4,16 @@ module AssemblyAI
|
|
|
4
4
|
class Lemur
|
|
5
5
|
# The model that is used for the final prompt after compression is performed.
|
|
6
6
|
class LemurModel
|
|
7
|
+
ANTHROPIC_CLAUDE3_5_SONNET = "anthropic/claude-3-5-sonnet"
|
|
8
|
+
ANTHROPIC_CLAUDE3_OPUS = "anthropic/claude-3-opus"
|
|
9
|
+
ANTHROPIC_CLAUDE3_HAIKU = "anthropic/claude-3-haiku"
|
|
10
|
+
ANTHROPIC_CLAUDE3_SONNET = "anthropic/claude-3-sonnet"
|
|
11
|
+
ANTHROPIC_CLAUDE2_1 = "anthropic/claude-2-1"
|
|
12
|
+
ANTHROPIC_CLAUDE2 = "anthropic/claude-2"
|
|
7
13
|
DEFAULT = "default"
|
|
14
|
+
ANTHROPIC_CLAUDE_INSTANT1_2 = "anthropic/claude-instant-1-2"
|
|
8
15
|
BASIC = "basic"
|
|
9
16
|
ASSEMBLYAI_MISTRAL7B = "assemblyai/mistral-7b"
|
|
10
|
-
ANTHROPIC_CLAUDE2_1 = "anthropic/claude-2-1"
|
|
11
17
|
end
|
|
12
18
|
end
|
|
13
19
|
end
|
data/lib/gemconfig.rb
CHANGED
metadata
CHANGED
|
@@ -1,14 +1,14 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: assemblyai
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 1.0.0.pre.beta.14
|
|
4
|
+
version: 1.0.0.pre.beta.15
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- ''
|
|
8
8
|
autorequire:
|
|
9
9
|
bindir: exe
|
|
10
10
|
cert_chain: []
|
|
11
|
-
date: 2024-07-
|
|
11
|
+
date: 2024-07-10 00:00:00.000000000 Z
|
|
12
12
|
dependencies:
|
|
13
13
|
- !ruby/object:Gem::Dependency
|
|
14
14
|
name: async-http-faraday
|