ruby_llm 0.1.0.pre27 → 0.1.0.pre28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +0 -1
- data/lib/ruby_llm/configuration.rb +1 -1
- data/lib/ruby_llm/model_capabilities/deepseek.rb +50 -11
- data/lib/ruby_llm/model_capabilities/gemini.rb +87 -29
- data/lib/ruby_llm/model_capabilities/openai.rb +75 -24
- data/lib/ruby_llm/models.json +1169 -1169
- data/lib/ruby_llm/models.rb +1 -1
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/tasks/models.rake +164 -2
- data/ruby_llm.gemspec +1 -1
- metadata +16 -16
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 467d0db634f70567417e20d87791372e39276e18fa4cd145cc065925b7379e7f
+  data.tar.gz: 70f3e92280366104d53446891ae645183706df5d8d384799c83299a43142e2eb
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 06e6021199954eace1c88e984f052d1d2644a9df3905280f9903fbd7ecfdf3b3bf904d3078c34982aeed43e15a1871946e1a9ad174ba621c7dcf3ce64ab190b5
+  data.tar.gz: 6697cd297ed9805114c433bf7610d4d608dce919b5ee3d99d1cec135d329640d607f64d3282ace206cb176f30390defc9c14ced6842f659033d96c04e7853dbe
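
The checksums above are the gem's own integrity data: a .gem file is a tar archive whose checksums.yaml records SHA256 and SHA512 digests for metadata.gz and data.tar.gz. A minimal Ruby sketch of re-checking the new SHA256 locally; the fetch and unpack steps in the comments are assumptions about your setup, not part of this diff:

require 'digest'

# After fetching the package (e.g. `gem fetch ruby_llm -v 0.1.0.pre28 --pre`) and
# unpacking it with `tar -xf ruby_llm-0.1.0.pre28.gem`, the extracted data.tar.gz
# should hash to the value recorded in checksums.yaml above.
expected = '70f3e92280366104d53446891ae645183706df5d8d384799c83299a43142e2eb'
actual   = Digest::SHA256.file('data.tar.gz').hexdigest
puts(actual == expected ? 'data.tar.gz matches checksums.yaml' : 'checksum mismatch!')
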
data/lib/ruby_llm/model_capabilities/deepseek.rb
CHANGED
@@ -6,6 +6,9 @@ module RubyLLM
     module DeepSeek
       module_function
 
+      # Returns the context window size for the given model
+      # @param model_id [String] the model identifier
+      # @return [Integer] the context window size in tokens
       def context_window_for(model_id)
         case model_id
         when /deepseek-(?:chat|reasoner)/ then 64_000
@@ -13,34 +16,61 @@ module RubyLLM
         end
       end
 
-
-
+      # Returns the maximum output tokens for the given model
+      # @param model_id [String] the model identifier
+      # @return [Integer] the maximum output tokens
+      def max_tokens_for(model_id)
+        case model_id
+        when /deepseek-(?:chat|reasoner)/ then 8_192
+        else 4_096 # Default if max_tokens not specified
+        end
       end
 
+      # Returns the input price per million tokens for the given model
+      # @param model_id [String] the model identifier
+      # @return [Float] the price per million tokens
       def input_price_for(model_id)
         PRICES.dig(model_family(model_id), :input_miss) || default_input_price
       end
 
+      # Returns the output price per million tokens for the given model
+      # @param model_id [String] the model identifier
+      # @return [Float] the price per million tokens
       def output_price_for(model_id)
         PRICES.dig(model_family(model_id), :output) || default_output_price
       end
 
+      # Returns the cache hit price per million tokens for the given model
+      # @param model_id [String] the model identifier
+      # @return [Float] the price per million tokens
       def cache_hit_price_for(model_id)
         PRICES.dig(model_family(model_id), :input_hit) || default_cache_hit_price
       end
 
+      # Determines if the model supports vision capabilities
+      # @param model_id [String] the model identifier
+      # @return [Boolean] true if the model supports vision
       def supports_vision?(_model_id)
-
+        false # DeepSeek models don't currently support vision
       end
 
-
-
+      # Determines if the model supports function calling
+      # @param model_id [String] the model identifier
+      # @return [Boolean] true if the model supports function calling
+      def supports_functions?(model_id)
+        model_id.match?(/deepseek-chat/) # Only deepseek-chat supports function calling
       end
 
-
-
+      # Determines if the model supports JSON mode
+      # @param model_id [String] the model identifier
+      # @return [Boolean] true if the model supports JSON mode
+      def supports_json_mode?(model_id)
+        model_id.match?(/deepseek-chat/) # Only deepseek-chat supports JSON mode
       end
 
+      # Formats the model ID into a display name
+      # @param model_id [String] the model identifier
+      # @return [String] the formatted display name
       def format_display_name(model_id)
         case model_id
         when 'deepseek-chat' then 'DeepSeek V3'
@@ -52,18 +82,25 @@ module RubyLLM
         end
       end
 
+      # Returns the model type
+      # @param model_id [String] the model identifier
+      # @return [String] the model type
       def model_type(_model_id)
-        'chat' #
+        'chat' # All DeepSeek models are chat models
       end
 
+      # Returns the model family for pricing purposes
+      # @param model_id [String] the model identifier
+      # @return [String] the model family identifier
       def model_family(model_id)
         case model_id
-        when /deepseek-chat/ then
-        when /deepseek-reasoner/ then
-        else
+        when /deepseek-chat/ then :chat
+        when /deepseek-reasoner/ then :reasoner
+        else :chat # Default to chat family
        end
       end
 
+      # Pricing information for DeepSeek models (USD per 1M tokens)
       PRICES = {
         chat: {
           input_hit: 0.07, # $0.07 per million tokens on cache hit
@@ -77,6 +114,8 @@ module RubyLLM
         }
       }.freeze
 
+      private
+
       def default_input_price
         0.27 # Default to chat cache miss price
       end
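
Not part of the published diff: a minimal sketch of how the DeepSeek helpers documented above could be queried, with return values read off the case branches in this version. The require line assumes the gem's standard entry point, and the module path follows the RubyLLM::ModelCapabilities namespace shown in the hunk headers:

require 'ruby_llm'

caps = RubyLLM::ModelCapabilities::DeepSeek
caps.context_window_for('deepseek-chat')      # => 64_000
caps.max_tokens_for('deepseek-reasoner')      # => 8_192
caps.supports_functions?('deepseek-chat')     # => true  (only deepseek-chat)
caps.supports_functions?('deepseek-reasoner') # => false
caps.model_family('deepseek-reasoner')        # => :reasoner, the key looked up in PRICES
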
data/lib/ruby_llm/model_capabilities/gemini.rb
CHANGED
@@ -4,14 +4,14 @@ module RubyLLM
   module ModelCapabilities
     # Determines capabilities and pricing for Google Gemini models
     module Gemini # rubocop:disable Metrics/ModuleLength
-
+      extend self
 
       def context_window_for(model_id)
         case model_id
-        when /gemini-2\.0-flash/ then 1_048_576
+        when /gemini-2\.0-flash/, /gemini-1\.5-flash/ then 1_048_576
         when /gemini-1\.5-pro/ then 2_097_152
-        when /gemini-1\.5/ then 1_048_576
         when /text-embedding/, /embedding-001/ then 2_048
+        when /aqa/ then 7_168
         else 32_768 # Sensible default for unknown models
         end
       end
@@ -26,45 +26,45 @@ module RubyLLM
       end
 
       def input_price_for(model_id)
-        PRICES.dig(pricing_family(model_id), :input) || default_input_price
+        base_price = PRICES.dig(pricing_family(model_id), :input) || default_input_price
+        return base_price unless long_context_model?(model_id)
+
+        # Double the price for prompts longer than 128k tokens
+        context_length(model_id) > 128_000 ? base_price * 2 : base_price
       end
 
       def output_price_for(model_id)
-        PRICES.dig(pricing_family(model_id), :output) || default_output_price
+        base_price = PRICES.dig(pricing_family(model_id), :output) || default_output_price
+        return base_price unless long_context_model?(model_id)
+
+        # Double the price for prompts longer than 128k tokens
+        context_length(model_id) > 128_000 ? base_price * 2 : base_price
       end
 
       def supports_vision?(model_id)
         return false if model_id.match?(/text-embedding|embedding-001|aqa/)
-        return false if model_id.match?(/
-        return false if model_id.match?(/imagen/)
+        return false if model_id.match?(/gemini-1\.0/)
 
-
-        model_id.match?(/gemini-[12]\.(?:5|0)-(?:pro|flash)(?!-lite)/)
+        model_id.match?(/gemini-[12]\.[05]/)
       end
 
       def supports_functions?(model_id)
         return false if model_id.match?(/text-embedding|embedding-001|aqa/)
-        return false if model_id.match?(/imagen/)
         return false if model_id.match?(/flash-lite/)
-        return false if model_id.match?(/
+        return false if model_id.match?(/gemini-1\.0/)
 
-
-        model_id.match?(/gemini-[12]\.(?:5|0)-(?:pro|flash)(?!-lite)/)
+        model_id.match?(/gemini-[12]\.[05]-(?:pro|flash)(?!-lite)/)
       end
 
       def supports_json_mode?(model_id)
         return false if model_id.match?(/text-embedding|embedding-001|aqa/)
-        return false if model_id.match?(/imagen/)
         return false if model_id.match?(/flash-lite/)
-        return false if model_id.match?(/
+        return false if model_id.match?(/gemini-1\.0/)
 
-
-        model_id.match?(/gemini-[12]\.(?:5|0)-(?:pro|flash)(?!-lite)/)
+        model_id.match?(/gemini-1\.5/)
       end
 
       def format_display_name(model_id)
-        return model_id unless model_id.start_with?('models/')
-
         model_id
           .delete_prefix('models/')
           .split('-')
@@ -72,14 +72,28 @@ module RubyLLM
           .join(' ')
           .gsub(/(\d+\.\d+)/, ' \1') # Add space before version numbers
           .gsub(/\s+/, ' ') # Clean up multiple spaces
+          .gsub(/Aqa/, 'AQA') # Special case for AQA
           .strip
       end
 
+      def supports_caching?(model_id)
+        return false if model_id.match?(/flash-lite|gemini-1\.0/)
+
+        model_id.match?(/gemini-[12]\.[05]/)
+      end
+
+      def supports_tuning?(model_id)
+        model_id.match?(/gemini-1\.5-flash/)
+      end
+
+      def supports_audio?(model_id)
+        model_id.match?(/gemini-[12]\.[05]/)
+      end
+
       def model_type(model_id)
         case model_id
         when /text-embedding|embedding/ then 'embedding'
         when /imagen/ then 'image'
-        when /bison|text-bison/ then 'legacy'
         else 'chat'
         end
       end
@@ -91,33 +105,77 @@ module RubyLLM
         when /gemini-1\.5-flash-8b/ then 'gemini15_flash_8b'
         when /gemini-1\.5-flash/ then 'gemini15_flash'
         when /gemini-1\.5-pro/ then 'gemini15_pro'
+        when /gemini-1\.0-pro/ then 'gemini10_pro'
         when /text-embedding-004/ then 'embedding4'
         when /embedding-001/ then 'embedding1'
-        when /
-        when /imagen/ then 'imagen3'
+        when /aqa/ then 'aqa'
         else 'other'
         end
       end
 
-      def pricing_family(model_id)
+      def pricing_family(model_id) # rubocop:disable Metrics/CyclomaticComplexity
         case model_id
         when /gemini-2\.0-flash-lite/ then :flash_lite_2 # rubocop:disable Naming/VariableNumber
         when /gemini-2\.0-flash/ then :flash_2 # rubocop:disable Naming/VariableNumber
         when /gemini-1\.5-flash-8b/ then :flash_8b
         when /gemini-1\.5-flash/ then :flash
         when /gemini-1\.5-pro/ then :pro
+        when /gemini-1\.0-pro/ then :pro_1_0 # rubocop:disable Naming/VariableNumber
         when /text-embedding|embedding/ then :embedding
         else :base
         end
       end
 
+      private
+
+      def long_context_model?(model_id)
+        model_id.match?(/gemini-1\.5-(?:pro|flash)/)
+      end
+
+      def context_length(model_id)
+        context_window_for(model_id)
+      end
+
       PRICES = {
-        flash_2: {
-
-
-
-
-
+        flash_2: { # Gemini 2.0 Flash # rubocop:disable Naming/VariableNumber
+          input: 0.10,
+          output: 0.40,
+          audio_input: 0.70,
+          cache: 0.025,
+          cache_storage: 1.00
+        },
+        flash_lite_2: { # Gemini 2.0 Flash Lite # rubocop:disable Naming/VariableNumber
+          input: 0.075,
+          output: 0.30,
+          cache: 0.01875,
+          cache_storage: 1.00
+        },
+        flash: { # Gemini 1.5 Flash
+          input: 0.075,
+          output: 0.30,
+          cache: 0.01875,
+          cache_storage: 1.00
+        },
+        flash_8b: { # Gemini 1.5 Flash 8B
+          input: 0.0375,
+          output: 0.15,
+          cache: 0.01,
+          cache_storage: 0.25
+        },
+        pro: { # Gemini 1.5 Pro
+          input: 1.25,
+          output: 5.0,
+          cache: 0.3125,
+          cache_storage: 4.50
+        },
+        pro_1_0: { # Gemini 1.0 Pro # rubocop:disable Naming/VariableNumber
+          input: 0.50,
+          output: 1.50
+        },
+        embedding: { # Text Embedding models
+          input: 0.00,
+          output: 0.00
+        }
       }.freeze
 
       def default_input_price
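
The substantive change in this file is the long-context surcharge in input_price_for and output_price_for. A short sketch (illustrative only, not from the gem's docs) of what the new code computes; extend self above makes the helpers callable directly on the module:

caps = RubyLLM::ModelCapabilities::Gemini

# gemini-1.5-pro matches long_context_model?, and context_length simply delegates
# to context_window_for (2_097_152 tokens), which exceeds 128_000, so the base
# per-million-token price from PRICES[:pro] is doubled:
caps.input_price_for('gemini-1.5-pro')   # => 2.5   (1.25 * 2)
caps.output_price_for('gemini-1.5-pro')  # => 10.0  (5.0 * 2)

# gemini-2.0-flash is not a long-context model here, so base pricing applies:
caps.input_price_for('gemini-2.0-flash') # => 0.10
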
data/lib/ruby_llm/model_capabilities/openai.rb
CHANGED
@@ -8,23 +8,26 @@ module RubyLLM
 
       def context_window_for(model_id)
         case model_id
-        when /
-        when /
-        when /gpt-
-        when /gpt-3
-
+        when /o[13]-mini/, /o3-mini-2025/ then 200_000
+        when /o1-2024/ then 200_000
+        when /gpt-4o/, /gpt-4-turbo/ then 128_000
+        when /gpt-4-0[0-9]{3}/ then 8_192
+        when /gpt-3.5-turbo-instruct/ then 4_096
+        when /gpt-3.5/ then 16_385
+        else 4_096
         end
       end
 
-      def max_tokens_for(model_id)
+      def max_tokens_for(model_id) # rubocop:disable Metrics/CyclomaticComplexity
         case model_id
-        when /o1-2024-
-        when /o1-mini-2024
-        when /
-        when /gpt-4o/, /gpt-
-        when /gpt-
-        when /gpt-3
-
+        when /o1-2024/, /o3-mini/ then 100_000
+        when /o1-mini-2024/ then 65_536
+        when /gpt-4o-2024-05-13/ then 4_096
+        when /gpt-4o/, /gpt-4o-mini/ then 16_384
+        when /gpt-4o-realtime/ then 4_096
+        when /gpt-4-0[0-9]{3}/ then 8_192
+        when /gpt-3.5-turbo/ then 4_096
+        else 4_096
         end
       end
 
@@ -37,13 +40,17 @@ module RubyLLM
       end
 
       def supports_vision?(model_id)
-        model_id.
+        model_id.match?(/gpt-4o|o1/) || model_id.match?(/gpt-4-(?!0314|0613)/)
       end
 
       def supports_functions?(model_id)
         !model_id.include?('instruct')
       end
 
+      def supports_audio?(model_id)
+        model_id.match?(/audio-preview|realtime-preview|whisper|tts/)
+      end
+
       def supports_json_mode?(model_id)
         model_id.match?(/gpt-4-\d{4}-preview/) ||
           model_id.include?('turbo') ||
@@ -65,17 +72,24 @@ module RubyLLM
         end
       end
 
+      def supports_structured_output?(model_id)
+        model_id.match?(/gpt-4o|o[13]-mini|o1/)
+      end
+
       def model_family(model_id) # rubocop:disable Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/MethodLength
         case model_id
-        when /
+        when /o3-mini/ then 'o3_mini'
         when /o1-mini/ then 'o1_mini'
         when /o1/ then 'o1'
+        when /gpt-4o-audio/ then 'gpt4o_audio'
         when /gpt-4o-realtime/ then 'gpt4o_realtime'
+        when /gpt-4o-mini-audio/ then 'gpt4o_mini_audio'
         when /gpt-4o-mini-realtime/ then 'gpt4o_mini_realtime'
         when /gpt-4o-mini/ then 'gpt4o_mini'
         when /gpt-4o/ then 'gpt4o'
         when /gpt-4-turbo/ then 'gpt4_turbo'
         when /gpt-4/ then 'gpt4'
+        when /gpt-3.5-turbo-instruct/ then 'gpt35_instruct'
         when /gpt-3.5/ then 'gpt35'
         when /dall-e-3/ then 'dalle3'
         when /dall-e-2/ then 'dalle2'
@@ -95,14 +109,51 @@ module RubyLLM
       private
 
       PRICES = {
-        o1: { input: 15.0, output: 60.0 },
-        o1_mini: { input:
-
-
-
-
+        o1: { input: 15.0, cached_input: 7.5, output: 60.0 },
+        o1_mini: { input: 1.10, cached_input: 0.55, output: 4.40 },
+        o3_mini: { input: 1.10, cached_input: 0.55, output: 4.40 },
+        gpt4o: { input: 2.50, cached_input: 1.25, output: 10.0 },
+        gpt4o_audio: {
+          text_input: 2.50,
+          audio_input: 40.0,
+          text_output: 10.0,
+          audio_output: 80.0
+        },
+        gpt4o_realtime: {
+          text_input: 5.0,
+          cached_text_input: 2.50,
+          audio_input: 40.0,
+          cached_audio_input: 2.50,
+          text_output: 20.0,
+          audio_output: 80.0
+        },
+        gpt4o_mini: { input: 0.15, cached_input: 0.075, output: 0.60 },
+        gpt4o_mini_audio: {
+          text_input: 0.15,
+          audio_input: 10.0,
+          text_output: 0.60,
+          audio_output: 20.0
+        },
+        gpt4o_mini_realtime: {
+          text_input: 0.60,
+          cached_text_input: 0.30,
+          audio_input: 10.0,
+          cached_audio_input: 0.30,
+          text_output: 2.40,
+          audio_output: 20.0
+        },
         gpt4_turbo: { input: 10.0, output: 30.0 },
-
+        gpt4: { input: 30.0, output: 60.0 },
+        gpt35: { input: 0.50, output: 1.50 },
+        gpt35_instruct: { input: 1.50, output: 2.0 },
+        embedding3_large: { price: 0.13 },
+        embedding3_small: { price: 0.02 },
+        embedding2: { price: 0.10 },
+        davinci: { input: 2.0, output: 2.0 },
+        babbage: { input: 0.40, output: 0.40 },
+        tts1: { price: 15.0 },
+        tts1_hd: { price: 30.0 },
+        whisper1: { price: 0.006 }
       }.freeze
 
       def default_input_price
@@ -124,13 +175,13 @@ module RubyLLM
         name
           .gsub(/(\d{4}) (\d{2}) (\d{2})/, '\1\2\3')
           .gsub(/^Gpt /, 'GPT-')
-          .gsub(/^
+          .gsub(/^O[13] /, 'O\1-')
           .gsub(/^Chatgpt /, 'ChatGPT-')
           .gsub(/^Tts /, 'TTS-')
           .gsub(/^Dall E /, 'DALL-E-')
           .gsub(/3\.5 /, '3.5-')
           .gsub(/4 /, '4-')
-          .gsub(/4o (?=Mini|Preview|Turbo)/, '4o-')
+          .gsub(/4o (?=Mini|Preview|Turbo|Audio)/, '4o-')
           .gsub(/\bHd\b/, 'HD')
       end
     end
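
Likewise for the OpenAI module, a short sketch of the new window and output-token lookups, with values read from the case branches above. The module name OpenAI and module-level call style are assumptions based on the file path and the other capability modules, not something this diff shows:

caps = RubyLLM::ModelCapabilities::OpenAI

caps.context_window_for('gpt-4o-2024-08-06')    # => 128_000 via the /gpt-4o/ branch
caps.max_tokens_for('gpt-4o-2024-05-13')        # => 4_096   (date-pinned branch wins over /gpt-4o/)
caps.max_tokens_for('o3-mini-2025-01-31')       # => 100_000
caps.supports_structured_output?('gpt-4o-mini') # => true
caps.supports_audio?('gpt-4o-audio-preview')    # => true    (new in this version)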