ruby_llm 0.1.0.pre25 → 0.1.0.pre26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +19 -3
- data/Rakefile +3 -0
- data/bin/console +2 -0
- data/lib/ruby_llm/configuration.rb +2 -0
- data/lib/ruby_llm/error.rb +12 -2
- data/lib/ruby_llm/model_capabilities/anthropic.rb +1 -3
- data/lib/ruby_llm/model_capabilities/deepseek.rb +93 -0
- data/lib/ruby_llm/model_capabilities/gemini.rb +132 -0
- data/lib/ruby_llm/model_capabilities/openai.rb +35 -11
- data/lib/ruby_llm/model_info.rb +6 -10
- data/lib/ruby_llm/models.json +2068 -686
- data/lib/ruby_llm/models.rb +4 -2
- data/lib/ruby_llm/provider.rb +11 -4
- data/lib/ruby_llm/providers/anthropic.rb +8 -5
- data/lib/ruby_llm/providers/deepseek.rb +20 -0
- data/lib/ruby_llm/providers/gemini.rb +28 -0
- data/lib/ruby_llm/providers/openai.rb +12 -7
- data/lib/ruby_llm/stream_accumulator.rb +7 -4
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm.rb +8 -1
- data/lib/tasks/models.rake +7 -3
- metadata +6 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ea5a792c0bc3361e9784fc902e8e0e08f07cb2a6db03b408ee6362deb50c6edc
+  data.tar.gz: 44679aa95838fb8cbc8ff0ae42db0a5d2c9862d7af0ec461ea98c41276276bdf
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3fa7cdaa35b5bfe2f60a25edfaefec82f89cf47d54fe8bad2f225fb4d339326fc341194fd734310e224f07055d9c0dccad81c6e610c6bd071aa64883c7b49a1d
+  data.tar.gz: aba94a44af3cb6348cb7bac016ad30a539ef6f980e16261dce324e9235214d869e5bf9ae78d46a7dc01ca1a7cfd0ec682a97b18872c1b50a6ed984bdd699b077
data/README.md
CHANGED
@@ -1,6 +1,16 @@
 # RubyLLM
 
-A delightful Ruby interface to the latest large language models. Stop wrestling with multiple APIs and inconsistent interfaces. RubyLLM gives you a clean, unified way to work with models from OpenAI, Anthropic, and
+A delightful Ruby interface to the latest large language models. Stop wrestling with multiple APIs and inconsistent interfaces. RubyLLM gives you a clean, unified way to work with models from OpenAI, Anthropic, Google, and DeepSeek.
+
+<p align="center">
+  <img src="https://upload.wikimedia.org/wikipedia/commons/4/4d/OpenAI_Logo.svg" alt="OpenAI" height="40" width="120">
+
+  <img src="https://upload.wikimedia.org/wikipedia/commons/7/78/Anthropic_logo.svg" alt="Anthropic" height="40" width="120">
+
+  <img src="https://upload.wikimedia.org/wikipedia/commons/8/8a/Google_Gemini_logo.svg" alt="Google" height="40" width="120">
+
+  <img src="https://upload.wikimedia.org/wikipedia/commons/e/ec/DeepSeek_logo.svg" alt="DeepSeek" height="40" width="120">
+</p>
 
 [](https://badge.fury.io/rb/ruby_llm)
 [](https://github.com/testdouble/standard)
@@ -28,6 +38,8 @@ require 'ruby_llm'
 RubyLLM.configure do |config|
   config.openai_api_key = ENV['OPENAI_API_KEY']
   config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
+  config.gemini_api_key = ENV['GEMINI_API_KEY']
+  config.deepseek_api_key = ENV['DEEPSEEK_API_KEY']
 end
 ```
 
@@ -61,7 +73,7 @@ image_models = RubyLLM.models.image_models
 Conversations are simple and natural:
 
 ```ruby
-chat = RubyLLM.chat model: 'claude-3-
+chat = RubyLLM.chat model: 'claude-3-opus-20240229'
 
 # Ask questions
 response = chat.ask "What's your favorite Ruby feature?"
@@ -153,7 +165,7 @@ search = Search.new repo: Document
 chat.with_tools search, Calculator
 
 # Configure as needed
-chat.with_model('claude-3-
+chat.with_model('claude-3-opus-20240229')
     .with_temperature(0.9)
 
 chat.ask "What's 2+2?"
@@ -185,6 +197,10 @@ rescue RubyLLM::UnauthorizedError
   puts "Check your API credentials"
 rescue RubyLLM::BadRequestError => e
   puts "Something went wrong: #{e.message}"
+rescue RubyLLM::PaymentRequiredError
+  puts "Time to top up your API credits"
+rescue RubyLLM::ServiceUnavailableError
+  puts "API service is temporarily down"
 end
 ```
 
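The chat API shown in the README should work the same way once the two new provider keys are configured. A minimal sketch, assuming model IDs such as 'gemini-1.5-flash' and 'deepseek-chat' are present in the refreshed models.json (the IDs are illustrative, not taken from the README):

```ruby
require 'ruby_llm'

RubyLLM.configure do |config|
  config.gemini_api_key   = ENV['GEMINI_API_KEY']
  config.deepseek_api_key = ENV['DEEPSEEK_API_KEY']
end

# Model IDs below are assumptions for illustration; inspect RubyLLM.models
# to see which identifiers actually ship in models.json.
gemini   = RubyLLM.chat model: 'gemini-1.5-flash'
deepseek = RubyLLM.chat model: 'deepseek-chat'

gemini.ask "Summarize Ruby blocks in one sentence."
deepseek.ask "What's your favorite Ruby feature?"
```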
data/Rakefile
CHANGED
data/bin/console
CHANGED
@@ -10,6 +10,8 @@ require 'irb'
 RubyLLM.configure do |config|
   config.openai_api_key = ENV['OPENAI_API_KEY']
   config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
+  config.gemini_api_key = ENV['GEMINI_API_KEY']
+  config.deepseek_api_key = ENV['DEEPSEEK_API_KEY']
 end
 
 IRB.start(__FILE__)
data/lib/ruby_llm/error.rb
CHANGED
@@ -23,6 +23,8 @@ module RubyLLM
   class InvalidRoleError < StandardError; end
   class UnsupportedFunctionsError < StandardError; end
   class UnauthorizedError < Error; end
+  class PaymentRequiredError < Error; end
+  class ServiceUnavailableError < Error; end
   class BadRequestError < Error; end
   class RateLimitError < Error; end
   class ServerError < Error; end
@@ -42,18 +44,26 @@ module RubyLLM
     end
 
     class << self
-      def parse_error(provider:, response:) # rubocop:disable Metrics/CyclomaticComplexity,Metrics/MethodLength
+      def parse_error(provider:, response:) # rubocop:disable Metrics/CyclomaticComplexity,Metrics/MethodLength,Metrics/AbcSize,Metrics/PerceivedComplexity
         message = provider&.parse_error(response)
 
         case response.status
+        when 200..399
+          message
         when 400
           raise BadRequestError.new(response, message || 'Invalid request - please check your input')
         when 401
           raise UnauthorizedError.new(response, message || 'Invalid API key - check your credentials')
+        when 402
+          raise PaymentRequiredError.new(response, message || 'Payment required - please top up your account')
         when 429
           raise RateLimitError.new(response, message || 'Rate limit exceeded - please wait a moment')
-        when 500
+        when 500
           raise ServerError.new(response, message || 'API server error - please try again')
+        when 503
+          raise ServiceUnavailableError.new(response, message || 'API server unavailable - please try again later')
+        else
+          raise Error.new(response, message || 'An unknown error occurred')
         end
       end
     end
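Both new classes inherit from RubyLLM::Error, so the 402 and 503 responses mapped above are still caught by an existing blanket rescue. A small sketch:

```ruby
RubyLLM::PaymentRequiredError < RubyLLM::Error     # => true
RubyLLM::ServiceUnavailableError < RubyLLM::Error  # => true

begin
  chat.ask "Hello"           # `chat` as in the README examples above
rescue RubyLLM::Error => e   # also covers the new 402 and 503 cases
  warn e.message
end
```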
data/lib/ruby_llm/model_capabilities/anthropic.rb
CHANGED
@@ -4,7 +4,7 @@ module RubyLLM
   module ModelCapabilities
     # Determines capabilities and pricing for Anthropic models
     module Anthropic
-
+      module_function
 
       def determine_context_window(model_id)
         case model_id
@@ -43,8 +43,6 @@ module RubyLLM
         model_id.include?('claude-3')
       end
 
-      private
-
       def model_family(model_id)
         case model_id
         when /claude-3-5-sonnet/ then :claude35_sonnet
data/lib/ruby_llm/model_capabilities/deepseek.rb
ADDED
@@ -0,0 +1,93 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module ModelCapabilities
+    # Determines capabilities and pricing for DeepSeek models
+    module DeepSeek
+      module_function
+
+      def context_window_for(model_id)
+        case model_id
+        when /deepseek-(?:chat|reasoner)/ then 64_000
+        else 32_768 # Sensible default
+        end
+      end
+
+      def max_tokens_for(_model_id)
+        8_192
+      end
+
+      def input_price_for(model_id)
+        PRICES.dig(model_family(model_id), :input_miss) || default_input_price
+      end
+
+      def output_price_for(model_id)
+        PRICES.dig(model_family(model_id), :output) || default_output_price
+      end
+
+      def cache_hit_price_for(model_id)
+        PRICES.dig(model_family(model_id), :input_hit) || default_cache_hit_price
+      end
+
+      def supports_vision?(_model_id)
+        true # Both deepseek-chat and deepseek-reasoner support vision
+      end
+
+      def supports_functions?(_model_id)
+        true # Both models support function calling
+      end
+
+      def supports_json_mode?(_model_id)
+        true # Both models support JSON mode
+      end
+
+      def format_display_name(model_id)
+        case model_id
+        when 'deepseek-chat' then 'DeepSeek V3'
+        when 'deepseek-reasoner' then 'DeepSeek R1'
+        else
+          model_id.split('-')
+                  .map(&:capitalize)
+                  .join(' ')
+        end
+      end
+
+      def model_type(_model_id)
+        'chat' # Both models are chat models
+      end
+
+      def model_family(model_id)
+        case model_id
+        when /deepseek-chat/ then 'deepseek'
+        when /deepseek-reasoner/ then 'deepseek_reasoner'
+        else 'deepseek' # Default to base deepseek family
+        end
+      end
+
+      PRICES = {
+        chat: {
+          input_hit: 0.07,  # $0.07 per million tokens on cache hit
+          input_miss: 0.27, # $0.27 per million tokens on cache miss
+          output: 1.10      # $1.10 per million tokens output
+        },
+        reasoner: {
+          input_hit: 0.14,  # $0.14 per million tokens on cache hit
+          input_miss: 0.55, # $0.55 per million tokens on cache miss
+          output: 2.19      # $2.19 per million tokens output
+        }
+      }.freeze
+
+      def default_input_price
+        0.27 # Default to chat cache miss price
+      end
+
+      def default_output_price
+        1.10 # Default to chat output price
+      end
+
+      def default_cache_hit_price
+        0.07 # Default to chat cache hit price
+      end
+    end
+  end
+end
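Because the module declares module_function, the helpers can be called directly on RubyLLM::ModelCapabilities::DeepSeek. A quick sketch of the lookups (return values follow from the case statements above):

```ruby
caps = RubyLLM::ModelCapabilities::DeepSeek

caps.context_window_for('deepseek-reasoner')  # => 64_000
caps.max_tokens_for('deepseek-chat')          # => 8_192
caps.format_display_name('deepseek-chat')     # => "DeepSeek V3"
caps.supports_functions?('deepseek-chat')     # => true
```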
data/lib/ruby_llm/model_capabilities/gemini.rb
ADDED
@@ -0,0 +1,132 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module ModelCapabilities
+    # Determines capabilities and pricing for Google Gemini models
+    module Gemini # rubocop:disable Metrics/ModuleLength
+      module_function
+
+      def context_window_for(model_id)
+        case model_id
+        when /gemini-2\.0-flash/ then 1_048_576
+        when /gemini-1\.5-pro/ then 2_097_152
+        when /gemini-1\.5/ then 1_048_576
+        when /text-embedding/, /embedding-001/ then 2_048
+        else 32_768 # Sensible default for unknown models
+        end
+      end
+
+      def max_tokens_for(model_id)
+        case model_id
+        when /gemini-2\.0-flash/, /gemini-1\.5/ then 8_192
+        when /text-embedding/, /embedding-001/ then 768 # Output dimension size for embeddings
+        when /aqa/ then 1_024
+        else 4_096 # Sensible default
+        end
+      end
+
+      def input_price_for(model_id)
+        PRICES.dig(pricing_family(model_id), :input) || default_input_price
+      end
+
+      def output_price_for(model_id)
+        PRICES.dig(pricing_family(model_id), :output) || default_output_price
+      end
+
+      def supports_vision?(model_id)
+        return false if model_id.match?(/text-embedding|embedding-001|aqa/)
+        return false if model_id.match?(/flash-lite/)
+        return false if model_id.match?(/imagen/)
+
+        # Only pro and regular flash models support vision
+        model_id.match?(/gemini-[12]\.(?:5|0)-(?:pro|flash)(?!-lite)/)
+      end
+
+      def supports_functions?(model_id)
+        return false if model_id.match?(/text-embedding|embedding-001|aqa/)
+        return false if model_id.match?(/imagen/)
+        return false if model_id.match?(/flash-lite/)
+        return false if model_id.match?(/bison|gecko|evergreen/)
+
+        # Currently only full models support function calling
+        model_id.match?(/gemini-[12]\.(?:5|0)-(?:pro|flash)(?!-lite)/)
+      end
+
+      def supports_json_mode?(model_id)
+        return false if model_id.match?(/text-embedding|embedding-001|aqa/)
+        return false if model_id.match?(/imagen/)
+        return false if model_id.match?(/flash-lite/)
+        return false if model_id.match?(/bison|gecko|evergreen/)
+
+        # Gemini 1.5+ models support JSON mode
+        model_id.match?(/gemini-[12]\.(?:5|0)-(?:pro|flash)(?!-lite)/)
+      end
+
+      def format_display_name(model_id)
+        return model_id unless model_id.start_with?('models/')
+
+        model_id
+          .delete_prefix('models/')
+          .split('-')
+          .map(&:capitalize)
+          .join(' ')
+          .gsub(/(\d+\.\d+)/, ' \1') # Add space before version numbers
+          .gsub(/\s+/, ' ')          # Clean up multiple spaces
+          .strip
+      end
+
+      def model_type(model_id)
+        case model_id
+        when /text-embedding|embedding/ then 'embedding'
+        when /imagen/ then 'image'
+        when /bison|text-bison/ then 'legacy'
+        else 'chat'
+        end
+      end
+
+      def model_family(model_id) # rubocop:disable Metrics/CyclomaticComplexity,Metrics/MethodLength
+        case model_id
+        when /gemini-2\.0-flash-lite/ then 'gemini20_flash_lite'
+        when /gemini-2\.0-flash/ then 'gemini20_flash'
+        when /gemini-1\.5-flash-8b/ then 'gemini15_flash_8b'
+        when /gemini-1\.5-flash/ then 'gemini15_flash'
+        when /gemini-1\.5-pro/ then 'gemini15_pro'
+        when /text-embedding-004/ then 'embedding4'
+        when /embedding-001/ then 'embedding1'
+        when /bison|text-bison/ then 'bison'
+        when /imagen/ then 'imagen3'
+        else 'other'
+        end
+      end
+
+      def pricing_family(model_id)
+        case model_id
+        when /gemini-2\.0-flash-lite/ then :flash_lite_2 # rubocop:disable Naming/VariableNumber
+        when /gemini-2\.0-flash/ then :flash_2 # rubocop:disable Naming/VariableNumber
+        when /gemini-1\.5-flash-8b/ then :flash_8b
+        when /gemini-1\.5-flash/ then :flash
+        when /gemini-1\.5-pro/ then :pro
+        when /text-embedding|embedding/ then :embedding
+        else :base
+        end
+      end
+
+      PRICES = {
+        flash_2: { input: 0.10, output: 0.40 }, # Gemini 2.0 Flash # rubocop:disable Naming/VariableNumber
+        flash_lite_2: { input: 0.075, output: 0.30 }, # Gemini 2.0 Flash Lite # rubocop:disable Naming/VariableNumber
+        flash: { input: 0.075, output: 0.30 }, # Gemini 1.5 Flash basic pricing
+        flash_8b: { input: 0.0375, output: 0.15 }, # Gemini 1.5 Flash 8B
+        pro: { input: 1.25, output: 5.0 }, # Gemini 1.5 Pro
+        embedding: { input: 0.00, output: 0.00 } # Text Embedding models are free
+      }.freeze
+
+      def default_input_price
+        0.075 # Default to Flash pricing
+      end
+
+      def default_output_price
+        0.30 # Default to Flash pricing
+      end
+    end
+  end
+end
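The Gemini module has the same shape. A short sketch of how a model ID flows through these lookups (results follow from the case branches and PRICES entries above):

```ruby
caps = RubyLLM::ModelCapabilities::Gemini

caps.context_window_for('gemini-1.5-pro-latest')  # => 2_097_152
caps.supports_vision?('gemini-2.0-flash-lite')    # => false (flash-lite is excluded)
caps.pricing_family('gemini-1.5-flash-8b')        # => :flash_8b
caps.input_price_for('gemini-1.5-flash-8b')       # => 0.0375 ($ per million input tokens)
caps.format_display_name('models/gemini-1.5-pro') # => "Gemini 1.5 Pro"
```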
data/lib/ruby_llm/model_capabilities/openai.rb
CHANGED
@@ -3,7 +3,7 @@
 module RubyLLM
   module ModelCapabilities
     # Determines capabilities and pricing for OpenAI models
-    module OpenAI
+    module OpenAI # rubocop:disable Metrics/ModuleLength
       extend self
 
       def context_window_for(model_id)
@@ -55,21 +55,45 @@ module RubyLLM
           .then { |name| apply_special_formatting(name) }
       end
 
-
+      def model_type(model_id)
+        case model_id
+        when /text-embedding|embedding/ then 'embedding'
+        when /dall-e/ then 'image'
+        when /tts|whisper/ then 'audio'
+        when /omni-moderation/ then 'moderation'
+        else 'chat'
+        end
+      end
 
-      def model_family(model_id) # rubocop:disable Metrics/CyclomaticComplexity
+      def model_family(model_id) # rubocop:disable Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/MethodLength
         case model_id
-        when /o1-2024/
-        when /o1-mini/
-        when /
-        when /gpt-4o-
-        when /gpt-4o-mini/
-        when /gpt-4o/
-        when /gpt-
-
+        when /o1-2024|o1-mini-2024/ then 'o1'
+        when /o1-mini/ then 'o1_mini'
+        when /o1/ then 'o1'
+        when /gpt-4o-realtime/ then 'gpt4o_realtime'
+        when /gpt-4o-mini-realtime/ then 'gpt4o_mini_realtime'
+        when /gpt-4o-mini/ then 'gpt4o_mini'
+        when /gpt-4o/ then 'gpt4o'
+        when /gpt-4-turbo/ then 'gpt4_turbo'
+        when /gpt-4/ then 'gpt4'
+        when /gpt-3.5/ then 'gpt35'
+        when /dall-e-3/ then 'dalle3'
+        when /dall-e-2/ then 'dalle2'
+        when /text-embedding-3-large/ then 'embedding3_large'
+        when /text-embedding-3-small/ then 'embedding3_small'
+        when /text-embedding-ada/ then 'embedding2'
+        when /tts-1-hd/ then 'tts1_hd'
+        when /tts-1/ then 'tts1'
+        when /whisper/ then 'whisper1'
+        when /omni-moderation/ then 'moderation'
+        when /babbage/ then 'babbage'
+        when /davinci/ then 'davinci'
+        else 'other'
         end
       end
 
+      private
+
       PRICES = {
         o1: { input: 15.0, output: 60.0 },
         o1_mini: { input: 3.0, output: 12.0 },
data/lib/ruby_llm/model_info.rb
CHANGED
@@ -15,7 +15,7 @@ module RubyLLM
   class ModelInfo
     attr_reader :id, :created_at, :display_name, :provider, :metadata,
                 :context_window, :max_tokens, :supports_vision, :supports_functions,
-                :supports_json_mode, :input_price_per_million, :output_price_per_million
+                :supports_json_mode, :input_price_per_million, :output_price_per_million, :type, :family
 
     def initialize(data) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
       @id = data[:id]
@@ -24,6 +24,8 @@ module RubyLLM
       @provider = data[:provider]
       @context_window = data[:context_window]
       @max_tokens = data[:max_tokens]
+      @type = data[:type]
+      @family = data[:family]
       @supports_vision = data[:supports_vision]
       @supports_functions = data[:supports_functions]
       @supports_json_mode = data[:supports_json_mode]
@@ -35,11 +37,13 @@ module RubyLLM
     def to_h # rubocop:disable Metrics/MethodLength
       {
         id: id,
-        created_at: created_at
+        created_at: created_at&.iso8601,
         display_name: display_name,
         provider: provider,
         context_window: context_window,
         max_tokens: max_tokens,
+        type: type,
+        family: family,
         supports_vision: supports_vision,
         supports_functions: supports_functions,
         supports_json_mode: supports_json_mode,
@@ -48,13 +52,5 @@ module RubyLLM
         metadata: metadata
       }
     end
-
-    def type
-      metadata['type']
-    end
-
-    def family
-      metadata['family']
-    end
   end
 end