ruby_llm 1.0.1 → 1.1.0rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. checksums.yaml +4 -4
  2. data/README.md +28 -12
  3. data/lib/ruby_llm/active_record/acts_as.rb +46 -7
  4. data/lib/ruby_llm/aliases.json +65 -0
  5. data/lib/ruby_llm/aliases.rb +56 -0
  6. data/lib/ruby_llm/chat.rb +10 -9
  7. data/lib/ruby_llm/configuration.rb +4 -0
  8. data/lib/ruby_llm/error.rb +15 -4
  9. data/lib/ruby_llm/models.json +1163 -303
  10. data/lib/ruby_llm/models.rb +40 -11
  11. data/lib/ruby_llm/provider.rb +32 -39
  12. data/lib/ruby_llm/providers/anthropic/capabilities.rb +8 -9
  13. data/lib/ruby_llm/providers/anthropic/chat.rb +31 -4
  14. data/lib/ruby_llm/providers/anthropic/streaming.rb +12 -6
  15. data/lib/ruby_llm/providers/anthropic.rb +4 -0
  16. data/lib/ruby_llm/providers/bedrock/capabilities.rb +168 -0
  17. data/lib/ruby_llm/providers/bedrock/chat.rb +108 -0
  18. data/lib/ruby_llm/providers/bedrock/models.rb +84 -0
  19. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  20. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +46 -0
  21. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +63 -0
  22. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
  23. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +90 -0
  24. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
  25. data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
  26. data/lib/ruby_llm/providers/bedrock.rb +83 -0
  27. data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
  28. data/lib/ruby_llm/providers/deepseek.rb +5 -0
  29. data/lib/ruby_llm/providers/gemini/capabilities.rb +50 -34
  30. data/lib/ruby_llm/providers/gemini/chat.rb +8 -15
  31. data/lib/ruby_llm/providers/gemini/images.rb +5 -10
  32. data/lib/ruby_llm/providers/gemini/streaming.rb +35 -76
  33. data/lib/ruby_llm/providers/gemini/tools.rb +12 -12
  34. data/lib/ruby_llm/providers/gemini.rb +4 -0
  35. data/lib/ruby_llm/providers/openai/capabilities.rb +146 -206
  36. data/lib/ruby_llm/providers/openai/streaming.rb +9 -13
  37. data/lib/ruby_llm/providers/openai.rb +4 -0
  38. data/lib/ruby_llm/streaming.rb +96 -0
  39. data/lib/ruby_llm/version.rb +1 -1
  40. data/lib/ruby_llm.rb +6 -3
  41. data/lib/tasks/browser_helper.rb +97 -0
  42. data/lib/tasks/capability_generator.rb +123 -0
  43. data/lib/tasks/capability_scraper.rb +224 -0
  44. data/lib/tasks/cli_helper.rb +22 -0
  45. data/lib/tasks/code_validator.rb +29 -0
  46. data/lib/tasks/model_updater.rb +66 -0
  47. data/lib/tasks/models.rake +28 -193
  48. data/lib/tasks/vcr.rake +13 -30
  49. metadata +27 -19
  50. data/.github/workflows/cicd.yml +0 -158
  51. data/.github/workflows/docs.yml +0 -53
  52. data/.gitignore +0 -59
  53. data/.overcommit.yml +0 -26
  54. data/.rspec +0 -3
  55. data/.rubocop.yml +0 -10
  56. data/.yardopts +0 -12
  57. data/CONTRIBUTING.md +0 -207
  58. data/Gemfile +0 -33
  59. data/Rakefile +0 -9
  60. data/bin/console +0 -17
  61. data/bin/setup +0 -6
  62. data/ruby_llm.gemspec +0 -44
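
Note: the new files above (aliases.json, aliases.rb, providers/bedrock/*, streaming.rb) suggest usage roughly like the sketch below. This is inferred from the file list, not from documentation in this diff; the Bedrock setting names and the provider: keyword are assumptions.

    require 'ruby_llm'

    RubyLLM.configure do |config|
      config.gemini_api_key = ENV['GEMINI_API_KEY']
      # Assumed names for the new AWS Bedrock settings
      config.bedrock_api_key    = ENV['AWS_ACCESS_KEY_ID']
      config.bedrock_secret_key = ENV['AWS_SECRET_ACCESS_KEY']
      config.bedrock_region     = ENV['AWS_REGION']
    end

    # aliases.json lets a short name resolve to a provider-specific model id
    chat = RubyLLM.chat(model: 'claude-3-5-sonnet', provider: :bedrock)
    chat.ask('Hello from Bedrock')

The hunks reproduced below cover the Gemini provider updates.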
@@ -12,10 +12,13 @@ module RubyLLM
  # @return [Integer] the context window size in tokens
  def context_window_for(model_id)
  case model_id
- when /gemini-2\.0-flash/, /gemini-1\.5-flash/ then 1_048_576
+ when /gemini-2\.5-pro-exp-03-25/, /gemini-2\.0-flash/, /gemini-2\.0-flash-lite/, /gemini-1\.5-flash/, /gemini-1\.5-flash-8b/ # rubocop:disable Layout/LineLength
+ 1_048_576
  when /gemini-1\.5-pro/ then 2_097_152
+ when /gemini-embedding-exp/ then 8_192
  when /text-embedding-004/, /embedding-001/ then 2_048
  when /aqa/ then 7_168
+ when /imagen-3/ then nil # No token limit for image generation
  else 32_768 # Sensible default for unknown models
  end
  end
@@ -25,9 +28,13 @@ module RubyLLM
  # @return [Integer] the maximum output tokens
  def max_tokens_for(model_id)
  case model_id
- when /gemini-2\.0-flash/, /gemini-1\.5/ then 8_192
+ when /gemini-2\.5-pro-exp-03-25/ then 64_000
+ when /gemini-2\.0-flash/, /gemini-2\.0-flash-lite/, /gemini-1\.5-flash/, /gemini-1\.5-flash-8b/, /gemini-1\.5-pro/ # rubocop:disable Layout/LineLength
+ 8_192
+ when /gemini-embedding-exp/ then nil # Elastic, supports 3072, 1536, or 768
  when /text-embedding-004/, /embedding-001/ then 768 # Output dimension size for embeddings
  when /aqa/ then 1_024
+ when /imagen-3/ then 4 # Output images
  else 4_096 # Sensible default
  end
  end
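
Note: context_window_for and max_tokens_for drive the per-model metadata. A quick sketch of the expected lookups, assuming the Capabilities helpers are exposed as module functions as in the other providers:

    caps = RubyLLM::Providers::Gemini::Capabilities

    caps.context_window_for('gemini-2.5-pro-exp-03-25')  # => 1_048_576
    caps.context_window_for('gemini-1.5-pro')            # => 2_097_152
    caps.max_tokens_for('gemini-2.5-pro-exp-03-25')      # => 64_000
    caps.max_tokens_for('imagen-3.0-generate-002')       # => 4 (output images)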
@@ -39,8 +46,8 @@ module RubyLLM
  base_price = PRICES.dig(pricing_family(model_id), :input) || default_input_price
  return base_price unless long_context_model?(model_id)

- # Double the price for prompts longer than 128k tokens
- context_length(model_id) > 128_000 ? base_price * 2 : base_price
+ # Apply different pricing for prompts longer than 128k tokens
+ context_window_for(model_id) > 128_000 ? base_price * 2 : base_price
  end

  # Returns the output price per million tokens for the given model
@@ -50,8 +57,8 @@ module RubyLLM
  base_price = PRICES.dig(pricing_family(model_id), :output) || default_output_price
  return base_price unless long_context_model?(model_id)

- # Double the price for prompts longer than 128k tokens
- context_length(model_id) > 128_000 ? base_price * 2 : base_price
+ # Apply different pricing for prompts longer than 128k tokens
+ context_window_for(model_id) > 128_000 ? base_price * 2 : base_price
  end

  # Determines if the model supports vision (image/video) inputs
@@ -59,31 +66,28 @@ module RubyLLM
  # @return [Boolean] true if the model supports vision inputs
  def supports_vision?(model_id)
  return false if model_id.match?(/text-embedding|embedding-001|aqa/)
- return false if model_id.match?(/gemini-1\.0/)

- model_id.match?(/gemini-[12]\.[05]/)
+ model_id.match?(/gemini|flash|pro|imagen/)
  end

  # Determines if the model supports function calling
  # @param model_id [String] the model identifier
  # @return [Boolean] true if the model supports function calling
  def supports_functions?(model_id)
- return false if model_id.match?(/text-embedding|embedding-001|aqa/)
- return false if model_id.match?(/flash-lite/)
- return false if model_id.match?(/gemini-1\.0/)
+ return false if model_id.match?(/text-embedding|embedding-001|aqa|flash-lite|imagen|gemini-2\.0-flash-lite/)

- model_id.match?(/gemini-[12]\.[05]-(?:pro|flash)(?!-lite)/)
+ model_id.match?(/gemini|pro|flash/)
  end

  # Determines if the model supports JSON mode
  # @param model_id [String] the model identifier
  # @return [Boolean] true if the model supports JSON mode
  def supports_json_mode?(model_id)
- return false if model_id.match?(/text-embedding|embedding-001|aqa/)
- return false if model_id.match?(/gemini-1\.0/)
- return false if model_id.match?(/gemini-2\.0-flash-lite/)
+ if model_id.match?(/text-embedding|embedding-001|aqa|imagen|gemini-2\.0-flash-lite|gemini-2\.5-pro-exp-03-25/)
+ return false
+ end

- model_id.match?(/gemini-\d/)
+ model_id.match?(/gemini|pro|flash/)
  end

  # Formats the model ID into a human-readable display name
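
Note: the capability predicates now gate features by keyword rather than by version regex, with explicit exclusion lists. Roughly, under the new code:

    caps = RubyLLM::Providers::Gemini::Capabilities

    caps.supports_vision?('gemini-2.0-flash')             # => true
    caps.supports_functions?('gemini-2.0-flash-lite')     # => false (excluded)
    caps.supports_json_mode?('gemini-2.5-pro-exp-03-25')  # => false (excluded)
    caps.supports_json_mode?('gemini-1.5-pro')            # => true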
@@ -105,23 +109,25 @@ module RubyLLM
  # @param model_id [String] the model identifier
  # @return [Boolean] true if the model supports caching
  def supports_caching?(model_id)
- return false if model_id.match?(/flash-lite|gemini-1\.0/)
+ if model_id.match?(/flash-lite|gemini-2\.5-pro-exp-03-25|aqa|imagen|text-embedding|embedding-001/)
+ return false
+ end

- model_id.match?(/gemini-[12]\.[05]/)
+ model_id.match?(/gemini|pro|flash/)
  end

  # Determines if the model supports tuning
  # @param model_id [String] the model identifier
  # @return [Boolean] true if the model supports tuning
  def supports_tuning?(model_id)
- model_id.match?(/gemini-1\.5-flash/)
+ model_id.match?(/gemini-1\.5-flash|gemini-1\.5-flash-8b/)
  end

  # Determines if the model supports audio inputs
  # @param model_id [String] the model identifier
  # @return [Boolean] true if the model supports audio inputs
  def supports_audio?(model_id)
- model_id.match?(/gemini-[12]\.[05]/)
+ model_id.match?(/gemini|pro|flash/)
  end

  # Returns the type of model (chat, embedding, image)
@@ -129,7 +135,7 @@ module RubyLLM
  # @return [String] the model type
  def model_type(model_id)
  case model_id
- when /text-embedding|embedding/ then 'embedding'
+ when /text-embedding|embedding|gemini-embedding/ then 'embedding'
  when /imagen/ then 'image'
  else 'chat'
  end
@@ -140,15 +146,17 @@ module RubyLLM
  # @return [String] the model family identifier
  def model_family(model_id) # rubocop:disable Metrics/CyclomaticComplexity,Metrics/MethodLength
  case model_id
+ when /gemini-2\.5-pro-exp-03-25/ then 'gemini25_pro_exp'
  when /gemini-2\.0-flash-lite/ then 'gemini20_flash_lite'
  when /gemini-2\.0-flash/ then 'gemini20_flash'
  when /gemini-1\.5-flash-8b/ then 'gemini15_flash_8b'
  when /gemini-1\.5-flash/ then 'gemini15_flash'
  when /gemini-1\.5-pro/ then 'gemini15_pro'
- when /gemini-1\.0-pro/ then 'gemini10_pro'
+ when /gemini-embedding-exp/ then 'gemini_embedding_exp'
  when /text-embedding-004/ then 'embedding4'
  when /embedding-001/ then 'embedding1'
  when /aqa/ then 'aqa'
+ when /imagen-3/ then 'imagen3'
  else 'other'
  end
  end
@@ -158,13 +166,15 @@ module RubyLLM
  # @return [Symbol] the pricing family identifier
  def pricing_family(model_id) # rubocop:disable Metrics/CyclomaticComplexity,Metrics/MethodLength
  case model_id
+ when /gemini-2\.5-pro-exp-03-25/ then :pro_2_5 # rubocop:disable Naming/VariableNumber
  when /gemini-2\.0-flash-lite/ then :flash_lite_2 # rubocop:disable Naming/VariableNumber
  when /gemini-2\.0-flash/ then :flash_2 # rubocop:disable Naming/VariableNumber
  when /gemini-1\.5-flash-8b/ then :flash_8b
  when /gemini-1\.5-flash/ then :flash
  when /gemini-1\.5-pro/ then :pro
- when /gemini-1\.0-pro/ then :pro_1_0 # rubocop:disable Naming/VariableNumber
+ when /gemini-embedding-exp/ then :gemini_embedding
  when /text-embedding|embedding/ then :embedding
+ when /imagen/ then :imagen
  when /aqa/ then :aqa
  else :base
  end
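
Note: model_family and pricing_family are first-match lookups over the model id. For the newly added branches:

    caps = RubyLLM::Providers::Gemini::Capabilities

    caps.model_family('gemini-2.5-pro-exp-03-25')     # => 'gemini25_pro_exp'
    caps.model_family('imagen-3.0-generate-002')      # => 'imagen3'
    caps.pricing_family('gemini-embedding-exp-03-07') # => :gemini_embedding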
@@ -174,7 +184,7 @@ module RubyLLM
  # @param model_id [String] the model identifier
  # @return [Boolean] true if the model supports long context
  def long_context_model?(model_id)
- model_id.match?(/gemini-1\.5-(?:pro|flash)/)
+ model_id.match?(/gemini-1\.5-(?:pro|flash)|gemini-1\.5-flash-8b/)
  end

  # Returns the context length for the model
@@ -191,43 +201,49 @@ module RubyLLM
  output: 0.40,
  audio_input: 0.70,
  cache: 0.025,
- cache_storage: 1.00
+ cache_storage: 1.00,
+ grounding_search: 35.00 # per 1K requests after 1.5K free
  },
  flash_lite_2: { # Gemini 2.0 Flash Lite # rubocop:disable Naming/VariableNumber
  input: 0.075,
- output: 0.30,
- cache: 0.01875,
- cache_storage: 1.00
+ output: 0.30
  },
  flash: { # Gemini 1.5 Flash
  input: 0.075,
  output: 0.30,
  cache: 0.01875,
  cache_storage: 1.00,
- grounding_search: 35.00 # per 1K requests
+ grounding_search: 35.00 # per 1K requests up to 5K per day
  },
  flash_8b: { # Gemini 1.5 Flash 8B
  input: 0.0375,
  output: 0.15,
  cache: 0.01,
  cache_storage: 0.25,
- grounding_search: 35.00 # per 1K requests
+ grounding_search: 35.00 # per 1K requests up to 5K per day
  },
  pro: { # Gemini 1.5 Pro
  input: 1.25,
  output: 5.0,
  cache: 0.3125,
  cache_storage: 4.50,
- grounding_search: 35.00 # per 1K requests
+ grounding_search: 35.00 # per 1K requests up to 5K per day
  },
- pro_1_0: { # Gemini 1.0 Pro # rubocop:disable Naming/VariableNumber
- input: 0.50,
- output: 1.50
+ pro_2_5: { # Gemini 2.5 Pro Experimental # rubocop:disable Naming/VariableNumber
+ input: 0.12,
+ output: 0.50
+ },
+ gemini_embedding: { # Gemini Embedding Experimental
+ input: 0.002,
+ output: 0.004
  },
  embedding: { # Text Embedding models
  input: 0.00,
  output: 0.00
  },
+ imagen: { # Imagen 3
+ price: 0.03 # per image
+ },
  aqa: { # AQA model
  input: 0.00,
  output: 0.00
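
Note: the PRICES table is quoted in dollars per million tokens and feeds the price helpers in the earlier hunks (the method names input_price_for / output_price_for are assumed from the surrounding file; their def lines are outside these hunks). Because the long-context check now uses context_window_for, a 1.5-generation model's rates double whenever its window exceeds 128k tokens:

    caps = RubyLLM::Providers::Gemini::Capabilities

    # gemini-1.5-pro reports a 2_097_152-token window (> 128_000),
    # so the base rates double: 1.25 -> 2.5 input, 5.0 -> 10.0 output.
    caps.input_price_for('gemini-1.5-pro')   # => 2.5
    caps.output_price_for('gemini-1.5-pro')  # => 10.0

    # Rough cost of one call at those rates (per million tokens):
    input_tokens, output_tokens = 12_000, 800
    (input_tokens * 2.5 + output_tokens * 10.0) / 1_000_000  # => 0.038 USD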
@@ -4,9 +4,13 @@ module RubyLLM
  module Providers
  module Gemini
  # Chat methods for the Gemini API implementation
- module Chat # rubocop:disable Metrics/ModuleLength
- # Must be public for Provider to use
+ module Chat
+ def completion_url
+ "models/#{@model}:generateContent"
+ end
+
  def complete(messages, tools:, temperature:, model:, &block) # rubocop:disable Metrics/MethodLength
+ @model = model
  payload = {
  contents: format_messages(messages),
  generationConfig: {
@@ -20,26 +24,15 @@ module RubyLLM
  @tools = tools

  if block_given?
- stream_completion(model, payload, &block)
+ stream_response payload, &block
  else
- generate_completion(model, payload)
+ sync_response payload
  end
  end

  # Format methods can be private
  private

- def generate_completion(model, payload)
- url = "models/#{model}:generateContent"
- response = post(url, payload)
- result = parse_completion_response(response)
-
- # If this contains a tool call, log it
- result.tool_calls.values.first if result.tool_call?
-
- result
- end
-
  def format_messages(messages)
  messages.map do |msg|
  {
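
Note: complete now just records the model and delegates to the shared sync_response / stream_response helpers introduced by the new data/lib/ruby_llm/streaming.rb, with completion_url and stream_url supplying the per-model endpoints. From the public chat API, the only difference is whether a block is passed, roughly:

    chat = RubyLLM.chat(model: 'gemini-2.0-flash')

    # No block: complete goes through sync_response
    response = chat.ask('Summarize this diff in one sentence')
    puts response.content

    # Block given: complete goes through stream_response and yields chunks
    chat.ask('Now expand that into a short paragraph') do |chunk|
      print chunk.content
    end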
@@ -5,18 +5,13 @@ module RubyLLM
  module Gemini
  # Image generation methods for the Gemini API implementation
  module Images
- def images_url(model:)
- "models/#{model}:predict"
+ def images_url
+ "models/#{@model}:predict"
  end

- def paint(prompt, model:, size:) # rubocop:disable Lint/UnusedMethodArgument
- payload = render_image_payload(prompt)
-
- response = post(images_url(model:), payload)
- parse_image_response(response)
- end
-
- def render_image_payload(prompt)
+ def render_image_payload(prompt, model:, size:) # rubocop:disable Metrics/MethodLength
+ RubyLLM.logger.debug "Ignoring size #{size}. Gemini does not support image size customization."
+ @model = model
  {
  instances: [
  {
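
Note: paint itself has moved out of the Gemini module (presumably into the shared provider layer), leaving only the URL and payload builders here; the :size option is now ignored with a debug log. A usage sketch, with the Imagen model id given as an example rather than taken from this diff:

    image = RubyLLM.paint(
      'a lighthouse in a thunderstorm',
      model: 'imagen-3.0-generate-002'  # example Imagen 3 id, not shown in this diff
    )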
@@ -5,93 +5,52 @@ module RubyLLM
  module Gemini
  # Streaming methods for the Gemini API implementation
  module Streaming
- # Need to make stream_completion public for chat.rb to access
- def stream_completion(model, payload, &block) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
- url = "models/#{model}:streamGenerateContent?alt=sse"
- accumulator = StreamAccumulator.new
-
- post(url, payload) do |req|
- req.options.on_data = stream_handler(accumulator, &block)
- end
-
- # If this is a tool call, immediately execute it and include the result
- message = accumulator.to_message
- if message.tool_call? && message.content.to_s.empty? && @tools && !@tools.empty?
- tool_call = message.tool_calls.values.first
- tool = @tools[tool_call.name.to_sym]
-
- if tool
- tool_result = tool.call(tool_call.arguments)
- # Create a new chunk with the result
- result_chunk = Chunk.new(
- role: :assistant,
- content: "The result is #{tool_result}",
- model_id: message.model_id,
- input_tokens: message.input_tokens,
- output_tokens: message.output_tokens,
- tool_calls: message.tool_calls
- )
-
- # Add to accumulator and call the block
- accumulator.add(result_chunk)
- block.call(result_chunk)
- end
- end
+ def stream_url
+ "models/#{@model}:streamGenerateContent?alt=sse"
+ end

- accumulator.to_message
+ def build_chunk(data)
+ Chunk.new(
+ role: :assistant,
+ model_id: extract_model_id(data),
+ content: extract_content(data),
+ input_tokens: extract_input_tokens(data),
+ output_tokens: extract_output_tokens(data),
+ tool_calls: extract_tool_calls(data)
+ )
  end

  private

- # Handle streaming
- def stream_handler(accumulator, &block) # rubocop:disable Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/MethodLength,Metrics/PerceivedComplexity
- to_json_stream do |data| # rubocop:disable Metrics/BlockLength
- next unless data['candidates']&.any?
-
- candidate = data['candidates'][0]
- parts = candidate.dig('content', 'parts')
- model_id = data['modelVersion']
+ def extract_model_id(data)
+ data['modelVersion']
+ end

- # First attempt to extract tool calls
- tool_calls = nil
+ def extract_content(data)
+ return nil unless data['candidates']&.any?

- # Check if any part contains a functionCall
- if parts&.any? { |p| p['functionCall'] }
- function_part = parts.find { |p| p['functionCall'] }
- function_data = function_part['functionCall']
+ candidate = data['candidates'][0]
+ parts = candidate.dig('content', 'parts')
+ return nil unless parts

- if function_data && function_data['name']
- # Create a tool call with proper structure - convert args to JSON string
- id = SecureRandom.uuid
- tool_calls = {
- id => ToolCall.new(
- id: id,
- name: function_data['name'],
- arguments: JSON.generate(function_data['args']) # Convert Hash to JSON string
- )
- }
- end
- end
+ text_parts = parts.select { |p| p['text'] }
+ text_parts.map { |p| p['text'] }.join if text_parts.any?
+ end

- # Extract text content (if any)
- text = nil
- if parts
- text_parts = parts.select { |p| p['text'] }
- text = text_parts.map { |p| p['text'] }.join if text_parts.any?
- end
+ def extract_input_tokens(data)
+ data.dig('usageMetadata', 'promptTokenCount')
+ end

- chunk = Chunk.new(
- role: :assistant,
- content: text,
- model_id: model_id,
- input_tokens: data.dig('usageMetadata', 'promptTokenCount'),
- output_tokens: data.dig('usageMetadata', 'candidatesTokenCount'),
- tool_calls: tool_calls
- )
+ def extract_output_tokens(data)
+ data.dig('usageMetadata', 'candidatesTokenCount')
+ end

- accumulator.add(chunk)
- block.call(chunk)
- end
+ def parse_streaming_error(data)
+ error_data = JSON.parse(data)
+ [error_data['error']['code'], error_data['error']['message']]
+ rescue JSON::ParserError => e
+ RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
+ [500, "Failed to parse error: #{data}"]
  end
  end
  end
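
Note: the streaming module is now a set of small extractors over each parsed SSE event, with accumulation and tool-call handling presumably living in the shared streaming code. Given a typical Gemini event (shape abbreviated), build_chunk produces:

    data = {
      'modelVersion'  => 'gemini-2.0-flash',
      'candidates'    => [
        { 'content' => { 'parts' => [{ 'text' => 'Hello' }, { 'text' => ' world' }] } }
      ],
      'usageMetadata' => { 'promptTokenCount' => 8, 'candidatesTokenCount' => 2 }
    }

    # build_chunk(data) => Chunk with
    #   content       => "Hello world"
    #   model_id      => "gemini-2.0-flash"
    #   input_tokens  => 8
    #   output_tokens => 2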
@@ -54,22 +54,22 @@ module RubyLLM
  {
  name: tool.name,
  description: tool.description,
- parameters: {
- type: 'OBJECT',
- properties: format_parameters(tool.parameters),
- required: tool.parameters.select { |_, p| p.required }.keys.map(&:to_s)
- }
- }
+ parameters: tool.parameters.any? ? format_parameters(tool.parameters) : nil
+ }.compact
  end

  # Format tool parameters for Gemini API
  def format_parameters(parameters)
- parameters.transform_values do |param|
- {
- type: param_type_for_gemini(param.type),
- description: param.description
- }.compact
- end
+ {
+ type: 'OBJECT',
+ properties: parameters.transform_values do |param|
+ {
+ type: param_type_for_gemini(param.type),
+ description: param.description
+ }.compact
+ end,
+ required: parameters.select { |_, p| p.required }.keys.map(&:to_s)
+ }
  end

  # Convert RubyLLM param types to Gemini API types
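
Note: format_parameters now returns the whole Gemini OBJECT schema (type, properties, required), and the tool declaration omits parameters entirely for tools with none. For a single-parameter tool the declaration sent to Gemini looks roughly like this (the uppercase STRING mapping from param_type_for_gemini is assumed, not shown in this hunk):

    {
      name: 'weather',
      description: 'Gets current weather for a location',
      parameters: {
        type: 'OBJECT',
        properties: {
          latitude: { type: 'STRING', description: 'Latitude (e.g., 52.5200)' }
        },
        required: ['latitude']
      }
    }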
@@ -32,6 +32,10 @@ module RubyLLM
  def slug
  'gemini'
  end
+
+ def configuration_requirements
+ %i[gemini_api_key]
+ end
  end
  end
  end
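
Note: configuration_requirements is a new per-provider hook (the other provider files gain similar small hunks), presumably used by the shared Provider code to verify configuration up front. For Gemini that means gemini_api_key must be set before use:

    RubyLLM.configure do |config|
      config.gemini_api_key = ENV['GEMINI_API_KEY']  # env var name is illustrative
    end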