ruby_llm 1.5.1 → 1.6.1

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/README.md +8 -34
  3. data/lib/ruby_llm/active_record/acts_as.rb +64 -10
  4. data/lib/ruby_llm/aliases.json +23 -3
  5. data/lib/ruby_llm/chat.rb +30 -8
  6. data/lib/ruby_llm/configuration.rb +7 -18
  7. data/lib/ruby_llm/connection.rb +11 -6
  8. data/lib/ruby_llm/context.rb +2 -3
  9. data/lib/ruby_llm/embedding.rb +3 -4
  10. data/lib/ruby_llm/error.rb +2 -2
  11. data/lib/ruby_llm/image.rb +3 -4
  12. data/lib/ruby_llm/message.rb +4 -0
  13. data/lib/ruby_llm/models.json +6598 -6370
  14. data/lib/ruby_llm/models.rb +22 -31
  15. data/lib/ruby_llm/provider.rb +150 -89
  16. data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -2
  17. data/lib/ruby_llm/providers/anthropic/chat.rb +1 -1
  18. data/lib/ruby_llm/providers/anthropic/embeddings.rb +1 -1
  19. data/lib/ruby_llm/providers/anthropic/media.rb +1 -1
  20. data/lib/ruby_llm/providers/anthropic/models.rb +1 -1
  21. data/lib/ruby_llm/providers/anthropic/streaming.rb +1 -1
  22. data/lib/ruby_llm/providers/anthropic/tools.rb +1 -1
  23. data/lib/ruby_llm/providers/anthropic.rb +17 -22
  24. data/lib/ruby_llm/providers/bedrock/capabilities.rb +3 -63
  25. data/lib/ruby_llm/providers/bedrock/chat.rb +5 -4
  26. data/lib/ruby_llm/providers/bedrock/media.rb +1 -1
  27. data/lib/ruby_llm/providers/bedrock/models.rb +5 -6
  28. data/lib/ruby_llm/providers/bedrock/signing.rb +1 -1
  29. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +5 -4
  30. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +1 -1
  31. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +1 -1
  32. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +1 -1
  33. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +1 -1
  34. data/lib/ruby_llm/providers/bedrock/streaming.rb +1 -1
  35. data/lib/ruby_llm/providers/bedrock.rb +26 -31
  36. data/lib/ruby_llm/providers/deepseek/capabilities.rb +16 -57
  37. data/lib/ruby_llm/providers/deepseek/chat.rb +1 -1
  38. data/lib/ruby_llm/providers/deepseek.rb +12 -17
  39. data/lib/ruby_llm/providers/gemini/capabilities.rb +1 -1
  40. data/lib/ruby_llm/providers/gemini/chat.rb +1 -1
  41. data/lib/ruby_llm/providers/gemini/embeddings.rb +1 -1
  42. data/lib/ruby_llm/providers/gemini/images.rb +1 -1
  43. data/lib/ruby_llm/providers/gemini/media.rb +1 -1
  44. data/lib/ruby_llm/providers/gemini/models.rb +1 -1
  45. data/lib/ruby_llm/providers/gemini/streaming.rb +1 -1
  46. data/lib/ruby_llm/providers/gemini/tools.rb +1 -7
  47. data/lib/ruby_llm/providers/gemini.rb +18 -23
  48. data/lib/ruby_llm/providers/gpustack/chat.rb +1 -1
  49. data/lib/ruby_llm/providers/gpustack/models.rb +1 -1
  50. data/lib/ruby_llm/providers/gpustack.rb +16 -19
  51. data/lib/ruby_llm/providers/mistral/capabilities.rb +1 -1
  52. data/lib/ruby_llm/providers/mistral/chat.rb +1 -1
  53. data/lib/ruby_llm/providers/mistral/embeddings.rb +1 -1
  54. data/lib/ruby_llm/providers/mistral/models.rb +1 -1
  55. data/lib/ruby_llm/providers/mistral.rb +14 -19
  56. data/lib/ruby_llm/providers/ollama/chat.rb +1 -1
  57. data/lib/ruby_llm/providers/ollama/media.rb +1 -1
  58. data/lib/ruby_llm/providers/ollama.rb +13 -18
  59. data/lib/ruby_llm/providers/openai/capabilities.rb +3 -3
  60. data/lib/ruby_llm/providers/openai/chat.rb +3 -6
  61. data/lib/ruby_llm/providers/openai/embeddings.rb +1 -1
  62. data/lib/ruby_llm/providers/openai/images.rb +1 -1
  63. data/lib/ruby_llm/providers/openai/media.rb +1 -1
  64. data/lib/ruby_llm/providers/openai/models.rb +1 -1
  65. data/lib/ruby_llm/providers/openai/streaming.rb +1 -1
  66. data/lib/ruby_llm/providers/openai/tools.rb +1 -1
  67. data/lib/ruby_llm/providers/openai.rb +24 -36
  68. data/lib/ruby_llm/providers/openrouter/models.rb +1 -1
  69. data/lib/ruby_llm/providers/openrouter.rb +9 -14
  70. data/lib/ruby_llm/providers/perplexity/capabilities.rb +1 -30
  71. data/lib/ruby_llm/providers/perplexity/chat.rb +1 -1
  72. data/lib/ruby_llm/providers/perplexity/models.rb +1 -1
  73. data/lib/ruby_llm/providers/perplexity.rb +13 -18
  74. data/lib/ruby_llm/stream_accumulator.rb +3 -3
  75. data/lib/ruby_llm/streaming.rb +16 -3
  76. data/lib/ruby_llm/tool.rb +19 -0
  77. data/lib/ruby_llm/version.rb +1 -1
  78. data/lib/tasks/models_docs.rake +18 -11
  79. data/lib/tasks/models_update.rake +5 -4
  80. metadata +9 -8
checksums.yaml CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 854c31993769b5123faf650081276dae4026907d467514c905739b8737220b55
-  data.tar.gz: 408fb8253e5fdbf82bc3f3c3cc737e313d9835bd8023c1431cc4e6adc2a09be9
+  metadata.gz: e1fd977cc46769d8d235fe13a31e030a133618a335c8cd012ab8de2b7d439297
+  data.tar.gz: ce8d22464541ac19246e6018fde5c7b414ba0022bc5995f8900052f3d0ac5b42
 SHA512:
-  metadata.gz: c5572a959bcd8f314d1af4d66bb363cbe9eaa783fa8cbd6e051d3b0e471da49e87a895724def790fad59589d08eb5eb259ffd8dbe1f09d70274a900ae9575af4
-  data.tar.gz: 191f6f8aadc239998b0346e6a3e54fa4ace1d39fd56947693d7bd26e4248bfccc7894410f2f758bc456750cbef602b1577e7f3ca8a7c3a986077a7e957b7aa13
+  metadata.gz: aec038a65867c5fa13b93a11aa5356183b8f105bb7f48792e6300a9259a809206bc07da0b54c63a0a0de726cc3698ec2cd79c511be43e6fd8f47273adecc47a9
+  data.tar.gz: 5386da468b9f413d7fbf27192a693c2427a1e88a9082a7375f5641f01854a7cbd2235c1c8108ea870c97c97ea79c712bdd5d33e2ac09a22e25a74f9510a1599f
```
data/README.md CHANGED
```diff
@@ -1,44 +1,18 @@
-<img src="/docs/assets/images/logotype.svg" alt="RubyLLM" height="120" width="250">
-
-**A delightful Ruby way to work with AI.** RubyLLM provides **one** beautiful, Ruby-like interface to interact with modern AI models. Chat, generate images, create embeddings, and use tools – all with clean, expressive code that feels like Ruby, not like patching together multiple services.
-
-<div class="provider-icons">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/anthropic-text.svg" alt="Anthropic" class="logo-small">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/bedrock-color.svg" alt="Bedrock" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/bedrock-text.svg" alt="Bedrock" class="logo-small">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-color.svg" alt="DeepSeek" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-text.svg" alt="DeepSeek" class="logo-small">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-brand-color.svg" alt="Gemini" class="logo-large">
-  <br>
-  <img src="https://raw.githubusercontent.com/gpustack/gpustack/main/docs/assets/gpustack-logo.png" alt="GPUStack" class="logo-medium" height="16">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/mistral-color.svg" alt="Mistral" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/mistral-text.svg" alt="Mistral" class="logo-small">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama.svg" alt="Ollama" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama-text.svg" alt="Ollama" class="logo-medium">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" alt="OpenAI" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai-text.svg" alt="OpenAI" class="logo-medium">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openrouter.svg" alt="OpenRouter" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openrouter-text.svg" alt="OpenRouter" class="logo-small">
-  &nbsp;
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/perplexity-color.svg" alt="Perplexity" class="logo-medium">
-  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/perplexity-text.svg" alt="Perplexity" class="logo-small">
-</div>
+<picture>
+  <source media="(prefers-color-scheme: dark)" srcset="/docs/assets/images/logotype_dark.svg">
+  <img src="/docs/assets/images/logotype.svg" alt="RubyLLM" height="120" width="250">
+</picture>
+
+**One *beautiful* Ruby API for GPT, Claude, Gemini, and more.** Easily build chatbots, AI agents, RAG applications, and content generators. Features chat (text, images, audio, PDFs), image generation, embeddings, tools (function calling), structured output, Rails integration, and streaming. Works with OpenAI, Anthropic, Google Gemini, AWS Bedrock, DeepSeek, Mistral, Ollama (local models), OpenRouter, Perplexity, GPUStack, and any OpenAI-compatible API.
 
 <div class="badge-container">
-  <a href="https://badge.fury.io/rb/ruby_llm"><img src="https://badge.fury.io/rb/ruby_llm.svg?a=1" alt="Gem Version" /></a>
+  <a href="https://badge.fury.io/rb/ruby_llm"><img src="https://badge.fury.io/rb/ruby_llm.svg?a=3" alt="Gem Version" /></a>
   <a href="https://github.com/testdouble/standard"><img src="https://img.shields.io/badge/code_style-standard-brightgreen.svg" alt="Ruby Style Guide" /></a>
   <a href="https://rubygems.org/gems/ruby_llm"><img alt="Gem Downloads" src="https://img.shields.io/gem/dt/ruby_llm"></a>
   <a href="https://codecov.io/gh/crmne/ruby_llm"><img src="https://codecov.io/gh/crmne/ruby_llm/branch/main/graph/badge.svg" alt="codecov" /></a>
 </div>
 
-🤺 Battle tested at [💬 Chat with Work](https://chatwithwork.com)
+Battle tested at [<picture><source media="(prefers-color-scheme: dark)" srcset="https://chatwithwork.com/logotype-dark.svg"><img src="https://chatwithwork.com/logotype.svg" alt="Chat with Work" height="30" align="absmiddle"></picture>](https://chatwithwork.com) — *Claude Code for your documents*
 
 ## The problem with AI libraries
 
```
data/lib/ruby_llm/active_record/acts_as.rb CHANGED
```diff
@@ -96,8 +96,7 @@ module RubyLLM
         @chat.add_message(msg.to_llm)
       end
 
-      @chat.on_new_message { persist_new_message }
-           .on_end_message { |msg| persist_message_completion(msg) }
+      setup_persistence_callbacks
     end
 
     def with_instructions(instructions, replace: false)
@@ -139,18 +138,47 @@
       self
     end
 
+    def with_headers(...)
+      to_llm.with_headers(...)
+      self
+    end
+
     def with_schema(...)
       to_llm.with_schema(...)
       self
     end
 
-    def on_new_message(...)
-      to_llm.on_new_message(...)
+    def on_new_message(&block)
+      to_llm
+
+      existing_callback = @chat.instance_variable_get(:@on)[:new_message]
+
+      @chat.on_new_message do
+        existing_callback&.call
+        block&.call
+      end
       self
     end
 
-    def on_end_message(...)
-      to_llm.on_end_message(...)
+    def on_end_message(&block)
+      to_llm
+
+      existing_callback = @chat.instance_variable_get(:@on)[:end_message]
+
+      @chat.on_end_message do |msg|
+        existing_callback&.call(msg)
+        block&.call(msg)
+      end
+      self
+    end
+
+    def on_tool_call(...)
+      to_llm.on_tool_call(...)
+      self
+    end
+
+    def on_tool_result(...)
+      to_llm.on_tool_result(...)
       self
     end
 
@@ -170,15 +198,41 @@ module RubyLLM
     def complete(...)
       to_llm.complete(...)
     rescue RubyLLM::Error => e
-      if @message&.persisted? && @message.content.blank?
-        RubyLLM.logger.debug "RubyLLM: API call failed, destroying message: #{@message.id}"
-        @message.destroy
-      end
+      cleanup_failed_messages if @message&.persisted? && @message.content.blank?
+      cleanup_orphaned_tool_results
       raise e
     end
 
    private
 
+    def cleanup_failed_messages
+      RubyLLM.logger.debug "RubyLLM: API call failed, destroying message: #{@message.id}"
+      @message.destroy
+    end
+
+    def cleanup_orphaned_tool_results
+      loop do
+        messages.reload
+        last = messages.order(:id).last
+
+        break unless last&.tool_call? || last&.tool_result?
+
+        last.destroy
+      end
+    end
+
+    def setup_persistence_callbacks
+      # Only set up once per chat instance
+      return @chat if @chat.instance_variable_get(:@_persistence_callbacks_setup)
+
+      # Set up persistence callbacks (user callbacks will be chained via on_new_message/on_end_message methods)
+      @chat.on_new_message { persist_new_message }
+      @chat.on_end_message { |msg| persist_message_completion(msg) }
+
+      @chat.instance_variable_set(:@_persistence_callbacks_setup, true)
+      @chat
+    end
+
    def persist_new_message
      @message = messages.create!(role: :assistant, content: String.new)
    end
```
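The practical upshot for Rails apps: registering `on_new_message`/`on_end_message` on an `acts_as_chat` model no longer clobbers the gem's own persistence callbacks, because user blocks are now chained after the existing ones. A minimal sketch, assuming a conventional `acts_as_chat` setup (the model and logging code are illustrative):

```ruby
class Chat < ApplicationRecord
  acts_as_chat # provides to_llm, ask, and the callback methods shown in the diff
end

chat = Chat.create!(model_id: 'gpt-4.1-nano')

# In 1.6 these blocks run *after* the built-in persistence callbacks
# instead of replacing them, so assistant messages are still saved.
chat.on_new_message { Rails.logger.info('assistant message started') }
    .on_end_message { |msg| Rails.logger.info("assistant said: #{msg.content}") }

chat.ask('What is the capital of France?')
```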
data/lib/ruby_llm/aliases.json CHANGED
```diff
@@ -29,13 +29,17 @@
     "bedrock": "anthropic.claude-3-opus-20240229-v1:0:200k"
   },
   "claude-3-sonnet": {
-    "bedrock": "anthropic.claude-3-sonnet-20240229-v1:0",
-    "openrouter": "anthropic/claude-3-sonnet"
+    "bedrock": "anthropic.claude-3-sonnet-20240229-v1:0"
   },
   "claude-opus-4": {
     "anthropic": "claude-opus-4-20250514",
     "openrouter": "anthropic/claude-opus-4",
-    "bedrock": "us.anthropic.claude-opus-4-20250514-v1:0"
+    "bedrock": "us.anthropic.claude-opus-4-1-20250805-v1:0"
+  },
+  "claude-opus-4-1": {
+    "anthropic": "claude-opus-4-1-20250805",
+    "openrouter": "anthropic/claude-opus-4.1",
+    "bedrock": "us.anthropic.claude-opus-4-1-20250805-v1:0"
   },
   "claude-sonnet-4": {
     "anthropic": "claude-sonnet-4-20250514",
@@ -162,6 +166,22 @@
     "openai": "gpt-4o-search-preview",
     "openrouter": "openai/gpt-4o-search-preview"
   },
+  "gpt-5": {
+    "openai": "gpt-5",
+    "openrouter": "openai/gpt-5"
+  },
+  "gpt-5-mini": {
+    "openai": "gpt-5-mini",
+    "openrouter": "openai/gpt-5-mini"
+  },
+  "gpt-5-nano": {
+    "openai": "gpt-5-nano",
+    "openrouter": "openai/gpt-5-nano"
+  },
+  "gpt-oss-120b": {
+    "openai": "gpt-oss-120b",
+    "openrouter": "openai/gpt-oss-120b"
+  },
   "o1": {
     "openai": "o1",
     "openrouter": "openai/o1"
```
data/lib/ruby_llm/chat.rb CHANGED
```diff
@@ -11,7 +11,7 @@ module RubyLLM
   class Chat
     include Enumerable
 
-    attr_reader :model, :messages, :tools, :params, :schema
+    attr_reader :model, :messages, :tools, :params, :headers, :schema
 
     def initialize(model: nil, provider: nil, assume_model_exists: false, context: nil)
       if assume_model_exists && !provider
@@ -26,11 +26,13 @@
       @messages = []
       @tools = {}
       @params = {}
+      @headers = {}
       @schema = nil
       @on = {
         new_message: nil,
         end_message: nil,
-        tool_call: nil
+        tool_call: nil,
+        tool_result: nil
       }
     end
 
@@ -58,14 +60,15 @@
       self
     end
 
-    def with_tools(*tools)
-      tools.each { |tool| with_tool tool }
+    def with_tools(*tools, replace: false)
+      @tools.clear if replace
+      tools.compact.each { |tool| with_tool tool }
       self
     end
 
     def with_model(model_id, provider: nil, assume_exists: false)
-      @model, @provider = Models.resolve(model_id, provider:, assume_exists:)
-      @connection = @context ? @context.connection_for(@provider) : @provider.connection(@config)
+      @model, @provider = Models.resolve(model_id, provider:, assume_exists:, config: @config)
+      @connection = @provider.connection
       self
     end
 
@@ -86,6 +89,11 @@
       self
     end
 
+    def with_headers(**headers)
+      @headers = headers
+      self
+    end
+
     def with_schema(schema, force: false)
       unless force || @model.structured_output?
         raise UnsupportedStructuredOutputError, "Model #{@model.id} doesn't support structured output"
@@ -118,6 +126,11 @@
       self
     end
 
+    def on_tool_result(&block)
+      @on[:tool_result] = block
+      self
+    end
+
     def each(&)
       messages.each(&)
     end
@@ -128,8 +141,8 @@
       tools: @tools,
       temperature: @temperature,
       model: @model.id,
-      connection: @connection,
       params: @params,
+      headers: @headers,
       schema: @schema,
       &wrap_streaming_block(&)
     )
@@ -185,15 +198,20 @@
     end
 
     def handle_tool_calls(response, &)
+      halt_result = nil
+
       response.tool_calls.each_value do |tool_call|
         @on[:new_message]&.call
         @on[:tool_call]&.call(tool_call)
         result = execute_tool tool_call
+        @on[:tool_result]&.call(result)
         message = add_message role: :tool, content: result.to_s, tool_call_id: tool_call.id
         @on[:end_message]&.call(message)
+
+        halt_result = result if result.is_a?(Tool::Halt)
       end
 
-      complete(&)
+      halt_result || complete(&)
     end
 
     def execute_tool(tool_call)
@@ -201,5 +219,9 @@
       args = tool_call.arguments
       tool.call(args)
     end
+
+    def instance_variables
+      super - %i[@connection @config]
+    end
   end
 end
```
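Taken together, the chat changes add per-request headers (`with_headers`), tool replacement (`with_tools replace:`), a `tool_result` callback, and an early exit from the tool loop when a tool returns a `Tool::Halt`, in which case `handle_tool_calls` returns the halt result instead of recursing into `complete`. A sketch of how these combine; the `halt` helper is presumed to come from the tool.rb additions (+19 lines) that this diff excerpt doesn't show:

```ruby
require 'ruby_llm'

class Weather < RubyLLM::Tool
  description 'Returns a canned weather report'
  param :city, desc: 'City name'

  def execute(city:)
    # Returning a Tool::Halt (here via the presumed `halt` helper) stops the
    # tool loop: the result is returned directly, with no follow-up LLM call.
    halt "It's sunny in #{city}."
  end
end

chat = RubyLLM.chat(model: 'gpt-4.1-nano')
              .with_headers('X-Request-Id' => 'req-123')  # new in 1.6
              .with_tools(Weather.new, replace: true)     # replace: true clears prior tools
              .on_tool_call   { |tc| puts "calling #{tc.name}" }
              .on_tool_result { |result| puts "tool returned: #{result}" } # new callback

chat.ask('Weather in Berlin?')
```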
data/lib/ruby_llm/configuration.rb CHANGED
```diff
@@ -15,6 +15,7 @@ module RubyLLM
       :openai_api_base,
       :openai_organization_id,
       :openai_project_id,
+      :openai_use_system_role,
       :anthropic_api_key,
       :gemini_api_key,
       :deepseek_api_key,
@@ -43,7 +44,8 @@
       :logger,
       :log_file,
       :log_level,
-      :log_assume_model_exists
+      :log_assume_model_exists,
+      :log_stream_debug
 
     def initialize
       # Connection configuration
@@ -57,30 +59,17 @@
       # Default models
       @default_model = 'gpt-4.1-nano'
       @default_embedding_model = 'text-embedding-3-small'
-      @default_image_model = 'dall-e-3'
+      @default_image_model = 'gpt-image-1'
 
       # Logging configuration
       @log_file = $stdout
       @log_level = ENV['RUBYLLM_DEBUG'] ? Logger::DEBUG : Logger::INFO
       @log_assume_model_exists = true
+      @log_stream_debug = ENV['RUBYLLM_STREAM_DEBUG'] == 'true'
     end
 
-    def inspect
-      redacted = lambda do |name, value|
-        if name.match?(/_id|_key|_secret|_token$/)
-          value.nil? ? 'nil' : '[FILTERED]'
-        else
-          value
-        end
-      end
-
-      inspection = instance_variables.map do |ivar|
-        name = ivar.to_s.delete_prefix('@')
-        value = redacted[name, instance_variable_get(ivar)]
-        "#{name}: #{value}"
-      end.join(', ')
-
-      "#<#{self.class}:0x#{object_id.to_s(16)} #{inspection}>"
+    def instance_variables
+      super.reject { |ivar| ivar.to_s.match?(/_id|_key|_secret|_token$/) }
    end
  end
end
```
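The custom `inspect` is gone because filtering the sensitive ivars out of `instance_variables` achieves the same redaction at the source: the default `Object#inspect`, and anything else that reflects over ivars, simply never sees names matching `_id`, `_key`, `_secret`, or `_token`. The new settings in use (values illustrative):

```ruby
RubyLLM.configure do |config|
  config.openai_api_key = ENV['OPENAI_API_KEY']
  config.log_stream_debug = true # or set RUBYLLM_STREAM_DEBUG=true in the environment
end

# Credential ivars are hidden from instance_variables, so the default
# inspect output no longer leaks API keys:
RubyLLM.config.inspect # => "#<RubyLLM::Configuration ...>" without the key fields
```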
data/lib/ruby_llm/connection.rb CHANGED
```diff
@@ -24,7 +24,7 @@ module RubyLLM
     @config = config
 
     ensure_configured!
-    @connection ||= Faraday.new(provider.api_base(@config)) do |faraday|
+    @connection ||= Faraday.new(provider.api_base) do |faraday|
       setup_timeout(faraday)
       setup_logging(faraday)
       setup_retry(faraday)
@@ -36,14 +36,14 @@
   def post(url, payload, &)
     body = payload.is_a?(Hash) ? JSON.generate(payload, ascii_only: false) : payload
     @connection.post url, body do |req|
-      req.headers.merge! @provider.headers(@config) if @provider.respond_to?(:headers)
+      req.headers.merge! @provider.headers if @provider.respond_to?(:headers)
       yield req if block_given?
     end
   end
 
   def get(url, &)
     @connection.get url do |req|
-      req.headers.merge! @provider.headers(@config) if @provider.respond_to?(:headers)
+      req.headers.merge! @provider.headers if @provider.respond_to?(:headers)
       yield req if block_given?
     end
   end
@@ -106,16 +106,21 @@
   end
 
   def ensure_configured!
-    return if @provider.configured?(@config)
+    return if @provider.configured?
 
+    missing = @provider.configuration_requirements.reject { |req| @config.send(req) }
     config_block = <<~RUBY
       RubyLLM.configure do |config|
-        #{@provider.missing_configs(@config).map { |key| "config.#{key} = ENV['#{key.to_s.upcase}']" }.join("\n ")}
+        #{missing.map { |key| "config.#{key} = ENV['#{key.to_s.upcase}']" }.join("\n ")}
       end
     RUBY
 
     raise ConfigurationError,
-          "#{@provider.slug} provider is not configured. Add this to your initialization:\n\n#{config_block}"
+          "#{@provider.name} provider is not configured. Add this to your initialization:\n\n#{config_block}"
+  end
+
+  def instance_variables
+    super - %i[@config @connection]
   end
 end
```
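Connection now asks the provider instance for `api_base`, `headers`, and `configured?` without threading `@config` through every call, and the missing-key hint is computed from the provider's `configuration_requirements`. Roughly what the raised error looks like with no key set (the exact provider label depends on `Provider#name`):

```ruby
RubyLLM.chat(model: 'gpt-4.1-nano').ask('hi')
# => raises RubyLLM::ConfigurationError with a paste-ready hint, roughly:
#
#    openai provider is not configured. Add this to your initialization:
#
#    RubyLLM.configure do |config|
#      config.openai_api_key = ENV['OPENAI_API_KEY']
#    end
```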
data/lib/ruby_llm/context.rb CHANGED
```diff
@@ -22,9 +22,8 @@
     Image.paint(*args, **kwargs, context: self, &)
   end
 
-  def connection_for(provider_module)
-    slug = provider_module.slug.to_sym
-    @connections[slug] ||= Connection.new(provider_module, @config)
+  def connection_for(provider_instance)
+    provider_instance.connection
    end
  end
end
```
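`connection_for` shrinks to a delegation because connection caching moved onto the provider instance itself; a context no longer keeps its own `@connections` hash. Usage is unchanged (the key name is illustrative):

```ruby
# Contexts still give you isolated configuration; the connection is now
# owned and cached by the provider instance behind the scenes.
context = RubyLLM.context do |config|
  config.openai_api_key = ENV.fetch('OPENAI_STAGING_KEY')
end

context.chat(model: 'gpt-4.1-nano').ask('ping')
```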
data/lib/ruby_llm/embedding.rb CHANGED
```diff
@@ -20,12 +20,11 @@ module RubyLLM
                    dimensions: nil)
       config = context&.config || RubyLLM.config
       model ||= config.default_embedding_model
-      model, provider = Models.resolve(model, provider: provider, assume_exists: assume_model_exists)
+      model, provider_instance = Models.resolve(model, provider: provider, assume_exists: assume_model_exists,
+                                                config: config)
       model_id = model.id
 
-      provider = Provider.for(model_id) if provider.nil?
-      connection = context ? context.connection_for(provider) : provider.connection(config)
-      provider.embed(text, model: model_id, connection:, dimensions:)
+      provider_instance.embed(text, model: model_id, dimensions:)
     end
   end
 end
```
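Model and provider are now resolved in one `Models.resolve` call that carries the active config, so the separate `Provider.for` lookup and manual connection plumbing disappear. The public API is unchanged:

```ruby
embedding = RubyLLM.embed("Ruby is a programmer's best friend")
embedding.vectors.length # => 1536 with the default text-embedding-3-small (model-dependent)
```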
data/lib/ruby_llm/error.rb CHANGED
```diff
@@ -40,9 +40,9 @@ module RubyLLM
   # Faraday middleware that maps provider-specific API errors to RubyLLM errors.
   # Uses provider's parse_error method to extract meaningful error messages.
   class ErrorMiddleware < Faraday::Middleware
-    def initialize(app, provider:)
+    def initialize(app, options = {})
       super(app)
-      @provider = provider
+      @provider = options[:provider]
     end
 
     def call(env)
```
data/lib/ruby_llm/image.rb CHANGED
```diff
@@ -43,12 +43,11 @@ module RubyLLM
                   context: nil)
       config = context&.config || RubyLLM.config
       model ||= config.default_image_model
-      model, provider = Models.resolve(model, provider: provider, assume_exists: assume_model_exists)
+      model, provider_instance = Models.resolve(model, provider: provider, assume_exists: assume_model_exists,
+                                                config: config)
       model_id = model.id
 
-      provider = Provider.for(model_id) if provider.nil?
-      connection = context ? context.connection_for(provider) : provider.connection(config)
-      provider.paint(prompt, model: model_id, size:, connection:)
+      provider_instance.paint(prompt, model: model_id, size:)
     end
   end
 end
```
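`Image.paint` follows the same resolution pattern, and note that the default image model changed from `dall-e-3` to `gpt-image-1` in the configuration.rb diff above. The surface API stays the same:

```ruby
image = RubyLLM.paint('a serene ruby gemstone on black velvet')
image.save('ruby.png')
```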
data/lib/ruby_llm/message.rb CHANGED
```diff
@@ -55,6 +55,10 @@ module RubyLLM
     }.compact
   end
 
+  def instance_variables
+    super - [:@raw]
+  end
+
   private
 
   def normalize_content(content)
```