ruby_llm 1.6.1 → 1.6.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. checksums.yaml +4 -4
  2. data/README.md +73 -91
  3. data/lib/ruby_llm/active_record/acts_as.rb +3 -11
  4. data/lib/ruby_llm/aliases.json +4 -0
  5. data/lib/ruby_llm/aliases.rb +7 -25
  6. data/lib/ruby_llm/chat.rb +3 -19
  7. data/lib/ruby_llm/configuration.rb +1 -14
  8. data/lib/ruby_llm/content.rb +1 -3
  9. data/lib/ruby_llm/embedding.rb +1 -2
  10. data/lib/ruby_llm/error.rb +0 -10
  11. data/lib/ruby_llm/image.rb +0 -4
  12. data/lib/ruby_llm/message.rb +2 -4
  13. data/lib/ruby_llm/model/info.rb +0 -10
  14. data/lib/ruby_llm/model/pricing.rb +0 -3
  15. data/lib/ruby_llm/model/pricing_category.rb +0 -2
  16. data/lib/ruby_llm/model/pricing_tier.rb +0 -1
  17. data/lib/ruby_llm/models.json +668 -434
  18. data/lib/ruby_llm/models.rb +6 -18
  19. data/lib/ruby_llm/provider.rb +1 -5
  20. data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -46
  21. data/lib/ruby_llm/providers/anthropic/media.rb +0 -1
  22. data/lib/ruby_llm/providers/anthropic/tools.rb +0 -1
  23. data/lib/ruby_llm/providers/anthropic.rb +1 -2
  24. data/lib/ruby_llm/providers/bedrock/chat.rb +0 -2
  25. data/lib/ruby_llm/providers/bedrock/media.rb +0 -1
  26. data/lib/ruby_llm/providers/bedrock/models.rb +0 -2
  27. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +1 -13
  28. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +0 -7
  29. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +0 -12
  30. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +0 -12
  31. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +0 -13
  32. data/lib/ruby_llm/providers/bedrock/streaming.rb +0 -18
  33. data/lib/ruby_llm/providers/bedrock.rb +1 -2
  34. data/lib/ruby_llm/providers/deepseek/capabilities.rb +1 -2
  35. data/lib/ruby_llm/providers/deepseek/chat.rb +0 -1
  36. data/lib/ruby_llm/providers/gemini/capabilities.rb +26 -101
  37. data/lib/ruby_llm/providers/gemini/chat.rb +12 -8
  38. data/lib/ruby_llm/providers/gemini/embeddings.rb +0 -2
  39. data/lib/ruby_llm/providers/gemini/images.rb +0 -1
  40. data/lib/ruby_llm/providers/gemini/media.rb +0 -1
  41. data/lib/ruby_llm/providers/gemini/models.rb +1 -2
  42. data/lib/ruby_llm/providers/gemini/streaming.rb +4 -1
  43. data/lib/ruby_llm/providers/gemini/tools.rb +0 -5
  44. data/lib/ruby_llm/providers/gpustack/chat.rb +0 -1
  45. data/lib/ruby_llm/providers/gpustack/models.rb +3 -4
  46. data/lib/ruby_llm/providers/mistral/capabilities.rb +2 -10
  47. data/lib/ruby_llm/providers/mistral/chat.rb +0 -2
  48. data/lib/ruby_llm/providers/mistral/embeddings.rb +0 -3
  49. data/lib/ruby_llm/providers/mistral/models.rb +0 -1
  50. data/lib/ruby_llm/providers/ollama/chat.rb +0 -1
  51. data/lib/ruby_llm/providers/ollama/media.rb +0 -1
  52. data/lib/ruby_llm/providers/openai/capabilities.rb +2 -17
  53. data/lib/ruby_llm/providers/openai/chat.rb +0 -3
  54. data/lib/ruby_llm/providers/openai/embeddings.rb +0 -3
  55. data/lib/ruby_llm/providers/openai/media.rb +0 -1
  56. data/lib/ruby_llm/providers/openai.rb +1 -3
  57. data/lib/ruby_llm/providers/openrouter/models.rb +1 -16
  58. data/lib/ruby_llm/providers/perplexity/capabilities.rb +0 -1
  59. data/lib/ruby_llm/providers/perplexity/chat.rb +0 -1
  60. data/lib/ruby_llm/providers/perplexity.rb +1 -5
  61. data/lib/ruby_llm/railtie.rb +0 -1
  62. data/lib/ruby_llm/stream_accumulator.rb +3 -5
  63. data/lib/ruby_llm/streaming.rb +16 -25
  64. data/lib/ruby_llm/tool.rb +2 -19
  65. data/lib/ruby_llm/tool_call.rb +0 -9
  66. data/lib/ruby_llm/version.rb +1 -1
  67. data/lib/ruby_llm.rb +0 -2
  68. data/lib/tasks/aliases.rake +7 -37
  69. data/lib/tasks/models_docs.rake +5 -15
  70. data/lib/tasks/models_update.rake +1 -1
  71. data/lib/tasks/vcr.rake +0 -7
  72. metadata +1 -1
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: e1fd977cc46769d8d235fe13a31e030a133618a335c8cd012ab8de2b7d439297
-  data.tar.gz: ce8d22464541ac19246e6018fde5c7b414ba0022bc5995f8900052f3d0ac5b42
+  metadata.gz: 5e454eaf845f9c4b9a03f6e5a12567e59d5621d1ff6db3e866be50e836de65c2
+  data.tar.gz: 8e49e339287432cc9feee74ee9b0b49abffcc282bb6387fc9d2cdd6889f9c39c
 SHA512:
-  metadata.gz: aec038a65867c5fa13b93a11aa5356183b8f105bb7f48792e6300a9259a809206bc07da0b54c63a0a0de726cc3698ec2cd79c511be43e6fd8f47273adecc47a9
-  data.tar.gz: 5386da468b9f413d7fbf27192a693c2427a1e88a9082a7375f5641f01854a7cbd2235c1c8108ea870c97c97ea79c712bdd5d33e2ac09a22e25a74f9510a1599f
+  metadata.gz: 06e7fc0118d88631e1c41a9b4b0738b6f55b965ee4569643759489e81191317dfe4cba5e55dcf89d386ef68379b013a4a0a6c9b9b71f3dd3115f7f46186545f4
+  data.tar.gz: 047a575afaa0cf37934e395bd49460edab5a07e29a8c7f085139c331c94d44bdc226c9c2b8781183d05a7ce6b81b08af943b86c9c3ee76f19188a491585d971b
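To check a downloaded copy against these values, note that a `.gem` file is a tar archive whose members include `metadata.gz` and `data.tar.gz`; the checksums above cover those members, not the outer file. A minimal verification sketch using Ruby's standard library (file names assume you have unpacked `ruby_llm-1.6.3.gem` into the current directory):

```ruby
require 'digest'

# After `tar -xf ruby_llm-1.6.3.gem`, hash each member and compare
# against the SHA256 values listed above for the new version.
%w[metadata.gz data.tar.gz].each do |member|
  puts "#{member}: #{Digest::SHA256.file(member).hexdigest}"
end
```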
data/README.md CHANGED
@@ -1,99 +1,114 @@
+<div align="center">
+
 <picture>
   <source media="(prefers-color-scheme: dark)" srcset="/docs/assets/images/logotype_dark.svg">
   <img src="/docs/assets/images/logotype.svg" alt="RubyLLM" height="120" width="250">
 </picture>
 
-**One *beautiful* Ruby API for GPT, Claude, Gemini, and more.** Easily build chatbots, AI agents, RAG applications, and content generators. Features chat (text, images, audio, PDFs), image generation, embeddings, tools (function calling), structured output, Rails integration, and streaming. Works with OpenAI, Anthropic, Google Gemini, AWS Bedrock, DeepSeek, Mistral, Ollama (local models), OpenRouter, Perplexity, GPUStack, and any OpenAI-compatible API.
+<strong>One *beautiful* Ruby API for GPT, Claude, Gemini, and more.</strong>
+
+Battle tested at [<picture><source media="(prefers-color-scheme: dark)" srcset="https://chatwithwork.com/logotype-dark.svg"><img src="https://chatwithwork.com/logotype.svg" alt="Chat with Work" height="30" align="absmiddle"></picture>](https://chatwithwork.com) — *Claude Code for your documents*
+
+[![Gem Version](https://badge.fury.io/rb/ruby_llm.svg?a=5)](https://badge.fury.io/rb/ruby_llm)
+[![Ruby Style Guide](https://img.shields.io/badge/code_style-standard-brightgreen.svg)](https://github.com/testdouble/standard)
+[![Gem Downloads](https://img.shields.io/gem/dt/ruby_llm)](https://rubygems.org/gems/ruby_llm)
+[![codecov](https://codecov.io/gh/crmne/ruby_llm/branch/main/graph/badge.svg)](https://codecov.io/gh/crmne/ruby_llm)
 
-<div class="badge-container">
-  <a href="https://badge.fury.io/rb/ruby_llm"><img src="https://badge.fury.io/rb/ruby_llm.svg?a=3" alt="Gem Version" /></a>
-  <a href="https://github.com/testdouble/standard"><img src="https://img.shields.io/badge/code_style-standard-brightgreen.svg" alt="Ruby Style Guide" /></a>
-  <a href="https://rubygems.org/gems/ruby_llm"><img alt="Gem Downloads" src="https://img.shields.io/gem/dt/ruby_llm"></a>
-  <a href="https://codecov.io/gh/crmne/ruby_llm"><img src="https://codecov.io/gh/crmne/ruby_llm/branch/main/graph/badge.svg" alt="codecov" /></a>
+<a href="https://trendshift.io/repositories/13640" target="_blank"><img src="https://trendshift.io/api/badge/repositories/13640" alt="crmne%2Fruby_llm | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
 </div>
 
-Battle tested at [<picture><source media="(prefers-color-scheme: dark)" srcset="https://chatwithwork.com/logotype-dark.svg"><img src="https://chatwithwork.com/logotype.svg" alt="Chat with Work" height="30" align="absmiddle"></picture>](https://chatwithwork.com) — *Claude Code for your documents*
+---
+
+Build chatbots, AI agents, RAG applications. Works with OpenAI, Anthropic, Google, AWS, local models, and any OpenAI-compatible API.
 
-## The problem with AI libraries
+## Why RubyLLM?
 
-Every AI provider comes with its own client library, its own response format, its own conventions for streaming, and its own way of handling errors. Want to use multiple providers? Prepare to juggle incompatible APIs and bloated dependencies.
+Every AI provider ships their own bloated client. Different APIs. Different response formats. Different conventions. It's exhausting.
 
-RubyLLM fixes all that. One beautiful API for everything. One consistent format. Minimal dependencies: just Faraday, Zeitwerk, and Marcel. Because working with AI should be a joy, not a chore.
+RubyLLM gives you one beautiful API for all of them. Same interface whether you're using GPT, Claude, or your local Ollama. Just three dependencies: Faraday, Zeitwerk, and Marcel. That's it.
 
-## What makes it great
+## Show me the code
 
 ```ruby
 # Just ask questions
 chat = RubyLLM.chat
 chat.ask "What's the best way to learn Ruby?"
+```
 
-# Analyze images, audio, documents, and text files
+```ruby
+# Analyze any file type
 chat.ask "What's in this image?", with: "ruby_conf.jpg"
 chat.ask "Describe this meeting", with: "meeting.wav"
 chat.ask "Summarize this document", with: "contract.pdf"
 chat.ask "Explain this code", with: "app.rb"
+```
 
-# Multiple files at once - types automatically detected
+```ruby
+# Multiple files at once
 chat.ask "Analyze these files", with: ["diagram.png", "report.pdf", "notes.txt"]
+```
 
-# Stream responses in real-time
-chat.ask "Tell me a story about a Ruby programmer" do |chunk|
+```ruby
+# Stream responses
+chat.ask "Tell me a story about Ruby" do |chunk|
   print chunk.content
 end
+```
 
+```ruby
 # Generate images
 RubyLLM.paint "a sunset over mountains in watercolor style"
+```
 
-# Create vector embeddings
+```ruby
+# Create embeddings
 RubyLLM.embed "Ruby is elegant and expressive"
+```
 
+```ruby
 # Let AI use your code
 class Weather < RubyLLM::Tool
-  description "Gets current weather for a location"
-  param :latitude, desc: "Latitude (e.g., 52.5200)"
-  param :longitude, desc: "Longitude (e.g., 13.4050)"
+  description "Get current weather"
+  param :latitude
+  param :longitude
 
   def execute(latitude:, longitude:)
     url = "https://api.open-meteo.com/v1/forecast?latitude=#{latitude}&longitude=#{longitude}&current=temperature_2m,wind_speed_10m"
-
-    response = Faraday.get(url)
-    data = JSON.parse(response.body)
-  rescue => e
-    { error: e.message }
+    JSON.parse(Faraday.get(url).body)
   end
 end
 
-chat.with_tool(Weather).ask "What's the weather in Berlin? (52.5200, 13.4050)"
+chat.with_tool(Weather).ask "What's the weather in Berlin?"
+```
 
-# Get structured output with JSON schemas
+```ruby
+# Get structured output
 class ProductSchema < RubyLLM::Schema
-  string :name, description: "Product name"
-  number :price, description: "Price in USD"
-  array :features, description: "Key features" do
-    string description: "Feature description"
+  string :name
+  number :price
+  array :features do
+    string
   end
 end
 
-response = chat.with_schema(ProductSchema)
-               .ask "Analyze this product description", with: "product.txt"
-# response.content => { "name" => "...", "price" => 99.99, "features" => [...] }
+response = chat.with_schema(ProductSchema).ask "Analyze this product", with: "product.txt"
 ```
 
-## Core Capabilities
-
-* 💬 **Unified Chat:** Converse with models from OpenAI, Anthropic, Gemini, Bedrock, OpenRouter, DeepSeek, Perplexity, Mistral, Ollama, or any OpenAI-compatible API using `RubyLLM.chat`.
-* 👁️ **Vision:** Analyze images within chats.
-* 🔊 **Audio:** Transcribe and understand audio content.
-* 📄 **Document Analysis:** Extract information from PDFs, text files, CSV, JSON, XML, Markdown, and code files.
-* 🖼️ **Image Generation:** Create images with `RubyLLM.paint`.
-* 📊 **Embeddings:** Generate text embeddings for vector search with `RubyLLM.embed`.
-* 🔧 **Tools (Function Calling):** Let AI models call your Ruby code using `RubyLLM::Tool`.
-* 📋 **Structured Output:** Guarantee responses conform to JSON schemas with `RubyLLM::Schema`.
-* 🚂 **Rails Integration:** Easily persist chats, messages, and tool calls using `acts_as_chat` and `acts_as_message`.
-* 🌊 **Streaming:** Process responses in real-time with idiomatic Ruby blocks.
-* **Async Support:** Built-in fiber-based concurrency for high-performance operations.
-* 🎯 **Smart Configuration:** Global and scoped configs with automatic retries and proxy support.
-* 📚 **Model Registry:** Access 500+ models with capability detection and pricing info.
+## Features
+
+* **Chat:** Conversational AI with `RubyLLM.chat`
+* **Vision:** Analyze images and screenshots
+* **Audio:** Transcribe and understand speech
+* **Documents:** Extract from PDFs, CSVs, JSON, any file type
+* **Image generation:** Create images with `RubyLLM.paint`
+* **Embeddings:** Vector search with `RubyLLM.embed`
+* **Tools:** Let AI call your Ruby methods
+* **Structured output:** JSON schemas that just work
+* **Streaming:** Real-time responses with blocks
+* **Rails:** ActiveRecord integration with `acts_as_chat`
+* **Async:** Fiber-based concurrency
+* **Model registry:** 500+ models with capability detection and pricing
+* **Providers:** OpenAI, Anthropic, Gemini, Bedrock, DeepSeek, Mistral, Ollama, OpenRouter, Perplexity, GPUStack, and any OpenAI-compatible API
 
 ## Installation
 
@@ -103,69 +118,36 @@ gem 'ruby_llm'
 ```
 Then `bundle install`.
 
-Configure your API keys (using environment variables is recommended):
+Configure your API keys:
 ```ruby
-# config/initializers/ruby_llm.rb or similar
+# config/initializers/ruby_llm.rb
 RubyLLM.configure do |config|
-  config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
-  # Add keys ONLY for providers you intend to use
-  # config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
-  # ... see Configuration guide for all options ...
+  config.openai_api_key = ENV['OPENAI_API_KEY']
 end
 ```
-See the [Installation Guide](https://rubyllm.com/installation) for full details.
 
-## Rails Integration
-
-Add persistence to your chat models effortlessly:
+## Rails
 
 ```bash
-# Generate models and migrations
 rails generate ruby_llm:install
 ```
 
 ```ruby
-# Or add to existing models
 class Chat < ApplicationRecord
-  acts_as_chat # Automatically saves messages & tool calls
-end
-
-class Message < ApplicationRecord
-  acts_as_message
+  acts_as_chat
 end
 
-class ToolCall < ApplicationRecord
-  acts_as_tool_call
-end
-
-# Now chats persist automatically
-chat = Chat.create!(model_id: "gpt-4.1-nano")
-chat.ask("What's in this file?", with: "report.pdf")
+chat = Chat.create! model_id: "claude-sonnet-4"
+chat.ask "What's in this file?", with: "report.pdf"
 ```
 
-See the [Rails Integration Guide](https://rubyllm.com/guides/rails) for details.
-
-## Learn More
-
-Dive deeper with the official documentation:
+## Documentation
 
-- [Installation](https://rubyllm.com/installation)
-- [Configuration](https://rubyllm.com/configuration)
-- **Guides:**
-  - [Getting Started](https://rubyllm.com/guides/getting-started)
-  - [Chatting with AI Models](https://rubyllm.com/guides/chat)
-  - [Using Tools](https://rubyllm.com/guides/tools)
-  - [Streaming Responses](https://rubyllm.com/guides/streaming)
-  - [Rails Integration](https://rubyllm.com/guides/rails)
-  - [Image Generation](https://rubyllm.com/guides/image-generation)
-  - [Embeddings](https://rubyllm.com/guides/embeddings)
-  - [Working with Models](https://rubyllm.com/guides/models)
-  - [Error Handling](https://rubyllm.com/guides/error-handling)
-  - [Available Models](https://rubyllm.com/guides/available-models)
+[rubyllm.com](https://rubyllm.com)
 
 ## Contributing
 
-We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on setup, testing, and contribution guidelines.
+See [CONTRIBUTING.md](CONTRIBUTING.md).
 
 ## License
 
data/lib/ruby_llm/active_record/acts_as.rb CHANGED
@@ -3,8 +3,6 @@
 module RubyLLM
   module ActiveRecord
     # Adds chat and message persistence capabilities to ActiveRecord models.
-    # Provides a clean interface for storing chat history, message metadata,
-    # and attachments in your database.
     module ActsAs
       extend ActiveSupport::Concern
 
@@ -75,8 +73,7 @@ module RubyLLM
      end
    end
 
-    # Methods mixed into chat models to handle message persistence and
-    # provide a conversation interface.
+    # Methods mixed into chat models.
    module ChatMethods
      extend ActiveSupport::Concern
 
@@ -222,10 +219,8 @@ module RubyLLM
      end
 
      def setup_persistence_callbacks
-        # Only set up once per chat instance
        return @chat if @chat.instance_variable_get(:@_persistence_callbacks_setup)
 
-        # Set up persistence callbacks (user callbacks will be chained via on_new_message/on_end_message methods)
        @chat.on_new_message { persist_new_message }
        @chat.on_end_message { |msg| persist_message_completion(msg) }
 
@@ -234,7 +229,7 @@ module RubyLLM
      end
 
      def persist_new_message
-        @message = messages.create!(role: :assistant, content: String.new)
+        @message = messages.create!(role: :assistant, content: '')
      end
 
      def persist_message_completion(message)
@@ -243,7 +238,6 @@ module RubyLLM
        tool_call_id = find_tool_call_id(message.tool_call_id) if message.tool_call_id
 
        transaction do
-          # Convert parsed JSON back to JSON string for storage
          content = message.content
          content = content.to_json if content.is_a?(Hash) || content.is_a?(Array)
 
@@ -297,7 +291,6 @@ module RubyLLM
      def convert_to_active_storage_format(source)
        return if source.blank?
 
-        # Let RubyLLM::Attachment handle the heavy lifting
        attachment = RubyLLM::Attachment.new(source)
 
        {
@@ -311,8 +304,7 @@ module RubyLLM
      end
    end
 
-    # Methods mixed into message models to handle serialization and
-    # provide a clean interface to the underlying message data.
+    # Methods mixed into message models.
    module MessageMethods
      extend ActiveSupport::Concern
 
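One detail worth calling out from `persist_message_completion`: structured content (a Hash or Array produced via `with_schema`) is serialized to a JSON string before it reaches the database. A standalone sketch of that branch (the helper name is hypothetical):

```ruby
require 'json'

# Mirrors the branch inside persist_message_completion: Hash/Array content
# (structured output) becomes a JSON string; plain text is stored as-is.
def storable_content(content)
  content.is_a?(Hash) || content.is_a?(Array) ? content.to_json : content
end

storable_content({ name: 'Widget', price: 9.99 }) # => "{\"name\":\"Widget\",\"price\":9.99}"
storable_content('plain text')                    # => "plain text"
```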
data/lib/ruby_llm/aliases.json CHANGED
@@ -182,6 +182,10 @@
     "openai": "gpt-oss-120b",
     "openrouter": "openai/gpt-oss-120b"
   },
+  "gpt-oss-20b": {
+    "openai": "gpt-oss-20b",
+    "openrouter": "openai/gpt-oss-20b"
+  },
   "o1": {
     "openai": "o1",
     "openrouter": "openai/o1"
data/lib/ruby_llm/aliases.rb CHANGED
@@ -1,53 +1,35 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Manages model aliases, allowing users to reference models by simpler names
-  # that map to specific model versions across different providers.
-  #
-  # Aliases are defined in aliases.json and follow the format:
-  #   {
-  #     "simple-name": {
-  #       "provider1": "specific-version-for-provider1",
-  #       "provider2": "specific-version-for-provider2"
-  #     }
-  #   }
+  # Manages model aliases for provider-specific versions
  class Aliases
    class << self
-      # Resolves a model ID to its provider-specific version
-      #
-      # @param model_id [String] the model identifier or alias
-      # @param provider_slug [String, Symbol, nil] optional provider to resolve for
-      # @return [String] the resolved model ID or the original if no alias exists
      def resolve(model_id, provider = nil)
        return model_id unless aliases[model_id]
 
        if provider
          aliases[model_id][provider.to_s] || model_id
        else
-          # Get native provider's version
          aliases[model_id].values.first || model_id
        end
      end
 
-      # Returns the loaded aliases mapping
-      # @return [Hash] the aliases mapping
      def aliases
        @aliases ||= load_aliases
      end
 
-      # Loads aliases from the JSON file
-      # @return [Hash] the loaded aliases
+      def aliases_file
+        File.expand_path('aliases.json', __dir__)
+      end
+
      def load_aliases
-        file_path = File.expand_path('aliases.json', __dir__)
-        if File.exist?(file_path)
-          JSON.parse(File.read(file_path))
+        if File.exist?(aliases_file)
+          JSON.parse(File.read(aliases_file))
        else
          {}
        end
      end
 
-      # Reloads aliases from disk
-      # @return [Hash] the reloaded aliases
      def reload!
        @aliases = load_aliases
      end
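Putting the new aliases.json entry together with the `resolve` logic above, lookups behave like this (a sketch of expected results based only on the code shown in this diff):

```ruby
# With a provider, resolve returns that provider's mapping;
# without one, it falls back to the first (native) provider entry.
RubyLLM::Aliases.resolve('gpt-oss-20b', :openrouter) # => "openai/gpt-oss-20b"
RubyLLM::Aliases.resolve('gpt-oss-20b')              # => "gpt-oss-20b" (the "openai" value comes first)
RubyLLM::Aliases.resolve('not-an-alias')             # => "not-an-alias" (unknown IDs pass through)
```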
data/lib/ruby_llm/chat.rb CHANGED
@@ -1,13 +1,7 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Represents a conversation with an AI model. Handles message history,
-  # streaming responses, and tool integration with a simple, conversational API.
-  #
-  # Example:
-  #   chat = RubyLLM.chat
-  #   chat.ask "What's the best way to learn Ruby?"
-  #   chat.ask "Can you elaborate on that?"
+  # Represents a conversation with an AI model
  class Chat
    include Enumerable
 
@@ -22,7 +16,7 @@ module RubyLLM
      @config = context&.config || RubyLLM.config
      model_id = model || @config.default_model
      with_model(model_id, provider: provider, assume_exists: assume_model_exists)
-      @temperature = 0.7
+      @temperature = nil
      @messages = []
      @tools = {}
      @params = {}
@@ -51,10 +45,6 @@ module RubyLLM
    end
 
    def with_tool(tool)
-      unless @model.supports_functions?
-        raise UnsupportedFunctionsError, "Model #{@model.id} doesn't support function calling"
-      end
-
      tool_instance = tool.is_a?(Class) ? tool.new : tool
      @tools[tool_instance.name.to_sym] = tool_instance
      self
@@ -94,11 +84,7 @@ module RubyLLM
      self
    end
 
-    def with_schema(schema, force: false)
-      unless force || @model.structured_output?
-        raise UnsupportedStructuredOutputError, "Model #{@model.id} doesn't support structured output"
-      end
-
+    def with_schema(schema)
      schema_instance = schema.is_a?(Class) ? schema.new : schema
 
      # Accept both RubyLLM::Schema instances and plain JSON schemas
@@ -149,7 +135,6 @@ module RubyLLM
 
      @on[:new_message]&.call unless block_given?
 
-      # Parse JSON if schema was set
      if @schema && response.content.is_a?(String)
        begin
          response.content = JSON.parse(response.content)
@@ -192,7 +177,6 @@ module RubyLLM
        @on[:new_message]&.call
      end
 
-      # Pass chunk to user's block
      block.call chunk
    end
  end
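Two behavioral changes stand out here: the default temperature is now `nil` (deferring to the provider's default instead of forcing 0.7), and `with_tool`/`with_schema` no longer pre-validate model capabilities. A sketch of the consequences, reusing the `Weather` and `ProductSchema` classes from the README snippet above:

```ruby
chat = RubyLLM.chat

# 1.6.1 raised UnsupportedFunctionsError / UnsupportedStructuredOutputError here
# when the model registry said the capability was missing; 1.6.3 just sends the
# request, so an unsupported model now surfaces as a provider-side API error.
chat.with_tool(Weather)
chat.with_schema(ProductSchema) # the force: keyword is gone along with the check
```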
data/lib/ruby_llm/configuration.rb CHANGED
@@ -1,16 +1,8 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Global configuration for RubyLLM. Manages API keys, default models,
-  # and provider-specific settings.
-  #
-  # Configure via:
-  #   RubyLLM.configure do |config|
-  #     config.openai_api_key = ENV['OPENAI_API_KEY']
-  #     config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
-  #   end
+  # Global configuration for RubyLLM
  class Configuration
-    # Provider-specific configuration
    attr_accessor :openai_api_key,
                  :openai_api_base,
                  :openai_organization_id,
@@ -44,11 +36,9 @@ module RubyLLM
                  :logger,
                  :log_file,
                  :log_level,
-                  :log_assume_model_exists,
                  :log_stream_debug
 
    def initialize
-      # Connection configuration
      @request_timeout = 120
      @max_retries = 3
      @retry_interval = 0.1
@@ -56,15 +46,12 @@ module RubyLLM
      @retry_interval_randomness = 0.5
      @http_proxy = nil
 
-      # Default models
      @default_model = 'gpt-4.1-nano'
      @default_embedding_model = 'text-embedding-3-small'
      @default_image_model = 'gpt-image-1'
 
-      # Logging configuration
      @log_file = $stdout
      @log_level = ENV['RUBYLLM_DEBUG'] ? Logger::DEBUG : Logger::INFO
-      @log_assume_model_exists = true
      @log_stream_debug = ENV['RUBYLLM_STREAM_DEBUG'] == 'true'
    end
 
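For reference, here are the defaults from `Configuration#initialize` written as an explicit configure block; a sketch that assumes each attribute is writable via the (partially elided) `attr_accessor` list:

```ruby
RubyLLM.configure do |config|
  # Values mirror the defaults set in Configuration#initialize above
  config.request_timeout = 120 # seconds
  config.max_retries = 3
  config.retry_interval = 0.1
  config.retry_interval_randomness = 0.5
  config.default_model = 'gpt-4.1-nano'
  config.default_embedding_model = 'text-embedding-3-small'
  config.default_image_model = 'gpt-image-1'
  # Note: log_assume_model_exists was removed in this release
end
```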
data/lib/ruby_llm/content.rb CHANGED
@@ -2,7 +2,6 @@
 
 module RubyLLM
  # Represents the content sent to or received from an LLM.
-  # Selects the appropriate attachment class based on the content type.
  class Content
    attr_reader :text, :attachments
 
@@ -42,8 +41,7 @@ module RubyLLM
 
    def process_attachments(attachments)
      if attachments.is_a?(Hash)
-        # Ignores types (like :image, :audio, :text, :pdf) since we have robust MIME type detection
-        attachments.each_value(&method(:process_attachments_array_or_string))
+        attachments.each_value { |attachment| process_attachments_array_or_string(attachment) }
      else
        process_attachments_array_or_string attachments
      end
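The removed comment explains the Hash branch: type keys are ignored because attachment types come from MIME detection. So both of these forms are equivalent (a sketch using the `Content.new(text, attachments)` shape visible in message.rb below):

```ruby
# Hash keys like :image or :pdf are discarded; only the values matter,
# and each file's type is detected from its MIME type.
RubyLLM::Content.new("What's in these?", { image: 'diagram.png', pdf: 'report.pdf' })
RubyLLM::Content.new("What's in these?", ['diagram.png', 'report.pdf'])
```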
data/lib/ruby_llm/embedding.rb CHANGED
@@ -1,8 +1,7 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Core embedding interface. Provides a clean way to generate embeddings
-  # from text using various provider models.
+  # Core embedding interface.
  class Embedding
    attr_reader :vectors, :model, :input_tokens
 
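Given the readers above, a typical call looks like this (a sketch; the vector values are illustrative):

```ruby
embedding = RubyLLM.embed "Ruby is elegant and expressive"
embedding.vectors      # => [0.013, -0.028, ...] the embedding vector
embedding.model        # => the embedding model that was used
embedding.input_tokens # => tokens consumed by the request
```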
data/lib/ruby_llm/error.rb CHANGED
@@ -3,13 +3,6 @@
 module RubyLLM
  # Custom error class that wraps API errors from different providers
  # into a consistent format with helpful error messages.
-  #
-  # Example:
-  #   begin
-  #     chat.ask "What's 2+2?"
-  #   rescue RubyLLM::Error => e
-  #     puts "Couldn't chat with AI: #{e.message}"
-  #   end
  class Error < StandardError
    attr_reader :response
 
@@ -23,9 +16,7 @@ module RubyLLM
  class ConfigurationError < StandardError; end
  class InvalidRoleError < StandardError; end
  class ModelNotFoundError < StandardError; end
-  class UnsupportedFunctionsError < StandardError; end
  class UnsupportedAttachmentError < StandardError; end
-  class UnsupportedStructuredOutputError < StandardError; end
 
  # Error classes for different HTTP status codes
  class BadRequestError < Error; end
@@ -38,7 +29,6 @@ module RubyLLM
  class UnauthorizedError < Error; end
 
  # Faraday middleware that maps provider-specific API errors to RubyLLM errors.
-  # Uses provider's parse_error method to extract meaningful error messages.
  class ErrorMiddleware < Faraday::Middleware
    def initialize(app, options = {})
      super(app)
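The rescue pattern from the removed doc comment still applies unchanged; what is new is that the two capability errors no longer exist:

```ruby
begin
  chat.ask "What's 2+2?"
rescue RubyLLM::Error => e
  puts "Couldn't chat with AI: #{e.message}"
end

# UnsupportedFunctionsError and UnsupportedStructuredOutputError are gone in
# 1.6.3; capability mismatches now surface as HTTP-level errors such as
# RubyLLM::BadRequestError instead.
```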
data/lib/ruby_llm/image.rb CHANGED
@@ -2,8 +2,6 @@
 
 module RubyLLM
  # Represents a generated image from an AI model.
-  # Provides an interface to image generation capabilities
-  # from providers like DALL-E and Gemini's Imagen.
  class Image
    attr_reader :url, :data, :mime_type, :revised_prompt, :model_id
 
@@ -19,7 +17,6 @@ module RubyLLM
      !@data.nil?
    end
 
-    # Returns the raw binary image data regardless of source
    def to_blob
      if base64?
        Base64.decode64 @data
@@ -29,7 +26,6 @@
      end
    end
 
-    # Saves the image to a file path
    def save(path)
      File.binwrite(File.expand_path(path), to_blob)
      path
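Combined with `RubyLLM.paint` from the README, the typical flow is (a sketch assuming `paint` returns an `Image`, the class this file defines):

```ruby
image = RubyLLM.paint "a sunset over mountains in watercolor style"
# save writes to_blob to disk; per the removed comment, to_blob returns
# raw bytes regardless of whether the image arrived as base64 data or a URL.
image.save("sunset.png")
```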
data/lib/ruby_llm/message.rb CHANGED
@@ -1,9 +1,7 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # A single message in a chat conversation. Can represent user input,
-  # AI responses, or tool interactions. Tracks token usage and handles
-  # the complexities of tool calls and responses.
+  # A single message in a chat conversation.
  class Message
    ROLES = %i[system user assistant tool].freeze
 
@@ -65,7 +63,7 @@ module RubyLLM
      case content
      when String then Content.new(content)
      when Hash then Content.new(content[:text], content)
-      else content # Pass through nil, Content, or other types
+      else content
      end
    end
 
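The case expression above normalizes whatever is handed in as content; a standalone sketch of that logic (the enclosing method name is not shown in the hunk, so it is hypothetical here):

```ruby
def normalize(content)
  case content
  when String then Content.new(content)                 # plain text gets wrapped
  when Hash   then Content.new(content[:text], content) # text plus attachments
  else content                                          # nil, Content, etc. pass through
  end
end
```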
data/lib/ruby_llm/model/info.rb CHANGED
@@ -3,14 +3,6 @@
 module RubyLLM
  module Model
    # Information about an AI model's capabilities, pricing, and metadata.
-    # Used by the Models registry to help developers choose the right model
-    # for their needs.
-    #
-    # Example:
-    #   model = RubyLLM.models.find('gpt-4')
-    #   model.supports_vision? # => true
-    #   model.supports_functions? # => true
-    #   model.input_price_per_million # => 30.0
    class Info
      attr_reader :id, :name, :provider, :family, :created_at, :context_window, :max_output_tokens, :knowledge_cutoff,
                  :modalities, :capabilities, :pricing, :metadata
@@ -30,7 +22,6 @@ module RubyLLM
        @metadata = data[:metadata] || {}
      end
 
-      # Capability methods
      def supports?(capability)
        capabilities.include?(capability.to_s)
      end
@@ -41,7 +32,6 @@
      end
    end
 
-      # Backward compatibility methods
      def display_name
        name
      end
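The removed doc example and the surviving `supports?` method combine into this usage (a sketch; capability strings come from the model registry data in models.json):

```ruby
model = RubyLLM.models.find('gpt-4')  # lookup from the removed doc example
model.supports?(:function_calling)    # symbols work too, thanks to to_s
model.display_name                    # kept as an alias for #name
```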
data/lib/ruby_llm/model/pricing.rb CHANGED
@@ -3,12 +3,10 @@
 module RubyLLM
  module Model
    # A collection that manages and provides access to different categories of pricing information
-    # (text tokens, images, audio tokens, embeddings)
    class Pricing
      def initialize(data)
        @data = {}
 
-        # Initialize pricing categories
        %i[text_tokens images audio_tokens embeddings].each do |category|
          @data[category] = PricingCategory.new(data[category]) if data[category] && !empty_pricing?(data[category])
        end
@@ -33,7 +31,6 @@
      private
 
      def empty_pricing?(data)
-        # Check if all pricing values in this category are zero or nil
        return true unless data
 
        %i[standard batch].each do |tier|