ruby_llm_community 0.0.6 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. checksums.yaml +4 -4
  2. data/README.md +3 -3
  3. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +34 -0
  4. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +5 -0
  5. data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +6 -0
  6. data/lib/generators/ruby_llm/install_generator.rb +27 -2
  7. data/lib/ruby_llm/active_record/acts_as.rb +163 -24
  8. data/lib/ruby_llm/aliases.json +58 -5
  9. data/lib/ruby_llm/aliases.rb +7 -25
  10. data/lib/ruby_llm/chat.rb +10 -17
  11. data/lib/ruby_llm/configuration.rb +5 -12
  12. data/lib/ruby_llm/connection.rb +4 -4
  13. data/lib/ruby_llm/connection_multipart.rb +19 -0
  14. data/lib/ruby_llm/content.rb +5 -2
  15. data/lib/ruby_llm/embedding.rb +1 -2
  16. data/lib/ruby_llm/error.rb +0 -8
  17. data/lib/ruby_llm/image.rb +23 -8
  18. data/lib/ruby_llm/image_attachment.rb +21 -0
  19. data/lib/ruby_llm/message.rb +6 -6
  20. data/lib/ruby_llm/model/info.rb +12 -10
  21. data/lib/ruby_llm/model/pricing.rb +0 -3
  22. data/lib/ruby_llm/model/pricing_category.rb +0 -2
  23. data/lib/ruby_llm/model/pricing_tier.rb +0 -1
  24. data/lib/ruby_llm/models.json +2147 -470
  25. data/lib/ruby_llm/models.rb +65 -34
  26. data/lib/ruby_llm/provider.rb +8 -8
  27. data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -46
  28. data/lib/ruby_llm/providers/anthropic/chat.rb +2 -2
  29. data/lib/ruby_llm/providers/anthropic/media.rb +0 -1
  30. data/lib/ruby_llm/providers/anthropic/tools.rb +1 -2
  31. data/lib/ruby_llm/providers/anthropic.rb +1 -2
  32. data/lib/ruby_llm/providers/bedrock/chat.rb +2 -4
  33. data/lib/ruby_llm/providers/bedrock/media.rb +0 -1
  34. data/lib/ruby_llm/providers/bedrock/models.rb +0 -2
  35. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +0 -12
  36. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +0 -7
  37. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +0 -12
  38. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +0 -12
  39. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +0 -13
  40. data/lib/ruby_llm/providers/bedrock/streaming.rb +0 -18
  41. data/lib/ruby_llm/providers/bedrock.rb +1 -2
  42. data/lib/ruby_llm/providers/deepseek/capabilities.rb +1 -2
  43. data/lib/ruby_llm/providers/deepseek/chat.rb +0 -1
  44. data/lib/ruby_llm/providers/gemini/capabilities.rb +28 -100
  45. data/lib/ruby_llm/providers/gemini/chat.rb +57 -29
  46. data/lib/ruby_llm/providers/gemini/embeddings.rb +0 -2
  47. data/lib/ruby_llm/providers/gemini/images.rb +1 -2
  48. data/lib/ruby_llm/providers/gemini/media.rb +0 -1
  49. data/lib/ruby_llm/providers/gemini/models.rb +1 -2
  50. data/lib/ruby_llm/providers/gemini/streaming.rb +15 -1
  51. data/lib/ruby_llm/providers/gemini/tools.rb +0 -5
  52. data/lib/ruby_llm/providers/gpustack/chat.rb +11 -1
  53. data/lib/ruby_llm/providers/gpustack/media.rb +45 -0
  54. data/lib/ruby_llm/providers/gpustack/models.rb +44 -9
  55. data/lib/ruby_llm/providers/gpustack.rb +1 -0
  56. data/lib/ruby_llm/providers/mistral/capabilities.rb +2 -10
  57. data/lib/ruby_llm/providers/mistral/chat.rb +0 -2
  58. data/lib/ruby_llm/providers/mistral/embeddings.rb +0 -3
  59. data/lib/ruby_llm/providers/mistral/models.rb +0 -1
  60. data/lib/ruby_llm/providers/ollama/chat.rb +0 -1
  61. data/lib/ruby_llm/providers/ollama/media.rb +1 -6
  62. data/lib/ruby_llm/providers/ollama/models.rb +36 -0
  63. data/lib/ruby_llm/providers/ollama.rb +1 -0
  64. data/lib/ruby_llm/providers/openai/capabilities.rb +3 -16
  65. data/lib/ruby_llm/providers/openai/chat.rb +1 -3
  66. data/lib/ruby_llm/providers/openai/embeddings.rb +0 -3
  67. data/lib/ruby_llm/providers/openai/images.rb +73 -3
  68. data/lib/ruby_llm/providers/openai/media.rb +0 -1
  69. data/lib/ruby_llm/providers/openai/response.rb +120 -29
  70. data/lib/ruby_llm/providers/openai/response_media.rb +2 -2
  71. data/lib/ruby_llm/providers/openai/streaming.rb +107 -47
  72. data/lib/ruby_llm/providers/openai/tools.rb +1 -1
  73. data/lib/ruby_llm/providers/openai.rb +1 -3
  74. data/lib/ruby_llm/providers/openai_base.rb +2 -2
  75. data/lib/ruby_llm/providers/openrouter/models.rb +1 -16
  76. data/lib/ruby_llm/providers/perplexity/capabilities.rb +0 -1
  77. data/lib/ruby_llm/providers/perplexity/chat.rb +0 -1
  78. data/lib/ruby_llm/providers/perplexity.rb +1 -5
  79. data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
  80. data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
  81. data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
  82. data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
  83. data/lib/ruby_llm/providers/vertexai.rb +55 -0
  84. data/lib/ruby_llm/railtie.rb +0 -1
  85. data/lib/ruby_llm/stream_accumulator.rb +72 -10
  86. data/lib/ruby_llm/streaming.rb +16 -25
  87. data/lib/ruby_llm/tool.rb +2 -19
  88. data/lib/ruby_llm/tool_call.rb +0 -9
  89. data/lib/ruby_llm/version.rb +1 -1
  90. data/lib/ruby_llm_community.rb +5 -3
  91. data/lib/tasks/models.rake +525 -0
  92. data/lib/tasks/release.rake +37 -2
  93. data/lib/tasks/vcr.rake +0 -7
  94. metadata +13 -4
  95. data/lib/tasks/aliases.rake +0 -235
  96. data/lib/tasks/models_docs.rake +0 -224
  97. data/lib/tasks/models_update.rake +0 -108
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d3fa93c4ea3fddd52d6cf639bb620c99d10a0fdc01082ecab1c1e9ddb022763d
-  data.tar.gz: 4be92604b71b94c096162a25165178344f56e22ea332815f542a87af095350f5
+  metadata.gz: 0d28b384f53784c1954147d9f217caff1325893f1853e6be63c60158d9c37b79
+  data.tar.gz: 3c6e66e260c1300b3b20ae730ec3755fcca19be8afc8dee754e26cc82cbee7f4
 SHA512:
-  metadata.gz: 376559dd9e4c9ab5dbcb4c0e4c556d548c13fdebedaecef9edaebffb608f1af5380213bd388562d48866375b4bb63729c877b1548bfab17d07e2d3a9c550d67e
-  data.tar.gz: c2f8a24697f9abb88509bdf9b793020360bff7175544bd62b31fb3644f276fc103ae5809dc8a8dd8098962f2f59b1decc0baa1f08482d82bb4ab40e29578c705
+  metadata.gz: 568f6c9a3facf5e5f721ec6e3d44862eb0988529cf72625a88220fe80f00ba89f5602f0a178d0ead3cfc3c4accca3a4b56321a3a24f84883df16bd1c72432458
+  data.tar.gz: e6336936468acfc005284a1e6f7cbffe58e55e85c760082af71569d9555371ed124b26fe0cc70d274124f4b80530881c012d10079145f0c90383f2d6e2c51ff7
data/README.md CHANGED
@@ -9,10 +9,10 @@
 
 Battle tested at [<picture><source media="(prefers-color-scheme: dark)" srcset="https://chatwithwork.com/logotype-dark.svg"><img src="https://chatwithwork.com/logotype.svg" alt="Chat with Work" height="30" align="absmiddle"></picture>](https://chatwithwork.com) — *Claude Code for your documents*
 
-[![Gem Version](https://badge.fury.io/rb/ruby_llm.svg?a=5)](https://badge.fury.io/rb/ruby_llm)
+[![Gem Version](https://badge.fury.io/rb/ruby_llm.svg?a=7)](https://badge.fury.io/rb/ruby_llm)
 [![Ruby Style Guide](https://img.shields.io/badge/code_style-standard-brightgreen.svg)](https://github.com/testdouble/standard)
 [![Gem Downloads](https://img.shields.io/gem/dt/ruby_llm)](https://rubygems.org/gems/ruby_llm)
-[![codecov](https://codecov.io/gh/crmne/ruby_llm/branch/main/graph/badge.svg)](https://codecov.io/gh/crmne/ruby_llm)
+[![codecov](https://codecov.io/gh/crmne/ruby_llm/branch/main/graph/badge.svg?a=2)](https://codecov.io/gh/crmne/ruby_llm)
 
 <a href="https://trendshift.io/repositories/13640" target="_blank"><img src="https://trendshift.io/api/badge/repositories/13640" alt="crmne%2Fruby_llm | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
 </div>
@@ -108,7 +108,7 @@ response = chat.with_schema(ProductSchema).ask "Analyze this product", with: "pr
 * **Rails:** ActiveRecord integration with `acts_as_chat`
 * **Async:** Fiber-based concurrency
 * **Model registry:** 500+ models with capability detection and pricing
-* **Providers:** OpenAI, Anthropic, Gemini, Bedrock, DeepSeek, Mistral, Ollama, OpenRouter, Perplexity, GPUStack, and any OpenAI-compatible API
+* **Providers:** OpenAI, Anthropic, Gemini, VertexAI, Bedrock, DeepSeek, Mistral, Ollama, OpenRouter, Perplexity, GPUStack, and any OpenAI-compatible API
 
 ## Installation
 
data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt ADDED
@@ -0,0 +1,34 @@
+class Create<%= options[:model_model_name].pluralize %> < ActiveRecord::Migration<%= migration_version %>
+  def change
+    create_table :<%= options[:model_model_name].tableize %> do |t|
+      t.string :model_id, null: false
+      t.string :name, null: false
+      t.string :provider, null: false
+      t.string :family
+      t.datetime :model_created_at
+      t.integer :context_window
+      t.integer :max_output_tokens
+      t.date :knowledge_cutoff
+      <% if postgresql? %>
+      t.jsonb :modalities, default: {}
+      t.jsonb :capabilities, default: []
+      t.jsonb :pricing, default: {}
+      t.jsonb :metadata, default: {}
+      <% else %>
+      t.json :modalities, default: {}
+      t.json :capabilities, default: []
+      t.json :pricing, default: {}
+      t.json :metadata, default: {}
+      <% end %>
+      t.timestamps
+
+      t.index [:provider, :model_id], unique: true
+      t.index :provider
+      t.index :family
+      <% if postgresql? %>
+      t.index :capabilities, using: :gin
+      t.index :modalities, using: :gin
+      <% end %>
+    end
+  end
+end
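The jsonb columns and GIN indexes only apply on PostgreSQL; other databases fall back to plain json columns. A minimal, hypothetical query sketch (assuming the default Model class name and capability strings such as "vision" stored in the capabilities array):

```ruby
# Hypothetical example, PostgreSQL only: the GIN index above lets the jsonb
# containment operator (@>) answer "which models list this capability?" quickly.
vision_models = Model.where("capabilities @> ?::jsonb", %w[vision].to_json)

# The unique [provider, model_id] index keeps registry refreshes idempotent.
Model.find_or_initialize_by(provider: "openai", model_id: "gpt-4o-mini")
```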
data/lib/generators/ruby_llm/install/templates/initializer.rb.tt CHANGED
@@ -3,4 +3,9 @@ RubyLLM.configure do |config|
   config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
 
   # config.default_model = "gpt-4.1-nano"
+<% unless options[:skip_model] %>
+
+  # Model registry persistence
+  config.model_registry_class = "<%= options[:model_model_name] %>"
+<% end %>
 end
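For orientation, with the generator defaults (model class Model, --skip-model not set) the rendered initializer would come out roughly as below; the template's earlier API-key lines are omitted here.

```ruby
RubyLLM.configure do |config|
  config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]

  # config.default_model = "gpt-4.1-nano"

  # Model registry persistence
  config.model_registry_class = "Model"
end
```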
data/lib/generators/ruby_llm/install/templates/model_model.rb.tt ADDED
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+
+# Model registry persistence
+class <%= options[:model_model_name] %> < ApplicationRecord
+  <%= acts_as_model_declaration %>
+end
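Because acts_as_model_declaration in the generator (see below) simply returns 'acts_as_model', the rendered file for the default class name would look roughly like:

```ruby
# app/models/model.rb — rendered output, assuming the default --model-model-name=Model
# frozen_string_literal: true

# Model registry persistence
class Model < ApplicationRecord
  acts_as_model
end
```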
data/lib/generators/ruby_llm/install_generator.rb CHANGED
@@ -18,8 +18,12 @@ module RubyLLM
                  desc: 'Name of the Message model class'
     class_option :tool_call_model_name, type: :string, default: 'ToolCall',
                  desc: 'Name of the ToolCall model class'
+    class_option :model_model_name, type: :string, default: 'Model',
+                 desc: 'Name of the Model model class (for model registry)'
+    class_option :skip_model, type: :boolean, default: false,
+                 desc: 'Skip creating Model model for model registry persistence'
 
-    desc 'Creates model files for Chat, Message, and ToolCall, and creates migrations for RubyLLM Rails integration'
+    desc 'Creates models and migrations for RubyLLM Rails integration'
 
     def self.next_migration_number(dirname)
       ::ActiveRecord::Generators::Base.next_migration_number(dirname)
@@ -75,6 +79,10 @@
       end
     end
 
+    def acts_as_model_declaration
+      'acts_as_model'
+    end
+
     def create_migration_files
       # Create migrations with timestamps to ensure proper order
       # First create chats table
@@ -86,16 +94,27 @@
       migration_template 'create_messages_migration.rb.tt',
                          "db/migrate/create_#{options[:message_model_name].tableize}.rb"
 
-      # Finally create tool_calls table (references messages)
+      # Then create tool_calls table (references messages)
       sleep 1 # Ensure different timestamp
       migration_template 'create_tool_calls_migration.rb.tt',
                          "db/migrate/create_#{options[:tool_call_model_name].tableize}.rb"
+
+      # Finally create models table (for model registry)
+      return if options[:skip_model]
+
+      sleep 1 # Ensure different timestamp
+      migration_template 'create_models_migration.rb.tt',
+                         "db/migrate/create_#{options[:model_model_name].tableize}.rb"
     end
 
     def create_model_files
       template 'chat_model.rb.tt', "app/models/#{options[:chat_model_name].underscore}.rb"
       template 'message_model.rb.tt', "app/models/#{options[:message_model_name].underscore}.rb"
       template 'tool_call_model.rb.tt', "app/models/#{options[:tool_call_model_name].underscore}.rb"
+
+      return if options[:skip_model]
+
+      template 'model_model.rb.tt', "app/models/#{options[:model_model_name].underscore}.rb"
     end
 
     def create_initializer
@@ -110,6 +129,12 @@
       say ' 2. Set your API keys in config/initializers/ruby_llm.rb'
       say " 3. Start chatting: #{options[:chat_model_name]}.create!(model_id: 'gpt-4.1-nano').ask('Hello!')"
 
+      unless options[:skip_model]
+        say " 4. Sync models: #{options[:model_model_name]}.sync!"
+        say "\n 🚀 Model registry is database-backed!", :cyan
+        say ' Models will automatically load from the database'
+      end
+
       say "\n 📚 Full docs: https://rubyllm.com", :cyan
 
       say "\n ❤️ Love RubyLLM?", :magenta
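Putting the generator changes together, a hypothetical first run with the default class names would follow the post-install notes roughly like this (the notes call the sync step Model.sync!, while the ModelMethods added in acts_as.rb below define refresh!):

```ruby
# Populate the model registry table from the providers' model lists.
Model.refresh!

# Start chatting, as suggested by step 3 of the generator output.
Chat.create!(model_id: "gpt-4.1-nano").ask("Hello!")
```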
data/lib/ruby_llm/active_record/acts_as.rb CHANGED
@@ -3,17 +3,19 @@
 module RubyLLM
   module ActiveRecord
     # Adds chat and message persistence capabilities to ActiveRecord models.
-    # Provides a clean interface for storing chat history, message metadata,
-    # and attachments in your database.
     module ActsAs
       extend ActiveSupport::Concern
 
       class_methods do # rubocop:disable Metrics/BlockLength
-        def acts_as_chat(message_class: 'Message', tool_call_class: 'ToolCall')
+        def acts_as_chat(message_class: 'Message', tool_call_class: 'ToolCall',
+                         model_class: 'Model', model_foreign_key: nil, model_primary_key: nil)
           include ChatMethods
 
           @message_class = message_class.to_s
           @tool_call_class = tool_call_class.to_s
+          @model_class = model_class.to_s
+          @model_foreign_key = model_foreign_key || ActiveSupport::Inflector.foreign_key(@model_class)
+          @model_primary_key = model_primary_key || 'model_id'
 
           has_many :messages,
                    -> { order(created_at: :asc) },
@@ -21,13 +23,43 @@ module RubyLLM
                    inverse_of: :chat,
                    dependent: :destroy
 
+          # Set up model association if model registry is configured
+          if RubyLLM.config.model_registry_class
+            belongs_to :model,
+                       class_name: @model_class,
+                       foreign_key: @model_foreign_key,
+                       primary_key: @model_primary_key,
+                       optional: true
+          end
+
           delegate :add_message, to: :to_llm
         end
 
-        def acts_as_message(chat_class: 'Chat',
+        def acts_as_model(chat_class: 'Chat')
+          include ModelMethods
+
+          @chat_class = chat_class.to_s
+
+          validates :model_id, presence: true, uniqueness: { scope: :provider }
+          validates :name, presence: true
+          validates :provider, presence: true
+
+          # Set up chat association if configured
+          return unless RubyLLM.config.model_registry_class
+
+          has_many :chats,
+                   class_name: @chat_class,
+                   foreign_key: 'model_id',
+                   primary_key: 'model_id'
+        end
+
+        def acts_as_message(chat_class: 'Chat', # rubocop:disable Metrics/ParameterLists
                             chat_foreign_key: nil,
                             tool_call_class: 'ToolCall',
                             tool_call_foreign_key: nil,
+                            model_class: 'Model',
+                            model_foreign_key: nil,
+                            model_primary_key: nil,
                             touch_chat: false)
           include MessageMethods
 
@@ -37,6 +69,10 @@ module RubyLLM
           @tool_call_class = tool_call_class.to_s
           @tool_call_foreign_key = tool_call_foreign_key || ActiveSupport::Inflector.foreign_key(@tool_call_class)
 
+          @model_class = model_class.to_s
+          @model_foreign_key = model_foreign_key || ActiveSupport::Inflector.foreign_key(@model_class)
+          @model_primary_key = model_primary_key || 'model_id'
+
           belongs_to :chat,
                      class_name: @chat_class,
                      foreign_key: @chat_foreign_key,
@@ -53,7 +89,21 @@ module RubyLLM
                      optional: true,
                      inverse_of: :result
 
-          delegate :tool_call?, :tool_result?, :tool_results, to: :to_llm
+          has_many :tool_results,
+                   through: :tool_calls,
+                   source: :result,
+                   class_name: @message_class
+
+          # Set up model association if model registry is configured
+          if RubyLLM.config.model_registry_class
+            belongs_to :model,
+                       class_name: @model_class,
+                       foreign_key: @model_foreign_key,
+                       primary_key: @model_primary_key,
+                       optional: true
+          end
+
+          delegate :tool_call?, :tool_result?, to: :to_llm
        end
 
        def acts_as_tool_call(message_class: 'Message', message_foreign_key: nil, result_foreign_key: nil)
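For context, an application adopting the new registry-aware macros might declare its models roughly as follows (all class names are the defaults these signatures assume; the belongs_to :model association is only wired up when config.model_registry_class is set):

```ruby
class Chat < ApplicationRecord
  acts_as_chat message_class: "Message", tool_call_class: "ToolCall", model_class: "Model"
end

class Message < ApplicationRecord
  acts_as_message chat_class: "Chat", tool_call_class: "ToolCall", model_class: "Model"
end

class Model < ApplicationRecord
  acts_as_model chat_class: "Chat"
end
```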
@@ -75,8 +125,7 @@
         end
       end
 
-      # Methods mixed into chat models to handle message persistence and
-      # provide a conversation interface.
+      # Methods mixed into chat models.
       module ChatMethods
         extend ActiveSupport::Concern
 
@@ -85,10 +134,20 @@
         end
 
         def to_llm(context: nil)
+          # If we have a model association, use both model_id and provider
+          # Otherwise, model_id is a string that RubyLLM can resolve
+          if respond_to?(:model) && model
+            model_to_use = model.model_id
+            provider_to_use = model.provider.to_sym
+          else
+            model_to_use = model_id
+            provider_to_use = nil
+          end
+
           @chat ||= if context
-                      context.chat(model: model_id)
+                      context.chat(model: model_to_use, provider: provider_to_use)
                     else
-                      RubyLLM.chat(model: model_id)
+                      RubyLLM.chat(model: model_to_use, provider: provider_to_use)
                     end
           @chat.reset_messages!
 
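In effect, a chat row linked to a registry record pins both the model id and the provider, while a plain model_id string keeps the old resolution path; an illustrative sketch (assumes the default associations from acts_as_chat above):

```ruby
vertex_model = Model.find_by!(provider: "vertexai", model_id: "gemini-2.5-flash")
Chat.create!(model: vertex_model).to_llm      # chat built with provider: :vertexai

Chat.create!(model_id: "gpt-4o-mini").to_llm  # provider nil; RubyLLM resolves the string id
```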
@@ -211,26 +270,33 @@
        private
 
        def cleanup_failed_messages
-          RubyLLM.logger.debug "RubyLLM: API call failed, destroying message: #{@message.id}"
+          RubyLLM.logger.warn "RubyLLM: API call failed, destroying message: #{@message.id}"
          @message.destroy
        end
 
-        def cleanup_orphaned_tool_results
-          loop do
-            messages.reload
-            last = messages.order(:id).last
+        def cleanup_orphaned_tool_results # rubocop:disable Metrics/PerceivedComplexity
+          messages.reload
+          last = messages.order(:id).last
 
-            break unless last&.tool_call? || last&.tool_result?
+          return unless last&.tool_call? || last&.tool_result?
 
+          if last.tool_call?
            last.destroy
+          elsif last.tool_result?
+            tool_call_message = last.parent_tool_call.message
+            expected_results = tool_call_message.tool_calls.pluck(:id)
+            actual_results = tool_call_message.tool_results.pluck(:tool_call_id)
+
+            if expected_results.sort != actual_results.sort
+              tool_call_message.tool_results.each(&:destroy)
+              tool_call_message.destroy
+            end
          end
        end
 
        def setup_persistence_callbacks
-          # Only set up once per chat instance
          return @chat if @chat.instance_variable_get(:@_persistence_callbacks_setup)
 
-          # Set up persistence callbacks (user callbacks will be chained via on_new_message/on_end_message methods)
          @chat.on_new_message { persist_new_message }
          @chat.on_end_message { |msg| persist_message_completion(msg) }
 
@@ -242,15 +308,21 @@
          @message = messages.create!(role: :assistant, content: '')
        end
 
-        def persist_message_completion(message)
+        def persist_message_completion(message) # rubocop:disable Metrics/PerceivedComplexity
          return unless message
 
          tool_call_id = find_tool_call_id(message.tool_call_id) if message.tool_call_id
 
          transaction do
-            # Convert parsed JSON back to JSON string for storage
            content = message.content
-            content = content.to_json if content.is_a?(Hash) || content.is_a?(Array)
+            attachments_to_persist = nil
+
+            if content.is_a?(RubyLLM::Content)
+              attachments_to_persist = content.attachments if content.attachments.any?
+              content = content.text
+            elsif content.is_a?(Hash) || content.is_a?(Array)
+              content = content.to_json
+            end
 
            @message.update!(
              role: message.role,
@@ -261,6 +333,8 @@
            )
            @message.write_attribute(@message.class.tool_call_foreign_key, tool_call_id) if tool_call_id
            @message.save!
+
+            persist_content(@message, attachments_to_persist) if attachments_to_persist
            persist_tool_calls(message.tool_calls) if message.tool_calls.present?
          end
        end
@@ -302,8 +376,7 @@
        def convert_to_active_storage_format(source)
          return if source.blank?
 
-          # Let RubyLLM::Attachment handle the heavy lifting
-          attachment = RubyLLM::Attachment.new(source)
+          attachment = source.is_a?(RubyLLM::Attachment) ? source : RubyLLM::Attachment.new(source)
 
          {
            io: StringIO.new(attachment.content),
@@ -316,8 +389,7 @@
        end
      end
 
-      # Methods mixed into message models to handle serialization and
-      # provide a clean interface to the underlying message data.
+      # Methods mixed into message models.
      module MessageMethods
        extend ActiveSupport::Concern
 
@@ -383,5 +455,72 @@
          tempfile
        end
      end
+
+      # Methods mixed into model registry models.
+      module ModelMethods
+        extend ActiveSupport::Concern
+
+        class_methods do # rubocop:disable Metrics/BlockLength
+          def refresh!
+            RubyLLM.models.refresh!
+
+            transaction do
+              RubyLLM.models.all.each do |model_info|
+                model = find_or_initialize_by(
+                  model_id: model_info.id,
+                  provider: model_info.provider
+                )
+                model.update!(from_llm_attributes(model_info))
+              end
+            end
+          end
+
+          def from_llm(model_info)
+            new(from_llm_attributes(model_info))
+          end
+
+          private
+
+          def from_llm_attributes(model_info)
+            {
+              model_id: model_info.id,
+              name: model_info.name,
+              provider: model_info.provider,
+              family: model_info.family,
+              model_created_at: model_info.created_at,
+              context_window: model_info.context_window,
+              max_output_tokens: model_info.max_output_tokens,
+              knowledge_cutoff: model_info.knowledge_cutoff,
+              modalities: model_info.modalities.to_h,
+              capabilities: model_info.capabilities,
+              pricing: model_info.pricing.to_h,
+              metadata: model_info.metadata
+            }
+          end
+        end
+
+        def to_llm
+          RubyLLM::Model::Info.new(
+            id: model_id,
+            name: name,
+            provider: provider,
+            family: family,
+            created_at: model_created_at,
+            context_window: context_window,
+            max_output_tokens: max_output_tokens,
+            knowledge_cutoff: knowledge_cutoff,
+            modalities: modalities&.deep_symbolize_keys || {},
+            capabilities: capabilities,
+            pricing: pricing&.deep_symbolize_keys || {},
+            metadata: metadata&.deep_symbolize_keys || {}
+          )
+        end
+
+        delegate :supports?, :supports_vision?, :supports_functions?, :type,
+                 :input_price_per_million, :output_price_per_million,
+                 :function_calling?, :structured_output?, :batch?,
+                 :reasoning?, :citations?, :streaming?,
+                 to: :to_llm
+      end
    end
  end
end
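A short usage sketch of the registry methods above, assuming the default generated Model class:

```ruby
# Re-query every provider and upsert the rows inside one transaction.
Model.refresh!

# Each row converts back into a RubyLLM::Model::Info, and the capability
# and pricing helpers are delegated to that object.
gpt = Model.find_by!(provider: "openai", model_id: "gpt-4o-mini")
gpt.to_llm                  # => RubyLLM::Model::Info
gpt.supports_vision?
gpt.input_price_per_million
```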
data/lib/ruby_llm/aliases.json CHANGED
@@ -50,21 +50,57 @@
     "deepseek": "deepseek-chat",
     "openrouter": "deepseek/deepseek-chat"
   },
+  "gemini-1.5-flash": {
+    "gemini": "gemini-1.5-flash",
+    "vertexai": "gemini-1.5-flash"
+  },
+  "gemini-1.5-flash-002": {
+    "gemini": "gemini-1.5-flash-002",
+    "vertexai": "gemini-1.5-flash-002"
+  },
+  "gemini-1.5-flash-8b": {
+    "gemini": "gemini-1.5-flash-8b",
+    "vertexai": "gemini-1.5-flash-8b"
+  },
+  "gemini-1.5-pro": {
+    "gemini": "gemini-1.5-pro",
+    "vertexai": "gemini-1.5-pro"
+  },
+  "gemini-1.5-pro-002": {
+    "gemini": "gemini-1.5-pro-002",
+    "vertexai": "gemini-1.5-pro-002"
+  },
+  "gemini-2.0-flash": {
+    "gemini": "gemini-2.0-flash",
+    "vertexai": "gemini-2.0-flash"
+  },
   "gemini-2.0-flash-001": {
     "gemini": "gemini-2.0-flash-001",
-    "openrouter": "google/gemini-2.0-flash-001"
+    "openrouter": "google/gemini-2.0-flash-001",
+    "vertexai": "gemini-2.0-flash-001"
+  },
+  "gemini-2.0-flash-exp": {
+    "gemini": "gemini-2.0-flash-exp",
+    "vertexai": "gemini-2.0-flash-exp"
   },
   "gemini-2.0-flash-lite-001": {
     "gemini": "gemini-2.0-flash-lite-001",
-    "openrouter": "google/gemini-2.0-flash-lite-001"
+    "openrouter": "google/gemini-2.0-flash-lite-001",
+    "vertexai": "gemini-2.0-flash-lite-001"
   },
   "gemini-2.5-flash": {
     "gemini": "gemini-2.5-flash",
-    "openrouter": "google/gemini-2.5-flash"
+    "openrouter": "google/gemini-2.5-flash",
+    "vertexai": "gemini-2.5-flash"
+  },
+  "gemini-2.5-flash-image-preview": {
+    "gemini": "gemini-2.5-flash-image-preview",
+    "openrouter": "google/gemini-2.5-flash-image-preview"
   },
   "gemini-2.5-flash-lite": {
     "gemini": "gemini-2.5-flash-lite",
-    "openrouter": "google/gemini-2.5-flash-lite"
+    "openrouter": "google/gemini-2.5-flash-lite",
+    "vertexai": "gemini-2.5-flash-lite"
   },
   "gemini-2.5-flash-lite-preview-06-17": {
     "gemini": "gemini-2.5-flash-lite-preview-06-17",
@@ -72,12 +108,21 @@
   },
   "gemini-2.5-pro": {
     "gemini": "gemini-2.5-pro",
-    "openrouter": "google/gemini-2.5-pro"
+    "openrouter": "google/gemini-2.5-pro",
+    "vertexai": "gemini-2.5-pro"
   },
   "gemini-2.5-pro-preview-05-06": {
     "gemini": "gemini-2.5-pro-preview-05-06",
     "openrouter": "google/gemini-2.5-pro-preview-05-06"
   },
+  "gemini-embedding-001": {
+    "gemini": "gemini-embedding-001",
+    "vertexai": "gemini-embedding-001"
+  },
+  "gemini-exp-1206": {
+    "gemini": "gemini-exp-1206",
+    "vertexai": "gemini-exp-1206"
+  },
   "gemma-3-12b-it": {
     "gemini": "gemma-3-12b-it",
     "openrouter": "google/gemma-3-12b-it"
@@ -150,6 +195,10 @@
     "openai": "gpt-4o-2024-11-20",
     "openrouter": "openai/gpt-4o-2024-11-20"
   },
+  "gpt-4o-audio-preview": {
+    "openai": "gpt-4o-audio-preview",
+    "openrouter": "openai/gpt-4o-audio-preview"
+  },
   "gpt-4o-mini": {
     "openai": "gpt-4o-mini",
     "openrouter": "openai/gpt-4o-mini"
@@ -217,5 +266,9 @@
   "o4-mini": {
     "openai": "o4-mini",
     "openrouter": "openai/o4-mini"
+  },
+  "text-embedding-004": {
+    "gemini": "text-embedding-004",
+    "vertexai": "text-embedding-004"
   }
 }
data/lib/ruby_llm/aliases.rb CHANGED
@@ -1,53 +1,35 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Manages model aliases, allowing users to reference models by simpler names
-  # that map to specific model versions across different providers.
-  #
-  # Aliases are defined in aliases.json and follow the format:
-  # {
-  #   "simple-name": {
-  #     "provider1": "specific-version-for-provider1",
-  #     "provider2": "specific-version-for-provider2"
-  #   }
-  # }
+  # Manages model aliases for provider-specific versions
   class Aliases
     class << self
-      # Resolves a model ID to its provider-specific version
-      #
-      # @param model_id [String] the model identifier or alias
-      # @param provider_slug [String, Symbol, nil] optional provider to resolve for
-      # @return [String] the resolved model ID or the original if no alias exists
       def resolve(model_id, provider = nil)
        return model_id unless aliases[model_id]
 
        if provider
          aliases[model_id][provider.to_s] || model_id
        else
-          # Get native provider's version
          aliases[model_id].values.first || model_id
        end
      end
 
-      # Returns the loaded aliases mapping
-      # @return [Hash] the aliases mapping
      def aliases
        @aliases ||= load_aliases
      end
 
-      # Loads aliases from the JSON file
-      # @return [Hash] the loaded aliases
+      def aliases_file
+        File.expand_path('aliases.json', __dir__)
+      end
+
      def load_aliases
-        file_path = File.expand_path('aliases.json', __dir__)
-        if File.exist?(file_path)
-          JSON.parse(File.read(file_path))
+        if File.exist?(aliases_file)
+          JSON.parse(File.read(aliases_file))
        else
          {}
        end
      end
 
-      # Reloads aliases from disk
-      # @return [Hash] the reloaded aliases
      def reload!
        @aliases = load_aliases
      end
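Combined with the vertexai entries added to aliases.json above, resolution behaves roughly like this (values taken from the mapping shown earlier):

```ruby
RubyLLM::Aliases.resolve("gemini-2.5-flash", "vertexai")   # => "gemini-2.5-flash"
RubyLLM::Aliases.resolve("gemini-2.5-flash", "openrouter") # => "google/gemini-2.5-flash"

# With no provider given, the first (native) mapping wins:
RubyLLM::Aliases.resolve("gemini-2.5-flash")               # => "gemini-2.5-flash"
```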