ruby_llm 1.7.1 → 1.8.0

This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -194,15 +194,15 @@ module RubyLLM
194
194
  end
195
195
 
196
196
  def embedding_models
197
- self.class.new(all.select { |m| m.type == 'embedding' })
197
+ self.class.new(all.select { |m| m.type == 'embedding' || m.modalities.output.include?('embeddings') })
198
198
  end
199
199
 
200
200
  def audio_models
201
- self.class.new(all.select { |m| m.type == 'audio' })
201
+ self.class.new(all.select { |m| m.type == 'audio' || m.modalities.output.include?('audio') })
202
202
  end
203
203
 
204
204
  def image_models
205
- self.class.new(all.select { |m| m.type == 'image' })
205
+ self.class.new(all.select { |m| m.type == 'image' || m.modalities.output.include?('image') })
206
206
  end
207
207
 
208
208
  def by_family(family)
@@ -217,6 +217,10 @@ module RubyLLM
217
217
  self.class.refresh!(remote_only: remote_only)
218
218
  end
219
219
 
220
+ def resolve(model_id, provider: nil, assume_exists: false, config: nil)
221
+ self.class.resolve(model_id, provider: provider, assume_exists: assume_exists, config: config)
222
+ end
223
+
220
224
  private
221
225
 
222
226
  def find_with_provider(model_id, provider)
@@ -0,0 +1,56 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ # Identify potentially harmful content in text.
5
+ # https://platform.openai.com/docs/guides/moderation
6
+ class Moderation
7
+ attr_reader :id, :model, :results
8
+
9
+ def initialize(id:, model:, results:)
10
+ @id = id
11
+ @model = model
12
+ @results = results
13
+ end
14
+
15
+ def self.moderate(input,
16
+ model: nil,
17
+ provider: nil,
18
+ assume_model_exists: false,
19
+ context: nil)
20
+ config = context&.config || RubyLLM.config
21
+ model ||= config.default_moderation_model || 'omni-moderation-latest'
22
+ model, provider_instance = Models.resolve(model, provider: provider, assume_exists: assume_model_exists,
23
+ config: config)
24
+ model_id = model.id
25
+
26
+ provider_instance.moderate(input, model: model_id)
27
+ end
28
+
29
+ # Convenience method to get content from moderation result
30
+ def content
31
+ results
32
+ end
33
+
34
+ # Check if any content was flagged
35
+ def flagged?
36
+ results.any? { |result| result['flagged'] }
37
+ end
38
+
39
+ # Get all flagged categories across all results
40
+ def flagged_categories
41
+ results.flat_map do |result|
42
+ result['categories']&.select { |_category, flagged| flagged }&.keys || []
43
+ end.uniq
44
+ end
45
+
46
+ # Get category scores for the first result (most common case)
47
+ def category_scores
48
+ results.first&.dig('category_scores') || {}
49
+ end
50
+
51
+ # Get categories for the first result (most common case)
52
+ def categories
53
+ results.first&.dig('categories') || {}
54
+ end
55
+ end
56
+ end
@@ -70,6 +70,12 @@ module RubyLLM
70
70
  parse_embedding_response(response, model:, text:)
71
71
  end
72
72
 
73
+ def moderate(input, model:)
74
+ payload = render_moderation_payload(input, model:)
75
+ response = @connection.post moderation_url, payload
76
+ parse_moderation_response(response, model:)
77
+ end
78
+
73
79
  def paint(prompt, model:, size:)
74
80
  payload = render_image_payload(prompt, model:, size:)
75
81
  response = @connection.post images_url, payload
@@ -52,6 +52,10 @@ module RubyLLM
52
52
  model_id.match?(/gemini|flash|pro|imagen/)
53
53
  end
54
54
 
55
+ def supports_video?(model_id)
56
+ model_id.match?(/gemini/)
57
+ end
58
+
55
59
  def supports_functions?(model_id)
56
60
  return false if model_id.match?(/text-embedding|embedding-001|aqa|flash-lite|imagen|gemini-2\.0-flash-lite/)
57
61
 
@@ -217,6 +221,7 @@ module RubyLLM
217
221
  modalities[:input] << 'pdf'
218
222
  end
219
223
 
224
+ modalities[:input] << 'video' if supports_video?(model_id)
220
225
  modalities[:input] << 'audio' if model_id.match?(/audio/)
221
226
  modalities[:output] << 'embeddings' if model_id.match?(/embedding|gemini-embedding/)
222
227
  modalities[:output] = ['image'] if model_id.match?(/imagen/)
@@ -0,0 +1,34 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ class OpenAI
6
+ # Moderation methods of the OpenAI API integration
7
+ module Moderation
8
+ module_function
9
+
10
+ def moderation_url
11
+ 'moderations'
12
+ end
13
+
14
+ def render_moderation_payload(input, model:)
15
+ {
16
+ model: model,
17
+ input: input
18
+ }
19
+ end
20
+
21
+ def parse_moderation_response(response, model:)
22
+ data = response.body
23
+ raise Error.new(response, data.dig('error', 'message')) if data.dig('error', 'message')
24
+
25
+ RubyLLM::Moderation.new(
26
+ id: data['id'],
27
+ model: model,
28
+ results: data['results'] || []
29
+ )
30
+ end
31
+ end
32
+ end
33
+ end
34
+ end
@@ -7,6 +7,7 @@ module RubyLLM
7
7
  include OpenAI::Chat
8
8
  include OpenAI::Embeddings
9
9
  include OpenAI::Models
10
+ include OpenAI::Moderation
10
11
  include OpenAI::Streaming
11
12
  include OpenAI::Tools
12
13
  include OpenAI::Images
@@ -5,7 +5,7 @@ module RubyLLM
5
5
  class Railtie < Rails::Railtie
6
6
  initializer 'ruby_llm.inflections' do
7
7
  ActiveSupport::Inflector.inflections(:en) do |inflect|
8
- inflect.acronym 'LLM'
8
+ inflect.acronym 'RubyLLM'
9
9
  end
10
10
  end
11
11
 
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module RubyLLM
4
- VERSION = '1.7.1'
4
+ VERSION = '1.8.0'
5
5
  end
data/lib/ruby_llm.rb CHANGED
@@ -48,6 +48,10 @@ module RubyLLM
48
48
  Embedding.embed(...)
49
49
  end
50
50
 
51
+ def moderate(...)
52
+ Moderation.moderate(...)
53
+ end
54
+
51
55
  def paint(...)
52
56
  Image.paint(...)
53
57
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ruby_llm
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.7.1
4
+ version: 1.8.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Carmine Paolino
@@ -154,6 +154,7 @@ files:
154
154
  - lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt
155
155
  - lib/generators/ruby_llm/generator_helpers.rb
156
156
  - lib/generators/ruby_llm/install/install_generator.rb
157
+ - lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt
157
158
  - lib/generators/ruby_llm/install/templates/chat_model.rb.tt
158
159
  - lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt
159
160
  - lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt
@@ -194,6 +195,7 @@ files:
194
195
  - lib/ruby_llm/models.json
195
196
  - lib/ruby_llm/models.rb
196
197
  - lib/ruby_llm/models_schema.json
198
+ - lib/ruby_llm/moderation.rb
197
199
  - lib/ruby_llm/provider.rb
198
200
  - lib/ruby_llm/providers/anthropic.rb
199
201
  - lib/ruby_llm/providers/anthropic/capabilities.rb
@@ -247,6 +249,7 @@ files:
247
249
  - lib/ruby_llm/providers/openai/images.rb
248
250
  - lib/ruby_llm/providers/openai/media.rb
249
251
  - lib/ruby_llm/providers/openai/models.rb
252
+ - lib/ruby_llm/providers/openai/moderation.rb
250
253
  - lib/ruby_llm/providers/openai/streaming.rb
251
254
  - lib/ruby_llm/providers/openai/tools.rb
252
255
  - lib/ruby_llm/providers/openrouter.rb