ruby_llm_community 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. checksums.yaml +4 -4
  2. data/README.md +20 -5
  3. data/lib/generators/ruby_llm/generator_helpers.rb +129 -0
  4. data/lib/generators/ruby_llm/install/install_generator.rb +12 -129
  5. data/lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt +9 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +0 -1
  7. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +0 -3
  8. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +1 -4
  9. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +0 -1
  10. data/lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt +8 -0
  11. data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +47 -96
  12. data/lib/ruby_llm/attachment.rb +5 -0
  13. data/lib/ruby_llm/configuration.rb +4 -0
  14. data/lib/ruby_llm/mime_type.rb +4 -0
  15. data/lib/ruby_llm/model/info.rb +4 -0
  16. data/lib/ruby_llm/models.json +780 -511
  17. data/lib/ruby_llm/models.rb +7 -3
  18. data/lib/ruby_llm/moderation.rb +56 -0
  19. data/lib/ruby_llm/provider.rb +6 -0
  20. data/lib/ruby_llm/providers/gemini/capabilities.rb +5 -0
  21. data/lib/ruby_llm/providers/openai/moderation.rb +34 -0
  22. data/lib/ruby_llm/providers/openai_base.rb +1 -0
  23. data/lib/ruby_llm/providers/red_candle/capabilities.rb +124 -0
  24. data/lib/ruby_llm/providers/red_candle/chat.rb +317 -0
  25. data/lib/ruby_llm/providers/red_candle/models.rb +121 -0
  26. data/lib/ruby_llm/providers/red_candle/streaming.rb +40 -0
  27. data/lib/ruby_llm/providers/red_candle.rb +90 -0
  28. data/lib/ruby_llm/railtie.rb +1 -1
  29. data/lib/ruby_llm/version.rb +1 -1
  30. data/lib/ruby_llm_community.rb +32 -0
  31. metadata +10 -1
data/lib/ruby_llm/providers/red_candle/models.rb ADDED
@@ -0,0 +1,121 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class RedCandle
+       # Models methods of the RedCandle integration
+       module Models
+         # TODO: red-candle supports more models, but let's start with some well tested ones.
+         SUPPORTED_MODELS = [
+           {
+             id: 'google/gemma-3-4b-it-qat-q4_0-gguf',
+             name: 'Gemma 3 4B Instruct (Quantized)',
+             gguf_file: 'gemma-3-4b-it-q4_0.gguf',
+             tokenizer: 'google/gemma-3-4b-it', # Tokenizer from base model
+             context_window: 8192,
+             family: 'gemma',
+             architecture: 'gemma2',
+             supports_chat: true,
+             supports_structured: true
+           },
+           {
+             id: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF',
+             name: 'TinyLlama 1.1B Chat (Quantized)',
+             gguf_file: 'tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf',
+             context_window: 2048,
+             family: 'llama',
+             architecture: 'llama',
+             supports_chat: true,
+             supports_structured: true
+           },
+           {
+             id: 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF',
+             name: 'Mistral 7B Instruct v0.2 (Quantized)',
+             gguf_file: 'mistral-7b-instruct-v0.2.Q4_K_M.gguf',
+             tokenizer: 'mistralai/Mistral-7B-Instruct-v0.2',
+             context_window: 32_768,
+             family: 'mistral',
+             architecture: 'mistral',
+             supports_chat: true,
+             supports_structured: true
+           },
+           {
+             id: 'Qwen/Qwen2.5-1.5B-Instruct-GGUF',
45
+ gguf_file: 'qwen2.5-1.5b-instruct-q4_k_m.gguf',
46
+ context_window: 32_768,
47
+ family: 'qwen2',
48
+ architecture: 'qwen2',
49
+ supports_chat: true,
50
+ supports_structured: true
51
+ },
52
+ {
53
+ id: 'microsoft/Phi-3-mini-4k-instruct',
54
+ name: 'Phi 3',
55
+ context_window: 4096,
56
+ family: 'phi',
57
+ architecture: 'phi',
58
+ supports_chat: true,
59
+ supports_structured: true
60
+ }
61
+ ].freeze
62
+
63
+ def list_models
64
+ SUPPORTED_MODELS.map do |model_data|
65
+ Model::Info.new(
66
+ id: model_data[:id],
67
+ name: model_data[:name],
68
+ provider: slug,
69
+ family: model_data[:family],
70
+ context_window: model_data[:context_window],
71
+ capabilities: %w[streaming structured_output],
72
+ modalities: { input: %w[text], output: %w[text] }
73
+ )
74
+ end
75
+ end
76
+
77
+ def models
78
+ @models ||= list_models
79
+ end
80
+
81
+ def model(id)
82
+ models.find { |m| m.id == id } ||
83
+ raise(Error.new(nil,
84
+ "Model #{id} not found in Red Candle provider. Available models: #{model_ids.join(', ')}"))
85
+ end
86
+
87
+ def model_available?(id)
88
+ SUPPORTED_MODELS.any? { |m| m[:id] == id }
89
+ end
90
+
91
+ def model_ids
92
+ SUPPORTED_MODELS.map { |m| m[:id] }
93
+ end
94
+
95
+ def model_info(id)
96
+ SUPPORTED_MODELS.find { |m| m[:id] == id }
97
+ end
98
+
99
+ def supports_chat?(model_id)
100
+ info = model_info(model_id)
101
+ info ? info[:supports_chat] : false
102
+ end
103
+
104
+ def supports_structured?(model_id)
105
+ info = model_info(model_id)
106
+ info ? info[:supports_structured] : false
107
+ end
108
+
109
+ def gguf_file_for(model_id)
110
+ info = model_info(model_id)
111
+ info ? info[:gguf_file] : nil
112
+ end
113
+
114
+ def tokenizer_for(model_id)
115
+ info = model_info(model_id)
116
+ info ? info[:tokenizer] : nil
117
+ end
118
+ end
119
+ end
120
+ end
121
+ end
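The lookup helpers above are simple scans over SUPPORTED_MODELS. A minimal usage sketch, assuming the red-candle gem is installed and that the provider can be instantiated with the global RubyLLM.config (as the class definition later in this diff suggests); the ids come from the SUPPORTED_MODELS table:

provider = RubyLLM::Providers::RedCandle.new(RubyLLM.config)
provider.model_available?('TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF') # => true
provider.gguf_file_for('TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF')    # => 'tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf'
provider.tokenizer_for('google/gemma-3-4b-it-qat-q4_0-gguf')        # => 'google/gemma-3-4b-it'
provider.model('missing/model')                                     # raises Error listing the available model_ids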
data/lib/ruby_llm/providers/red_candle/streaming.rb ADDED
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class RedCandle
+       # Streaming methods of the RedCandle integration
+       module Streaming
+         def stream(payload, &block)
+           if payload[:stream]
+             perform_streaming_completion!(payload, &block)
+           else
+             # Non-streaming fallback
+             result = perform_completion!(payload)
+             # Yield the complete result as a single chunk
+             chunk = {
+               content: result[:content],
+               role: result[:role],
+               finish_reason: result[:finish_reason]
+             }
+             block.call(chunk)
+           end
+         end
+
+         private
+
+         def stream_processor
+           # Red Candle handles streaming internally through blocks
+           # This method is here for compatibility with the base streaming interface
+           nil
+         end
+
+         def process_stream_response(response)
+           # Red Candle doesn't use HTTP responses
+           # Streaming is handled directly in perform_streaming_completion!
+           response
+         end
+       end
+     end
+   end
+ end
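The else branch gives non-streaming calls the same block-based interface: the block fires exactly once with the aggregate result. A hedged calling sketch (the payload keys are assumed from the code above; perform_completion! lives in this provider's Chat module, which is not shown in this section):

payload = { model: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF', stream: false }
provider.stream(payload) do |chunk|
  # Runs once with the full result when stream: false;
  # runs per chunk when stream: true.
  print chunk[:content]
end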
data/lib/ruby_llm/providers/red_candle.rb ADDED
@@ -0,0 +1,90 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     # Red Candle provider for local LLM execution using the Candle Rust crate.
+     class RedCandle < Provider
+       include RedCandle::Chat
+       include RedCandle::Models
+       include RedCandle::Capabilities
+       include RedCandle::Streaming
+
+       def initialize(config)
+         ensure_red_candle_available!
+         super
+         @loaded_models = {} # Cache for loaded models
+         @device = determine_device(config)
+       end
+
+       def api_base
+         nil # Local execution, no API base needed
+       end
+
+       def headers
+         {} # No HTTP headers needed
+       end
+
+       class << self
+         def capabilities
+           RedCandle::Capabilities
+         end
+
+         def configuration_requirements
+           [] # No required config, device is optional
+         end
+
+         def local?
+           true
+         end
+
+         def supports_functions?(model_id = nil)
+           RedCandle::Capabilities.supports_functions?(model_id)
+         end
+
+         def models
+           # Return Red Candle models for registration
+           RedCandle::Models::SUPPORTED_MODELS.map do |model_data|
+             Model::Info.new(
+               id: model_data[:id],
+               name: model_data[:name],
+               provider: 'red_candle',
+               type: 'chat',
+               family: model_data[:family],
+               context_window: model_data[:context_window],
+               capabilities: %w[streaming structured_output],
+               modalities: { input: %w[text], output: %w[text] }
+             )
+           end
+         end
+       end
+
+       private
+
+       def ensure_red_candle_available!
+         require 'candle'
+       rescue LoadError
+         raise Error.new(nil, "Red Candle gem is not installed. Add 'gem \"red-candle\", \"~> 1.2.3\"' to your Gemfile.")
+       end
+
+       def determine_device(config)
+         if config.red_candle_device
+           case config.red_candle_device.to_s.downcase
+           when 'cpu'
+             ::Candle::Device.cpu
+           when 'cuda', 'gpu'
+             ::Candle::Device.cuda
+           when 'metal'
+             ::Candle::Device.metal
+           else
+             ::Candle::Device.best
+           end
+         else
+           ::Candle::Device.best
+         end
+       rescue StandardError => e
+         RubyLLM.logger.warn "Failed to initialize device: #{e.message}. Falling back to CPU."
+         ::Candle::Device.cpu
+       end
+     end
+   end
+ end
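Device selection honors an optional red_candle_device setting (presumably the four-line configuration.rb addition in this release's file list). A minimal sketch of opting into a specific device; per the case statement above, unrecognized values fall through to ::Candle::Device.best, and any device error drops to CPU with a logged warning:

RubyLLM.configure do |config|
  config.red_candle_device = 'metal' # or 'cpu', 'cuda'/'gpu'; leave unset for Candle's best device
end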
data/lib/ruby_llm/railtie.rb CHANGED
@@ -5,7 +5,7 @@ module RubyLLM
    class Railtie < Rails::Railtie
      initializer 'ruby_llm.inflections' do
        ActiveSupport::Inflector.inflections(:en) do |inflect|
-         inflect.acronym 'LLM'
+         inflect.acronym 'RubyLLM'
        end
      end
 
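Registering the full 'RubyLLM' acronym (rather than just 'LLM') keeps Rails' inflector, and therefore Zeitwerk autoloading, mapping ruby_llm file paths to the RubyLLM constant. For illustration:

ActiveSupport::Inflector.camelize('ruby_llm') # => "RubyLLM" once the acronym is registered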
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module RubyLLM
-   VERSION = '1.1.0'
+   VERSION = '1.2.0'
  end
data/lib/ruby_llm_community.rb CHANGED
@@ -58,6 +58,10 @@ module RubyLLM
        Embedding.embed(...)
      end
 
+     def moderate(...)
+       Moderation.moderate(...)
+     end
+
      def paint(...)
        Image.paint(...)
      end
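This mirrors the neighboring embed/paint delegators: RubyLLM.moderate forwards to the new Moderation class (lib/ruby_llm/moderation.rb, backed by the OpenAI moderation module also added in this release). A hedged usage sketch; the diff does not show the shape of the returned object:

result = RubyLLM.moderate('user-supplied text to screen')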
@@ -101,6 +105,34 @@ RubyLLM::Provider.register :perplexity, RubyLLM::Providers::Perplexity
  RubyLLM::Provider.register :vertexai, RubyLLM::Providers::VertexAI
  RubyLLM::Provider.register :xai, RubyLLM::Providers::XAI
 
+ # Optional Red Candle provider - only available if gem is installed
+ begin
+   require 'candle'
+   require 'ruby_llm/providers/red_candle'
+   RubyLLM::Provider.register :red_candle, RubyLLM::Providers::RedCandle
+
+   # Register Red Candle models with the global registry
+   RubyLLM::Providers::RedCandle.models.each do |model|
+     RubyLLM.models.instance_variable_get(:@models) << model
+   end
+ rescue LoadError
+   # Red Candle is optional - provider won't be available if gem isn't installed
+ end
+
+ # Optional Red Candle provider - only available if gem is installed
+ begin
+   require 'candle'
+   require 'ruby_llm/providers/red_candle'
+   RubyLLM::Provider.register :red_candle, RubyLLM::Providers::RedCandle
+
+   # Register Red Candle models with the global registry
+   RubyLLM::Providers::RedCandle.models.each do |model|
+     RubyLLM.models.instance_variable_get(:@models) << model
+   end
+ rescue LoadError
+   # Red Candle is optional - provider won't be available if gem isn't installed
+ end
+
  if defined?(Rails::Railtie)
    require 'ruby_llm/railtie'
    require 'ruby_llm/active_record/acts_as'
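Because the requires sit inside begin/rescue LoadError, registration is skipped entirely unless the red-candle gem is present; opting in is a single Gemfile line (constraint taken from the error message in ensure_red_candle_available! above). Note that the released file contains this registration block twice, as the new-file line numbers 108-135 confirm, so Red Candle models are appended to the registry twice:

# Gemfile
gem 'red-candle', '~> 1.2.3'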
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: ruby_llm_community
  version: !ruby/object:Gem::Version
-   version: 1.1.0
+   version: 1.2.0
  platform: ruby
  authors:
  - Paul Shippy
@@ -152,7 +152,9 @@ files:
  - lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt
  - lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt
  - lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt
+ - lib/generators/ruby_llm/generator_helpers.rb
  - lib/generators/ruby_llm/install/install_generator.rb
+ - lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt
  - lib/generators/ruby_llm/install/templates/chat_model.rb.tt
  - lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt
  - lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt
@@ -194,6 +196,7 @@ files:
  - lib/ruby_llm/models.json
  - lib/ruby_llm/models.rb
  - lib/ruby_llm/models_schema.json
+ - lib/ruby_llm/moderation.rb
  - lib/ruby_llm/provider.rb
  - lib/ruby_llm/providers/anthropic.rb
  - lib/ruby_llm/providers/anthropic/capabilities.rb
@@ -247,6 +250,7 @@ files:
  - lib/ruby_llm/providers/openai/images.rb
  - lib/ruby_llm/providers/openai/media.rb
  - lib/ruby_llm/providers/openai/models.rb
+ - lib/ruby_llm/providers/openai/moderation.rb
  - lib/ruby_llm/providers/openai/response.rb
  - lib/ruby_llm/providers/openai/response_media.rb
  - lib/ruby_llm/providers/openai/streaming.rb
@@ -258,6 +262,11 @@ files:
  - lib/ruby_llm/providers/perplexity/capabilities.rb
  - lib/ruby_llm/providers/perplexity/chat.rb
  - lib/ruby_llm/providers/perplexity/models.rb
+ - lib/ruby_llm/providers/red_candle.rb
+ - lib/ruby_llm/providers/red_candle/capabilities.rb
+ - lib/ruby_llm/providers/red_candle/chat.rb
+ - lib/ruby_llm/providers/red_candle/models.rb
+ - lib/ruby_llm/providers/red_candle/streaming.rb
  - lib/ruby_llm/providers/vertexai.rb
  - lib/ruby_llm/providers/vertexai/chat.rb
  - lib/ruby_llm/providers/vertexai/embeddings.rb