ruby_llm_community 1.1.1 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +16 -3
- data/lib/generators/ruby_llm/install/install_generator.rb +8 -2
- data/lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt +9 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +0 -1
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +0 -3
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +0 -1
- data/lib/ruby_llm/configuration.rb +4 -0
- data/lib/ruby_llm/models.json +780 -511
- data/lib/ruby_llm/models.rb +7 -3
- data/lib/ruby_llm/moderation.rb +56 -0
- data/lib/ruby_llm/provider.rb +6 -0
- data/lib/ruby_llm/providers/openai/moderation.rb +34 -0
- data/lib/ruby_llm/providers/openai_base.rb +1 -0
- data/lib/ruby_llm/providers/red_candle/capabilities.rb +124 -0
- data/lib/ruby_llm/providers/red_candle/chat.rb +317 -0
- data/lib/ruby_llm/providers/red_candle/models.rb +121 -0
- data/lib/ruby_llm/providers/red_candle/streaming.rb +40 -0
- data/lib/ruby_llm/providers/red_candle.rb +90 -0
- data/lib/ruby_llm/railtie.rb +1 -1
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm_community.rb +32 -0
- metadata +9 -1
# frozen_string_literal: true

module RubyLLM
  module Providers
    class RedCandle
      # Streaming methods of the RedCandle integration.
      #
      # Red Candle runs models locally, so streaming is block-driven rather
      # than HTTP/SSE-driven; the HTTP-oriented hooks below exist only to
      # satisfy the base streaming interface.
      module Streaming
        # Runs a completion, yielding chunks to +block+.
        #
        # When payload[:stream] is truthy, delegates to
        # perform_streaming_completion! (defined in the Chat module).
        # Otherwise performs a blocking completion and yields the whole
        # result once, shaped like a streaming chunk.
        def stream(payload, &block)
          return perform_streaming_completion!(payload, &block) if payload[:stream]

          # Non-streaming fallback: run to completion, then emit the full
          # result as a single chunk so callers see a uniform interface.
          result = perform_completion!(payload)
          single_chunk = {
            content: result[:content],
            role: result[:role],
            finish_reason: result[:finish_reason]
          }
          block.call(single_chunk)
        end

        private

        # Red Candle handles streaming internally through blocks.
        # Present only for compatibility with the base streaming interface.
        def stream_processor
          nil
        end

        # Red Candle doesn't use HTTP responses; streaming is handled
        # directly in perform_streaming_completion!. Pass-through.
        def process_stream_response(response)
          response
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module RubyLLM
  module Providers
    # Red Candle provider for local LLM execution using the Candle Rust crate.
    #
    # Unlike HTTP-backed providers, this one loads and runs models in-process,
    # so api_base/headers are stubs and availability depends on the optional
    # red-candle gem being installed.
    class RedCandle < Provider
      include RedCandle::Chat
      include RedCandle::Models
      include RedCandle::Capabilities
      include RedCandle::Streaming

      # Verifies the red-candle gem is present, then prepares the model
      # cache and resolves the execution device from configuration.
      def initialize(config)
        ensure_red_candle_available!
        super
        @loaded_models = {} # Cache for loaded models
        @device = determine_device(config)
      end

      def api_base
        nil # Local execution, no API base needed
      end

      def headers
        {} # No HTTP headers needed
      end

      class << self
        def capabilities
          RedCandle::Capabilities
        end

        def configuration_requirements
          [] # No required config, device is optional
        end

        # Marks this provider as running locally (no remote API).
        def local?
          true
        end

        def supports_functions?(model_id = nil)
          RedCandle::Capabilities.supports_functions?(model_id)
        end

        # Builds Model::Info records for every supported model so they can
        # be registered with the global model registry.
        def models
          RedCandle::Models::SUPPORTED_MODELS.map do |model_data|
            Model::Info.new(
              id: model_data[:id],
              name: model_data[:name],
              provider: 'red_candle',
              type: 'chat',
              family: model_data[:family],
              context_window: model_data[:context_window],
              capabilities: %w[streaming structured_output],
              modalities: { input: %w[text], output: %w[text] }
            )
          end
        end
      end

      private

      # Raises a RubyLLM::Error with installation instructions when the
      # optional red-candle gem cannot be loaded.
      def ensure_red_candle_available!
        require 'candle'
      rescue LoadError
        raise Error.new(nil, "Red Candle gem is not installed. Add 'gem \"red-candle\", \"~> 1.2.3\"' to your Gemfile.")
      end

      # Maps the configured device name to a Candle device handle.
      # Unrecognized or absent values fall back to Candle's best available
      # device; any initialization failure falls back to CPU with a warning.
      def determine_device(config)
        requested = config.red_candle_device&.to_s&.downcase
        case requested
        when 'cpu'
          ::Candle::Device.cpu
        when 'cuda', 'gpu'
          ::Candle::Device.cuda
        when 'metal'
          ::Candle::Device.metal
        else
          ::Candle::Device.best
        end
      rescue StandardError => e
        RubyLLM.logger.warn "Failed to initialize device: #{e.message}. Falling back to CPU."
        ::Candle::Device.cpu
      end
    end
  end
end
|
data/lib/ruby_llm/railtie.rb
CHANGED
data/lib/ruby_llm/version.rb
CHANGED
data/lib/ruby_llm_community.rb
CHANGED
@@ -58,6 +58,10 @@ module RubyLLM
|
|
58
58
|
Embedding.embed(...)
|
59
59
|
end
|
60
60
|
|
61
|
+
# Convenience entry point: forwards all arguments to Moderation.moderate.
def moderate(...)
  Moderation.moderate(...)
end
|
64
|
+
|
61
65
|
def paint(...)
|
62
66
|
Image.paint(...)
|
63
67
|
end
|
@@ -101,6 +105,34 @@ RubyLLM::Provider.register :perplexity, RubyLLM::Providers::Perplexity
|
|
101
105
|
RubyLLM::Provider.register :vertexai, RubyLLM::Providers::VertexAI
|
102
106
|
RubyLLM::Provider.register :xai, RubyLLM::Providers::XAI
|
103
107
|
|
108
|
+
# Optional Red Candle provider - only available if gem is installed.
#
# NOTE: the released 1.2.0 code contained this begin/rescue block twice,
# verbatim, which registered the provider twice and appended every Red
# Candle model to the registry a second time (duplicate model entries).
# A single registration block is sufficient.
begin
  require 'candle'
  require 'ruby_llm/providers/red_candle'
  RubyLLM::Provider.register :red_candle, RubyLLM::Providers::RedCandle

  # Register Red Candle models with the global registry.
  # NOTE(review): reaching into @models via instance_variable_get bypasses
  # the Models public API - confirm no public append method exists.
  RubyLLM::Providers::RedCandle.models.each do |model|
    RubyLLM.models.instance_variable_get(:@models) << model
  end
rescue LoadError
  # Red Candle is optional - provider won't be available if gem isn't installed
end
|
135
|
+
|
104
136
|
if defined?(Rails::Railtie)
|
105
137
|
require 'ruby_llm/railtie'
|
106
138
|
require 'ruby_llm/active_record/acts_as'
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: ruby_llm_community
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 1.1.1
|
4
|
+
version: 1.2.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Paul Shippy
|
@@ -154,6 +154,7 @@ files:
|
|
154
154
|
- lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt
|
155
155
|
- lib/generators/ruby_llm/generator_helpers.rb
|
156
156
|
- lib/generators/ruby_llm/install/install_generator.rb
|
157
|
+
- lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt
|
157
158
|
- lib/generators/ruby_llm/install/templates/chat_model.rb.tt
|
158
159
|
- lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt
|
159
160
|
- lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt
|
@@ -195,6 +196,7 @@ files:
|
|
195
196
|
- lib/ruby_llm/models.json
|
196
197
|
- lib/ruby_llm/models.rb
|
197
198
|
- lib/ruby_llm/models_schema.json
|
199
|
+
- lib/ruby_llm/moderation.rb
|
198
200
|
- lib/ruby_llm/provider.rb
|
199
201
|
- lib/ruby_llm/providers/anthropic.rb
|
200
202
|
- lib/ruby_llm/providers/anthropic/capabilities.rb
|
@@ -248,6 +250,7 @@ files:
|
|
248
250
|
- lib/ruby_llm/providers/openai/images.rb
|
249
251
|
- lib/ruby_llm/providers/openai/media.rb
|
250
252
|
- lib/ruby_llm/providers/openai/models.rb
|
253
|
+
- lib/ruby_llm/providers/openai/moderation.rb
|
251
254
|
- lib/ruby_llm/providers/openai/response.rb
|
252
255
|
- lib/ruby_llm/providers/openai/response_media.rb
|
253
256
|
- lib/ruby_llm/providers/openai/streaming.rb
|
@@ -259,6 +262,11 @@ files:
|
|
259
262
|
- lib/ruby_llm/providers/perplexity/capabilities.rb
|
260
263
|
- lib/ruby_llm/providers/perplexity/chat.rb
|
261
264
|
- lib/ruby_llm/providers/perplexity/models.rb
|
265
|
+
- lib/ruby_llm/providers/red_candle.rb
|
266
|
+
- lib/ruby_llm/providers/red_candle/capabilities.rb
|
267
|
+
- lib/ruby_llm/providers/red_candle/chat.rb
|
268
|
+
- lib/ruby_llm/providers/red_candle/models.rb
|
269
|
+
- lib/ruby_llm/providers/red_candle/streaming.rb
|
262
270
|
- lib/ruby_llm/providers/vertexai.rb
|
263
271
|
- lib/ruby_llm/providers/vertexai/chat.rb
|
264
272
|
- lib/ruby_llm/providers/vertexai/embeddings.rb
|