ruby_llm 1.3.2beta1 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +39 -18
- data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
- data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
- data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
- data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install_generator.rb +121 -0
- data/lib/ruby_llm/active_record/acts_as.rb +23 -5
- data/lib/ruby_llm/aliases.json +6 -21
- data/lib/ruby_llm/chat.rb +46 -3
- data/lib/ruby_llm/configuration.rb +4 -0
- data/lib/ruby_llm/error.rb +1 -0
- data/lib/ruby_llm/message.rb +3 -1
- data/lib/ruby_llm/models.json +8461 -6427
- data/lib/ruby_llm/provider.rb +12 -6
- data/lib/ruby_llm/providers/anthropic/chat.rb +13 -12
- data/lib/ruby_llm/providers/anthropic/media.rb +2 -0
- data/lib/ruby_llm/providers/anthropic/tools.rb +23 -13
- data/lib/ruby_llm/providers/bedrock/chat.rb +4 -5
- data/lib/ruby_llm/providers/bedrock/media.rb +2 -0
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +2 -2
- data/lib/ruby_llm/providers/gemini/chat.rb +37 -2
- data/lib/ruby_llm/providers/gemini/media.rb +2 -0
- data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
- data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
- data/lib/ruby_llm/providers/gpustack.rb +36 -0
- data/lib/ruby_llm/providers/mistral/capabilities.rb +151 -0
- data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
- data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
- data/lib/ruby_llm/providers/mistral/models.rb +49 -0
- data/lib/ruby_llm/providers/mistral.rb +37 -0
- data/lib/ruby_llm/providers/ollama/media.rb +2 -0
- data/lib/ruby_llm/providers/openai/chat.rb +17 -2
- data/lib/ruby_llm/providers/openai/media.rb +2 -0
- data/lib/ruby_llm/providers/openai/streaming.rb +14 -0
- data/lib/ruby_llm/providers/perplexity/capabilities.rb +167 -0
- data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
- data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
- data/lib/ruby_llm/providers/perplexity.rb +57 -0
- data/lib/ruby_llm/railtie.rb +5 -0
- data/lib/ruby_llm/stream_accumulator.rb +3 -2
- data/lib/ruby_llm/streaming.rb +25 -7
- data/lib/ruby_llm/utils.rb +10 -0
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm.rb +11 -4
- data/lib/tasks/models_docs.rake +7 -7
- data/lib/tasks/models_update.rake +2 -0
- metadata +22 -1
data/lib/ruby_llm/providers/mistral.rb
ADDED
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Mistral API integration.
+    module Mistral
+      extend OpenAI
+      extend Mistral::Chat
+      extend Mistral::Models
+      extend Mistral::Embeddings
+
+      module_function
+
+      def api_base(_config)
+        'https://api.mistral.ai/v1'
+      end
+
+      def headers(config)
+        {
+          'Authorization' => "Bearer #{config.mistral_api_key}"
+        }
+      end
+
+      def capabilities
+        Mistral::Capabilities
+      end
+
+      def slug
+        'mistral'
+      end
+
+      def configuration_requirements
+        %i[mistral_api_key]
+      end
+    end
+  end
+end
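The new provider is a thin layer over the shared OpenAI-compatible plumbing: it only overrides the API base, auth header, slug, and configuration requirements. A minimal usage sketch, assuming the standard `RubyLLM.configure`/`RubyLLM.chat` API; the model name is illustrative:

```ruby
require 'ruby_llm'

RubyLLM.configure do |config|
  config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY')
end

# Model name is illustrative; any model served by the Mistral API works.
chat = RubyLLM.chat(model: 'mistral-small-latest', provider: :mistral)
puts chat.ask('Name three French rivers.').content
```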
data/lib/ruby_llm/providers/openai/chat.rb
CHANGED
@@ -11,7 +11,7 @@ module RubyLLM
 
       module_function
 
-      def render_payload(messages, tools:, temperature:, model:, stream: false)
+      def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil) # rubocop:disable Metrics/ParameterLists
         payload = {
           model: model,
           messages: format_messages(messages),
@@ -26,6 +26,20 @@ module RubyLLM
           payload[:tool_choice] = 'auto'
         end
 
+        if schema
+          # Use strict mode from schema if specified, default to true
+          strict = schema[:strict] != false
+
+          payload[:response_format] = {
+            type: 'json_schema',
+            json_schema: {
+              name: 'response',
+              schema: schema,
+              strict: strict
+            }
+          }
+        end
+
         payload[:stream_options] = { include_usage: true } if stream
         payload
       end
@@ -45,7 +59,8 @@ module RubyLLM
           tool_calls: parse_tool_calls(message_data['tool_calls']),
           input_tokens: data['usage']['prompt_tokens'],
           output_tokens: data['usage']['completion_tokens'],
-          model_id: data['model']
+          model_id: data['model'],
+          raw: response
         )
       end
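The `schema:` branch turns a plain JSON Schema into OpenAI's structured-output `response_format`, with `strict` defaulting to true unless the schema explicitly opts out. A sketch of the transformation (not the library's public API), mirroring the code above:

```ruby
schema = {
  type: 'object',
  properties: { name: { type: 'string' } },
  required: ['name']
}

strict = schema[:strict] != false # nil counts as strict

response_format = {
  type: 'json_schema',
  json_schema: { name: 'response', schema: schema, strict: strict }
}
# => { type: 'json_schema',
#      json_schema: { name: 'response', schema: {...}, strict: true } }
```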
data/lib/ruby_llm/providers/openai/streaming.rb
CHANGED
@@ -21,6 +21,20 @@ module RubyLLM
           output_tokens: data.dig('usage', 'completion_tokens')
         )
       end
+
+      def parse_streaming_error(data)
+        error_data = JSON.parse(data)
+        return unless error_data['error']
+
+        case error_data.dig('error', 'type')
+        when 'server_error'
+          [500, error_data['error']['message']]
+        when 'rate_limit_exceeded', 'insufficient_quota'
+          [429, error_data['error']['message']]
+        else
+          [400, error_data['error']['message']]
+        end
+      end
     end
   end
 end
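`parse_streaming_error` maps the `type` field of an SSE error payload onto an HTTP-style status code so the shared error middleware can raise the right exception class. Replaying the mapping on a sample payload (the body is illustrative):

```ruby
require 'json'

data = { error: { type: 'rate_limit_exceeded', message: 'Slow down' } }.to_json

error_data = JSON.parse(data)
status = case error_data.dig('error', 'type')
         when 'server_error' then 500
         when 'rate_limit_exceeded', 'insufficient_quota' then 429
         else 400
         end
[status, error_data['error']['message']] # => [429, "Slow down"]
```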
data/lib/ruby_llm/providers/perplexity/capabilities.rb
ADDED
@@ -0,0 +1,167 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Perplexity
+      # Determines capabilities and pricing for Perplexity models
+      module Capabilities
+        module_function
+
+        # Returns the context window size for the given model ID
+        # @param model_id [String] the model identifier
+        # @return [Integer] the context window size in tokens
+        def context_window_for(model_id)
+          case model_id
+          when /sonar-pro/ then 200_000
+          else 128_000
+          end
+        end
+
+        # Returns the maximum number of tokens that can be generated
+        # @param model_id [String] the model identifier
+        # @return [Integer] the maximum number of tokens
+        def max_tokens_for(model_id)
+          case model_id
+          when /sonar-(?:pro|reasoning-pro)/ then 8_192
+          else 4_096
+          end
+        end
+
+        # Returns the price per million tokens for input
+        # @param model_id [String] the model identifier
+        # @return [Float] the price per million tokens in USD
+        def input_price_for(model_id)
+          PRICES.dig(model_family(model_id), :input) || 1.0
+        end
+
+        # Returns the price per million tokens for output
+        # @param model_id [String] the model identifier
+        # @return [Float] the price per million tokens in USD
+        def output_price_for(model_id)
+          PRICES.dig(model_family(model_id), :output) || 1.0
+        end
+
+        # Determines if the model supports vision capabilities
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports vision
+        def supports_vision?(model_id)
+          case model_id
+          when /sonar-reasoning-pro/, /sonar-reasoning/, /sonar-pro/, /sonar/ then true
+          else false
+          end
+        end
+
+        # Determines if the model supports function calling
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports functions
+        def supports_functions?(_model_id)
+          false
+        end
+
+        # Determines if the model supports JSON mode
+        def supports_json_mode?(_model_id)
+          true
+        end
+
+        # Formats the model ID into a human-readable display name
+        # @param model_id [String] the model identifier
+        # @return [String] the formatted display name
+        def format_display_name(model_id)
+          case model_id
+          when 'sonar' then 'Sonar'
+          when 'sonar-pro' then 'Sonar Pro'
+          when 'sonar-reasoning' then 'Sonar Reasoning'
+          when 'sonar-reasoning-pro' then 'Sonar Reasoning Pro'
+          when 'sonar-deep-research' then 'Sonar Deep Research'
+          else
+            model_id.split('-')
+                    .map(&:capitalize)
+                    .join(' ')
+          end
+        end
+
+        # Returns the model type
+        # @param model_id [String] the model identifier
+        # @return [String] the model type (e.g., 'chat')
+        def model_type(_model_id)
+          'chat'
+        end
+
+        # Returns the model family
+        # @param model_id [String] the model identifier
+        # @return [Symbol] the model family
+        def model_family(model_id)
+          case model_id
+          when 'sonar' then :sonar
+          when 'sonar-pro' then :sonar_pro
+          when 'sonar-reasoning' then :sonar_reasoning
+          when 'sonar-reasoning-pro' then :sonar_reasoning_pro
+          when 'sonar-deep-research' then :sonar_deep_research
+          else :unknown
+          end
+        end
+
+        def modalities_for(_model_id)
+          {
+            input: ['text'],
+            output: ['text']
+          }
+        end
+
+        def capabilities_for(model_id)
+          capabilities = %w[streaming json_mode]
+          capabilities << 'vision' if supports_vision?(model_id)
+          capabilities
+        end
+
+        def pricing_for(model_id)
+          family = model_family(model_id)
+          prices = PRICES.fetch(family, { input: 1.0, output: 1.0 })
+
+          standard_pricing = {
+            input_per_million: prices[:input],
+            output_per_million: prices[:output]
+          }
+
+          # Add special pricing if available
+          standard_pricing[:citation_per_million] = prices[:citation] if prices[:citation]
+          standard_pricing[:reasoning_per_million] = prices[:reasoning] if prices[:reasoning]
+          standard_pricing[:search_per_thousand] = prices[:search_queries] if prices[:search_queries]
+
+          {
+            text_tokens: {
+              standard: standard_pricing
+            }
+          }
+        end
+
+        # Pricing information for Perplexity models (USD per 1M tokens)
+        PRICES = {
+          sonar: {
+            input: 1.0,
+            output: 1.0
+          },
+          sonar_pro: {
+            input: 3.0,
+            output: 15.0
+          },
+          sonar_reasoning: {
+            input: 1.0,
+            output: 5.0
+          },
+          sonar_reasoning_pro: {
+            input: 2.0,
+            output: 8.0
+          },
+          sonar_deep_research: {
+            input: 2.0,
+            output: 8.0,
+            citation: 2.0,
+            reasoning: 3.0,
+            search_queries: 5.0
+          }
+        }.freeze
+      end
+    end
+  end
+end
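`pricing_for` folds the `PRICES` table into the registry's pricing shape, attaching the deep-research extras only when present. For example, given the table above:

```ruby
RubyLLM::Providers::Perplexity::Capabilities.pricing_for('sonar-deep-research')
# => {
#      text_tokens: {
#        standard: {
#          input_per_million: 2.0,
#          output_per_million: 8.0,
#          citation_per_million: 2.0,
#          reasoning_per_million: 3.0,
#          search_per_thousand: 5.0
#        }
#      }
#    }
```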
data/lib/ruby_llm/providers/perplexity/chat.rb
ADDED
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Perplexity
+      # Chat formatting for Perplexity provider
+      module Chat
+        module_function
+
+        def format_role(role)
+          # Perplexity doesn't use the new OpenAI convention for system prompts
+          role.to_s
+        end
+      end
+    end
+  end
+end
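Because `format_role` passes roles through verbatim, a system prompt is sent as role `"system"` instead of being remapped the way the OpenAI formatter's newer convention does:

```ruby
RubyLLM::Providers::Perplexity::Chat.format_role(:system)    # => "system"
RubyLLM::Providers::Perplexity::Chat.format_role(:assistant) # => "assistant"
```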
data/lib/ruby_llm/providers/perplexity/models.rb
ADDED
@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Perplexity
+      # Models methods of the Perplexity API integration
+      module Models
+        def list_models(**)
+          slug = 'perplexity'
+          capabilities = Perplexity::Capabilities
+          parse_list_models_response(nil, slug, capabilities)
+        end
+
+        def parse_list_models_response(_response, slug, capabilities)
+          [
+            create_model_info('sonar', slug, capabilities),
+            create_model_info('sonar-pro', slug, capabilities),
+            create_model_info('sonar-reasoning', slug, capabilities),
+            create_model_info('sonar-reasoning-pro', slug, capabilities),
+            create_model_info('sonar-deep-research', slug, capabilities)
+          ]
+        end
+
+        def create_model_info(id, slug, capabilities)
+          Model::Info.new(
+            id: id,
+            name: capabilities.format_display_name(id),
+            provider: slug,
+            family: capabilities.model_family(id).to_s,
+            created_at: Time.now,
+            context_window: capabilities.context_window_for(id),
+            max_output_tokens: capabilities.max_tokens_for(id),
+            modalities: capabilities.modalities_for(id),
+            capabilities: capabilities.capabilities_for(id),
+            pricing: capabilities.pricing_for(id),
+            metadata: {}
+          )
+        end
+      end
+    end
+  end
+end
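Perplexity exposes no model-listing endpoint here, so `list_models` ignores the (nil) response and builds five `Model::Info` records locally. Assuming `Model::Info` exposes readers for its constructor arguments:

```ruby
RubyLLM::Providers::Perplexity.list_models.map(&:id)
# => ["sonar", "sonar-pro", "sonar-reasoning",
#     "sonar-reasoning-pro", "sonar-deep-research"]
```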
data/lib/ruby_llm/providers/perplexity.rb
ADDED
@@ -0,0 +1,57 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Perplexity API integration.
+    module Perplexity
+      extend OpenAI
+      extend Perplexity::Chat
+      extend Perplexity::Models
+
+      module_function
+
+      def api_base(_config)
+        'https://api.perplexity.ai'
+      end
+
+      def headers(config)
+        {
+          'Authorization' => "Bearer #{config.perplexity_api_key}",
+          'Content-Type' => 'application/json'
+        }
+      end
+
+      def capabilities
+        Perplexity::Capabilities
+      end
+
+      def slug
+        'perplexity'
+      end
+
+      def configuration_requirements
+        %i[perplexity_api_key]
+      end
+
+      def parse_error(response)
+        body = response.body
+        return if body.empty?
+
+        # If response is HTML (Perplexity returns HTML for auth errors)
+        if body.include?('<html>') && body.include?('<title>')
+          # Extract title content
+          title_match = body.match(%r{<title>(.+?)</title>})
+          if title_match
+            # Clean up the title - remove status code if present
+            message = title_match[1]
+            message = message.sub(/^\d+\s+/, '') # Remove leading digits and space
+            return message
+          end
+        end
+
+        # Fall back to parent's implementation
+        super
+      end
+    end
+  end
+end
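The HTML branch of `parse_error` exists because Perplexity answers auth failures with an HTML page rather than JSON. The title extraction in miniature (the error body is illustrative):

```ruby
body = '<html><head><title>401 Authorization Required</title></head><body></body></html>'

title_match = body.match(%r{<title>(.+?)</title>})
title_match[1].sub(/^\d+\s+/, '') # => "Authorization Required"
```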
data/lib/ruby_llm/railtie.rb
CHANGED

data/lib/ruby_llm/stream_accumulator.rb
CHANGED
@@ -29,14 +29,15 @@ module RubyLLM
       RubyLLM.logger.debug inspect
     end
 
-    def to_message
+    def to_message(response)
       Message.new(
         role: :assistant,
         content: content.empty? ? nil : content,
         model_id: model_id,
         tool_calls: tool_calls_from_stream,
         input_tokens: @input_tokens.positive? ? @input_tokens : nil,
-        output_tokens: @output_tokens.positive? ? @output_tokens : nil
+        output_tokens: @output_tokens.positive? ? @output_tokens : nil,
+        raw: response
       )
     end
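`to_message(response)` threads the Faraday response captured in streaming.rb into the assembled `Message` as `raw`. Assuming `Message` exposes the new `raw` attribute (message.rb gains it in this release), a streamed reply becomes inspectable after the fact:

```ruby
chat = RubyLLM.chat
message = chat.ask('Hello') { |chunk| print chunk.content }

message.raw         # the Faraday::Response behind the stream
message.raw.status  # e.g. 200
message.raw.headers # rate-limit headers and the like
```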
data/lib/ruby_llm/streaming.rb
CHANGED
@@ -11,7 +11,7 @@ module RubyLLM
     def stream_response(connection, payload, &block)
       accumulator = StreamAccumulator.new
 
-      connection.post stream_url, payload do |req|
+      response = connection.post stream_url, payload do |req|
         if req.options.respond_to?(:on_data)
           # Handle Faraday 2.x streaming with on_data method
           req.options.on_data = handle_stream do |chunk|
@@ -27,7 +27,7 @@ module RubyLLM
         end
       end
 
-      accumulator.to_message
+      accumulator.to_message(response)
     end
 
     def handle_stream(&block)
@@ -55,13 +55,13 @@ module RubyLLM
       end
     end
 
-    def process_stream_chunk(chunk, parser,
+    def process_stream_chunk(chunk, parser, env, &)
       RubyLLM.logger.debug "Received chunk: #{chunk}"
 
       if error_chunk?(chunk)
-        handle_error_chunk(chunk,
+        handle_error_chunk(chunk, env)
       else
-        yield handle_sse(chunk, parser,
+        yield handle_sse(chunk, parser, env, &)
       end
     end
 
@@ -88,7 +88,16 @@ module RubyLLM
     def handle_error_chunk(chunk, env)
       error_data = chunk.split("\n")[1].delete_prefix('data: ')
       status, _message = parse_streaming_error(error_data)
-
+      parsed_data = JSON.parse(error_data)
+
+      # Create a response-like object that works for both Faraday v1 and v2
+      error_response = if env
+                         env.merge(body: parsed_data, status: status)
+                       else
+                         # For Faraday v1, create a simple object that responds to .status and .body
+                         Struct.new(:body, :status).new(parsed_data, status)
+                       end
+
       ErrorMiddleware.parse_error(provider: self, response: error_response)
     rescue JSON::ParserError => e
       RubyLLM.logger.debug "Failed to parse error chunk: #{e.message}"
@@ -122,7 +131,16 @@ module RubyLLM
 
     def handle_error_event(data, env)
       status, _message = parse_streaming_error(data)
-
+      parsed_data = JSON.parse(data)
+
+      # Create a response-like object that works for both Faraday v1 and v2
+      error_response = if env
+                         env.merge(body: parsed_data, status: status)
+                       else
+                         # For Faraday v1, create a simple object that responds to .status and .body
+                         Struct.new(:body, :status).new(parsed_data, status)
+                       end
+
       ErrorMiddleware.parse_error(provider: self, response: error_response)
     rescue JSON::ParserError => e
       RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
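The Faraday v1 fallback builds a minimal duck-typed response so `ErrorMiddleware.parse_error` can read `body` and `status` regardless of Faraday version. The same idea in isolation (the payload is illustrative):

```ruby
require 'json'

parsed = JSON.parse('{"error":{"type":"server_error","message":"boom"}}')
error_response = Struct.new(:body, :status).new(parsed, 500)

error_response.status                   # => 500
error_response.body['error']['message'] # => "boom"
```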
data/lib/ruby_llm/utils.rb
CHANGED
@@ -23,5 +23,15 @@ module RubyLLM
         Array(item)
       end
     end
+
+    def deep_merge(params, payload)
+      params.merge(payload) do |_key, params_value, payload_value|
+        if params_value.is_a?(Hash) && payload_value.is_a?(Hash)
+          deep_merge(params_value, payload_value)
+        else
+          payload_value
+        end
+      end
+    end
   end
 end
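`deep_merge` recurses through nested hashes and lets `payload` win on scalar conflicts, where `Hash#merge` alone would replace whole subtrees. With illustrative keys:

```ruby
params  = { generationConfig: { temperature: 0.7, topP: 0.9 } }
payload = { generationConfig: { temperature: 0.2 }, contents: [] }

RubyLLM::Utils.deep_merge(params, payload)
# => { generationConfig: { temperature: 0.2, topP: 0.9 }, contents: [] }
```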
data/lib/ruby_llm/version.rb
CHANGED
data/lib/ruby_llm.rb
CHANGED
@@ -16,13 +16,17 @@ loader.inflector.inflect(
   'openai' => 'OpenAI',
   'api' => 'API',
   'deepseek' => 'DeepSeek',
+  'perplexity' => 'Perplexity',
   'bedrock' => 'Bedrock',
   'openrouter' => 'OpenRouter',
+  'gpustack' => 'GPUStack',
+  'mistral' => 'Mistral',
   'pdf' => 'PDF'
 )
 loader.ignore("#{__dir__}/tasks")
 loader.ignore("#{__dir__}/ruby_llm/railtie")
 loader.ignore("#{__dir__}/ruby_llm/active_record")
+loader.ignore("#{__dir__}/generators")
 loader.setup
 
 # A delightful Ruby interface to modern AI language models.
@@ -76,13 +80,16 @@ module RubyLLM
   end
 end
 
-RubyLLM::Provider.register :openai, RubyLLM::Providers::OpenAI
 RubyLLM::Provider.register :anthropic, RubyLLM::Providers::Anthropic
-RubyLLM::Provider.register :gemini, RubyLLM::Providers::Gemini
-RubyLLM::Provider.register :deepseek, RubyLLM::Providers::DeepSeek
 RubyLLM::Provider.register :bedrock, RubyLLM::Providers::Bedrock
-RubyLLM::Provider.register :openrouter, RubyLLM::Providers::OpenRouter
+RubyLLM::Provider.register :deepseek, RubyLLM::Providers::DeepSeek
+RubyLLM::Provider.register :gemini, RubyLLM::Providers::Gemini
+RubyLLM::Provider.register :gpustack, RubyLLM::Providers::GPUStack
+RubyLLM::Provider.register :mistral, RubyLLM::Providers::Mistral
 RubyLLM::Provider.register :ollama, RubyLLM::Providers::Ollama
+RubyLLM::Provider.register :openai, RubyLLM::Providers::OpenAI
+RubyLLM::Provider.register :openrouter, RubyLLM::Providers::OpenRouter
+RubyLLM::Provider.register :perplexity, RubyLLM::Providers::Perplexity
 
 if defined?(Rails::Railtie)
   require 'ruby_llm/railtie'
data/lib/tasks/models_docs.rake
CHANGED
@@ -6,14 +6,14 @@ require 'fileutils'
 namespace :models do
   desc 'Generate available models documentation'
   task :docs do
-    FileUtils.mkdir_p('docs
+    FileUtils.mkdir_p('docs') # ensure output directory exists
 
     # Generate markdown content
     output = generate_models_markdown
 
     # Write the output
-    File.write('docs/
-    puts 'Generated docs/
+    File.write('docs/available-models.md', output)
+    puts 'Generated docs/available-models.md'
   end
 end
 
@@ -22,15 +22,15 @@ def generate_models_markdown
 ---
 layout: default
 title: Available Models
-
-
-
+nav_order: 5
+permalink: /available-models
+description: Browse hundreds of AI models from every major provider. Always up-to-date, automatically generated.
 ---
 
 # Available Models
 {: .no_toc }
 
-
+Every model, every provider, always current. Your complete AI model reference.
 {: .fs-6 .fw-300 }
 
 ## Table of contents
data/lib/tasks/models_update.rake
CHANGED
@@ -22,7 +22,9 @@ def configure_from_env
   config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
   config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
   config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+  config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
   config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+  config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
   configure_bedrock(config)
   config.request_timeout = 30
 end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 1.3.2beta1
+  version: 1.5.0
 platform: ruby
 authors:
 - Carmine Paolino
@@ -135,6 +135,15 @@ extra_rdoc_files: []
 files:
 - LICENSE
 - README.md
+- lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt
+- lib/generators/ruby_llm/install/templates/chat_model.rb.tt
+- lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt
+- lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt
+- lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt
+- lib/generators/ruby_llm/install/templates/initializer.rb.tt
+- lib/generators/ruby_llm/install/templates/message_model.rb.tt
+- lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt
+- lib/generators/ruby_llm/install_generator.rb
 - lib/ruby_llm.rb
 - lib/ruby_llm/active_record/acts_as.rb
 - lib/ruby_llm/aliases.json
@@ -192,6 +201,14 @@ files:
 - lib/ruby_llm/providers/gemini/models.rb
 - lib/ruby_llm/providers/gemini/streaming.rb
 - lib/ruby_llm/providers/gemini/tools.rb
+- lib/ruby_llm/providers/gpustack.rb
+- lib/ruby_llm/providers/gpustack/chat.rb
+- lib/ruby_llm/providers/gpustack/models.rb
+- lib/ruby_llm/providers/mistral.rb
+- lib/ruby_llm/providers/mistral/capabilities.rb
+- lib/ruby_llm/providers/mistral/chat.rb
+- lib/ruby_llm/providers/mistral/embeddings.rb
+- lib/ruby_llm/providers/mistral/models.rb
 - lib/ruby_llm/providers/ollama.rb
 - lib/ruby_llm/providers/ollama/chat.rb
 - lib/ruby_llm/providers/ollama/media.rb
@@ -206,6 +223,10 @@ files:
 - lib/ruby_llm/providers/openai/tools.rb
 - lib/ruby_llm/providers/openrouter.rb
 - lib/ruby_llm/providers/openrouter/models.rb
+- lib/ruby_llm/providers/perplexity.rb
+- lib/ruby_llm/providers/perplexity/capabilities.rb
+- lib/ruby_llm/providers/perplexity/chat.rb
+- lib/ruby_llm/providers/perplexity/models.rb
 - lib/ruby_llm/railtie.rb
 - lib/ruby_llm/stream_accumulator.rb
 - lib/ruby_llm/streaming.rb