ruby_llm 1.4.0 → 1.5.0
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +27 -5
- data/lib/generators/ruby_llm/install_generator.rb +7 -7
- data/lib/ruby_llm/configuration.rb +2 -0
- data/lib/ruby_llm/models.json +2437 -496
- data/lib/ruby_llm/providers/mistral/capabilities.rb +151 -0
- data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
- data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
- data/lib/ruby_llm/providers/mistral/models.rb +49 -0
- data/lib/ruby_llm/providers/mistral.rb +37 -0
- data/lib/ruby_llm/providers/perplexity/capabilities.rb +167 -0
- data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
- data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
- data/lib/ruby_llm/providers/perplexity.rb +57 -0
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm.rb +9 -5
- data/lib/tasks/models_docs.rake +5 -6
- data/lib/tasks/models_update.rake +2 -0
- metadata +10 -1
data/lib/ruby_llm/providers/mistral/capabilities.rb ADDED
@@ -0,0 +1,151 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Mistral
+      # Determines capabilities for Mistral models
+      module Capabilities
+        module_function
+
+        def supports_streaming?(_model_id)
+          true
+        end
+
+        def supports_tools?(_model_id)
+          true
+        end
+
+        def supports_vision?(model_id)
+          model_id.include?('pixtral')
+        end
+
+        def supports_json_mode?(_model_id)
+          true
+        end
+
+        def format_display_name(model_id)
+          case model_id
+          when /mistral-large/ then 'Mistral Large'
+          when /mistral-medium/ then 'Mistral Medium'
+          when /mistral-small/ then 'Mistral Small'
+          when /ministral-3b/ then 'Ministral 3B'
+          when /ministral-8b/ then 'Ministral 8B'
+          when /codestral/ then 'Codestral'
+          when /pixtral-large/ then 'Pixtral Large'
+          when /pixtral-12b/ then 'Pixtral 12B'
+          when /mistral-embed/ then 'Mistral Embed'
+          when /mistral-moderation/ then 'Mistral Moderation'
+          else model_id.split('-').map(&:capitalize).join(' ')
+          end
+        end
+
+        def model_family(model_id)
+          case model_id
+          when /mistral-large/ then 'mistral-large'
+          when /mistral-medium/ then 'mistral-medium'
+          when /mistral-small/ then 'mistral-small'
+          when /ministral/ then 'ministral'
+          when /codestral/ then 'codestral'
+          when /pixtral/ then 'pixtral'
+          when /mistral-embed/ then 'mistral-embed'
+          when /mistral-moderation/ then 'mistral-moderation'
+          else 'mistral'
+          end
+        end
+
+        def context_window_for(_model_id)
+          32_768 # Default for most Mistral models
+        end
+
+        def max_tokens_for(_model_id)
+          8192 # Default for most Mistral models
+        end
+
+        def modalities_for(model_id)
+          case model_id
+          when /pixtral/
+            {
+              input: %w[text image],
+              output: ['text']
+            }
+          when /embed/
+            {
+              input: ['text'],
+              output: ['embedding']
+            }
+          else
+            {
+              input: ['text'],
+              output: ['text']
+            }
+          end
+        end
+
+        def capabilities_for(model_id)
+          case model_id
+          when /embed/ then { embeddings: true }
+          when /moderation/ then { moderation: true }
+          else
+            {
+              chat: true,
+              streaming: supports_streaming?(model_id),
+              tools: supports_tools?(model_id),
+              vision: supports_vision?(model_id),
+              json_mode: supports_json_mode?(model_id)
+            }
+          end
+        end
+
+        def pricing_for(_model_id)
+          {
+            input: 0.0,
+            output: 0.0
+          }
+        end
+
+        def release_date_for(model_id)
+          case model_id
+          # 2023 releases
+          when 'open-mistral-7b', 'mistral-tiny' then '2023-09-27'
+          when 'mistral-medium-2312', 'mistral-small-2312', 'mistral-small',
+               'open-mixtral-8x7b', 'mistral-tiny-2312' then '2023-12-11'
+
+          # 2024 releases
+          when 'mistral-embed' then '2024-01-11'
+          when 'mistral-large-2402', 'mistral-small-2402' then '2024-02-26'
+          when 'open-mixtral-8x22b', 'open-mixtral-8x22b-2404' then '2024-04-17'
+          when 'codestral-2405' then '2024-05-22'
+          when 'codestral-mamba-2407', 'codestral-mamba-latest', 'open-codestral-mamba' then '2024-07-16'
+          when 'open-mistral-nemo', 'open-mistral-nemo-2407', 'mistral-tiny-2407',
+               'mistral-tiny-latest' then '2024-07-18'
+          when 'mistral-large-2407' then '2024-07-24'
+          when 'pixtral-12b-2409', 'pixtral-12b-latest', 'pixtral-12b' then '2024-09-17'
+          when 'mistral-small-2409' then '2024-09-18'
+          when 'ministral-3b-2410', 'ministral-3b-latest', 'ministral-8b-2410',
+               'ministral-8b-latest' then '2024-10-16'
+          when 'pixtral-large-2411', 'pixtral-large-latest', 'mistral-large-pixtral-2411' then '2024-11-12'
+          when 'mistral-large-2411', 'mistral-large-latest', 'mistral-large' then '2024-11-20'
+          when 'codestral-2411-rc5', 'mistral-moderation-2411', 'mistral-moderation-latest' then '2024-11-26'
+          when 'codestral-2412' then '2024-12-17'
+
+          # 2025 releases
+          when 'mistral-small-2501' then '2025-01-13'
+          when 'codestral-2501' then '2025-01-14'
+          when 'mistral-saba-2502', 'mistral-saba-latest' then '2025-02-18'
+          when 'mistral-small-2503' then '2025-03-03'
+          when 'mistral-ocr-2503' then '2025-03-21'
+          when 'mistral-medium', 'mistral-medium-latest', 'mistral-medium-2505' then '2025-05-06'
+          when 'codestral-embed', 'codestral-embed-2505' then '2025-05-21'
+          when 'mistral-ocr-2505', 'mistral-ocr-latest' then '2025-05-23'
+          when 'devstral-small-2505' then '2025-05-28'
+          when 'mistral-small-2506', 'mistral-small-latest', 'magistral-medium-2506',
+               'magistral-medium-latest' then '2025-06-10'
+          when 'devstral-small-2507', 'devstral-small-latest', 'devstral-medium-2507',
+               'devstral-medium-latest' then '2025-07-09'
+          when 'codestral-2508', 'codestral-latest' then '2025-08-30'
+          end
+        end
+      end
+    end
+  end
+end
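Since Capabilities is pure pattern-matching over model IDs, its behavior is easy to spot-check. A few worked lookups, read straight off the case statements above (a sketch, assuming the module nesting shown in the diff):

caps = RubyLLM::Providers::Mistral::Capabilities
caps.supports_vision?('pixtral-12b-latest')     # => true ('pixtral' substring)
caps.model_family('codestral-2501')             # => 'codestral'
caps.release_date_for('mistral-embed')          # => '2024-01-11'
caps.format_display_name('devstral-small-2507') # => 'Devstral Small 2507' (fallback branch)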
data/lib/ruby_llm/providers/mistral/chat.rb ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Mistral
+      # Chat methods for Mistral API
+      module Chat
+        module_function
+
+        def format_role(role)
+          # Mistral doesn't use the new OpenAI convention for system prompts
+          role.to_s
+        end
+
+        # rubocop:disable Metrics/ParameterLists
+        def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil)
+          payload = super
+          # Mistral doesn't support stream_options
+          payload.delete(:stream_options)
+          payload
+        end
+        # rubocop:enable Metrics/ParameterLists
+      end
+    end
+  end
+end
data/lib/ruby_llm/providers/mistral/embeddings.rb ADDED
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Mistral
+      # Embeddings methods for Mistral API
+      module Embeddings
+        module_function
+
+        def embedding_url(...)
+          'embeddings'
+        end
+
+        def render_embedding_payload(text, model:, dimensions:) # rubocop:disable Lint/UnusedMethodArgument
+          # Mistral doesn't support dimensions parameter
+          {
+            model: model,
+            input: text
+          }
+        end
+
+        def parse_embedding_response(response, model:, text:)
+          data = response.body
+          input_tokens = data.dig('usage', 'prompt_tokens') || 0
+          vectors = data['data'].map { |d| d['embedding'] }
+
+          # If we only got one embedding AND the input was a single string (not an array),
+          # return it as a single vector
+          vectors = vectors.first if vectors.length == 1 && !text.is_a?(Array)
+
+          Embedding.new(vectors:, model:, input_tokens:)
+        end
+      end
+    end
+  end
+end
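Note the single-versus-batch branch at the end of parse_embedding_response: a lone string in yields one flat vector out, while an array in keeps an array of vectors. A usage sketch, assuming the gem's existing RubyLLM.embed API and a configured mistral_api_key:

single = RubyLLM.embed('hello world', model: 'mistral-embed')
single.vectors # => one flat array of floats

batch = RubyLLM.embed(['hello', 'world'], model: 'mistral-embed')
batch.vectors  # => two embeddings, one per input string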
data/lib/ruby_llm/providers/mistral/models.rb ADDED
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Mistral
+      # Model information for Mistral
+      module Models
+        module_function
+
+        def models_url
+          'models'
+        end
+
+        def headers(config)
+          {
+            'Authorization' => "Bearer #{config.mistral_api_key}"
+          }
+        end
+
+        def parse_list_models_response(response, slug, capabilities)
+          Array(response.body['data']).map do |model_data|
+            model_id = model_data['id']
+
+            # Use fixed release date for Mistral models
+            release_date = capabilities.release_date_for(model_id)
+            created_at = release_date ? Time.parse(release_date) : nil
+
+            Model::Info.new(
+              id: model_id,
+              name: capabilities.format_display_name(model_id),
+              provider: slug,
+              family: capabilities.model_family(model_id),
+              created_at: created_at,
+              context_window: capabilities.context_window_for(model_id),
+              max_output_tokens: capabilities.max_tokens_for(model_id),
+              modalities: capabilities.modalities_for(model_id),
+              capabilities: capabilities.capabilities_for(model_id),
+              pricing: capabilities.pricing_for(model_id),
+              metadata: {
+                object: model_data['object'],
+                owned_by: model_data['owned_by']
+              }
+            )
+          end
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/providers/mistral.rb ADDED
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Mistral API integration.
+    module Mistral
+      extend OpenAI
+      extend Mistral::Chat
+      extend Mistral::Models
+      extend Mistral::Embeddings
+
+      module_function
+
+      def api_base(_config)
+        'https://api.mistral.ai/v1'
+      end
+
+      def headers(config)
+        {
+          'Authorization' => "Bearer #{config.mistral_api_key}"
+        }
+      end
+
+      def capabilities
+        Mistral::Capabilities
+      end
+
+      def slug
+        'mistral'
+      end
+
+      def configuration_requirements
+        %i[mistral_api_key]
+      end
+    end
+  end
+end
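Taken together, these files register Mistral as an OpenAI-compatible provider: the module extends OpenAI and overrides only the endpoint, auth header, and API quirks. A minimal usage sketch, assuming the gem's existing top-level API (RubyLLM.configure and RubyLLM.chat) and an API key in the environment:

require 'ruby_llm'

RubyLLM.configure do |config|
  config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY')
end

chat = RubyLLM.chat(model: 'mistral-small-latest')
puts chat.ask('What is Codestral for, in one sentence?').content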
data/lib/ruby_llm/providers/perplexity/capabilities.rb ADDED
@@ -0,0 +1,167 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Perplexity
+      # Determines capabilities and pricing for Perplexity models
+      module Capabilities
+        module_function
+
+        # Returns the context window size for the given model ID
+        # @param model_id [String] the model identifier
+        # @return [Integer] the context window size in tokens
+        def context_window_for(model_id)
+          case model_id
+          when /sonar-pro/ then 200_000
+          else 128_000
+          end
+        end
+
+        # Returns the maximum number of tokens that can be generated
+        # @param model_id [String] the model identifier
+        # @return [Integer] the maximum number of tokens
+        def max_tokens_for(model_id)
+          case model_id
+          when /sonar-(?:pro|reasoning-pro)/ then 8_192
+          else 4_096
+          end
+        end
+
+        # Returns the price per million tokens for input
+        # @param model_id [String] the model identifier
+        # @return [Float] the price per million tokens in USD
+        def input_price_for(model_id)
+          PRICES.dig(model_family(model_id), :input) || 1.0
+        end
+
+        # Returns the price per million tokens for output
+        # @param model_id [String] the model identifier
+        # @return [Float] the price per million tokens in USD
+        def output_price_for(model_id)
+          PRICES.dig(model_family(model_id), :output) || 1.0
+        end
+
+        # Determines if the model supports vision capabilities
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports vision
+        def supports_vision?(model_id)
+          case model_id
+          when /sonar-reasoning-pro/, /sonar-reasoning/, /sonar-pro/, /sonar/ then true
+          else false
+          end
+        end
+
+        # Determines if the model supports function calling
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports functions
+        def supports_functions?(_model_id)
+          false
+        end
+
+        # Determines if the model supports JSON mode
+        def supports_json_mode?(_model_id)
+          true
+        end
+
+        # Formats the model ID into a human-readable display name
+        # @param model_id [String] the model identifier
+        # @return [String] the formatted display name
+        def format_display_name(model_id)
+          case model_id
+          when 'sonar' then 'Sonar'
+          when 'sonar-pro' then 'Sonar Pro'
+          when 'sonar-reasoning' then 'Sonar Reasoning'
+          when 'sonar-reasoning-pro' then 'Sonar Reasoning Pro'
+          when 'sonar-deep-research' then 'Sonar Deep Research'
+          else
+            model_id.split('-')
+                    .map(&:capitalize)
+                    .join(' ')
+          end
+        end
+
+        # Returns the model type
+        # @param model_id [String] the model identifier
+        # @return [String] the model type (e.g., 'chat')
+        def model_type(_model_id)
+          'chat'
+        end
+
+        # Returns the model family
+        # @param model_id [String] the model identifier
+        # @return [Symbol] the model family
+        def model_family(model_id)
+          case model_id
+          when 'sonar' then :sonar
+          when 'sonar-pro' then :sonar_pro
+          when 'sonar-reasoning' then :sonar_reasoning
+          when 'sonar-reasoning-pro' then :sonar_reasoning_pro
+          when 'sonar-deep-research' then :sonar_deep_research
+          else :unknown
+          end
+        end
+
+        def modalities_for(_model_id)
+          {
+            input: ['text'],
+            output: ['text']
+          }
+        end
+
+        def capabilities_for(model_id)
+          capabilities = %w[streaming json_mode]
+          capabilities << 'vision' if supports_vision?(model_id)
+          capabilities
+        end
+
+        def pricing_for(model_id)
+          family = model_family(model_id)
+          prices = PRICES.fetch(family, { input: 1.0, output: 1.0 })
+
+          standard_pricing = {
+            input_per_million: prices[:input],
+            output_per_million: prices[:output]
+          }
+
+          # Add special pricing if available
+          standard_pricing[:citation_per_million] = prices[:citation] if prices[:citation]
+          standard_pricing[:reasoning_per_million] = prices[:reasoning] if prices[:reasoning]
+          standard_pricing[:search_per_thousand] = prices[:search_queries] if prices[:search_queries]
+
+          {
+            text_tokens: {
+              standard: standard_pricing
+            }
+          }
+        end
+
+        # Pricing information for Perplexity models (USD per 1M tokens)
+        PRICES = {
+          sonar: {
+            input: 1.0,
+            output: 1.0
+          },
+          sonar_pro: {
+            input: 3.0,
+            output: 15.0
+          },
+          sonar_reasoning: {
+            input: 1.0,
+            output: 5.0
+          },
+          sonar_reasoning_pro: {
+            input: 2.0,
+            output: 8.0
+          },
+          sonar_deep_research: {
+            input: 2.0,
+            output: 8.0,
+            citation: 2.0,
+            reasoning: 3.0,
+            search_queries: 5.0
+          }
+        }.freeze
+      end
+    end
+  end
+end
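Unlike the Mistral module, this one carries real pricing data: the PRICES table at the bottom feeds both the *_price_for helpers and pricing_for, keyed by the family symbols from model_family. A few worked lookups, read off the tables above (a sketch):

caps = RubyLLM::Providers::Perplexity::Capabilities
caps.context_window_for('sonar-pro')        # => 200_000
caps.max_tokens_for('sonar-reasoning-pro')  # => 8_192
caps.input_price_for('sonar-deep-research') # => 2.0 (USD per 1M input tokens)
caps.pricing_for('sonar-deep-research')
# => { text_tokens: { standard: { input_per_million: 2.0, output_per_million: 8.0,
#      citation_per_million: 2.0, reasoning_per_million: 3.0, search_per_thousand: 5.0 } } }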
data/lib/ruby_llm/providers/perplexity/chat.rb ADDED
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Perplexity
+      # Chat formatting for Perplexity provider
+      module Chat
+        module_function
+
+        def format_role(role)
+          # Perplexity doesn't use the new OpenAI convention for system prompts
+          role.to_s
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/providers/perplexity/models.rb ADDED
@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Perplexity
+      # Models methods of the Perplexity API integration
+      module Models
+        def list_models(**)
+          slug = 'perplexity'
+          capabilities = Perplexity::Capabilities
+          parse_list_models_response(nil, slug, capabilities)
+        end
+
+        def parse_list_models_response(_response, slug, capabilities)
+          [
+            create_model_info('sonar', slug, capabilities),
+            create_model_info('sonar-pro', slug, capabilities),
+            create_model_info('sonar-reasoning', slug, capabilities),
+            create_model_info('sonar-reasoning-pro', slug, capabilities),
+            create_model_info('sonar-deep-research', slug, capabilities)
+          ]
+        end
+
+        def create_model_info(id, slug, capabilities)
+          Model::Info.new(
+            id: id,
+            name: capabilities.format_display_name(id),
+            provider: slug,
+            family: capabilities.model_family(id).to_s,
+            created_at: Time.now,
+            context_window: capabilities.context_window_for(id),
+            max_output_tokens: capabilities.max_tokens_for(id),
+            modalities: capabilities.modalities_for(id),
+            capabilities: capabilities.capabilities_for(id),
+            pricing: capabilities.pricing_for(id),
+            metadata: {}
+          )
+        end
+      end
+    end
+  end
+end
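Perplexity exposes no model-listing endpoint, so list_models ignores the response and returns a hard-coded catalog of the five Sonar models. Roughly what that surfaces (a sketch):

RubyLLM::Providers::Perplexity.list_models.map(&:id)
# => ["sonar", "sonar-pro", "sonar-reasoning", "sonar-reasoning-pro", "sonar-deep-research"]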
data/lib/ruby_llm/providers/perplexity.rb ADDED
@@ -0,0 +1,57 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Perplexity API integration.
+    module Perplexity
+      extend OpenAI
+      extend Perplexity::Chat
+      extend Perplexity::Models
+
+      module_function
+
+      def api_base(_config)
+        'https://api.perplexity.ai'
+      end
+
+      def headers(config)
+        {
+          'Authorization' => "Bearer #{config.perplexity_api_key}",
+          'Content-Type' => 'application/json'
+        }
+      end
+
+      def capabilities
+        Perplexity::Capabilities
+      end
+
+      def slug
+        'perplexity'
+      end
+
+      def configuration_requirements
+        %i[perplexity_api_key]
+      end
+
+      def parse_error(response)
+        body = response.body
+        return if body.empty?
+
+        # If response is HTML (Perplexity returns HTML for auth errors)
+        if body.include?('<html>') && body.include?('<title>')
+          # Extract title content
+          title_match = body.match(%r{<title>(.+?)</title>})
+          if title_match
+            # Clean up the title - remove status code if present
+            message = title_match[1]
+            message = message.sub(/^\d+\s+/, '') # Remove leading digits and space
+            return message
+          end
+        end
+
+        # Fall back to parent's implementation
+        super
+      end
+    end
+  end
+end
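The parse_error override exists because Perplexity returns an HTML page rather than a JSON body for authentication failures. The title-extraction logic in isolation, directly mirroring the regexes above:

body  = '<html><head><title>401 Authorization Required</title></head></html>'
title = body.match(%r{<title>(.+?)</title>})[1] # => "401 Authorization Required"
title.sub(/^\d+\s+/, '')                        # => "Authorization Required"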
data/lib/ruby_llm/version.rb CHANGED
data/lib/ruby_llm.rb CHANGED
@@ -16,9 +16,11 @@ loader.inflector.inflect(
   'openai' => 'OpenAI',
   'api' => 'API',
   'deepseek' => 'DeepSeek',
+  'perplexity' => 'Perplexity',
   'bedrock' => 'Bedrock',
   'openrouter' => 'OpenRouter',
   'gpustack' => 'GPUStack',
+  'mistral' => 'Mistral',
   'pdf' => 'PDF'
 )
 loader.ignore("#{__dir__}/tasks")
@@ -78,14 +80,16 @@ module RubyLLM
   end
 end
 
-RubyLLM::Provider.register :openai, RubyLLM::Providers::OpenAI
 RubyLLM::Provider.register :anthropic, RubyLLM::Providers::Anthropic
-RubyLLM::Provider.register :gemini, RubyLLM::Providers::Gemini
-RubyLLM::Provider.register :deepseek, RubyLLM::Providers::DeepSeek
 RubyLLM::Provider.register :bedrock, RubyLLM::Providers::Bedrock
-RubyLLM::Provider.register :openrouter, RubyLLM::Providers::OpenRouter
-RubyLLM::Provider.register :ollama, RubyLLM::Providers::Ollama
+RubyLLM::Provider.register :deepseek, RubyLLM::Providers::DeepSeek
+RubyLLM::Provider.register :gemini, RubyLLM::Providers::Gemini
 RubyLLM::Provider.register :gpustack, RubyLLM::Providers::GPUStack
+RubyLLM::Provider.register :mistral, RubyLLM::Providers::Mistral
+RubyLLM::Provider.register :ollama, RubyLLM::Providers::Ollama
+RubyLLM::Provider.register :openai, RubyLLM::Providers::OpenAI
+RubyLLM::Provider.register :openrouter, RubyLLM::Providers::OpenRouter
+RubyLLM::Provider.register :perplexity, RubyLLM::Providers::Perplexity
 
 if defined?(Rails::Railtie)
   require 'ruby_llm/railtie'
data/lib/tasks/models_docs.rake CHANGED
@@ -6,14 +6,14 @@ require 'fileutils'
 namespace :models do
   desc 'Generate available models documentation'
   task :docs do
-    FileUtils.mkdir_p('docs/guides')
+    FileUtils.mkdir_p('docs') # ensure output directory exists
 
     # Generate markdown content
     output = generate_models_markdown
 
     # Write the output
-    File.write('docs/guides/available-models.md', output)
-    puts 'Generated docs/guides/available-models.md'
+    File.write('docs/available-models.md', output)
+    puts 'Generated docs/available-models.md'
   end
 end
 
@@ -22,9 +22,8 @@ def generate_models_markdown
     ---
     layout: default
     title: Available Models
-
-
-    permalink: /guides/available-models
+    nav_order: 5
+    permalink: /available-models
     description: Browse hundreds of AI models from every major provider. Always up-to-date, automatically generated.
     ---
 
data/lib/tasks/models_update.rake CHANGED
@@ -22,7 +22,9 @@ def configure_from_env
     config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
     config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
    config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+    config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
     config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+    config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
     configure_bedrock(config)
     config.request_timeout = 30
   end
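With those two ENV keys wired in, the update task can talk to both new providers. A hedged sketch of the equivalent application-side setup:

RubyLLM.configure do |config|
  config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
  config.mistral_api_key    = ENV.fetch('MISTRAL_API_KEY', nil)
end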