ruby_llm_community 0.0.6 → 1.1.0
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +20 -3
- data/lib/generators/ruby_llm/chat_ui/chat_ui_generator.rb +127 -0
- data/lib/generators/ruby_llm/chat_ui/templates/controllers/chats_controller.rb.tt +39 -0
- data/lib/generators/ruby_llm/chat_ui/templates/controllers/messages_controller.rb.tt +24 -0
- data/lib/generators/ruby_llm/chat_ui/templates/controllers/models_controller.rb.tt +14 -0
- data/lib/generators/ruby_llm/chat_ui/templates/jobs/chat_response_job.rb.tt +12 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_chat.html.erb.tt +16 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_form.html.erb.tt +29 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/index.html.erb.tt +16 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/new.html.erb.tt +11 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/show.html.erb.tt +23 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_form.html.erb.tt +21 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_message.html.erb.tt +10 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/create.turbo_stream.erb.tt +9 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt +16 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt +30 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt +18 -0
- data/lib/generators/ruby_llm/install/install_generator.rb +227 -0
- data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +2 -2
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +4 -4
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +8 -7
- data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +43 -0
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +6 -5
- data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +10 -4
- data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +4 -3
- data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +2 -2
- data/lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt +137 -0
- data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +170 -0
- data/lib/ruby_llm/active_record/acts_as.rb +112 -332
- data/lib/ruby_llm/active_record/acts_as_legacy.rb +403 -0
- data/lib/ruby_llm/active_record/chat_methods.rb +336 -0
- data/lib/ruby_llm/active_record/message_methods.rb +72 -0
- data/lib/ruby_llm/active_record/model_methods.rb +84 -0
- data/lib/ruby_llm/aliases.json +130 -11
- data/lib/ruby_llm/aliases.rb +7 -25
- data/lib/ruby_llm/attachment.rb +22 -0
- data/lib/ruby_llm/chat.rb +10 -17
- data/lib/ruby_llm/configuration.rb +11 -12
- data/lib/ruby_llm/connection.rb +4 -4
- data/lib/ruby_llm/connection_multipart.rb +19 -0
- data/lib/ruby_llm/content.rb +5 -2
- data/lib/ruby_llm/embedding.rb +1 -2
- data/lib/ruby_llm/error.rb +0 -8
- data/lib/ruby_llm/image.rb +23 -8
- data/lib/ruby_llm/image_attachment.rb +30 -0
- data/lib/ruby_llm/message.rb +7 -7
- data/lib/ruby_llm/model/info.rb +12 -10
- data/lib/ruby_llm/model/pricing.rb +0 -3
- data/lib/ruby_llm/model/pricing_category.rb +0 -2
- data/lib/ruby_llm/model/pricing_tier.rb +0 -1
- data/lib/ruby_llm/models.json +4705 -2144
- data/lib/ruby_llm/models.rb +56 -35
- data/lib/ruby_llm/provider.rb +14 -12
- data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -46
- data/lib/ruby_llm/providers/anthropic/chat.rb +2 -2
- data/lib/ruby_llm/providers/anthropic/media.rb +1 -2
- data/lib/ruby_llm/providers/anthropic/tools.rb +1 -2
- data/lib/ruby_llm/providers/anthropic.rb +1 -2
- data/lib/ruby_llm/providers/bedrock/chat.rb +2 -4
- data/lib/ruby_llm/providers/bedrock/media.rb +0 -1
- data/lib/ruby_llm/providers/bedrock/models.rb +19 -3
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +0 -12
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +0 -7
- data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +0 -12
- data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +0 -12
- data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +0 -13
- data/lib/ruby_llm/providers/bedrock/streaming.rb +0 -18
- data/lib/ruby_llm/providers/bedrock.rb +1 -2
- data/lib/ruby_llm/providers/deepseek/capabilities.rb +1 -2
- data/lib/ruby_llm/providers/deepseek/chat.rb +0 -1
- data/lib/ruby_llm/providers/gemini/capabilities.rb +28 -100
- data/lib/ruby_llm/providers/gemini/chat.rb +57 -29
- data/lib/ruby_llm/providers/gemini/embeddings.rb +0 -2
- data/lib/ruby_llm/providers/gemini/images.rb +1 -2
- data/lib/ruby_llm/providers/gemini/media.rb +1 -2
- data/lib/ruby_llm/providers/gemini/models.rb +1 -2
- data/lib/ruby_llm/providers/gemini/streaming.rb +15 -1
- data/lib/ruby_llm/providers/gemini/tools.rb +0 -5
- data/lib/ruby_llm/providers/gpustack/chat.rb +11 -1
- data/lib/ruby_llm/providers/gpustack/media.rb +45 -0
- data/lib/ruby_llm/providers/gpustack/models.rb +44 -9
- data/lib/ruby_llm/providers/gpustack.rb +1 -0
- data/lib/ruby_llm/providers/mistral/capabilities.rb +2 -10
- data/lib/ruby_llm/providers/mistral/chat.rb +0 -2
- data/lib/ruby_llm/providers/mistral/embeddings.rb +0 -3
- data/lib/ruby_llm/providers/mistral/models.rb +0 -1
- data/lib/ruby_llm/providers/ollama/chat.rb +0 -1
- data/lib/ruby_llm/providers/ollama/media.rb +2 -7
- data/lib/ruby_llm/providers/ollama/models.rb +36 -0
- data/lib/ruby_llm/providers/ollama.rb +1 -0
- data/lib/ruby_llm/providers/openai/capabilities.rb +3 -16
- data/lib/ruby_llm/providers/openai/chat.rb +1 -3
- data/lib/ruby_llm/providers/openai/embeddings.rb +0 -3
- data/lib/ruby_llm/providers/openai/images.rb +73 -3
- data/lib/ruby_llm/providers/openai/media.rb +4 -5
- data/lib/ruby_llm/providers/openai/response.rb +121 -29
- data/lib/ruby_llm/providers/openai/response_media.rb +3 -3
- data/lib/ruby_llm/providers/openai/streaming.rb +110 -47
- data/lib/ruby_llm/providers/openai/tools.rb +12 -7
- data/lib/ruby_llm/providers/openai.rb +1 -3
- data/lib/ruby_llm/providers/openai_base.rb +2 -2
- data/lib/ruby_llm/providers/openrouter/models.rb +1 -16
- data/lib/ruby_llm/providers/perplexity/capabilities.rb +0 -1
- data/lib/ruby_llm/providers/perplexity/chat.rb +0 -1
- data/lib/ruby_llm/providers/perplexity.rb +1 -5
- data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
- data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
- data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
- data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
- data/lib/ruby_llm/providers/vertexai.rb +55 -0
- data/lib/ruby_llm/providers/xai/capabilities.rb +166 -0
- data/lib/ruby_llm/providers/xai/chat.rb +15 -0
- data/lib/ruby_llm/providers/xai/models.rb +48 -0
- data/lib/ruby_llm/providers/xai.rb +46 -0
- data/lib/ruby_llm/railtie.rb +20 -4
- data/lib/ruby_llm/stream_accumulator.rb +68 -10
- data/lib/ruby_llm/streaming.rb +16 -25
- data/lib/ruby_llm/tool.rb +2 -19
- data/lib/ruby_llm/tool_call.rb +0 -9
- data/lib/ruby_llm/utils.rb +5 -9
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm_community.rb +8 -5
- data/lib/tasks/models.rake +549 -0
- data/lib/tasks/release.rake +37 -2
- data/lib/tasks/ruby_llm.rake +15 -0
- data/lib/tasks/vcr.rake +2 -9
- metadata +44 -6
- data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +0 -108
- data/lib/generators/ruby_llm/install_generator.rb +0 -121
- data/lib/tasks/aliases.rake +0 -235
- data/lib/tasks/models_docs.rake +0 -224
- data/lib/tasks/models_update.rake +0 -108
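For orientation, the new install generator listed above ships an initializer template (`initializer.rb.tt`). Below is a minimal illustrative sketch of what a generated `config/initializers/ruby_llm.rb` might contain; the path is the standard Rails convention (an assumption, not taken from the package), and the configuration keys are only those that appear verbatim in the deleted models_update.rake shown at the end of this diff. It is not the shipped template.

    # config/initializers/ruby_llm.rb -- illustrative sketch only; path and contents
    # are assumptions based on Rails conventions and the config keys used in the
    # deleted models_update.rake below.
    RubyLLM.configure do |config|
      # Provider credentials read from the environment
      config.openai_api_key    = ENV.fetch('OPENAI_API_KEY', nil)
      config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
      config.gemini_api_key    = ENV.fetch('GEMINI_API_KEY', nil)

      # Connection settings
      config.request_timeout = 30
    end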
data/lib/tasks/models_docs.rake
DELETED
@@ -1,224 +0,0 @@
-# frozen_string_literal: true
-
-require 'dotenv/load'
-require 'fileutils'
-
-namespace :models do
-  desc 'Generate available models documentation'
-  task :docs do
-    FileUtils.mkdir_p('docs') # ensure output directory exists
-
-    # Generate markdown content
-    output = generate_models_markdown
-
-    # Write the output
-    File.write('docs/_reference/available-models.md', output)
-    puts 'Generated docs/_reference/available-models.md'
-  end
-end
-
-def generate_models_markdown
-  <<~MARKDOWN
-    ---
-    layout: default
-    title: Available Models
-    nav_order: 1
-    description: Browse hundreds of AI models from every major provider. Always up-to-date, automatically generated.
-    redirect_from:
-      - /guides/available-models
-    ---
-
-    # {{ page.title }}
-    {: .no_toc }
-
-    {{ page.description }}
-    {: .fs-6 .fw-300 }
-
-    ## Table of contents
-    {: .no_toc .text-delta }
-
-    1. TOC
-    {:toc}
-
-    ---
-
-    After reading this guide, you will know:
-
-    * How RubyLLM's model registry works and where data comes from
-    * How to find models by provider, capability, or purpose
-    * What information is available for each model
-    * How to use model aliases for simpler configuration
-
-    ## How Model Data Works
-
-    RubyLLM's model registry combines data from multiple sources:
-
-    - **OpenAI, Anthropic, DeepSeek, Gemini**: Data from [Parsera](https://api.parsera.org/v1/llm-specs)
-    - **OpenRouter**: Direct from OpenRouter's API
-    - **Other providers**: Defined in `capabilities.rb` files
-
-    ## Contributing Model Updates
-
-    **For major providers** (OpenAI, Anthropic, DeepSeek, Gemini): File issues with [Parsera](https://github.com/parsera-labs/api-llm-specs/issues) for public model data corrections.
-
-    **For other providers**: Edit `lib/ruby_llm/providers/<provider>/capabilities.rb` then run `rake models:update`.
-
-    See the [Contributing Guide](https://github.com/crmne/ruby_llm/blob/main/CONTRIBUTING.md) for details.
-
-    ## Last Updated
-    {: .d-inline-block }
-
-    #{Time.now.utc.strftime('%Y-%m-%d')}
-    {: .label .label-green }
-
-    ## Models by Provider
-
-    #{generate_provider_sections}
-
-    ## Models by Capability
-
-    #{generate_capability_sections}
-
-    ## Models by Modality
-
-    #{generate_modality_sections}
-  MARKDOWN
-end
-
-def generate_provider_sections
-  RubyLLM::Provider.providers.filter_map do |provider, provider_class|
-    models = RubyLLM.models.by_provider(provider)
-    next if models.none?
-
-    <<~PROVIDER
-      ### #{provider_class.name} (#{models.count})
-
-      #{models_table(models)}
-    PROVIDER
-  end.join("\n\n")
-end
-
-def generate_capability_sections
-  capabilities = {
-    'Function Calling' => RubyLLM.models.select(&:function_calling?),
-    'Structured Output' => RubyLLM.models.select(&:structured_output?),
-    'Streaming' => RubyLLM.models.select { |m| m.capabilities.include?('streaming') },
-    # 'Reasoning' => RubyLLM.models.select { |m| m.capabilities.include?('reasoning') },
-    'Batch Processing' => RubyLLM.models.select { |m| m.capabilities.include?('batch') }
-  }
-
-  capabilities.filter_map do |capability, models|
-    next if models.none?
-
-    <<~CAPABILITY
-      ### #{capability} (#{models.count})
-
-      #{models_table(models)}
-    CAPABILITY
-  end.join("\n\n")
-end
-
-def generate_modality_sections # rubocop:disable Metrics/PerceivedComplexity
-  sections = []
-
-  # Models that support vision/images
-  vision_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('image') }
-  if vision_models.any?
-    sections << <<~SECTION
-      ### Vision Models (#{vision_models.count})
-
-      Models that can process images:
-
-      #{models_table(vision_models)}
-    SECTION
-  end
-
-  # Models that support audio
-  audio_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('audio') }
-  if audio_models.any?
-    sections << <<~SECTION
-      ### Audio Input Models (#{audio_models.count})
-
-      Models that can process audio:
-
-      #{models_table(audio_models)}
-    SECTION
-  end
-
-  # Models that support PDFs
-  pdf_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('pdf') }
-  if pdf_models.any?
-    sections << <<~SECTION
-      ### PDF Models (#{pdf_models.count})
-
-      Models that can process PDF documents:
-
-      #{models_table(pdf_models)}
-    SECTION
-  end
-
-  # Models for embeddings
-  embedding_models = RubyLLM.models.select { |m| (m.modalities.output || []).include?('embeddings') }
-  if embedding_models.any?
-    sections << <<~SECTION
-      ### Embedding Models (#{embedding_models.count})
-
-      Models that generate embeddings:
-
-      #{models_table(embedding_models)}
-    SECTION
-  end
-
-  sections.join("\n\n")
-end
-
-def models_table(models)
-  return '*No models found*' if models.none?
-
-  headers = ['Model', 'Provider', 'Context', 'Max Output', 'Standard Pricing (per 1M tokens)']
-  alignment = [':--', ':--', '--:', '--:', ':--']
-
-  rows = models.sort_by { |m| [m.provider, m.name] }.map do |model|
-    # Format pricing information
-    pricing = standard_pricing_display(model)
-
-    [
-      model.id,
-      model.provider,
-      model.context_window || '-',
-      model.max_output_tokens || '-',
-      pricing
-    ]
-  end
-
-  table = []
-  table << "| #{headers.join(' | ')} |"
-  table << "| #{alignment.join(' | ')} |"
-
-  rows.each do |row|
-    table << "| #{row.join(' | ')} |"
-  end
-
-  table.join("\n")
-end
-
-def standard_pricing_display(model)
-  # Access pricing data using to_h to get the raw hash
-  pricing_data = model.pricing.to_h[:text_tokens]&.dig(:standard) || {}
-
-  if pricing_data.any?
-    parts = []
-
-    parts << "In: $#{format('%.2f', pricing_data[:input_per_million])}" if pricing_data[:input_per_million]
-
-    parts << "Out: $#{format('%.2f', pricing_data[:output_per_million])}" if pricing_data[:output_per_million]
-
-    if pricing_data[:cached_input_per_million]
-      parts << "Cache: $#{format('%.2f', pricing_data[:cached_input_per_million])}"
-    end
-
-    return parts.join(', ') if parts.any?
-  end
-
-  '-'
-end
data/lib/tasks/models_update.rake
DELETED
@@ -1,108 +0,0 @@
-# frozen_string_literal: true
-
-require 'dotenv/load'
-require 'ruby_llm'
-require 'json-schema'
-
-task default: ['models:update']
-
-namespace :models do
-  desc 'Update available models from providers (API keys needed)'
-  task :update do
-    puts 'Configuring RubyLLM...'
-    configure_from_env
-
-    refresh_models
-    display_model_stats
-  end
-end
-
-def configure_from_env
-  RubyLLM.configure do |config|
-    config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
-    config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
-    config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
-    config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
-    config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
-    config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
-    config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
-    configure_bedrock(config)
-    config.request_timeout = 30
-  end
-end
-
-def configure_bedrock(config)
-  config.bedrock_api_key = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
-  config.bedrock_secret_key = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
-  config.bedrock_region = ENV.fetch('AWS_REGION', nil)
-  config.bedrock_session_token = ENV.fetch('AWS_SESSION_TOKEN', nil)
-end
-
-def refresh_models
-  initial_count = RubyLLM.models.all.size
-  puts "Refreshing models (#{initial_count} cached)..."
-
-  models = RubyLLM.models.refresh!
-
-  if models.all.empty? && initial_count.zero?
-    puts 'Error: Failed to fetch models.'
-    exit(1)
-  elsif models.all.size == initial_count && initial_count.positive?
-    puts 'Warning: Model list unchanged.'
-  else
-    puts 'Validating models...'
-    validate_models!(models)
-
-    puts "Saving models.json (#{models.all.size} models)"
-    models.save_models
-  end
-
-  @models = models
-end
-
-def validate_models!(models)
-  schema_path = File.expand_path('../ruby_llm/models_schema.json', __dir__)
-  models_data = models.all.map(&:to_h)
-
-  validation_errors = JSON::Validator.fully_validate(schema_path, models_data)
-
-  unless validation_errors.empty?
-    # Save failed models for inspection
-    failed_path = File.expand_path('../ruby_llm/models.failed.json', __dir__)
-    File.write(failed_path, JSON.pretty_generate(models_data))
-
-    puts 'ERROR: Models validation failed:'
-    puts "\nValidation errors:"
-    validation_errors.first(10).each { |error| puts " - #{error}" }
-    puts " ... and #{validation_errors.size - 10} more errors" if validation_errors.size > 10
-    puts "-> Failed models saved to: #{failed_path}"
-    exit(1)
-  end
-
-  puts '✓ Models validation passed'
-end
-
-def display_model_stats
-  puts "\nModel count:"
-  provider_counts = @models.all.group_by(&:provider).transform_values(&:count)
-
-  RubyLLM::Provider.providers.each do |sym, provider_class|
-    name = provider_class.name
-    count = provider_counts[sym.to_s] || 0
-    status = status(sym)
-    puts " #{name}: #{count} models #{status}"
-  end
-
-  puts 'Refresh complete.'
-end
-
-def status(provider_sym)
-  provider_class = RubyLLM::Provider.providers[provider_sym]
-  if provider_class.local?
-    ' (LOCAL - SKIP)'
-  elsif provider_class.configured?(RubyLLM.config)
-    ' (OK)'
-  else
-    ' (NOT CONFIGURED)'
-  end
-end