ruby_llm 1.0.1 → 1.1.0rc2
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +28 -12
- data/lib/ruby_llm/active_record/acts_as.rb +46 -7
- data/lib/ruby_llm/aliases.json +65 -0
- data/lib/ruby_llm/aliases.rb +56 -0
- data/lib/ruby_llm/chat.rb +12 -9
- data/lib/ruby_llm/configuration.rb +4 -0
- data/lib/ruby_llm/error.rb +15 -4
- data/lib/ruby_llm/models.json +1163 -303
- data/lib/ruby_llm/models.rb +40 -11
- data/lib/ruby_llm/provider.rb +32 -39
- data/lib/ruby_llm/providers/anthropic/capabilities.rb +8 -9
- data/lib/ruby_llm/providers/anthropic/chat.rb +31 -4
- data/lib/ruby_llm/providers/anthropic/streaming.rb +12 -6
- data/lib/ruby_llm/providers/anthropic.rb +4 -0
- data/lib/ruby_llm/providers/bedrock/capabilities.rb +168 -0
- data/lib/ruby_llm/providers/bedrock/chat.rb +108 -0
- data/lib/ruby_llm/providers/bedrock/models.rb +84 -0
- data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +46 -0
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +63 -0
- data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
- data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +90 -0
- data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
- data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
- data/lib/ruby_llm/providers/bedrock.rb +99 -0
- data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
- data/lib/ruby_llm/providers/deepseek.rb +5 -0
- data/lib/ruby_llm/providers/gemini/capabilities.rb +50 -34
- data/lib/ruby_llm/providers/gemini/chat.rb +8 -15
- data/lib/ruby_llm/providers/gemini/images.rb +5 -10
- data/lib/ruby_llm/providers/gemini/streaming.rb +35 -76
- data/lib/ruby_llm/providers/gemini/tools.rb +12 -12
- data/lib/ruby_llm/providers/gemini.rb +4 -0
- data/lib/ruby_llm/providers/openai/capabilities.rb +146 -206
- data/lib/ruby_llm/providers/openai/streaming.rb +9 -13
- data/lib/ruby_llm/providers/openai.rb +4 -0
- data/lib/ruby_llm/streaming.rb +96 -0
- data/lib/ruby_llm/tool.rb +0 -3
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm.rb +6 -3
- data/lib/tasks/browser_helper.rb +97 -0
- data/lib/tasks/capability_generator.rb +123 -0
- data/lib/tasks/capability_scraper.rb +224 -0
- data/lib/tasks/cli_helper.rb +22 -0
- data/lib/tasks/code_validator.rb +29 -0
- data/lib/tasks/model_updater.rb +66 -0
- data/lib/tasks/models.rake +28 -193
- data/lib/tasks/models_docs.rake +156 -0
- data/lib/tasks/vcr.rake +13 -30
- metadata +28 -19
- data/.github/workflows/cicd.yml +0 -158
- data/.github/workflows/docs.yml +0 -53
- data/.gitignore +0 -59
- data/.overcommit.yml +0 -26
- data/.rspec +0 -3
- data/.rubocop.yml +0 -10
- data/.yardopts +0 -12
- data/CONTRIBUTING.md +0 -207
- data/Gemfile +0 -33
- data/Rakefile +0 -9
- data/bin/console +0 -17
- data/bin/setup +0 -6
- data/ruby_llm.gemspec +0 -44
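At a glance, the headline additions in 1.1.0rc2 are an AWS Bedrock (Anthropic) provider, a model-alias registry (`aliases.json` / `aliases.rb`), a shared `streaming.rb` module, and a rewritten model/capability toolchain under `lib/tasks/`. The chat surface itself is unchanged; the sketch below only uses calls that also appear in the diffs further down (`RubyLLM.configure`, `RubyLLM.chat`, `with_temperature`, `ask`), and treats alias resolution as an assumption based on the newly added files rather than something this diff shows directly.

```ruby
require 'ruby_llm'

RubyLLM.configure do |config|
  config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY')
end

# Same chat API as 1.0.x; under 1.1.0 the model id may additionally be
# resolved through the new lib/ruby_llm/aliases.json registry (assumption).
chat = RubyLLM.chat(model: 'claude-3-7-sonnet-20250219').with_temperature(0)
puts chat.ask('Say hello').content
```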
data/lib/tasks/models.rake
CHANGED
@@ -1,208 +1,43 @@
 # frozen_string_literal: true

-
-
-
-require 'nokogiri'
-require 'ruby_llm'
-
-# URLs to process
-PROVIDER_DOCS = {
-  openai: {
-    models: 'https://platform.openai.com/docs/models',
-    pricing: 'https://platform.openai.com/docs/pricing'
-  },
-  gemini: {
-    models: 'https://ai.google.dev/gemini-api/docs/models/gemini',
-    pricing: 'https://ai.google.dev/gemini-api/docs/pricing'
-  },
-  deepseek: {
-    models: 'https://api-docs.deepseek.com/quick_start/pricing/'
-  },
-  anthropic: {
-    models: 'https://docs.anthropic.com/en/docs/about-claude/models/all-models'
-  }
-}.freeze
-
-def fetch_page(url) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
-  if url.include?('openai.com')
-    puts "Please visit #{url} and paste the content below (type 'END' on a new line when done):"
-    original_separator = $INPUT_RECORD_SEPARATOR
-    $/ = 'END'
-    content = $stdin.gets&.chomp
-    $/ = original_separator
-
-    raise "No content provided for #{url}" unless content
-
-    content
-  else
-    response = http_client.get(url)
-    html = Nokogiri::HTML(response.body)
-
-    # Remove script tags and comments
-    html.css('script').remove
-    html.xpath('//comment()').remove
-
-    # Extract text content
-    text = html.css('body').text
-
-    # Clean up whitespace
-    text.gsub!(/\s+/, ' ')
-    text.strip!
-
-    text
-  end
-rescue StandardError => e
-  raise "Failed to fetch #{url}: #{e.message}"
-end
-
-def http_client
-  @http_client ||= Faraday.new do |f|
-    f.response :raise_error
-    f.response :logger, RubyLLM.logger, { headers: false, bodies: true }
-  end
-end
+require_relative 'model_updater'
+require_relative 'capability_scraper'
+require_relative 'capability_generator'

 namespace :models do # rubocop:disable Metrics/BlockLength
-  desc 'Update available models from providers'
+  desc 'Update available models from providers (API keys needed)'
   task :update do
-
-    RubyLLM.configure do |config|
-      config.openai_api_key = ENV.fetch('OPENAI_API_KEY')
-      config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY')
-      config.gemini_api_key = ENV.fetch('GEMINI_API_KEY')
-      config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY')
-    end
-
-    models = RubyLLM.models.refresh!
-    models.save_models
-
-    puts "Updated models.json with #{models.all.size} models:"
-    RubyLLM::Provider.providers.each do |provider_sym, provider_module|
-      provider_name = provider_module.to_s.split('::').last
-      provider_models = models.all.select { |m| m.provider == provider_sym.to_s }
-      puts "#{provider_name} models: #{provider_models.size}"
-    end
+    ModelUpdater.new.run
   end

-  desc 'Update
-  task :update_capabilities do
-
-
-
-
-    RubyLLM.configure do |config|
-      config.openai_api_key = ENV.fetch('OPENAI_API_KEY')
-      config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY')
-      config.gemini_api_key = ENV.fetch('GEMINI_API_KEY')
+  desc 'Update capabilities modules (GEMINI_API_KEY needed)'
+  task :update_capabilities, [:providers] do |_t, args|
+    gemini_key = ENV.fetch('GEMINI_API_KEY', nil)
+    unless gemini_key && !gemini_key.empty?
+      puts 'Error: GEMINI_API_KEY required'
+      exit(1)
     end

-
-
-
-
-      PROVIDER_DOCS
-    end
-
-    # Process each provider
-    providers_to_process.each do |provider, urls| # rubocop:disable Metrics/BlockLength
-      puts "Processing #{provider}..."
-
-      # Initialize our AI assistants
-      #
-      gemini = RubyLLM.chat(model: 'gemini-2.0-flash').with_temperature(0)
-      claude = RubyLLM.chat(model: 'claude-3-7-sonnet-20250219').with_temperature(0)
-
-      # Read existing capabilities file if present
-      existing_file = "lib/ruby_llm/providers/#{provider}/capabilities.rb"
-      existing_code = File.read(existing_file) if File.exist?(existing_file)
-
-      begin
-        # Download documentation
-        docs = urls.map do |type, url|
-          puts "  Getting #{type} documentation..."
-          content = fetch_page(url)
-
-          puts "\nHere's what I got:\n\n"
-          puts "#{content.slice(0, 500)}...\n\n"
-
-          loop do
-            print 'Does this content look correct? (y/n): '
-            answer = $stdin.gets&.chomp&.downcase
-            break if answer == 'y'
-            raise "Content verification failed for #{url}" if answer == 'n'
-          end
-
-          "#{type.to_s.upcase} DOCUMENTATION:\n\n#{content}"
-        end.join("\n\n")
-
-        # Extract relevant information with Gemini
-        puts '  Extracting model information...'
-        extraction_prompt = <<~PROMPT
-          Extract relevant model capabilities information from this documentation:
-
-          #{docs}
-
-          Focus on:
-          1. Available models and their IDs
-          2. Context window sizes
-          3. Maximum output tokens
-          4. Pricing information
-          5. Model capabilities (vision, function calling, etc)
-
-          Format the information clearly and concisely, focusing only on facts about the models.
-        PROMPT
-
-        model_info = gemini.ask(extraction_prompt).content
-
-        # Generate Ruby code with Claude
-        puts '  Generating Ruby code...'
-        code_prompt = <<~PROMPT
-          I need you to generate a Ruby module for #{provider}'s model capabilities.
-          Use this extracted model information:
-
-          #{model_info}
-
-          The module should go in lib/ruby_llm/providers/#{provider}/capabilities.rb and follow these conventions:
-
-          1. Include methods for determining context windows, token limits, pricing, and capabilities
-          2. Use consistent naming with other providers
-          3. Include detailed pricing information in a PRICES constant
-          4. Follow the existing structure in the codebase
-          5. Use Ruby idioms and clean code practices
-          6. Include module_function to make methods callable at module level
-          7. Include all necessary method documentation
-
-          Here's the existing implementation for reference (maintain similar structure and same method names):
-
-          #{existing_code}
-
-          Focus on accuracy and maintaining consistency with the existing codebase structure.
-        PROMPT
-
-        response = claude.ask(code_prompt)
-
-        # Extract Ruby code from Claude's response
-        puts "  Extracting Ruby code from Claude's response..."
-        ruby_code = nil
-
-        # Look for Ruby code block
-        ruby_code = Regexp.last_match(1).strip if response.content =~ /```ruby\s*(.*?)```/m
-
-        # Verify we found Ruby code
-        raise "No Ruby code block found in Claude's response" if ruby_code.nil? || ruby_code.empty?
+    RubyLLM.configure do |c|
+      c.gemini_api_key = gemini_key
+      c.request_timeout = 300
+    end

-
-      file_path = "lib/ruby_llm/providers/#{provider}/capabilities.rb"
-      puts "  Writing #{file_path}..."
+    target_providers = CapabilityScraper.parse_providers(args[:providers])

-
-
-
-
+    begin
+      scraper = CapabilityScraper.new(target_providers)
+      scraper.run do |provider, docs_html|
+        generator = CapabilityGenerator.new(provider, docs_html)
+        generator.generate_and_save
       end
+    rescue StandardError => e
+      puts "Error: #{e.message}"
+      puts e.backtrace.first(5).join("\n")
+    ensure
+      puts 'Update process complete. Review generated files.'
     end
-
-    puts "Done! Don't forget to review the generated code and run the tests."
   end
 end
+
+task default: ['models:update']
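Net effect of the rewrite: `models:update` now just delegates to `ModelUpdater` (provider API keys are still required, per the task description), while `models:update_capabilities` takes an optional providers argument, needs only `GEMINI_API_KEY`, and drives the new `CapabilityScraper` / `CapabilityGenerator` helpers. Typical invocations might look like the sketch below; the bracket-argument format accepted by `CapabilityScraper.parse_providers` is an assumption, since that helper is added by this release but not shown in this diff.

```ruby
# Shown as comments because the tasks are driven from the shell:
#
#   OPENAI_API_KEY=... ANTHROPIC_API_KEY=... GEMINI_API_KEY=... DEEPSEEK_API_KEY=... \
#     bundle exec rake models:update
#
#   GEMINI_API_KEY=... bundle exec rake "models:update_capabilities[anthropic]"  # provider arg format assumed
#
# `task default: ['models:update']` also makes the registry refresh the default
# task when this rakefile is loaded on its own.
```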
data/lib/tasks/models_docs.rake
ADDED
@@ -0,0 +1,156 @@
+# frozen_string_literal: true
+
+require 'ruby_llm'
+require 'fileutils'
+
+MODEL_KEYS_TO_DISPLAY = %i[
+  id
+  type
+  display_name
+  provider
+  context_window
+  max_tokens
+  family
+  input_price_per_million
+  output_price_per_million
+].freeze
+
+def to_markdown_table(models) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
+  to_display_hash = ->(model) { model.to_h.slice(*MODEL_KEYS_TO_DISPLAY) }
+  model_hashes = Array(models).map { |model| to_display_hash.call(model) }
+
+  # Create abbreviated headers
+  headers = {
+    id: 'ID',
+    type: 'Type',
+    display_name: 'Name',
+    provider: 'Provider',
+    context_window: 'Context',
+    max_tokens: 'MaxTok',
+    family: 'Family',
+    input_price_per_million: 'In$/M',
+    output_price_per_million: 'Out$/M'
+  }
+
+  # Create header row with alignment markers
+  # Right-align numbers, left-align text
+  alignments = {
+    id: ':--',
+    type: ':--',
+    display_name: ':--',
+    provider: ':--',
+    context_window: '--:',
+    max_tokens: '--:',
+    family: ':--',
+    input_price_per_million: '--:',
+    output_price_per_million: '--:'
+  }
+
+  # Build the table
+  lines = []
+
+  # Header row
+  lines << "| #{MODEL_KEYS_TO_DISPLAY.map { |key| headers[key] }.join(' | ')} |"
+
+  # Alignment row
+  lines << "| #{MODEL_KEYS_TO_DISPLAY.map { |key| alignments[key] }.join(' | ')} |"
+
+  # Data rows
+  model_hashes.each do |model_hash|
+    values = MODEL_KEYS_TO_DISPLAY.map do |key|
+      if model_hash[key].is_a?(Float)
+        format('%.2f', model_hash[key])
+      else
+        model_hash[key]
+      end
+    end
+
+    lines << "| #{values.join(' | ')} |"
+  end
+
+  lines.join("\n")
+end
+
+namespace :models do # rubocop:disable Metrics/BlockLength
+  desc 'Generate available models documentation'
+  task :docs do # rubocop:disable Metrics/BlockLength
+    FileUtils.mkdir_p('docs/guides') # ensure output directory exists
+
+    output = <<~MARKDOWN
+      ---
+      layout: default
+      title: Available Models
+      parent: Guides
+      nav_order: 10
+      permalink: /guides/available-models
+      ---
+
+      # Available Models
+
+      This guide lists all models available in RubyLLM, automatically generated from the current model registry.
+
+      _Last updated: #{Time.now.utc.strftime('%Y-%m-%d')}_
+
+      ## Contributing
+
+      The model list is automatically generated from the model registry. To add or update models:
+
+      1. Edit the appropriate `capabilities.rb` file in `lib/ruby_llm/providers/<provider>/`
+      2. Run `rake models:update` to refresh the model registry
+      3. Submit a pull request with the updated `models.json`
+
+      See [Contributing Guide](/CONTRIBUTING.md) for more details.
+
+      ## Additional Model Information
+
+      The tables below show basic model information including context windows, token limits, and pricing. Models also have additional capabilities not shown in the tables:
+
+      - **Vision Support**: Whether the model can process images
+      - **Function Calling**: Whether the model supports function calling
+      - **JSON Mode**: Whether the model can be constrained to output valid JSON
+      - **Structured Output**: Whether the model supports structured output formats
+
+      For complete model information, you can check the `models.json` file in the RubyLLM source code.
+
+      For more information about working with models, see the [Working with Models](/guides/models) guide.
+
+      ## Models by Type
+
+      ### Chat Models (#{RubyLLM.models.chat_models.count})
+
+      #{to_markdown_table(RubyLLM.models.chat_models)}
+
+      ### Image Models (#{RubyLLM.models.image_models.count})
+
+      #{to_markdown_table(RubyLLM.models.image_models)}
+
+      ### Audio Models (#{RubyLLM.models.audio_models.count})
+
+      #{to_markdown_table(RubyLLM.models.audio_models)}
+
+      ### Embedding Models (#{RubyLLM.models.embedding_models.count})
+
+      #{to_markdown_table(RubyLLM.models.embedding_models)}
+
+      ### Moderation Models (#{RubyLLM.models.select { |m| m.type == 'moderation' }.count})
+
+      #{to_markdown_table(RubyLLM.models.select { |m| m.type == 'moderation' })}
+
+      ## Models by Provider
+
+      #{RubyLLM::Provider.providers.keys.map do |provider|
+        models = RubyLLM.models.by_provider(provider)
+        next if models.none?
+
+        <<~PROVIDER
+          ### #{provider.to_s.capitalize} Models (#{models.count})
+
+          #{to_markdown_table(models)}
+        PROVIDER
+      end.compact.join("\n")}
+    MARKDOWN
+
+    File.write('docs/guides/available-models.md', output)
+    puts 'Generated docs/guides/available-models.md'
+  end
+end
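For reference, `to_markdown_table` emits a standard pipe table: one header row with the abbreviated labels, one alignment row (`:--` left-aligned, `--:` right-aligned), then one row per model with floats formatted to two decimals. Roughly like this (the model values shown are illustrative, not taken from models.json):

```ruby
puts to_markdown_table(RubyLLM.models.chat_models.first(1))
# | ID | Type | Name | Provider | Context | MaxTok | Family | In$/M | Out$/M |
# | :-- | :-- | :-- | :-- | --: | --: | :-- | --: | --: |
# | gpt-4o | chat | GPT-4o | openai | 128000 | 16384 | gpt4o | 2.50 | 10.00 |
```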
data/lib/tasks/vcr.rake
CHANGED
@@ -29,13 +29,8 @@ def record_for_providers(providers, cassette_dir) # rubocop:disable Metrics/AbcSize
     return
   end

-  # Get URL patterns from the providers themselves
-  provider_patterns = get_provider_patterns(providers)
-
-  puts "Finding cassettes for providers: #{providers.join(', ')}"
-
   # Find and delete matching cassettes
-  cassettes_to_delete = find_matching_cassettes(cassette_dir, provider_patterns)
+  cassettes_to_delete = find_matching_cassettes(cassette_dir, providers)

   if cassettes_to_delete.empty?
     puts 'No cassettes found for the specified providers.'
@@ -51,33 +46,21 @@ def record_for_providers(providers, cassette_dir) # rubocop:disable Metrics/AbcSize
   puts 'Please review the updated cassettes for sensitive information.'
 end

-def get_provider_patterns(providers)
-  provider_patterns = {}
-
-  providers.each do |provider_name|
-    provider_module = RubyLLM::Provider.providers[provider_name.to_sym]
-    next unless provider_module
-
-    # Extract the base URL from the provider's api_base method
-    api_base = provider_module.api_base.to_s
-
-    # Create a regex pattern from the domain
-    next unless api_base && !api_base.empty?
-
-    domain = URI.parse(api_base).host
-    pattern = Regexp.new(Regexp.escape(domain))
-    provider_patterns[provider_name] = pattern
-  end
-
-  provider_patterns
-end
-
-def find_matching_cassettes(dir, patterns)
+def find_matching_cassettes(dir, providers) # rubocop:disable Metrics/MethodLength
   cassettes = []

   Dir.glob("#{dir}/**/*.yml").each do |file|
-
-
+    basename = File.basename(file)
+
+    # Precise matching to avoid cross-provider confusion
+    providers.each do |provider|
+      # Match only exact provider prefixes
+      next unless basename =~ /^[^_]*_#{provider}_/ || # For first section like "chat_openai_"
+                  basename =~ /_#{provider}_[^_]+_/ # For middle sections like "_openai_gpt4_"
+
+      cassettes << file
+      break
+    end
   end

   cassettes
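The cassette selection is now purely filename-based rather than URL-pattern based. A quick check of the two regexes from the new `find_matching_cassettes` against some representative cassette names (the file names here are assumptions about the naming scheme, chosen only to exercise both patterns):

```ruby
provider = 'openai'

[
  'chat_openai_gpt4o_can_answer.yml',             # first-section pattern: "chat_openai_"
  'models_refresh_openai_list_models.yml',        # middle-section pattern: "_openai_list_"
  'chat_anthropic_claude_3_7_sonnet_streams.yml'  # different provider: no match
].each do |basename|
  hit = basename =~ /^[^_]*_#{provider}_/ || basename =~ /_#{provider}_[^_]+_/
  puts format('%-48s %s', basename, hit ? 'match' : 'no match')
end
```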
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 1.0.1
+  version: 1.1.0rc2
 platform: ruby
 authors:
 - Carmine Paolino
 autorequire:
-bindir:
+bindir: bin
 cert_chain: []
-date: 2025-
+date: 2025-04-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: base64
@@ -96,31 +96,21 @@ dependencies:
         version: '2'
 description: A delightful Ruby way to work with AI. Chat in text, analyze and generate
   images, understand audio, and use tools through a unified interface to OpenAI, Anthropic,
-  Google, and DeepSeek. Built for developer happiness with automatic token counting,
-  proper streaming, and Rails integration. No wrapping your head around multiple APIs
-  - just clean Ruby code that works.
+  Google, AWS Bedrock Anthropic, and DeepSeek. Built for developer happiness with
+  automatic token counting, proper streaming, and Rails integration. No wrapping your
+  head around multiple APIs - just clean Ruby code that works.
 email:
 - carmine@paolino.me
 executables: []
 extensions: []
 extra_rdoc_files: []
 files:
-- ".github/workflows/cicd.yml"
-- ".github/workflows/docs.yml"
-- ".gitignore"
-- ".overcommit.yml"
-- ".rspec"
-- ".rubocop.yml"
-- ".yardopts"
-- CONTRIBUTING.md
-- Gemfile
 - LICENSE
 - README.md
-- Rakefile
-- bin/console
-- bin/setup
 - lib/ruby_llm.rb
 - lib/ruby_llm/active_record/acts_as.rb
+- lib/ruby_llm/aliases.json
+- lib/ruby_llm/aliases.rb
 - lib/ruby_llm/chat.rb
 - lib/ruby_llm/chunk.rb
 - lib/ruby_llm/configuration.rb
@@ -141,8 +131,20 @@ files:
 - lib/ruby_llm/providers/anthropic/models.rb
 - lib/ruby_llm/providers/anthropic/streaming.rb
 - lib/ruby_llm/providers/anthropic/tools.rb
+- lib/ruby_llm/providers/bedrock.rb
+- lib/ruby_llm/providers/bedrock/capabilities.rb
+- lib/ruby_llm/providers/bedrock/chat.rb
+- lib/ruby_llm/providers/bedrock/models.rb
+- lib/ruby_llm/providers/bedrock/signing.rb
+- lib/ruby_llm/providers/bedrock/streaming.rb
+- lib/ruby_llm/providers/bedrock/streaming/base.rb
+- lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb
+- lib/ruby_llm/providers/bedrock/streaming/message_processing.rb
+- lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb
+- lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb
 - lib/ruby_llm/providers/deepseek.rb
 - lib/ruby_llm/providers/deepseek/capabilities.rb
+- lib/ruby_llm/providers/deepseek/chat.rb
 - lib/ruby_llm/providers/gemini.rb
 - lib/ruby_llm/providers/gemini/capabilities.rb
 - lib/ruby_llm/providers/gemini/chat.rb
@@ -163,12 +165,19 @@ files:
 - lib/ruby_llm/providers/openai/tools.rb
 - lib/ruby_llm/railtie.rb
 - lib/ruby_llm/stream_accumulator.rb
+- lib/ruby_llm/streaming.rb
 - lib/ruby_llm/tool.rb
 - lib/ruby_llm/tool_call.rb
 - lib/ruby_llm/version.rb
+- lib/tasks/browser_helper.rb
+- lib/tasks/capability_generator.rb
+- lib/tasks/capability_scraper.rb
+- lib/tasks/cli_helper.rb
+- lib/tasks/code_validator.rb
+- lib/tasks/model_updater.rb
 - lib/tasks/models.rake
+- lib/tasks/models_docs.rake
 - lib/tasks/vcr.rake
-- ruby_llm.gemspec
 homepage: https://rubyllm.com
 licenses:
 - MIT