ruby_llm 1.2.0 → 1.3.0rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. checksums.yaml +4 -4
  2. data/README.md +80 -133
  3. data/lib/ruby_llm/active_record/acts_as.rb +212 -33
  4. data/lib/ruby_llm/aliases.json +48 -6
  5. data/lib/ruby_llm/attachments/audio.rb +12 -0
  6. data/lib/ruby_llm/attachments/image.rb +9 -0
  7. data/lib/ruby_llm/attachments/pdf.rb +9 -0
  8. data/lib/ruby_llm/attachments.rb +78 -0
  9. data/lib/ruby_llm/chat.rb +22 -19
  10. data/lib/ruby_llm/configuration.rb +30 -1
  11. data/lib/ruby_llm/connection.rb +95 -0
  12. data/lib/ruby_llm/content.rb +51 -72
  13. data/lib/ruby_llm/context.rb +30 -0
  14. data/lib/ruby_llm/embedding.rb +13 -5
  15. data/lib/ruby_llm/error.rb +1 -1
  16. data/lib/ruby_llm/image.rb +13 -5
  17. data/lib/ruby_llm/message.rb +12 -4
  18. data/lib/ruby_llm/mime_types.rb +713 -0
  19. data/lib/ruby_llm/model_info.rb +208 -27
  20. data/lib/ruby_llm/models.json +25766 -2154
  21. data/lib/ruby_llm/models.rb +95 -14
  22. data/lib/ruby_llm/provider.rb +48 -90
  23. data/lib/ruby_llm/providers/anthropic/capabilities.rb +76 -13
  24. data/lib/ruby_llm/providers/anthropic/chat.rb +7 -14
  25. data/lib/ruby_llm/providers/anthropic/media.rb +44 -34
  26. data/lib/ruby_llm/providers/anthropic/models.rb +15 -15
  27. data/lib/ruby_llm/providers/anthropic/tools.rb +2 -2
  28. data/lib/ruby_llm/providers/anthropic.rb +3 -3
  29. data/lib/ruby_llm/providers/bedrock/capabilities.rb +61 -2
  30. data/lib/ruby_llm/providers/bedrock/chat.rb +30 -73
  31. data/lib/ruby_llm/providers/bedrock/media.rb +56 -0
  32. data/lib/ruby_llm/providers/bedrock/models.rb +50 -58
  33. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +16 -0
  34. data/lib/ruby_llm/providers/bedrock.rb +14 -25
  35. data/lib/ruby_llm/providers/deepseek/capabilities.rb +35 -2
  36. data/lib/ruby_llm/providers/deepseek.rb +3 -3
  37. data/lib/ruby_llm/providers/gemini/capabilities.rb +84 -3
  38. data/lib/ruby_llm/providers/gemini/chat.rb +8 -37
  39. data/lib/ruby_llm/providers/gemini/embeddings.rb +18 -34
  40. data/lib/ruby_llm/providers/gemini/images.rb +2 -2
  41. data/lib/ruby_llm/providers/gemini/media.rb +39 -110
  42. data/lib/ruby_llm/providers/gemini/models.rb +16 -22
  43. data/lib/ruby_llm/providers/gemini/tools.rb +1 -1
  44. data/lib/ruby_llm/providers/gemini.rb +3 -3
  45. data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
  46. data/lib/ruby_llm/providers/ollama/media.rb +44 -0
  47. data/lib/ruby_llm/providers/ollama.rb +34 -0
  48. data/lib/ruby_llm/providers/openai/capabilities.rb +78 -3
  49. data/lib/ruby_llm/providers/openai/chat.rb +6 -4
  50. data/lib/ruby_llm/providers/openai/embeddings.rb +8 -12
  51. data/lib/ruby_llm/providers/openai/media.rb +38 -21
  52. data/lib/ruby_llm/providers/openai/models.rb +16 -17
  53. data/lib/ruby_llm/providers/openai/tools.rb +9 -5
  54. data/lib/ruby_llm/providers/openai.rb +7 -5
  55. data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
  56. data/lib/ruby_llm/providers/openrouter.rb +31 -0
  57. data/lib/ruby_llm/stream_accumulator.rb +4 -4
  58. data/lib/ruby_llm/streaming.rb +3 -3
  59. data/lib/ruby_llm/utils.rb +22 -0
  60. data/lib/ruby_llm/version.rb +1 -1
  61. data/lib/ruby_llm.rb +15 -5
  62. data/lib/tasks/models.rake +69 -33
  63. data/lib/tasks/models_docs.rake +164 -121
  64. data/lib/tasks/vcr.rake +4 -2
  65. metadata +23 -14
  66. data/lib/tasks/browser_helper.rb +0 -97
  67. data/lib/tasks/capability_generator.rb +0 -123
  68. data/lib/tasks/capability_scraper.rb +0 -224
  69. data/lib/tasks/cli_helper.rb +0 -22
  70. data/lib/tasks/code_validator.rb +0 -29
  71. data/lib/tasks/model_updater.rb +0 -66
data/lib/tasks/models.rake CHANGED
@@ -1,42 +1,78 @@
  # frozen_string_literal: true

- require_relative 'model_updater'
- require_relative 'capability_scraper'
- require_relative 'capability_generator'
+ require 'dotenv/load'
+ require 'ruby_llm'

- namespace :models do # rubocop:disable Metrics/BlockLength
+ def configure_from_env
+   RubyLLM.configure do |config|
+     config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
+     config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
+     config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
+     config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+     config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+     configure_bedrock(config)
+     config.request_timeout = 30
+   end
+ end
+
+ def configure_bedrock(config)
+   config.bedrock_api_key = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
+   config.bedrock_secret_key = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
+   config.bedrock_region = ENV.fetch('AWS_REGION', nil)
+   config.bedrock_session_token = ENV.fetch('AWS_SESSION_TOKEN', nil)
+ end
+
+ def refresh_models
+   initial_count = RubyLLM.models.all.size
+   puts "Refreshing models (#{initial_count} cached)..."
+
+   models = RubyLLM.models.refresh!
+
+   if models.all.empty? && initial_count.zero?
+     puts 'Error: Failed to fetch models.'
+     exit(1)
+   elsif models.all.size == initial_count && initial_count.positive?
+     puts 'Warning: Model list unchanged.'
+   else
+     puts "Saving models.json (#{models.all.size} models)"
+     models.save_models
+   end
+
+   @models = models
+ end
+
+ def display_model_stats
+   puts "\nModel count:"
+   provider_counts = @models.all.group_by(&:provider).transform_values(&:count)
+
+   RubyLLM::Provider.providers.each_key do |sym|
+     name = sym.to_s.capitalize
+     count = provider_counts[sym.to_s] || 0
+     status = status(sym)
+     puts " #{name}: #{count} models #{status}"
+   end
+
+   puts 'Refresh complete.'
+ end
+
+ def status(provider_sym)
+   if RubyLLM::Provider.providers[provider_sym].local?
+     ' (LOCAL - SKIP)'
+   elsif RubyLLM::Provider.providers[provider_sym].configured?
+     ' (OK)'
+   else
+     ' (NOT CONFIGURED)'
+   end
+ end
+
+ namespace :models do
    desc 'Update available models from providers (API keys needed)'
    task :update do
-     ModelUpdater.new.run
-   end
+     puts 'Configuring RubyLLM...'
+     configure_from_env

-   desc 'Update capabilities modules (GEMINI_API_KEY needed)'
-   task :update_capabilities, [:providers] do |_t, args|
-     gemini_key = ENV.fetch('GEMINI_API_KEY', nil)
-     unless gemini_key && !gemini_key.empty?
-       puts 'Error: GEMINI_API_KEY required'
-       exit(1)
-     end
-
-     RubyLLM.configure do |c|
-       c.gemini_api_key = gemini_key
-       c.request_timeout = 300
-     end
-
-     target_providers = CapabilityScraper.parse_providers(args[:providers])
-
-     begin
-       scraper = CapabilityScraper.new(target_providers)
-       scraper.run do |provider, docs_html|
-         generator = CapabilityGenerator.new(provider, docs_html)
-         generator.generate_and_save
-       end
-     rescue StandardError => e
-       puts "Error: #{e.message}"
-       puts e.backtrace.first(5).join("\n")
-     ensure
-       puts 'Update process complete. Review generated files.'
-     end
+     refresh_models
+     display_model_stats
    end
  end
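The rewritten `models:update` task above is driven entirely by environment variables loaded via `dotenv/load`. A minimal sketch of the same flow outside Rake (method and configuration names are taken from the diff; treating a local `.env` file as the key source and `models.json` as the output of `save_models` are assumptions):

    require 'dotenv/load' # loads provider keys from a local .env file
    require 'ruby_llm'

    RubyLLM.configure do |config|
      config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
      config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
      config.request_timeout = 30
    end

    models = RubyLLM.models.refresh!   # query every configured provider
    models.save_models                 # persist the registry (models.json)
    puts "#{models.all.size} models available"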
 
data/lib/tasks/models_docs.rake CHANGED
@@ -1,168 +1,211 @@
  # frozen_string_literal: true

- require 'ruby_llm'
+ require 'dotenv/load'
  require 'fileutils'

- MODEL_KEYS_TO_DISPLAY = %i[
-   id
-   type
-   display_name
-   provider
-   context_window
-   max_tokens
-   family
-   input_price_per_million
-   output_price_per_million
- ].freeze
-
- def to_markdown_table(models) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
-   to_display_hash = ->(model) { model.to_h.slice(*MODEL_KEYS_TO_DISPLAY) }
-   model_hashes = Array(models).map { |model| to_display_hash.call(model) }
-
-   # Create abbreviated headers
-   headers = {
-     id: 'ID',
-     type: 'Type',
-     display_name: 'Name',
-     provider: 'Provider',
-     context_window: 'Context',
-     max_tokens: 'MaxTok',
-     family: 'Family',
-     input_price_per_million: 'In$/M',
-     output_price_per_million: 'Out$/M'
-   }
+ namespace :models do
+   desc 'Generate available models documentation'
+   task :docs do
+     FileUtils.mkdir_p('docs/guides') # ensure output directory exists

-   # Create header row with alignment markers
-   # Right-align numbers, left-align text
-   alignments = {
-     id: ':--',
-     type: ':--',
-     display_name: ':--',
-     provider: ':--',
-     context_window: '--:',
-     max_tokens: '--:',
-     family: ':--',
-     input_price_per_million: '--:',
-     output_price_per_million: '--:'
-   }
+     # Generate markdown content
+     output = generate_models_markdown

-   # Build the table
-   lines = []
+     # Write the output
+     File.write('docs/guides/available-models.md', output)
+     puts 'Generated docs/guides/available-models.md'
+   end
+ end

-   # Header row
-   lines << "| #{MODEL_KEYS_TO_DISPLAY.map { |key| headers[key] }.join(' | ')} |"
+ def generate_models_markdown
+   <<~MARKDOWN
+     ---
+     layout: default
+     title: Available Models
+     parent: Guides
+     nav_order: 10
+     permalink: /guides/available-models
+     ---

-   # Alignment row
-   lines << "| #{MODEL_KEYS_TO_DISPLAY.map { |key| alignments[key] }.join(' | ')} |"
+     # Available Models
+     {: .no_toc }

-   # Data rows
-   model_hashes.each do |model_hash|
-     values = MODEL_KEYS_TO_DISPLAY.map do |key|
-       if model_hash[key].is_a?(Float)
-         format('%.2f', model_hash[key])
-       else
-         model_hash[key]
-       end
-     end
+     This guide lists all models available in RubyLLM, automatically generated from the current model registry.
+     {: .fs-6 .fw-300 }

-     lines << "| #{values.join(' | ')} |"
-   end
+     ## Table of contents
+     {: .no_toc .text-delta }
+
+     1. TOC
+     {:toc}
+
+     ---
+
+     ## Contributing
+
+     The model list is automatically generated from the model registry. To add or update models:
+
+     1. Edit the appropriate `capabilities.rb` file in `lib/ruby_llm/providers/<provider>/`
+     2. Run `rake models:update` to refresh the model registry
+     3. Submit a pull request with the updated `models.json`
+
+     See [Contributing Guide](/CONTRIBUTING.md) for more details.
+
+     ## Last Updated
+     {: .d-inline-block }
+
+     #{Time.now.utc.strftime('%Y-%m-%d')}
+     {: .label .label-green }
+
+     ## Models by Provider
+
+     #{generate_provider_sections}
+
+     ## Models by Capability

-   lines.join("\n")
+     #{generate_capability_sections}
+
+     ## Models by Modality
+
+     #{generate_modality_sections}
+   MARKDOWN
  end

- namespace :models do # rubocop:disable Metrics/BlockLength
-   desc 'Generate available models documentation'
-   task :docs do # rubocop:disable Metrics/BlockLength
-     FileUtils.mkdir_p('docs/guides') # ensure output directory exists
+ def generate_provider_sections
+   RubyLLM::Provider.providers.keys.map do |provider|
+     models = RubyLLM.models.by_provider(provider)
+     next if models.none?
+
+     <<~PROVIDER
+       ### #{provider.to_s.capitalize} (#{models.count})

-     output = <<~MARKDOWN
-       ---
-       layout: default
-       title: Available Models
-       parent: Guides
-       nav_order: 10
-       permalink: /guides/available-models
-       ---
+       #{models_table(models)}
+     PROVIDER
+   end.compact.join("\n\n")
+ end

-       # Available Models
-       {: .no_toc }
+ def generate_capability_sections
+   capabilities = {
+     'Function Calling' => RubyLLM.models.select(&:function_calling?),
+     'Structured Output' => RubyLLM.models.select(&:structured_output?),
+     'Streaming' => RubyLLM.models.select { |m| m.capabilities.include?('streaming') },
+     # 'Reasoning' => RubyLLM.models.select { |m| m.capabilities.include?('reasoning') },
+     'Batch Processing' => RubyLLM.models.select { |m| m.capabilities.include?('batch') }
+   }

-       This guide lists all models available in RubyLLM, automatically generated from the current model registry.
-       {: .fs-6 .fw-300 }
+   capabilities.map do |capability, models|
+     next if models.none?

-       ## Table of contents
-       {: .no_toc .text-delta }
+     <<~CAPABILITY
+       ### #{capability} (#{models.count})

-       1. TOC
-       {:toc}
+       #{models_table(models)}
+     CAPABILITY
+   end.compact.join("\n\n")
+ end

-       ---
+ def generate_modality_sections # rubocop:disable Metrics/PerceivedComplexity
+   sections = []

-       ## Contributing
+   # Models that support vision/images
+   vision_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('image') }
+   if vision_models.any?
+     sections << <<~SECTION
+       ### Vision Models (#{vision_models.count})

-       The model list is automatically generated from the model registry. To add or update models:
+       Models that can process images:

-       1. Edit the appropriate `capabilities.rb` file in `lib/ruby_llm/providers/<provider>/`
-       2. Run `rake models:update` to refresh the model registry
-       3. Submit a pull request with the updated `models.json`
+       #{models_table(vision_models)}
+     SECTION
+   end

-       See [Contributing Guide](/CONTRIBUTING.md) for more details.
+   # Models that support audio
+   audio_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('audio') }
+   if audio_models.any?
+     sections << <<~SECTION
+       ### Audio Input Models (#{audio_models.count})

-       ## Additional Model Information
+       Models that can process audio:

-       The tables below show basic model information including context windows, token limits, and pricing. Models also have additional capabilities not shown in the tables:
+       #{models_table(audio_models)}
+     SECTION
+   end

-       - **Vision Support**: Whether the model can process images
-       - **Function Calling**: Whether the model supports function calling
-       - **JSON Mode**: Whether the model can be constrained to output valid JSON
-       - **Structured Output**: Whether the model supports structured output formats
+   # Models that support PDFs
+   pdf_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('pdf') }
+   if pdf_models.any?
+     sections << <<~SECTION
+       ### PDF Models (#{pdf_models.count})

-       For complete model information, you can check the `models.json` file in the RubyLLM source code.
+       Models that can process PDF documents:

-       For more information about working with models, see the [Working with Models](/guides/models) guide.
+       #{models_table(pdf_models)}
+     SECTION
+   end

-       ## Models by Type
-       {: .d-inline-block }
+   # Models for embeddings
+   embedding_models = RubyLLM.models.select { |m| (m.modalities.output || []).include?('embeddings') }
+   if embedding_models.any?
+     sections << <<~SECTION
+       ### Embedding Models (#{embedding_models.count})

-       Last updated: #{Time.now.utc.strftime('%Y-%m-%d')}
-       {: .label .label-green }
+       Models that generate embeddings:

-       ### Chat Models (#{RubyLLM.models.chat_models.count})
+       #{models_table(embedding_models)}
+     SECTION
+   end

-       #{to_markdown_table(RubyLLM.models.chat_models)}
+   sections.join("\n\n")
+ end

-       ### Image Models (#{RubyLLM.models.image_models.count})
+ def models_table(models)
+   return '*No models found*' if models.none?

-       #{to_markdown_table(RubyLLM.models.image_models)}
+   headers = ['Model', 'ID', 'Provider', 'Context', 'Max Output', 'Standard Pricing (per 1M tokens)']
+   alignment = [':--', ':--', ':--', '--:', '--:', ':--']

-       ### Audio Models (#{RubyLLM.models.audio_models.count})
+   rows = models.sort_by { |m| [m.provider, m.name] }.map do |model|
+     # Format pricing information
+     pricing = standard_pricing_display(model)

-       #{to_markdown_table(RubyLLM.models.audio_models)}
+     [
+       model.name,
+       model.id,
+       model.provider,
+       model.context_window || '-',
+       model.max_output_tokens || '-',
+       pricing
+     ]
+   end

-       ### Embedding Models (#{RubyLLM.models.embedding_models.count})
+   table = []
+   table << "| #{headers.join(' | ')} |"
+   table << "| #{alignment.join(' | ')} |"

-       #{to_markdown_table(RubyLLM.models.embedding_models)}
+   rows.each do |row|
+     table << "| #{row.join(' | ')} |"
+   end

-       ### Moderation Models (#{RubyLLM.models.select { |m| m.type == 'moderation' }.count})
+   table.join("\n")
+ end

-       #{to_markdown_table(RubyLLM.models.select { |m| m.type == 'moderation' })}
+ def standard_pricing_display(model)
+   # Access pricing data using to_h to get the raw hash
+   pricing_data = model.pricing.to_h[:text_tokens]&.dig(:standard) || {}

-       ## Models by Provider
+   if pricing_data.any?
+     parts = []

-       #{RubyLLM::Provider.providers.keys.map do |provider|
-         models = RubyLLM.models.by_provider(provider)
-         next if models.none?
+     parts << "In: $#{format('%.2f', pricing_data[:input_per_million])}" if pricing_data[:input_per_million]

-         <<~PROVIDER
-           ### #{provider.to_s.capitalize} Models (#{models.count})
+     parts << "Out: $#{format('%.2f', pricing_data[:output_per_million])}" if pricing_data[:output_per_million]

-           #{to_markdown_table(models)}
-         PROVIDER
-       end.compact.join("\n")}
-     MARKDOWN
+     if pricing_data[:cached_input_per_million]
+       parts << "Cache: $#{format('%.2f', pricing_data[:cached_input_per_million])}"
+     end

-     File.write('docs/guides/available-models.md', output)
-     puts 'Generated docs/guides/available-models.md'
+     return parts.join(', ') if parts.any?
    end
+
+   '-'
  end
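The `standard_pricing_display` helper above reads `model.pricing.to_h` and digs into `[:text_tokens][:standard]`. A small sketch of the hash shape it expects (keys are the ones referenced in the code; the numbers are placeholders):

    pricing = {
      text_tokens: {
        standard: {
          input_per_million: 1.00,        # rendered as "In: $1.00"
          output_per_million: 2.00,       # rendered as "Out: $2.00"
          cached_input_per_million: 0.50  # rendered as "Cache: $0.50"
        }
      }
    }

    pricing[:text_tokens]&.dig(:standard) || {}
    # => { input_per_million: 1.0, output_per_million: 2.0, cached_input_per_million: 0.5 }

Missing keys simply drop out of the formatted string, and a model with no standard text-token pricing falls through to '-'.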
data/lib/tasks/vcr.rake CHANGED
@@ -1,5 +1,7 @@
  # frozen_string_literal: true

+ require 'dotenv/load'
+
  # Helper functions at the top level
  def record_all_cassettes(cassette_dir)
    # Re-record all cassettes
@@ -11,7 +13,7 @@ def record_all_cassettes(cassette_dir)
    puts 'Done recording. Please review the new cassettes.'
  end

- def record_for_providers(providers, cassette_dir) # rubocop:disable Metrics/AbcSize,Metrics/MethodLength
+ def record_for_providers(providers, cassette_dir)
    # Get the list of available providers from RubyLLM itself
    all_providers = RubyLLM::Provider.providers.keys.map(&:to_s)

@@ -46,7 +48,7 @@ def record_for_providers(providers, cassette_dir) # rubocop:disable Metrics/AbcS
    puts 'Please review the updated cassettes for sensitive information.'
  end

- def find_matching_cassettes(dir, providers) # rubocop:disable Metrics/MethodLength
+ def find_matching_cassettes(dir, providers)
    cassettes = []

    Dir.glob("#{dir}/**/*.yml").each do |file|
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby_llm
  version: !ruby/object:Gem::Version
-   version: 1.2.0
+   version: 1.3.0rc1
  platform: ruby
  authors:
  - Carmine Paolino
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-04-17 00:00:00.000000000 Z
+ date: 2025-05-12 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: base64
@@ -108,11 +108,12 @@ dependencies:
      - - "~>"
        - !ruby/object:Gem::Version
          version: '2'
- description: A delightful Ruby way to work with AI. Chat in text, analyze and generate
-   images, understand audio, and use tools through a unified interface to OpenAI, Anthropic,
-   Google, AWS Bedrock Anthropic, and DeepSeek. Built for developer happiness with
-   automatic token counting, proper streaming, and Rails integration. No wrapping your
-   head around multiple APIs - just clean Ruby code that works.
+ description: A delightful Ruby way to work with AI. Tired of juggling different SDKs?
+   RubyLLM provides one beautiful, Ruby-like interface for OpenAI, Anthropic, Gemini,
+   Bedrock, OpenRouter, DeepSeek, Ollama, and any OpenAI-compatible API. Chat (with
+   text, images, audio, PDFs), generate images, create embeddings, use tools (function
+   calling), stream responses, and integrate with Rails effortlessly. Minimal dependencies,
+   maximum developer happiness - just clean Ruby code that works.
  email:
  - carmine@paolino.me
  executables: []
@@ -125,14 +126,21 @@ files:
  - lib/ruby_llm/active_record/acts_as.rb
  - lib/ruby_llm/aliases.json
  - lib/ruby_llm/aliases.rb
+ - lib/ruby_llm/attachments.rb
+ - lib/ruby_llm/attachments/audio.rb
+ - lib/ruby_llm/attachments/image.rb
+ - lib/ruby_llm/attachments/pdf.rb
  - lib/ruby_llm/chat.rb
  - lib/ruby_llm/chunk.rb
  - lib/ruby_llm/configuration.rb
+ - lib/ruby_llm/connection.rb
  - lib/ruby_llm/content.rb
+ - lib/ruby_llm/context.rb
  - lib/ruby_llm/embedding.rb
  - lib/ruby_llm/error.rb
  - lib/ruby_llm/image.rb
  - lib/ruby_llm/message.rb
+ - lib/ruby_llm/mime_types.rb
  - lib/ruby_llm/model_info.rb
  - lib/ruby_llm/models.json
  - lib/ruby_llm/models.rb
@@ -148,6 +156,7 @@ files:
  - lib/ruby_llm/providers/bedrock.rb
  - lib/ruby_llm/providers/bedrock/capabilities.rb
  - lib/ruby_llm/providers/bedrock/chat.rb
+ - lib/ruby_llm/providers/bedrock/media.rb
  - lib/ruby_llm/providers/bedrock/models.rb
  - lib/ruby_llm/providers/bedrock/signing.rb
  - lib/ruby_llm/providers/bedrock/streaming.rb
@@ -168,6 +177,9 @@ files:
  - lib/ruby_llm/providers/gemini/models.rb
  - lib/ruby_llm/providers/gemini/streaming.rb
  - lib/ruby_llm/providers/gemini/tools.rb
+ - lib/ruby_llm/providers/ollama.rb
+ - lib/ruby_llm/providers/ollama/chat.rb
+ - lib/ruby_llm/providers/ollama/media.rb
  - lib/ruby_llm/providers/openai.rb
  - lib/ruby_llm/providers/openai/capabilities.rb
  - lib/ruby_llm/providers/openai/chat.rb
@@ -177,18 +189,15 @@ files:
  - lib/ruby_llm/providers/openai/models.rb
  - lib/ruby_llm/providers/openai/streaming.rb
  - lib/ruby_llm/providers/openai/tools.rb
+ - lib/ruby_llm/providers/openrouter.rb
+ - lib/ruby_llm/providers/openrouter/models.rb
  - lib/ruby_llm/railtie.rb
  - lib/ruby_llm/stream_accumulator.rb
  - lib/ruby_llm/streaming.rb
  - lib/ruby_llm/tool.rb
  - lib/ruby_llm/tool_call.rb
+ - lib/ruby_llm/utils.rb
  - lib/ruby_llm/version.rb
- - lib/tasks/browser_helper.rb
- - lib/tasks/capability_generator.rb
- - lib/tasks/capability_scraper.rb
- - lib/tasks/cli_helper.rb
- - lib/tasks/code_validator.rb
- - lib/tasks/model_updater.rb
  - lib/tasks/models.rake
  - lib/tasks/models_docs.rake
  - lib/tasks/vcr.rake
@@ -220,5 +229,5 @@ requirements: []
  rubygems_version: 3.5.22
  signing_key:
  specification_version: 4
- summary: Beautiful Ruby interface to modern AI
+ summary: A single delightful Ruby way to work with AI.
  test_files: []
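The updated description advertises a single interface across providers. A minimal usage sketch of that surface (`RubyLLM.chat`, `embed`, and `paint` are the gem's documented entry points; the prompts and the choice of the OpenAI key are illustrative only):

    require 'ruby_llm'

    RubyLLM.configure do |config|
      config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
    end

    chat = RubyLLM.chat                    # same chat API regardless of provider
    chat.ask 'What is a delightful Ruby way to work with AI?'

    RubyLLM.embed 'a delightful Ruby way to work with AI'  # embeddings
    RubyLLM.paint 'a red panda writing Ruby'               # image generation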
data/lib/tasks/browser_helper.rb DELETED
@@ -1,97 +0,0 @@
- # frozen_string_literal: true
-
- require 'ferrum'
- require_relative 'cli_helper'
-
- class BrowserHelper # rubocop:disable Style/Documentation
-   REALISTIC_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36' # rubocop:disable Layout/LineLength
-
-   def initialize
-     @browser = create_browser
-   end
-
-   def goto(url)
-     @browser.goto(url)
-   end
-
-   def current_url
-     @browser.page.url
-   rescue StandardError
-     'N/A'
-   end
-
-   def get_page_content(context = 'current page') # rubocop:disable Metrics/MethodLength
-     puts " Extracting HTML for #{context}..."
-
-     begin
-       sleep(1.0) # Small delay for page stability
-       html = @browser.body
-
-       if html && !html.empty?
-         puts " Extracted ~#{html.length} chars of HTML"
-         puts ' WARNING: Challenge page detected' if html.match?(/challenge-platform|Checking site/)
-         html
-       else
-         puts ' Warning: Empty content returned'
-         ''
-       end
-     rescue StandardError => e
-       puts " Error getting HTML: #{e.class} - #{e.message}"
-       ''
-     end
-   end
-
-   def wait_for_page_load
-     handle_cloudflare_challenge
-   end
-
-   def close
-     puts "\nClosing browser..."
-     @browser.quit
-   rescue StandardError => e
-     puts " Warning: Error closing browser: #{e.message}"
-   end
-
-   private
-
-   def create_browser
-     puts ' Initializing browser for manual interaction...'
-
-     Ferrum::Browser.new(
-       window_size: [1366, 768],
-       headless: false,
-       browser_options: browser_options,
-       timeout: 120,
-       process_timeout: 120,
-       pending_connection_errors: false
-     )
-   end
-
-   def browser_options
-     {
-       'user-agent' => REALISTIC_USER_AGENT,
-       'disable-gpu' => nil,
-       'no-sandbox' => nil,
-       'disable-blink-features' => 'AutomationControlled',
-       'disable-infobars' => nil,
-       'start-maximized' => nil
-     }
-   end
-
-   def handle_cloudflare_challenge # rubocop:disable Metrics/MethodLength
-     puts "\nWaiting for Cloudflare challenge resolution..."
-     puts 'c: Challenge solved'
-     puts 'q: Quit/Skip'
-
-     choice = CliHelper.get_user_choice('Confirm when ready', %w[c q])
-     return false if choice == 'q'
-
-     begin
-       @browser.page.target_id
-       true
-     rescue StandardError
-       puts 'Browser check failed after challenge'
-       false
-     end
-   end
- end