ruby_llm 1.5.1 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/lib/ruby_llm/active_record/acts_as.rb +46 -6
  4. data/lib/ruby_llm/aliases.json +27 -3
  5. data/lib/ruby_llm/chat.rb +27 -6
  6. data/lib/ruby_llm/configuration.rb +7 -18
  7. data/lib/ruby_llm/connection.rb +11 -6
  8. data/lib/ruby_llm/context.rb +2 -3
  9. data/lib/ruby_llm/embedding.rb +3 -4
  10. data/lib/ruby_llm/error.rb +2 -2
  11. data/lib/ruby_llm/image.rb +3 -4
  12. data/lib/ruby_llm/message.rb +4 -0
  13. data/lib/ruby_llm/models.json +7306 -6676
  14. data/lib/ruby_llm/models.rb +22 -31
  15. data/lib/ruby_llm/provider.rb +150 -89
  16. data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -2
  17. data/lib/ruby_llm/providers/anthropic/chat.rb +1 -1
  18. data/lib/ruby_llm/providers/anthropic/embeddings.rb +1 -1
  19. data/lib/ruby_llm/providers/anthropic/media.rb +1 -1
  20. data/lib/ruby_llm/providers/anthropic/models.rb +1 -1
  21. data/lib/ruby_llm/providers/anthropic/streaming.rb +1 -1
  22. data/lib/ruby_llm/providers/anthropic/tools.rb +1 -1
  23. data/lib/ruby_llm/providers/anthropic.rb +17 -22
  24. data/lib/ruby_llm/providers/bedrock/capabilities.rb +3 -63
  25. data/lib/ruby_llm/providers/bedrock/chat.rb +5 -4
  26. data/lib/ruby_llm/providers/bedrock/media.rb +1 -1
  27. data/lib/ruby_llm/providers/bedrock/models.rb +5 -6
  28. data/lib/ruby_llm/providers/bedrock/signing.rb +1 -1
  29. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +5 -4
  30. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +1 -1
  31. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +1 -1
  32. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +1 -1
  33. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +1 -1
  34. data/lib/ruby_llm/providers/bedrock/streaming.rb +1 -1
  35. data/lib/ruby_llm/providers/bedrock.rb +26 -31
  36. data/lib/ruby_llm/providers/deepseek/capabilities.rb +16 -57
  37. data/lib/ruby_llm/providers/deepseek/chat.rb +1 -1
  38. data/lib/ruby_llm/providers/deepseek.rb +12 -17
  39. data/lib/ruby_llm/providers/gemini/capabilities.rb +1 -1
  40. data/lib/ruby_llm/providers/gemini/chat.rb +1 -1
  41. data/lib/ruby_llm/providers/gemini/embeddings.rb +1 -1
  42. data/lib/ruby_llm/providers/gemini/images.rb +1 -1
  43. data/lib/ruby_llm/providers/gemini/media.rb +1 -1
  44. data/lib/ruby_llm/providers/gemini/models.rb +1 -1
  45. data/lib/ruby_llm/providers/gemini/streaming.rb +1 -1
  46. data/lib/ruby_llm/providers/gemini/tools.rb +1 -7
  47. data/lib/ruby_llm/providers/gemini.rb +18 -23
  48. data/lib/ruby_llm/providers/gpustack/chat.rb +1 -1
  49. data/lib/ruby_llm/providers/gpustack/models.rb +1 -1
  50. data/lib/ruby_llm/providers/gpustack.rb +16 -19
  51. data/lib/ruby_llm/providers/mistral/capabilities.rb +1 -1
  52. data/lib/ruby_llm/providers/mistral/chat.rb +1 -1
  53. data/lib/ruby_llm/providers/mistral/embeddings.rb +1 -1
  54. data/lib/ruby_llm/providers/mistral/models.rb +1 -1
  55. data/lib/ruby_llm/providers/mistral.rb +14 -19
  56. data/lib/ruby_llm/providers/ollama/chat.rb +1 -1
  57. data/lib/ruby_llm/providers/ollama/media.rb +1 -1
  58. data/lib/ruby_llm/providers/ollama.rb +13 -18
  59. data/lib/ruby_llm/providers/openai/capabilities.rb +2 -2
  60. data/lib/ruby_llm/providers/openai/chat.rb +2 -2
  61. data/lib/ruby_llm/providers/openai/embeddings.rb +1 -1
  62. data/lib/ruby_llm/providers/openai/images.rb +1 -1
  63. data/lib/ruby_llm/providers/openai/media.rb +1 -1
  64. data/lib/ruby_llm/providers/openai/models.rb +1 -1
  65. data/lib/ruby_llm/providers/openai/streaming.rb +1 -1
  66. data/lib/ruby_llm/providers/openai/tools.rb +1 -1
  67. data/lib/ruby_llm/providers/openai.rb +24 -36
  68. data/lib/ruby_llm/providers/openrouter/models.rb +1 -1
  69. data/lib/ruby_llm/providers/openrouter.rb +9 -14
  70. data/lib/ruby_llm/providers/perplexity/capabilities.rb +1 -30
  71. data/lib/ruby_llm/providers/perplexity/chat.rb +1 -1
  72. data/lib/ruby_llm/providers/perplexity/models.rb +1 -1
  73. data/lib/ruby_llm/providers/perplexity.rb +13 -18
  74. data/lib/ruby_llm/stream_accumulator.rb +3 -3
  75. data/lib/ruby_llm/streaming.rb +16 -3
  76. data/lib/ruby_llm/tool.rb +19 -0
  77. data/lib/ruby_llm/version.rb +1 -1
  78. data/lib/tasks/models_docs.rake +18 -11
  79. data/lib/tasks/models_update.rake +5 -4
  80. metadata +1 -1
data/lib/ruby_llm/providers/perplexity/chat.rb CHANGED
@@ -2,7 +2,7 @@
 
 module RubyLLM
   module Providers
-    module Perplexity
+    class Perplexity
       # Chat formatting for Perplexity provider
       module Chat
        module_function
data/lib/ruby_llm/providers/perplexity/models.rb CHANGED
@@ -2,7 +2,7 @@
 
 module RubyLLM
   module Providers
-    module Perplexity
+    class Perplexity
       # Models methods of the Perplexity API integration
       module Models
        def list_models(**)
data/lib/ruby_llm/providers/perplexity.rb CHANGED
@@ -3,34 +3,29 @@
 module RubyLLM
   module Providers
     # Perplexity API integration.
-    module Perplexity
-      extend OpenAI
-      extend Perplexity::Chat
-      extend Perplexity::Models
+    class Perplexity < OpenAI
+      include Perplexity::Chat
+      include Perplexity::Models
 
-      module_function
-
-      def api_base(_config)
+      def api_base
         'https://api.perplexity.ai'
       end
 
-      def headers(config)
+      def headers
         {
-          'Authorization' => "Bearer #{config.perplexity_api_key}",
+          'Authorization' => "Bearer #{@config.perplexity_api_key}",
           'Content-Type' => 'application/json'
         }
       end
 
-      def capabilities
-        Perplexity::Capabilities
-      end
-
-      def slug
-        'perplexity'
-      end
+      class << self
+        def capabilities
+          Perplexity::Capabilities
+        end
 
-      def configuration_requirements
-        %i[perplexity_api_key]
+        def configuration_requirements
+          %i[perplexity_api_key]
+        end
       end
 
       def parse_error(response)
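Note: this release converts providers from modules that extend shared behavior into classes that inherit from a base provider, resolve the endpoint and headers through instance methods, and read credentials from an instance-level @config. A minimal sketch of a provider in the new shape, assuming a hypothetical MyProvider with a made-up my_provider_api_key setting (neither name is part of this release):

  module RubyLLM
    module Providers
      # Illustrative only: mirrors the instance-based pattern Perplexity uses above.
      class MyProvider < OpenAI
        def api_base
          'https://api.example.com/v1'
        end

        def headers
          { 'Authorization' => "Bearer #{@config.my_provider_api_key}" }
        end

        class << self
          def configuration_requirements
            %i[my_provider_api_key]
          end
        end
      end
    end
  end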
data/lib/ruby_llm/stream_accumulator.rb CHANGED
@@ -16,7 +16,7 @@ module RubyLLM
     end
 
     def add(chunk)
-      RubyLLM.logger.debug chunk.inspect
+      RubyLLM.logger.debug chunk.inspect if RubyLLM.config.log_stream_debug
       @model_id ||= chunk.model_id
 
       if chunk.tool_call?
@@ -26,7 +26,7 @@ module RubyLLM
       end
 
       count_tokens chunk
-      RubyLLM.logger.debug inspect
+      RubyLLM.logger.debug inspect if RubyLLM.config.log_stream_debug
     end
 
     def to_message(response)
@@ -62,7 +62,7 @@ module RubyLLM
     end
 
     def accumulate_tool_calls(new_tool_calls)
-      RubyLLM.logger.debug "Accumulating tool calls: #{new_tool_calls}"
+      RubyLLM.logger.debug "Accumulating tool calls: #{new_tool_calls}" if RubyLLM.config.log_stream_debug
       new_tool_calls.each_value do |tool_call|
         if tool_call.id
           tool_call_id = tool_call.id.empty? ? SecureRandom.uuid : tool_call.id
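Note: the chunk-by-chunk debug logging above is now gated behind a log_stream_debug configuration flag instead of always firing. A sketch of enabling it, assuming the standard RubyLLM.configure block exposes a writer for the flag that this code reads via RubyLLM.config.log_stream_debug:

  RubyLLM.configure do |config|
    config.log_stream_debug = true # assumed writer for the flag read as RubyLLM.config.log_stream_debug
  end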
data/lib/ruby_llm/streaming.rb CHANGED
@@ -8,10 +8,12 @@ module RubyLLM
   module Streaming
     module_function
 
-    def stream_response(connection, payload, &block)
+    def stream_response(connection, payload, additional_headers = {}, &block)
       accumulator = StreamAccumulator.new
 
       response = connection.post stream_url, payload do |req|
+        # Merge additional headers, with existing headers taking precedence
+        req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
         if req.options.respond_to?(:on_data)
           # Handle Faraday 2.x streaming with on_data method
           req.options.on_data = handle_stream do |chunk|
@@ -27,7 +29,9 @@ module RubyLLM
         end
       end
 
-      accumulator.to_message(response)
+      message = accumulator.to_message(response)
+      RubyLLM.logger.debug "Stream completed: #{message.inspect}"
+      message
     end
 
     def handle_stream(&block)
@@ -56,7 +60,7 @@ module RubyLLM
     end
 
     def process_stream_chunk(chunk, parser, env, &)
-      RubyLLM.logger.debug "Received chunk: #{chunk}"
+      RubyLLM.logger.debug "Received chunk: #{chunk}" if RubyLLM.config.log_stream_debug
 
       if error_chunk?(chunk)
         handle_error_chunk(chunk, env)
@@ -145,5 +149,14 @@ module RubyLLM
     rescue JSON::ParserError => e
       RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
     end
+
+    # Default implementation - providers should override this method
+    def parse_streaming_error(data)
+      error_data = JSON.parse(data)
+      [500, error_data['message'] || 'Unknown streaming error']
+    rescue JSON::ParserError => e
+      RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
+      [500, "Failed to parse error: #{data}"]
+    end
   end
 end
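Note: the new parse_streaming_error default parses the raw error payload and returns a [status, message] pair, and its comment says providers are expected to override it. A hypothetical override for an API that nests its message under an 'error' key (the payload shape and field names are illustrative, not taken from this release):

  def parse_streaming_error(data)
    error = JSON.parse(data)['error'] || {}
    # Keep the same [status, message] contract as the default implementation
    [error['code'] || 500, error['message'] || 'Unknown streaming error']
  rescue JSON::ParserError
    [500, "Failed to parse error: #{data}"]
  end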
data/lib/ruby_llm/tool.rb CHANGED
@@ -32,6 +32,19 @@ module RubyLLM
   #   end
   # end
   class Tool
+    # Stops conversation continuation after tool execution
+    class Halt
+      attr_reader :content
+
+      def initialize(content)
+        @content = content
+      end
+
+      def to_s
+        @content.to_s
+      end
+    end
+
     class << self
       def description(text = nil)
         return @description unless text
@@ -77,5 +90,11 @@ module RubyLLM
     def execute(...)
       raise NotImplementedError, 'Subclasses must implement #execute'
     end
+
+    protected
+
+    def halt(message)
+      Halt.new(message)
+    end
   end
 end
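Note: Tool::Halt lets a tool stop the conversation from continuing after its result is returned, and the protected halt helper wraps any content in a Halt. A minimal sketch of a tool using it (the class name and message are illustrative; only description, execute, and halt come from the library):

  class FinishTask < RubyLLM::Tool
    description 'Marks the task as finished and stops further assistant turns'

    def execute(**)
      # Returning a Halt signals that the chat loop should not continue
      halt 'Task complete.'
    end
  end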
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '1.5.1'
+  VERSION = '1.6.0'
 end
data/lib/tasks/models_docs.rake CHANGED
@@ -12,8 +12,8 @@ namespace :models do
     output = generate_models_markdown
 
     # Write the output
-    File.write('docs/available-models.md', output)
-    puts 'Generated docs/available-models.md'
+    File.write('docs/_reference/available-models.md', output)
+    puts 'Generated docs/_reference/available-models.md'
   end
 end
 
@@ -22,15 +22,16 @@ def generate_models_markdown
     ---
     layout: default
     title: Available Models
-    nav_order: 5
-    permalink: /available-models
+    nav_order: 1
     description: Browse hundreds of AI models from every major provider. Always up-to-date, automatically generated.
+    redirect_from:
+      - /guides/available-models
     ---
 
-    # Available Models
+    # {{ page.title }}
     {: .no_toc }
 
-    Every model, every provider, always current. Your complete AI model reference.
+    {{ page.description }}
     {: .fs-6 .fw-300 }
 
     ## Table of contents
@@ -41,6 +42,13 @@ def generate_models_markdown
 
     ---
 
+    After reading this guide, you will know:
+
+    * How RubyLLM's model registry works and where data comes from
+    * How to find models by provider, capability, or purpose
+    * What information is available for each model
+    * How to use model aliases for simpler configuration
+
     ## How Model Data Works
 
     RubyLLM's model registry combines data from multiple sources:
@@ -78,12 +86,12 @@ def generate_models_markdown
 end
 
 def generate_provider_sections
-  RubyLLM::Provider.providers.keys.map do |provider|
+  RubyLLM::Provider.providers.map do |provider, provider_class|
     models = RubyLLM.models.by_provider(provider)
     next if models.none?
 
     <<~PROVIDER
-      ### #{provider.to_s.capitalize} (#{models.count})
+      ### #{provider_class.name} (#{models.count})
 
       #{models_table(models)}
     PROVIDER
@@ -167,15 +175,14 @@ end
 def models_table(models)
   return '*No models found*' if models.none?
 
-  headers = ['Model', 'ID', 'Provider', 'Context', 'Max Output', 'Standard Pricing (per 1M tokens)']
-  alignment = [':--', ':--', ':--', '--:', '--:', ':--']
+  headers = ['Model', 'Provider', 'Context', 'Max Output', 'Standard Pricing (per 1M tokens)']
+  alignment = [':--', ':--', '--:', '--:', ':--']
 
   rows = models.sort_by { |m| [m.provider, m.name] }.map do |model|
     # Format pricing information
    pricing = standard_pricing_display(model)
 
     [
-      model.name,
       model.id,
       model.provider,
       model.context_window || '-',
data/lib/tasks/models_update.rake CHANGED
@@ -86,8 +86,8 @@ def display_model_stats
   puts "\nModel count:"
   provider_counts = @models.all.group_by(&:provider).transform_values(&:count)
 
-  RubyLLM::Provider.providers.each_key do |sym|
-    name = sym.to_s.capitalize
+  RubyLLM::Provider.providers.each do |sym, provider_class|
+    name = provider_class.name
     count = provider_counts[sym.to_s] || 0
     status = status(sym)
     puts "  #{name}: #{count} models #{status}"
@@ -97,9 +97,10 @@ def display_model_stats
 end
 
 def status(provider_sym)
-  if RubyLLM::Provider.providers[provider_sym].local?
+  provider_class = RubyLLM::Provider.providers[provider_sym]
+  if provider_class.local?
     ' (LOCAL - SKIP)'
-  elsif RubyLLM::Provider.providers[provider_sym].configured?
+  elsif provider_class.configured?(RubyLLM.config)
     ' (OK)'
   else
     ' (NOT CONFIGURED)'
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 1.5.1
+  version: 1.6.0
 platform: ruby
 authors:
 - Carmine Paolino