ruby_llm 1.6.0 → 1.6.2

This diff shows the changes between the publicly released 1.6.0 and 1.6.2 versions of the ruby_llm gem, as they appear in the public registry. It is provided for informational purposes only.
@@ -64,16 +64,12 @@ module RubyLLM
 
       model = Model::Info.new(
         id: model_id,
-        name: model_id.gsub('-', ' ').capitalize,
+        name: model_id.tr('-', ' ').capitalize,
         provider: provider_instance.slug,
         capabilities: %w[function_calling streaming],
         modalities: { input: %w[text image], output: %w[text] },
         metadata: { warning: 'Assuming model exists, capabilities may not be accurate' }
       )
-      if RubyLLM.config.log_assume_model_exists
-        RubyLLM.logger.warn "Assuming model '#{model_id}' exists for provider '#{provider}'. " \
-                            'Capabilities may not be accurately reflected.'
-      end
     else
       model = Models.find model_id, provider
       provider_class = Provider.providers[model.provider.to_sym] || raise(Error,
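Two things change in this hunk. First, `gsub('-', ' ')` becomes `tr('-', ' ')`: for one-to-one character substitution `tr` is the idiomatic single-pass choice, with `gsub` reserved for pattern replacement. A quick sketch of the identical behavior, using a made-up model id:

```ruby
'gpt-4o-mini'.gsub('-', ' ').capitalize # => "Gpt 4o mini"
'gpt-4o-mini'.tr('-', ' ').capitalize   # => "Gpt 4o mini" (same result, one pass)
```

Second, the inline `log_assume_model_exists` warning block is dropped; this hunk alone doesn't show whether the warning was removed outright or is now emitted elsewhere.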
@@ -47,7 +47,7 @@ module RubyLLM
     end
 
     def handle_stream(&block)
-      buffer = String.new
+      buffer = +''
       proc do |chunk, _bytes, env|
         if env && env.status != 200
           handle_failed_response(chunk, buffer, env)
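This is the first of several `String.new` → `+''` swaps (the same change recurs in the message-accumulator and JSON-stream hunks below). Both produce a mutable buffer under `# frozen_string_literal: true`; the unary-plus form is shorter and keeps the literal's UTF-8 encoding, whereas `String.new` defaults to ASCII-8BIT. A minimal illustration:

```ruby
# frozen_string_literal: true

buffer = +''          # mutable copy of the frozen empty literal
buffer << 'data: {}'  # appending works; a bare '' here would raise FrozenError
buffer.encoding       # => #<Encoding:UTF-8>
String.new.encoding   # => #<Encoding:ASCII-8BIT>
```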
@@ -80,7 +80,7 @@ module RubyLLM
         content: extract_content(data),
         tool_calls: tool_calls,
         input_tokens: data.dig('usageMetadata', 'promptTokenCount'),
-        output_tokens: data.dig('usageMetadata', 'candidatesTokenCount'),
+        output_tokens: calculate_output_tokens(data),
         model_id: data['modelVersion'] || response.env.url.path.split('/')[3].split(':')[0],
         raw: response
       )
@@ -133,6 +133,12 @@ module RubyLLM
         parts = candidate.dig('content', 'parts')
         parts&.any? { |p| p['functionCall'] }
       end
+
+      def calculate_output_tokens(data)
+        candidates = data.dig('usageMetadata', 'candidatesTokenCount') || 0
+        thoughts = data.dig('usageMetadata', 'thoughtsTokenCount') || 0
+        candidates + thoughts
+      end
     end
   end
 end
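Gemini reports reasoning ("thinking") tokens in `usageMetadata.thoughtsTokenCount`, separate from the visible `candidatesTokenCount`, so the old code under-counted output on thinking models. A sketch of the new helper's arithmetic against a hypothetical payload:

```ruby
# Hypothetical usageMetadata from a Gemini thinking model:
data = {
  'usageMetadata' => {
    'promptTokenCount' => 42,
    'candidatesTokenCount' => 100, # visible output tokens
    'thoughtsTokenCount' => 250    # reasoning tokens; absent on non-thinking models
  }
}

candidates = data.dig('usageMetadata', 'candidatesTokenCount') || 0
thoughts   = data.dig('usageMetadata', 'thoughtsTokenCount') || 0
candidates + thoughts # => 350, what calculate_output_tokens now returns
```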
@@ -42,7 +42,10 @@ module RubyLLM
       end
 
       def extract_output_tokens(data)
-        data.dig('usageMetadata', 'candidatesTokenCount')
+        candidates = data.dig('usageMetadata', 'candidatesTokenCount') || 0
+        thoughts = data.dig('usageMetadata', 'thoughtsTokenCount') || 0
+        total = candidates + thoughts
+        total.positive? ? total : nil
       end
 
       def parse_streaming_error(data)
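The second copy of the calculation (in the streaming path, judging by the nearby `parse_streaming_error`) adds one refinement: it returns `nil` rather than `0` when neither count is present, so `output_tokens` stays unset instead of reporting a spurious zero:

```ruby
total = 0
total.positive? ? total : nil # => nil; missing usage metadata stays nil, not 0
```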
@@ -198,11 +198,11 @@ module RubyLLM
           .gsub(/(\d{4}) (\d{2}) (\d{2})/, '\1\2\3')
           .gsub(/^(?:Gpt|Chatgpt|Tts|Dall E) /) { |m| special_prefix_format(m.strip) }
           .gsub(/^O([13]) /, 'O\1-')
-          .gsub(/^O[13] Mini/, '\0'.gsub(' ', '-'))
+          .gsub(/^O[13] Mini/, '\0'.tr(' ', '-'))
           .gsub(/\d\.\d /, '\0'.sub(' ', '-'))
           .gsub(/4o (?=Mini|Preview|Turbo|Audio|Realtime|Transcribe|Tts)/, '4o-')
           .gsub(/\bHd\b/, 'HD')
-          .gsub(/(?:Omni|Text) Moderation/, '\0'.gsub(' ', '-'))
+          .gsub(/(?:Omni|Text) Moderation/, '\0'.tr(' ', '-'))
           .gsub('Text Embedding', 'text-embedding-')
       end
 
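A subtlety in both changed lines: the replacement argument is evaluated before `gsub` ever sees a match, so `'\0'.tr(' ', '-')` runs `tr` on the literal two-character string `\0` (which contains no space) and returns it unchanged, exactly as the old `'\0'.gsub(' ', '-')` did. The swap is a pure style change; actually hyphenating the match would need the block form:

```ruby
'O1 Mini'.gsub(/^O[13] Mini/, '\0'.tr(' ', '-'))     # => "O1 Mini" (replacement computed first)
'O1 Mini'.gsub(/^O[13] Mini/) { |m| m.tr(' ', '-') } # => "O1-Mini" (block sees the real match)
```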
@@ -216,7 +216,7 @@ module RubyLLM
       end
 
       def self.normalize_temperature(temperature, model_id)
-        if model_id.match?(/^o\d/)
+        if model_id.match?(/^(o\d|gpt-5)/)
           RubyLLM.logger.debug "Model #{model_id} requires temperature=1.0, ignoring provided value"
           1.0
         elsif model_id.match?(/-search/)
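GPT-5 models, like the o-series reasoning models, only accept the default temperature, so the regex is widened to pin them to 1.0 as well:

```ruby
regex = /^(o\d|gpt-5)/
'o3-mini'.match?(regex)    # => true  (matched before this change too)
'gpt-5-mini'.match?(regex) # => true  (newly matched)
'gpt-4o'.match?(regex)     # => false (temperature passes through unchanged)
```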
@@ -21,10 +21,7 @@ module RubyLLM
         # Only include temperature if it's not nil (some models don't accept it)
         payload[:temperature] = temperature unless temperature.nil?
 
-        if tools.any?
-          payload[:tools] = tools.map { |_, tool| tool_for(tool) }
-          payload[:tool_choice] = 'auto'
-        end
+        payload[:tools] = tools.map { |_, tool| tool_for(tool) } if tools.any?
 
         if schema
           # Use strict mode from schema if specified, default to true
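Dropping the explicit `tool_choice: 'auto'` leans on the server default: OpenAI-compatible chat APIs already treat `tool_choice` as `'auto'` whenever `tools` is supplied, so the collapsed one-liner sends a semantically equivalent request.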
@@ -8,7 +8,7 @@ module RubyLLM
     attr_reader :content, :model_id, :tool_calls
 
     def initialize
-      @content = String.new
+      @content = +''
       @tool_calls = {}
       @input_tokens = 0
       @output_tokens = 0
@@ -66,7 +66,7 @@ module RubyLLM
       new_tool_calls.each_value do |tool_call|
         if tool_call.id
           tool_call_id = tool_call.id.empty? ? SecureRandom.uuid : tool_call.id
-          tool_call_arguments = tool_call.arguments.empty? ? String.new : tool_call.arguments
+          tool_call_arguments = tool_call.arguments.empty? ? +'' : tool_call.arguments
           @tool_calls[tool_call.id] = ToolCall.new(
             id: tool_call_id,
             name: tool_call.name,
@@ -43,7 +43,7 @@ module RubyLLM
     private
 
     def to_json_stream(&)
-      buffer = String.new
+      buffer = +''
       parser = EventStreamParser::Parser.new
 
       create_stream_processor(parser, buffer, &)
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '1.6.0'
+  VERSION = '1.6.2'
 end
@@ -65,7 +65,7 @@ namespace :aliases do # rubocop:disable Metrics/BlockLength
 
       base_name = Regexp.last_match(1)
       # Normalize to Anthropic naming convention
-      anthropic_name = base_name.gsub('.', '-')
+      anthropic_name = base_name.tr('.', '-')
 
       # Skip if we already have an alias for this
       next if aliases[anthropic_name]
@@ -91,7 +91,7 @@ namespace :aliases do # rubocop:disable Metrics/BlockLength
       # OpenRouter uses "google/" prefix and sometimes different naming
       openrouter_variants = [
         "google/#{model}",
-        "google/#{model.gsub('gemini-', 'gemini-').gsub('.', '-')}",
+        "google/#{model.gsub('gemini-', 'gemini-').tr('.', '-')}",
         "google/#{model.gsub('gemini-', 'gemini-')}"
       ]
 
@@ -86,7 +86,7 @@ def generate_models_markdown
 end
 
 def generate_provider_sections
-  RubyLLM::Provider.providers.map do |provider, provider_class|
+  RubyLLM::Provider.providers.filter_map do |provider, provider_class|
     models = RubyLLM.models.by_provider(provider)
     next if models.none?
 
@@ -95,7 +95,7 @@ def generate_provider_sections
 
       #{models_table(models)}
     PROVIDER
-  end.compact.join("\n\n")
+  end.join("\n\n")
 end
 
 def generate_capability_sections
@@ -107,7 +107,7 @@ def generate_capability_sections
     'Batch Processing' => RubyLLM.models.select { |m| m.capabilities.include?('batch') }
   }
 
-  capabilities.map do |capability, models|
+  capabilities.filter_map do |capability, models|
     next if models.none?
 
     <<~CAPABILITY
@@ -115,7 +115,7 @@ def generate_capability_sections
 
       #{models_table(models)}
     CAPABILITY
-  end.compact.join("\n\n")
+  end.join("\n\n")
 end
 
 def generate_modality_sections # rubocop:disable Metrics/PerceivedComplexity
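The four hunks above are one refactor applied twice: Ruby 2.7's `filter_map` fuses `map` with the trailing `compact`, discarding the `nil` that `next if models.none?` yields for empty providers or capabilities. The equivalence in miniature:

```ruby
[1, 2, 3, 4].map { |n| n * 2 if n.even? }.compact # => [4, 8]
[1, 2, 3, 4].filter_map { |n| n * 2 if n.even? }  # => [4, 8]
```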
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 1.6.0
+  version: 1.6.2
 platform: ruby
 authors:
 - Carmine Paolino
@@ -121,12 +121,13 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '2'
-description: A delightful Ruby way to work with AI. Tired of juggling different SDKs?
-  RubyLLM provides one beautiful, Ruby-like interface for OpenAI, Anthropic, Gemini,
-  Bedrock, OpenRouter, DeepSeek, Ollama, and any OpenAI-compatible API. Chat (with
-  text, images, audio, PDFs), generate images, create embeddings, use tools (function
-  calling), stream responses, and integrate with Rails effortlessly. Minimal dependencies,
-  maximum developer happiness - just clean Ruby code that works.
+description: One beautiful Ruby API for GPT, Claude, Gemini, and more. Easily build
+  chatbots, AI agents, RAG applications, and content generators. Features chat (text,
+  images, audio, PDFs), image generation, embeddings, tools (function calling), structured
+  output, Rails integration, and streaming. Works with OpenAI, Anthropic, Google Gemini,
+  AWS Bedrock, DeepSeek, Mistral, Ollama (local models), OpenRouter, Perplexity, GPUStack,
+  and any OpenAI-compatible API. Minimal dependencies - just Faraday, Zeitwerk, and
+  Marcel.
 email:
 - carmine@paolino.me
 executables: []
@@ -266,5 +267,5 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 requirements: []
 rubygems_version: 3.6.9
 specification_version: 4
-summary: A single delightful Ruby way to work with AI.
+summary: One beautiful Ruby API for GPT, Claude, Gemini, and more.
 test_files: []