ruby_llm 1.6.0 → 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +8 -34
- data/lib/ruby_llm/active_record/acts_as.rb +19 -5
- data/lib/ruby_llm/aliases.json +0 -4
- data/lib/ruby_llm/chat.rb +4 -11
- data/lib/ruby_llm/configuration.rb +0 -2
- data/lib/ruby_llm/content.rb +1 -1
- data/lib/ruby_llm/error.rb +0 -2
- data/lib/ruby_llm/models.json +369 -708
- data/lib/ruby_llm/models.rb +1 -5
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +1 -1
- data/lib/ruby_llm/providers/gemini/chat.rb +7 -1
- data/lib/ruby_llm/providers/gemini/streaming.rb +4 -1
- data/lib/ruby_llm/providers/openai/capabilities.rb +3 -3
- data/lib/ruby_llm/providers/openai/chat.rb +1 -4
- data/lib/ruby_llm/stream_accumulator.rb +2 -2
- data/lib/ruby_llm/streaming.rb +1 -1
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/tasks/aliases.rake +2 -2
- data/lib/tasks/models_docs.rake +4 -4
- metadata +9 -8
data/lib/ruby_llm/models.rb
CHANGED
@@ -64,16 +64,12 @@ module RubyLLM
|
|
64
64
|
|
65
65
|
model = Model::Info.new(
|
66
66
|
id: model_id,
|
67
|
-
name: model_id.gsub('-', ' ').capitalize,
|
67
|
+
name: model_id.tr('-', ' ').capitalize,
|
68
68
|
provider: provider_instance.slug,
|
69
69
|
capabilities: %w[function_calling streaming],
|
70
70
|
modalities: { input: %w[text image], output: %w[text] },
|
71
71
|
metadata: { warning: 'Assuming model exists, capabilities may not be accurate' }
|
72
72
|
)
|
73
|
-
if RubyLLM.config.log_assume_model_exists
|
74
|
-
RubyLLM.logger.warn "Assuming model '#{model_id}' exists for provider '#{provider}'. " \
|
75
|
-
'Capabilities may not be accurately reflected.'
|
76
|
-
end
|
77
73
|
else
|
78
74
|
model = Models.find model_id, provider
|
79
75
|
provider_class = Provider.providers[model.provider.to_sym] || raise(Error,
|
@@ -80,7 +80,7 @@ module RubyLLM
|
|
80
80
|
content: extract_content(data),
|
81
81
|
tool_calls: tool_calls,
|
82
82
|
input_tokens: data.dig('usageMetadata', 'promptTokenCount'),
|
83
|
-
output_tokens: data.dig('usageMetadata', 'candidatesTokenCount'),
|
83
|
+
output_tokens: calculate_output_tokens(data),
|
84
84
|
model_id: data['modelVersion'] || response.env.url.path.split('/')[3].split(':')[0],
|
85
85
|
raw: response
|
86
86
|
)
|
@@ -133,6 +133,12 @@ module RubyLLM
|
|
133
133
|
parts = candidate.dig('content', 'parts')
|
134
134
|
parts&.any? { |p| p['functionCall'] }
|
135
135
|
end
|
136
|
+
|
137
|
+
def calculate_output_tokens(data)
|
138
|
+
candidates = data.dig('usageMetadata', 'candidatesTokenCount') || 0
|
139
|
+
thoughts = data.dig('usageMetadata', 'thoughtsTokenCount') || 0
|
140
|
+
candidates + thoughts
|
141
|
+
end
|
136
142
|
end
|
137
143
|
end
|
138
144
|
end
|
@@ -42,7 +42,10 @@ module RubyLLM
|
|
42
42
|
end
|
43
43
|
|
44
44
|
def extract_output_tokens(data)
|
45
|
-
data.dig('usageMetadata', 'candidatesTokenCount')
|
45
|
+
candidates = data.dig('usageMetadata', 'candidatesTokenCount') || 0
|
46
|
+
thoughts = data.dig('usageMetadata', 'thoughtsTokenCount') || 0
|
47
|
+
total = candidates + thoughts
|
48
|
+
total.positive? ? total : nil
|
46
49
|
end
|
47
50
|
|
48
51
|
def parse_streaming_error(data)
|
@@ -198,11 +198,11 @@ module RubyLLM
|
|
198
198
|
.gsub(/(\d{4}) (\d{2}) (\d{2})/, '\1\2\3')
|
199
199
|
.gsub(/^(?:Gpt|Chatgpt|Tts|Dall E) /) { |m| special_prefix_format(m.strip) }
|
200
200
|
.gsub(/^O([13]) /, 'O\1-')
|
201
|
-
.gsub(/^O[13] Mini/, '\0'.gsub(' ', '-'))
|
201
|
+
.gsub(/^O[13] Mini/, '\0'.tr(' ', '-'))
|
202
202
|
.gsub(/\d\.\d /, '\0'.sub(' ', '-'))
|
203
203
|
.gsub(/4o (?=Mini|Preview|Turbo|Audio|Realtime|Transcribe|Tts)/, '4o-')
|
204
204
|
.gsub(/\bHd\b/, 'HD')
|
205
|
-
.gsub(/(?:Omni|Text) Moderation/, '\0'.gsub(' ', '-'))
|
205
|
+
.gsub(/(?:Omni|Text) Moderation/, '\0'.tr(' ', '-'))
|
206
206
|
.gsub('Text Embedding', 'text-embedding-')
|
207
207
|
end
|
208
208
|
|
@@ -216,7 +216,7 @@ module RubyLLM
|
|
216
216
|
end
|
217
217
|
|
218
218
|
def self.normalize_temperature(temperature, model_id)
|
219
|
-
if model_id.match?(/^o\d/)
|
219
|
+
if model_id.match?(/^(o\d|gpt-5)/)
|
220
220
|
RubyLLM.logger.debug "Model #{model_id} requires temperature=1.0, ignoring provided value"
|
221
221
|
1.0
|
222
222
|
elsif model_id.match?(/-search/)
|
@@ -21,10 +21,7 @@ module RubyLLM
|
|
21
21
|
# Only include temperature if it's not nil (some models don't accept it)
|
22
22
|
payload[:temperature] = temperature unless temperature.nil?
|
23
23
|
|
24
|
-
if tools.any?
|
25
|
-
payload[:tools] = tools.map { |_, tool| tool_for(tool) }
|
26
|
-
payload[:tool_choice] = 'auto'
|
27
|
-
end
|
24
|
+
payload[:tools] = tools.map { |_, tool| tool_for(tool) } if tools.any?
|
28
25
|
|
29
26
|
if schema
|
30
27
|
# Use strict mode from schema if specified, default to true
|
@@ -8,7 +8,7 @@ module RubyLLM
|
|
8
8
|
attr_reader :content, :model_id, :tool_calls
|
9
9
|
|
10
10
|
def initialize
|
11
|
-
@content = String.new
|
11
|
+
@content = +''
|
12
12
|
@tool_calls = {}
|
13
13
|
@input_tokens = 0
|
14
14
|
@output_tokens = 0
|
@@ -66,7 +66,7 @@ module RubyLLM
|
|
66
66
|
new_tool_calls.each_value do |tool_call|
|
67
67
|
if tool_call.id
|
68
68
|
tool_call_id = tool_call.id.empty? ? SecureRandom.uuid : tool_call.id
|
69
|
-
tool_call_arguments = tool_call.arguments.empty? ? String.new : tool_call.arguments
|
69
|
+
tool_call_arguments = tool_call.arguments.empty? ? +'' : tool_call.arguments
|
70
70
|
@tool_calls[tool_call.id] = ToolCall.new(
|
71
71
|
id: tool_call_id,
|
72
72
|
name: tool_call.name,
|
data/lib/ruby_llm/streaming.rb
CHANGED
data/lib/ruby_llm/version.rb
CHANGED
data/lib/tasks/aliases.rake
CHANGED
@@ -65,7 +65,7 @@ namespace :aliases do # rubocop:disable Metrics/BlockLength
|
|
65
65
|
|
66
66
|
base_name = Regexp.last_match(1)
|
67
67
|
# Normalize to Anthropic naming convention
|
68
|
-
anthropic_name = base_name.gsub('.', '-')
|
68
|
+
anthropic_name = base_name.tr('.', '-')
|
69
69
|
|
70
70
|
# Skip if we already have an alias for this
|
71
71
|
next if aliases[anthropic_name]
|
@@ -91,7 +91,7 @@ namespace :aliases do # rubocop:disable Metrics/BlockLength
|
|
91
91
|
# OpenRouter uses "google/" prefix and sometimes different naming
|
92
92
|
openrouter_variants = [
|
93
93
|
"google/#{model}",
|
94
|
-
"google/#{model.gsub('gemini-', 'gemini-').
|
94
|
+
"google/#{model.gsub('gemini-', 'gemini-').tr('.', '-')}",
|
95
95
|
"google/#{model.gsub('gemini-', 'gemini-')}"
|
96
96
|
]
|
97
97
|
|
data/lib/tasks/models_docs.rake
CHANGED
@@ -86,7 +86,7 @@ def generate_models_markdown
|
|
86
86
|
end
|
87
87
|
|
88
88
|
def generate_provider_sections
|
89
|
-
RubyLLM::Provider.providers.map do |provider, provider_class|
|
89
|
+
RubyLLM::Provider.providers.filter_map do |provider, provider_class|
|
90
90
|
models = RubyLLM.models.by_provider(provider)
|
91
91
|
next if models.none?
|
92
92
|
|
@@ -95,7 +95,7 @@ def generate_provider_sections
|
|
95
95
|
|
96
96
|
#{models_table(models)}
|
97
97
|
PROVIDER
|
98
|
-
end.compact.join("\n\n")
|
98
|
+
end.join("\n\n")
|
99
99
|
end
|
100
100
|
|
101
101
|
def generate_capability_sections
|
@@ -107,7 +107,7 @@ def generate_capability_sections
|
|
107
107
|
'Batch Processing' => RubyLLM.models.select { |m| m.capabilities.include?('batch') }
|
108
108
|
}
|
109
109
|
|
110
|
-
capabilities.map do |capability, models|
|
110
|
+
capabilities.filter_map do |capability, models|
|
111
111
|
next if models.none?
|
112
112
|
|
113
113
|
<<~CAPABILITY
|
@@ -115,7 +115,7 @@ def generate_capability_sections
|
|
115
115
|
|
116
116
|
#{models_table(models)}
|
117
117
|
CAPABILITY
|
118
|
-
end.compact.join("\n\n")
|
118
|
+
end.join("\n\n")
|
119
119
|
end
|
120
120
|
|
121
121
|
def generate_modality_sections # rubocop:disable Metrics/PerceivedComplexity
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: ruby_llm
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 1.6.0
|
4
|
+
version: 1.6.2
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Carmine Paolino
|
@@ -121,12 +121,13 @@ dependencies:
|
|
121
121
|
- - "~>"
|
122
122
|
- !ruby/object:Gem::Version
|
123
123
|
version: '2'
|
124
|
-
description:
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
124
|
+
description: One beautiful Ruby API for GPT, Claude, Gemini, and more. Easily build
|
125
|
+
chatbots, AI agents, RAG applications, and content generators. Features chat (text,
|
126
|
+
images, audio, PDFs), image generation, embeddings, tools (function calling), structured
|
127
|
+
output, Rails integration, and streaming. Works with OpenAI, Anthropic, Google Gemini,
|
128
|
+
AWS Bedrock, DeepSeek, Mistral, Ollama (local models), OpenRouter, Perplexity, GPUStack,
|
129
|
+
and any OpenAI-compatible API. Minimal dependencies - just Faraday, Zeitwerk, and
|
130
|
+
Marcel.
|
130
131
|
email:
|
131
132
|
- carmine@paolino.me
|
132
133
|
executables: []
|
@@ -266,5 +267,5 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
266
267
|
requirements: []
|
267
268
|
rubygems_version: 3.6.9
|
268
269
|
specification_version: 4
|
269
|
-
summary:
|
270
|
+
summary: One beautiful Ruby API for GPT, Claude, Gemini, and more.
|
270
271
|
test_files: []
|