ruby_llm 1.9.1 → 1.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +3 -2
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +1 -0
- data/lib/generators/ruby_llm/upgrade_to_v1_10/templates/add_v1_10_message_columns.rb.tt +19 -0
- data/lib/generators/ruby_llm/upgrade_to_v1_10/upgrade_to_v1_10_generator.rb +50 -0
- data/lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt +1 -1
- data/lib/ruby_llm/active_record/acts_as_legacy.rb +5 -1
- data/lib/ruby_llm/active_record/chat_methods.rb +12 -0
- data/lib/ruby_llm/active_record/message_methods.rb +41 -8
- data/lib/ruby_llm/aliases.json +101 -21
- data/lib/ruby_llm/chat.rb +10 -7
- data/lib/ruby_llm/configuration.rb +1 -1
- data/lib/ruby_llm/message.rb +37 -11
- data/lib/ruby_llm/models.json +21119 -10230
- data/lib/ruby_llm/models.rb +271 -27
- data/lib/ruby_llm/models_schema.json +2 -2
- data/lib/ruby_llm/provider.rb +4 -3
- data/lib/ruby_llm/providers/anthropic/chat.rb +128 -13
- data/lib/ruby_llm/providers/anthropic/streaming.rb +25 -1
- data/lib/ruby_llm/providers/bedrock/chat.rb +58 -15
- data/lib/ruby_llm/providers/bedrock/models.rb +21 -15
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +59 -2
- data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +5 -0
- data/lib/ruby_llm/providers/gemini/chat.rb +69 -3
- data/lib/ruby_llm/providers/gemini/streaming.rb +32 -1
- data/lib/ruby_llm/providers/gemini/tools.rb +16 -3
- data/lib/ruby_llm/providers/gpustack/chat.rb +1 -1
- data/lib/ruby_llm/providers/mistral/chat.rb +58 -1
- data/lib/ruby_llm/providers/ollama/chat.rb +1 -1
- data/lib/ruby_llm/providers/openai/capabilities.rb +6 -2
- data/lib/ruby_llm/providers/openai/chat.rb +87 -3
- data/lib/ruby_llm/providers/openai/streaming.rb +11 -3
- data/lib/ruby_llm/providers/openai/temperature.rb +28 -0
- data/lib/ruby_llm/providers/openai.rb +1 -1
- data/lib/ruby_llm/providers/openrouter/chat.rb +154 -0
- data/lib/ruby_llm/providers/openrouter/streaming.rb +74 -0
- data/lib/ruby_llm/providers/openrouter.rb +2 -0
- data/lib/ruby_llm/providers/vertexai.rb +5 -1
- data/lib/ruby_llm/stream_accumulator.rb +111 -14
- data/lib/ruby_llm/streaming.rb +76 -54
- data/lib/ruby_llm/thinking.rb +49 -0
- data/lib/ruby_llm/tokens.rb +47 -0
- data/lib/ruby_llm/tool.rb +1 -1
- data/lib/ruby_llm/tool_call.rb +6 -3
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/tasks/models.rake +20 -13
- metadata +12 -5
data/lib/ruby_llm/streaming.rb
CHANGED
@@ -29,7 +29,7 @@ module RubyLLM
 29  29 |     end
 30  30 |
 31  31 |     def handle_stream(&block)
 32     | -     to_json_stream do |data|
     32 | +     build_on_data_handler do |data|
 33  33 |         block.call(build_chunk(data)) if data
 34  34 |       end
 35  35 |     end

@@ -40,19 +40,15 @@ module RubyLLM
 40  40 |       Faraday::VERSION.start_with?('1')
 41  41 |     end
 42  42 |
 43     | -   def to_json_stream(&)
     43 | +   def build_on_data_handler(&handler)
 44  44 |       buffer = +''
 45  45 |       parser = EventStreamParser::Parser.new
 46  46 |
 47     | -     create_stream_processor(parser, buffer, &)
 48     | -   end
 49     | -
 50     | -   def create_stream_processor(parser, buffer, &)
 51     | -     if faraday_1?
 52     | -       legacy_stream_processor(parser, &)
 53     | -     else
 54     | -       stream_processor(parser, buffer, &)
 55     | -     end
     47 | +     FaradayHandlers.build(
     48 | +       faraday_v1: faraday_1?,
     49 | +       on_chunk: ->(chunk, env) { process_stream_chunk(chunk, parser, env, &handler) },
     50 | +       on_failed_response: ->(chunk, env) { handle_failed_response(chunk, buffer, env) }
     51 | +     )
 56  52 |     end
 57  53 |
 58  54 |     def process_stream_chunk(chunk, parser, env, &)

@@ -60,52 +56,34 @@ module RubyLLM
 60  56 |
 61  57 |       if error_chunk?(chunk)
 62  58 |         handle_error_chunk(chunk, env)
     59 | +     elsif json_error_payload?(chunk)
     60 | +       handle_json_error_chunk(chunk, env)
 63  61 |       else
 64  62 |         yield handle_sse(chunk, parser, env, &)
 65  63 |       end
 66  64 |     end
 67  65 |
 68     | -   def legacy_stream_processor(parser, &block)
 69     | -     proc do |chunk, _size|
 70     | -       process_stream_chunk(chunk, parser, nil, &block)
 71     | -     end
     66 | +   def error_chunk?(chunk)
     67 | +     chunk.start_with?('event: error')
 72  68 |     end
 73  69 |
 74     | -   def stream_processor(parser, buffer, &block)
 75     | -     proc do |chunk, _size, env|
 76     | -       if env&.status == 200
 77     | -         process_stream_chunk(chunk, parser, env, &block)
 78     | -       else
 79     | -         handle_failed_response(chunk, buffer, env)
 80     | -       end
 81     | -     end
     70 | +   def json_error_payload?(chunk)
     71 | +     chunk.lstrip.start_with?('{') && chunk.include?('"error"')
 82  72 |     end
 83  73 |
 84     | -   def error_chunk?(chunk)
 85     | -     chunk.start_with?('event: error')
     74 | +   def handle_json_error_chunk(chunk, env)
     75 | +     parse_error_from_json(chunk, env, 'Failed to parse JSON error chunk')
 86  76 |     end
 87  77 |
 88  78 |     def handle_error_chunk(chunk, env)
 89  79 |       error_data = chunk.split("\n")[1].delete_prefix('data: ')
 90     | -     status, _message = parse_streaming_error(error_data)
 91     | -     parsed_data = JSON.parse(error_data)
 92     | -
 93     | -     error_response = if faraday_1?
 94     | -                        Struct.new(:body, :status).new(parsed_data, status)
 95     | -                      else
 96     | -                        env.merge(body: parsed_data, status: status)
 97     | -                      end
 98     | -
 99     | -     ErrorMiddleware.parse_error(provider: self, response: error_response)
100     | -   rescue JSON::ParserError => e
101     | -     RubyLLM.logger.debug "Failed to parse error chunk: #{e.message}"
     80 | +     parse_error_from_json(error_data, env, 'Failed to parse error chunk')
102  81 |     end
103  82 |
104  83 |     def handle_failed_response(chunk, buffer, env)
105  84 |       buffer << chunk
106  85 |       error_data = JSON.parse(buffer)
107     | -     error_response = env.merge(body: error_data)
108     | -     ErrorMiddleware.parse_error(provider: self, response: error_response)
     86 | +     handle_parsed_error(error_data, env)
109  87 |     rescue JSON::ParserError
110  88 |       RubyLLM.logger.debug "Accumulating error chunk: #{chunk}"
111  89 |     end

@@ -116,30 +94,22 @@ module RubyLLM
116  94 |         when :error
117  95 |           handle_error_event(data, env)
118  96 |         else
119     | -         yield handle_data(data, &block) unless data == '[DONE]'
     97 | +         yield handle_data(data, env, &block) unless data == '[DONE]'
120  98 |         end
121  99 |       end
122 100 |     end
123 101 |
124     | -   def handle_data(data)
125     | -     JSON.parse(data)
    102 | +   def handle_data(data, env)
    103 | +     parsed = JSON.parse(data)
    104 | +     return parsed unless parsed.is_a?(Hash) && parsed.key?('error')
    105 | +
    106 | +     handle_parsed_error(parsed, env)
126 107 |     rescue JSON::ParserError => e
127 108 |       RubyLLM.logger.debug "Failed to parse data chunk: #{e.message}"
128 109 |     end
129 110 |
130 111 |     def handle_error_event(data, env)
131     | -     status, _message = parse_streaming_error(data)
132     | -     parsed_data = JSON.parse(data)
133     | -
134     | -     error_response = if faraday_1?
135     | -                        Struct.new(:body, :status).new(parsed_data, status)
136     | -                      else
137     | -                        env.merge(body: parsed_data, status: status)
138     | -                      end
139     | -
140     | -     ErrorMiddleware.parse_error(provider: self, response: error_response)
141     | -   rescue JSON::ParserError => e
142     | -     RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
    112 | +     parse_error_from_json(data, env, 'Failed to parse error event')
143 113 |     end
144 114 |
145 115 |     def parse_streaming_error(data)

@@ -149,5 +119,57 @@ module RubyLLM
149 119 |       RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
150 120 |       [500, "Failed to parse error: #{data}"]
151 121 |     end
    122 | +
    123 | +   def handle_parsed_error(parsed_data, env)
    124 | +     status, _message = parse_streaming_error(parsed_data.to_json)
    125 | +     error_response = build_stream_error_response(parsed_data, env, status)
    126 | +     ErrorMiddleware.parse_error(provider: self, response: error_response)
    127 | +   end
    128 | +
    129 | +   def parse_error_from_json(data, env, error_message)
    130 | +     parsed_data = JSON.parse(data)
    131 | +     handle_parsed_error(parsed_data, env)
    132 | +   rescue JSON::ParserError => e
    133 | +     RubyLLM.logger.debug "#{error_message}: #{e.message}"
    134 | +   end
    135 | +
    136 | +   def build_stream_error_response(parsed_data, env, status)
    137 | +     error_status = status || env&.status || 500
    138 | +
    139 | +     if faraday_1?
    140 | +       Struct.new(:body, :status).new(parsed_data, error_status)
    141 | +     else
    142 | +       env.merge(body: parsed_data, status: error_status)
    143 | +     end
    144 | +   end
    145 | +
    146 | +   # Builds Faraday on_data handlers for different major versions.
    147 | +   module FaradayHandlers
    148 | +     module_function
    149 | +
    150 | +     def build(faraday_v1:, on_chunk:, on_failed_response:)
    151 | +       if faraday_v1
    152 | +         v1_on_data(on_chunk)
    153 | +       else
    154 | +         v2_on_data(on_chunk, on_failed_response)
    155 | +       end
    156 | +     end
    157 | +
    158 | +     def v1_on_data(on_chunk)
    159 | +       proc do |chunk, _size|
    160 | +         on_chunk.call(chunk, nil)
    161 | +       end
    162 | +     end
    163 | +
    164 | +     def v2_on_data(on_chunk, on_failed_response)
    165 | +       proc do |chunk, _bytes, env|
    166 | +         if env&.status == 200
    167 | +           on_chunk.call(chunk, env)
    168 | +         else
    169 | +           on_failed_response.call(chunk, env)
    170 | +         end
    171 | +       end
    172 | +     end
    173 | +   end
152 174 |   end
153 175 | end
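The refactor isolates the Faraday version split into `FaradayHandlers`: Faraday 1.x invokes `on_data` with `(chunk, size)` and exposes no response env, while Faraday 2.x passes `(chunk, bytes, env)` and lets non-200 responses branch to the failure path. A minimal standalone sketch of that dispatch, with illustrative handlers and a stand-in env object (not gem API):

    require 'ostruct'

    # Unified callback shape the refactor targets: every chunk arrives as (chunk, env).
    on_chunk  = ->(chunk, env) { puts "chunk  (status=#{env&.status.inspect}): #{chunk}" }
    on_failed = ->(chunk, env) { puts "failed (status=#{env&.status.inspect}): #{chunk}" }

    # Faraday 1.x shape: (chunk, size), no env available.
    v1_handler = proc { |chunk, _size| on_chunk.call(chunk, nil) }

    # Faraday 2.x shape: (chunk, bytes, env); non-200 responses take the failure path.
    v2_handler = proc do |chunk, _bytes, env|
      env&.status == 200 ? on_chunk.call(chunk, env) : on_failed.call(chunk, env)
    end

    v1_handler.call('data: {"delta":"hi"}', 20)
    v2_handler.call('data: {"delta":"hi"}', 20, OpenStruct.new(status: 200))
    v2_handler.call('{"error":{"message":"rate limited"}}', 37, OpenStruct.new(status: 429))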
data/lib/ruby_llm/thinking.rb
ADDED
@@ -0,0 +1,49 @@
      1 | + # frozen_string_literal: true
      2 | +
      3 | + module RubyLLM
      4 | +   # Represents provider thinking output.
      5 | +   class Thinking
      6 | +     attr_reader :text, :signature
      7 | +
      8 | +     def initialize(text: nil, signature: nil)
      9 | +       @text = text
     10 | +       @signature = signature
     11 | +     end
     12 | +
     13 | +     def self.build(text: nil, signature: nil)
     14 | +       text = nil if text.is_a?(String) && text.empty?
     15 | +       signature = nil if signature.is_a?(String) && signature.empty?
     16 | +
     17 | +       return nil if text.nil? && signature.nil?
     18 | +
     19 | +       new(text: text, signature: signature)
     20 | +     end
     21 | +
     22 | +     def pretty_print(printer)
     23 | +       printer.object_group(self) do
     24 | +         printer.breakable
     25 | +         printer.text 'text='
     26 | +         printer.pp text
     27 | +         printer.comma_breakable
     28 | +         printer.text 'signature='
     29 | +         printer.pp(signature ? '[REDACTED]' : nil)
     30 | +       end
     31 | +     end
     32 | +   end
     33 | +
     34 | +   class Thinking
     35 | +     # Normalized config for thinking across providers.
     36 | +     class Config
     37 | +       attr_reader :effort, :budget
     38 | +
     39 | +       def initialize(effort: nil, budget: nil)
     40 | +         @effort = effort.is_a?(Symbol) ? effort.to_s : effort
     41 | +         @budget = budget
     42 | +       end
     43 | +
     44 | +       def enabled?
     45 | +         !effort.nil? || !budget.nil?
     46 | +       end
     47 | +     end
     48 | +   end
     49 | + end
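Reading the new class: `Thinking.build` collapses empty strings to nil and returns nil when nothing remains, and `Thinking::Config` stringifies symbol efforts. A usage sketch inferred directly from the code above:

    RubyLLM::Thinking.build(text: '', signature: '')  #=> nil (empty strings normalize to nil)
    thinking = RubyLLM::Thinking.build(text: 'Let me check the dates...')
    thinking.text       #=> "Let me check the dates..."
    thinking.signature  #=> nil

    config = RubyLLM::Thinking::Config.new(effort: :high)
    config.effort    #=> "high" (symbols are stringified)
    config.enabled?  #=> true
    RubyLLM::Thinking::Config.new.enabled?  #=> false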
data/lib/ruby_llm/tokens.rb
ADDED
@@ -0,0 +1,47 @@
      1 | + # frozen_string_literal: true
      2 | +
      3 | + module RubyLLM
      4 | +   # Represents token usage for a response.
      5 | +   class Tokens
      6 | +     attr_reader :input, :output, :cached, :cache_creation, :thinking
      7 | +
      8 | +     # rubocop:disable Metrics/ParameterLists
      9 | +     def initialize(input: nil, output: nil, cached: nil, cache_creation: nil, thinking: nil, reasoning: nil)
     10 | +       @input = input
     11 | +       @output = output
     12 | +       @cached = cached
     13 | +       @cache_creation = cache_creation
     14 | +       @thinking = thinking || reasoning
     15 | +     end
     16 | +     # rubocop:enable Metrics/ParameterLists
     17 | +
     18 | +     # rubocop:disable Metrics/ParameterLists
     19 | +     def self.build(input: nil, output: nil, cached: nil, cache_creation: nil, thinking: nil, reasoning: nil)
     20 | +       return nil if [input, output, cached, cache_creation, thinking, reasoning].all?(&:nil?)
     21 | +
     22 | +       new(
     23 | +         input: input,
     24 | +         output: output,
     25 | +         cached: cached,
     26 | +         cache_creation: cache_creation,
     27 | +         thinking: thinking,
     28 | +         reasoning: reasoning
     29 | +       )
     30 | +     end
     31 | +     # rubocop:enable Metrics/ParameterLists
     32 | +
     33 | +     def to_h
     34 | +       {
     35 | +         input_tokens: input,
     36 | +         output_tokens: output,
     37 | +         cached_tokens: cached,
     38 | +         cache_creation_tokens: cache_creation,
     39 | +         thinking_tokens: thinking
     40 | +       }.compact
     41 | +     end
     42 | +
     43 | +     def reasoning
     44 | +       thinking
     45 | +     end
     46 | +   end
     47 | + end
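The `reasoning:` keyword is folded into `@thinking` at construction time, and `to_h` compacts nil counts away. A usage sketch inferred from the code above:

    tokens = RubyLLM::Tokens.build(input: 1200, output: 350, reasoning: 96)
    tokens.thinking   #=> 96 (reasoning: feeds @thinking when thinking: is nil)
    tokens.reasoning  #=> 96 (alias reader)
    tokens.to_h       #=> { input_tokens: 1200, output_tokens: 350, thinking_tokens: 96 }

    RubyLLM::Tokens.build  #=> nil (all-nil counts build nothing)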
data/lib/ruby_llm/tool.rb
CHANGED
@@ -186,7 +186,7 @@ module RubyLLM
186 186 |     def resolve_direct_schema(schema)
187 187 |       return extract_schema(schema.to_json_schema) if schema.respond_to?(:to_json_schema)
188 188 |       return RubyLLM::Utils.deep_dup(schema) if schema.is_a?(Hash)
189     | -     if schema.is_a?(Class) && schema.
    189 | +     if schema.is_a?(Class) && schema.method_defined?(:to_json_schema)
190 190 |         return extract_schema(schema.new.to_json_schema)
191 191 |       end
192 192 |
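The replacement line checks `method_defined?`, the class-side way to ask whether instances will respond to `to_json_schema` (the class object itself does not respond to it, so the earlier `respond_to?` branch cannot catch this case). An illustration with a hypothetical schema class:

    # Hypothetical schema class, for illustration only.
    class WeatherSchema
      def to_json_schema
        { type: 'object', properties: { city: { type: 'string' } } }
      end
    end

    WeatherSchema.respond_to?(:to_json_schema)      #=> false, it's an instance method
    WeatherSchema.method_defined?(:to_json_schema)  #=> true, so the branch runs schema.new.to_json_schema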
data/lib/ruby_llm/tool_call.rb
CHANGED
@@ -4,19 +4,22 @@ module RubyLLM
  4   4 |   # Represents a function call from an AI model to a Tool.
  5   5 |   class ToolCall
  6   6 |     attr_reader :id, :name, :arguments
      7 | +   attr_accessor :thought_signature
  7   8 |
  8     | -   def initialize(id:, name:, arguments: {})
      9 | +   def initialize(id:, name:, arguments: {}, thought_signature: nil)
  9  10 |       @id = id
 10  11 |       @name = name
 11  12 |       @arguments = arguments
     13 | +     @thought_signature = thought_signature
 12  14 |     end
 13  15 |
 14  16 |     def to_h
 15  17 |       {
 16  18 |         id: @id,
 17  19 |         name: @name,
 18     | -       arguments: @arguments
 19     | -     }
     20 | +       arguments: @arguments,
     21 | +       thought_signature: @thought_signature
     22 | +     }.compact
 20  23 |     end
 21  24 |   end
 22  25 | end
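Because `to_h` now ends in `.compact`, tool calls without a `thought_signature` serialize exactly as before. A sketch inferred from the diff:

    call = RubyLLM::ToolCall.new(id: 'call_123', name: 'get_weather', arguments: { city: 'Berlin' })
    call.to_h  #=> { id: "call_123", name: "get_weather", arguments: { city: "Berlin" } }

    call.thought_signature = 'sig_abc'  # set later, e.g. by provider streaming code
    call.to_h  #=> now also includes thought_signature: "sig_abc"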
data/lib/ruby_llm/version.rb
CHANGED
data/lib/tasks/models.rake
CHANGED
@@ -61,7 +61,8 @@ def configure_bedrock(config)
 61  61 | end
 62  62 |
 63  63 | def refresh_models
 64     | -   initial_count = RubyLLM.models.all.size
     64 | +   existing_models = RubyLLM::Models.read_from_json
     65 | +   initial_count = existing_models.size
 65  66 |   puts "Refreshing models (#{initial_count} cached)..."
 66  67 |
 67  68 |   models = RubyLLM.models.refresh!

@@ -69,19 +70,29 @@ def refresh_models
 69  70 |   if models.all.empty? && initial_count.zero?
 70  71 |     puts 'Error: Failed to fetch models.'
 71  72 |     exit(1)
 72     | -   elsif models.all.size == initial_count && initial_count.positive?
 73     | -     puts 'Warning: Model list unchanged.'
 74  73 |   else
 75     | -     puts 'Validating models...'
 76     | -     validate_models!(models)
     74 | +     existing_data = sorted_models_data(existing_models)
     75 | +     new_data = sorted_models_data(models.all)
 77  76 |
 78     | -     puts "Saving models.json (#{models.all.size} models)"
 79     | -     models.save_to_json
     77 | +     if new_data == existing_data && initial_count.positive?
     78 | +       puts 'Warning: Model list unchanged.'
     79 | +     else
     80 | +       puts 'Validating models...'
     81 | +       validate_models!(models)
     82 | +
     83 | +       puts "Saving models.json (#{models.all.size} models)"
     84 | +       models.save_to_json
     85 | +     end
 80  86 |   end
 81  87 |
 82  88 |   @models = models
 83  89 | end
 84  90 |
     91 | + def sorted_models_data(models)
     92 | +   models.map(&:to_h)
     93 | +         .sort_by { |model| [model[:provider].to_s, model[:id].to_s] }
     94 | + end
     95 | +
 85  96 | def validate_models!(models)
 86  97 |   schema_path = RubyLLM::Models.schema_file
 87  98 |   models_data = models.all.map(&:to_h)
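The old guard compared only counts, so a refresh that swapped one model for another of equal count reported "unchanged". The new code compares provider/id-sorted `to_h` dumps, which is order-insensitive but content-sensitive. A self-contained sketch of the comparison (the sample hashes are hypothetical):

    # Hypothetical model dumps, for illustration.
    cached  = [{ provider: 'openai', id: 'gpt-4.1' }, { provider: 'anthropic', id: 'claude-sonnet-4' }]
    fetched = cached.reverse # same content, different order

    sort = ->(models) { models.sort_by { |m| [m[:provider].to_s, m[:id].to_s] } }
    sort.call(fetched) == sort.call(cached)  #=> true: no spurious rewrite of models.json
    fetched.size == cached.size              #=> also true, but size alone misses same-count content changes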
@@ -154,11 +165,7 @@ def generate_models_markdown
|
|
|
154
165
|
|
|
155
166
|
---
|
|
156
167
|
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
- **OpenAI, Anthropic, DeepSeek, Gemini, VertexAI**: Enriched by [🚀 Parsera](https://parsera.org/) *([free LLM metadata API](https://api.parsera.org/v1/llm-specs) - [go say thanks!](https://github.com/parsera-labs/api-llm-specs))*
|
|
160
|
-
- **OpenRouter**: Direct API
|
|
161
|
-
- **Others**: Local capabilities files
|
|
168
|
+
_Model information enriched by [models.dev](https://models.dev) and our custom code._
|
|
162
169
|
|
|
163
170
|
## Last Updated
|
|
164
171
|
{: .d-inline-block }
|
|
@@ -354,7 +361,7 @@ def generate_aliases # rubocop:disable Metrics/PerceivedComplexity
|
|
|
354
361
|
|
|
355
362
|
models['bedrock'].each do |bedrock_model|
|
|
356
363
|
next unless bedrock_model.start_with?('anthropic.')
|
|
357
|
-
next unless bedrock_model =~ /anthropic\.(claude-[
|
|
364
|
+
next unless bedrock_model =~ /anthropic\.(claude-[a-z0-9.-]+)-\d{8}/
|
|
358
365
|
|
|
359
366
|
base_name = Regexp.last_match(1)
|
|
360
367
|
anthropic_name = base_name.tr('.', '-')
|
metadata
CHANGED
@@ -1,7 +1,7 @@
  1   1 | --- !ruby/object:Gem::Specification
  2   2 | name: ruby_llm
  3   3 | version: !ruby/object:Gem::Version
  4     | -   version: 1.9.1
      4 | +   version: 1.10.0
  5   5 | platform: ruby
  6   6 | authors:
  7   7 | - Carmine Paolino

@@ -52,7 +52,7 @@ dependencies:
 52  52 |   - !ruby/object:Gem::Version
 53  53 |     version: 1.10.0
 54  54 | - !ruby/object:Gem::Dependency
 55     | -   name: faraday-
     55 | +   name: faraday-retry
 56  56 |   requirement: !ruby/object:Gem::Requirement
 57  57 |     requirements:
 58  58 |     - - ">="

@@ -66,7 +66,7 @@ dependencies:
 66  66 |   - !ruby/object:Gem::Version
 67  67 |     version: '1'
 68  68 | - !ruby/object:Gem::Dependency
 69     | -   name: faraday-
     69 | +   name: faraday-multipart
 70  70 |   requirement: !ruby/object:Gem::Requirement
 71  71 |     requirements:
 72  72 |     - - ">="

@@ -80,7 +80,7 @@ dependencies:
 80  80 |   - !ruby/object:Gem::Version
 81  81 |     version: '1'
 82  82 | - !ruby/object:Gem::Dependency
 83     | -   name: faraday-
     83 | +   name: faraday-net_http
 84  84 |   requirement: !ruby/object:Gem::Requirement
 85  85 |     requirements:
 86  86 |     - - ">="

@@ -180,6 +180,8 @@ files:
180 180 | - lib/generators/ruby_llm/install/templates/message_model.rb.tt
181 181 | - lib/generators/ruby_llm/install/templates/model_model.rb.tt
182 182 | - lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt
    183 | + - lib/generators/ruby_llm/upgrade_to_v1_10/templates/add_v1_10_message_columns.rb.tt
    184 | + - lib/generators/ruby_llm/upgrade_to_v1_10/upgrade_to_v1_10_generator.rb
183 185 | - lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt
184 186 | - lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb
185 187 | - lib/generators/ruby_llm/upgrade_to_v1_9/templates/add_v1_9_message_columns.rb.tt

@@ -271,10 +273,13 @@ files:
271 273 | - lib/ruby_llm/providers/openai/models.rb
272 274 | - lib/ruby_llm/providers/openai/moderation.rb
273 275 | - lib/ruby_llm/providers/openai/streaming.rb
    276 | + - lib/ruby_llm/providers/openai/temperature.rb
274 277 | - lib/ruby_llm/providers/openai/tools.rb
275 278 | - lib/ruby_llm/providers/openai/transcription.rb
276 279 | - lib/ruby_llm/providers/openrouter.rb
    280 | + - lib/ruby_llm/providers/openrouter/chat.rb
277 281 | - lib/ruby_llm/providers/openrouter/models.rb
    282 | + - lib/ruby_llm/providers/openrouter/streaming.rb
278 283 | - lib/ruby_llm/providers/perplexity.rb
279 284 | - lib/ruby_llm/providers/perplexity/capabilities.rb
280 285 | - lib/ruby_llm/providers/perplexity/chat.rb

@@ -288,6 +293,8 @@ files:
288 293 | - lib/ruby_llm/railtie.rb
289 294 | - lib/ruby_llm/stream_accumulator.rb
290 295 | - lib/ruby_llm/streaming.rb
    296 | + - lib/ruby_llm/thinking.rb
    297 | + - lib/ruby_llm/tokens.rb
291 298 | - lib/ruby_llm/tool.rb
292 299 | - lib/ruby_llm/tool_call.rb
293 300 | - lib/ruby_llm/transcription.rb

@@ -309,7 +316,7 @@ metadata:
309 316 |   funding_uri: https://github.com/sponsors/crmne
310 317 |   rubygems_mfa_required: 'true'
311 318 | post_install_message: |
312     | -   Upgrading from RubyLLM
    319 | +   Upgrading from RubyLLM < 1.10.x? Check the upgrade guide for new features and migration instructions
313 320 |   --> https://rubyllm.com/upgrading/
314 321 | rdoc_options: []
315 322 | require_paths: