ruby_llm_community 1.1.1 → 1.3.0
This diff shows the changes between publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +25 -7
- data/lib/generators/ruby_llm/chat_ui/chat_ui_generator.rb +127 -67
- data/lib/generators/ruby_llm/chat_ui/templates/controllers/chats_controller.rb.tt +12 -12
- data/lib/generators/ruby_llm/chat_ui/templates/controllers/messages_controller.rb.tt +7 -7
- data/lib/generators/ruby_llm/chat_ui/templates/controllers/models_controller.rb.tt +4 -4
- data/lib/generators/ruby_llm/chat_ui/templates/jobs/chat_response_job.rb.tt +6 -6
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_chat.html.erb.tt +4 -4
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_form.html.erb.tt +5 -5
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/index.html.erb.tt +5 -5
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/new.html.erb.tt +4 -4
- data/lib/generators/ruby_llm/chat_ui/templates/views/chats/show.html.erb.tt +8 -8
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_content.html.erb.tt +1 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_form.html.erb.tt +5 -5
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_message.html.erb.tt +9 -6
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_tool_calls.html.erb.tt +7 -0
- data/lib/generators/ruby_llm/chat_ui/templates/views/messages/create.turbo_stream.erb.tt +5 -5
- data/lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt +9 -9
- data/lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt +4 -6
- data/lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt +11 -11
- data/lib/generators/ruby_llm/generator_helpers.rb +131 -87
- data/lib/generators/ruby_llm/install/install_generator.rb +75 -73
- data/lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt +9 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +0 -1
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +3 -3
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +0 -1
- data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +1 -1
- data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +88 -85
- data/lib/generators/ruby_llm/upgrade_to_v1_9/templates/add_v1_9_message_columns.rb.tt +15 -0
- data/lib/generators/ruby_llm/upgrade_to_v1_9/upgrade_to_v1_9_generator.rb +49 -0
- data/lib/ruby_llm/active_record/acts_as.rb +17 -8
- data/lib/ruby_llm/active_record/chat_methods.rb +41 -13
- data/lib/ruby_llm/active_record/message_methods.rb +11 -2
- data/lib/ruby_llm/active_record/model_methods.rb +1 -1
- data/lib/ruby_llm/aliases.json +46 -20
- data/lib/ruby_llm/attachment.rb +8 -0
- data/lib/ruby_llm/chat.rb +13 -2
- data/lib/ruby_llm/configuration.rb +10 -1
- data/lib/ruby_llm/connection.rb +4 -4
- data/lib/ruby_llm/content.rb +23 -0
- data/lib/ruby_llm/message.rb +17 -9
- data/lib/ruby_llm/model/info.rb +4 -0
- data/lib/ruby_llm/models.json +12050 -9940
- data/lib/ruby_llm/models.rb +21 -25
- data/lib/ruby_llm/moderation.rb +56 -0
- data/lib/ruby_llm/provider.rb +29 -1
- data/lib/ruby_llm/providers/anthropic/chat.rb +18 -5
- data/lib/ruby_llm/providers/anthropic/content.rb +44 -0
- data/lib/ruby_llm/providers/anthropic/media.rb +5 -4
- data/lib/ruby_llm/providers/anthropic/models.rb +9 -2
- data/lib/ruby_llm/providers/anthropic/tools.rb +20 -18
- data/lib/ruby_llm/providers/bedrock/media.rb +2 -1
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +9 -2
- data/lib/ruby_llm/providers/gemini/chat.rb +353 -72
- data/lib/ruby_llm/providers/gemini/media.rb +59 -1
- data/lib/ruby_llm/providers/gemini/tools.rb +146 -25
- data/lib/ruby_llm/providers/gemini/transcription.rb +116 -0
- data/lib/ruby_llm/providers/gemini.rb +2 -1
- data/lib/ruby_llm/providers/gpustack/media.rb +1 -0
- data/lib/ruby_llm/providers/ollama/media.rb +1 -0
- data/lib/ruby_llm/providers/openai/capabilities.rb +15 -7
- data/lib/ruby_llm/providers/openai/chat.rb +7 -3
- data/lib/ruby_llm/providers/openai/media.rb +2 -1
- data/lib/ruby_llm/providers/openai/moderation.rb +34 -0
- data/lib/ruby_llm/providers/openai/streaming.rb +7 -3
- data/lib/ruby_llm/providers/openai/tools.rb +34 -12
- data/lib/ruby_llm/providers/openai/transcription.rb +70 -0
- data/lib/ruby_llm/providers/openai_base.rb +2 -0
- data/lib/ruby_llm/providers/red_candle/capabilities.rb +124 -0
- data/lib/ruby_llm/providers/red_candle/chat.rb +317 -0
- data/lib/ruby_llm/providers/red_candle/models.rb +121 -0
- data/lib/ruby_llm/providers/red_candle/streaming.rb +40 -0
- data/lib/ruby_llm/providers/red_candle.rb +90 -0
- data/lib/ruby_llm/providers/vertexai/transcription.rb +16 -0
- data/lib/ruby_llm/providers/vertexai.rb +3 -0
- data/lib/ruby_llm/railtie.rb +1 -1
- data/lib/ruby_llm/stream_accumulator.rb +8 -12
- data/lib/ruby_llm/tool.rb +126 -0
- data/lib/ruby_llm/transcription.rb +35 -0
- data/lib/ruby_llm/utils.rb +46 -0
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm_community.rb +38 -1
- metadata +35 -3
data/lib/ruby_llm/models.rb
CHANGED

@@ -10,16 +10,19 @@ module RubyLLM
         @instance ||= new
       end
 
-      def …
-        …
+      def schema_file
+        File.expand_path('models_schema.json', __dir__)
       end
 
-      def …
-        …
+      def load_models(file = RubyLLM.config.model_registry_file)
+        read_from_json(file)
       end
 
-      def …
-        File.…
+      def read_from_json(file = RubyLLM.config.model_registry_file)
+        data = File.exist?(file) ? File.read(file) : '[]'
+        JSON.parse(data, symbolize_names: true).map { |model| Model::Info.new(model) }
+      rescue JSON::ParserError
+        []
       end
 
       def refresh!(remote_only: false)
@@ -151,26 +154,15 @@ module RubyLLM
     end
 
     def initialize(models = nil)
-      @models = models || load_models
-    end
-
-    def load_models
-      read_from_json
-    end
-
-    def load_from_json!
-      @models = read_from_json
+      @models = models || self.class.load_models
     end
 
-    def …
-      …
-      JSON.parse(data, symbolize_names: true).map { |model| Model::Info.new(model) }
-    rescue JSON::ParserError
-      []
+    def load_from_json!(file = RubyLLM.config.model_registry_file)
+      @models = self.class.read_from_json(file)
     end
 
-    def save_to_json
-      File.write(…)
+    def save_to_json(file = RubyLLM.config.model_registry_file)
+      File.write(file, JSON.pretty_generate(all.map(&:to_h)))
     end
 
     def all
@@ -194,15 +186,15 @@ module RubyLLM
     end
 
     def embedding_models
-      self.class.new(all.select { |m| m.type == 'embedding' })
+      self.class.new(all.select { |m| m.type == 'embedding' || m.modalities.output.include?('embeddings') })
     end
 
     def audio_models
-      self.class.new(all.select { |m| m.type == 'audio' })
+      self.class.new(all.select { |m| m.type == 'audio' || m.modalities.output.include?('audio') })
     end
 
     def image_models
-      self.class.new(all.select { |m| m.type == 'image' })
+      self.class.new(all.select { |m| m.type == 'image' || m.modalities.output.include?('image') })
     end
 
     def by_family(family)
@@ -217,6 +209,10 @@ module RubyLLM
       self.class.refresh!(remote_only: remote_only)
     end
 
+    def resolve(model_id, provider: nil, assume_exists: false, config: nil)
+      self.class.resolve(model_id, provider: provider, assume_exists: assume_exists, config: config)
+    end
+
     private
 
     def find_with_provider(model_id, provider)
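The registry loaders now live on the class, take an explicit path defaulting to the new RubyLLM.config.model_registry_file setting, and return an empty registry when the file is missing or contains invalid JSON. A minimal sketch of pointing the registry at an app-local file, assuming model_registry_file is writable through the standard configure block (its use as a default argument above suggests it is):

    RubyLLM.configure do |config|
      # Assumption: model_registry_file is a plain read/write config attribute.
      config.model_registry_file = File.expand_path('config/models.json')
    end

    models = RubyLLM::Models.instance
    models.load_from_json!  # re-reads the configured file; [] if absent or unparseable
    models.save_to_json     # writes the current registry back to the same file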
data/lib/ruby_llm/moderation.rb
ADDED

@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Identify potentially harmful content in text.
+  # https://platform.openai.com/docs/guides/moderation
+  class Moderation
+    attr_reader :id, :model, :results
+
+    def initialize(id:, model:, results:)
+      @id = id
+      @model = model
+      @results = results
+    end
+
+    def self.moderate(input,
+                      model: nil,
+                      provider: nil,
+                      assume_model_exists: false,
+                      context: nil)
+      config = context&.config || RubyLLM.config
+      model ||= config.default_moderation_model || 'omni-moderation-latest'
+      model, provider_instance = Models.resolve(model, provider: provider, assume_exists: assume_model_exists,
+                                                config: config)
+      model_id = model.id
+
+      provider_instance.moderate(input, model: model_id)
+    end
+
+    # Convenience method to get content from moderation result
+    def content
+      results
+    end
+
+    # Check if any content was flagged
+    def flagged?
+      results.any? { |result| result['flagged'] }
+    end
+
+    # Get all flagged categories across all results
+    def flagged_categories
+      results.flat_map do |result|
+        result['categories']&.select { |_category, flagged| flagged }&.keys || []
+      end.uniq
+    end
+
+    # Get category scores for the first result (most common case)
+    def category_scores
+      results.first&.dig('category_scores') || {}
+    end
+
+    # Get categories for the first result (most common case)
+    def categories
+      results.first&.dig('categories') || {}
+    end
+  end
+end
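RubyLLM::Moderation puts a provider's moderation endpoint behind one class method and adds convenience readers over the raw results. A usage sketch, assuming a configured OpenAI key; the category names shown are illustrative and depend entirely on what the provider returns:

    moderation = RubyLLM::Moderation.moderate('some user-submitted text')

    if moderation.flagged?
      moderation.flagged_categories        # e.g. ['harassment', 'hate']
      moderation.category_scores['hate']   # scores for the first result
    end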
data/lib/ruby_llm/provider.rb
CHANGED

@@ -74,12 +74,25 @@ module RubyLLM
      parse_embedding_response(response, model:, text:)
    end
 
+    def moderate(input, model:)
+      payload = render_moderation_payload(input, model:)
+      response = @connection.post moderation_url, payload
+      parse_moderation_response(response, model:)
+    end
+
    def paint(prompt, model:, size:, with:, params:)
      payload = render_image_payload(prompt, model:, size:, with:, params:)
      response = @connection.post images_url, payload
      parse_image_response(response, model:)
    end
 
+    def transcribe(audio_file, model:, language:, **options)
+      file_part = build_audio_file_part(audio_file)
+      payload = render_transcription_payload(file_part, model:, language:, **options)
+      response = @connection.post transcription_url, payload
+      parse_transcription_response(response, model:)
+    end
+
    def configured?
      configuration_requirements.all? { |req| @config.send(req) }
    end
@@ -162,9 +175,13 @@ module RubyLLM
        providers[name.to_sym] = provider_class
      end
 
+      def resolve(name)
+        providers[name.to_sym]
+      end
+
      def for(model)
        model_info = Models.find(model)
-        …
+        resolve model_info.provider
      end
 
      def providers
@@ -194,6 +211,17 @@ module RubyLLM
 
    private
 
+    def build_audio_file_part(file_path)
+      expanded_path = File.expand_path(file_path)
+      mime_type = Marcel::MimeType.for(Pathname.new(expanded_path))
+
+      Faraday::Multipart::FilePart.new(
+        expanded_path,
+        mime_type,
+        File.basename(expanded_path)
+      )
+    end
+
    def try_parse_json(maybe_json)
      return maybe_json unless maybe_json.is_a?(String)
 
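Provider#moderate and #transcribe follow the same template-method shape as embed and paint: render a payload, POST it to a provider-supplied URL, and parse the response. A sketch of the three moderation hooks a provider subclass would define — the class name and method bodies below are illustrative, not the gem's actual OpenAI implementation (which lives in data/lib/ruby_llm/providers/openai/moderation.rb):

    module RubyLLM
      module Providers
        class ExampleProvider < Provider # hypothetical provider
          def moderation_url
            'moderations' # appended to the provider's API base
          end

          def render_moderation_payload(input, model:)
            { model: model, input: input }
          end

          def parse_moderation_response(response, model:)
            data = response.body
            Moderation.new(id: data['id'], model: model, results: data['results'] || [])
          end
        end
      end
    end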
data/lib/ruby_llm/providers/anthropic/chat.rb
CHANGED

@@ -74,15 +74,22 @@ module RubyLLM
        end
 
        def build_message(data, content, tool_use_blocks, response)
+          usage = data['usage'] || {}
+          cached_tokens = usage['cache_read_input_tokens']
+          cache_creation_tokens = usage['cache_creation_input_tokens']
+          if cache_creation_tokens.nil? && usage['cache_creation'].is_a?(Hash)
+            cache_creation_tokens = usage['cache_creation'].values.compact.sum
+          end
+
          Message.new(
            role: :assistant,
            content: content,
            tool_calls: Tools.parse_tool_calls(tool_use_blocks),
-            input_tokens: …
-            output_tokens: …
+            input_tokens: usage['input_tokens'],
+            output_tokens: usage['output_tokens'],
+            cached_tokens: cached_tokens,
+            cache_creation_tokens: cache_creation_tokens,
            model_id: data['model'],
-            cache_creation_tokens: data.dig('usage', 'cache_creation_input_tokens'),
-            cached_tokens: data.dig('usage', 'cache_read_input_tokens'),
            raw: response
          )
        end
@@ -98,7 +105,13 @@ module RubyLLM
        end
 
        def format_system_message(msg, cache: false)
-          …
+          content = msg.content
+
+          if content.is_a?(RubyLLM::Content::Raw)
+            content.value
+          else
+            Media.format_content(content, cache:)
+          end
        end
 
        def format_basic_message(msg, cache: false)
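Both build_message here and the extract_cache_creation_tokens helpers later in the diff handle responses where Anthropic reports cache writes as a per-TTL breakdown hash (usage['cache_creation']) rather than the flat cache_creation_input_tokens count; the breakdown's values are simply summed. A worked illustration with hypothetical bucket names:

    usage = {
      'input_tokens' => 812,
      'output_tokens' => 64,
      'cache_creation' => {
        'ephemeral_5m_input_tokens' => 300, # hypothetical TTL buckets
        'ephemeral_1h_input_tokens' => 120
      }
    }

    usage['cache_creation'].values.compact.sum # => 420, recorded as cache_creation_tokens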
data/lib/ruby_llm/providers/anthropic/content.rb
ADDED

@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Anthropic
+      # Helper for constructing Anthropic native content blocks.
+      class Content
+        class << self
+          def new(text = nil, cache: false, cache_control: nil, parts: nil, **extras)
+            payload = resolve_payload(
+              text: text,
+              parts: parts,
+              cache: cache,
+              cache_control: cache_control,
+              extras: extras
+            )
+
+            RubyLLM::Content::Raw.new(payload)
+          end
+
+          private
+
+          def resolve_payload(text:, parts:, cache:, cache_control:, extras:)
+            return Array(parts) if parts
+
+            raise ArgumentError, 'text or parts must be provided' if text.nil?
+
+            block = { type: 'text', text: text }.merge(extras)
+            control = determine_cache_control(cache_control, cache)
+            block[:cache_control] = control if control
+
+            [block]
+          end
+
+          def determine_cache_control(cache_control, cache_flag)
+            return cache_control if cache_control
+
+            { type: 'ephemeral' } if cache_flag
+          end
+        end
+      end
+    end
+  end
+end
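Anthropic::Content.new builds a native Anthropic content-block array and wraps it in RubyLLM::Content::Raw, which the media and tool formatting changes below pass through to the API verbatim. A usage sketch marking a block as cacheable:

    block = RubyLLM::Providers::Anthropic::Content.new(
      'You are a terse assistant.',
      cache: true
    )
    # block is a RubyLLM::Content::Raw whose value is:
    # [{ type: 'text', text: 'You are a terse assistant.',
    #    cache_control: { type: 'ephemeral' } }]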
data/lib/ruby_llm/providers/anthropic/media.rb
CHANGED

@@ -7,7 +7,8 @@ module RubyLLM
      module Media
        module_function
 
-        def format_content(content, cache: false)
+        def format_content(content, cache: false) # rubocop:disable Metrics/PerceivedComplexity
+          return content.value if content.is_a?(RubyLLM::Content::Raw)
          return [format_text(content.to_json, cache:)] if content.is_a?(Hash) || content.is_a?(Array)
          return [format_text(content, cache:)] unless content.is_a?(Content)
 
@@ -17,11 +18,11 @@ module RubyLLM
          content.attachments.each do |attachment|
            case attachment.type
            when :image
-              parts << format_image(attachment)
+              parts << format_image(attachment, cache:)
            when :pdf
-              parts << format_pdf(attachment)
+              parts << format_pdf(attachment, cache:)
            when :text
-              parts << format_text_file(attachment)
+              parts << format_text_file(attachment, cache:)
            else
              raise UnsupportedAttachmentError, attachment.mime_type
            end
data/lib/ruby_llm/providers/anthropic/models.rb
CHANGED

@@ -44,11 +44,18 @@ module RubyLLM
        end
 
        def extract_cached_tokens(data)
-          data.dig('message', 'usage', 'cache_read_input_tokens')
+          data.dig('message', 'usage', 'cache_read_input_tokens') || data.dig('usage', 'cache_read_input_tokens')
        end
 
        def extract_cache_creation_tokens(data)
-          data.dig('message', 'usage', …)
+          direct = data.dig('message', 'usage',
+                            'cache_creation_input_tokens') || data.dig('usage', 'cache_creation_input_tokens')
+          return direct if direct
+
+          breakdown = data.dig('message', 'usage', 'cache_creation') || data.dig('usage', 'cache_creation')
+          return unless breakdown.is_a?(Hash)
+
+          breakdown.values.compact.sum
        end
      end
    end
data/lib/ruby_llm/providers/anthropic/tools.rb
CHANGED

@@ -12,6 +12,8 @@ module RubyLLM
        end
 
        def format_tool_call(msg)
+          return { role: 'assistant', content: msg.content.value } if msg.content.is_a?(RubyLLM::Content::Raw)
+
          content = []
 
          content << Media.format_text(msg.content) unless msg.content.nil? || msg.content.empty?
@@ -29,7 +31,7 @@ module RubyLLM
        def format_tool_result(msg)
          {
            role: 'user',
-            content: [format_tool_result_block(msg)]
+            content: msg.content.is_a?(RubyLLM::Content::Raw) ? msg.content.value : [format_tool_result_block(msg)]
          }
        end
 
@@ -51,15 +53,18 @@ module RubyLLM
        end
 
        def function_for(tool)
-          …
+          input_schema = tool.params_schema ||
+                         RubyLLM::Tool::SchemaDefinition.from_parameters(tool.parameters)&.json_schema
+
+          declaration = {
            name: tool.name,
            description: tool.description,
-            input_schema: …
-              type: 'object',
-              properties: clean_parameters(tool.parameters),
-              required: required_parameters(tool.parameters)
-            }
+            input_schema: input_schema || default_input_schema
          }
+
+          return declaration if tool.provider_params.empty?
+
+          RubyLLM::Utils.deep_merge(declaration, tool.provider_params)
        end
 
        def extract_tool_calls(data)
@@ -89,17 +94,14 @@ module RubyLLM
          tool_calls.empty? ? nil : tool_calls
        end
 
-        def …
-          …
-          …
-          …
-          …
-          …
-          …
-          …
-          …
-        def required_parameters(parameters)
-          parameters.select { |_, param| param.required }.keys
+        def default_input_schema
+          {
+            'type' => 'object',
+            'properties' => {},
+            'required' => [],
+            'additionalProperties' => false,
+            'strict' => true
+          }
        end
      end
    end
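function_for now prefers a schema the tool supplies itself (params_schema) over one derived from its declared parameters, and deep-merges tool.provider_params over the finished declaration so a tool can inject or override provider-specific keys. A sketch using the familiar Tool DSL; the derived-schema comment paraphrases the code above, since the exact output of SchemaDefinition.from_parameters is not shown in this diff:

    class Weather < RubyLLM::Tool
      description 'Looks up current weather for a city'
      param :city, desc: 'City name'

      def execute(city:)
        "Sunny in #{city}" # placeholder implementation
      end
    end

    # With no params_schema and no provider_params, the declaration is:
    #   { name: 'weather',
    #     description: 'Looks up current weather for a city',
    #     input_schema: <schema derived from the :city param> }
    # A tool defining params_schema wins over the derived schema; any
    # provider_params are deep-merged last and can override every key.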
data/lib/ruby_llm/providers/bedrock/media.rb
CHANGED

@@ -10,7 +10,8 @@ module RubyLLM
 
        module_function
 
-        def format_content(content, cache: false)
+        def format_content(content, cache: false) # rubocop:disable Metrics/PerceivedComplexity
+          return content.value if content.is_a?(RubyLLM::Content::Raw)
          return [Anthropic::Media.format_text(content.to_json, cache:)] if content.is_a?(Hash) || content.is_a?(Array)
          return [Anthropic::Media.format_text(content, cache:)] unless content.is_a?(Content)
 
data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb
CHANGED

@@ -33,11 +33,18 @@ module RubyLLM
        end
 
        def extract_cached_tokens(data)
-          data.dig('message', 'usage', 'cache_read_input_tokens')
+          data.dig('message', 'usage', 'cache_read_input_tokens') || data.dig('usage', 'cache_read_input_tokens')
        end
 
        def extract_cache_creation_tokens(data)
-          data.dig('message', 'usage', …)
+          direct = data.dig('message', 'usage',
+                            'cache_creation_input_tokens') || data.dig('usage', 'cache_creation_input_tokens')
+          return direct if direct
+
+          breakdown = data.dig('message', 'usage', 'cache_creation') || data.dig('usage', 'cache_creation')
+          return unless breakdown.is_a?(Hash)
+
+          breakdown.values.compact.sum
        end
 
        private