ruby_llm_community 0.0.1 → 0.0.2
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +22 -0
- data/README.md +172 -0
- data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
- data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
- data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
- data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install_generator.rb +121 -0
- data/lib/ruby_llm/active_record/acts_as.rb +382 -0
- data/lib/ruby_llm/aliases.json +217 -0
- data/lib/ruby_llm/aliases.rb +56 -0
- data/lib/ruby_llm/attachment.rb +164 -0
- data/lib/ruby_llm/chat.rb +219 -0
- data/lib/ruby_llm/chunk.rb +6 -0
- data/lib/ruby_llm/configuration.rb +75 -0
- data/lib/ruby_llm/connection.rb +126 -0
- data/lib/ruby_llm/content.rb +52 -0
- data/lib/ruby_llm/context.rb +29 -0
- data/lib/ruby_llm/embedding.rb +30 -0
- data/lib/ruby_llm/error.rb +84 -0
- data/lib/ruby_llm/image.rb +53 -0
- data/lib/ruby_llm/message.rb +76 -0
- data/lib/ruby_llm/mime_type.rb +67 -0
- data/lib/ruby_llm/model/info.rb +101 -0
- data/lib/ruby_llm/model/modalities.rb +22 -0
- data/lib/ruby_llm/model/pricing.rb +51 -0
- data/lib/ruby_llm/model/pricing_category.rb +48 -0
- data/lib/ruby_llm/model/pricing_tier.rb +34 -0
- data/lib/ruby_llm/model.rb +7 -0
- data/lib/ruby_llm/models.json +29924 -0
- data/lib/ruby_llm/models.rb +218 -0
- data/lib/ruby_llm/models_schema.json +168 -0
- data/lib/ruby_llm/provider.rb +219 -0
- data/lib/ruby_llm/providers/anthropic/capabilities.rb +179 -0
- data/lib/ruby_llm/providers/anthropic/chat.rb +106 -0
- data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
- data/lib/ruby_llm/providers/anthropic/media.rb +92 -0
- data/lib/ruby_llm/providers/anthropic/models.rb +48 -0
- data/lib/ruby_llm/providers/anthropic/streaming.rb +43 -0
- data/lib/ruby_llm/providers/anthropic/tools.rb +108 -0
- data/lib/ruby_llm/providers/anthropic.rb +37 -0
- data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
- data/lib/ruby_llm/providers/bedrock/chat.rb +65 -0
- data/lib/ruby_llm/providers/bedrock/media.rb +61 -0
- data/lib/ruby_llm/providers/bedrock/models.rb +82 -0
- data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +63 -0
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +63 -0
- data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
- data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +90 -0
- data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
- data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
- data/lib/ruby_llm/providers/bedrock.rb +83 -0
- data/lib/ruby_llm/providers/deepseek/capabilities.rb +131 -0
- data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
- data/lib/ruby_llm/providers/deepseek.rb +30 -0
- data/lib/ruby_llm/providers/gemini/capabilities.rb +351 -0
- data/lib/ruby_llm/providers/gemini/chat.rb +139 -0
- data/lib/ruby_llm/providers/gemini/embeddings.rb +39 -0
- data/lib/ruby_llm/providers/gemini/images.rb +48 -0
- data/lib/ruby_llm/providers/gemini/media.rb +55 -0
- data/lib/ruby_llm/providers/gemini/models.rb +41 -0
- data/lib/ruby_llm/providers/gemini/streaming.rb +58 -0
- data/lib/ruby_llm/providers/gemini/tools.rb +82 -0
- data/lib/ruby_llm/providers/gemini.rb +36 -0
- data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
- data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
- data/lib/ruby_llm/providers/gpustack.rb +33 -0
- data/lib/ruby_llm/providers/mistral/capabilities.rb +163 -0
- data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
- data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
- data/lib/ruby_llm/providers/mistral/models.rb +49 -0
- data/lib/ruby_llm/providers/mistral.rb +32 -0
- data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
- data/lib/ruby_llm/providers/ollama/media.rb +50 -0
- data/lib/ruby_llm/providers/ollama.rb +29 -0
- data/lib/ruby_llm/providers/openai/capabilities.rb +306 -0
- data/lib/ruby_llm/providers/openai/chat.rb +86 -0
- data/lib/ruby_llm/providers/openai/embeddings.rb +36 -0
- data/lib/ruby_llm/providers/openai/images.rb +38 -0
- data/lib/ruby_llm/providers/openai/media.rb +81 -0
- data/lib/ruby_llm/providers/openai/models.rb +39 -0
- data/lib/ruby_llm/providers/openai/response.rb +115 -0
- data/lib/ruby_llm/providers/openai/response_media.rb +76 -0
- data/lib/ruby_llm/providers/openai/streaming.rb +190 -0
- data/lib/ruby_llm/providers/openai/tools.rb +100 -0
- data/lib/ruby_llm/providers/openai.rb +44 -0
- data/lib/ruby_llm/providers/openai_base.rb +44 -0
- data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
- data/lib/ruby_llm/providers/openrouter.rb +26 -0
- data/lib/ruby_llm/providers/perplexity/capabilities.rb +138 -0
- data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
- data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
- data/lib/ruby_llm/providers/perplexity.rb +52 -0
- data/lib/ruby_llm/railtie.rb +17 -0
- data/lib/ruby_llm/stream_accumulator.rb +97 -0
- data/lib/ruby_llm/streaming.rb +162 -0
- data/lib/ruby_llm/tool.rb +100 -0
- data/lib/ruby_llm/tool_call.rb +31 -0
- data/lib/ruby_llm/utils.rb +49 -0
- data/lib/ruby_llm/version.rb +5 -0
- data/lib/ruby_llm.rb +98 -0
- data/lib/tasks/aliases.rake +235 -0
- data/lib/tasks/models_docs.rake +224 -0
- data/lib/tasks/models_update.rake +108 -0
- data/lib/tasks/release.rake +32 -0
- data/lib/tasks/vcr.rake +99 -0
- metadata +128 -7
data/lib/ruby_llm/providers/openai/response.rb

```diff
@@ -0,0 +1,115 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class OpenAI
+      # Response methods of the OpenAI API integration
+      module Response
+        def responses_url
+          'responses'
+        end
+
+        module_function
+
+        def render_response_payload(messages, tools:, temperature:, model:, stream: false, schema: nil) # rubocop:disable Metrics/ParameterLists
+          payload = {
+            model: model,
+            input: format_input(messages),
+            stream: stream
+          }
+
+          # Only include temperature if it's not nil (some models don't accept it)
+          payload[:temperature] = temperature unless temperature.nil?
+
+          payload[:tools] = tools.map { |_, tool| response_tool_for(tool) } if tools.any?
+
+          if schema
+            # Use strict mode from schema if specified, default to true
+            strict = schema[:strict] != false
+
+            payload[:text] = {
+              format: {
+                type: 'json_schema',
+                name: 'response',
+                schema: schema,
+                strict: strict
+              }
+            }
+          end
+
+          payload
+        end
+
+        def format_input(messages) # rubocop:disable Metrics/PerceivedComplexity
+          all_tool_calls = messages.flat_map do |m|
+            m.tool_calls&.values || []
+          end
+          messages.flat_map do |msg|
+            if msg.tool_call?
+              msg.tool_calls.map do |_, tc|
+                {
+                  type: 'function_call',
+                  call_id: tc.id,
+                  name: tc.name,
+                  arguments: JSON.generate(tc.arguments),
+                  status: 'completed'
+                }
+              end
+            elsif msg.role == :tool
+              {
+                type: 'function_call_output',
+                call_id: all_tool_calls.detect { |tc| tc.id == msg.tool_call_id }&.id,
+                output: msg.content,
+                status: 'completed'
+              }
+            else
+              {
+                type: 'message',
+                role: format_role(msg.role),
+                content: ResponseMedia.format_content(msg.content),
+                status: 'completed'
+              }.compact
+            end
+          end
+        end
+
+        def format_role(role)
+          case role
+          when :system
+            'developer'
+          else
+            role.to_s
+          end
+        end
+
+        def parse_respond_response(response)
+          data = response.body
+          return if data.empty?
+
+          raise Error.new(response, data.dig('error', 'message')) if data.dig('error', 'message')
+
+          outputs = data['output']
+          return unless outputs.any?
+
+          Message.new(
+            role: :assistant,
+            content: all_output_text(outputs),
+            tool_calls: parse_response_tool_calls(outputs),
+            input_tokens: data['usage']['input_tokens'],
+            output_tokens: data['usage']['output_tokens'],
+            model_id: data['model'],
+            raw: response
+          )
+        end
+
+        def all_output_text(outputs)
+          outputs.select { |o| o['type'] == 'message' }.flat_map do |o|
+            o['content'].filter_map do |c|
+              c['type'] == 'output_text' && c['text']
+            end
+          end.join("\n")
+        end
+      end
+    end
+  end
+end
```
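To make the shape concrete, here is a minimal sketch (plain Ruby, not part of the gem) of the payload `render_response_payload` builds for a structured-output request; the schema hash is a hypothetical example:

```ruby
require 'json'

# Hypothetical JSON schema for a structured response (illustration only).
schema = {
  type: 'object',
  properties: { answer: { type: 'string' } },
  required: ['answer'],
  strict: true
}

# Mirrors the hash constructed above: input items plus a :text format block.
payload = {
  model: 'gpt-4o',
  input: [{ type: 'message', role: 'user', content: 'Hi', status: 'completed' }],
  stream: false,
  text: {
    format: {
      type: 'json_schema',
      name: 'response',
      schema: schema,
      strict: schema[:strict] != false # defaults to true unless explicitly false
    }
  }
}

puts JSON.pretty_generate(payload)
```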
data/lib/ruby_llm/providers/openai/response_media.rb

```diff
@@ -0,0 +1,76 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class OpenAI
+      # Handles formatting of media content (images, audio) for OpenAI APIs
+      module ResponseMedia
+        module_function
+
+        def format_content(content)
+          return content.to_json if content.is_a?(Hash) || content.is_a?(Array)
+          return content unless content.is_a?(Content)
+
+          parts = []
+          parts << format_text(content.text) if content.text
+
+          content.attachments.each do |attachment|
+            case attachment.type
+            when :image
+              parts << format_image(attachment)
+            when :pdf
+              parts << format_pdf(attachment)
+            when :audio
+              parts << format_audio(attachment)
+            when :text
+              parts << format_text_file(attachment)
+            else
+              raise UnsupportedAttachmentError, attachment.type
+            end
+          end
+
+          parts
+        end
+
+        def format_image(image)
+          {
+            type: 'input_image',
+            image_url: image.url? ? image.source : "data:#{image.mime_type};base64,#{image.encoded}"
+          }
+        end
+
+        def format_pdf(pdf)
+          {
+            type: 'input_file',
+            filename: pdf.filename,
+            file_data: "data:#{pdf.mime_type};base64,#{pdf.encoded}"
+          }
+        end
+
+        def format_text_file(text_file)
+          {
+            type: 'input_text',
+            text: Utils.format_text_file_for_llm(text_file)
+          }
+        end
+
+        def format_audio(audio)
+          {
+            type: 'input_audio',
+            input_audio: {
+              data: audio.encoded,
+              format: audio.mime_type.split('/').last
+            }
+          }
+        end
+
+        def format_text(text)
+          {
+            type: 'input_text',
+            text: text
+          }
+        end
+      end
+    end
+  end
+end
```
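For illustration, a standalone sketch of the data-URL branch of `format_image` above; `diagram.png` is a hypothetical local file:

```ruby
require 'base64'

# Non-URL attachments are inlined as base64 data URLs, as in format_image.
mime_type = 'image/png'
data      = Base64.strict_encode64(File.binread('diagram.png')) # assumed to exist

part = {
  type: 'input_image',
  image_url: "data:#{mime_type};base64,#{data}"
}
```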
data/lib/ruby_llm/providers/openai/streaming.rb

```diff
@@ -0,0 +1,190 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class OpenAI
+      # Streaming methods of the OpenAI API integration
+      module Streaming
+        module_function
+
+        def stream_url
+          completion_url
+        end
+
+        def responses_stream_url
+          responses_url
+        end
+
+        def build_chunk(data)
+          # Check if this is responses API format vs chat completions format
+          if data['type'] # Responses API has a 'type' field
+            build_responses_chunk(data)
+          else
+            build_chat_completions_chunk(data)
+          end
+        end
+
+        def build_responses_chunk(data)
+          case data['type']
+          when 'response.text.delta'
+            # Text content delta - deprecated format
+            Chunk.new(
+              role: :assistant,
+              model_id: data.dig('response', 'model'),
+              content: data['delta'],
+              tool_calls: nil,
+              input_tokens: nil,
+              output_tokens: nil
+            )
+          when 'response.output_text.delta'
+            # Text content delta - new format
+            Chunk.new(
+              role: :assistant,
+              model_id: nil, # Model is in the completion event
+              content: data['delta'],
+              tool_calls: nil,
+              input_tokens: nil,
+              output_tokens: nil
+            )
+          when 'response.function_call_arguments.delta'
+            # Tool call arguments delta - handled by accumulator
+            # We need to track these deltas to build up the complete tool call
+            build_tool_call_delta_chunk(data)
+          when 'response.output_item.added'
+            # New tool call or message starting
+            if data.dig('item', 'type') == 'function_call'
+              build_tool_call_start_chunk(data)
+            else
+              build_empty_chunk(data)
+            end
+          when 'response.output_item.done'
+            # Tool call or message completed
+            if data.dig('item', 'type') == 'function_call'
+              build_tool_call_complete_chunk(data)
+            else
+              build_empty_chunk(data)
+            end
+          when 'response.completed'
+            # Final response with usage stats
+            Chunk.new(
+              role: :assistant,
+              model_id: data.dig('response', 'model'),
+              content: nil,
+              tool_calls: nil,
+              input_tokens: data.dig('response', 'usage', 'input_tokens'),
+              output_tokens: data.dig('response', 'usage', 'output_tokens')
+            )
+          else
+            # Other event types (response.created, response.in_progress, etc.)
+            build_empty_chunk(data)
+          end
+        end
+
+        def build_chat_completions_chunk(data)
+          Chunk.new(
+            role: :assistant,
+            model_id: data['model'],
+            content: data.dig('choices', 0, 'delta', 'content'),
+            tool_calls: parse_tool_calls(data.dig('choices', 0, 'delta', 'tool_calls'), parse_arguments: false),
+            input_tokens: data.dig('usage', 'prompt_tokens'),
+            output_tokens: data.dig('usage', 'completion_tokens')
+          )
+        end
+
+        def build_tool_call_delta_chunk(data)
+          # For tool call argument deltas, we need to create a partial tool call
+          # The accumulator will handle building up the complete arguments
+          tool_call_data = {
+            'id' => data['item_id'],
+            'function' => {
+              'name' => '', # Name comes from the initial item.added event
+              'arguments' => data['delta'] || ''
+            }
+          }
+
+          Chunk.new(
+            role: :assistant,
+            model_id: nil,
+            content: nil,
+            tool_calls: { data['item_id'] => create_streaming_tool_call(tool_call_data) },
+            input_tokens: nil,
+            output_tokens: nil
+          )
+        end
+
+        def build_tool_call_start_chunk(data)
+          item = data['item']
+          tool_call_data = {
+            'id' => item['id'],
+            'function' => {
+              'name' => item['name'],
+              'arguments' => item['arguments'] || ''
+            }
+          }
+
+          Chunk.new(
+            role: :assistant,
+            model_id: nil,
+            content: nil,
+            tool_calls: { item['id'] => create_streaming_tool_call(tool_call_data) },
+            input_tokens: nil,
+            output_tokens: nil
+          )
+        end
+
+        def build_tool_call_complete_chunk(data)
+          item = data['item']
+          tool_call_data = {
+            'id' => item['id'],
+            'function' => {
+              'name' => item['name'],
+              'arguments' => item['arguments'] || ''
+            }
+          }
+
+          Chunk.new(
+            role: :assistant,
+            model_id: nil,
+            content: nil,
+            tool_calls: { item['id'] => create_streaming_tool_call(tool_call_data) },
+            input_tokens: nil,
+            output_tokens: nil
+          )
+        end
+
+        def build_empty_chunk(data)
+          Chunk.new(
+            role: :assistant,
+            model_id: data.dig('response', 'model'),
+            content: nil,
+            tool_calls: nil,
+            input_tokens: nil,
+            output_tokens: nil
+          )
+        end
+
+        def create_streaming_tool_call(tool_call_data)
+          ToolCall.new(
+            id: tool_call_data['id'],
+            name: tool_call_data.dig('function', 'name'),
+            arguments: tool_call_data.dig('function', 'arguments')
+          )
+        end
+
+        def parse_streaming_error(data)
+          error_data = JSON.parse(data)
+          return unless error_data['error']
+
+          case error_data.dig('error', 'type')
+          when 'server_error'
+            [500, error_data['error']['message']]
+          when 'rate_limit_exceeded', 'insufficient_quota'
+            [429, error_data['error']['message']]
+          else
+            [400, error_data['error']['message']]
+          end
+        end
+      end
+    end
+  end
+end
```
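A standalone sketch (event payloads are illustrative, not captured output) of the Responses API event flow these builders handle: text arrives as `response.output_text.delta` deltas, while usage arrives only on `response.completed`:

```ruby
# Toy event stream in the shape build_responses_chunk dispatches on.
events = [
  { 'type' => 'response.created' },
  { 'type' => 'response.output_text.delta', 'delta' => 'Hel' },
  { 'type' => 'response.output_text.delta', 'delta' => 'lo' },
  { 'type' => 'response.completed',
    'response' => { 'model' => 'gpt-4o', 'usage' => { 'input_tokens' => 5, 'output_tokens' => 2 } } }
]

# Deltas concatenate into the final content; the accumulator does this per chunk.
text  = events.select { |e| e['type'] == 'response.output_text.delta' }.map { |e| e['delta'] }.join
usage = events.find { |e| e['type'] == 'response.completed' }&.dig('response', 'usage')

puts text # => Hello
p usage   # => {"input_tokens"=>5, "output_tokens"=>2}
```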
data/lib/ruby_llm/providers/openai/tools.rb

```diff
@@ -0,0 +1,100 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class OpenAI
+      # Tools methods of the OpenAI API integration
+      module Tools
+        module_function
+
+        def chat_tool_for(tool)
+          {
+            type: 'function',
+            function: {
+              name: tool.name,
+              description: tool.description,
+              parameters: tool_parameters_for(tool)
+            }
+          }
+        end
+
+        def response_tool_for(tool)
+          {
+            type: 'function',
+            name: tool.name,
+            description: tool.description,
+            parameters: tool_parameters_for(tool)
+          }
+        end
+
+        def param_schema(param)
+          {
+            type: param.type,
+            description: param.description
+          }.compact
+        end
+
+        def tool_parameters_for(tool)
+          {
+            type: 'object',
+            properties: tool.parameters.transform_values { |param| param_schema(param) },
+            required: tool.parameters.select { |_, p| p.required }.keys
+          }
+        end
+
+        def format_tool_calls(tool_calls)
+          return nil unless tool_calls&.any?
+
+          tool_calls.map do |_, tc|
+            {
+              id: tc.id,
+              type: 'function',
+              function: {
+                name: tc.name,
+                arguments: JSON.generate(tc.arguments)
+              }
+            }
+          end
+        end
+
+        def parse_tool_calls(tool_calls, parse_arguments: true)
+          return nil unless tool_calls&.any?
+
+          tool_calls.to_h do |tc|
+            [
+              tc['id'],
+              ToolCall.new(
+                id: tc['id'],
+                name: tc.dig('function', 'name'),
+                arguments: if parse_arguments
+                             if tc.dig('function', 'arguments').empty?
+                               {}
+                             else
+                               JSON.parse(tc.dig('function',
+                                                 'arguments'))
+                             end
+                           else
+                             tc.dig('function', 'arguments')
+                           end
+              )
+            ]
+          end
+        end
+
+        def parse_response_tool_calls(outputs)
+          # TODO: implement the other & built-in tools
+          # 'web_search_call', 'file_search_call', 'image_generation_call',
+          # 'code_interpreter_call', 'local_shell_call', 'mcp_call',
+          # 'mcp_list_tools', 'mcp_approval_request'
+          outputs.select { |o| o['type'] == 'function_call' }.to_h do |o|
+            [o['id'], ToolCall.new(
+              id: o['call_id'],
+              name: o['name'],
+              arguments: JSON.parse(o['arguments'])
+            )]
+          end
+        end
+      end
+    end
+  end
+end
```
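The practical difference between `chat_tool_for` and `response_tool_for` above is only nesting: Chat Completions wraps the definition under a `function` key, while the Responses API flattens it. A sketch with a hypothetical `get_weather` tool:

```ruby
# Hypothetical tool definition (names and fields are examples only).
definition = {
  name: 'get_weather',
  description: 'Look up current weather',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string', description: 'City name' } },
    required: ['city']
  }
}

chat_tool     = { type: 'function', function: definition } # chat_tool_for shape
response_tool = { type: 'function' }.merge(definition)     # response_tool_for shape
```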
data/lib/ruby_llm/providers/openai.rb

```diff
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # OpenAI API integration using the new Responses API. Handles response generation,
+    # function calling, and OpenAI's unique streaming format. Supports GPT-4, GPT-3.5,
+    # and other OpenAI models.
+    class OpenAI < OpenAIBase
+      include OpenAI::Response
+      include OpenAI::ResponseMedia
+
+      def audio_input?(messages)
+        messages.any? do |message|
+          next false unless message.respond_to?(:content) && message.content.respond_to?(:attachments)
+
+          message.content.attachments.any? { |attachment| attachment.type == :audio }
+        end
+      end
+
+      def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil) # rubocop:disable Metrics/ParameterLists
+        @using_responses_api = !audio_input?(messages)
+
+        if @using_responses_api
+          render_response_payload(messages, tools: tools, temperature: temperature, model: model, stream: stream,
+                                  schema: schema)
+        else
+          super
+        end
+      end
+
+      def completion_url
+        @using_responses_api ? responses_url : super
+      end
+
+      def parse_completion_response(response)
+        if @using_responses_api
+          parse_respond_response(response)
+        else
+          super
+        end
+      end
+    end
+  end
+end
```
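A standalone sketch (toy hashes, not the gem's Message/Content classes) of the routing rule in `render_payload` above: any `:audio` attachment anywhere in the conversation forces the inherited Chat Completions path, and everything else goes to the Responses endpoint:

```ruby
# Toy conversation data for illustration only.
messages = [
  { attachments: [{ type: :image }] },
  { attachments: [] }
]

audio_input = messages.any? { |m| m[:attachments].any? { |a| a[:type] == :audio } }
endpoint = audio_input ? 'chat/completions' : 'responses'
puts endpoint # => responses
```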
data/lib/ruby_llm/providers/openai_base.rb

```diff
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # OpenAI API integration. Handles chat completion, function calling,
+    # and OpenAI's unique streaming format. Supports GPT-4, GPT-3.5,
+    # and other OpenAI models.
+    class OpenAIBase < Provider
+      include OpenAI::Chat
+      include OpenAI::Embeddings
+      include OpenAI::Models
+      include OpenAI::Streaming
+      include OpenAI::Tools
+      include OpenAI::Images
+      include OpenAI::Media
+
+      def api_base
+        @config.openai_api_base || 'https://api.openai.com/v1'
+      end
+
+      def headers
+        {
+          'Authorization' => "Bearer #{@config.openai_api_key}",
+          'OpenAI-Organization' => @config.openai_organization_id,
+          'OpenAI-Project' => @config.openai_project_id
+        }.compact
+      end
+
+      def maybe_normalize_temperature(temperature, model_id)
+        OpenAI::Capabilities.normalize_temperature(temperature, model_id)
+      end
+
+      class << self
+        def capabilities
+          OpenAI::Capabilities
+        end
+
+        def configuration_requirements
+          %i[openai_api_key]
+        end
+      end
+    end
+  end
+end
```
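For context, a minimal sketch of wiring up the configuration accessors used above, assuming the gem's `RubyLLM.configure` block (values are placeholders):

```ruby
RubyLLM.configure do |config|
  config.openai_api_key         = ENV['OPENAI_API_KEY'] # required, per configuration_requirements
  config.openai_organization_id = nil                   # optional; nil entries are dropped by .compact
  config.openai_project_id      = nil                   # optional
  config.openai_api_base        = nil                   # optional; defaults to https://api.openai.com/v1
end
```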
data/lib/ruby_llm/providers/openrouter/models.rb

```diff
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class OpenRouter
+      # Models methods of the OpenRouter API integration
+      module Models
+        module_function
+
+        def models_url
+          'models'
+        end
+
+        def parse_list_models_response(response, slug, _capabilities)
+          Array(response.body['data']).map do |model_data| # rubocop:disable Metrics/BlockLength
+            # Extract modalities directly from architecture
+            modalities = {
+              input: Array(model_data.dig('architecture', 'input_modalities')),
+              output: Array(model_data.dig('architecture', 'output_modalities'))
+            }
+
+            # Construct pricing from API data, only adding non-zero values
+            pricing = { text_tokens: { standard: {} } }
+
+            pricing_types = {
+              prompt: :input_per_million,
+              completion: :output_per_million,
+              input_cache_read: :cached_input_per_million,
+              internal_reasoning: :reasoning_output_per_million
+            }
+
+            pricing_types.each do |source_key, target_key|
+              value = model_data.dig('pricing', source_key.to_s).to_f
+              pricing[:text_tokens][:standard][target_key] = value * 1_000_000 if value.positive?
+            end
+
+            # Convert OpenRouter's supported parameters to our capability format
+            capabilities = supported_parameters_to_capabilities(model_data['supported_parameters'])
+
+            Model::Info.new(
+              id: model_data['id'],
+              name: model_data['name'],
+              provider: slug,
+              family: model_data['id'].split('/').first,
+              created_at: model_data['created'] ? Time.at(model_data['created']) : nil,
+              context_window: model_data['context_length'],
+              max_output_tokens: model_data.dig('top_provider', 'max_completion_tokens'),
+              modalities: modalities,
+              capabilities: capabilities,
+              pricing: pricing,
+              metadata: {
+                description: model_data['description'],
+                architecture: model_data['architecture'],
+                top_provider: model_data['top_provider'],
+                per_request_limits: model_data['per_request_limits'],
+                supported_parameters: model_data['supported_parameters']
+              }
+            )
+          end
+        end
+
+        def supported_parameters_to_capabilities(params)
+          return [] unless params
+
+          capabilities = []
+
+          # Standard capabilities mapping
+          capabilities << 'streaming' # Assume all OpenRouter models support streaming
+
+          # Function calling capability
+          capabilities << 'function_calling' if params.include?('tools') || params.include?('tool_choice')
+
+          # Structured output capability
+          capabilities << 'structured_output' if params.include?('response_format')
+
+          # Batch capability
+          capabilities << 'batch' if params.include?('batch')
+
+          # Additional mappings based on params
+          # Handles advanced model capabilities that might be inferred from supported params
+          capabilities << 'predicted_outputs' if params.include?('logit_bias') && params.include?('top_k')
+
+          capabilities
+        end
+      end
+    end
+  end
+end
```
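The pricing loop above converts OpenRouter's USD-per-token strings into per-million-token rates, skipping zero values. A standalone sketch with made-up prices:

```ruby
# Sample API pricing values (illustrative, not real OpenRouter data).
pricing_data = { 'prompt' => '0.000001', 'completion' => '0.000002', 'input_cache_read' => '0' }

standard = {}
{ prompt: :input_per_million,
  completion: :output_per_million,
  input_cache_read: :cached_input_per_million }.each do |src, dst|
  value = pricing_data[src.to_s].to_f
  standard[dst] = value * 1_000_000 if value.positive? # zeros are dropped
end

p standard # => {:input_per_million=>1.0, :output_per_million=>2.0}
```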
data/lib/ruby_llm/providers/openrouter.rb

```diff
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # OpenRouter API integration.
+    class OpenRouter < OpenAIBase
+      include OpenRouter::Models
+
+      def api_base
+        'https://openrouter.ai/api/v1'
+      end
+
+      def headers
+        {
+          'Authorization' => "Bearer #{@config.openrouter_api_key}"
+        }
+      end
+
+      class << self
+        def configuration_requirements
+          %i[openrouter_api_key]
+        end
+      end
+    end
+  end
+end
```
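A minimal configuration sketch, assuming the gem's `RubyLLM.configure` block (the key value is a placeholder, and the model id in the comment is only an example of OpenRouter's `vendor/model` scheme):

```ruby
RubyLLM.configure do |config|
  config.openrouter_api_key = ENV['OPENROUTER_API_KEY'] # required, per configuration_requirements
end

# OpenRouter models are addressed by "vendor/model" ids, which the family
# parsing above splits on '/', e.g.:
#   RubyLLM.chat(model: 'anthropic/claude-3.5-sonnet', provider: :openrouter)
```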