ruby_llm_community 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. checksums.yaml +4 -4
  2. data/LICENSE +22 -0
  3. data/README.md +172 -0
  4. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
  5. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  7. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
  8. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
  9. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
  10. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
  11. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  12. data/lib/generators/ruby_llm/install_generator.rb +121 -0
  13. data/lib/ruby_llm/active_record/acts_as.rb +382 -0
  14. data/lib/ruby_llm/aliases.json +217 -0
  15. data/lib/ruby_llm/aliases.rb +56 -0
  16. data/lib/ruby_llm/attachment.rb +164 -0
  17. data/lib/ruby_llm/chat.rb +226 -0
  18. data/lib/ruby_llm/chunk.rb +6 -0
  19. data/lib/ruby_llm/configuration.rb +73 -0
  20. data/lib/ruby_llm/connection.rb +126 -0
  21. data/lib/ruby_llm/content.rb +52 -0
  22. data/lib/ruby_llm/context.rb +29 -0
  23. data/lib/ruby_llm/embedding.rb +30 -0
  24. data/lib/ruby_llm/error.rb +84 -0
  25. data/lib/ruby_llm/image.rb +53 -0
  26. data/lib/ruby_llm/message.rb +81 -0
  27. data/lib/ruby_llm/mime_type.rb +67 -0
  28. data/lib/ruby_llm/model/info.rb +101 -0
  29. data/lib/ruby_llm/model/modalities.rb +22 -0
  30. data/lib/ruby_llm/model/pricing.rb +51 -0
  31. data/lib/ruby_llm/model/pricing_category.rb +48 -0
  32. data/lib/ruby_llm/model/pricing_tier.rb +34 -0
  33. data/lib/ruby_llm/model.rb +7 -0
  34. data/lib/ruby_llm/models.json +29924 -0
  35. data/lib/ruby_llm/models.rb +214 -0
  36. data/lib/ruby_llm/models_schema.json +168 -0
  37. data/lib/ruby_llm/provider.rb +221 -0
  38. data/lib/ruby_llm/providers/anthropic/capabilities.rb +179 -0
  39. data/lib/ruby_llm/providers/anthropic/chat.rb +120 -0
  40. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  41. data/lib/ruby_llm/providers/anthropic/media.rb +116 -0
  42. data/lib/ruby_llm/providers/anthropic/models.rb +56 -0
  43. data/lib/ruby_llm/providers/anthropic/streaming.rb +45 -0
  44. data/lib/ruby_llm/providers/anthropic/tools.rb +108 -0
  45. data/lib/ruby_llm/providers/anthropic.rb +37 -0
  46. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  47. data/lib/ruby_llm/providers/bedrock/chat.rb +76 -0
  48. data/lib/ruby_llm/providers/bedrock/media.rb +73 -0
  49. data/lib/ruby_llm/providers/bedrock/models.rb +82 -0
  50. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  51. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +63 -0
  52. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +71 -0
  53. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
  54. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +92 -0
  55. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
  56. data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
  57. data/lib/ruby_llm/providers/bedrock.rb +83 -0
  58. data/lib/ruby_llm/providers/deepseek/capabilities.rb +131 -0
  59. data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
  60. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  61. data/lib/ruby_llm/providers/gemini/capabilities.rb +351 -0
  62. data/lib/ruby_llm/providers/gemini/chat.rb +146 -0
  63. data/lib/ruby_llm/providers/gemini/embeddings.rb +39 -0
  64. data/lib/ruby_llm/providers/gemini/images.rb +48 -0
  65. data/lib/ruby_llm/providers/gemini/media.rb +55 -0
  66. data/lib/ruby_llm/providers/gemini/models.rb +41 -0
  67. data/lib/ruby_llm/providers/gemini/streaming.rb +66 -0
  68. data/lib/ruby_llm/providers/gemini/tools.rb +82 -0
  69. data/lib/ruby_llm/providers/gemini.rb +36 -0
  70. data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
  71. data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
  72. data/lib/ruby_llm/providers/gpustack.rb +33 -0
  73. data/lib/ruby_llm/providers/mistral/capabilities.rb +163 -0
  74. data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
  75. data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
  76. data/lib/ruby_llm/providers/mistral/models.rb +49 -0
  77. data/lib/ruby_llm/providers/mistral.rb +32 -0
  78. data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
  79. data/lib/ruby_llm/providers/ollama/media.rb +50 -0
  80. data/lib/ruby_llm/providers/ollama.rb +29 -0
  81. data/lib/ruby_llm/providers/openai/capabilities.rb +306 -0
  82. data/lib/ruby_llm/providers/openai/chat.rb +87 -0
  83. data/lib/ruby_llm/providers/openai/embeddings.rb +36 -0
  84. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  85. data/lib/ruby_llm/providers/openai/media.rb +81 -0
  86. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  87. data/lib/ruby_llm/providers/openai/response.rb +116 -0
  88. data/lib/ruby_llm/providers/openai/response_media.rb +76 -0
  89. data/lib/ruby_llm/providers/openai/streaming.rb +191 -0
  90. data/lib/ruby_llm/providers/openai/tools.rb +100 -0
  91. data/lib/ruby_llm/providers/openai.rb +44 -0
  92. data/lib/ruby_llm/providers/openai_base.rb +44 -0
  93. data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
  94. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  95. data/lib/ruby_llm/providers/perplexity/capabilities.rb +138 -0
  96. data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
  97. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  98. data/lib/ruby_llm/providers/perplexity.rb +52 -0
  99. data/lib/ruby_llm/railtie.rb +17 -0
  100. data/lib/ruby_llm/stream_accumulator.rb +103 -0
  101. data/lib/ruby_llm/streaming.rb +162 -0
  102. data/lib/ruby_llm/tool.rb +100 -0
  103. data/lib/ruby_llm/tool_call.rb +31 -0
  104. data/lib/ruby_llm/utils.rb +49 -0
  105. data/lib/ruby_llm/version.rb +5 -0
  106. data/lib/ruby_llm.rb +98 -0
  107. data/lib/tasks/aliases.rake +235 -0
  108. data/lib/tasks/models_docs.rake +224 -0
  109. data/lib/tasks/models_update.rake +108 -0
  110. data/lib/tasks/release.rake +32 -0
  111. data/lib/tasks/vcr.rake +99 -0
  112. metadata +128 -7
@@ -0,0 +1,116 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAI
      # Response methods of the OpenAI API integration (the Responses API,
      # as opposed to the classic Chat Completions API).
      module Response
        # Endpoint path for the Responses API.
        def responses_url
          'responses'
        end

        module_function

        # Builds the request payload for the Responses API.
        #
        # @param messages [Array] conversation messages
        # @param tools [Hash] tool name => tool definition
        # @param temperature [Float, nil] omitted when nil (some models reject it)
        # @param model [String] model id
        # @param cache_prompts [Boolean] accepted for interface parity; unused here
        # @param stream [Boolean] request a streaming response
        # @param schema [Hash, nil] JSON schema for structured output
        # @return [Hash] payload ready for JSON serialization
        def render_response_payload(messages, tools:, temperature:, model:, cache_prompts:, stream: false, schema: nil) # rubocop:disable Metrics/ParameterLists,Lint/UnusedMethodArgument
          payload = {
            model: model,
            input: format_input(messages),
            stream: stream
          }

          # Only include temperature if it's not nil (some models don't accept it)
          payload[:temperature] = temperature unless temperature.nil?

          payload[:tools] = tools.map { |_, tool| response_tool_for(tool) } if tools.any?

          if schema
            # Use strict mode from schema if specified, default to true
            strict = schema[:strict] != false

            payload[:text] = {
              format: {
                type: 'json_schema',
                name: 'response',
                schema: schema,
                strict: strict
              }
            }
          end

          payload
        end

        # Converts internal messages into Responses API input items:
        # tool-call messages become 'function_call' items, tool results become
        # 'function_call_output' items, everything else becomes a 'message'.
        def format_input(messages) # rubocop:disable Metrics/PerceivedComplexity
          # Collect every tool call in the conversation so tool-result messages
          # can be matched back to the call that produced them.
          all_tool_calls = messages.flat_map do |m|
            m.tool_calls&.values || []
          end
          messages.flat_map do |msg|
            if msg.tool_call?
              msg.tool_calls.map do |_, tc|
                {
                  type: 'function_call',
                  call_id: tc.id,
                  name: tc.name,
                  arguments: JSON.generate(tc.arguments),
                  status: 'completed'
                }
              end
            elsif msg.role == :tool
              {
                type: 'function_call_output',
                call_id: all_tool_calls.detect { |tc| tc.id == msg.tool_call_id }&.id,
                output: msg.content,
                status: 'completed'
              }
            else
              {
                type: 'message',
                role: format_role(msg.role),
                content: ResponseMedia.format_content(msg.content),
                status: 'completed'
              }.compact
            end
          end
        end

        # Maps internal roles to Responses API roles; the Responses API
        # calls the system role 'developer'.
        def format_role(role)
          case role
          when :system
            'developer'
          else
            role.to_s
          end
        end

        # Parses a Responses API HTTP response into a Message.
        # Returns nil for empty bodies or responses without output items.
        #
        # @raise [Error] when the API returned an error payload
        def parse_respond_response(response)
          data = response.body
          return if data.empty?

          raise Error.new(response, data.dig('error', 'message')) if data.dig('error', 'message')

          outputs = data['output']
          # Guard against a missing (nil) output array as well as an empty one.
          return unless outputs&.any?

          Message.new(
            role: :assistant,
            content: all_output_text(outputs),
            tool_calls: parse_response_tool_calls(outputs),
            # Use dig throughout so a missing usage object yields nil
            # instead of raising NoMethodError.
            input_tokens: data.dig('usage', 'input_tokens'),
            output_tokens: data.dig('usage', 'output_tokens'),
            cached_tokens: data.dig('usage', 'input_tokens_details', 'cached_tokens'),
            model_id: data['model'],
            raw: response
          )
        end

        # Joins the text of every 'output_text' part across all 'message'
        # output items, one line per message item part.
        def all_output_text(outputs)
          outputs.select { |o| o['type'] == 'message' }.flat_map do |o|
            o['content'].filter_map { |c| c['text'] if c['type'] == 'output_text' }
          end.join("\n")
        end
      end
    end
  end
end
@@ -0,0 +1,76 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ class OpenAI
6
+ # Handles formatting of media content (images, audio) for OpenAI APIs
7
+ module ResponseMedia
8
+ module_function
9
+
10
+ def format_content(content)
11
+ return content.to_json if content.is_a?(Hash) || content.is_a?(Array)
12
+ return content unless content.is_a?(Content)
13
+
14
+ parts = []
15
+ parts << format_text(content.text) if content.text
16
+
17
+ content.attachments.each do |attachment|
18
+ case attachment.type
19
+ when :image
20
+ parts << format_image(attachment)
21
+ when :pdf
22
+ parts << format_pdf(attachment)
23
+ when :audio
24
+ parts << format_audio(attachment)
25
+ when :text
26
+ parts << format_text_file(attachment)
27
+ else
28
+ raise UnsupportedAttachmentError, attachment.type
29
+ end
30
+ end
31
+
32
+ parts
33
+ end
34
+
35
+ def format_image(image)
36
+ {
37
+ type: 'input_image',
38
+ image_url: image.url? ? image.source : "data:#{image.mime_type};base64,#{image.encoded}"
39
+ }
40
+ end
41
+
42
+ def format_pdf(pdf)
43
+ {
44
+ type: 'input_file',
45
+ filename: pdf.filename,
46
+ file_data: "data:#{pdf.mime_type};base64,#{pdf.encoded}"
47
+ }
48
+ end
49
+
50
+ def format_text_file(text_file)
51
+ {
52
+ type: 'input_text',
53
+ text: Utils.format_text_file_for_llm(text_file)
54
+ }
55
+ end
56
+
57
+ def format_audio(audio)
58
+ {
59
+ type: 'input_audio',
60
+ input_audio: {
61
+ data: audio.encoded,
62
+ format: audio.mime_type.split('/').last
63
+ }
64
+ }
65
+ end
66
+
67
+ def format_text(text)
68
+ {
69
+ type: 'input_text',
70
+ text: text
71
+ }
72
+ end
73
+ end
74
+ end
75
+ end
76
+ end
@@ -0,0 +1,191 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAI
      # Streaming methods of the OpenAI API integration. Handles both the
      # Chat Completions streaming format and the Responses API event stream.
      module Streaming
        module_function

        # URL used for streaming chat completions (same as non-streaming).
        def stream_url
          completion_url
        end

        # URL used for streaming Responses API calls.
        def responses_stream_url
          responses_url
        end

        # Dispatches a raw streamed payload to the right chunk builder.
        # Responses API events carry a 'type' field; Chat Completions do not.
        def build_chunk(data)
          data['type'] ? build_responses_chunk(data) : build_chat_completions_chunk(data)
        end

        # Builds a Chunk from a single Responses API streaming event.
        def build_responses_chunk(data) # rubocop:disable Metrics/MethodLength
          case data['type']
          when 'response.text.delta'
            # Text content delta - deprecated format
            Chunk.new(
              role: :assistant,
              model_id: data.dig('response', 'model'),
              content: data['delta'],
              tool_calls: nil,
              input_tokens: nil,
              output_tokens: nil
            )
          when 'response.output_text.delta'
            # Text content delta - new format. The model id only arrives
            # with the final 'response.completed' event.
            Chunk.new(
              role: :assistant,
              model_id: nil,
              content: data['delta'],
              tool_calls: nil,
              input_tokens: nil,
              output_tokens: nil
            )
          when 'response.function_call_arguments.delta'
            # Tool call arguments delta - the accumulator builds up the
            # complete argument string from these partial chunks.
            build_tool_call_delta_chunk(data)
          when 'response.output_item.added'
            # New tool call or message starting
            if data.dig('item', 'type') == 'function_call'
              build_tool_call_start_chunk(data)
            else
              build_empty_chunk(data)
            end
          when 'response.output_item.done'
            # Tool call or message completed
            if data.dig('item', 'type') == 'function_call'
              build_tool_call_complete_chunk(data)
            else
              build_empty_chunk(data)
            end
          when 'response.completed'
            # Final response with usage stats
            Chunk.new(
              role: :assistant,
              model_id: data.dig('response', 'model'),
              content: nil,
              tool_calls: nil,
              input_tokens: data.dig('response', 'usage', 'input_tokens'),
              output_tokens: data.dig('response', 'usage', 'output_tokens')
            )
          else
            # Other event types (response.created, response.in_progress, etc.)
            build_empty_chunk(data)
          end
        end

        # Builds a Chunk from a Chat Completions streaming delta.
        def build_chat_completions_chunk(data)
          Chunk.new(
            role: :assistant,
            model_id: data['model'],
            content: data.dig('choices', 0, 'delta', 'content'),
            tool_calls: parse_tool_calls(data.dig('choices', 0, 'delta', 'tool_calls'), parse_arguments: false),
            input_tokens: data.dig('usage', 'prompt_tokens'),
            output_tokens: data.dig('usage', 'completion_tokens'),
            # OpenAI reports cached tokens under usage.prompt_tokens_details;
            # keep the old top-level key as a backward-compatible fallback.
            cached_tokens: data.dig('usage', 'prompt_tokens_details', 'cached_tokens') ||
                           data.dig('usage', 'cached_tokens')
          )
        end

        # Builds a partial tool-call chunk from an arguments delta; the
        # accumulator is responsible for stitching the arguments together.
        def build_tool_call_delta_chunk(data)
          tool_call_data = {
            'id' => data['item_id'],
            'function' => {
              'name' => '', # Name comes from the initial output_item.added event
              'arguments' => data['delta'] || ''
            }
          }

          Chunk.new(
            role: :assistant,
            model_id: nil,
            content: nil,
            tool_calls: { data['item_id'] => create_streaming_tool_call(tool_call_data) },
            input_tokens: nil,
            output_tokens: nil
          )
        end

        # Tool call starting (output_item.added with a function_call item).
        def build_tool_call_start_chunk(data)
          build_tool_call_item_chunk(data['item'])
        end

        # Tool call finished (output_item.done with a function_call item).
        def build_tool_call_complete_chunk(data)
          build_tool_call_item_chunk(data['item'])
        end

        # Shared builder for function_call output items; the start and
        # complete events carry identical item shapes.
        def build_tool_call_item_chunk(item)
          tool_call_data = {
            'id' => item['id'],
            'function' => {
              'name' => item['name'],
              'arguments' => item['arguments'] || ''
            }
          }

          Chunk.new(
            role: :assistant,
            model_id: nil,
            content: nil,
            tool_calls: { item['id'] => create_streaming_tool_call(tool_call_data) },
            input_tokens: nil,
            output_tokens: nil
          )
        end

        # Chunk with no content or tool calls, for events we only observe.
        def build_empty_chunk(data)
          Chunk.new(
            role: :assistant,
            model_id: data.dig('response', 'model'),
            content: nil,
            tool_calls: nil,
            input_tokens: nil,
            output_tokens: nil
          )
        end

        # Wraps raw streamed tool-call data in a ToolCall; arguments are kept
        # as the raw (possibly partial) string for the accumulator.
        def create_streaming_tool_call(tool_call_data)
          ToolCall.new(
            id: tool_call_data['id'],
            name: tool_call_data.dig('function', 'name'),
            arguments: tool_call_data.dig('function', 'arguments')
          )
        end

        # Maps a streamed error payload to [http_status, message];
        # returns nil when the payload carries no error.
        def parse_streaming_error(data)
          error_data = JSON.parse(data)
          return unless error_data['error']

          case error_data.dig('error', 'type')
          when 'server_error'
            [500, error_data['error']['message']]
          when 'rate_limit_exceeded', 'insufficient_quota'
            [429, error_data['error']['message']]
          else
            [400, error_data['error']['message']]
          end
        end
      end
    end
  end
end
@@ -0,0 +1,100 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAI
      # Tools methods of the OpenAI API integration. Formats tool definitions
      # for both the Chat Completions and Responses APIs and parses tool
      # calls out of API responses.
      module Tools
        module_function

        # Tool definition shape for the Chat Completions API
        # (function details nested under :function).
        def chat_tool_for(tool)
          {
            type: 'function',
            function: {
              name: tool.name,
              description: tool.description,
              parameters: tool_parameters_for(tool)
            }
          }
        end

        # Tool definition shape for the Responses API (flat structure).
        def response_tool_for(tool)
          {
            type: 'function',
            name: tool.name,
            description: tool.description,
            parameters: tool_parameters_for(tool)
          }
        end

        # JSON-schema fragment for a single tool parameter;
        # nil descriptions are compacted away.
        def param_schema(param)
          {
            type: param.type,
            description: param.description
          }.compact
        end

        # JSON-schema object describing all of a tool's parameters.
        def tool_parameters_for(tool)
          {
            type: 'object',
            properties: tool.parameters.transform_values { |param| param_schema(param) },
            required: tool.parameters.select { |_, p| p.required }.keys
          }
        end

        # Serializes a ToolCall hash into the API's tool_calls array;
        # returns nil when there is nothing to send.
        def format_tool_calls(tool_calls)
          return nil unless tool_calls&.any?

          tool_calls.map do |_, tc|
            {
              id: tc.id,
              type: 'function',
              function: {
                name: tc.name,
                arguments: JSON.generate(tc.arguments)
              }
            }
          end
        end

        # Parses tool calls from an API response into a hash keyed by call id.
        # With parse_arguments: false (streaming), raw argument strings are
        # kept for the accumulator instead of being JSON-parsed.
        def parse_tool_calls(tool_calls, parse_arguments: true)
          return nil unless tool_calls&.any?

          tool_calls.to_h do |tc|
            raw_arguments = tc.dig('function', 'arguments')
            arguments = if parse_arguments
                          # Guard nil as well as '' - the API may omit
                          # arguments entirely for zero-argument tools.
                          if raw_arguments.nil? || raw_arguments.empty?
                            {}
                          else
                            JSON.parse(raw_arguments)
                          end
                        else
                          raw_arguments
                        end

            [
              tc['id'],
              ToolCall.new(
                id: tc['id'],
                name: tc.dig('function', 'name'),
                arguments: arguments
              )
            ]
          end
        end

        # Parses function_call items out of Responses API output items.
        def parse_response_tool_calls(outputs)
          # TODO: implement the other & built-in tools
          # 'web_search_call', 'file_search_call', 'image_generation_call',
          # 'code_interpreter_call', 'local_shell_call', 'mcp_call',
          # 'mcp_list_tools', 'mcp_approval_request'
          outputs.select { |o| o['type'] == 'function_call' }.to_h do |o|
            [o['id'], ToolCall.new(
              id: o['call_id'],
              name: o['name'],
              arguments: JSON.parse(o['arguments'])
            )]
          end
        end
      end
    end
  end
end
@@ -0,0 +1,44 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    # OpenAI API integration using the new Responses API. Handles response
    # generation, function calling, and OpenAI's unique streaming format.
    # Falls back to the Chat Completions API when the conversation contains
    # audio, which the Responses API does not accept.
    class OpenAI < OpenAIBase
      include OpenAI::Response
      include OpenAI::ResponseMedia

      # True when any message carries an audio attachment.
      def audio_input?(messages)
        messages.any? { |message| audio_attachments?(message) }
      end

      # True when a single message has at least one audio attachment;
      # messages without content/attachments accessors never match.
      def audio_attachments?(message)
        return false unless message.respond_to?(:content) && message.content.respond_to?(:attachments)

        message.content.attachments.any? { |attachment| attachment.type == :audio }
      end

      # Chooses between the Responses API and Chat Completions payloads,
      # remembering the choice for URL and response parsing.
      def render_payload(messages, tools:, temperature:, model:, cache_prompts:, stream: false, schema: nil) # rubocop:disable Metrics/ParameterLists
        @using_responses_api = !audio_input?(messages)
        return super unless @using_responses_api

        render_response_payload(messages, tools: tools, temperature: temperature, model: model,
                                cache_prompts: cache_prompts, stream: stream, schema: schema)
      end

      # Responses endpoint when using the Responses API, otherwise the
      # inherited chat completions endpoint.
      def completion_url
        return responses_url if @using_responses_api

        super
      end

      # Parses with the Responses API parser or falls back to the
      # inherited Chat Completions parser.
      def parse_completion_response(response)
        @using_responses_api ? parse_respond_response(response) : super
      end
    end
  end
end
@@ -0,0 +1,44 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    # OpenAI API integration. Handles chat completion, function calling,
    # and OpenAI's unique streaming format. Supports GPT-4, GPT-3.5,
    # and other OpenAI models. Serves as the base class for providers
    # exposing OpenAI-compatible APIs.
    class OpenAIBase < Provider
      include OpenAI::Chat
      include OpenAI::Embeddings
      include OpenAI::Models
      include OpenAI::Streaming
      include OpenAI::Tools
      include OpenAI::Images
      include OpenAI::Media

      # Configured base URL, defaulting to the public OpenAI endpoint.
      def api_base
        @config.openai_api_base || 'https://api.openai.com/v1'
      end

      # Auth and optional org/project headers; nil-valued optional
      # headers are dropped.
      def headers
        auth = { 'Authorization' => "Bearer #{@config.openai_api_key}" }
        optional = {
          'OpenAI-Organization' => @config.openai_organization_id,
          'OpenAI-Project' => @config.openai_project_id
        }
        auth.merge(optional).compact
      end

      # Delegates temperature normalization to the capabilities module
      # (some models only accept particular temperature values).
      def maybe_normalize_temperature(temperature, model_id)
        OpenAI::Capabilities.normalize_temperature(temperature, model_id)
      end

      # Capabilities module used for model introspection.
      def self.capabilities
        OpenAI::Capabilities
      end

      # Configuration keys that must be set to use this provider.
      def self.configuration_requirements
        %i[openai_api_key]
      end
    end
  end
end
@@ -0,0 +1,88 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenRouter
      # Models methods of the OpenRouter API integration
      module Models
        # OpenRouter prices are per token; we store them per million tokens.
        PRICE_PER_MILLION = 1_000_000

        # Maps OpenRouter pricing keys to our pricing fields.
        PRICING_KEYS = {
          'prompt' => :input_per_million,
          'completion' => :output_per_million,
          'input_cache_read' => :cached_input_per_million,
          'internal_reasoning' => :reasoning_output_per_million
        }.freeze

        module_function

        # Endpoint path for listing models.
        def models_url
          'models'
        end

        # Converts OpenRouter's model list response into Model::Info records.
        def parse_list_models_response(response, slug, _capabilities)
          Array(response.body['data']).map do |model_data|
            Model::Info.new(
              id: model_data['id'],
              name: model_data['name'],
              provider: slug,
              # Model ids look like 'vendor/model'; the vendor is the family.
              family: model_data['id'].split('/').first,
              created_at: model_data['created'] ? Time.at(model_data['created']) : nil,
              context_window: model_data['context_length'],
              max_output_tokens: model_data.dig('top_provider', 'max_completion_tokens'),
              modalities: modalities_for(model_data),
              capabilities: supported_parameters_to_capabilities(model_data['supported_parameters']),
              pricing: pricing_for(model_data),
              metadata: metadata_for(model_data)
            )
          end
        end

        # Extracts input/output modalities directly from the architecture data.
        def modalities_for(model_data)
          {
            input: Array(model_data.dig('architecture', 'input_modalities')),
            output: Array(model_data.dig('architecture', 'output_modalities'))
          }
        end

        # Constructs pricing from API data, only adding non-zero values.
        def pricing_for(model_data)
          standard = {}
          PRICING_KEYS.each do |source_key, target_key|
            value = model_data.dig('pricing', source_key).to_f
            standard[target_key] = value * PRICE_PER_MILLION if value.positive?
          end
          { text_tokens: { standard: standard } }
        end

        # Passes through OpenRouter-specific model details as metadata.
        def metadata_for(model_data)
          {
            description: model_data['description'],
            architecture: model_data['architecture'],
            top_provider: model_data['top_provider'],
            per_request_limits: model_data['per_request_limits'],
            supported_parameters: model_data['supported_parameters']
          }
        end

        # Converts OpenRouter's supported parameters to our capability format.
        # Returns [] when the parameter list is missing.
        def supported_parameters_to_capabilities(params)
          return [] unless params

          # Assume all OpenRouter models support streaming
          capabilities = ['streaming']

          capabilities << 'function_calling' if params.include?('tools') || params.include?('tool_choice')
          capabilities << 'structured_output' if params.include?('response_format')
          capabilities << 'batch' if params.include?('batch')

          # Advanced capability inferred from the supported params
          capabilities << 'predicted_outputs' if params.include?('logit_bias') && params.include?('top_k')

          capabilities
        end
      end
    end
  end
end
@@ -0,0 +1,26 @@
1
# frozen_string_literal: true

module RubyLLM
  module Providers
    # OpenRouter API integration.
    class OpenRouter < OpenAIBase
      include OpenRouter::Models

      # OpenRouter exposes an OpenAI-compatible API at a fixed base URL.
      API_BASE = 'https://openrouter.ai/api/v1'

      def api_base
        API_BASE
      end

      # Bearer-token auth only; no org/project headers.
      def headers
        { 'Authorization' => "Bearer #{@config.openrouter_api_key}" }
      end

      # Configuration keys that must be set to use this provider.
      def self.configuration_requirements
        %i[openrouter_api_key]
      end
    end
  end
end