ruby_llm_community 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. checksums.yaml +4 -4
  2. data/LICENSE +22 -0
  3. data/README.md +172 -0
  4. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
  5. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  7. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
  8. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
  9. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
  10. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
  11. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  12. data/lib/generators/ruby_llm/install_generator.rb +121 -0
  13. data/lib/ruby_llm/active_record/acts_as.rb +382 -0
  14. data/lib/ruby_llm/aliases.json +217 -0
  15. data/lib/ruby_llm/aliases.rb +56 -0
  16. data/lib/ruby_llm/attachment.rb +164 -0
  17. data/lib/ruby_llm/chat.rb +219 -0
  18. data/lib/ruby_llm/chunk.rb +6 -0
  19. data/lib/ruby_llm/configuration.rb +75 -0
  20. data/lib/ruby_llm/connection.rb +126 -0
  21. data/lib/ruby_llm/content.rb +52 -0
  22. data/lib/ruby_llm/context.rb +29 -0
  23. data/lib/ruby_llm/embedding.rb +30 -0
  24. data/lib/ruby_llm/error.rb +84 -0
  25. data/lib/ruby_llm/image.rb +53 -0
  26. data/lib/ruby_llm/message.rb +76 -0
  27. data/lib/ruby_llm/mime_type.rb +67 -0
  28. data/lib/ruby_llm/model/info.rb +101 -0
  29. data/lib/ruby_llm/model/modalities.rb +22 -0
  30. data/lib/ruby_llm/model/pricing.rb +51 -0
  31. data/lib/ruby_llm/model/pricing_category.rb +48 -0
  32. data/lib/ruby_llm/model/pricing_tier.rb +34 -0
  33. data/lib/ruby_llm/model.rb +7 -0
  34. data/lib/ruby_llm/models.json +29924 -0
  35. data/lib/ruby_llm/models.rb +218 -0
  36. data/lib/ruby_llm/models_schema.json +168 -0
  37. data/lib/ruby_llm/provider.rb +219 -0
  38. data/lib/ruby_llm/providers/anthropic/capabilities.rb +179 -0
  39. data/lib/ruby_llm/providers/anthropic/chat.rb +106 -0
  40. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  41. data/lib/ruby_llm/providers/anthropic/media.rb +92 -0
  42. data/lib/ruby_llm/providers/anthropic/models.rb +48 -0
  43. data/lib/ruby_llm/providers/anthropic/streaming.rb +43 -0
  44. data/lib/ruby_llm/providers/anthropic/tools.rb +108 -0
  45. data/lib/ruby_llm/providers/anthropic.rb +37 -0
  46. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  47. data/lib/ruby_llm/providers/bedrock/chat.rb +65 -0
  48. data/lib/ruby_llm/providers/bedrock/media.rb +61 -0
  49. data/lib/ruby_llm/providers/bedrock/models.rb +82 -0
  50. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  51. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +63 -0
  52. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +63 -0
  53. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
  54. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +90 -0
  55. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
  56. data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
  57. data/lib/ruby_llm/providers/bedrock.rb +83 -0
  58. data/lib/ruby_llm/providers/deepseek/capabilities.rb +131 -0
  59. data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
  60. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  61. data/lib/ruby_llm/providers/gemini/capabilities.rb +351 -0
  62. data/lib/ruby_llm/providers/gemini/chat.rb +139 -0
  63. data/lib/ruby_llm/providers/gemini/embeddings.rb +39 -0
  64. data/lib/ruby_llm/providers/gemini/images.rb +48 -0
  65. data/lib/ruby_llm/providers/gemini/media.rb +55 -0
  66. data/lib/ruby_llm/providers/gemini/models.rb +41 -0
  67. data/lib/ruby_llm/providers/gemini/streaming.rb +58 -0
  68. data/lib/ruby_llm/providers/gemini/tools.rb +82 -0
  69. data/lib/ruby_llm/providers/gemini.rb +36 -0
  70. data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
  71. data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
  72. data/lib/ruby_llm/providers/gpustack.rb +33 -0
  73. data/lib/ruby_llm/providers/mistral/capabilities.rb +163 -0
  74. data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
  75. data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
  76. data/lib/ruby_llm/providers/mistral/models.rb +49 -0
  77. data/lib/ruby_llm/providers/mistral.rb +32 -0
  78. data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
  79. data/lib/ruby_llm/providers/ollama/media.rb +50 -0
  80. data/lib/ruby_llm/providers/ollama.rb +29 -0
  81. data/lib/ruby_llm/providers/openai/capabilities.rb +306 -0
  82. data/lib/ruby_llm/providers/openai/chat.rb +86 -0
  83. data/lib/ruby_llm/providers/openai/embeddings.rb +36 -0
  84. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  85. data/lib/ruby_llm/providers/openai/media.rb +81 -0
  86. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  87. data/lib/ruby_llm/providers/openai/response.rb +115 -0
  88. data/lib/ruby_llm/providers/openai/response_media.rb +76 -0
  89. data/lib/ruby_llm/providers/openai/streaming.rb +190 -0
  90. data/lib/ruby_llm/providers/openai/tools.rb +100 -0
  91. data/lib/ruby_llm/providers/openai.rb +44 -0
  92. data/lib/ruby_llm/providers/openai_base.rb +44 -0
  93. data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
  94. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  95. data/lib/ruby_llm/providers/perplexity/capabilities.rb +138 -0
  96. data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
  97. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  98. data/lib/ruby_llm/providers/perplexity.rb +52 -0
  99. data/lib/ruby_llm/railtie.rb +17 -0
  100. data/lib/ruby_llm/stream_accumulator.rb +97 -0
  101. data/lib/ruby_llm/streaming.rb +162 -0
  102. data/lib/ruby_llm/tool.rb +100 -0
  103. data/lib/ruby_llm/tool_call.rb +31 -0
  104. data/lib/ruby_llm/utils.rb +49 -0
  105. data/lib/ruby_llm/version.rb +5 -0
  106. data/lib/ruby_llm.rb +98 -0
  107. data/lib/tasks/aliases.rake +235 -0
  108. data/lib/tasks/models_docs.rake +224 -0
  109. data/lib/tasks/models_update.rake +108 -0
  110. data/lib/tasks/release.rake +32 -0
  111. data/lib/tasks/vcr.rake +99 -0
  112. metadata +128 -7
data/lib/ruby_llm/models.rb
@@ -0,0 +1,218 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Registry of available AI models and their capabilities. Provides a clean interface
+  # to discover and work with models from different providers.
+  #
+  # Example:
+  #   RubyLLM.models.all                                # All available models
+  #   RubyLLM.models.chat_models                        # Models that support chat
+  #   RubyLLM.models.by_provider('openai').chat_models  # OpenAI chat models
+  #   RubyLLM.models.find('claude-3')                   # Get info about a specific model
+  class Models
+    include Enumerable
+
+    class << self
+      def instance
+        @instance ||= new
+      end
+
+      def provider_for(model)
+        Provider.for(model)
+      end
+
+      def models_file
+        File.expand_path('models.json', __dir__)
+      end
+
+      def refresh!
+        # Collect models from both sources
+        provider_models = fetch_from_providers
+        parsera_models = fetch_from_parsera
+
+        # Merge with parsera data taking precedence
+        merged_models = merge_models(provider_models, parsera_models)
+
+        @instance = new(merged_models)
+      end
+
+      def fetch_from_providers
+        config = RubyLLM.config
+        configured_classes = Provider.configured_remote_providers(config)
+        configured = configured_classes.map { |klass| klass.new(config) }
+
+        RubyLLM.logger.info "Fetching models from providers: #{configured.map(&:name).join(', ')}"
+
+        configured.flat_map(&:list_models)
+      end
+
+      def resolve(model_id, provider: nil, assume_exists: false, config: nil) # rubocop:disable Metrics/PerceivedComplexity
+        config ||= RubyLLM.config
+        provider_class = provider ? Provider.providers[provider.to_sym] : nil
+
+        # Check if provider is local
+        if provider_class
+          temp_instance = provider_class.new(config)
+          assume_exists = true if temp_instance.local?
+        end
+
+        if assume_exists
+          raise ArgumentError, 'Provider must be specified if assume_exists is true' unless provider
+
+          provider_class ||= raise(Error, "Unknown provider: #{provider.to_sym}")
+          provider_instance = provider_class.new(config)
+
+          model = Model::Info.new(
+            id: model_id,
+            name: model_id.tr('-', ' ').capitalize,
+            provider: provider_instance.slug,
+            capabilities: %w[function_calling streaming],
+            modalities: { input: %w[text image], output: %w[text] },
+            metadata: { warning: 'Assuming model exists, capabilities may not be accurate' }
+          )
+          if RubyLLM.config.log_assume_model_exists
+            RubyLLM.logger.warn "Assuming model '#{model_id}' exists for provider '#{provider}'. " \
+                                'Capabilities may not be accurately reflected.'
+          end
+        else
+          model = Models.find model_id, provider
+          provider_class = Provider.providers[model.provider.to_sym] || raise(Error,
+                                                                               "Unknown provider: #{model.provider}")
+          provider_instance = provider_class.new(config)
+        end
+        [model, provider_instance]
+      end
+
+      def method_missing(method, ...)
+        if instance.respond_to?(method)
+          instance.send(method, ...)
+        else
+          super
+        end
+      end
+
+      def respond_to_missing?(method, include_private = false)
+        instance.respond_to?(method, include_private) || super
+      end
+
+      def fetch_from_parsera
+        RubyLLM.logger.info 'Fetching models from Parsera API...'
+
+        connection = Connection.basic do |f|
+          f.request :json
+          f.response :json, parser_options: { symbolize_names: true }
+        end
+        response = connection.get 'https://api.parsera.org/v1/llm-specs'
+        models = response.body.map { |data| Model::Info.new(data) }
+        models.reject { |model| model.provider.nil? || model.id.nil? }
+      end
+
+      def merge_models(provider_models, parsera_models)
+        parsera_by_key = index_by_key(parsera_models)
+        provider_by_key = index_by_key(provider_models)
+
+        all_keys = parsera_by_key.keys | provider_by_key.keys
+
+        models = all_keys.map do |key|
+          if (parsera_model = parsera_by_key[key])
+            if (provider_model = provider_by_key[key])
+              add_provider_metadata(parsera_model, provider_model)
+            else
+              parsera_model
+            end
+          else
+            provider_by_key[key]
+          end
+        end
+
+        models.sort_by { |m| [m.provider, m.id] }
+      end
+
+      def index_by_key(models)
+        models.each_with_object({}) do |model, hash|
+          hash["#{model.provider}:#{model.id}"] = model
+        end
+      end
+
+      def add_provider_metadata(parsera_model, provider_model)
+        data = parsera_model.to_h
+        data[:metadata] = provider_model.metadata.merge(data[:metadata] || {})
+        Model::Info.new(data)
+      end
+    end
+
+    def initialize(models = nil)
+      @models = models || load_models
+    end
+
+    def load_models
+      data = File.exist?(self.class.models_file) ? File.read(self.class.models_file) : '[]'
+      JSON.parse(data, symbolize_names: true).map { |model| Model::Info.new(model) }
+    rescue JSON::ParserError
+      []
+    end
+
+    def save_models
+      File.write(self.class.models_file, JSON.pretty_generate(all.map(&:to_h)))
+    end
+
+    def all
+      @models
+    end
+
+    def each(&)
+      all.each(&)
+    end
+
+    def find(model_id, provider = nil)
+      if provider
+        find_with_provider(model_id, provider)
+      else
+        find_without_provider(model_id)
+      end
+    end
+
+    def chat_models
+      self.class.new(all.select { |m| m.type == 'chat' })
+    end
+
+    def embedding_models
+      self.class.new(all.select { |m| m.type == 'embedding' })
+    end
+
+    def audio_models
+      self.class.new(all.select { |m| m.type == 'audio' })
+    end
+
+    def image_models
+      self.class.new(all.select { |m| m.type == 'image' })
+    end
+
+    def by_family(family)
+      self.class.new(all.select { |m| m.family == family.to_s })
+    end
+
+    def by_provider(provider)
+      self.class.new(all.select { |m| m.provider == provider.to_s })
+    end
+
+    def refresh!
+      self.class.refresh!
+    end
+
+    private
+
+    def find_with_provider(model_id, provider)
+      resolved_id = Aliases.resolve(model_id, provider)
+      all.find { |m| m.id == model_id && m.provider == provider.to_s } ||
+        all.find { |m| m.id == resolved_id && m.provider == provider.to_s } ||
+        raise(ModelNotFoundError, "Unknown model: #{model_id} for provider: #{provider}")
+    end
+
+    def find_without_provider(model_id)
+      all.find { |m| m.id == model_id } ||
+        all.find { |m| m.id == Aliases.resolve(model_id) } ||
+        raise(ModelNotFoundError, "Unknown model: #{model_id}")
+    end
+  end
+end
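
A quick usage sketch of the Models registry above, using only methods visible in this diff (all, chat_models, by_provider, find, refresh!); the identifiers are placeholders, not claims about what the bundled models.json contains.

# Hypothetical usage of RubyLLM::Models; model ids are placeholders.
RubyLLM.models.all.size                       # every entry loaded from models.json
RubyLLM.models.chat_models.map(&:id)          # filters return chainable Models instances
RubyLLM.models.by_provider('openai').embedding_models.each { |m| puts m.id }

model = RubyLLM.models.find('claude-3')       # alias-aware lookup via Aliases.resolve
RubyLLM.models.refresh!                       # re-fetch from configured providers and the Parsera API
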
data/lib/ruby_llm/models_schema.json
@@ -0,0 +1,168 @@
+{
+  "title": "RubyLLM Models Schema",
+  "description": "Schema for validating the structure of models.json",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "required": ["id", "name", "provider", "context_window", "max_output_tokens"],
+    "properties": {
+      "id": {
+        "type": "string",
+        "description": "Unique identifier for the model"
+      },
+      "name": {
+        "type": "string",
+        "description": "Display name of the model"
+      },
+      "provider": {
+        "type": "string",
+        "description": "Provider of the model (e.g., openai, anthropic, mistral)"
+      },
+      "family": {
+        "type": ["string", "null"],
+        "description": "Model family (e.g., gpt-4, claude-3)"
+      },
+      "created_at": {
+        "type": ["null", {"type": "string", "format": "date-time"}],
+        "description": "Creation date of the model"
+      },
+      "context_window": {
+        "type": ["null", {"type": "integer", "minimum": 0}],
+        "description": "Maximum context window size"
+      },
+      "max_output_tokens": {
+        "type": ["null", {"type": "integer", "minimum": 0}],
+        "description": "Maximum output tokens"
+      },
+      "knowledge_cutoff": {
+        "type": ["null", {"type": "string", "format": "date"}],
+        "description": "Knowledge cutoff date"
+      },
+      "modalities": {
+        "type": "object",
+        "required": ["input", "output"],
+        "properties": {
+          "input": {
+            "type": "array",
+            "items": {
+              "type": "string",
+              "enum": ["text", "image", "audio", "pdf", "video", "file"]
+            },
+            "uniqueItems": true,
+            "description": "Supported input modalities"
+          },
+          "output": {
+            "type": "array",
+            "items": {
+              "type": "string",
+              "enum": ["text", "image", "audio", "embeddings", "moderation"]
+            },
+            "uniqueItems": true,
+            "description": "Supported output modalities"
+          }
+        }
+      },
+      "capabilities": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "enum": [
+            "streaming", "function_calling", "structured_output", "predicted_outputs",
+            "distillation", "fine_tuning", "batch", "realtime", "image_generation",
+            "speech_generation", "transcription", "translation", "citations", "reasoning",
+            "caching", "moderation", "json_mode", "vision"
+          ]
+        },
+        "uniqueItems": true,
+        "description": "Model capabilities"
+      },
+      "pricing": {
+        "type": "object",
+        "properties": {
+          "text_tokens": {
+            "type": "object",
+            "required": ["standard"],
+            "properties": {
+              "standard": {
+                "type": "object",
+                "properties": {
+                  "input_per_million": {"type": "number", "minimum": 0},
+                  "cached_input_per_million": {"type": "number", "minimum": 0},
+                  "output_per_million": {"type": "number", "minimum": 0},
+                  "reasoning_output_per_million": {"type": "number", "minimum": 0}
+                }
+              },
+              "batch": {
+                "type": "object",
+                "properties": {
+                  "input_per_million": {"type": "number", "minimum": 0},
+                  "output_per_million": {"type": "number", "minimum": 0}
+                }
+              }
+            }
+          },
+          "images": {
+            "type": "object",
+            "properties": {
+              "standard": {
+                "type": "object",
+                "properties": {
+                  "input": {"type": "number", "minimum": 0},
+                  "output": {"type": "number", "minimum": 0}
+                }
+              },
+              "batch": {
+                "type": "object",
+                "properties": {
+                  "input": {"type": "number", "minimum": 0},
+                  "output": {"type": "number", "minimum": 0}
+                }
+              }
+            }
+          },
+          "audio_tokens": {
+            "type": "object",
+            "properties": {
+              "standard": {
+                "type": "object",
+                "properties": {
+                  "input_per_million": {"type": "number", "minimum": 0},
+                  "output_per_million": {"type": "number", "minimum": 0}
+                }
+              },
+              "batch": {
+                "type": "object",
+                "properties": {
+                  "input_per_million": {"type": "number", "minimum": 0},
+                  "output_per_million": {"type": "number", "minimum": 0}
+                }
+              }
+            }
+          },
+          "embeddings": {
+            "type": "object",
+            "properties": {
+              "standard": {
+                "type": "object",
+                "properties": {
+                  "input_per_million": {"type": "number", "minimum": 0}
+                }
+              },
+              "batch": {
+                "type": "object",
+                "properties": {
+                  "input_per_million": {"type": "number", "minimum": 0}
+                }
+              }
+            }
+          }
+        },
+        "description": "Pricing information for the model"
+      },
+      "metadata": {
+        "type": "object",
+        "description": "Additional metadata about the model"
+      }
+    }
+  }
+}
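
As a reading aid for the schema, here is a hypothetical entry containing the required keys (id, name, provider, context_window, max_output_tokens), written as the symbolized hash that Models#load_models produces via JSON.parse(..., symbolize_names: true); every value is an illustrative placeholder, not data from the gem's models.json.

# Hypothetical models.json entry after parsing with symbolize_names: true.
example_entry = {
  id: 'example-chat-model',
  name: 'Example Chat Model',
  provider: 'openai',
  family: 'example',
  context_window: 128_000,
  max_output_tokens: 8_192,
  modalities: { input: %w[text image], output: %w[text] },
  capabilities: %w[streaming function_calling structured_output],
  pricing: { text_tokens: { standard: { input_per_million: 1.0, output_per_million: 2.0 } } },
  metadata: {}
}

RubyLLM::Model::Info.new(example_entry) # mirrors the construction in Models#load_models
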
data/lib/ruby_llm/provider.rb
@@ -0,0 +1,219 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Base class for LLM providers like OpenAI and Anthropic.
+  # Handles the complexities of API communication, streaming responses,
+  # and error handling so individual providers can focus on their unique features.
+  # Encapsulates configuration and connection to eliminate parameter threading.
+  class Provider
+    include Streaming
+
+    attr_reader :config, :connection
+
+    def initialize(config)
+      @config = config
+      ensure_configured!
+      @connection = Connection.new(self, @config)
+    end
+
+    def api_base
+      raise NotImplementedError
+    end
+
+    def headers
+      {}
+    end
+
+    def slug
+      self.class.slug
+    end
+
+    def name
+      self.class.name
+    end
+
+    def capabilities
+      self.class.capabilities
+    end
+
+    def configuration_requirements
+      self.class.configuration_requirements
+    end
+
+    def complete(messages, tools:, temperature:, model:, params: {}, headers: {}, schema: nil, &) # rubocop:disable Metrics/ParameterLists
+      normalized_temperature = maybe_normalize_temperature(temperature, model)
+
+      payload = Utils.deep_merge(
+        params,
+        render_payload(
+          messages,
+          tools: tools,
+          temperature: normalized_temperature,
+          model: model,
+          stream: block_given?,
+          schema: schema
+        )
+      )
+
+      if block_given?
+        stream_response @connection, payload, headers, &
+      else
+        sync_response @connection, payload, headers
+      end
+    end
+
+    def list_models
+      response = @connection.get models_url
+      parse_list_models_response response, slug, capabilities
+    end
+
+    def embed(text, model:, dimensions:)
+      payload = render_embedding_payload(text, model:, dimensions:)
+      response = @connection.post(embedding_url(model:), payload)
+      parse_embedding_response(response, model:, text:)
+    end
+
+    def paint(prompt, model:, size:)
+      payload = render_image_payload(prompt, model:, size:)
+      response = @connection.post images_url, payload
+      parse_image_response(response, model:)
+    end
+
+    def configured?
+      configuration_requirements.all? { |req| @config.send(req) }
+    end
+
+    def local?
+      self.class.local?
+    end
+
+    def remote?
+      self.class.remote?
+    end
+
+    def parse_error(response)
+      return if response.body.empty?
+
+      body = try_parse_json(response.body)
+      case body
+      when Hash
+        body.dig('error', 'message')
+      when Array
+        body.map do |part|
+          part.dig('error', 'message')
+        end.join('. ')
+      else
+        body
+      end
+    end
+
+    def format_messages(messages)
+      messages.map do |msg|
+        {
+          role: msg.role.to_s,
+          content: msg.content
+        }
+      end
+    end
+
+    def format_tool_calls(_tool_calls)
+      nil
+    end
+
+    def parse_tool_calls(_tool_calls)
+      nil
+    end
+
+    class << self
+      def name
+        to_s.split('::').last
+      end
+
+      def slug
+        name.downcase
+      end
+
+      def capabilities
+        raise NotImplementedError
+      end
+
+      def configuration_requirements
+        []
+      end
+
+      def local?
+        false
+      end
+
+      def remote?
+        !local?
+      end
+
+      def configured?(config)
+        configuration_requirements.all? { |req| config.send(req) }
+      end
+
+      def register(name, provider_class)
+        providers[name.to_sym] = provider_class
+      end
+
+      def for(model)
+        model_info = Models.find(model)
+        providers[model_info.provider.to_sym]
+      end
+
+      def providers
+        @providers ||= {}
+      end
+
+      def local_providers
+        providers.select { |_slug, provider_class| provider_class.local? }
+      end
+
+      def remote_providers
+        providers.select { |_slug, provider_class| provider_class.remote? }
+      end
+
+      def configured_providers(config)
+        providers.select do |_slug, provider_class|
+          provider_class.configured?(config)
+        end.values
+      end
+
+      def configured_remote_providers(config)
+        providers.select do |_slug, provider_class|
+          provider_class.remote? && provider_class.configured?(config)
+        end.values
+      end
+    end
+
+    private
+
+    def try_parse_json(maybe_json)
+      return maybe_json unless maybe_json.is_a?(String)
+
+      JSON.parse(maybe_json)
+    rescue JSON::ParserError
+      maybe_json
+    end
+
+    def ensure_configured!
+      missing = configuration_requirements.reject { |req| @config.send(req) }
+      return if missing.empty?
+
+      raise ConfigurationError, "Missing configuration for #{name}: #{missing.join(', ')}"
+    end
+
+    def maybe_normalize_temperature(temperature, _model_id)
+      temperature
+    end
+
+    def sync_response(connection, payload, additional_headers = {})
+      response = connection.post completion_url, payload do |req|
+        # Merge additional headers, with existing headers taking precedence
+        req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
+      end
+      parse_completion_response response
+    end
+  end
+end
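
To make the subclass contract concrete, here is a minimal, hypothetical provider built only from hooks visible above (api_base, headers, and the class-level capabilities, configuration_requirements, register). The Example class and the example_api_key configuration attribute are invented for illustration; a real chat-capable subclass would also supply the pieces complete and sync_response depend on (completion_url, render_payload, parse_completion_response), as the bundled providers do.

# Hypothetical provider; illustrates the Provider base class API only.
module RubyLLM
  module Providers
    class Example < Provider
      def api_base
        'https://api.example.invalid/v1'
      end

      def headers
        # example_api_key is an assumed configuration attribute, not one the gem defines.
        { 'Authorization' => "Bearer #{@config.example_api_key}" }
      end

      class << self
        def capabilities
          [] # stand-in; the bundled providers ship dedicated capabilities.rb modules
        end

        def configuration_requirements
          %i[example_api_key] # checked by configured? and ensure_configured!
        end
      end
    end
  end
end

# Registration makes the class discoverable via Provider.providers[:example];
# its slug then defaults to 'example' via name.downcase.
RubyLLM::Provider.register :example, RubyLLM::Providers::Example
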