ruby_llm_community 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. checksums.yaml +4 -4
  2. data/LICENSE +22 -0
  3. data/README.md +172 -0
  4. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
  5. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  7. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
  8. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
  9. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
  10. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
  11. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  12. data/lib/generators/ruby_llm/install_generator.rb +121 -0
  13. data/lib/ruby_llm/active_record/acts_as.rb +382 -0
  14. data/lib/ruby_llm/aliases.json +217 -0
  15. data/lib/ruby_llm/aliases.rb +56 -0
  16. data/lib/ruby_llm/attachment.rb +164 -0
  17. data/lib/ruby_llm/chat.rb +226 -0
  18. data/lib/ruby_llm/chunk.rb +6 -0
  19. data/lib/ruby_llm/configuration.rb +73 -0
  20. data/lib/ruby_llm/connection.rb +126 -0
  21. data/lib/ruby_llm/content.rb +52 -0
  22. data/lib/ruby_llm/context.rb +29 -0
  23. data/lib/ruby_llm/embedding.rb +30 -0
  24. data/lib/ruby_llm/error.rb +84 -0
  25. data/lib/ruby_llm/image.rb +53 -0
  26. data/lib/ruby_llm/message.rb +81 -0
  27. data/lib/ruby_llm/mime_type.rb +67 -0
  28. data/lib/ruby_llm/model/info.rb +101 -0
  29. data/lib/ruby_llm/model/modalities.rb +22 -0
  30. data/lib/ruby_llm/model/pricing.rb +51 -0
  31. data/lib/ruby_llm/model/pricing_category.rb +48 -0
  32. data/lib/ruby_llm/model/pricing_tier.rb +34 -0
  33. data/lib/ruby_llm/model.rb +7 -0
  34. data/lib/ruby_llm/models.json +29924 -0
  35. data/lib/ruby_llm/models.rb +214 -0
  36. data/lib/ruby_llm/models_schema.json +168 -0
  37. data/lib/ruby_llm/provider.rb +221 -0
  38. data/lib/ruby_llm/providers/anthropic/capabilities.rb +179 -0
  39. data/lib/ruby_llm/providers/anthropic/chat.rb +120 -0
  40. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  41. data/lib/ruby_llm/providers/anthropic/media.rb +116 -0
  42. data/lib/ruby_llm/providers/anthropic/models.rb +56 -0
  43. data/lib/ruby_llm/providers/anthropic/streaming.rb +45 -0
  44. data/lib/ruby_llm/providers/anthropic/tools.rb +108 -0
  45. data/lib/ruby_llm/providers/anthropic.rb +37 -0
  46. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  47. data/lib/ruby_llm/providers/bedrock/chat.rb +76 -0
  48. data/lib/ruby_llm/providers/bedrock/media.rb +73 -0
  49. data/lib/ruby_llm/providers/bedrock/models.rb +82 -0
  50. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  51. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +63 -0
  52. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +71 -0
  53. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
  54. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +92 -0
  55. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
  56. data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
  57. data/lib/ruby_llm/providers/bedrock.rb +83 -0
  58. data/lib/ruby_llm/providers/deepseek/capabilities.rb +131 -0
  59. data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
  60. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  61. data/lib/ruby_llm/providers/gemini/capabilities.rb +351 -0
  62. data/lib/ruby_llm/providers/gemini/chat.rb +146 -0
  63. data/lib/ruby_llm/providers/gemini/embeddings.rb +39 -0
  64. data/lib/ruby_llm/providers/gemini/images.rb +48 -0
  65. data/lib/ruby_llm/providers/gemini/media.rb +55 -0
  66. data/lib/ruby_llm/providers/gemini/models.rb +41 -0
  67. data/lib/ruby_llm/providers/gemini/streaming.rb +66 -0
  68. data/lib/ruby_llm/providers/gemini/tools.rb +82 -0
  69. data/lib/ruby_llm/providers/gemini.rb +36 -0
  70. data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
  71. data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
  72. data/lib/ruby_llm/providers/gpustack.rb +33 -0
  73. data/lib/ruby_llm/providers/mistral/capabilities.rb +163 -0
  74. data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
  75. data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
  76. data/lib/ruby_llm/providers/mistral/models.rb +49 -0
  77. data/lib/ruby_llm/providers/mistral.rb +32 -0
  78. data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
  79. data/lib/ruby_llm/providers/ollama/media.rb +50 -0
  80. data/lib/ruby_llm/providers/ollama.rb +29 -0
  81. data/lib/ruby_llm/providers/openai/capabilities.rb +306 -0
  82. data/lib/ruby_llm/providers/openai/chat.rb +87 -0
  83. data/lib/ruby_llm/providers/openai/embeddings.rb +36 -0
  84. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  85. data/lib/ruby_llm/providers/openai/media.rb +81 -0
  86. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  87. data/lib/ruby_llm/providers/openai/response.rb +116 -0
  88. data/lib/ruby_llm/providers/openai/response_media.rb +76 -0
  89. data/lib/ruby_llm/providers/openai/streaming.rb +191 -0
  90. data/lib/ruby_llm/providers/openai/tools.rb +100 -0
  91. data/lib/ruby_llm/providers/openai.rb +44 -0
  92. data/lib/ruby_llm/providers/openai_base.rb +44 -0
  93. data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
  94. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  95. data/lib/ruby_llm/providers/perplexity/capabilities.rb +138 -0
  96. data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
  97. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  98. data/lib/ruby_llm/providers/perplexity.rb +52 -0
  99. data/lib/ruby_llm/railtie.rb +17 -0
  100. data/lib/ruby_llm/stream_accumulator.rb +103 -0
  101. data/lib/ruby_llm/streaming.rb +162 -0
  102. data/lib/ruby_llm/tool.rb +100 -0
  103. data/lib/ruby_llm/tool_call.rb +31 -0
  104. data/lib/ruby_llm/utils.rb +49 -0
  105. data/lib/ruby_llm/version.rb +5 -0
  106. data/lib/ruby_llm.rb +98 -0
  107. data/lib/tasks/aliases.rake +235 -0
  108. data/lib/tasks/models_docs.rake +224 -0
  109. data/lib/tasks/models_update.rake +108 -0
  110. data/lib/tasks/release.rake +32 -0
  111. data/lib/tasks/vcr.rake +99 -0
  112. metadata +128 -7
data/lib/ruby_llm/models.rb
@@ -0,0 +1,214 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   # Registry of available AI models and their capabilities. Provides a clean interface
+   # to discover and work with models from different providers.
+   #
+   # Example:
+   #   RubyLLM.models.all                                 # All available models
+   #   RubyLLM.models.chat_models                         # Models that support chat
+   #   RubyLLM.models.by_provider('openai').chat_models   # OpenAI chat models
+   #   RubyLLM.models.find('claude-3')                    # Get info about a specific model
+   class Models
+     include Enumerable
+
+     class << self
+       def instance
+         @instance ||= new
+       end
+
+       def provider_for(model)
+         Provider.for(model)
+       end
+
+       def models_file
+         File.expand_path('models.json', __dir__)
+       end
+
+       def refresh!
+         # Collect models from both sources
+         provider_models = fetch_from_providers
+         parsera_models = fetch_from_parsera
+
+         # Merge with parsera data taking precedence
+         merged_models = merge_models(provider_models, parsera_models)
+
+         @instance = new(merged_models)
+       end
+
+       def fetch_from_providers
+         config = RubyLLM.config
+         configured_classes = Provider.configured_remote_providers(config)
+         configured = configured_classes.map { |klass| klass.new(config) }
+
+         RubyLLM.logger.info "Fetching models from providers: #{configured.map(&:name).join(', ')}"
+
+         configured.flat_map(&:list_models)
+       end
+
+       def resolve(model_id, provider: nil, assume_exists: false, config: nil) # rubocop:disable Metrics/PerceivedComplexity
+         config ||= RubyLLM.config
+         provider_class = provider ? Provider.providers[provider.to_sym] : nil
+
+         # Check if provider is local
+         if provider_class
+           temp_instance = provider_class.new(config)
+           assume_exists = true if temp_instance.local?
+         end
+
+         if assume_exists
+           raise ArgumentError, 'Provider must be specified if assume_exists is true' unless provider
+
+           provider_class ||= raise(Error, "Unknown provider: #{provider.to_sym}")
+           provider_instance = provider_class.new(config)
+
+           model = Model::Info.new(
+             id: model_id,
+             name: model_id.tr('-', ' ').capitalize,
+             provider: provider_instance.slug,
+             capabilities: %w[function_calling streaming],
+             modalities: { input: %w[text image], output: %w[text] },
+             metadata: { warning: 'Assuming model exists, capabilities may not be accurate' }
+           )
+         else
+           model = Models.find model_id, provider
+           provider_class = Provider.providers[model.provider.to_sym] || raise(Error,
+                                                                               "Unknown provider: #{model.provider}")
+           provider_instance = provider_class.new(config)
+         end
+         [model, provider_instance]
+       end
+
+       def method_missing(method, ...)
+         if instance.respond_to?(method)
+           instance.send(method, ...)
+         else
+           super
+         end
+       end
+
+       def respond_to_missing?(method, include_private = false)
+         instance.respond_to?(method, include_private) || super
+       end
+
+       def fetch_from_parsera
+         RubyLLM.logger.info 'Fetching models from Parsera API...'
+
+         connection = Connection.basic do |f|
+           f.request :json
+           f.response :json, parser_options: { symbolize_names: true }
+         end
+         response = connection.get 'https://api.parsera.org/v1/llm-specs'
+         models = response.body.map { |data| Model::Info.new(data) }
+         models.reject { |model| model.provider.nil? || model.id.nil? }
+       end
+
+       def merge_models(provider_models, parsera_models)
+         parsera_by_key = index_by_key(parsera_models)
+         provider_by_key = index_by_key(provider_models)
+
+         all_keys = parsera_by_key.keys | provider_by_key.keys
+
+         models = all_keys.map do |key|
+           if (parsera_model = parsera_by_key[key])
+             if (provider_model = provider_by_key[key])
+               add_provider_metadata(parsera_model, provider_model)
+             else
+               parsera_model
+             end
+           else
+             provider_by_key[key]
+           end
+         end
+
+         models.sort_by { |m| [m.provider, m.id] }
+       end
+
+       def index_by_key(models)
+         models.each_with_object({}) do |model, hash|
+           hash["#{model.provider}:#{model.id}"] = model
+         end
+       end
+
+       def add_provider_metadata(parsera_model, provider_model)
+         data = parsera_model.to_h
+         data[:metadata] = provider_model.metadata.merge(data[:metadata] || {})
+         Model::Info.new(data)
+       end
+     end
+
+     def initialize(models = nil)
+       @models = models || load_models
+     end
+
+     def load_models
+       data = File.exist?(self.class.models_file) ? File.read(self.class.models_file) : '[]'
+       JSON.parse(data, symbolize_names: true).map { |model| Model::Info.new(model) }
+     rescue JSON::ParserError
+       []
+     end
+
+     def save_models
+       File.write(self.class.models_file, JSON.pretty_generate(all.map(&:to_h)))
+     end
+
+     def all
+       @models
+     end
+
+     def each(&)
+       all.each(&)
+     end
+
+     def find(model_id, provider = nil)
+       if provider
+         find_with_provider(model_id, provider)
+       else
+         find_without_provider(model_id)
+       end
+     end
+
+     def chat_models
+       self.class.new(all.select { |m| m.type == 'chat' })
+     end
+
+     def embedding_models
+       self.class.new(all.select { |m| m.type == 'embedding' })
+     end
+
+     def audio_models
+       self.class.new(all.select { |m| m.type == 'audio' })
+     end
+
+     def image_models
+       self.class.new(all.select { |m| m.type == 'image' })
+     end
+
+     def by_family(family)
+       self.class.new(all.select { |m| m.family == family.to_s })
+     end
+
+     def by_provider(provider)
+       self.class.new(all.select { |m| m.provider == provider.to_s })
+     end
+
+     def refresh!
+       self.class.refresh!
+     end
+
+     private
+
+     def find_with_provider(model_id, provider)
+       resolved_id = Aliases.resolve(model_id, provider)
+       all.find { |m| m.id == model_id && m.provider == provider.to_s } ||
+         all.find { |m| m.id == resolved_id && m.provider == provider.to_s } ||
+         raise(ModelNotFoundError, "Unknown model: #{model_id} for provider: #{provider}")
+     end
+
+     def find_without_provider(model_id)
+       all.find { |m| m.id == model_id } ||
+         all.find { |m| m.id == Aliases.resolve(model_id) } ||
+         raise(ModelNotFoundError, "Unknown model: #{model_id}")
+     end
+   end
+ end
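
The class comment above shows the intended entry points. As a rough usage sketch (not part of the packaged files; the model ids are illustrative, and RubyLLM.models is assumed to return Models.instance as the comment implies):

  require 'ruby_llm'

  RubyLLM.models.all.size                            # every model in the bundled registry
  RubyLLM.models.by_provider('openai').chat_models   # chainable filters return new Models instances
  RubyLLM.models.find('claude-3')                    # alias-aware lookup; raises ModelNotFoundError if unknown
  RubyLLM.models.refresh!                            # re-fetch from configured providers and the Parsera API
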
data/lib/ruby_llm/models_schema.json
@@ -0,0 +1,168 @@
+ {
+   "title": "RubyLLM Models Schema",
+   "description": "Schema for validating the structure of models.json",
+   "type": "array",
+   "items": {
+     "type": "object",
+     "required": ["id", "name", "provider", "context_window", "max_output_tokens"],
+     "properties": {
+       "id": {
+         "type": "string",
+         "description": "Unique identifier for the model"
+       },
+       "name": {
+         "type": "string",
+         "description": "Display name of the model"
+       },
+       "provider": {
+         "type": "string",
+         "description": "Provider of the model (e.g., openai, anthropic, mistral)"
+       },
+       "family": {
+         "type": ["string", "null"],
+         "description": "Model family (e.g., gpt-4, claude-3)"
+       },
+       "created_at": {
+         "type": ["null", {"type": "string", "format": "date-time"}],
+         "description": "Creation date of the model"
+       },
+       "context_window": {
+         "type": ["null", {"type": "integer", "minimum": 0}],
+         "description": "Maximum context window size"
+       },
+       "max_output_tokens": {
+         "type": ["null", {"type": "integer", "minimum": 0}],
+         "description": "Maximum output tokens"
+       },
+       "knowledge_cutoff": {
+         "type": ["null", {"type": "string", "format": "date"}],
+         "description": "Knowledge cutoff date"
+       },
+       "modalities": {
+         "type": "object",
+         "required": ["input", "output"],
+         "properties": {
+           "input": {
+             "type": "array",
+             "items": {
+               "type": "string",
+               "enum": ["text", "image", "audio", "pdf", "video", "file"]
+             },
+             "uniqueItems": true,
+             "description": "Supported input modalities"
+           },
+           "output": {
+             "type": "array",
+             "items": {
+               "type": "string",
+               "enum": ["text", "image", "audio", "embeddings", "moderation"]
+             },
+             "uniqueItems": true,
+             "description": "Supported output modalities"
+           }
+         }
+       },
+       "capabilities": {
+         "type": "array",
+         "items": {
+           "type": "string",
+           "enum": [
+             "streaming", "function_calling", "structured_output", "predicted_outputs",
+             "distillation", "fine_tuning", "batch", "realtime", "image_generation",
+             "speech_generation", "transcription", "translation", "citations", "reasoning",
+             "caching", "moderation", "json_mode", "vision"
+           ]
+         },
+         "uniqueItems": true,
+         "description": "Model capabilities"
+       },
+       "pricing": {
+         "type": "object",
+         "properties": {
+           "text_tokens": {
+             "type": "object",
+             "required": ["standard"],
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "cached_input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0},
+                   "reasoning_output_per_million": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           },
+           "images": {
+             "type": "object",
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input": {"type": "number", "minimum": 0},
+                   "output": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input": {"type": "number", "minimum": 0},
+                   "output": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           },
+           "audio_tokens": {
+             "type": "object",
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           },
+           "embeddings": {
+             "type": "object",
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           }
+         },
+         "description": "Pricing information for the model"
+       },
+       "metadata": {
+         "type": "object",
+         "description": "Additional metadata about the model"
+       }
+     }
+   }
+ }
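
Judging from this schema, a single models.json entry looks roughly like the hash below, written as Ruby since Models#load_models parses the file with symbolized keys. The id, dates, and prices are invented for illustration and are not taken from the bundled models.json:

  # Illustrative entry only -- values are made up; structure follows the schema above.
  {
    id: 'gpt-4o',
    name: 'GPT-4o',
    provider: 'openai',
    family: 'gpt-4o',
    created_at: '2024-05-13T00:00:00Z',
    context_window: 128_000,
    max_output_tokens: 16_384,
    knowledge_cutoff: '2023-10-01',
    modalities: { input: %w[text image], output: %w[text] },
    capabilities: %w[streaming function_calling structured_output vision],
    pricing: { text_tokens: { standard: { input_per_million: 2.5, output_per_million: 10.0 } } },
    metadata: {}
  }
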
data/lib/ruby_llm/provider.rb
@@ -0,0 +1,221 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   # Base class for LLM providers like OpenAI and Anthropic.
+   # Handles the complexities of API communication, streaming responses,
+   # and error handling so individual providers can focus on their unique features.
+   # Encapsulates configuration and connection to eliminate parameter threading.
+   class Provider
+     include Streaming
+
+     attr_reader :config, :connection
+
+     def initialize(config)
+       @config = config
+       ensure_configured!
+       @connection = Connection.new(self, @config)
+     end
+
+     def api_base
+       raise NotImplementedError
+     end
+
+     def headers
+       {}
+     end
+
+     def slug
+       self.class.slug
+     end
+
+     def name
+       self.class.name
+     end
+
+     def capabilities
+       self.class.capabilities
+     end
+
+     def configuration_requirements
+       self.class.configuration_requirements
+     end
+
+     def complete(messages, tools:, temperature:, model:, params: {}, headers: {}, schema: nil, # rubocop:disable Metrics/ParameterLists
+                  cache_prompts: { system: false, user: false, tools: false }, &)
+       normalized_temperature = maybe_normalize_temperature(temperature, model)
+
+       payload = Utils.deep_merge(
+         params,
+         render_payload(
+           messages,
+           tools: tools,
+           temperature: normalized_temperature,
+           model: model,
+           cache_prompts: cache_prompts,
+           stream: block_given?,
+           schema: schema
+         )
+       )
+
+       if block_given?
+         stream_response @connection, payload, headers, &
+       else
+         sync_response @connection, payload, headers
+       end
+     end
+
+     def list_models
+       response = @connection.get models_url
+       parse_list_models_response response, slug, capabilities
+     end
+
+     def embed(text, model:, dimensions:)
+       payload = render_embedding_payload(text, model:, dimensions:)
+       response = @connection.post(embedding_url(model:), payload)
+       parse_embedding_response(response, model:, text:)
+     end
+
+     def paint(prompt, model:, size:)
+       payload = render_image_payload(prompt, model:, size:)
+       response = @connection.post images_url, payload
+       parse_image_response(response, model:)
+     end
+
+     def configured?
+       configuration_requirements.all? { |req| @config.send(req) }
+     end
+
+     def local?
+       self.class.local?
+     end
+
+     def remote?
+       self.class.remote?
+     end
+
+     def parse_error(response)
+       return if response.body.empty?
+
+       body = try_parse_json(response.body)
+       case body
+       when Hash
+         body.dig('error', 'message')
+       when Array
+         body.map do |part|
+           part.dig('error', 'message')
+         end.join('. ')
+       else
+         body
+       end
+     end
+
+     def format_messages(messages)
+       messages.map do |msg|
+         {
+           role: msg.role.to_s,
+           content: msg.content
+         }
+       end
+     end
+
+     def format_tool_calls(_tool_calls)
+       nil
+     end
+
+     def parse_tool_calls(_tool_calls)
+       nil
+     end
+
+     class << self
+       def name
+         to_s.split('::').last
+       end
+
+       def slug
+         name.downcase
+       end
+
+       def capabilities
+         raise NotImplementedError
+       end
+
+       def configuration_requirements
+         []
+       end
+
+       def local?
+         false
+       end
+
+       def remote?
+         !local?
+       end
+
+       def configured?(config)
+         configuration_requirements.all? { |req| config.send(req) }
+       end
+
+       def register(name, provider_class)
+         providers[name.to_sym] = provider_class
+       end
+
+       def for(model)
+         model_info = Models.find(model)
+         providers[model_info.provider.to_sym]
+       end
+
+       def providers
+         @providers ||= {}
+       end
+
+       def local_providers
+         providers.select { |_slug, provider_class| provider_class.local? }
+       end
+
+       def remote_providers
+         providers.select { |_slug, provider_class| provider_class.remote? }
+       end
+
+       def configured_providers(config)
+         providers.select do |_slug, provider_class|
+           provider_class.configured?(config)
+         end.values
+       end
+
+       def configured_remote_providers(config)
+         providers.select do |_slug, provider_class|
+           provider_class.remote? && provider_class.configured?(config)
+         end.values
+       end
+     end
+
+     private
+
+     def try_parse_json(maybe_json)
+       return maybe_json unless maybe_json.is_a?(String)
+
+       JSON.parse(maybe_json)
+     rescue JSON::ParserError
+       maybe_json
+     end
+
+     def ensure_configured!
+       missing = configuration_requirements.reject { |req| @config.send(req) }
+       return if missing.empty?
+
+       raise ConfigurationError, "Missing configuration for #{name}: #{missing.join(', ')}"
+     end
+
+     def maybe_normalize_temperature(temperature, _model_id)
+       temperature
+     end
+
+     def sync_response(connection, payload, additional_headers = {})
+       response = connection.post completion_url, payload do |req|
+         # Merge additional headers, with existing headers taking precedence
+         req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
+       end
+       parse_completion_response response
+     end
+   end
+ end
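
For orientation, a hypothetical provider would plug into this base class roughly as sketched below. Acme, acme_api_key, and the endpoint are invented for illustration; a real provider (see the bundled ones under data/lib/ruby_llm/providers/) also supplies render_payload, parse_completion_response, the *_url helpers, and streaming parsing, which the base class calls but does not define:

  module RubyLLM
    module Providers
      # Hypothetical provider -- shows only the seams the base class expects.
      class Acme < Provider
        def api_base
          'https://api.acme.example/v1'
        end

        def headers
          { 'Authorization' => "Bearer #{@config.acme_api_key}" }
        end

        class << self
          def capabilities
            []   # the bundled providers point this at their Capabilities module
          end

          def configuration_requirements
            %i[acme_api_key]   # read via config.send, so Configuration would need this attribute
          end
        end
      end
    end
  end

  RubyLLM::Provider.register :acme, RubyLLM::Providers::Acme
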