dify_llm 1.6.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. checksums.yaml +7 -0
  2. data/LICENSE +21 -0
  3. data/README.md +157 -0
  4. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  5. data/lib/generators/ruby_llm/install/templates/create_chats_legacy_migration.rb.tt +8 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  7. data/lib/generators/ruby_llm/install/templates/create_messages_legacy_migration.rb.tt +16 -0
  8. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +16 -0
  9. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +43 -0
  10. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +15 -0
  11. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +9 -0
  12. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +4 -0
  13. data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +3 -0
  14. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  15. data/lib/generators/ruby_llm/install_generator.rb +184 -0
  16. data/lib/generators/ruby_llm/migrate_model_fields/templates/migration.rb.tt +142 -0
  17. data/lib/generators/ruby_llm/migrate_model_fields_generator.rb +84 -0
  18. data/lib/ruby_llm/active_record/acts_as.rb +137 -0
  19. data/lib/ruby_llm/active_record/acts_as_legacy.rb +398 -0
  20. data/lib/ruby_llm/active_record/chat_methods.rb +315 -0
  21. data/lib/ruby_llm/active_record/message_methods.rb +72 -0
  22. data/lib/ruby_llm/active_record/model_methods.rb +84 -0
  23. data/lib/ruby_llm/aliases.json +274 -0
  24. data/lib/ruby_llm/aliases.rb +38 -0
  25. data/lib/ruby_llm/attachment.rb +191 -0
  26. data/lib/ruby_llm/chat.rb +212 -0
  27. data/lib/ruby_llm/chunk.rb +6 -0
  28. data/lib/ruby_llm/configuration.rb +69 -0
  29. data/lib/ruby_llm/connection.rb +137 -0
  30. data/lib/ruby_llm/content.rb +50 -0
  31. data/lib/ruby_llm/context.rb +29 -0
  32. data/lib/ruby_llm/embedding.rb +29 -0
  33. data/lib/ruby_llm/error.rb +76 -0
  34. data/lib/ruby_llm/image.rb +49 -0
  35. data/lib/ruby_llm/message.rb +76 -0
  36. data/lib/ruby_llm/mime_type.rb +67 -0
  37. data/lib/ruby_llm/model/info.rb +103 -0
  38. data/lib/ruby_llm/model/modalities.rb +22 -0
  39. data/lib/ruby_llm/model/pricing.rb +48 -0
  40. data/lib/ruby_llm/model/pricing_category.rb +46 -0
  41. data/lib/ruby_llm/model/pricing_tier.rb +33 -0
  42. data/lib/ruby_llm/model.rb +7 -0
  43. data/lib/ruby_llm/models.json +31418 -0
  44. data/lib/ruby_llm/models.rb +235 -0
  45. data/lib/ruby_llm/models_schema.json +168 -0
  46. data/lib/ruby_llm/provider.rb +215 -0
  47. data/lib/ruby_llm/providers/anthropic/capabilities.rb +134 -0
  48. data/lib/ruby_llm/providers/anthropic/chat.rb +106 -0
  49. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  50. data/lib/ruby_llm/providers/anthropic/media.rb +91 -0
  51. data/lib/ruby_llm/providers/anthropic/models.rb +48 -0
  52. data/lib/ruby_llm/providers/anthropic/streaming.rb +43 -0
  53. data/lib/ruby_llm/providers/anthropic/tools.rb +107 -0
  54. data/lib/ruby_llm/providers/anthropic.rb +36 -0
  55. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  56. data/lib/ruby_llm/providers/bedrock/chat.rb +63 -0
  57. data/lib/ruby_llm/providers/bedrock/media.rb +60 -0
  58. data/lib/ruby_llm/providers/bedrock/models.rb +98 -0
  59. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  60. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +51 -0
  61. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +56 -0
  62. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +67 -0
  63. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +78 -0
  64. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +78 -0
  65. data/lib/ruby_llm/providers/bedrock/streaming.rb +18 -0
  66. data/lib/ruby_llm/providers/bedrock.rb +82 -0
  67. data/lib/ruby_llm/providers/deepseek/capabilities.rb +130 -0
  68. data/lib/ruby_llm/providers/deepseek/chat.rb +16 -0
  69. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  70. data/lib/ruby_llm/providers/dify/capabilities.rb +16 -0
  71. data/lib/ruby_llm/providers/dify/chat.rb +59 -0
  72. data/lib/ruby_llm/providers/dify/media.rb +37 -0
  73. data/lib/ruby_llm/providers/dify/streaming.rb +28 -0
  74. data/lib/ruby_llm/providers/dify.rb +48 -0
  75. data/lib/ruby_llm/providers/gemini/capabilities.rb +276 -0
  76. data/lib/ruby_llm/providers/gemini/chat.rb +171 -0
  77. data/lib/ruby_llm/providers/gemini/embeddings.rb +37 -0
  78. data/lib/ruby_llm/providers/gemini/images.rb +47 -0
  79. data/lib/ruby_llm/providers/gemini/media.rb +54 -0
  80. data/lib/ruby_llm/providers/gemini/models.rb +40 -0
  81. data/lib/ruby_llm/providers/gemini/streaming.rb +61 -0
  82. data/lib/ruby_llm/providers/gemini/tools.rb +77 -0
  83. data/lib/ruby_llm/providers/gemini.rb +36 -0
  84. data/lib/ruby_llm/providers/gpustack/chat.rb +27 -0
  85. data/lib/ruby_llm/providers/gpustack/media.rb +45 -0
  86. data/lib/ruby_llm/providers/gpustack/models.rb +90 -0
  87. data/lib/ruby_llm/providers/gpustack.rb +34 -0
  88. data/lib/ruby_llm/providers/mistral/capabilities.rb +155 -0
  89. data/lib/ruby_llm/providers/mistral/chat.rb +24 -0
  90. data/lib/ruby_llm/providers/mistral/embeddings.rb +33 -0
  91. data/lib/ruby_llm/providers/mistral/models.rb +48 -0
  92. data/lib/ruby_llm/providers/mistral.rb +32 -0
  93. data/lib/ruby_llm/providers/ollama/chat.rb +27 -0
  94. data/lib/ruby_llm/providers/ollama/media.rb +45 -0
  95. data/lib/ruby_llm/providers/ollama/models.rb +36 -0
  96. data/lib/ruby_llm/providers/ollama.rb +30 -0
  97. data/lib/ruby_llm/providers/openai/capabilities.rb +291 -0
  98. data/lib/ruby_llm/providers/openai/chat.rb +83 -0
  99. data/lib/ruby_llm/providers/openai/embeddings.rb +33 -0
  100. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  101. data/lib/ruby_llm/providers/openai/media.rb +80 -0
  102. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  103. data/lib/ruby_llm/providers/openai/streaming.rb +41 -0
  104. data/lib/ruby_llm/providers/openai/tools.rb +78 -0
  105. data/lib/ruby_llm/providers/openai.rb +42 -0
  106. data/lib/ruby_llm/providers/openrouter/models.rb +73 -0
  107. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  108. data/lib/ruby_llm/providers/perplexity/capabilities.rb +137 -0
  109. data/lib/ruby_llm/providers/perplexity/chat.rb +16 -0
  110. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  111. data/lib/ruby_llm/providers/perplexity.rb +48 -0
  112. data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
  113. data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
  114. data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
  115. data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
  116. data/lib/ruby_llm/providers/vertexai.rb +55 -0
  117. data/lib/ruby_llm/railtie.rb +41 -0
  118. data/lib/ruby_llm/stream_accumulator.rb +97 -0
  119. data/lib/ruby_llm/streaming.rb +153 -0
  120. data/lib/ruby_llm/tool.rb +83 -0
  121. data/lib/ruby_llm/tool_call.rb +22 -0
  122. data/lib/ruby_llm/utils.rb +45 -0
  123. data/lib/ruby_llm/version.rb +5 -0
  124. data/lib/ruby_llm.rb +97 -0
  125. data/lib/tasks/models.rake +525 -0
  126. data/lib/tasks/release.rake +67 -0
  127. data/lib/tasks/ruby_llm.rake +15 -0
  128. data/lib/tasks/vcr.rake +92 -0
  129. metadata +291 -0

data/lib/ruby_llm/models.rb
@@ -0,0 +1,235 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   # Registry of available AI models and their capabilities.
+   class Models
+     include Enumerable
+
+     class << self
+       def instance
+         @instance ||= new
+       end
+
+       def provider_for(model)
+         Provider.for(model)
+       end
+
+       def models_file
+         File.expand_path('models.json', __dir__)
+       end
+
+       def schema_file
+         File.expand_path('models_schema.json', __dir__)
+       end
+
+       def refresh!(remote_only: false)
+         provider_models = fetch_from_providers(remote_only: remote_only)
+         parsera_models = fetch_from_parsera
+         merged_models = merge_models(provider_models, parsera_models)
+         @instance = new(merged_models)
+       end
+
+       def fetch_from_providers(remote_only: true)
+         config = RubyLLM.config
+         configured_classes = if remote_only
+                                Provider.configured_remote_providers(config)
+                              else
+                                Provider.configured_providers(config)
+                              end
+         configured = configured_classes.map { |klass| klass.new(config) }
+
+         RubyLLM.logger.info "Fetching models from providers: #{configured.map(&:name).join(', ')}"
+
+         configured.flat_map(&:list_models)
+       end
+
+       def resolve(model_id, provider: nil, assume_exists: false, config: nil) # rubocop:disable Metrics/PerceivedComplexity
+         config ||= RubyLLM.config
+         provider_class = provider ? Provider.providers[provider.to_sym] : nil
+
+         if provider_class
+           temp_instance = provider_class.new(config)
+           assume_exists = true if temp_instance.local?
+         end
+
+         if assume_exists
+           raise ArgumentError, 'Provider must be specified if assume_exists is true' unless provider
+
+           provider_class ||= raise(Error, "Unknown provider: #{provider.to_sym}")
+           provider_instance = provider_class.new(config)
+
+           model = if provider_instance.local?
+                     begin
+                       Models.find(model_id, provider)
+                     rescue ModelNotFoundError
+                       nil
+                     end
+                   end
+
+           model ||= Model::Info.default(model_id, provider_instance.slug)
+         else
+           model = Models.find model_id, provider
+           provider_class = Provider.providers[model.provider.to_sym] || raise(Error,
+                                                                                "Unknown provider: #{model.provider}")
+           provider_instance = provider_class.new(config)
+         end
+         [model, provider_instance]
+       end
+
+       def method_missing(method, ...)
+         if instance.respond_to?(method)
+           instance.send(method, ...)
+         else
+           super
+         end
+       end
+
+       def respond_to_missing?(method, include_private = false)
+         instance.respond_to?(method, include_private) || super
+       end
+
+       def fetch_from_parsera
+         RubyLLM.logger.info 'Fetching models from Parsera API...'
+
+         connection = Connection.basic do |f|
+           f.request :json
+           f.response :json, parser_options: { symbolize_names: true }
+         end
+         response = connection.get 'https://api.parsera.org/v1/llm-specs'
+         models = response.body.map { |data| Model::Info.new(data) }
+         models.reject { |model| model.provider.nil? || model.id.nil? }
+       end
+
+       def merge_models(provider_models, parsera_models)
+         parsera_by_key = index_by_key(parsera_models)
+         provider_by_key = index_by_key(provider_models)
+
+         all_keys = parsera_by_key.keys | provider_by_key.keys
+
+         models = all_keys.map do |key|
+           parsera_model = find_parsera_model(key, parsera_by_key)
+           provider_model = provider_by_key[key]
+
+           if parsera_model && provider_model
+             add_provider_metadata(parsera_model, provider_model)
+           elsif parsera_model
+             parsera_model
+           else
+             provider_model
+           end
+         end
+
+         models.sort_by { |m| [m.provider, m.id] }
+       end
+
+       def find_parsera_model(key, parsera_by_key)
+         # Direct match
+         return parsera_by_key[key] if parsera_by_key[key]
+
+         # VertexAI uses same models as Gemini
+         provider, model_id = key.split(':', 2)
+         return unless provider == 'vertexai'
+
+         gemini_model = parsera_by_key["gemini:#{model_id}"]
+         return unless gemini_model
+
+         # Return Gemini's Parsera data but with VertexAI as provider
+         Model::Info.new(gemini_model.to_h.merge(provider: 'vertexai'))
+       end
+
+       def index_by_key(models)
+         models.each_with_object({}) do |model, hash|
+           hash["#{model.provider}:#{model.id}"] = model
+         end
+       end
+
+       def add_provider_metadata(parsera_model, provider_model)
+         data = parsera_model.to_h
+         data[:metadata] = provider_model.metadata.merge(data[:metadata] || {})
+         Model::Info.new(data)
+       end
+     end
+
+     def initialize(models = nil)
+       @models = models || load_models
+     end
+
+     def load_models
+       read_from_json
+     end
+
+     def load_from_json!
+       @models = read_from_json
+     end
+
+     def read_from_json
+       data = File.exist?(self.class.models_file) ? File.read(self.class.models_file) : '[]'
+       JSON.parse(data, symbolize_names: true).map { |model| Model::Info.new(model) }
+     rescue JSON::ParserError
+       []
+     end
+
+     def save_to_json
+       File.write(self.class.models_file, JSON.pretty_generate(all.map(&:to_h)))
+     end
+
+     def all
+       @models
+     end
+
+     def each(&)
+       all.each(&)
+     end
+
+     def find(model_id, provider = nil)
+       if provider
+         find_with_provider(model_id, provider)
+       else
+         find_without_provider(model_id)
+       end
+     end
+
+     def chat_models
+       self.class.new(all.select { |m| m.type == 'chat' })
+     end
+
+     def embedding_models
+       self.class.new(all.select { |m| m.type == 'embedding' })
+     end
+
+     def audio_models
+       self.class.new(all.select { |m| m.type == 'audio' })
+     end
+
+     def image_models
+       self.class.new(all.select { |m| m.type == 'image' })
+     end
+
+     def by_family(family)
+       self.class.new(all.select { |m| m.family == family.to_s })
+     end
+
+     def by_provider(provider)
+       self.class.new(all.select { |m| m.provider == provider.to_s })
+     end
+
+     def refresh!(remote_only: false)
+       self.class.refresh!(remote_only: remote_only)
+     end
+
+     private
+
+     def find_with_provider(model_id, provider)
+       resolved_id = Aliases.resolve(model_id, provider)
+       all.find { |m| m.id == model_id && m.provider == provider.to_s } ||
+         all.find { |m| m.id == resolved_id && m.provider == provider.to_s } ||
+         raise(ModelNotFoundError, "Unknown model: #{model_id} for provider: #{provider}")
+     end
+
+     def find_without_provider(model_id)
+       all.find { |m| m.id == model_id } ||
+         all.find { |m| m.id == Aliases.resolve(model_id) } ||
+         raise(ModelNotFoundError, "Unknown model: #{model_id}")
+     end
+   end
+ end
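
As a quick orientation to the registry above, the sketch below queries it the way a host application might. It is not part of the gem: the 'gpt-4o' model ID is only an assumed example that would need to be present in the bundled models.json, and the resolve call instantiates the provider, so that provider's configuration requirements must be satisfied in RubyLLM.config.

# Minimal usage sketch for RubyLLM::Models (illustrative, not from the gem).
require 'ruby_llm'

# Class-level calls are forwarded to Models.instance via method_missing.
RubyLLM::Models.chat_models.by_provider(:openai).each do |model|
  puts "#{model.id} (context window: #{model.context_window})"
end

# find tries the exact ID first, then an alias via Aliases.resolve,
# and raises ModelNotFoundError if neither matches.
info = RubyLLM::Models.find('gpt-4o', :openai) # assumed example ID
puts info.name

# resolve returns [Model::Info, provider instance]; the provider is built from
# RubyLLM.config, so its credentials must already be configured.
model, provider = RubyLLM::Models.resolve('gpt-4o', provider: :openai)

Note that refresh! rebuilds the in-memory registry from the configured provider APIs plus the Parsera catalog and swaps in a new singleton, while read_from_json only loads the models.json shipped with the gem.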

data/lib/ruby_llm/models_schema.json
@@ -0,0 +1,168 @@
+ {
+   "title": "RubyLLM Models Schema",
+   "description": "Schema for validating the structure of models.json",
+   "type": "array",
+   "items": {
+     "type": "object",
+     "required": ["id", "name", "provider", "context_window", "max_output_tokens"],
+     "properties": {
+       "id": {
+         "type": "string",
+         "description": "Unique identifier for the model"
+       },
+       "name": {
+         "type": "string",
+         "description": "Display name of the model"
+       },
+       "provider": {
+         "type": "string",
+         "description": "Provider of the model (e.g., openai, anthropic, mistral)"
+       },
+       "family": {
+         "type": ["string", "null"],
+         "description": "Model family (e.g., gpt-4, claude-3)"
+       },
+       "created_at": {
+         "type": ["null", {"type": "string", "format": "date-time"}],
+         "description": "Creation date of the model"
+       },
+       "context_window": {
+         "type": ["null", {"type": "integer", "minimum": 0}],
+         "description": "Maximum context window size"
+       },
+       "max_output_tokens": {
+         "type": ["null", {"type": "integer", "minimum": 0}],
+         "description": "Maximum output tokens"
+       },
+       "knowledge_cutoff": {
+         "type": ["null", {"type": "string", "format": "date"}],
+         "description": "Knowledge cutoff date"
+       },
+       "modalities": {
+         "type": "object",
+         "required": ["input", "output"],
+         "properties": {
+           "input": {
+             "type": "array",
+             "items": {
+               "type": "string",
+               "enum": ["text", "image", "audio", "pdf", "video", "file"]
+             },
+             "uniqueItems": true,
+             "description": "Supported input modalities"
+           },
+           "output": {
+             "type": "array",
+             "items": {
+               "type": "string",
+               "enum": ["text", "image", "audio", "embeddings", "moderation"]
+             },
+             "uniqueItems": true,
+             "description": "Supported output modalities"
+           }
+         }
+       },
+       "capabilities": {
+         "type": "array",
+         "items": {
+           "type": "string",
+           "enum": [
+             "streaming", "function_calling", "structured_output", "predicted_outputs",
+             "distillation", "fine_tuning", "batch", "realtime", "image_generation",
+             "speech_generation", "transcription", "translation", "citations", "reasoning",
+             "caching", "moderation", "json_mode", "vision"
+           ]
+         },
+         "uniqueItems": true,
+         "description": "Model capabilities"
+       },
+       "pricing": {
+         "type": "object",
+         "properties": {
+           "text_tokens": {
+             "type": "object",
+             "required": ["standard"],
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "cached_input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0},
+                   "reasoning_output_per_million": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           },
+           "images": {
+             "type": "object",
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input": {"type": "number", "minimum": 0},
+                   "output": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input": {"type": "number", "minimum": 0},
+                   "output": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           },
+           "audio_tokens": {
+             "type": "object",
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0},
+                   "output_per_million": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           },
+           "embeddings": {
+             "type": "object",
+             "properties": {
+               "standard": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0}
+                 }
+               },
+               "batch": {
+                 "type": "object",
+                 "properties": {
+                   "input_per_million": {"type": "number", "minimum": 0}
+                 }
+               }
+             }
+           }
+         },
+         "description": "Pricing information for the model"
+       },
+       "metadata": {
+         "type": "object",
+         "description": "Additional metadata about the model"
+       }
+     }
+   }
+ }
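
The schema above documents the expected shape of the bundled models.json. As a rough sketch of checking that by hand, one could run the file through a JSON Schema validator; the json_schemer gem used below is an assumption (it is not a dependency declared anywhere in this diff), and a strict validator may or may not accept the schema's mixed type unions such as ["null", {"type": "integer", "minimum": 0}].

# Sketch: validate the bundled models.json against models_schema.json.
# Assumes the third-party json_schemer gem; not part of this package.
require 'json'
require 'json_schemer'
require 'ruby_llm'

schema = JSONSchemer.schema(File.read(RubyLLM::Models.schema_file))
data   = JSON.parse(File.read(RubyLLM::Models.models_file))

errors = schema.validate(data).to_a
if errors.empty?
  puts "models.json conforms (#{data.size} models)"
else
  errors.first(5).each { |e| puts "#{e['data_pointer']}: #{e['type']}" }
end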

data/lib/ruby_llm/provider.rb
@@ -0,0 +1,215 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   # Base class for LLM providers.
+   class Provider
+     include Streaming
+
+     attr_reader :config, :connection
+
+     def initialize(config)
+       @config = config
+       ensure_configured!
+       @connection = Connection.new(self, @config)
+     end
+
+     def api_base
+       raise NotImplementedError
+     end
+
+     def headers
+       {}
+     end
+
+     def slug
+       self.class.slug
+     end
+
+     def name
+       self.class.name
+     end
+
+     def capabilities
+       self.class.capabilities
+     end
+
+     def configuration_requirements
+       self.class.configuration_requirements
+     end
+
+     def complete(messages, tools:, temperature:, model:, params: {}, headers: {}, schema: nil, &) # rubocop:disable Metrics/ParameterLists
+       normalized_temperature = maybe_normalize_temperature(temperature, model)
+
+       payload = Utils.deep_merge(
+         render_payload(
+           messages,
+           tools: tools,
+           temperature: normalized_temperature,
+           model: model,
+           stream: block_given?,
+           schema: schema
+         ),
+         params
+       )
+
+       if block_given?
+         stream_response @connection, payload, headers, &
+       else
+         sync_response @connection, payload, headers
+       end
+     end
+
+     def list_models
+       response = @connection.get models_url
+       parse_list_models_response response, slug, capabilities
+     end
+
+     def embed(text, model:, dimensions:)
+       payload = render_embedding_payload(text, model:, dimensions:)
+       response = @connection.post(embedding_url(model:), payload)
+       parse_embedding_response(response, model:, text:)
+     end
+
+     def paint(prompt, model:, size:)
+       payload = render_image_payload(prompt, model:, size:)
+       response = @connection.post images_url, payload
+       parse_image_response(response, model:)
+     end
+
+     def configured?
+       configuration_requirements.all? { |req| @config.send(req) }
+     end
+
+     def local?
+       self.class.local?
+     end
+
+     def remote?
+       self.class.remote?
+     end
+
+     def parse_error(response)
+       return if response.body.empty?
+
+       body = try_parse_json(response.body)
+       case body
+       when Hash
+         body.dig('error', 'message')
+       when Array
+         body.map do |part|
+           part.dig('error', 'message')
+         end.join('. ')
+       else
+         body
+       end
+     end
+
+     def format_messages(messages)
+       messages.map do |msg|
+         {
+           role: msg.role.to_s,
+           content: msg.content
+         }
+       end
+     end
+
+     def format_tool_calls(_tool_calls)
+       nil
+     end
+
+     def parse_tool_calls(_tool_calls)
+       nil
+     end
+
+     class << self
+       def name
+         to_s.split('::').last
+       end
+
+       def slug
+         name.downcase
+       end
+
+       def capabilities
+         raise NotImplementedError
+       end
+
+       def configuration_requirements
+         []
+       end
+
+       def local?
+         false
+       end
+
+       def remote?
+         !local?
+       end
+
+       def configured?(config)
+         configuration_requirements.all? { |req| config.send(req) }
+       end
+
+       def register(name, provider_class)
+         providers[name.to_sym] = provider_class
+       end
+
+       def for(model)
+         model_info = Models.find(model)
+         providers[model_info.provider.to_sym]
+       end
+
+       def providers
+         @providers ||= {}
+       end
+
+       def local_providers
+         providers.select { |_slug, provider_class| provider_class.local? }
+       end
+
+       def remote_providers
+         providers.select { |_slug, provider_class| provider_class.remote? }
+       end
+
+       def configured_providers(config)
+         providers.select do |_slug, provider_class|
+           provider_class.configured?(config)
+         end.values
+       end
+
+       def configured_remote_providers(config)
+         providers.select do |_slug, provider_class|
+           provider_class.remote? && provider_class.configured?(config)
+         end.values
+       end
+     end
+
+     private
+
+     def try_parse_json(maybe_json)
+       return maybe_json unless maybe_json.is_a?(String)
+
+       JSON.parse(maybe_json)
+     rescue JSON::ParserError
+       maybe_json
+     end
+
+     def ensure_configured!
+       missing = configuration_requirements.reject { |req| @config.send(req) }
+       return if missing.empty?
+
+       raise ConfigurationError, "Missing configuration for #{name}: #{missing.join(', ')}"
+     end
+
+     def maybe_normalize_temperature(temperature, _model)
+       temperature
+     end
+
+     def sync_response(connection, payload, additional_headers = {})
+       response = connection.post completion_url, payload do |req|
+         req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
+       end
+       parse_completion_response response
+     end
+   end
+ end
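
Provider is the base class every bundled provider (OpenAI, Anthropic, Dify, Bedrock, and so on) subclasses. The sketch below shows the shape of such a subclass and is entirely hypothetical: the Echo provider, its URL, and its payload are invented, and the hook names (completion_url, render_payload, parse_completion_response) are simply the methods that complete and sync_response above call on the subclass.

# Hypothetical provider subclass, sketched from the hooks Provider expects.
module RubyLLM
  module Providers
    # Example only; not shipped with the gem.
    class Echo < Provider
      def api_base
        'https://echo.example.com/v1' # invented endpoint
      end

      def completion_url
        'chat/completions'
      end

      # Called by Provider#complete before POSTing.
      def render_payload(messages, tools:, temperature:, model:, stream:, schema:)
        { model: model, messages: format_messages(messages), temperature: temperature }
      end

      # Called by Provider#sync_response with the HTTP response object.
      def parse_completion_response(response)
        response.body # a real provider would build a RubyLLM::Message here
      end

      class << self
        def configuration_requirements
          [] # no credentials needed for this sketch
        end
      end
    end
  end
end

# Make it resolvable, e.g. via Models.resolve(model_id, provider: :echo).
RubyLLM::Provider.register(:echo, RubyLLM::Providers::Echo)

Because registration just adds an entry to the class-level providers hash, the registry methods above (local_providers, configured_providers, and so on) pick the new class up automatically.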