ruby_llm_community 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69):
  1. checksums.yaml +4 -4
  2. data/README.md +18 -1
  3. data/lib/generators/ruby_llm/chat_ui/chat_ui_generator.rb +127 -0
  4. data/lib/generators/ruby_llm/chat_ui/templates/controllers/chats_controller.rb.tt +39 -0
  5. data/lib/generators/ruby_llm/chat_ui/templates/controllers/messages_controller.rb.tt +24 -0
  6. data/lib/generators/ruby_llm/chat_ui/templates/controllers/models_controller.rb.tt +14 -0
  7. data/lib/generators/ruby_llm/chat_ui/templates/jobs/chat_response_job.rb.tt +12 -0
  8. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_chat.html.erb.tt +16 -0
  9. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_form.html.erb.tt +29 -0
  10. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/index.html.erb.tt +16 -0
  11. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/new.html.erb.tt +11 -0
  12. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/show.html.erb.tt +23 -0
  13. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_form.html.erb.tt +21 -0
  14. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_message.html.erb.tt +10 -0
  15. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/create.turbo_stream.erb.tt +9 -0
  16. data/lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt +16 -0
  17. data/lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt +30 -0
  18. data/lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt +18 -0
  19. data/lib/generators/ruby_llm/install/install_generator.rb +227 -0
  20. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +2 -2
  21. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +4 -4
  22. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +8 -7
  23. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +12 -3
  24. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +6 -5
  25. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +9 -8
  26. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +4 -3
  27. data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +2 -5
  28. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +2 -2
  29. data/lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt +137 -0
  30. data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +170 -0
  31. data/lib/ruby_llm/active_record/acts_as.rb +108 -467
  32. data/lib/ruby_llm/active_record/acts_as_legacy.rb +403 -0
  33. data/lib/ruby_llm/active_record/chat_methods.rb +336 -0
  34. data/lib/ruby_llm/active_record/message_methods.rb +72 -0
  35. data/lib/ruby_llm/active_record/model_methods.rb +84 -0
  36. data/lib/ruby_llm/aliases.json +72 -6
  37. data/lib/ruby_llm/attachment.rb +22 -0
  38. data/lib/ruby_llm/configuration.rb +6 -0
  39. data/lib/ruby_llm/image_attachment.rb +12 -3
  40. data/lib/ruby_llm/message.rb +1 -1
  41. data/lib/ruby_llm/models.json +2640 -1756
  42. data/lib/ruby_llm/models.rb +5 -15
  43. data/lib/ruby_llm/provider.rb +6 -4
  44. data/lib/ruby_llm/providers/anthropic/media.rb +1 -1
  45. data/lib/ruby_llm/providers/bedrock/models.rb +19 -1
  46. data/lib/ruby_llm/providers/gemini/media.rb +1 -1
  47. data/lib/ruby_llm/providers/gpustack/media.rb +1 -1
  48. data/lib/ruby_llm/providers/ollama/media.rb +1 -1
  49. data/lib/ruby_llm/providers/openai/media.rb +4 -4
  50. data/lib/ruby_llm/providers/openai/response.rb +7 -6
  51. data/lib/ruby_llm/providers/openai/response_media.rb +1 -1
  52. data/lib/ruby_llm/providers/openai/streaming.rb +14 -11
  53. data/lib/ruby_llm/providers/openai/tools.rb +11 -6
  54. data/lib/ruby_llm/providers/vertexai.rb +1 -1
  55. data/lib/ruby_llm/providers/xai/capabilities.rb +166 -0
  56. data/lib/ruby_llm/providers/xai/chat.rb +15 -0
  57. data/lib/ruby_llm/providers/xai/models.rb +48 -0
  58. data/lib/ruby_llm/providers/xai.rb +46 -0
  59. data/lib/ruby_llm/railtie.rb +20 -3
  60. data/lib/ruby_llm/stream_accumulator.rb +0 -4
  61. data/lib/ruby_llm/utils.rb +5 -9
  62. data/lib/ruby_llm/version.rb +1 -1
  63. data/lib/ruby_llm_community.rb +4 -3
  64. data/lib/tasks/models.rake +29 -5
  65. data/lib/tasks/ruby_llm.rake +15 -0
  66. data/lib/tasks/vcr.rake +2 -2
  67. metadata +32 -3
  68. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +0 -108
  69. data/lib/generators/ruby_llm/install_generator.rb +0 -146
@@ -155,31 +155,21 @@ module RubyLLM
155
155
  end
156
156
 
157
157
  def load_models
158
- # Try to load from database first if configured
159
- if RubyLLM.config.model_registry_class
160
- load_from_database
161
- else
162
- load_from_json
163
- end
164
- rescue StandardError => e
165
- RubyLLM.logger.debug "Failed to load models from database: #{e.message}, falling back to JSON"
166
- load_from_json
158
+ read_from_json
167
159
  end
168
160
 
169
- def load_from_database
170
- model_class = RubyLLM.config.model_registry_class
171
- model_class = model_class.constantize if model_class.is_a?(String)
172
- model_class.all.map(&:to_llm)
161
+ def load_from_json!
162
+ @models = read_from_json
173
163
  end
174
164
 
175
- def load_from_json
165
+ def read_from_json
176
166
  data = File.exist?(self.class.models_file) ? File.read(self.class.models_file) : '[]'
177
167
  JSON.parse(data, symbolize_names: true).map { |model| Model::Info.new(model) }
178
168
  rescue JSON::ParserError
179
169
  []
180
170
  end
181
171
 
182
- def save_models
172
+ def save_to_json
183
173
  File.write(self.class.models_file, JSON.pretty_generate(all.map(&:to_h)))
184
174
  end
185
175
 
@@ -42,7 +42,6 @@ module RubyLLM
42
42
  normalized_temperature = maybe_normalize_temperature(temperature, model)
43
43
 
44
44
  payload = Utils.deep_merge(
45
- params,
46
45
  render_payload(
47
46
  messages,
48
47
  tools: tools,
@@ -51,7 +50,8 @@ module RubyLLM
51
50
  cache_prompts: cache_prompts,
52
51
  stream: block_given?,
53
52
  schema: schema
54
- )
53
+ ),
54
+ params
55
55
  )
56
56
 
57
57
  if block_given?
@@ -62,8 +62,10 @@ module RubyLLM
62
62
  end
63
63
 
64
64
  def list_models
65
- response = @connection.get models_url
66
- parse_list_models_response response, slug, capabilities
65
+ Array(models_url).flat_map do |url|
66
+ response = @connection.get(url)
67
+ parse_list_models_response(response, slug, capabilities)
68
+ end
67
69
  end
68
70
 
69
71
  def embed(text, model:, dimensions:)
@@ -98,7 +98,7 @@ module RubyLLM
98
98
  with_cache_control(
99
99
  {
100
100
  type: 'text',
101
- text: Utils.format_text_file_for_llm(text_file)
101
+ text: text_file.for_llm
102
102
  },
103
103
  cache:
104
104
  )
@@ -72,7 +72,25 @@ module RubyLLM
72
72
  return model_id unless model_data['inferenceTypesSupported']&.include?('INFERENCE_PROFILE')
73
73
  return model_id if model_data['inferenceTypesSupported']&.include?('ON_DEMAND')
74
74
 
75
- "us.#{model_id}"
75
+ desired_region_prefix = inference_profile_region_prefix
76
+
77
+ # Return unchanged if model already has the correct region prefix
78
+ return model_id if model_id.start_with?("#{desired_region_prefix}.")
79
+
80
+ # Remove any existing region prefix (e.g., "us.", "eu.", "ap.")
81
+ clean_model_id = model_id.sub(/^[a-z]{2}\./, '')
82
+
83
+ # Apply the desired region prefix
84
+ "#{desired_region_prefix}.#{clean_model_id}"
85
+ end
86
+
87
+ def inference_profile_region_prefix
88
+ # Extract region prefix from bedrock_region (e.g., "eu-west-3" -> "eu")
89
+ region = @config.bedrock_region.to_s
90
+ return 'us' if region.empty? # Default fallback
91
+
92
+ # Take first two characters as the region prefix
93
+ region[0, 2]
76
94
  end
77
95
  end
78
96
  end
@@ -39,7 +39,7 @@ module RubyLLM
39
39
 
40
40
  def format_text_file(text_file)
41
41
  {
42
- text: Utils.format_text_file_for_llm(text_file)
42
+ text: text_file.for_llm
43
43
  }
44
44
  end
45
45
 
@@ -34,7 +34,7 @@ module RubyLLM
34
34
  {
35
35
  type: 'image_url',
36
36
  image_url: {
37
- url: "data:#{image.mime_type};base64,#{image.encoded}",
37
+ url: image.for_llm,
38
38
  detail: 'auto'
39
39
  }
40
40
  }
@@ -34,7 +34,7 @@ module RubyLLM
34
34
  {
35
35
  type: 'image_url',
36
36
  image_url: {
37
- url: "data:#{image.mime_type};base64,#{image.encoded}",
37
+ url: image.for_llm,
38
38
  detail: 'auto'
39
39
  }
40
40
  }
@@ -36,7 +36,7 @@ module RubyLLM
36
36
  {
37
37
  type: 'image_url',
38
38
  image_url: {
39
- url: image.url? ? image.source : "data:#{image.mime_type};base64,#{image.encoded}"
39
+ url: image.url? ? image.source : image.for_llm
40
40
  }
41
41
  }
42
42
  end
@@ -46,7 +46,7 @@ module RubyLLM
46
46
  type: 'file',
47
47
  file: {
48
48
  filename: pdf.filename,
49
- file_data: "data:#{pdf.mime_type};base64,#{pdf.encoded}"
49
+ file_data: pdf.for_llm
50
50
  }
51
51
  }
52
52
  end
@@ -54,7 +54,7 @@ module RubyLLM
54
54
  def format_text_file(text_file)
55
55
  {
56
56
  type: 'text',
57
- text: Utils.format_text_file_for_llm(text_file)
57
+ text: text_file.for_llm
58
58
  }
59
59
  end
60
60
 
@@ -63,7 +63,7 @@ module RubyLLM
63
63
  type: 'input_audio',
64
64
  input_audio: {
65
65
  data: audio.encoded,
66
- format: audio.mime_type.split('/').last
66
+ format: audio.format
67
67
  }
68
68
  }
69
69
  end
@@ -82,7 +82,7 @@ module RubyLLM
82
82
 
83
83
  def format_image_generation_message(msg)
84
84
  items = []
85
- image_attachment = msg.content.attachments.first
85
+ image_attachment = msg.content.attachments.last
86
86
  if image_attachment.reasoning_id
87
87
  items << {
88
88
  type: 'reasoning',
@@ -144,7 +144,7 @@ module RubyLLM
144
144
 
145
145
  return text_content unless image_outputs.any?
146
146
 
147
- build_content_with_images(text_content, image_outputs)
147
+ build_content_with_images(image_outputs, text_content)
148
148
  end
149
149
 
150
150
  private
@@ -157,9 +157,9 @@ module RubyLLM
157
157
  end.join("\n")
158
158
  end
159
159
 
160
- def build_content_with_images(text_content, image_outputs)
161
- content = RubyLLM::Content.new(text_content)
160
+ def build_content_with_images(image_outputs, text_content)
162
161
  reasoning_id = extract_reasoning_id(@current_outputs)
162
+ content = RubyLLM::Content.new(text_content)
163
163
  image_outputs.each do |output|
164
164
  attach_image_to_content(content, output, reasoning_id)
165
165
  end
@@ -177,7 +177,8 @@ module RubyLLM
177
177
  mime_type: mime_type,
178
178
  model_id: nil,
179
179
  id: output['id'],
180
- reasoning_id: reasoning_id
180
+ reasoning_id: reasoning_id,
181
+ revised_prompt: output['revised_prompt']
181
182
  )
182
183
  )
183
184
  end
@@ -194,7 +195,7 @@ module RubyLLM
194
195
  msg.role == :assistant &&
195
196
  msg.content.is_a?(RubyLLM::Content) &&
196
197
  msg.content.attachments.any? &&
197
- msg.content.attachments.first.is_a?(RubyLLM::ImageAttachment)
198
+ msg.content.attachments.last.is_a?(RubyLLM::ImageAttachment)
198
199
  end
199
200
 
200
201
  def extract_reasoning_id(outputs)
@@ -50,7 +50,7 @@ module RubyLLM
50
50
  def format_text_file(text_file)
51
51
  {
52
52
  type: 'input_text',
53
- text: Utils.format_text_file_for_llm(text_file)
53
+ text: text_file.for_llm
54
54
  }
55
55
  end
56
56
 
@@ -168,7 +168,7 @@ module RubyLLM
168
168
  end
169
169
 
170
170
  def build_partial_image_chunk(data)
171
- content = build_image_content(data['partial_image_b64'], 'image/png', nil, nil)
171
+ content = build_image_content(item: data, text_content: nil, revised_prompt: nil, partial: true)
172
172
 
173
173
  Chunk.new(
174
174
  role: :assistant,
@@ -182,12 +182,10 @@ module RubyLLM
182
182
 
183
183
  def build_completed_image_chunk(data)
184
184
  item = data['item']
185
- image_data = item['result']
186
- output_format = item['output_format'] || 'png'
187
- mime_type = "image/#{output_format}"
185
+ text_content = item['delta'] || ''
188
186
  revised_prompt = item['revised_prompt']
189
187
 
190
- content = build_image_content(image_data, mime_type, nil, revised_prompt)
188
+ content = build_image_content(item:, text_content:, revised_prompt:, partial: false)
191
189
 
192
190
  Chunk.new(
193
191
  role: :assistant,
@@ -211,14 +209,19 @@ module RubyLLM
211
209
  )
212
210
  end
213
211
 
214
- def build_image_content(base64_data, mime_type, model_id, revised_prompt = nil)
215
- text_content = revised_prompt || ''
216
- content = RubyLLM::Content.new(text_content)
212
+ def build_image_content(item:, text_content:, revised_prompt: nil, partial: false)
213
+ content = RubyLLM::Content.new(text_content || '')
214
+ output_format = item['output_format'] || 'png'
215
+ mime_type = "image/#{output_format}"
216
+
217
217
  content.attach(
218
218
  RubyLLM::ImageAttachment.new(
219
- data: base64_data,
220
- mime_type: mime_type,
221
- model_id: model_id
219
+ data: item['result'] || item['partial_image_b64'],
220
+ mime_type:,
221
+ model_id: item['model_id'],
222
+ id: item['id'],
223
+ revised_prompt:,
224
+ partial:
222
225
  )
223
226
  )
224
227
  content
@@ -57,6 +57,16 @@ module RubyLLM
57
57
  end
58
58
  end
59
59
 
60
+ def parse_tool_call_arguments(tool_call)
61
+ arguments = tool_call.dig('function', 'arguments')
62
+
63
+ if arguments.nil? || arguments.empty?
64
+ {}
65
+ else
66
+ JSON.parse(arguments)
67
+ end
68
+ end
69
+
60
70
  def parse_tool_calls(tool_calls, parse_arguments: true)
61
71
  return nil unless tool_calls&.any?
62
72
 
@@ -67,12 +77,7 @@ module RubyLLM
67
77
  id: tc['id'],
68
78
  name: tc.dig('function', 'name'),
69
79
  arguments: if parse_arguments
70
- if tc.dig('function', 'arguments').empty?
71
- {}
72
- else
73
- JSON.parse(tc.dig('function',
74
- 'arguments'))
75
- end
80
+ parse_tool_call_arguments(tc)
76
81
  else
77
82
  tc.dig('function', 'arguments')
78
83
  end
@@ -33,7 +33,7 @@ module RubyLLM
33
33
  private
34
34
 
35
35
  def access_token
36
- return 'test-token' if defined?(VCR) && VCR.current_cassette
36
+ return 'test-token' if defined?(VCR) && !VCR.current_cassette.recording?
37
37
 
38
38
  initialize_authorizer unless @authorizer
39
39
  @authorizer.fetch_access_token!['access_token']
@@ -0,0 +1,166 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ class XAI
6
+ # Determines capabilities and pricing for xAI (Grok) models
7
+ # - https://docs.x.ai/docs/models
8
+ module Capabilities
9
+ module_function
10
+
11
+ # rubocop:disable Naming/VariableNumber
12
+ MODEL_PATTERNS = {
13
+ grok_2: /^grok-2(?!-vision)/,
14
+ grok_2_vision: /^grok-2-vision/,
15
+ grok_2_image: /^grok-2-image/,
16
+ grok_3: /^grok-3(?!-(?:fast|mini))/,
17
+ grok_3_fast: /^grok-3-fast/,
18
+ grok_3_mini: /^grok-3-mini(?!-fast)/,
19
+ grok_3_mini_fast: /^grok-3-mini-fast/,
20
+ grok_4: /^grok-4/
21
+ }.freeze
22
+ # rubocop:enable Naming/VariableNumber
23
+
24
+ def context_window_for(model_id)
25
+ case model_family(model_id)
26
+ when 'grok_4' then 256_000
27
+ when 'grok_2_vision' then 32_768
28
+ else 131_072
29
+ end
30
+ end
31
+
32
+ def max_tokens_for(_model_id)
33
+ 4_096
34
+ end
35
+
36
+ def supports_vision?(model_id)
37
+ case model_family(model_id)
38
+ when 'grok_2_vision' then true
39
+ else false
40
+ end
41
+ end
42
+
43
+ def supports_functions?(model_id)
44
+ model_family(model_id) != 'grok_2_image'
45
+ end
46
+
47
+ def supports_structured_output?(model_id)
48
+ model_family(model_id) != 'grok_2_image'
49
+ end
50
+
51
+ def supports_json_mode?(model_id)
52
+ supports_structured_output?(model_id)
53
+ end
54
+
55
+ # Pricing from API data (per million tokens)
56
+ # rubocop:disable Naming/VariableNumber
57
+ PRICES = {
58
+ grok_2: { input: 2.0, output: 10.0 },
59
+ grok_2_vision: { input: 2.0, output: 10.0 },
60
+ grok_3: { input: 3.0, output: 15.0, cached_input: 0.75 },
61
+ grok_3_fast: { input: 5.0, output: 25.0, cached_input: 1.25 },
62
+ grok_3_mini: { input: 0.3, output: 0.5, cached_input: 0.075 },
63
+ grok_3_mini_fast: { input: 0.6, output: 4.0, cached_input: 0.15 },
64
+ grok_4: { input: 3.0, output: 15.0, cached_input: 0.75 }
65
+ }.freeze
66
+ # rubocop:enable Naming/VariableNumber
67
+
68
+ def model_family(model_id)
69
+ MODEL_PATTERNS.each do |family, pattern|
70
+ return family.to_s if model_id.match?(pattern)
71
+ end
72
+ 'other'
73
+ end
74
+
75
+ def input_price_for(model_id)
76
+ family = model_family(model_id).to_sym
77
+ prices = PRICES.fetch(family, { input: default_input_price })
78
+ prices[:input] || default_input_price
79
+ end
80
+
81
+ def cached_input_price_for(model_id)
82
+ family = model_family(model_id).to_sym
83
+ prices = PRICES.fetch(family, {})
84
+ prices[:cached_input]
85
+ end
86
+
87
+ def output_price_for(model_id)
88
+ family = model_family(model_id).to_sym
89
+ prices = PRICES.fetch(family, { output: default_output_price })
90
+ prices[:output] || default_output_price
91
+ end
92
+
93
+ def model_type(model_id)
94
+ return 'image' if model_family(model_id) == 'grok_2_image'
95
+
96
+ 'chat'
97
+ end
98
+
99
+ def default_input_price
100
+ 2.0
101
+ end
102
+
103
+ def default_output_price
104
+ 10.0
105
+ end
106
+
107
+ def format_display_name(model_id)
108
+ model_id.then { |id| humanize(id) }
109
+ .then { |name| apply_special_formatting(name) }
110
+ end
111
+
112
+ def humanize(id)
113
+ id.tr('-', ' ')
114
+ .split
115
+ .map(&:capitalize)
116
+ .join(' ')
117
+ end
118
+
119
+ def apply_special_formatting(name)
120
+ name
121
+ .gsub(/^Grok /, 'Grok-')
122
+ .gsub(/(\d{4}) (\d{2}) (\d{2})/, '\1-\2-\3')
123
+ end
124
+
125
+ def modalities_for(model_id)
126
+ modalities = {
127
+ input: ['text'],
128
+ output: []
129
+ }
130
+
131
+ modalities[:output] << 'text' if model_type(model_id) == 'chat'
132
+
133
+ # Vision support
134
+ modalities[:input] << 'image' if supports_vision?(model_id)
135
+
136
+ modalities
137
+ end
138
+
139
+ def capabilities_for(model_id)
140
+ capabilities = []
141
+
142
+ # Common capabilities
143
+ capabilities << 'streaming'
144
+ capabilities << 'function_calling' if supports_functions?(model_id)
145
+ capabilities << 'structured_output' if supports_structured_output?(model_id)
146
+
147
+ capabilities
148
+ end
149
+
150
+ def pricing_for(model_id)
151
+ standard_pricing = {
152
+ input_per_million: input_price_for(model_id),
153
+ output_per_million: output_price_for(model_id)
154
+ }
155
+
156
+ # Add cached pricing if available
157
+ cached_price = cached_input_price_for(model_id)
158
+ standard_pricing[:cached_input_per_million] = cached_price if cached_price
159
+
160
+ # Pricing structure
161
+ { text_tokens: { standard: standard_pricing } }
162
+ end
163
+ end
164
+ end
165
+ end
166
+ end
@@ -0,0 +1,15 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ class XAI
6
+ # Chat implementation for xAI
7
+ # https://docs.x.ai/docs/api-reference#chat-completions
8
+ module Chat
9
+ def format_role(role)
10
+ role.to_s
11
+ end
12
+ end
13
+ end
14
+ end
15
+ end
@@ -0,0 +1,48 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ class XAI
6
+ # Model definitions for xAI API
7
+ # https://docs.x.ai/docs/api-reference#list-language-models
8
+ # https://docs.x.ai/docs/api-reference#list-image-generation-models
9
+ module Models
10
+ module_function
11
+
12
+ # NOTE: We pull models list from two endpoints here as these provide
13
+ # detailed, modality, capability and cost information for each
14
+ # model that we can leverage which the generic OpenAI compatible
15
+ # /models endpoint does not provide.
16
+ def models_url
17
+ %w[language-models image-generation-models]
18
+ end
19
+
20
+ def parse_list_models_response(response, slug, capabilities)
21
+ data = response.body
22
+ return [] if data.empty?
23
+
24
+ data['models']&.map do |model_data|
25
+ model_id = model_data['id']
26
+
27
+ Model::Info.new(
28
+ id: model_id,
29
+ name: capabilities.format_display_name(model_id),
30
+ provider: slug,
31
+ family: capabilities.model_family(model_id),
32
+ modalities: {
33
+ input: model_data['input_modalities'] | capabilities.modalities_for(model_id)[:input],
34
+ output: model_data['output_modalities'] | capabilities.modalities_for(model_id)[:output]
35
+ },
36
+ context_window: capabilities.context_window_for(model_id),
37
+ capabilities: capabilities.capabilities_for(model_id),
38
+ pricing: capabilities.pricing_for(model_id),
39
+ metadata: {
40
+ aliases: model_data['aliases']
41
+ }
42
+ )
43
+ end || []
44
+ end
45
+ end
46
+ end
47
+ end
48
+ end
@@ -0,0 +1,46 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ # xAI API integration
6
+ class XAI < OpenAIBase
7
+ include XAI::Capabilities
8
+ include XAI::Chat
9
+ include XAI::Models
10
+
11
+ def api_base
12
+ 'https://api.x.ai/v1'
13
+ end
14
+
15
+ def headers
16
+ {
17
+ 'Authorization' => "Bearer #{@config.xai_api_key}",
18
+ 'Content-Type' => 'application/json'
19
+ }
20
+ end
21
+
22
+ # xAI uses a different error format than OpenAI
23
+ # {"code": "...", "error": "..."}
24
+ def parse_error(response)
25
+ return if response.body.empty?
26
+
27
+ body = try_parse_json(response.body)
28
+ case body
29
+ when Hash then body['error']
30
+ when Array then body.map { |part| part['error'] }.join('. ')
31
+ else body
32
+ end
33
+ end
34
+
35
+ class << self
36
+ def capabilities
37
+ XAI::Capabilities
38
+ end
39
+
40
+ def configuration_requirements
41
+ %i[xai_api_key]
42
+ end
43
+ end
44
+ end
45
+ end
46
+ end
@@ -3,14 +3,31 @@
3
3
  module RubyLLM
4
4
  # Rails integration for RubyLLM
5
5
  class Railtie < Rails::Railtie
6
+ initializer 'ruby_llm.inflections' do
7
+ ActiveSupport::Inflector.inflections(:en) do |inflect|
8
+ inflect.acronym 'LLM'
9
+ end
10
+ end
11
+
6
12
  initializer 'ruby_llm.active_record' do
7
13
  ActiveSupport.on_load :active_record do
8
- include RubyLLM::ActiveRecord::ActsAs
14
+ if RubyLLM.config.use_new_acts_as
15
+ require 'ruby_llm/active_record/acts_as'
16
+ ::ActiveRecord::Base.include RubyLLM::ActiveRecord::ActsAs
17
+ else
18
+ require 'ruby_llm/active_record/acts_as_legacy'
19
+ ::ActiveRecord::Base.include RubyLLM::ActiveRecord::ActsAsLegacy
20
+
21
+ Rails.logger.warn(
22
+ "\n!!! RubyLLM's legacy acts_as API is deprecated and will be removed in RubyLLM 2.0.0. " \
23
+ "Please consult the migration guide at https://rubyllm.com/upgrading-to-1-7/\n"
24
+ )
25
+ end
9
26
  end
10
27
  end
11
28
 
12
- generators do
13
- require 'generators/ruby_llm/install_generator'
29
+ rake_tasks do
30
+ load 'tasks/ruby_llm.rake'
14
31
  end
15
32
  end
16
33
  end
@@ -72,11 +72,9 @@ module RubyLLM
72
72
  when [String, String]
73
73
  @content << new_content
74
74
  when [String, Content]
75
- # Convert accumulated string to Content and merge
76
75
  @content = Content.new(@content)
77
76
  merge_content(new_content)
78
77
  when [Content, String]
79
- # Append string to existing Content's text
80
78
  @content.instance_variable_set(:@text, (@content.text || '') + new_content)
81
79
  when [Content, Content]
82
80
  merge_content(new_content)
@@ -85,12 +83,10 @@ module RubyLLM
85
83
  end
86
84
 
87
85
  def merge_content(new_content)
88
- # Merge text
89
86
  current_text = @content.text || ''
90
87
  new_text = new_content.text || ''
91
88
  @content.instance_variable_set(:@text, current_text + new_text)
92
89
 
93
- # Merge attachments
94
90
  new_content.attachments.each do |attachment|
95
91
  @content.attach(attachment)
96
92
  end