ruby_llm_community 0.0.6 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +3 -3
  3. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +34 -0
  4. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +5 -0
  5. data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +6 -0
  6. data/lib/generators/ruby_llm/install_generator.rb +27 -2
  7. data/lib/ruby_llm/active_record/acts_as.rb +163 -24
  8. data/lib/ruby_llm/aliases.json +58 -5
  9. data/lib/ruby_llm/aliases.rb +7 -25
  10. data/lib/ruby_llm/chat.rb +10 -17
  11. data/lib/ruby_llm/configuration.rb +5 -12
  12. data/lib/ruby_llm/connection.rb +4 -4
  13. data/lib/ruby_llm/connection_multipart.rb +19 -0
  14. data/lib/ruby_llm/content.rb +5 -2
  15. data/lib/ruby_llm/embedding.rb +1 -2
  16. data/lib/ruby_llm/error.rb +0 -8
  17. data/lib/ruby_llm/image.rb +23 -8
  18. data/lib/ruby_llm/image_attachment.rb +21 -0
  19. data/lib/ruby_llm/message.rb +6 -6
  20. data/lib/ruby_llm/model/info.rb +12 -10
  21. data/lib/ruby_llm/model/pricing.rb +0 -3
  22. data/lib/ruby_llm/model/pricing_category.rb +0 -2
  23. data/lib/ruby_llm/model/pricing_tier.rb +0 -1
  24. data/lib/ruby_llm/models.json +2147 -470
  25. data/lib/ruby_llm/models.rb +65 -34
  26. data/lib/ruby_llm/provider.rb +8 -8
  27. data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -46
  28. data/lib/ruby_llm/providers/anthropic/chat.rb +2 -2
  29. data/lib/ruby_llm/providers/anthropic/media.rb +0 -1
  30. data/lib/ruby_llm/providers/anthropic/tools.rb +1 -2
  31. data/lib/ruby_llm/providers/anthropic.rb +1 -2
  32. data/lib/ruby_llm/providers/bedrock/chat.rb +2 -4
  33. data/lib/ruby_llm/providers/bedrock/media.rb +0 -1
  34. data/lib/ruby_llm/providers/bedrock/models.rb +0 -2
  35. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +0 -12
  36. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +0 -7
  37. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +0 -12
  38. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +0 -12
  39. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +0 -13
  40. data/lib/ruby_llm/providers/bedrock/streaming.rb +0 -18
  41. data/lib/ruby_llm/providers/bedrock.rb +1 -2
  42. data/lib/ruby_llm/providers/deepseek/capabilities.rb +1 -2
  43. data/lib/ruby_llm/providers/deepseek/chat.rb +0 -1
  44. data/lib/ruby_llm/providers/gemini/capabilities.rb +28 -100
  45. data/lib/ruby_llm/providers/gemini/chat.rb +57 -29
  46. data/lib/ruby_llm/providers/gemini/embeddings.rb +0 -2
  47. data/lib/ruby_llm/providers/gemini/images.rb +1 -2
  48. data/lib/ruby_llm/providers/gemini/media.rb +0 -1
  49. data/lib/ruby_llm/providers/gemini/models.rb +1 -2
  50. data/lib/ruby_llm/providers/gemini/streaming.rb +15 -1
  51. data/lib/ruby_llm/providers/gemini/tools.rb +0 -5
  52. data/lib/ruby_llm/providers/gpustack/chat.rb +11 -1
  53. data/lib/ruby_llm/providers/gpustack/media.rb +45 -0
  54. data/lib/ruby_llm/providers/gpustack/models.rb +44 -9
  55. data/lib/ruby_llm/providers/gpustack.rb +1 -0
  56. data/lib/ruby_llm/providers/mistral/capabilities.rb +2 -10
  57. data/lib/ruby_llm/providers/mistral/chat.rb +0 -2
  58. data/lib/ruby_llm/providers/mistral/embeddings.rb +0 -3
  59. data/lib/ruby_llm/providers/mistral/models.rb +0 -1
  60. data/lib/ruby_llm/providers/ollama/chat.rb +0 -1
  61. data/lib/ruby_llm/providers/ollama/media.rb +1 -6
  62. data/lib/ruby_llm/providers/ollama/models.rb +36 -0
  63. data/lib/ruby_llm/providers/ollama.rb +1 -0
  64. data/lib/ruby_llm/providers/openai/capabilities.rb +3 -16
  65. data/lib/ruby_llm/providers/openai/chat.rb +1 -3
  66. data/lib/ruby_llm/providers/openai/embeddings.rb +0 -3
  67. data/lib/ruby_llm/providers/openai/images.rb +73 -3
  68. data/lib/ruby_llm/providers/openai/media.rb +0 -1
  69. data/lib/ruby_llm/providers/openai/response.rb +120 -29
  70. data/lib/ruby_llm/providers/openai/response_media.rb +2 -2
  71. data/lib/ruby_llm/providers/openai/streaming.rb +107 -47
  72. data/lib/ruby_llm/providers/openai/tools.rb +1 -1
  73. data/lib/ruby_llm/providers/openai.rb +1 -3
  74. data/lib/ruby_llm/providers/openai_base.rb +2 -2
  75. data/lib/ruby_llm/providers/openrouter/models.rb +1 -16
  76. data/lib/ruby_llm/providers/perplexity/capabilities.rb +0 -1
  77. data/lib/ruby_llm/providers/perplexity/chat.rb +0 -1
  78. data/lib/ruby_llm/providers/perplexity.rb +1 -5
  79. data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
  80. data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
  81. data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
  82. data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
  83. data/lib/ruby_llm/providers/vertexai.rb +55 -0
  84. data/lib/ruby_llm/railtie.rb +0 -1
  85. data/lib/ruby_llm/stream_accumulator.rb +72 -10
  86. data/lib/ruby_llm/streaming.rb +16 -25
  87. data/lib/ruby_llm/tool.rb +2 -19
  88. data/lib/ruby_llm/tool_call.rb +0 -9
  89. data/lib/ruby_llm/version.rb +1 -1
  90. data/lib/ruby_llm_community.rb +5 -3
  91. data/lib/tasks/models.rake +525 -0
  92. data/lib/tasks/release.rake +37 -2
  93. data/lib/tasks/vcr.rake +0 -7
  94. metadata +13 -4
  95. data/lib/tasks/aliases.rake +0 -235
  96. data/lib/tasks/models_docs.rake +0 -224
  97. data/lib/tasks/models_update.rake +0 -108
@@ -0,0 +1,525 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'dotenv/load'
4
+ require 'ruby_llm'
5
+ require 'json'
6
+ require 'json-schema'
7
+ require 'fileutils'
8
+
9
+ desc 'Update models, docs, and aliases'
10
+ task models: ['models:update', 'models:docs', 'models:aliases']
11
+
12
+ namespace :models do
13
+ desc 'Update available models from providers (API keys needed)'
14
+ task :update do
15
+ puts 'Configuring RubyLLM...'
16
+ configure_from_env
17
+ refresh_models
18
+ display_model_stats
19
+ end
20
+
21
+ desc 'Generate available models documentation'
22
+ task :docs do
23
+ FileUtils.mkdir_p('docs/_reference')
24
+ output = generate_models_markdown
25
+ File.write('docs/_reference/available-models.md', output)
26
+ puts 'Generated docs/_reference/available-models.md'
27
+ end
28
+
29
+ desc 'Generate model aliases from registry'
30
+ task :aliases do
31
+ generate_aliases
32
+ end
33
+ end
34
+
35
+ # Keep aliases:generate for backwards compatibility
36
+ namespace :aliases do
37
+ task generate: ['models:aliases']
38
+ end
39
+
40
+ def configure_from_env
41
+ RubyLLM.configure do |config|
42
+ config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
43
+ config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
44
+ config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
45
+ config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
46
+ config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
47
+ config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
48
+ config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
49
+ config.vertexai_location = ENV.fetch('GOOGLE_CLOUD_LOCATION', nil)
50
+ config.vertexai_project_id = ENV.fetch('GOOGLE_CLOUD_PROJECT', nil)
51
+ configure_bedrock(config)
52
+ config.request_timeout = 30
53
+ end
54
+ end
55
+
56
+ def configure_bedrock(config)
57
+ config.bedrock_api_key = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
58
+ config.bedrock_secret_key = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
59
+ config.bedrock_region = ENV.fetch('AWS_REGION', nil)
60
+ config.bedrock_session_token = ENV.fetch('AWS_SESSION_TOKEN', nil)
61
+ end
62
+
63
+ def refresh_models
64
+ initial_count = RubyLLM.models.all.size
65
+ puts "Refreshing models (#{initial_count} cached)..."
66
+
67
+ models = RubyLLM.models.refresh!
68
+
69
+ if models.all.empty? && initial_count.zero?
70
+ puts 'Error: Failed to fetch models.'
71
+ exit(1)
72
+ elsif models.all.size == initial_count && initial_count.positive?
73
+ puts 'Warning: Model list unchanged.'
74
+ else
75
+ puts 'Validating models...'
76
+ validate_models!(models)
77
+
78
+ puts "Saving models.json (#{models.all.size} models)"
79
+ models.save_models
80
+ end
81
+
82
+ @models = models
83
+ end
84
+
85
+ def validate_models!(models)
86
+ schema_path = RubyLLM::Models.schema_file
87
+ models_data = models.all.map(&:to_h)
88
+
89
+ validation_errors = JSON::Validator.fully_validate(schema_path, models_data)
90
+
91
+ unless validation_errors.empty?
92
+ # Save failed models for inspection
93
+ failed_path = File.expand_path('../ruby_llm/models.failed.json', __dir__)
94
+ File.write(failed_path, JSON.pretty_generate(models_data))
95
+
96
+ puts 'ERROR: Models validation failed:'
97
+ puts "\nValidation errors:"
98
+ validation_errors.first(10).each { |error| puts " - #{error}" }
99
+ puts " ... and #{validation_errors.size - 10} more errors" if validation_errors.size > 10
100
+ puts "-> Failed models saved to: #{failed_path}"
101
+ exit(1)
102
+ end
103
+
104
+ puts '✓ Models validation passed'
105
+ end
106
+
107
+ def display_model_stats
108
+ puts "\nModel count:"
109
+ provider_counts = @models.all.group_by(&:provider).transform_values(&:count)
110
+
111
+ RubyLLM::Provider.providers.each do |sym, provider_class|
112
+ name = provider_class.name
113
+ count = provider_counts[sym.to_s] || 0
114
+ status = status(sym)
115
+ puts " #{name}: #{count} models #{status}"
116
+ end
117
+
118
+ puts 'Refresh complete.'
119
+ end
120
+
121
+ def status(provider_sym)
122
+ provider_class = RubyLLM::Provider.providers[provider_sym]
123
+ if provider_class.local?
124
+ ' (LOCAL - SKIP)'
125
+ elsif provider_class.configured?(RubyLLM.config)
126
+ ' (OK)'
127
+ else
128
+ ' (NOT CONFIGURED)'
129
+ end
130
+ end
131
+
132
+ def generate_models_markdown
133
+ <<~MARKDOWN
134
+ ---
135
+ layout: default
136
+ title: Available Models
137
+ nav_order: 1
138
+ description: Browse hundreds of AI models from every major provider. Always up-to-date, automatically generated.
139
+ redirect_from:
140
+ - /guides/available-models
141
+ ---
142
+
143
+ # {{ page.title }}
144
+ {: .no_toc }
145
+
146
+ {{ page.description }}
147
+ {: .fs-6 .fw-300 }
148
+
149
+ ## Table of contents
150
+ {: .no_toc .text-delta }
151
+
152
+ 1. TOC
153
+ {:toc}
154
+
155
+ ---
156
+
157
+ ## Model Data Sources
158
+
159
+ - **OpenAI, Anthropic, DeepSeek, Gemini, VertexAI**: Enriched by [🚀 Parsera](https://parsera.org/) *([free LLM metadata API](https://api.parsera.org/v1/llm-specs) - [go say thanks!](https://github.com/parsera-labs/api-llm-specs))*
160
+ - **OpenRouter**: Direct API
161
+ - **Others**: Local capabilities files
162
+
163
+ ## Last Updated
164
+ {: .d-inline-block }
165
+
166
+ #{Time.now.utc.strftime('%Y-%m-%d')}
167
+ {: .label .label-green }
168
+
169
+ ## Models by Provider
170
+
171
+ #{generate_provider_sections}
172
+
173
+ ## Models by Capability
174
+
175
+ #{generate_capability_sections}
176
+
177
+ ## Models by Modality
178
+
179
+ #{generate_modality_sections}
180
+ MARKDOWN
181
+ end
182
+
183
+ def generate_provider_sections
184
+ RubyLLM::Provider.providers.filter_map do |provider, provider_class|
185
+ models = RubyLLM.models.by_provider(provider)
186
+ next if models.none?
187
+
188
+ <<~PROVIDER
189
+ ### #{provider_class.name} (#{models.count})
190
+
191
+ #{models_table(models)}
192
+ PROVIDER
193
+ end.join("\n\n")
194
+ end
195
+
196
+ def generate_capability_sections
197
+ capabilities = {
198
+ 'Function Calling' => RubyLLM.models.select(&:function_calling?),
199
+ 'Structured Output' => RubyLLM.models.select(&:structured_output?),
200
+ 'Streaming' => RubyLLM.models.select { |m| m.capabilities.include?('streaming') },
201
+ 'Batch Processing' => RubyLLM.models.select { |m| m.capabilities.include?('batch') }
202
+ }
203
+
204
+ capabilities.filter_map do |capability, models|
205
+ next if models.none?
206
+
207
+ <<~CAPABILITY
208
+ ### #{capability} (#{models.count})
209
+
210
+ #{models_table(models)}
211
+ CAPABILITY
212
+ end.join("\n\n")
213
+ end
214
+
215
+ def generate_modality_sections # rubocop:disable Metrics/PerceivedComplexity
216
+ sections = []
217
+
218
+ vision_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('image') }
219
+ if vision_models.any?
220
+ sections << <<~SECTION
221
+ ### Vision Models (#{vision_models.count})
222
+
223
+ Models that can process images:
224
+
225
+ #{models_table(vision_models)}
226
+ SECTION
227
+ end
228
+
229
+ audio_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('audio') }
230
+ if audio_models.any?
231
+ sections << <<~SECTION
232
+ ### Audio Input Models (#{audio_models.count})
233
+
234
+ Models that can process audio:
235
+
236
+ #{models_table(audio_models)}
237
+ SECTION
238
+ end
239
+
240
+ pdf_models = RubyLLM.models.select { |m| (m.modalities.input || []).include?('pdf') }
241
+ if pdf_models.any?
242
+ sections << <<~SECTION
243
+ ### PDF Models (#{pdf_models.count})
244
+
245
+ Models that can process PDF documents:
246
+
247
+ #{models_table(pdf_models)}
248
+ SECTION
249
+ end
250
+
251
+ embedding_models = RubyLLM.models.select { |m| (m.modalities.output || []).include?('embeddings') }
252
+ if embedding_models.any?
253
+ sections << <<~SECTION
254
+ ### Embedding Models (#{embedding_models.count})
255
+
256
+ Models that generate embeddings:
257
+
258
+ #{models_table(embedding_models)}
259
+ SECTION
260
+ end
261
+
262
+ sections.join("\n\n")
263
+ end
264
+
265
+ def models_table(models)
266
+ return '*No models found*' if models.none?
267
+
268
+ headers = ['Model', 'Provider', 'Context', 'Max Output', 'Standard Pricing (per 1M tokens)']
269
+ alignment = [':--', ':--', '--:', '--:', ':--']
270
+
271
+ rows = models.sort_by { |m| [m.provider, m.name] }.map do |model|
272
+ pricing = standard_pricing_display(model)
273
+
274
+ [
275
+ model.id,
276
+ model.provider,
277
+ model.context_window || '-',
278
+ model.max_output_tokens || '-',
279
+ pricing
280
+ ]
281
+ end
282
+
283
+ table = []
284
+ table << "| #{headers.join(' | ')} |"
285
+ table << "| #{alignment.join(' | ')} |"
286
+
287
+ rows.each do |row|
288
+ table << "| #{row.join(' | ')} |"
289
+ end
290
+
291
+ table.join("\n")
292
+ end
293
+
294
+ def standard_pricing_display(model)
295
+ pricing_data = model.pricing.to_h[:text_tokens]&.dig(:standard) || {}
296
+
297
+ if pricing_data.any?
298
+ parts = []
299
+
300
+ parts << "In: $#{format('%.2f', pricing_data[:input_per_million])}" if pricing_data[:input_per_million]
301
+
302
+ parts << "Out: $#{format('%.2f', pricing_data[:output_per_million])}" if pricing_data[:output_per_million]
303
+
304
+ if pricing_data[:cached_input_per_million]
305
+ parts << "Cache: $#{format('%.2f', pricing_data[:cached_input_per_million])}"
306
+ end
307
+
308
+ return parts.join(', ') if parts.any?
309
+ end
310
+
311
+ '-'
312
+ end
313
+
314
+ def generate_aliases # rubocop:disable Metrics/PerceivedComplexity
315
+ models = Hash.new { |h, k| h[k] = [] }
316
+
317
+ RubyLLM.models.all.each do |model|
318
+ models[model.provider] << model.id
319
+ end
320
+
321
+ aliases = {}
322
+
323
+ # OpenAI models
324
+ models['openai'].each do |model|
325
+ openrouter_model = "openai/#{model}"
326
+ next unless models['openrouter'].include?(openrouter_model)
327
+
328
+ alias_key = model.gsub('-latest', '')
329
+ aliases[alias_key] = {
330
+ 'openai' => model,
331
+ 'openrouter' => openrouter_model
332
+ }
333
+ end
334
+
335
+ anthropic_latest = group_anthropic_models_by_base_name(models['anthropic'])
336
+
337
+ anthropic_latest.each do |base_name, latest_model|
338
+ openrouter_variants = [
339
+ "anthropic/#{base_name}",
340
+ "anthropic/#{base_name.gsub(/-(\d)/, '.\1')}",
341
+ "anthropic/#{base_name.gsub(/claude-(\d+)-(\d+)/, 'claude-\1.\2')}",
342
+ "anthropic/#{base_name.gsub(/(\d+)-(\d+)/, '\1.\2')}"
343
+ ]
344
+
345
+ openrouter_model = openrouter_variants.find { |v| models['openrouter'].include?(v) }
346
+ bedrock_model = find_best_bedrock_model(latest_model, models['bedrock'])
347
+
348
+ next unless openrouter_model || bedrock_model || models['anthropic'].include?(latest_model)
349
+
350
+ aliases[base_name] = { 'anthropic' => latest_model }
351
+ aliases[base_name]['openrouter'] = openrouter_model if openrouter_model
352
+ aliases[base_name]['bedrock'] = bedrock_model if bedrock_model
353
+ end
354
+
355
+ models['bedrock'].each do |bedrock_model|
356
+ next unless bedrock_model.start_with?('anthropic.')
357
+ next unless bedrock_model =~ /anthropic\.(claude-[\d.]+-[a-z]+)/
358
+
359
+ base_name = Regexp.last_match(1)
360
+ anthropic_name = base_name.tr('.', '-')
361
+
362
+ next if aliases[anthropic_name]
363
+
364
+ openrouter_variants = [
365
+ "anthropic/#{anthropic_name}",
366
+ "anthropic/#{base_name}"
367
+ ]
368
+
369
+ openrouter_model = openrouter_variants.find { |v| models['openrouter'].include?(v) }
370
+
371
+ aliases[anthropic_name] = { 'bedrock' => bedrock_model }
372
+ aliases[anthropic_name]['anthropic'] = anthropic_name if models['anthropic'].include?(anthropic_name)
373
+ aliases[anthropic_name]['openrouter'] = openrouter_model if openrouter_model
374
+ end
375
+
376
+ # Gemini models (also map to vertexai)
377
+ models['gemini'].each do |model|
378
+ openrouter_variants = [
379
+ "google/#{model}",
380
+ "google/#{model.gsub('gemini-', 'gemini-').tr('.', '-')}",
381
+ "google/#{model.gsub('gemini-', 'gemini-')}"
382
+ ]
383
+
384
+ openrouter_model = openrouter_variants.find { |v| models['openrouter'].include?(v) }
385
+ vertexai_model = models['vertexai'].include?(model) ? model : nil
386
+
387
+ next unless openrouter_model || vertexai_model
388
+
389
+ alias_key = model.gsub('-latest', '')
390
+ aliases[alias_key] = { 'gemini' => model }
391
+ aliases[alias_key]['openrouter'] = openrouter_model if openrouter_model
392
+ aliases[alias_key]['vertexai'] = vertexai_model if vertexai_model
393
+ end
394
+
395
+ # VertexAI models that aren't in Gemini (e.g. older models like text-bison)
396
+ models['vertexai'].each do |model|
397
+ # Skip if already handled above
398
+ next if models['gemini'].include?(model)
399
+
400
+ # Check if OpenRouter has this Google model
401
+ openrouter_variants = [
402
+ "google/#{model}",
403
+ "google/#{model.tr('.', '-')}"
404
+ ]
405
+
406
+ openrouter_model = openrouter_variants.find { |v| models['openrouter'].include?(v) }
407
+ gemini_model = models['gemini'].include?(model) ? model : nil
408
+
409
+ next unless openrouter_model || gemini_model
410
+
411
+ alias_key = model.gsub('-latest', '')
412
+ next if aliases[alias_key] # Skip if already created
413
+
414
+ aliases[alias_key] = { 'vertexai' => model }
415
+ aliases[alias_key]['openrouter'] = openrouter_model if openrouter_model
416
+ aliases[alias_key]['gemini'] = gemini_model if gemini_model
417
+ end
418
+
419
+ models['deepseek'].each do |model|
420
+ openrouter_model = "deepseek/#{model}"
421
+ next unless models['openrouter'].include?(openrouter_model)
422
+
423
+ alias_key = model.gsub('-latest', '')
424
+ aliases[alias_key] = {
425
+ 'deepseek' => model,
426
+ 'openrouter' => openrouter_model
427
+ }
428
+ end
429
+
430
+ sorted_aliases = aliases.sort.to_h
431
+ File.write(RubyLLM::Aliases.aliases_file, JSON.pretty_generate(sorted_aliases))
432
+
433
+ puts "Generated #{sorted_aliases.size} aliases"
434
+ end
435
+
436
+ def group_anthropic_models_by_base_name(anthropic_models)
437
+ grouped = Hash.new { |h, k| h[k] = [] }
438
+
439
+ anthropic_models.each do |model|
440
+ base_name = extract_base_name(model)
441
+ grouped[base_name] << model
442
+ end
443
+
444
+ latest_models = {}
445
+ grouped.each do |base_name, model_list|
446
+ if model_list.size == 1
447
+ latest_models[base_name] = model_list.first
448
+ else
449
+ latest_model = model_list.max_by { |model| extract_date_from_model(model) }
450
+ latest_models[base_name] = latest_model
451
+ end
452
+ end
453
+
454
+ latest_models
455
+ end
456
+
457
+ def extract_base_name(model)
458
+ if model =~ /^(.+)-(\d{8})$/
459
+ Regexp.last_match(1)
460
+ else
461
+ model
462
+ end
463
+ end
464
+
465
+ def extract_date_from_model(model)
466
+ if model =~ /-(\d{8})$/
467
+ Regexp.last_match(1)
468
+ else
469
+ '00000000'
470
+ end
471
+ end
472
+
473
+ def find_best_bedrock_model(anthropic_model, bedrock_models) # rubocop:disable Metrics/PerceivedComplexity
474
+ base_pattern = case anthropic_model
475
+ when 'claude-2.0', 'claude-2'
476
+ 'claude-v2'
477
+ when 'claude-2.1'
478
+ 'claude-v2:1'
479
+ when 'claude-instant-v1', 'claude-instant'
480
+ 'claude-instant'
481
+ else
482
+ extract_base_name(anthropic_model)
483
+ end
484
+
485
+ matching_models = bedrock_models.select do |bedrock_model|
486
+ model_without_prefix = bedrock_model.sub(/^(?:us\.)?anthropic\./, '')
487
+ model_without_prefix.start_with?(base_pattern)
488
+ end
489
+
490
+ return nil if matching_models.empty?
491
+
492
+ begin
493
+ model_info = RubyLLM.models.find(anthropic_model)
494
+ target_context = model_info.context_window
495
+ rescue StandardError
496
+ target_context = nil
497
+ end
498
+
499
+ if target_context
500
+ target_k = target_context / 1000
501
+
502
+ with_context = matching_models.select do |m|
503
+ m.include?(":#{target_k}k") || m.include?(":0:#{target_k}k")
504
+ end
505
+
506
+ return with_context.first if with_context.any?
507
+ end
508
+
509
+ matching_models.min_by do |model|
510
+ context_priority = if model =~ /:(?:\d+:)?(\d+)k/
511
+ -Regexp.last_match(1).to_i
512
+ else
513
+ 0
514
+ end
515
+
516
+ version_priority = if model =~ /-v(\d+):/
517
+ -Regexp.last_match(1).to_i
518
+ else
519
+ 0
520
+ end
521
+
522
+ has_context_priority = model.include?('k') ? -1 : 0
523
+ [has_context_priority, context_priority, version_priority]
524
+ end
525
+ end
@@ -1,6 +1,38 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- namespace :release do
3
+ namespace :release do # rubocop:disable Metrics/BlockLength
4
+ desc 'Prepare for release'
5
+ task :prepare do
6
+ Rake::Task['release:refresh_stale_cassettes'].invoke
7
+ sh 'overcommit --run'
8
+ Rake::Task['models'].invoke
9
+ end
10
+
11
+ desc 'Remove stale cassettes and re-record them'
12
+ task :refresh_stale_cassettes do
13
+ max_age_days = 1
14
+ cassette_dir = 'spec/fixtures/vcr_cassettes'
15
+
16
+ stale_count = 0
17
+ Dir.glob("#{cassette_dir}/**/*.yml").each do |cassette|
18
+ age_days = (Time.now - File.mtime(cassette)) / 86_400
19
+ next unless age_days > max_age_days
20
+
21
+ puts "Removing stale cassette: #{File.basename(cassette)} (#{age_days.round(1)} days old)"
22
+ File.delete(cassette)
23
+ stale_count += 1
24
+ end
25
+
26
+ if stale_count.positive?
27
+ puts "\n🗑️ Removed #{stale_count} stale cassettes"
28
+ puts '🔄 Re-recording cassettes...'
29
+ system('bundle exec rspec') || exit(1)
30
+ puts '✅ Cassettes refreshed!'
31
+ else
32
+ puts '✅ No stale cassettes found'
33
+ end
34
+ end
35
+
4
36
  desc 'Verify cassettes are fresh enough for release'
5
37
  task :verify_cassettes do
6
38
  max_age_days = 1
@@ -20,10 +52,13 @@ namespace :release do
20
52
 
21
53
  if stale_cassettes.any?
22
54
  puts "\nāŒ Found stale cassettes (older than #{max_age_days} days):"
55
+ stale_files = []
23
56
  stale_cassettes.each do |c|
24
57
  puts " - #{c[:file]} (#{c[:age]} days old)"
58
+ stale_files << File.join(cassette_dir, '**', c[:file])
25
59
  end
26
- puts "\nRun locally: bundle exec rspec"
60
+
61
+ puts "\nRun locally: bundle exec rake release:refresh_stale_cassettes"
27
62
  exit 1
28
63
  else
29
64
  puts "✅ All cassettes are fresh (< #{max_age_days} days old)"
data/lib/tasks/vcr.rake CHANGED
@@ -2,9 +2,7 @@
2
2
 
3
3
  require 'dotenv/load'
4
4
 
5
- # Helper functions at the top level
6
5
  def record_all_cassettes(cassette_dir)
7
- # Re-record all cassettes
8
6
  FileUtils.rm_rf(cassette_dir)
9
7
  FileUtils.mkdir_p(cassette_dir)
10
8
 
@@ -14,10 +12,8 @@ def record_all_cassettes(cassette_dir)
14
12
  end
15
13
 
16
14
  def record_for_providers(providers, cassette_dir)
17
- # Get the list of available providers from RubyLLM itself
18
15
  all_providers = RubyLLM::Provider.providers.keys.map(&:to_s)
19
16
 
20
- # Check for valid providers
21
17
  if providers.empty?
22
18
  puts "Please specify providers or 'all'. Example: rake vcr:record[openai,anthropic]"
23
19
  puts "Available providers: #{all_providers.join(', ')}"
@@ -31,7 +27,6 @@ def record_for_providers(providers, cassette_dir)
31
27
  return
32
28
  end
33
29
 
34
- # Find and delete matching cassettes
35
30
  cassettes_to_delete = find_matching_cassettes(cassette_dir, providers)
36
31
 
37
32
  if cassettes_to_delete.empty?
@@ -54,9 +49,7 @@ def find_matching_cassettes(dir, providers)
54
49
  Dir.glob("#{dir}/**/*.yml").each do |file|
55
50
  basename = File.basename(file)
56
51
 
57
- # Precise matching to avoid cross-provider confusion
58
52
  providers.each do |provider|
59
- # Match only exact provider prefixes
60
53
  next unless basename =~ /^[^_]*_#{provider}_/ || # For first section like "chat_openai_"
61
54
  basename =~ /_#{provider}_[^_]+_/ # For middle sections like "_openai_gpt4_"
62
55
 
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ruby_llm_community
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.6
4
+ version: 1.0.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Paul Shippy
@@ -140,9 +140,11 @@ files:
140
140
  - lib/generators/ruby_llm/install/templates/chat_model.rb.tt
141
141
  - lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt
142
142
  - lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt
143
+ - lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt
143
144
  - lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt
144
145
  - lib/generators/ruby_llm/install/templates/initializer.rb.tt
145
146
  - lib/generators/ruby_llm/install/templates/message_model.rb.tt
147
+ - lib/generators/ruby_llm/install/templates/model_model.rb.tt
146
148
  - lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt
147
149
  - lib/generators/ruby_llm/install_generator.rb
148
150
  - lib/ruby_llm/active_record/acts_as.rb
@@ -153,11 +155,13 @@ files:
153
155
  - lib/ruby_llm/chunk.rb
154
156
  - lib/ruby_llm/configuration.rb
155
157
  - lib/ruby_llm/connection.rb
158
+ - lib/ruby_llm/connection_multipart.rb
156
159
  - lib/ruby_llm/content.rb
157
160
  - lib/ruby_llm/context.rb
158
161
  - lib/ruby_llm/embedding.rb
159
162
  - lib/ruby_llm/error.rb
160
163
  - lib/ruby_llm/image.rb
164
+ - lib/ruby_llm/image_attachment.rb
161
165
  - lib/ruby_llm/message.rb
162
166
  - lib/ruby_llm/mime_type.rb
163
167
  - lib/ruby_llm/model.rb
@@ -204,6 +208,7 @@ files:
204
208
  - lib/ruby_llm/providers/gemini/tools.rb
205
209
  - lib/ruby_llm/providers/gpustack.rb
206
210
  - lib/ruby_llm/providers/gpustack/chat.rb
211
+ - lib/ruby_llm/providers/gpustack/media.rb
207
212
  - lib/ruby_llm/providers/gpustack/models.rb
208
213
  - lib/ruby_llm/providers/mistral.rb
209
214
  - lib/ruby_llm/providers/mistral/capabilities.rb
@@ -213,6 +218,7 @@ files:
213
218
  - lib/ruby_llm/providers/ollama.rb
214
219
  - lib/ruby_llm/providers/ollama/chat.rb
215
220
  - lib/ruby_llm/providers/ollama/media.rb
221
+ - lib/ruby_llm/providers/ollama/models.rb
216
222
  - lib/ruby_llm/providers/openai.rb
217
223
  - lib/ruby_llm/providers/openai/capabilities.rb
218
224
  - lib/ruby_llm/providers/openai/chat.rb
@@ -231,6 +237,11 @@ files:
231
237
  - lib/ruby_llm/providers/perplexity/capabilities.rb
232
238
  - lib/ruby_llm/providers/perplexity/chat.rb
233
239
  - lib/ruby_llm/providers/perplexity/models.rb
240
+ - lib/ruby_llm/providers/vertexai.rb
241
+ - lib/ruby_llm/providers/vertexai/chat.rb
242
+ - lib/ruby_llm/providers/vertexai/embeddings.rb
243
+ - lib/ruby_llm/providers/vertexai/models.rb
244
+ - lib/ruby_llm/providers/vertexai/streaming.rb
234
245
  - lib/ruby_llm/railtie.rb
235
246
  - lib/ruby_llm/stream_accumulator.rb
236
247
  - lib/ruby_llm/streaming.rb
@@ -240,9 +251,7 @@ files:
240
251
  - lib/ruby_llm/version.rb
241
252
  - lib/ruby_llm_community.rb
242
253
  - lib/shims/ruby_llm.rb
243
- - lib/tasks/aliases.rake
244
- - lib/tasks/models_docs.rake
245
- - lib/tasks/models_update.rake
254
+ - lib/tasks/models.rake
246
255
  - lib/tasks/release.rake
247
256
  - lib/tasks/vcr.rake
248
257
  homepage: https://rubyllm.com