ruby_llm 0.1.0.pre44 → 0.1.0.pre46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 519b87700f2ba3d5aeb8ea3a87d0881f4ef58f974991647b7af4c9f138cbf3d2
-  data.tar.gz: 553b6441ad59fe5efe6d51374103690101c2cb8d072f91bf2a850d84b4934601
+  metadata.gz: 047c20a013a29b030be1c93064a7d1545c570f56845e32c864d20a3fe501a964
+  data.tar.gz: f5bfc01bc51e19a6a2d197ae4e9ac50e0fb36757850552a028466a4fcdb35dce
 SHA512:
-  metadata.gz: e52cb96479a0f4443521bf3b5b2e98c882fb1c404e3d1527c74f638137bf56c2cb9442cc6e30ef197157fbdaa7dc189b7f6331a31e6f31ddfa28a926035d6578
-  data.tar.gz: b1d7948851e6c1ffa5257e214f01e6d6efaf64d32d98dcc66ec8f5d6776df232a92b3b26ff1de78889f4195ff27e41dc7375a4b2530dc1b97edc8fd57bb2c835
+  metadata.gz: 9888bfab59a6a366dd8b4139586f1fa01ed5fb05a93b0256102b2a5ac3bba3bfb912f48f911f050c2e23a0d432dcc1eaa5d5a3044d1e5affe8f18fd7ac21618e
+  data.tar.gz: dd8970bc6f942e5a523b545f9bc09d82e98d65228d081589273509f8deb7cd95ba3adda33cece93498bcf2e045b69b6decfb8aebd42e56ed52f6ca69057c3d59
data/.rspec_status CHANGED
@@ -35,7 +35,16 @@ example_id | status | run_time |
 ./spec/ruby_llm/embeddings_spec.rb[1:1:2:1] | passed | 0.65614 seconds |
 ./spec/ruby_llm/embeddings_spec.rb[1:1:2:2] | passed | 2.16 seconds |
 ./spec/ruby_llm/error_handling_spec.rb[1:1] | passed | 0.29366 seconds |
-./spec/ruby_llm/image_generation_spec.rb[1:1:1] | passed | 24.16 seconds |
-./spec/ruby_llm/image_generation_spec.rb[1:1:2] | passed | 14.81 seconds |
-./spec/ruby_llm/image_generation_spec.rb[1:1:3] | passed | 9.17 seconds |
-./spec/ruby_llm/image_generation_spec.rb[1:1:4] | passed | 0.00083 seconds |
+./spec/ruby_llm/image_generation_spec.rb[1:1:1] | passed | 14.16 seconds |
+./spec/ruby_llm/image_generation_spec.rb[1:1:2] | passed | 16.22 seconds |
+./spec/ruby_llm/image_generation_spec.rb[1:1:3] | passed | 9.1 seconds |
+./spec/ruby_llm/image_generation_spec.rb[1:1:4] | passed | 0.00138 seconds |
+./spec/ruby_llm/models_spec.rb[1:1:1] | passed | 0.01071 seconds |
+./spec/ruby_llm/models_spec.rb[1:1:2] | passed | 0.00056 seconds |
+./spec/ruby_llm/models_spec.rb[1:1:3] | passed | 0.00336 seconds |
+./spec/ruby_llm/models_spec.rb[1:2:1] | passed | 0.00016 seconds |
+./spec/ruby_llm/models_spec.rb[1:2:2] | passed | 0.00085 seconds |
+./spec/ruby_llm/models_spec.rb[1:3:1] | passed | 1.44 seconds |
+./spec/ruby_llm/models_spec.rb[1:3:2] | passed | 1.23 seconds |
+./spec/ruby_llm/models_spec.rb[1:4:1] | passed | 0.0003 seconds |
+./spec/ruby_llm/models_spec.rb[1:4:2] | passed | 0.00175 seconds |
data/lib/ruby_llm/models.json CHANGED
@@ -168,7 +168,7 @@
   },
   {
     "id": "claude-3-7-sonnet-20250219",
-    "created_at": "2025-02-19T00:00:00Z",
+    "created_at": "2025-02-24T00:00:00Z",
     "display_name": "Claude 3.7 Sonnet",
     "provider": "anthropic",
     "context_window": 200000,
@@ -883,6 +883,56 @@
       ]
     }
   },
+  {
+    "id": "gemini-2.0-flash-lite-preview",
+    "created_at": null,
+    "display_name": "Gemini 2.0 Flash-Lite Preview",
+    "provider": "gemini",
+    "context_window": 1048576,
+    "max_tokens": 8192,
+    "type": "chat",
+    "family": "gemini20_flash_lite",
+    "supports_vision": true,
+    "supports_functions": false,
+    "supports_json_mode": false,
+    "input_price_per_million": 0.075,
+    "output_price_per_million": 0.3,
+    "metadata": {
+      "version": "preview-02-05",
+      "description": "Preview release (February 5th, 2025) of Gemini 2.0 Flash Lite",
+      "input_token_limit": 1048576,
+      "output_token_limit": 8192,
+      "supported_generation_methods": [
+        "generateContent",
+        "countTokens"
+      ]
+    }
+  },
+  {
+    "id": "gemini-2.0-flash-lite-preview-02-05",
+    "created_at": null,
+    "display_name": "Gemini 2.0 Flash-Lite Preview 02-05",
+    "provider": "gemini",
+    "context_window": 1048576,
+    "max_tokens": 8192,
+    "type": "chat",
+    "family": "gemini20_flash_lite",
+    "supports_vision": true,
+    "supports_functions": false,
+    "supports_json_mode": false,
+    "input_price_per_million": 0.075,
+    "output_price_per_million": 0.3,
+    "metadata": {
+      "version": "preview-02-05",
+      "description": "Preview release (February 5th, 2025) of Gemini 2.0 Flash Lite",
+      "input_token_limit": 1048576,
+      "output_token_limit": 8192,
+      "supported_generation_methods": [
+        "generateContent",
+        "countTokens"
+      ]
+    }
+  },
   {
     "id": "gemini-2.0-flash-thinking-exp",
     "created_at": null,
@@ -1305,6 +1355,44 @@
       "owned_by": "system"
     }
   },
+  {
+    "id": "gpt-4.5-preview",
+    "created_at": "2025-02-27T03:24:19+01:00",
+    "display_name": "GPT-4.5 Preview",
+    "provider": "openai",
+    "context_window": 4096,
+    "max_tokens": 4096,
+    "type": "chat",
+    "family": "gpt4",
+    "supports_vision": false,
+    "supports_functions": true,
+    "supports_json_mode": false,
+    "input_price_per_million": 0.5,
+    "output_price_per_million": 1.5,
+    "metadata": {
+      "object": "model",
+      "owned_by": "system"
+    }
+  },
+  {
+    "id": "gpt-4.5-preview-2025-02-27",
+    "created_at": "2025-02-27T03:28:24+01:00",
+    "display_name": "GPT-4.5 Preview 20250227",
+    "provider": "openai",
+    "context_window": 4096,
+    "max_tokens": 4096,
+    "type": "chat",
+    "family": "gpt4",
+    "supports_vision": false,
+    "supports_functions": true,
+    "supports_json_mode": false,
+    "input_price_per_million": 0.5,
+    "output_price_per_million": 1.5,
+    "metadata": {
+      "object": "model",
+      "owned_by": "system"
+    }
+  },
   {
     "id": "gpt-4o",
     "created_at": "2024-05-10T20:50:49+02:00",
data/lib/ruby_llm/models.rb CHANGED
@@ -5,55 +5,108 @@ module RubyLLM
   # to discover and work with models from different providers.
   #
   # Example:
-  #   RubyLLM.models.all # All available models
-  #   RubyLLM.models.chat_models # Models that support chat
-  #   RubyLLM.models.find('claude-3') # Get info about a specific model
-  module Models
-    module_function
+  #   RubyLLM.models.all # All available models
+  #   RubyLLM.models.chat_models # Models that support chat
+  #   RubyLLM.models.by_provider('openai').chat_models # OpenAI chat models
+  #   RubyLLM.models.find('claude-3') # Get info about a specific model
+  class Models
+    include Enumerable
 
-    def provider_for(model)
+    def self.instance
+      @instance ||= new
+    end
+
+    def self.provider_for(model)
       Provider.for(model)
     end
 
-    def all
-      @all ||= begin
-        data = JSON.parse(File.read(File.expand_path('models.json', __dir__)))
-        data.map { |model| ModelInfo.new(model.transform_keys(&:to_sym)) }
+    # Class method to refresh model data
+    def self.refresh!
+      models = RubyLLM.providers.flat_map(&:list_models).sort_by(&:id)
+      # Write to models.json
+      File.write(File.expand_path('models.json', __dir__), JSON.pretty_generate(models.map(&:to_h)))
+      @instance = new(models)
+    end
+
+    # Delegate class methods to the singleton instance
+    class << self
+      def method_missing(method, ...)
+        if instance.respond_to?(method)
+          instance.send(method, ...)
+        else
+          super
+        end
+      end
+
+      def respond_to_missing?(method, include_private = false)
+        instance.respond_to?(method, include_private) || super
       end
+    end
+
+    # Initialize with optional pre-filtered models
+    def initialize(models = nil)
+      @models = models || load_models
+    end
+
+    # Load models from the JSON file
+    def load_models
+      data = JSON.parse(File.read(File.expand_path('models.json', __dir__)))
+      data.map { |model| ModelInfo.new(model.transform_keys(&:to_sym)) }
     rescue Errno::ENOENT
       [] # Return empty array if file doesn't exist yet
     end
 
+    # Return all models in the collection
+    def all
+      @models
+    end
+
+    # Allow enumeration over all models
+    def each(&)
+      all.each(&)
+    end
+
+    # Find a specific model by ID
     def find(model_id)
-      all.find { |m| m.id == model_id } or raise ModelNotFoundError, "Unknown model: #{model_id}"
+      all.find { |m| m.id == model_id } or
+        raise ModelNotFoundError, "Unknown model: #{model_id}"
     end
 
+    # Filter to only chat models
    def chat_models
-      all.select { |m| m.type == 'chat' }
+      self.class.new(all.select { |m| m.type == 'chat' })
     end
 
+    # Filter to only embedding models
     def embedding_models
-      all.select { |m| m.type == 'embedding' }
+      self.class.new(all.select { |m| m.type == 'embedding' })
     end
 
+    # Filter to only audio models
     def audio_models
-      all.select { |m| m.type == 'audio' }
+      self.class.new(all.select { |m| m.type == 'audio' })
     end
 
+    # Filter to only image models
     def image_models
-      all.select { |m| m.type == 'image' }
+      self.class.new(all.select { |m| m.type == 'image' })
     end
 
+    # Filter models by family
     def by_family(family)
-      all.select { |m| m.family == family }
+      self.class.new(all.select { |m| m.family == family.to_s })
     end
 
-    def default_model
-      'gpt-4o-mini'
+    # Filter models by provider
+    def by_provider(provider)
+      self.class.new(all.select { |m| m.provider == provider.to_s })
     end
 
+    # Instance method to refresh models
     def refresh!
-      @all = RubyLLM.providers.flat_map(&:list_models).sort_by(&:id)
+      self.class.refresh!
+      # Return self for method chaining
+      self
     end
   end
 end
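
The hunk above turns Models from a module of module_functions into an Enumerable class backed by a singleton instance, with filters that return new Models collections instead of plain arrays. A minimal usage sketch based only on the methods shown in the diff:

    require 'ruby_llm'

    # Filters return Models collections, so they chain and enumerate.
    openai_chat = RubyLLM.models.by_provider('openai').chat_models
    openai_chat.each { |m| puts m.id }
    puts openai_chat.count # Enumerable methods come from include Enumerable

    # Class-level calls are delegated to the singleton through method_missing,
    # so this is equivalent to RubyLLM.models.find(...).
    RubyLLM::Models.find('claude-3-7-sonnet-20250219')
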
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '0.1.0.pre44'
+  VERSION = '0.1.0.pre46'
 end
data/lib/ruby_llm.rb CHANGED
@@ -39,7 +39,7 @@ module RubyLLM
   end
 
   def models
-    Models
+    Models.instance
   end
 
   def providers
@@ -70,21 +70,18 @@ namespace :models do # rubocop:disable Metrics/BlockLength
     RubyLLM.configure do |config|
       config.openai_api_key = ENV.fetch('OPENAI_API_KEY')
       config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY')
-      config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
-      config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+      config.gemini_api_key = ENV.fetch('GEMINI_API_KEY')
+      config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY')
     end
 
-    # Get all models
-    models = RubyLLM.models.refresh!
-
-    # Write to models.json
-    models_file = File.expand_path('../../lib/ruby_llm/models.json', __dir__)
-    File.write(models_file, JSON.pretty_generate(models.map(&:to_h)))
+    # Refresh models (now returns self instead of models array)
+    models = RubyLLM.models.refresh!.all
 
     puts "Updated models.json with #{models.size} models:"
     RubyLLM::Provider.providers.each do |provider_sym, provider_module|
       provider_name = provider_module.to_s.split('::').last
-      puts "#{provider_name} models: #{models.count { |m| m.provider == provider_sym.to_s }}"
+      provider_models = models.select { |m| m.provider == provider_sym.to_s }
+      puts "#{provider_name} models: #{provider_models.size}"
     end
   end
 
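
Because refresh! now writes models.json and returns self, the refreshed collection is read back with .all, exactly as the task above does. A small sketch of the same flow; the per-provider count is rewritten against the new by_provider filter purely as an illustrative alternative to the select used in the task, and it assumes provider API keys are configured as shown above:

    require 'ruby_llm'

    # Refresh the registry and keep the resulting models (needs configured API keys).
    models = RubyLLM.models.refresh!.all
    puts "Updated models.json with #{models.size} models"

    # Count per provider using the new chainable filter instead of select.
    RubyLLM::Provider.providers.each_key do |provider_sym|
      puts "#{provider_sym}: #{RubyLLM.models.by_provider(provider_sym).count}"
    end
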
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 0.1.0.pre44
+  version: 0.1.0.pre46
 platform: ruby
 authors:
 - Carmine Paolino