llms 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +7 -0
  2. data/LICENSE +21 -0
  3. data/README.md +160 -0
  4. data/bin/llms-chat +6 -0
  5. data/bin/llms-test-model-access +4 -0
  6. data/bin/llms-test-model-image-support +4 -0
  7. data/bin/llms-test-model-prompt-caching +4 -0
  8. data/bin/llms-test-model-tool-use +5 -0
  9. data/lib/llms/adapters/anthropic_message_adapter.rb +73 -0
  10. data/lib/llms/adapters/anthropic_tool_call_adapter.rb +20 -0
  11. data/lib/llms/adapters/base_message_adapter.rb +60 -0
  12. data/lib/llms/adapters/google_gemini_message_adapter.rb +72 -0
  13. data/lib/llms/adapters/google_gemini_tool_call_adapter.rb +20 -0
  14. data/lib/llms/adapters/open_ai_compatible_message_adapter.rb +88 -0
  15. data/lib/llms/adapters/open_ai_compatible_tool_call_adapter.rb +67 -0
  16. data/lib/llms/adapters.rb +12 -0
  17. data/lib/llms/apis/google_gemini_api.rb +45 -0
  18. data/lib/llms/apis/open_ai_compatible_api.rb +54 -0
  19. data/lib/llms/cli/base.rb +186 -0
  20. data/lib/llms/cli/chat.rb +92 -0
  21. data/lib/llms/cli/test_access.rb +79 -0
  22. data/lib/llms/cli/test_image_support.rb +92 -0
  23. data/lib/llms/cli/test_prompt_caching.rb +275 -0
  24. data/lib/llms/cli/test_tool_use.rb +108 -0
  25. data/lib/llms/cli.rb +12 -0
  26. data/lib/llms/conversation.rb +100 -0
  27. data/lib/llms/conversation_message.rb +60 -0
  28. data/lib/llms/conversation_tool_call.rb +14 -0
  29. data/lib/llms/conversation_tool_result.rb +15 -0
  30. data/lib/llms/exceptions.rb +33 -0
  31. data/lib/llms/executors/anthropic_executor.rb +247 -0
  32. data/lib/llms/executors/base_executor.rb +144 -0
  33. data/lib/llms/executors/google_gemini_executor.rb +212 -0
  34. data/lib/llms/executors/hugging_face_executor.rb +17 -0
  35. data/lib/llms/executors/open_ai_compatible_executor.rb +209 -0
  36. data/lib/llms/executors.rb +52 -0
  37. data/lib/llms/models/model.rb +86 -0
  38. data/lib/llms/models/provider.rb +48 -0
  39. data/lib/llms/models.rb +187 -0
  40. data/lib/llms/parsers/anthropic_chat_response_stream_parser.rb +184 -0
  41. data/lib/llms/parsers/google_gemini_chat_response_stream_parser.rb +128 -0
  42. data/lib/llms/parsers/open_ai_compatible_chat_response_stream_parser.rb +170 -0
  43. data/lib/llms/parsers/partial_json_parser.rb +77 -0
  44. data/lib/llms/parsers/sse_chat_response_stream_parser.rb +72 -0
  45. data/lib/llms/public_models.json +607 -0
  46. data/lib/llms/stream/event_emitter.rb +48 -0
  47. data/lib/llms/stream/events.rb +104 -0
  48. data/lib/llms/usage/cost_calculator.rb +75 -0
  49. data/lib/llms/usage/usage_data.rb +46 -0
  50. data/lib/llms.rb +16 -0
  51. metadata +243 -0
@@ -0,0 +1,209 @@
1
+ require_relative 'base_executor'
2
+ require_relative '../apis/open_ai_compatible_api'
3
+ require_relative '../parsers/open_ai_compatible_chat_response_stream_parser'
4
+ require_relative '../adapters/open_ai_compatible_message_adapter'
5
+
6
+ module LLMs
7
+ module Executors
8
+ class OpenAICompatibleExecutor < BaseExecutor
9
+
10
+ def execute_conversation(conversation, &block)
11
+ if block_given?
12
+ stream_conversation(conversation) do |handler|
13
+ handler.on(:text_delta) do |event|
14
+ yield event.text
15
+ end
16
+ end
17
+ else
18
+ send_conversation(conversation)
19
+ end
20
+ end
21
+
22
+ def stream_conversation(conversation)
23
+ init_new_request(conversation)
24
+
25
+ emitter = Stream::EventEmitter.new
26
+ yield emitter if block_given?
27
+
28
+ start_time = Time.now
29
+ begin
30
+ http_response, stream_parsed_response = stream_client_request(emitter)
31
+ rescue StandardError => e
32
+ @last_error = {'error' => e.message, 'backtrace' => e.backtrace}
33
+ return nil
34
+ end
35
+ execution_time = Time.now - start_time
36
+
37
+ if http_response && (http_response['error'] || http_response['errors'])
38
+ @last_error = http_response
39
+ return nil
40
+ end
41
+
42
+ response_data = stream_parsed_response || http_response
43
+
44
+ @last_received_message_id = LLMs::Adapters::OpenAICompatibleMessageAdapter.find_message_id(response_data)
45
+ @last_received_message = LLMs::Adapters::OpenAICompatibleMessageAdapter.message_from_api_format(response_data)
46
+ @last_usage_data = calculate_usage(response_data, execution_time)
47
+
48
+ @last_received_message
49
+ end
50
+
51
+ def send_conversation(conversation)
52
+ init_new_request(conversation)
53
+
54
+ start_time = Time.now
55
+ begin
56
+ http_response = client_request
57
+ rescue StandardError => e
58
+ @last_error = {'error' => e.message, 'backtrace' => e.backtrace}
59
+ @last_usage_data = nil
60
+ @last_received_message = nil
61
+ return nil
62
+ end
63
+ execution_time = Time.now - start_time
64
+
65
+ if http_response && (http_response['error'] || http_response['errors'])
66
+ @last_error = http_response
67
+ return nil
68
+ end
69
+
70
+ @last_received_message_id = LLMs::Adapters::OpenAICompatibleMessageAdapter.find_message_id(http_response)
71
+ @last_received_message = LLMs::Adapters::OpenAICompatibleMessageAdapter.message_from_api_format(http_response)
72
+ @last_usage_data = calculate_usage(http_response, execution_time)
73
+
74
+ @last_received_message
75
+ end
76
+
77
+ private
78
+
79
+ def init_new_request(conversation)
80
+ @last_sent_message = conversation.last_message
81
+ @last_received_message_id = nil
82
+ @last_received_message = nil
83
+ @last_usage_data = nil
84
+ @last_error = nil
85
+
86
+ # need to flatten array since adapter can return array of messages for tool results
87
+ @formatted_messages = conversation.messages(include_system_message: true).flat_map do |message|
88
+ LLMs::Adapters::OpenAICompatibleMessageAdapter.to_api_format(message)
89
+ end
90
+
91
+ @available_tools = conversation.available_tools
92
+ end
93
+
94
+ def client_request
95
+ params = request_params
96
+ params[:stream] = false
97
+ @client.chat_completion(@model_name, @formatted_messages, params)
98
+ end
99
+
100
+ def stream_client_request(emitter)
101
+ parser = Parsers::OpenAICompatibleChatResponseStreamParser.new(emitter)
102
+
103
+ params = request_params(true).merge(stream: Proc.new { |chunk| parser.add_data(chunk) })
104
+ http_response = @client.chat_completion(@model_name, @formatted_messages, params)
105
+
106
+ [http_response, parser.full_response]
107
+ end
108
+
109
+ def request_params(is_stream = false)
110
+ {temperature: @temperature}.tap do |params|
111
+
112
+ if param_ok?(:max_tokens) && @max_tokens
113
+ params[:max_tokens] = @max_tokens
114
+ end
115
+
116
+ ## Will override max_tokens if both are provided
117
+ if param_ok?(:max_completion_tokens) && @max_completion_tokens
118
+ params[:max_completion_tokens] = @max_completion_tokens
119
+ end
120
+
121
+ if @thinking_effort
122
+ params[:reasoning_effort] = @thinking_effort
123
+ end
124
+
125
+ if @available_tools && @available_tools.any?
126
+ params[:tools] = tool_schemas
127
+ end
128
+
129
+ if is_stream && param_ok?(:stream_options)
130
+ params[:stream_options] = {
131
+ include_usage: true
132
+ }
133
+ end
134
+ end
135
+ end
136
+
137
+ def param_ok?(param_name)
138
+ !@exclude_params&.find { |param| param.to_s == param_name.to_s }
139
+ end
140
+
141
+ def initialize_client
142
+ if @base_url.nil? || @base_url.empty?
143
+ raise "base_url required for OpenAICompatibleExecutor"
144
+ end
145
+
146
+ @client = LLMs::APIs::OpenAICompatibleAPI.new(fetch_api_key, @base_url)
147
+ end
148
+
149
+ def calculate_usage(response, execution_time)
150
+ input_tokens = nil
151
+ output_tokens = nil
152
+ cache_was_written = nil
153
+ cache_was_read = nil
154
+ token_counts = {}
155
+
156
+ if !response.nil? && usage = response['usage']
157
+ input_tokens = 0
158
+ output_tokens = 0
159
+ cache_was_read = false
160
+
161
+ if pt = usage['prompt_tokens']
162
+ input_tokens += pt
163
+ token_counts[:input] = pt
164
+ end
165
+
166
+ if ptd = usage['prompt_tokens_details']
167
+ if ct = ptd['cached_tokens']
168
+ if ct > 0
169
+ cache_was_read = true
170
+ end
171
+ token_counts[:cached_input] = ct
172
+ token_counts[:input] -= ct ## TODO confirm this is correct
173
+ end
174
+ end
175
+
176
+ if ct = usage['completion_tokens']
177
+ output_tokens += ct
178
+ token_counts[:output] = ct
179
+ end
180
+ end
181
+
182
+ {
183
+ input_tokens: input_tokens,
184
+ output_tokens: output_tokens,
185
+ cache_was_written: cache_was_written,
186
+ cache_was_read: cache_was_read,
187
+ token_details: token_counts,
188
+ execution_time: execution_time,
189
+ estimated_cost: calculate_cost(token_counts)
190
+ }
191
+ end
192
+
193
+ ## TODO move to adapter
194
+ def tool_schemas
195
+ @available_tools.map do |tool|
196
+ {
197
+ type: 'function',
198
+ function: {
199
+ name: tool.tool_schema[:name],
200
+ description: tool.tool_schema[:description],
201
+ parameters: tool.tool_schema[:parameters],
202
+ }
203
+ }
204
+ end
205
+ end
206
+
207
+ end
208
+ end
209
+ end
@@ -0,0 +1,52 @@
1
+ require_relative 'models'
2
+ require_relative 'executors/base_executor'
3
+ require_relative 'executors/anthropic_executor'
4
+ require_relative 'executors/google_gemini_executor'
5
+ require_relative 'executors/open_ai_compatible_executor'
6
+ require_relative 'executors/hugging_face_executor'
7
+
8
module LLMs
  module Executors

    # Builds and returns an executor instance for the requested model.
    #
    # When :oac_base_url is given, an OpenAICompatibleExecutor is constructed
    # directly against that endpoint (bypassing the model registry), using the
    # oac_* credential params. Otherwise the model is resolved via the
    # Models registry, which determines the executor class, base URL,
    # credentials env var, pricing, and excluded params.
    #
    # Raises ArgumentError when no model name is given or the model is unknown.
    def self.instance(**params)
      requested_name = params[:model_name]
      raise ArgumentError, "No model name provided" if requested_name.nil?

      if params[:oac_base_url]
        # Explicit OpenAI-compatible endpoint: registry lookup is skipped.
        OpenAICompatibleExecutor.new(**params.merge(
          model_name: requested_name,
          base_url: params[:oac_base_url],
          api_key: params[:oac_api_key],
          api_key_env_var: params[:oac_api_key_env_var],
          pricing: params[:pricing],
          exclude_params: params[:exclude_params]
        ))
      else
        model = Models.find_model(requested_name)
        raise ArgumentError, "Unknown model: #{requested_name}" if model.nil?

        provider = model.provider
        executor_class = LLMs::Executors.const_get(provider.executor_class_name)

        executor_class.new(**params.merge(
          model_name: model.model_name,
          base_url: provider.base_url,
          api_key: params[:api_key],
          api_key_env_var: provider.api_key_env_var,
          pricing: model.pricing,
          exclude_params: provider.exclude_params
        ))
      end
    end

  end
end
@@ -0,0 +1,86 @@
1
module LLMs
  module Models
    # Metadata for a single model offered by a Provider: pricing (per million
    # tokens) and tri-state capability flags (true / false / nil = unknown).
    class Model
      attr_reader :model_name, :provider, :pricing, :supports_tools, :supports_vision, :supports_thinking, :enabled
      # Writer required so Models.disable_model / Models.enable_model can
      # toggle availability at runtime (previously raised NoMethodError).
      attr_writer :enabled

      # pricing keys are normalized to symbols (:input, :output, :cache_read,
      # :cache_write). Capability flags left nil mean "unknown" and defer to
      # the provider-level flags.
      def initialize(model_name, provider, pricing: nil, supports_tools: nil, supports_vision: nil, supports_thinking: nil, enabled: nil)
        @model_name = model_name.to_s
        @provider = provider
        @pricing = pricing&.transform_keys(&:to_sym)
        @supports_tools = supports_tools
        @supports_vision = supports_vision
        @supports_thinking = supports_thinking
        @enabled = enabled
      end

      # "provider_name:model_name" — the globally unambiguous identifier.
      def full_name
        "#{@provider.provider_name}:#{@model_name}"
      end

      # Tri-state capability checks: "possibly" means not explicitly denied
      # at either level; "certainly" means explicitly affirmed at the most
      # specific level that has an opinion.
      def possibly_supports_tools?
        @provider.possibly_supports_tools? && (@supports_tools != false)
      end

      def certainly_supports_tools?
        (
          @provider.certainly_supports_tools? && (@supports_tools != false)
        ) || (
          @provider.possibly_supports_tools? && (@supports_tools == true)
        )
      end

      def possibly_supports_vision?
        @provider.possibly_supports_vision? && (@supports_vision != false)
      end

      def certainly_supports_vision?
        (
          @provider.certainly_supports_vision? && (@supports_vision != false)
        ) || (
          @provider.possibly_supports_vision? && (@supports_vision == true)
        )
      end

      def possibly_supports_thinking?
        @provider.possibly_supports_thinking? && (@supports_thinking != false)
      end

      def certainly_supports_thinking?
        (
          @provider.certainly_supports_thinking? && (@supports_thinking != false)
        ) || (
          @provider.possibly_supports_thinking? && (@supports_thinking == true)
        )
      end

      # Enabled unless explicitly disabled at either the provider or model level.
      def is_enabled?
        @provider.is_enabled? && (@enabled != false)
      end

      # Cost in currency units for the given token counts. Pricing values are
      # per million tokens; missing pricing entries contribute zero.
      def calculate_cost(input_tokens, output_tokens, cache_read_tokens = 0, cache_write_tokens = 0)
        # @pricing may be nil (not provided at registration) or empty; both
        # mean "unknown pricing" and cost 0.0. The nil check fixes a
        # NoMethodError previously raised for models without pricing.
        return 0.0 if @pricing.nil? || @pricing.empty?

        cost = 0.0

        if input_tokens && input_tokens > 0 && @pricing[:input]
          cost += (input_tokens / 1_000_000.0) * @pricing[:input]
        end

        if output_tokens && output_tokens > 0 && @pricing[:output]
          cost += (output_tokens / 1_000_000.0) * @pricing[:output]
        end

        if cache_read_tokens && cache_read_tokens > 0 && @pricing[:cache_read]
          cost += (cache_read_tokens / 1_000_000.0) * @pricing[:cache_read]
        end

        if cache_write_tokens && cache_write_tokens > 0 && @pricing[:cache_write]
          cost += (cache_write_tokens / 1_000_000.0) * @pricing[:cache_write]
        end

        cost
      end

    end
  end
end
@@ -0,0 +1,48 @@
1
module LLMs
  module Models
    # Provider-level configuration: which executor class to use, endpoint and
    # credentials env var, plus tri-state capability defaults that individual
    # models inherit (true / false / nil = unknown).
    class Provider
      attr_reader :provider_name, :executor_class_name, :base_url, :api_key_env_var, :supports_tools, :supports_vision, :supports_thinking, :enabled, :exclude_params
      # Writer required so Models.disable_provider / Models.enable_provider
      # can toggle availability at runtime (previously raised NoMethodError).
      attr_writer :enabled

      def initialize(provider_name, executor_class_name, base_url: nil, api_key_env_var: nil, supports_tools: nil, supports_vision: nil, supports_thinking: nil, enabled: nil, exclude_params: nil)
        @provider_name = provider_name.to_s
        @executor_class_name = executor_class_name.to_s
        @base_url = base_url
        @api_key_env_var = api_key_env_var
        @supports_tools = supports_tools
        @supports_vision = supports_vision
        @supports_thinking = supports_thinking
        @enabled = enabled
        @exclude_params = exclude_params
      end

      # "possibly" = not explicitly false (nil counts as possible);
      # "certainly" = explicitly true.
      def possibly_supports_tools?
        @supports_tools != false
      end

      def certainly_supports_tools?
        @supports_tools == true
      end

      def possibly_supports_vision?
        @supports_vision != false
      end

      def certainly_supports_vision?
        @supports_vision == true
      end

      def possibly_supports_thinking?
        @supports_thinking != false
      end

      def certainly_supports_thinking?
        @supports_thinking == true
      end

      # Enabled unless explicitly disabled (nil counts as enabled).
      def is_enabled?
        @enabled != false
      end

    end
  end
end
@@ -0,0 +1,187 @@
1
require 'json'
require 'set'
require_relative 'models/provider'
require_relative 'models/model'
4
+
5
module LLMs
  # Registry of providers and models, loaded from public_models.json and
  # extensible at runtime via register_provider / register_model / add_model.
  module Models

    DEFAULT_MODEL = 'claude-sonnet-4-0'.freeze

    PROVIDER_REGISTRY = {}            # provider_name => Provider
    PROVIDER_TO_MODEL_REGISTRY = {}   # provider_name => { model_name => Model }
    MODEL_TO_PROVIDER_REGISTRY = {}   # model_name => Set[provider_name]
    ALIAS_REGISTRY = {}               # alias => canonical model_name

    # Registers a model under an existing provider. Raises if the provider is
    # unknown or an alias is already taken. Returns the Model.
    def self.register_model(provider_name, model_name, pricing: nil, tools: nil, vision: nil, thinking: nil, enabled: nil, aliases: nil)
      provider = PROVIDER_REGISTRY[provider_name.to_s]
      raise "Unknown provider: #{provider_name}" unless provider

      model = LLMs::Models::Model.new(
        model_name,
        provider,
        pricing:,
        supports_tools: tools,
        supports_vision: vision,
        supports_thinking: thinking,
        enabled: enabled
      )

      PROVIDER_TO_MODEL_REGISTRY[provider.provider_name] ||= {}
      PROVIDER_TO_MODEL_REGISTRY[provider.provider_name][model.model_name] = model

      MODEL_TO_PROVIDER_REGISTRY[model.model_name] ||= Set.new
      MODEL_TO_PROVIDER_REGISTRY[model.model_name] << provider.provider_name

      if aliases
        aliases.each do |alias_name|
          if aliased_model_name = ALIAS_REGISTRY[alias_name]
            raise "Alias #{alias_name} already registered for #{aliased_model_name}"
          end
          ALIAS_REGISTRY[alias_name] = model.model_name
        end
      end

      model
    end

    # Registers (or replaces) a provider. Returns the Provider.
    def self.register_provider(provider_name, executor_class_name, base_url: nil, api_key_env_var: nil, tools: nil, vision: nil, thinking: nil, enabled: nil, exclude_params: nil)
      provider = LLMs::Models::Provider.new(
        provider_name,
        executor_class_name,
        base_url:,
        api_key_env_var:,
        supports_tools: tools,
        supports_vision: vision,
        supports_thinking: thinking,
        enabled:,
        exclude_params:
      )

      PROVIDER_REGISTRY[provider.provider_name] = provider
    end

    def self.disable_model(provider_name, model_name)
      set_model_enabled(provider_name, model_name, false)
    end

    def self.enable_model(provider_name, model_name)
      set_model_enabled(provider_name, model_name, true)
    end

    def self.disable_provider(provider_name)
      set_provider_enabled(provider_name, false)
    end

    def self.enable_provider(provider_name)
      set_provider_enabled(provider_name, true)
    end

    # Convenience: registers a provider and one model in a single call.
    def self.add_model(provider_name, model_name, **details)
      executor_class_name = details[:executor]
      provider = register_provider(
        provider_name, executor_class_name,
        **details.slice(:base_url, :api_key_env_var, :exclude_params)
      )
      register_model(
        provider.provider_name, model_name,
        **details.slice(:pricing, :tools, :vision, :thinking, :enabled, :aliases)
      )
    end

    # Loads providers and their models from a JSON file keyed by provider name.
    def self.load_models_file(file_path)
      JSON.parse(File.read(file_path)).each do |provider_name, info|
        executor_class_name = info['executor']
        params = info.slice('base_url', 'api_key_env_var', 'tools', 'vision', 'thinking', 'enabled', 'exclude_params').transform_keys(&:to_sym)
        register_provider(provider_name, executor_class_name, **params)

        info['models'].each do |model_name, model_info|
          params = model_info.slice('pricing', 'tools', 'vision', 'thinking', 'enabled', 'aliases').transform_keys(&:to_sym)
          register_model(provider_name, model_name, **params)
        end
      end
    end

    # Populate the registries from the bundled public model catalog.
    load_models_file(File.join(File.dirname(__FILE__), 'public_models.json'))

    # Resolves a model by alias or name. If the bare name is ambiguous or
    # unknown, a "provider:model" qualified name is tried. Returns nil when
    # nothing matches (or the match is disabled and include_disabled is false).
    def self.find_model(model_name, include_disabled = false)
      lookup_model_name = (ALIAS_REGISTRY[model_name] || model_name).to_s

      candidate_providers = MODEL_TO_PROVIDER_REGISTRY[lookup_model_name].to_a

      if 1 == candidate_providers.size
        find_model_for_provider(candidate_providers[0], lookup_model_name, include_disabled)
      elsif candidate_providers.size > 1
        raise "Multiple providers match #{model_name}: #{candidate_providers.join(', ')}"
      else
        # to_s keeps the qualified-name fallback working for symbol input.
        name_str = model_name.to_s
        if name_str.include?(':')
          provider_part, model_name_part = name_str.split(':', 2)
          find_model_for_provider(provider_part, model_name_part, include_disabled)
        else
          nil
        end
      end
    end

    # Looks up a model under a specific provider. Raises on unknown provider;
    # returns nil when the provider/model is disabled (unless include_disabled)
    # or the model does not exist.
    def self.find_model_for_provider(provider_name, model_name, include_disabled = false)
      provider = PROVIDER_REGISTRY[provider_name.to_s]
      raise "Unknown provider: #{provider_name}" unless provider

      return nil unless provider.is_enabled? || include_disabled

      # dig avoids NoMethodError when the provider has no registered models.
      model = PROVIDER_TO_MODEL_REGISTRY.dig(provider.provider_name, model_name)
      if !model.nil? && (model.is_enabled? || include_disabled)
        model
      else
        nil
      end
    end

    # Lists model names, optionally filtered by capability requirements.
    # Provider filters use "possibly supports" (not explicitly denied);
    # model filters use "certainly supports" (explicitly affirmed).
    def self.list_model_names(full: true, require_tools: false, require_vision: false, require_thinking: false, include_disabled: false)
      ok_model_names = []
      PROVIDER_REGISTRY.each do |provider_name, provider|
        provider_ok_for_enabled = include_disabled || provider.is_enabled?
        provider_ok_for_tools = !require_tools || provider.possibly_supports_tools?
        provider_ok_for_vision = !require_vision || provider.possibly_supports_vision?
        provider_ok_for_thinking = !require_thinking || provider.possibly_supports_thinking?

        if provider_ok_for_enabled && provider_ok_for_tools && provider_ok_for_vision && provider_ok_for_thinking
          PROVIDER_TO_MODEL_REGISTRY[provider.provider_name].each do |_, model|
            model_ok_for_enabled = include_disabled || model.is_enabled?
            model_ok_for_tools = !require_tools || (model.certainly_supports_tools?)
            model_ok_for_vision = !require_vision || (model.certainly_supports_vision?)
            model_ok_for_thinking = !require_thinking || (model.certainly_supports_thinking?)

            if model_ok_for_enabled && model_ok_for_tools && model_ok_for_vision && model_ok_for_thinking
              if full
                ok_model_names << "#{provider_name}:#{model.model_name}"
              else
                ok_model_names << model.model_name
              end
            end
          end
        end
      end

      ok_model_names.sort
    end

    # Shared lookup/toggle for enable_model / disable_model.
    # Raises on unknown provider or model.
    def self.set_model_enabled(provider_name, model_name, value)
      provider = PROVIDER_REGISTRY[provider_name.to_s]
      raise "Unknown provider: #{provider_name}" unless provider

      # dig avoids NoMethodError when the provider has no registered models.
      model = PROVIDER_TO_MODEL_REGISTRY.dig(provider.provider_name, model_name)
      raise "Unknown model: #{model_name}" unless model

      model.enabled = value
    end
    private_class_method :set_model_enabled

    # Shared lookup/toggle for enable_provider / disable_provider.
    # Raises on unknown provider.
    def self.set_provider_enabled(provider_name, value)
      provider = PROVIDER_REGISTRY[provider_name.to_s]
      raise "Unknown provider: #{provider_name}" unless provider

      provider.enabled = value
    end
    private_class_method :set_provider_enabled

  end
end