llm.rb 0.3.2 → 0.4.0

Files changed (53)
  1. checksums.yaml +4 -4
  2. data/README.md +81 -8
  3. data/lib/json/schema/array.rb +22 -0
  4. data/lib/json/schema/boolean.rb +9 -0
  5. data/lib/json/schema/integer.rb +21 -0
  6. data/lib/json/schema/leaf.rb +40 -0
  7. data/lib/json/schema/null.rb +9 -0
  8. data/lib/json/schema/number.rb +21 -0
  9. data/lib/json/schema/object.rb +26 -0
  10. data/lib/json/schema/string.rb +9 -0
  11. data/lib/json/schema.rb +73 -0
  12. data/lib/llm/chat.rb +7 -3
  13. data/lib/llm/core_ext/ostruct.rb +1 -1
  14. data/lib/llm/file.rb +8 -1
  15. data/lib/llm/message.rb +7 -0
  16. data/lib/llm/model.rb +27 -2
  17. data/lib/llm/provider.rb +36 -28
  18. data/lib/llm/providers/anthropic/format.rb +19 -6
  19. data/lib/llm/providers/anthropic/models.rb +62 -0
  20. data/lib/llm/providers/anthropic.rb +22 -8
  21. data/lib/llm/providers/gemini/format.rb +6 -1
  22. data/lib/llm/providers/gemini/images.rb +3 -3
  23. data/lib/llm/providers/gemini/models.rb +69 -0
  24. data/lib/llm/providers/gemini/response_parser.rb +1 -5
  25. data/lib/llm/providers/gemini.rb +30 -5
  26. data/lib/llm/providers/ollama/format.rb +11 -3
  27. data/lib/llm/providers/ollama/models.rb +66 -0
  28. data/lib/llm/providers/ollama.rb +30 -8
  29. data/lib/llm/providers/openai/audio.rb +0 -2
  30. data/lib/llm/providers/openai/format.rb +6 -1
  31. data/lib/llm/providers/openai/images.rb +1 -1
  32. data/lib/llm/providers/openai/models.rb +62 -0
  33. data/lib/llm/providers/openai/response_parser.rb +1 -5
  34. data/lib/llm/providers/openai/responses.rb +12 -6
  35. data/lib/llm/providers/openai.rb +37 -7
  36. data/lib/llm/response/modellist.rb +18 -0
  37. data/lib/llm/response.rb +1 -0
  38. data/lib/llm/version.rb +1 -1
  39. data/lib/llm.rb +2 -1
  40. data/spec/anthropic/completion_spec.rb +36 -0
  41. data/spec/anthropic/models_spec.rb +21 -0
  42. data/spec/gemini/images_spec.rb +4 -12
  43. data/spec/gemini/models_spec.rb +21 -0
  44. data/spec/llm/conversation_spec.rb +71 -3
  45. data/spec/ollama/models_spec.rb +20 -0
  46. data/spec/openai/completion_spec.rb +19 -0
  47. data/spec/openai/images_spec.rb +2 -6
  48. data/spec/openai/models_spec.rb +21 -0
  49. metadata +20 -6
  50. data/share/llm/models/anthropic.yml +0 -35
  51. data/share/llm/models/gemini.yml +0 -35
  52. data/share/llm/models/ollama.yml +0 -155
  53. data/share/llm/models/openai.yml +0 -46
data/lib/llm/providers/openai.rb CHANGED
@@ -12,6 +12,7 @@ module LLM
    require_relative "openai/images"
    require_relative "openai/audio"
    require_relative "openai/files"
+   require_relative "openai/models"
    include Format

    HOST = "api.openai.com"
@@ -43,16 +44,23 @@ module LLM
    # @param prompt (see LLM::Provider#complete)
    # @param role (see LLM::Provider#complete)
    # @param model (see LLM::Provider#complete)
+   # @param schema (see LLM::Provider#complete)
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
+   # @raise [LLM::Error::PromptError]
+   #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-   def complete(prompt, role = :user, model: "gpt-4o-mini", **params)
-     params = {model:}.merge!(params)
-     req = Net::HTTP::Post.new("/v1/chat/completions", headers)
+   def complete(prompt, role = :user, model: default_model, schema: nil, **params)
+     params = {model:}
+              .merge!(expand_schema(schema))
+              .merge!(params)
+              .compact
+     req = Net::HTTP::Post.new("/v1/chat/completions", headers)
      messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-     req.body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
-     res = request(@http, req)
+     body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
+     set_body_stream(req, StringIO.new(body))
+     res = request(@http, req)
      Response::Completion.new(res).extend(response_parser)
    end

@@ -88,14 +96,26 @@ module LLM
      LLM::OpenAI::Files.new(self)
    end

+   ##
+   # Provides an interface to OpenAI's models API
+   # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+   # @return [LLM::OpenAI::Models]
+   def models
+     LLM::OpenAI::Models.new(self)
+   end
+
    ##
    # @return (see LLM::Provider#assistant_role)
    def assistant_role
      "assistant"
    end

-   def models
-     @models ||= load_models!("openai")
+   ##
+   # Returns the default model for chat completions
+   # @see https://platform.openai.com/docs/models/gpt-4o-mini gpt-4o-mini
+   # @return [String]
+   def default_model
+     "gpt-4o-mini"
    end

    private
@@ -114,5 +134,15 @@ module LLM
    def error_handler
      LLM::OpenAI::ErrorHandler
    end
+
+   def expand_schema(schema)
+     return {} unless schema
+     {
+       response_format: {
+         type: "json_schema",
+         json_schema: {name: "JSONSchema", schema:}
+       }
+     }
+   end
  end
end
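
The schema: parameter added to #complete threads a JSON schema into OpenAI's
structured-output support: expand_schema nests the schema under response_format
as a "json_schema" entry before the request body is serialized. A minimal
sketch of the call site, using method names from this diff; reading the key
from the LLM_SECRET environment variable is an assumption borrowed from the
specs below:

  require "llm"

  llm = LLM.openai(ENV["LLM_SECRET"])  # key source is assumed
  # Constrain the reply to a JSON object whose "os" value is one of three enums
  schema = llm.schema.object({os: llm.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")})
  res = llm.complete("What operating system is the best?", :user, schema:)
  print res.choices[0].content, "\n"  # raw JSON text of the structured reply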
data/lib/llm/response/modellist.rb ADDED
@@ -0,0 +1,18 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
+   # list of model objects that are returned by a provider. It is an Enumerable
+   # object, and can be used to iterate over the model objects in a way that is
+   # similar to an array. Each element is an instance of OpenStruct.
+   class Response::ModelList < Response
+     include Enumerable
+
+     attr_accessor :models
+
+     def each(&)
+       @models.each(&)
+     end
+   end
+ end
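
Because ModelList is Enumerable over its models array, a provider's model
listing can be iterated, filtered, and mapped like any collection. A short
sketch, assuming llm is a configured provider (the #models.all call and the
model id attribute both appear in the specs later in this diff):

  modellist = llm.models.all                   # => LLM::Response::ModelList
  modellist.each { |model| print model.id, "\n" }
  o3 = modellist.find { _1.id == "o3-mini" }   # any Enumerable method works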
data/lib/llm/response.rb CHANGED
@@ -13,6 +13,7 @@ module LLM
    require_relative "response/file"
    require_relative "response/filelist"
    require_relative "response/download_file"
+   require_relative "response/modellist"

    ##
    # @param [Net::HTTPResponse] res
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
-   VERSION = "0.3.2"
+   VERSION = "0.4.0"
  end
data/lib/llm.rb CHANGED
@@ -1,6 +1,8 @@
  # frozen_string_literal: true

  module LLM
+   require "stringio"
+   require_relative "llm/core_ext/ostruct"
    require_relative "llm/version"
    require_relative "llm/utils"
    require_relative "llm/error"
@@ -13,7 +15,6 @@ module LLM
    require_relative "llm/provider"
    require_relative "llm/chat"
    require_relative "llm/buffer"
-   require_relative "llm/core_ext/ostruct"

    module_function
data/spec/anthropic/completion_spec.rb CHANGED
@@ -42,6 +42,42 @@ RSpec.describe "LLM::Anthropic: completions" do
    end
  end

+ context "when given a URI to an image",
+   vcr: {cassette_name: "anthropic/completions/successful_response_uri_image"} do
+   subject { response.choices[0].content.downcase[0..2] }
+   let(:response) do
+     anthropic.complete([
+       "Is this image the flag of brazil ? ",
+       "Answer with yes or no. ",
+       "Nothing else.",
+       uri
+     ], :user)
+   end
+   let(:uri) { URI("https://upload.wikimedia.org/wikipedia/en/thumb/0/05/Flag_of_Brazil.svg/250px-Flag_of_Brazil.svg.png") }
+
+   it "describes the image" do
+     is_expected.to eq("yes")
+   end
+ end
+
+ context "when given a local reference to an image",
+   vcr: {cassette_name: "anthropic/completions/successful_response_file_image"} do
+   subject { response.choices[0].content.downcase[0..2] }
+   let(:response) do
+     anthropic.complete([
+       "Is this image a representation of a blue book ?",
+       "Answer with yes or no.",
+       "Nothing else.",
+       file
+     ], :user)
+   end
+   let(:file) { LLM::File("spec/fixtures/images/bluebook.png") }
+
+   it "describes the image" do
+     is_expected.to eq("yes")
+   end
+ end
+
  context "when given an unauthorized response",
    vcr: {cassette_name: "anthropic/completions/unauthorized_response"} do
    subject(:response) { anthropic.complete("Hello", :user) }
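
The two new contexts exercise multimodal prompts: a prompt may be an array
that mixes strings with a URI or an LLM::File, which the updated Anthropic
format layer converts into the appropriate content blocks. A condensed sketch
of the same call shape; reading the key from LLM_SECRET is an assumption:

  llm = LLM.anthropic(ENV["LLM_SECRET"])  # key source is assumed
  res = llm.complete([
    "Answer with yes or no. Nothing else.",
    LLM::File("spec/fixtures/images/bluebook.png")  # a URI(...) works as well
  ], :user)
  print res.choices[0].content, "\n"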
data/spec/anthropic/models_spec.rb ADDED
@@ -0,0 +1,21 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Anthropic::Models" do
+   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+   let(:provider) { LLM.anthropic(token) }
+
+   context "when given a successful list operation",
+     vcr: {cassette_name: "anthropic/models/successful_list"} do
+     subject { provider.models.all }
+
+     it "is successful" do
+       is_expected.to be_instance_of(LLM::Response::ModelList)
+     end
+
+     it "returns a list of models" do
+       expect(subject.models).to all(be_a(LLM::Model))
+     end
+   end
+ end
data/spec/gemini/images_spec.rb CHANGED
@@ -14,12 +14,8 @@ RSpec.describe "LLM::Gemini::Images" do
      expect(response).to be_instance_of(LLM::Response::Image)
    end

-   it "returns an encoded string" do
-     expect(response.images[0].encoded).to be_instance_of(String)
-   end
-
-   it "returns a binary string" do
-     expect(response.images[0].binary).to be_instance_of(String)
+   it "returns an IO-like object" do
+     expect(response.images[0]).to be_instance_of(StringIO)
    end
  end

@@ -36,12 +32,8 @@ RSpec.describe "LLM::Gemini::Images" do
      expect(response).to be_instance_of(LLM::Response::Image)
    end

-   it "returns data" do
-     expect(response.images[0].encoded).to be_instance_of(String)
-   end
-
-   it "returns a url" do
-     expect(response.images[0].binary).to be_instance_of(String)
+   it "returns an IO-like object" do
+     expect(response.images[0]).to be_instance_of(StringIO)
    end
  end
end
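
The image specs now assert an IO-like return value: each entry in
response.images is a StringIO rather than an object with separate encoded and
binary accessors. A sketch of consuming one, assuming response is an
LLM::Response::Image returned by the images API:

  io = response.images[0]              # => StringIO, per the specs above
  File.binwrite("image.png", io.read)  # persist the binary image data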
data/spec/gemini/models_spec.rb ADDED
@@ -0,0 +1,21 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Gemini::Models" do
+   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+   let(:provider) { LLM.gemini(token) }
+
+   context "when given a successful list operation",
+     vcr: {cassette_name: "gemini/models/successful_list"} do
+     subject { provider.models.all }
+
+     it "is successful" do
+       is_expected.to be_instance_of(LLM::Response::ModelList)
+     end
+
+     it "returns a list of models" do
+       expect(subject.models).to all(be_a(LLM::Model))
+     end
+   end
+ end
data/spec/llm/conversation_spec.rb CHANGED
@@ -27,7 +27,7 @@ RSpec.describe "LLM::Chat: non-lazy" do
    bot = nil
    inputs.zip(outputs).each_with_index do |(input, output), index|
      expect(provider).to receive(:complete)
-       .with(input.content, instance_of(Symbol), messages:)
+       .with(input.content, instance_of(Symbol), messages:, model: provider.default_model, schema: nil)
        .and_return(OpenStruct.new(choices: [output]))
      bot = index.zero? ? provider.chat!(input.content, :system) : bot.chat(input.content)
      messages.concat([input, output])
@@ -117,7 +117,8 @@ RSpec.describe "LLM::Chat: lazy" do

    context "when given a specific model",
      vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_o3_mini"} do
-     let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+     let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
+     let(:conversation) { described_class.new(provider, model:).lazy }

      it "maintains the model throughout a conversation" do
        conversation.chat(prompt, :system)
@@ -179,7 +180,8 @@ RSpec.describe "LLM::Chat: lazy" do

    context "when given a specific model",
      vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response_o3_mini"} do
-     let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+     let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
+     let(:conversation) { described_class.new(provider, model:).lazy }

      it "maintains the model throughout a conversation" do
        conversation.respond(prompt, :developer)
@@ -190,4 +192,70 @@ RSpec.describe "LLM::Chat: lazy" do
      end
    end
  end
+
+ context "when given a schema as JSON" do
+   context "with openai" do
+     let(:provider) { LLM.openai(token) }
+     let(:conversation) { described_class.new(provider, schema:).lazy }
+
+     context "when given a schema",
+       vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_schema_netbsd"} do
+       subject(:message) { conversation.recent_message.content! }
+       let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
+
+       before do
+         conversation.chat "You secretly love NetBSD", :system
+         conversation.chat "What operating system is the best?", :user
+       end
+
+       it "formats the response" do
+         is_expected.to eq("os" => "NetBSD")
+       end
+     end
+   end
+
+   context "with gemini" do
+     let(:provider) { LLM.gemini(token) }
+     let(:conversation) { described_class.new(provider, schema:).lazy }
+
+     context "when given a schema",
+       vcr: {cassette_name: "gemini/lazy_conversation/completions/successful_response_schema_netbsd"} do
+       subject(:message) { conversation.recent_message.content! }
+       let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
+
+       before do
+         conversation.chat "You secretly love NetBSD", :user
+         conversation.chat "What operating system is the best?", :user
+       end
+
+       it "formats the response" do
+         is_expected.to eq("os" => "NetBSD")
+       end
+     end
+   end
+
+   context "with ollama" do
+     let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+     let(:conversation) { described_class.new(provider, schema:).lazy }
+
+     context "when given a schema",
+       vcr: {cassette_name: "ollama/lazy_conversation/completions/successful_response_schema_netbsd"} do
+       subject(:message) { conversation.recent_message.content! }
+       let(:schema) do
+         provider.schema.object({
+           os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required
+         })
+       end
+
+       before do
+         conversation.chat "You secretly love NetBSD", :system
+         conversation.chat "What operating system is the best?", :user
+       end
+
+       it "formats the response" do
+         is_expected.to eq("os" => "NetBSD")
+       end
+     end
+   end
+ end
end
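
The same schema support flows through LLM::Chat for OpenAI, Gemini, and
Ollama, as the three contexts above show. A condensed sketch of the OpenAI
variant; the key source is an assumption, the method names come from the spec:

  llm = LLM.openai(ENV["LLM_SECRET"])  # key source is assumed
  schema = llm.schema.object({os: llm.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")})
  bot = LLM::Chat.new(llm, schema:).lazy
  bot.chat "You secretly love NetBSD", :system
  bot.chat "What operating system is the best?", :user
  bot.recent_message.content!  # => {"os" => "NetBSD"}, parsed from JSON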
data/spec/ollama/models_spec.rb ADDED
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Ollama::Models" do
+   let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+
+   context "when given a successful list operation",
+     vcr: {cassette_name: "ollama/models/successful_list"} do
+     subject { provider.models.all }
+
+     it "is successful" do
+       is_expected.to be_instance_of(LLM::Response::ModelList)
+     end
+
+     it "returns a list of models" do
+       expect(subject.models).to all(be_a(LLM::Model))
+     end
+   end
+ end
data/spec/openai/completion_spec.rb CHANGED
@@ -64,6 +64,25 @@ RSpec.describe "LLM::OpenAI: completions" do
    end
  end

+ context "when asked to describe a PDF document",
+   vcr: {cassette_name: "openai/completions/describe_pdf_document"} do
+   let(:file) { LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf") }
+   let(:response) do
+     openai.complete([
+       "This PDF document describes sysctl nodes on FreeBSD",
+       "Answer yes or no.",
+       "Nothing else",
+       file
+     ], :user)
+   end
+
+   subject { response.choices[0].content.downcase[0..2] }
+
+   it "is successful" do
+     is_expected.to eq("yes")
+   end
+ end
+
  context "when given a 'bad request' response",
    vcr: {cassette_name: "openai/completions/bad_request"} do
    subject(:response) { openai.complete(URI("/foobar.exe"), :user) }
data/spec/openai/images_spec.rb CHANGED
@@ -40,12 +40,8 @@ RSpec.describe "LLM::OpenAI::Images" do
      expect(response.images).to be_instance_of(Array)
    end

-   it "returns an encoded string" do
-     expect(response.images[0].encoded).to be_instance_of(String)
-   end
-
-   it "returns an binary string" do
-     expect(response.images[0].binary).to be_instance_of(String)
+   it "returns an IO-like object" do
+     expect(response.images[0]).to be_instance_of(StringIO)
    end
  end

data/spec/openai/models_spec.rb ADDED
@@ -0,0 +1,21 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::OpenAI::Models" do
+   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+   let(:provider) { LLM.openai(token) }
+
+   context "when given a successful list operation",
+     vcr: {cassette_name: "openai/models/successful_list"} do
+     subject { provider.models.all }
+
+     it "is successful" do
+       is_expected.to be_instance_of(LLM::Response::ModelList)
+     end
+
+     it "returns a list of models" do
+       expect(subject.models).to all(be_a(LLM::Model))
+     end
+   end
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
-   version: 0.3.2
+   version: 0.4.0
  platform: ruby
  authors:
  - Antar Azri
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-04-26 00:00:00.000000000 Z
+ date: 2025-04-29 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: webmock
@@ -148,6 +148,15 @@ extensions: []
  extra_rdoc_files: []
  files:
  - README.md
+ - lib/json/schema.rb
+ - lib/json/schema/array.rb
+ - lib/json/schema/boolean.rb
+ - lib/json/schema/integer.rb
+ - lib/json/schema/leaf.rb
+ - lib/json/schema/null.rb
+ - lib/json/schema/number.rb
+ - lib/json/schema/object.rb
+ - lib/json/schema/string.rb
  - lib/llm.rb
  - lib/llm/buffer.rb
  - lib/llm/chat.rb
@@ -162,6 +171,7 @@ files:
  - lib/llm/providers/anthropic.rb
  - lib/llm/providers/anthropic/error_handler.rb
  - lib/llm/providers/anthropic/format.rb
+ - lib/llm/providers/anthropic/models.rb
  - lib/llm/providers/anthropic/response_parser.rb
  - lib/llm/providers/gemini.rb
  - lib/llm/providers/gemini/audio.rb
@@ -169,10 +179,12 @@ files:
  - lib/llm/providers/gemini/files.rb
  - lib/llm/providers/gemini/format.rb
  - lib/llm/providers/gemini/images.rb
+ - lib/llm/providers/gemini/models.rb
  - lib/llm/providers/gemini/response_parser.rb
  - lib/llm/providers/ollama.rb
  - lib/llm/providers/ollama/error_handler.rb
  - lib/llm/providers/ollama/format.rb
+ - lib/llm/providers/ollama/models.rb
  - lib/llm/providers/ollama/response_parser.rb
  - lib/llm/providers/openai.rb
  - lib/llm/providers/openai/audio.rb
@@ -180,6 +192,7 @@ files:
  - lib/llm/providers/openai/files.rb
  - lib/llm/providers/openai/format.rb
  - lib/llm/providers/openai/images.rb
+ - lib/llm/providers/openai/models.rb
  - lib/llm/providers/openai/response_parser.rb
  - lib/llm/providers/openai/responses.rb
  - lib/llm/providers/voyageai.rb
@@ -195,30 +208,31 @@ files:
  - lib/llm/response/file.rb
  - lib/llm/response/filelist.rb
  - lib/llm/response/image.rb
+ - lib/llm/response/modellist.rb
  - lib/llm/response/output.rb
  - lib/llm/utils.rb
  - lib/llm/version.rb
  - llm.gemspec
- - share/llm/models/anthropic.yml
- - share/llm/models/gemini.yml
- - share/llm/models/ollama.yml
- - share/llm/models/openai.yml
  - spec/anthropic/completion_spec.rb
  - spec/anthropic/embedding_spec.rb
+ - spec/anthropic/models_spec.rb
  - spec/gemini/completion_spec.rb
  - spec/gemini/conversation_spec.rb
  - spec/gemini/embedding_spec.rb
  - spec/gemini/files_spec.rb
  - spec/gemini/images_spec.rb
+ - spec/gemini/models_spec.rb
  - spec/llm/conversation_spec.rb
  - spec/ollama/completion_spec.rb
  - spec/ollama/conversation_spec.rb
  - spec/ollama/embedding_spec.rb
+ - spec/ollama/models_spec.rb
  - spec/openai/audio_spec.rb
  - spec/openai/completion_spec.rb
  - spec/openai/embedding_spec.rb
  - spec/openai/files_spec.rb
  - spec/openai/images_spec.rb
+ - spec/openai/models_spec.rb
  - spec/openai/responses_spec.rb
  - spec/readme_spec.rb
  - spec/setup.rb
data/share/llm/models/anthropic.yml DELETED
@@ -1,35 +0,0 @@
- claude-3-7-sonnet-20250219:
-   name: Claude 3.7 Sonnet
-   parameters: Unknown
-   description: Most intelligent Claude model with extended thinking and high capability
-   to_param: claude-3-7-sonnet-20250219
-
- claude-3-5-sonnet-20241022:
-   name: Claude 3.5 Sonnet (v2)
-   parameters: Unknown
-   description: High intelligence and capability; upgraded from previous Sonnet
-   to_param: claude-3-5-sonnet-20241022
-
- claude-3-5-sonnet-20240620:
-   name: Claude 3.5 Sonnet
-   parameters: Unknown
-   description: Intelligent and capable general-purpose model
-   to_param: claude-3-5-sonnet-20240620
-
- claude-3-5-haiku-20241022:
-   name: Claude 3.5 Haiku
-   parameters: Unknown
-   description: Blazing fast model for low-latency text generation
-   to_param: claude-3-5-haiku-20241022
-
- claude-3-opus-20240229:
-   name: Claude 3 Opus
-   parameters: Unknown
-   description: Top-level intelligence, fluency, and reasoning for complex tasks
-   to_param: claude-3-opus-20240229
-
- claude-3-haiku-20240307:
-   name: Claude 3 Haiku
-   parameters: Unknown
-   description: Fastest and most compact Claude model for near-instant responsiveness
-   to_param: claude-3-haiku-20240307
data/share/llm/models/gemini.yml DELETED
@@ -1,35 +0,0 @@
- gemini-2.5-pro-exp-03-25:
-   name: Gemini
-   parameters: Unknown
-   description: Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more
-   to_param: gemini-2.5-pro-exp-03-25
-
- gemini-2.0-flash:
-   name: Gemini
-   parameters: Unknown
-   description: Next generation features, speed, thinking, realtime streaming, and multimodal generation
-   to_param: gemini-2.0-flash
-
- gemini-2.0-flash-lite:
-   name: Gemini
-   parameters: Unknown
-   description: Cost efficiency and low latency
-   to_param: gemini-2.0-flash-lite
-
- gemini-1.5-flash:
-   name: Gemini
-   parameters: Unknown
-   description: Fast and versatile performance across a diverse variety of tasks
-   to_param: gemini-1.5-flash
-
- gemini-1.5-flash-8b:
-   name: Gemini
-   parameters: 8B
-   description: High volume and lower intelligence tasks
-   to_param: gemini-1.5-flash-8b
-
- gemini-1.5-pro:
-   name: Gemini
-   parameters: Unknown
-   description: Complex reasoning tasks requiring more intelligence
-   to_param: gemini-1.5-pro