llm.rb 0.3.1 → 0.3.3

This diff shows the changes between publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +89 -20
  3. data/lib/llm/chat.rb +5 -3
  4. data/lib/llm/core_ext/ostruct.rb +1 -1
  5. data/lib/llm/error.rb +6 -1
  6. data/lib/llm/file.rb +15 -1
  7. data/lib/llm/model.rb +27 -2
  8. data/lib/llm/provider.rb +28 -32
  9. data/lib/llm/providers/anthropic/format.rb +19 -6
  10. data/lib/llm/providers/anthropic/models.rb +62 -0
  11. data/lib/llm/providers/anthropic.rb +23 -8
  12. data/lib/llm/providers/gemini/files.rb +2 -2
  13. data/lib/llm/providers/gemini/format.rb +6 -1
  14. data/lib/llm/providers/gemini/images.rb +5 -5
  15. data/lib/llm/providers/gemini/models.rb +69 -0
  16. data/lib/llm/providers/gemini/response_parser.rb +1 -5
  17. data/lib/llm/providers/gemini.rb +24 -8
  18. data/lib/llm/providers/ollama/format.rb +11 -3
  19. data/lib/llm/providers/ollama/models.rb +66 -0
  20. data/lib/llm/providers/ollama.rb +23 -8
  21. data/lib/llm/providers/openai/audio.rb +3 -5
  22. data/lib/llm/providers/openai/files.rb +2 -2
  23. data/lib/llm/providers/openai/format.rb +47 -11
  24. data/lib/llm/providers/openai/images.rb +4 -4
  25. data/lib/llm/providers/openai/models.rb +62 -0
  26. data/lib/llm/providers/openai/response_parser.rb +1 -5
  27. data/lib/llm/providers/openai/responses.rb +24 -6
  28. data/lib/llm/providers/openai.rb +24 -7
  29. data/lib/llm/response/modellist.rb +18 -0
  30. data/lib/llm/response.rb +1 -0
  31. data/lib/llm/version.rb +1 -1
  32. data/lib/llm.rb +2 -1
  33. data/spec/anthropic/completion_spec.rb +36 -0
  34. data/spec/anthropic/models_spec.rb +21 -0
  35. data/spec/gemini/images_spec.rb +4 -12
  36. data/spec/gemini/models_spec.rb +21 -0
  37. data/spec/llm/conversation_spec.rb +5 -3
  38. data/spec/ollama/models_spec.rb +20 -0
  39. data/spec/openai/completion_spec.rb +21 -2
  40. data/spec/openai/files_spec.rb +3 -3
  41. data/spec/openai/images_spec.rb +2 -6
  42. data/spec/openai/models_spec.rb +21 -0
  43. metadata +11 -6
  44. data/share/llm/models/anthropic.yml +0 -35
  45. data/share/llm/models/gemini.yml +0 -35
  46. data/share/llm/models/ollama.yml +0 -155
  47. data/share/llm/models/openai.yml +0 -46
data/lib/llm/providers/gemini/models.rb ADDED
@@ -0,0 +1,69 @@
+ # frozen_string_literal: true
+
+ class LLM::Gemini
+   ##
+   # The {LLM::Gemini::Models LLM::Gemini::Models} class provides a model
+   # object for interacting with [Gemini's models API](https://ai.google.dev/api/models?hl=en#method:-models.list).
+   # The models API allows a client to query Gemini for a list of models
+   # that are available for use with the Gemini API.
+   #
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.gemini(ENV["KEY"])
+   #   res = llm.models.all
+   #   res.each do |model|
+   #     print "id: ", model.id, "\n"
+   #   end
+   class Models
+     include LLM::Utils
+
+     ##
+     # Returns a new Models object
+     # @param provider [LLM::Provider]
+     # @return [LLM::Gemini::Models]
+     def initialize(provider)
+       @provider = provider
+     end
+
+     ##
+     # List all models
+     # @example
+     #   llm = LLM.gemini(ENV["KEY"])
+     #   res = llm.models.all
+     #   res.each do |model|
+     #     print "id: ", model.id, "\n"
+     #   end
+     # @see https://ai.google.dev/api/models?hl=en#method:-models.list Gemini docs
+     # @param [Hash] params Other parameters (see Gemini docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response::ModelList]
+     def all(**params)
+       query = URI.encode_www_form(params.merge!(key: secret))
+       req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
+       res = request(http, req)
+       LLM::Response::ModelList.new(res).tap { |modellist|
+         models = modellist.body["models"].map do |model|
+           model = model.transform_keys { snakecase(_1) }
+           LLM::Model.from_hash(model).tap { _1.provider = @provider }
+         end
+         modellist.models = models
+       }
+     end
+
+     private
+
+     def http
+       @provider.instance_variable_get(:@http)
+     end
+
+     def secret
+       @provider.instance_variable_get(:@secret)
+     end
+
+     [:headers, :request].each do |m|
+       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+     end
+   end
+ end
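Since Models#all forwards its keyword arguments straight into the query string, paging can be expressed as a pass-through. A minimal sketch, assuming Gemini's documented pageSize and pageToken parameters (neither appears in this diff):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.gemini(ENV["KEY"])
    # pageSize/pageToken are forwarded verbatim via URI.encode_www_form;
    # nextPageToken is read from the raw response body.
    page = llm.models.all(pageSize: 10)
    page.each { |model| puts model.id }
    token = page.body["nextPageToken"]
    llm.models.all(pageToken: token) if token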
data/lib/llm/providers/gemini/response_parser.rb CHANGED
@@ -44,11 +44,7 @@ class LLM::Gemini
      images: body["candidates"].flat_map do |candidate|
        candidate["content"]["parts"].filter_map do
          next unless _1.dig("inlineData", "data")
-         OpenStruct.from_hash(
-           mime_type: _1["inlineData"]["mimeType"],
-           encoded: _1["inlineData"]["data"],
-           binary: _1["inlineData"]["data"].unpack1("m0")
-         )
+         StringIO.new(_1["inlineData"]["data"].unpack1("m0"))
        end
      end
    }
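Each generated image is now an IO-like StringIO of decoded bytes rather than an OpenStruct carrying mime_type/encoded/binary fields. A sketch of the new calling convention, assuming Gemini's Images#create mirrors the OpenAI example later in this diff (the prompt and filenames are illustrative):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.gemini(ENV["KEY"])
    res = llm.images.create(prompt: "A dog on a rocket to the moon")
    # res.images yields StringIO objects, so the bytes stream straight
    # to disk instead of being read from a .binary attribute.
    res.images.each_with_index do |image, index|
      IO.copy_stream(image, "image-#{index}.png")
    end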
data/lib/llm/providers/gemini.rb CHANGED
@@ -34,6 +34,7 @@ module LLM
    require_relative "gemini/images"
    require_relative "gemini/files"
    require_relative "gemini/audio"
+   require_relative "gemini/models"
    include Format

    HOST = "generativelanguage.googleapis.com"
@@ -52,6 +53,7 @@ module LLM
    # @raise (see LLM::Provider#request)
    # @return (see LLM::Provider#embed)
    def embed(input, model: "text-embedding-004", **params)
+     model = model.respond_to?(:id) ? model.id : model
      path = ["/v1beta/models/#{model}", "embedContent?key=#{@secret}"].join(":")
      req = Net::HTTP::Post.new(path, headers)
      req.body = JSON.dump({content: {parts: [{text: input}]}})
@@ -68,13 +70,18 @@ module LLM
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
+   # @raise [LLM::Error::PromptError]
+   #   When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-   def complete(prompt, role = :user, model: "gemini-1.5-flash", **params)
-     path = ["/v1beta/models/#{model}", "generateContent?key=#{@secret}"].join(":")
-     req = Net::HTTP::Post.new(path, headers)
+   def complete(prompt, role = :user, model: default_model, **params)
+     model = model.respond_to?(:id) ? model.id : model
+     path = ["/v1beta/models/#{model}", "generateContent?key=#{@secret}"].join(":")
+     req = Net::HTTP::Post.new(path, headers)
      messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-     req.body = JSON.dump({contents: format(messages)})
-     res = request(@http, req)
+     body = JSON.dump({contents: format(messages)})
+     set_body_stream(req, StringIO.new(body))
+
+     res = request(@http, req)
      Response::Completion.new(res).extend(response_parser)
    end

@@ -100,6 +107,13 @@ module LLM
      LLM::Gemini::Files.new(self)
    end

+   ##
+   # Provides an interface to Gemini's models API
+   # @see https://ai.google.dev/gemini-api/docs/models Gemini docs
+   def models
+     LLM::Gemini::Models.new(self)
+   end
+
    ##
    # @return (see LLM::Provider#assistant_role)
    def assistant_role
@@ -107,9 +121,11 @@ module LLM
    end

    ##
-   # @return (see LLM::Provider#models)
-   def models
-     @models ||= load_models!("gemini")
+   # Returns the default model for chat completions
+   # @see https://ai.google.dev/gemini-api/docs/models#gemini-1.5-flash gemini-1.5-flash
+   # @return [String]
+   def default_model
+     "gemini-1.5-flash"
    end

    private
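Because complete and embed now unwrap any object that responds to #id, a model object returned by the new models API can be passed directly as the model: argument:

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.gemini(ENV["KEY"])
    model = llm.models.all.first
    # complete unwraps it: model.respond_to?(:id) ? model.id : model
    llm.complete("Hello, Gemini", :user, model:)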
data/lib/llm/providers/ollama/format.rb CHANGED
@@ -28,14 +28,22 @@ class LLM::Ollama
    # @return [String, Hash]
    #   The formatted content
    def format_content(content)
-     if LLM::File === content
+     case content
+     when LLM::File
        if content.image?
          {content: "This message has an image associated with it", images: [content.to_b64]}
        else
-         raise TypeError, "'#{content.path}' was not recognized as an image file."
+         raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                        "is not an image, and therefore not supported by the " \
+                                        "Ollama API"
        end
-     else
+     when String
        {content:}
+     when LLM::Message
+       format_content(content.content)
+     else
+       raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                      "is not supported by the Ollama API"
      end
    end
  end
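Unsupported prompt objects now raise LLM::Error::PromptError instead of TypeError, so callers can rescue a single error class across providers. A sketch (the file path is illustrative):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.ollama(nil)
    begin
      # An mp3 is a valid LLM::File but not an image, so Ollama's
      # format_content rejects it with a PromptError.
      llm.complete(LLM::File("/audio/song.mp3"), :user)
    rescue LLM::Error::PromptError => e
      warn e.message
    end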
data/lib/llm/providers/ollama/models.rb ADDED
@@ -0,0 +1,66 @@
+ # frozen_string_literal: true
+
+ class LLM::Ollama
+   ##
+   # The {LLM::Ollama::Models LLM::Ollama::Models} class provides a model
+   # object for interacting with [Ollama's models API](https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models).
+   # The models API allows a client to query Ollama for a list of models
+   # that are available for use with the Ollama API.
+   #
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.ollama(nil)
+   #   res = llm.models.all
+   #   res.each do |model|
+   #     print "id: ", model.id, "\n"
+   #   end
+   class Models
+     include LLM::Utils
+
+     ##
+     # Returns a new Models object
+     # @param provider [LLM::Provider]
+     # @return [LLM::Ollama::Models]
+     def initialize(provider)
+       @provider = provider
+     end
+
+     ##
+     # List all models
+     # @example
+     #   llm = LLM.ollama(nil)
+     #   res = llm.models.all
+     #   res.each do |model|
+     #     print "id: ", model.id, "\n"
+     #   end
+     # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+     # @see https://ollama.com/library Ollama library
+     # @param [Hash] params Other parameters (see Ollama docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response::ModelList]
+     def all(**params)
+       query = URI.encode_www_form(params)
+       req = Net::HTTP::Get.new("/api/tags?#{query}", headers)
+       res = request(http, req)
+       LLM::Response::ModelList.new(res).tap { |modellist|
+         models = modellist.body["models"].map do |model|
+           model = model.transform_keys { snakecase(_1) }
+           LLM::Model.from_hash(model).tap { _1.provider = @provider }
+         end
+         modellist.models = models
+       }
+     end
+
+     private
+
+     def http
+       @provider.instance_variable_get(:@http)
+     end
+
+     [:headers, :request].each do |m|
+       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+     end
+   end
+ end
data/lib/llm/providers/ollama.rb CHANGED
@@ -22,6 +22,7 @@ module LLM
    require_relative "ollama/error_handler"
    require_relative "ollama/response_parser"
    require_relative "ollama/format"
+   require_relative "ollama/models"
    include Format

    HOST = "localhost"
@@ -56,16 +57,28 @@ module LLM
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
+   # @raise [LLM::Error::PromptError]
+   #   When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-   def complete(prompt, role = :user, model: "llama3.2", **params)
-     params = {model:, stream: false}.merge!(params)
-     req = Net::HTTP::Post.new("/api/chat", headers)
+   def complete(prompt, role = :user, model: default_model, **params)
+     params = {model:, stream: false}.merge!(params)
+     req = Net::HTTP::Post.new("/api/chat", headers)
      messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-     req.body = JSON.dump({messages: format(messages)}.merge!(params))
-     res = request(@http, req)
+     body = JSON.dump({messages: format(messages)}.merge!(params))
+     set_body_stream(req, StringIO.new(body))
+
+     res = request(@http, req)
      Response::Completion.new(res).extend(response_parser)
    end

+   ##
+   # Provides an interface to Ollama's models API
+   # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+   # @return [LLM::Ollama::Models]
+   def models
+     LLM::Ollama::Models.new(self)
+   end
+
    ##
    # @return (see LLM::Provider#assistant_role)
    def assistant_role
@@ -73,9 +86,11 @@ module LLM
    end

    ##
-   # @return (see LLM::Provider#models)
-   def models
-     @models ||= load_models!("ollama")
+   # Returns the default model for chat completions
+   # @see https://ollama.com/library llama3.2
+   # @return [String]
+   def default_model
+     "llama3.2"
    end

    private
data/lib/llm/providers/openai/audio.rb CHANGED
@@ -9,8 +9,6 @@ class LLM::OpenAI
    #   res = llm.audio.create_speech(input: "A dog on a rocket to the moon")
    #   File.binwrite("rocket.mp3", res.audio.string)
    class Audio
-     require "stringio"
-
      ##
      # Returns a new Audio object
      # @param provider [LLM::Provider]
@@ -57,7 +55,7 @@ class LLM::OpenAI
      multi = LLM::Multipart.new(params.merge!(file:, model:))
      req = Net::HTTP::Post.new("/v1/audio/transcriptions", headers)
      req["content-type"] = multi.content_type
-     req.body_stream = multi.body
+     set_body_stream(req, multi.body)
      res = request(http, req)
      LLM::Response::AudioTranscription.new(res).tap { _1.text = _1.body["text"] }
    end
@@ -79,7 +77,7 @@ class LLM::OpenAI
      multi = LLM::Multipart.new(params.merge!(file:, model:))
      req = Net::HTTP::Post.new("/v1/audio/translations", headers)
      req["content-type"] = multi.content_type
-     req.body_stream = multi.body
+     set_body_stream(req, multi.body)
      res = request(http, req)
      LLM::Response::AudioTranslation.new(res).tap { _1.text = _1.body["text"] }
    end
@@ -90,7 +88,7 @@ class LLM::OpenAI
      @provider.instance_variable_get(:@http)
    end

-   [:headers, :request].each do |m|
+   [:headers, :request, :set_body_stream].each do |m|
      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
    end
  end
data/lib/llm/providers/openai/files.rb CHANGED
@@ -73,7 +73,7 @@ class LLM::OpenAI
      multi = LLM::Multipart.new(params.merge!(file:, purpose:))
      req = Net::HTTP::Post.new("/v1/files", headers)
      req["content-type"] = multi.content_type
-     req.body_stream = multi.body
+     set_body_stream(req, multi.body)
      res = request(http, req)
      LLM::Response::File.new(res)
    end
@@ -141,7 +141,7 @@ class LLM::OpenAI
      @provider.instance_variable_get(:@http)
    end

-   [:headers, :request].each do |m|
+   [:headers, :request, :set_body_stream].each do |m|
      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
    end
  end
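The direct req.body_stream = assignments are now funneled through a set_body_stream helper that these classes delegate to LLM::Provider. Its definition lives in data/lib/llm/provider.rb and is not shown in this diff; presumably it amounts to something like the following sketch:

    # A sketch only: the real method is private on LLM::Provider and not
    # part of this diff. Net::HTTP insists on a content-length or chunked
    # transfer-encoding whenever a body stream is used, which would explain
    # centralizing the assignment in one place.
    def set_body_stream(req, io)
      req.body_stream = io
      req["transfer-encoding"] = "chunked" unless req["content-length"]
    end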
data/lib/llm/providers/openai/format.rb CHANGED
@@ -29,19 +29,55 @@ class LLM::OpenAI
    #   The formatted content
    def format_content(content, mode)
      if mode == :complete
-       case content
-       when Array then content.flat_map { format_content(_1, mode) }
-       when URI then [{type: :image_url, image_url: {url: content.to_s}}]
-       when LLM::Response::File then [{type: :file, file: {file_id: content.id}}]
-       else [{type: :text, text: content.to_s}]
-       end
+       format_complete(content)
      elsif mode == :response
-       case content
-       when Array then content.flat_map { format_content(_1, mode) }
-       when URI then [{type: :image_url, image_url: {url: content.to_s}}]
-       when LLM::Response::File then [{type: :input_file, file_id: content.id}]
-       else [{type: :input_text, text: content.to_s}]
+       format_response(content)
+     end
+   end
+
+   def format_complete(content)
+     case content
+     when Array
+       content.flat_map { format_complete(_1) }
+     when URI
+       [{type: :image_url, image_url: {url: content.to_s}}]
+     when LLM::File
+       file = content
+       if file.image?
+         [{type: :image_url, image_url: {url: file.to_data_uri}}]
+       else
+         [{type: :file, file: {filename: file.basename, file_data: file.to_data_uri}}]
+       end
+     when LLM::Response::File
+       [{type: :file, file: {file_id: content.id}}]
+     when String
+       [{type: :text, text: content.to_s}]
+     when LLM::Message
+       format_complete(content.content)
+     else
+       raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                      "is not supported by the OpenAI chat completions API"
+     end
+   end
+
+   def format_response(content)
+     case content
+     when Array
+       content.flat_map { format_response(_1) }
+     when LLM::Response::File
+       file = LLM::File(content.filename)
+       if file.image?
+         [{type: :input_image, file_id: content.id}]
+       else
+         [{type: :input_file, file_id: content.id}]
        end
+     when String
+       [{type: :input_text, text: content.to_s}]
+     when LLM::Message
+       format_response(content.content)
+     else
+       raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                      "is not supported by the OpenAI responses API"
      end
    end
  end
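Splitting format_content into format_complete and format_response keeps the two wire formats apart: chat completions take text/image_url/file parts while the responses API takes input_text/input_image/input_file parts. For example, following the case branches above (the data URI is elided):

    format_complete(["Describe this image", LLM::File("/images/hat.png")])
    # => [{type: :text, text: "Describe this image"},
    #     {type: :image_url, image_url: {url: "data:image/png;base64,..."}}]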
data/lib/llm/providers/openai/images.rb CHANGED
@@ -24,7 +24,7 @@ class LLM::OpenAI
    #   llm = LLM.openai(ENV["KEY"])
    #   res = llm.images.create prompt: "A dog on a rocket to the moon",
    #                           response_format: "b64_json"
-   #   File.binwrite("rocket.png", res.images[0].binary)
+   #   IO.copy_stream res.images[0], "rocket.png"
    class Images
      ##
      # Returns a new Images object
@@ -69,7 +69,7 @@ class LLM::OpenAI
      multi = LLM::Multipart.new(params.merge!(image:, model:))
      req = Net::HTTP::Post.new("/v1/images/variations", headers)
      req["content-type"] = multi.content_type
-     req.body_stream = multi.body
+     set_body_stream(req, multi.body)
      res = request(http, req)
      LLM::Response::Image.new(res).extend(response_parser)
    end
@@ -91,7 +91,7 @@ class LLM::OpenAI
      multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
      req = Net::HTTP::Post.new("/v1/images/edits", headers)
      req["content-type"] = multi.content_type
-     req.body_stream = multi.body
+     set_body_stream(req, multi.body)
      res = request(http, req)
      LLM::Response::Image.new(res).extend(response_parser)
    end
@@ -102,7 +102,7 @@ class LLM::OpenAI
      @provider.instance_variable_get(:@http)
    end

-   [:response_parser, :headers, :request].each do |m|
+   [:response_parser, :headers, :request, :set_body_stream].each do |m|
      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
    end
  end
data/lib/llm/providers/openai/models.rb ADDED
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ class LLM::OpenAI
+   ##
+   # The {LLM::OpenAI::Models LLM::OpenAI::Models} class provides a model
+   # object for interacting with [OpenAI's models API](https://platform.openai.com/docs/api-reference/models/list).
+   # The models API allows a client to query OpenAI for a list of models
+   # that are available for use with the OpenAI API.
+   #
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.openai(ENV["KEY"])
+   #   res = llm.models.all
+   #   res.each do |model|
+   #     print "id: ", model.id, "\n"
+   #   end
+   class Models
+     ##
+     # Returns a new Models object
+     # @param provider [LLM::Provider]
+     # @return [LLM::OpenAI::Models]
+     def initialize(provider)
+       @provider = provider
+     end
+
+     ##
+     # List all models
+     # @example
+     #   llm = LLM.openai(ENV["KEY"])
+     #   res = llm.models.all
+     #   res.each do |model|
+     #     print "id: ", model.id, "\n"
+     #   end
+     # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response::ModelList]
+     def all(**params)
+       query = URI.encode_www_form(params)
+       req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
+       res = request(http, req)
+       LLM::Response::ModelList.new(res).tap { |modellist|
+         models = modellist.body["data"].map do |model|
+           LLM::Model.from_hash(model).tap { _1.provider = @provider }
+         end
+         modellist.models = models
+       }
+     end
+
+     private
+
+     def http
+       @provider.instance_variable_get(:@http)
+     end
+
+     [:headers, :request, :set_body_stream].each do |m|
+       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+     end
+   end
+ end
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -69,11 +69,7 @@ class LLM::OpenAI
      urls: body["data"].filter_map { _1["url"] },
      images: body["data"].filter_map do
        next unless _1["b64_json"]
-       OpenStruct.from_hash(
-         mime_type: nil,
-         encoded: _1["b64_json"],
-         binary: _1["b64_json"].unpack1("m0")
-       )
+       StringIO.new(_1["b64_json"].unpack1("m0"))
      end
    }
  end
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -16,6 +16,20 @@ class LLM::OpenAI
    #   res1 = llm.responses.create "Your task is to help me with math", :developer
    #   res2 = llm.responses.create "5 + 5 = ?", :user, previous_response_id: res1.id
    #   [res1,res2].each { llm.responses.delete(_1) }
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.openai(ENV["KEY"])
+   #   file = llm.files.create file: LLM::File("/images/hat.png")
+   #   res = llm.responses.create ["Describe the image", file]
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.openai(ENV["KEY"])
+   #   file = llm.files.create file: LLM::File("/documents/freebsd.pdf")
+   #   res = llm.responses.create ["Describe the document", file]
    class Responses
      include Format

@@ -35,13 +49,17 @@ class LLM::OpenAI
    # @param model (see LLM::Provider#complete)
    # @param [Hash] params Response params
    # @raise (see LLM::Provider#request)
+   # @raise [LLM::Error::PromptError]
+   #   When given an object a provider does not understand
    # @return [LLM::Response::Output]
-   def create(prompt, role = :user, model: "gpt-4o-mini", **params)
-     params = {model:}.merge!(params)
-     req = Net::HTTP::Post.new("/v1/responses", headers)
+   def create(prompt, role = :user, model: @provider.default_model, **params)
+     params = {model:}.merge!(params)
+     req = Net::HTTP::Post.new("/v1/responses", headers)
      messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
-     req.body = JSON.dump({input: format(messages, :response)}.merge!(params))
-     res = request(http, req)
+     body = JSON.dump({input: format(messages, :response)}.merge!(params))
+     set_body_stream(req, StringIO.new(body))
+
+     res = request(http, req)
      LLM::Response::Output.new(res).extend(response_parser)
    end

@@ -78,7 +96,7 @@ class LLM::OpenAI
      @provider.instance_variable_get(:@http)
    end

-   [:response_parser, :headers, :request].each do |m|
+   [:response_parser, :headers, :request, :set_body_stream].each do |m|
      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
    end
  end
data/lib/llm/providers/openai.rb CHANGED
@@ -12,6 +12,7 @@ module LLM
    require_relative "openai/images"
    require_relative "openai/audio"
    require_relative "openai/files"
+   require_relative "openai/models"
    include Format

    HOST = "api.openai.com"
@@ -46,13 +47,17 @@ module LLM
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
+   # @raise [LLM::Error::PromptError]
+   #   When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-   def complete(prompt, role = :user, model: "gpt-4o-mini", **params)
-     params = {model:}.merge!(params)
-     req = Net::HTTP::Post.new("/v1/chat/completions", headers)
+   def complete(prompt, role = :user, model: default_model, **params)
+     params = {model:}.merge!(params)
+     req = Net::HTTP::Post.new("/v1/chat/completions", headers)
      messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-     req.body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
-     res = request(@http, req)
+     body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
+     set_body_stream(req, StringIO.new(body))
+
+     res = request(@http, req)
      Response::Completion.new(res).extend(response_parser)
    end

@@ -88,14 +93,26 @@ module LLM
      LLM::OpenAI::Files.new(self)
    end

+   ##
+   # Provides an interface to OpenAI's models API
+   # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+   # @return [LLM::OpenAI::Models]
+   def models
+     LLM::OpenAI::Models.new(self)
+   end
+
    ##
    # @return (see LLM::Provider#assistant_role)
    def assistant_role
      "assistant"
    end

-   def models
-     @models ||= load_models!("openai")
+   ##
+   # Returns the default model for chat completions
+   # @see https://platform.openai.com/docs/models/gpt-4o-mini gpt-4o-mini
+   # @return [String]
+   def default_model
+     "gpt-4o-mini"
    end

    private
data/lib/llm/response/modellist.rb ADDED
@@ -0,0 +1,18 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
+   # list of model objects that are returned by a provider. It is an Enumerable
+   # object, and can be used to iterate over the model objects in a way that is
+   # similar to an array. Each element is an instance of OpenStruct.
+   class Response::ModelList < Response
+     include Enumerable
+
+     attr_accessor :models
+
+     def each(&)
+       @models.each(&)
+     end
+   end
+ end
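Because ModelList includes Enumerable and implements #each, the usual collection methods work on the result of models.all for any provider:

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.openai(ENV["KEY"])
    res = llm.models.all
    # map, sort, select and friends come from Enumerable via #each.
    puts res.map(&:id).sort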
data/lib/llm/response.rb CHANGED
@@ -13,6 +13,7 @@ module LLM
    require_relative "response/file"
    require_relative "response/filelist"
    require_relative "response/download_file"
+   require_relative "response/modellist"

    ##
    # @param [Net::HTTPResponse] res