llm.rb 0.2.1 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/README.md +318 -110
  3. data/lib/llm/buffer.rb +83 -0
  4. data/lib/llm/chat.rb +131 -0
  5. data/lib/llm/error.rb +3 -3
  6. data/lib/llm/file.rb +36 -40
  7. data/lib/llm/message.rb +21 -8
  8. data/lib/llm/mime.rb +54 -0
  9. data/lib/llm/multipart.rb +100 -0
  10. data/lib/llm/provider.rb +123 -21
  11. data/lib/llm/providers/anthropic/error_handler.rb +3 -1
  12. data/lib/llm/providers/anthropic/format.rb +2 -0
  13. data/lib/llm/providers/anthropic/response_parser.rb +3 -1
  14. data/lib/llm/providers/anthropic.rb +14 -5
  15. data/lib/llm/providers/gemini/audio.rb +77 -0
  16. data/lib/llm/providers/gemini/error_handler.rb +4 -2
  17. data/lib/llm/providers/gemini/files.rb +162 -0
  18. data/lib/llm/providers/gemini/format.rb +12 -6
  19. data/lib/llm/providers/gemini/images.rb +99 -0
  20. data/lib/llm/providers/gemini/response_parser.rb +27 -1
  21. data/lib/llm/providers/gemini.rb +62 -6
  22. data/lib/llm/providers/ollama/error_handler.rb +3 -1
  23. data/lib/llm/providers/ollama/format.rb +13 -5
  24. data/lib/llm/providers/ollama/response_parser.rb +3 -1
  25. data/lib/llm/providers/ollama.rb +30 -7
  26. data/lib/llm/providers/openai/audio.rb +97 -0
  27. data/lib/llm/providers/openai/error_handler.rb +3 -1
  28. data/lib/llm/providers/openai/files.rb +148 -0
  29. data/lib/llm/providers/openai/format.rb +22 -8
  30. data/lib/llm/providers/openai/images.rb +109 -0
  31. data/lib/llm/providers/openai/response_parser.rb +58 -5
  32. data/lib/llm/providers/openai/responses.rb +85 -0
  33. data/lib/llm/providers/openai.rb +52 -6
  34. data/lib/llm/providers/voyageai/error_handler.rb +1 -1
  35. data/lib/llm/providers/voyageai.rb +2 -2
  36. data/lib/llm/response/audio.rb +13 -0
  37. data/lib/llm/response/audio_transcription.rb +14 -0
  38. data/lib/llm/response/audio_translation.rb +14 -0
  39. data/lib/llm/response/download_file.rb +15 -0
  40. data/lib/llm/response/file.rb +42 -0
  41. data/lib/llm/response/filelist.rb +18 -0
  42. data/lib/llm/response/image.rb +29 -0
  43. data/lib/llm/response/output.rb +56 -0
  44. data/lib/llm/response.rb +18 -6
  45. data/lib/llm/utils.rb +19 -0
  46. data/lib/llm/version.rb +1 -1
  47. data/lib/llm.rb +5 -2
  48. data/llm.gemspec +1 -6
  49. data/spec/anthropic/completion_spec.rb +1 -1
  50. data/spec/gemini/completion_spec.rb +1 -1
  51. data/spec/gemini/conversation_spec.rb +31 -0
  52. data/spec/gemini/files_spec.rb +124 -0
  53. data/spec/gemini/images_spec.rb +47 -0
  54. data/spec/llm/conversation_spec.rb +107 -62
  55. data/spec/ollama/completion_spec.rb +1 -1
  56. data/spec/ollama/conversation_spec.rb +31 -0
  57. data/spec/openai/audio_spec.rb +55 -0
  58. data/spec/openai/completion_spec.rb +5 -4
  59. data/spec/openai/files_spec.rb +204 -0
  60. data/spec/openai/images_spec.rb +95 -0
  61. data/spec/openai/responses_spec.rb +51 -0
  62. data/spec/setup.rb +8 -0
  63. metadata +31 -50
  64. data/LICENSE.txt +0 -21
  65. data/lib/llm/conversation.rb +0 -90
  66. data/lib/llm/http_client.rb +0 -29
  67. data/lib/llm/message_queue.rb +0 -54
data/lib/llm/chat.rb ADDED
@@ -0,0 +1,131 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # {LLM::Chat LLM::Chat} provides a chat object that maintains a
+  # thread of messages that acts as context throughout a conversation.
+  # A conversation can use the chat completions API that most LLM providers
+  # support or the responses API that a select few LLM providers support.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(ENV["KEY"])
+  #   bot = LLM::Chat.new(llm).lazy
+  #   bot.chat("Your task is to answer all of my questions", :system)
+  #   bot.chat("Your answers should be short and concise", :system)
+  #   bot.chat("What is 5 + 7 ?", :user)
+  #   bot.chat("Why is the sky blue ?", :user)
+  #   bot.chat("Why did the chicken cross the road ?", :user)
+  #   bot.messages.map { print "[#{_1.role}]", _1.content, "\n" }
+  class Chat
+    ##
+    # @return [Array<LLM::Message>]
+    attr_reader :messages
+
+    ##
+    # @param [LLM::Provider] provider
+    #  A provider
+    # @param [Hash] params
+    #  The parameters to maintain throughout the conversation
+    def initialize(provider, params = {})
+      @provider = provider
+      @params = params
+      @lazy = false
+      @messages = []
+    end
+
+    ##
+    # Maintain a conversation via the chat completions API
+    # @param prompt (see LLM::Provider#prompt)
+    # @param role (see LLM::Provider#prompt)
+    # @param params (see LLM::Provider#prompt)
+    # @return [LLM::Chat]
+    def chat(prompt, role = :user, **params)
+      if lazy?
+        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :complete]
+        self
+      else
+        completion = complete!(prompt, role, params)
+        @messages.concat [Message.new(role, prompt), completion.choices[0]]
+        self
+      end
+    end
+
+    ##
+    # Maintain a conversation via the responses API
+    # @note Not all LLM providers support this API
+    # @param prompt (see LLM::Provider#prompt)
+    # @param role (see LLM::Provider#prompt)
+    # @param params (see LLM::Provider#prompt)
+    # @return [LLM::Chat]
+    def respond(prompt, role = :user, **params)
+      if lazy?
+        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
+        self
+      else
+        @response = respond!(prompt, role, params)
+        @messages.concat [Message.new(role, prompt), @response.outputs[0]]
+        self
+      end
+    end
+
+    ##
+    # The last message in the conversation.
+    # @note
+    #  The `read_response` and `recent_message` methods are aliases of
+    #  the `last_message` method, and you can choose the name that best
+    #  fits your context or code style.
+    # @param [#to_s] role
+    #  The role of the last message.
+    # @return [LLM::Message]
+    def last_message(role: @provider.assistant_role)
+      messages.reverse_each.find { _1.role == role.to_s }
+    end
+    alias_method :recent_message, :last_message
+    alias_method :read_response, :last_message
+
+    ##
+    # Enables lazy mode for the conversation.
+    # @return [LLM::Chat]
+    def lazy
+      tap do
+        next if lazy?
+        @lazy = true
+        @messages = LLM::Buffer.new(@provider)
+      end
+    end
+
+    ##
+    # @return [Boolean]
+    #  Returns true if the conversation is lazy
+    def lazy?
+      @lazy
+    end
+
+    def inspect
+      "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
+      "@provider=#{@provider.class}, @params=#{@params.inspect}, " \
+      "@messages=#{@messages.inspect}, @lazy=#{@lazy.inspect}>"
+    end
+
+    private
+
+    def respond!(prompt, role, params)
+      @provider.responses.create(
+        prompt,
+        role,
+        **@params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
+      )
+    end
+
+    def complete!(prompt, role, params)
+      @provider.complete(
+        prompt,
+        role,
+        **@params.merge(params.merge(messages:))
+      )
+    end
+  end
+end
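
For reference, a minimal sketch of how the lazy and non-lazy flows above differ in practice. It assumes an OpenAI provider via LLM.openai (as in the @example), that the OpenAI subclass supplies a default model, and that the lazy LLM::Buffer (defined in data/lib/llm/buffer.rb, not shown in this section) sends the queued prompts when the messages are enumerated:

  require "llm"

  llm = LLM.openai(ENV["KEY"])

  # Non-lazy: each #chat call sends a request right away and appends
  # the prompt plus the first choice to #messages.
  bot = LLM::Chat.new(llm)
  bot.chat("Your answers should be short and concise", :system)
  bot.chat("What is 5 + 7 ?", :user)
  print bot.last_message.content, "\n"

  # Lazy: prompts are queued and, per the assumption above, flushed by
  # LLM::Buffer when #messages is enumerated, e.g. through #last_message.
  lazy = LLM::Chat.new(llm).lazy
  lazy.chat("Answer concisely", :system).chat("Why is the sky blue ?", :user)
  print lazy.last_message.content, "\n"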
data/lib/llm/error.rb CHANGED
@@ -10,7 +10,7 @@ module LLM
 
     ##
     # The superclass of all HTTP protocol errors
-    class BadResponse < Error
+    class ResponseError < Error
       ##
       # @return [Net::HTTPResponse]
       #  Returns the response associated with an error
@@ -19,10 +19,10 @@ module LLM
 
     ##
     # HTTPUnauthorized
-    Unauthorized = Class.new(BadResponse)
+    Unauthorized = Class.new(ResponseError)
 
     ##
     # HTTPTooManyRequests
-    RateLimit = Class.new(BadResponse)
+    RateLimit = Class.new(ResponseError)
   end
 end
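
Renaming LLM::Error::BadResponse to LLM::Error::ResponseError is a breaking change for callers that rescued the old constant. A hedged sketch of what rescuing code looks like against 0.3.1; the setup line and prompt are placeholders, and it assumes the OpenAI subclass supplies a default model:

  require "llm"

  llm = LLM.openai(ENV["KEY"])
  begin
    llm.complete("Hello", :user)
  rescue LLM::Error::RateLimit
    # HTTP 429: back off and try again
    sleep 1 and retry
  rescue LLM::Error::ResponseError => ex
    # Formerly LLM::Error::BadResponse; covers any other unsuccessful status
    warn "unexpected response: #{ex.response.code}"
  end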
data/lib/llm/file.rb CHANGED
@@ -1,45 +1,10 @@
 # frozen_string_literal: true
 
+##
+# The {LLM::File LLM::File} class represents a local file. It can
+# be used as a prompt with certain providers (eg: Ollama, Gemini),
+# and as an input with certain methods
 class LLM::File
-  ##
-  # @return [Hash]
-  #  Returns a hash of common file extensions and their
-  #  corresponding MIME types
-  def self.mime_types
-    @mime_types ||= {
-      # Images
-      ".png" => "image/png",
-      ".jpg" => "image/jpeg",
-      ".jpeg" => "image/jpeg",
-      ".webp" => "image/webp",
-
-      # Videos
-      ".flv" => "video/x-flv",
-      ".mov" => "video/quicktime",
-      ".mpeg" => "video/mpeg",
-      ".mpg" => "video/mpeg",
-      ".mp4" => "video/mp4",
-      ".webm" => "video/webm",
-      ".wmv" => "video/x-ms-wmv",
-      ".3gp" => "video/3gpp",
-
-      # Audio
-      ".aac" => "audio/aac",
-      ".flac" => "audio/flac",
-      ".mp3" => "audio/mpeg",
-      ".m4a" => "audio/mp4",
-      ".mpga" => "audio/mpeg",
-      ".opus" => "audio/opus",
-      ".pcm" => "audio/L16",
-      ".wav" => "audio/wav",
-      ".weba" => "audio/webm",
-
-      # Documents
-      ".pdf" => "application/pdf",
-      ".txt" => "text/plain"
-    }.freeze
-  end
-
   ##
   # @return [String]
   #  Returns the path to a file
@@ -53,7 +18,38 @@ class LLM::File
   # @return [String]
   #  Returns the MIME type of the file
   def mime_type
-    self.class.mime_types[File.extname(path)]
+    LLM::Mime[File.extname(path)]
+  end
+
+  ##
+  # @return [String]
+  #  Returns true if the file is an image
+  def image?
+    mime_type.start_with?("image/")
+  end
+
+  ##
+  # @return [Integer]
+  #  Returns the size of the file in bytes
+  def bytesize
+    File.size(path)
+  end
+
+  ##
+  # @return [String]
+  #  Returns the file contents in base64
+  def to_b64
+    [File.binread(path)].pack("m0")
+  end
+
+  ##
+  # @return [File]
+  #  Yields an IO object suitable to be streamed
+  def with_io
+    io = File.open(path, "rb")
+    yield(io)
+  ensure
+    io.close
   end
 end
 
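
A short sketch of the helpers added above. The LLM::File constructor is not shown in this hunk, so the construction below assumes it simply takes a path:

  require "llm"

  file = LLM::File.new("photo.png")   # assumption: the constructor takes a path
  file.mime_type                      # => "image/png", resolved via LLM::Mime
  file.image?                         # => true
  file.bytesize                       # => same as File.size("photo.png")
  file.to_b64                         # => base64 string, e.g. for inline prompts

  # Stream the raw bytes, e.g. when building a multipart request body
  file.with_io { |io| io.read(16) }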
data/lib/llm/message.rb CHANGED
@@ -3,18 +3,22 @@
 module LLM
   class Message
     ##
+    # Returns the role of the message
     # @return [Symbol]
     attr_reader :role
 
     ##
+    # Returns the content of the message
     # @return [String]
     attr_reader :content
 
     ##
+    # Returns extra context associated with the message
     # @return [Hash]
     attr_reader :extra
 
     ##
+    # Returns a new message
     # @param [Symbol] role
     # @param [String] content
     # @param [Hash] extra
@@ -26,23 +30,17 @@ module LLM
     end
 
     ##
-    # @return [OpenStruct]
-    def logprobs
-      return nil unless extra.key?(:logprobs)
-      OpenStruct.from_hash(extra[:logprobs])
-    end
-
-    ##
+    # Returns a hash representation of the message
     # @return [Hash]
     def to_h
      {role:, content:}
     end
 
     ##
+    # Returns true when two objects have the same role and content
     # @param [Object] other
     #  The other object to compare
     # @return [Boolean]
-    #  Returns true when the "other" object has the same role and content
     def ==(other)
       if other.respond_to?(:to_h)
         to_h == other.to_h
@@ -51,5 +49,20 @@ module LLM
       end
     end
     alias_method :eql?, :==
+
+    ##
+    # Returns true when the message is from the LLM
+    # @return [Boolean]
+    def assistant?
+      role == "assistant" || role == "model"
+    end
+
+    ##
+    # Returns a string representation of the message
+    # @return [String]
+    def inspect
+      "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
+      "role=#{role.inspect} content=#{content.inspect}>"
+    end
   end
 end
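
The message object now behaves more like a value object: #== compares #to_h, #assistant? identifies LLM output (Gemini reports the "model" role), and #inspect replaces the removed #logprobs accessor with a concise summary. A small sketch based on the hunks above:

  require "llm"

  msg = LLM::Message.new("model", "12")
  msg.assistant?                          # => true ("assistant" or "model")
  msg == {role: "model", content: "12"}   # => true, compared via #to_h
  msg.to_h                                # => {role: "model", content: "12"}
  msg.inspect                             # => "#<LLM::Message:0x... role=\"model\" content=\"12\">"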
data/lib/llm/mime.rb ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+##
+# @private
+class LLM::Mime
+  ##
+  # Lookup a mime type
+  # @return [String, nil]
+  def self.[](key)
+    if key.respond_to?(:path)
+      types[File.extname(key.path)]
+    else
+      types[key]
+    end
+  end
+
+  ##
+  # Returns a Hash of mime types
+  # @return [Hash]
+  def self.types
+    @types ||= {
+      # Images
+      ".png" => "image/png",
+      ".jpg" => "image/jpeg",
+      ".jpeg" => "image/jpeg",
+      ".webp" => "image/webp",
+
+      # Videos
+      ".flv" => "video/x-flv",
+      ".mov" => "video/quicktime",
+      ".mpeg" => "video/mpeg",
+      ".mpg" => "video/mpeg",
+      ".mp4" => "video/mp4",
+      ".webm" => "video/webm",
+      ".wmv" => "video/x-ms-wmv",
+      ".3gp" => "video/3gpp",
+
+      # Audio
+      ".aac" => "audio/aac",
+      ".flac" => "audio/flac",
+      ".mp3" => "audio/mpeg",
+      ".m4a" => "audio/mp4",
+      ".mpga" => "audio/mpeg",
+      ".opus" => "audio/opus",
+      ".pcm" => "audio/L16",
+      ".wav" => "audio/wav",
+      ".weba" => "audio/webm",
+
+      # Documents
+      ".pdf" => "application/pdf",
+      ".txt" => "text/plain"
+    }
+  end
+end
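
LLM::Mime centralizes the extension table that was previously a class method on LLM::File. Lookup works with a bare extension or with any object that responds to #path:

  require "llm"

  LLM::Mime[".png"]   # => "image/png"
  LLM::Mime[".txt"]   # => "text/plain"
  LLM::Mime[".xyz"]   # => nil for unknown extensions

  # Anything with a #path works; a plain Struct stands in for a file object here
  song = Struct.new(:path).new("song.mp3")
  LLM::Mime[song]     # => "audio/mpeg"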
data/lib/llm/multipart.rb ADDED
@@ -0,0 +1,100 @@
+# encoding: ascii-8bit
+# frozen_string_literal: true
+
+##
+# @private
+class LLM::Multipart
+  require "llm"
+  require "securerandom"
+
+  ##
+  # @return [String]
+  attr_reader :boundary
+
+  ##
+  # @param [Hash] params
+  #  Request parameters
+  # @return [LLM::Multipart]
+  def initialize(params)
+    @boundary = "BOUNDARY__#{SecureRandom.hex(16)}"
+    @params = params
+  end
+
+  ##
+  # Returns the multipart content type
+  # @return [String]
+  def content_type
+    "multipart/form-data; boundary=#{@boundary}"
+  end
+
+  ##
+  # Returns the multipart request body parts
+  # @return [Array<String>]
+  def parts
+    params.map do |key, value|
+      locals = {key: key.to_s.b, boundary: boundary.to_s.b}
+      if value.respond_to?(:path)
+        file_part(key, value, locals)
+      else
+        data_part(key, value, locals)
+      end
+    end
+  end
+
+  ##
+  # Returns the multipart request body
+  # @return [String]
+  def body
+    io = StringIO.new("".b)
+    [*parts, StringIO.new("--#{@boundary}--\r\n".b)].each { IO.copy_stream(_1.tap(&:rewind), io) }
+    io.tap(&:rewind)
+  end
+
+  private
+
+  attr_reader :params
+
+  def attributes(file)
+    {
+      filename: File.basename(file.path).b,
+      content_type: LLM::Mime[file].b
+    }
+  end
+
+  def multipart_header(type:, locals:)
+    if type == :file
+      str = StringIO.new("".b)
+      str << "--#{locals[:boundary]}" \
+             "\r\n" \
+             "Content-Disposition: form-data; name=\"#{locals[:key]}\";" \
+             "filename=\"#{locals[:filename]}\"" \
+             "\r\n" \
+             "Content-Type: #{locals[:content_type]}" \
+             "\r\n\r\n"
+    elsif type == :data
+      str = StringIO.new("".b)
+      str << "--#{locals[:boundary]}" \
+             "\r\n" \
+             "Content-Disposition: form-data; name=\"#{locals[:key]}\"" \
+             "\r\n\r\n"
+    else
+      raise "unknown type: #{type}"
+    end
+  end
+
+  def file_part(key, file, locals)
+    locals = locals.merge(attributes(file))
+    multipart_header(type: :file, locals:).tap do |io|
+      IO.copy_stream(file.path, io)
+      io << "\r\n"
+    end
+  end
+
+  def data_part(key, value, locals)
+    locals = locals.merge(value:)
+    multipart_header(type: :data, locals:).tap do |io|
+      io << value.to_s
+      io << "\r\n"
+    end
+  end
+end
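
LLM::Multipart is marked @private and is used internally by the new files, audio, and images endpoints, but the class is self-contained enough to sketch by hand. The field names and endpoint path below are illustrative, not part of llm.rb:

  require "llm"
  require "net/http"

  # Values that respond to #path become file parts; everything else becomes a data part
  form = LLM::Multipart.new(purpose: "assistants", file: File.open("report.pdf", "rb"))
  body = form.body                        # StringIO: every part plus the closing boundary

  req = Net::HTTP::Post.new("/v1/files")  # illustrative path
  req["Content-Type"] = form.content_type # "multipart/form-data; boundary=BOUNDARY__..."
  req["Content-Length"] = body.string.bytesize.to_s
  req.body_stream = body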
data/lib/llm/provider.rb CHANGED
@@ -15,8 +15,7 @@
 # @see LLM::Provider::Gemini
 # @see LLM::Provider::Ollama
 class LLM::Provider
-  require_relative "http_client"
-  include LLM::HTTPClient
+  require "net/http"
 
   ##
   # @param [String] secret
@@ -44,62 +43,139 @@ class LLM::Provider
   end
 
   ##
+  # Provides an embedding
   # @param [String, Array<String>] input
   #  The input to embed
+  # @param [String] model
+  #  The embedding model to use
+  # @param [Hash] params
+  #  Other embedding parameters
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
   # @return [LLM::Response::Embedding]
-  def embed(input, **params)
+  def embed(input, model:, **params)
     raise NotImplementedError
   end
 
   ##
-  # Completes a given prompt using the LLM
+  # Provides an interface to the chat completions API
   # @example
   #   llm = LLM.openai(ENV["KEY"])
-  #   context = [
-  #     {role: "system", content: "Answer all of my questions"},
-  #     {role: "system", content: "Your name is Pablo, you are 25 years old and you are my amigo"},
+  #   messages = [
+  #     {role: "system", content: "Your task is to answer all of my questions"},
+  #     {role: "system", content: "Your answers should be short and concise"},
  #   ]
-  #   res = llm.complete "What is your name and what age are you?", :user, messages: context
+  #   res = llm.complete("Hello. What is the answer to 5 + 2 ?", :user, messages:)
  #   print "[#{res.choices[0].role}]", res.choices[0].content, "\n"
   # @param [String] prompt
   #  The input prompt to be completed
   # @param [Symbol] role
   #  The role of the prompt (e.g. :user, :system)
-  # @param [Array<Hash, LLM::Message>] messages
-  #  The messages to include in the completion
+  # @param [String] model
+  #  The model to use for the completion
+  # @param [Hash] params
+  #  Other completion parameters
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
   # @return [LLM::Response::Completion]
-  def complete(prompt, role = :user, **params)
+  def complete(prompt, role = :user, model:, **params)
     raise NotImplementedError
   end
 
   ##
-  # Starts a new lazy conversation
+  # Starts a new lazy chat powered by the chat completions API
+  # @note
+  #  This method creates a lazy version of a
+  #  {LLM::Chat LLM::Chat} object.
+  # @param prompt (see LLM::Provider#complete)
+  # @param role (see LLM::Provider#complete)
+  # @param model (see LLM::Provider#complete)
+  # @param [Hash] params
+  #  Other completion parameters to maintain throughout a chat
+  # @raise (see LLM::Provider#complete)
+  # @return [LLM::Chat]
+  def chat(prompt, role = :user, model: nil, **params)
+    LLM::Chat.new(self, params).lazy.chat(prompt, role)
+  end
+
+  ##
+  # Starts a new chat powered by the chat completions API
+  # @note
+  #  This method creates a non-lazy version of a
+  #  {LLM::Chat LLM::Chat} object.
+  # @param prompt (see LLM::Provider#complete)
+  # @param role (see LLM::Provider#complete)
+  # @param model (see LLM::Provider#complete)
+  # @param [Hash] params
+  #  Other completion parameters to maintain throughout a chat
+  # @raise (see LLM::Provider#complete)
+  # @return [LLM::Chat]
+  def chat!(prompt, role = :user, model: nil, **params)
+    LLM::Chat.new(self, params).chat(prompt, role)
+  end
+
+  ##
+  # Starts a new lazy chat powered by the responses API
   # @note
   #  This method creates a lazy variant of a
-  #  {LLM::Conversation LLM::Conversation} object.
+  #  {LLM::Chat LLM::Chat} object.
   # @param prompt (see LLM::Provider#complete)
   # @param role (see LLM::Provider#complete)
+  # @param model (see LLM::Provider#complete)
+  # @param [Hash] params
+  #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
-  # @return [LLM::LazyConversation]
-  def chat(prompt, role = :user, **params)
-    LLM::Conversation.new(self, params).lazy.chat(prompt, role)
+  # @return [LLM::Chat]
+  def respond(prompt, role = :user, model: nil, **params)
+    LLM::Chat.new(self, params).lazy.respond(prompt, role)
   end
 
   ##
-  # Starts a new conversation
+  # Starts a new chat powered by the responses API
   # @note
   #  This method creates a non-lazy variant of a
-  #  {LLM::Conversation LLM::Conversation} object.
+  #  {LLM::Chat LLM::Chat} object.
   # @param prompt (see LLM::Provider#complete)
   # @param role (see LLM::Provider#complete)
+  # @param model (see LLM::Provider#complete)
+  # @param [Hash] params
+  #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
-  # @return [LLM::Conversation]
-  def chat!(prompt, role = :user, **params)
-    LLM::Conversation.new(self, params).chat(prompt, role)
+  # @return [LLM::Chat]
+  def respond!(prompt, role = :user, model: nil, **params)
+    LLM::Chat.new(self, params).respond(prompt, role)
+  end
+
+  ##
+  # @note
+  #  Compared to the chat completions API, the responses API
+  #  can require less bandwidth on each turn, maintain state
+  #  server-side, and produce faster responses.
+  # @return [LLM::OpenAI::Responses]
+  #  Returns an interface to the responses API
+  def responses
+    raise NotImplementedError
+  end
+
+  ##
+  # @return [LLM::OpenAI::Images, LLM::Gemini::Images]
+  #  Returns an interface to the images API
+  def images
+    raise NotImplementedError
+  end
+
+  ##
+  # @return [LLM::OpenAI::Audio]
+  #  Returns an interface to the audio API
+  def audio
+    raise NotImplementedError
+  end
+
+  ##
+  # @return [LLM::OpenAI::Files]
+  #  Returns an interface to the files API
+  def files
+    raise NotImplementedError
   end
 
   ##
@@ -145,6 +221,32 @@ class LLM::Provider
     raise NotImplementedError
   end
 
+  ##
+  # Initiates a HTTP request
+  # @param [Net::HTTP] http
+  #  The HTTP object to use for the request
+  # @param [Net::HTTPRequest] req
+  #  The request to send
+  # @param [Proc] b
+  #  A block to yield the response to (optional)
+  # @return [Net::HTTPResponse]
+  #  The response from the server
+  # @raise [LLM::Error::Unauthorized]
+  #  When authentication fails
+  # @raise [LLM::Error::RateLimit]
+  #  When the rate limit is exceeded
+  # @raise [LLM::Error::ResponseError]
+  #  When any other unsuccessful status code is returned
+  # @raise [SystemCallError]
+  #  When there is a network error at the operating system level
+  def request(http, req, &b)
+    res = http.request(req, &b)
+    case res
+    when Net::HTTPOK then res
+    else error_handler.new(res).raise_error!
+    end
+  end
+
 
   ##
   # @param [String] provider
data/lib/llm/providers/anthropic/error_handler.rb CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 
 class LLM::Anthropic
+  ##
+  # @private
   class ErrorHandler
     ##
     # @return [Net::HTTPResponse]
@@ -25,7 +27,7 @@ class LLM::Anthropic
       when Net::HTTPTooManyRequests
         raise LLM::Error::RateLimit.new { _1.response = res }, "Too many requests"
       else
-        raise LLM::Error::BadResponse.new { _1.response = res }, "Unexpected response"
+        raise LLM::Error::ResponseError.new { _1.response = res }, "Unexpected response"
       end
     end
   end
data/lib/llm/providers/anthropic/format.rb CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 
 class LLM::Anthropic
+  ##
+  # @private
   module Format
     ##
     # @param [Array<LLM::Message>] messages