llm.rb 0.3.0 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -31,7 +31,7 @@ class LLM::OpenAI
  # @param [String] model The model to use
  # @param [String] response_format The response format
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::Audio]
  def create_speech(input:, voice: "alloy", model: "gpt-4o-mini-tts", response_format: "mp3", **params)
  req = Net::HTTP::Post.new("/v1/audio/speech", headers)
@@ -51,13 +51,13 @@ class LLM::OpenAI
  # @param [LLM::File] file The input audio
  # @param [String] model The model to use
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::AudioTranscription]
  def create_transcription(file:, model: "whisper-1", **params)
  multi = LLM::Multipart.new(params.merge!(file:, model:))
  req = Net::HTTP::Post.new("/v1/audio/transcriptions", headers)
  req["content-type"] = multi.content_type
- req.body = multi.body
+ set_body_stream(req, multi.body)
  res = request(http, req)
  LLM::Response::AudioTranscription.new(res).tap { _1.text = _1.body["text"] }
  end
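Note: throughout this release, multipart endpoints switch from assigning a String body (req.body = multi.body) to streaming it (set_body_stream(req, multi.body)). A minimal sketch of what such a helper could look like, assuming Net::HTTP's standard body_stream= API; the gem's actual implementation may differ:

  require "net/http"
  require "stringio"

  # Hypothetical helper: stream the request body instead of buffering
  # the whole payload in a String. Net::HTTP needs a content-length
  # (or chunked transfer encoding) when body_stream is used.
  def set_body_stream(req, io)
    io = StringIO.new(io) if io.is_a?(String)
    req.body_stream = io
    req["content-length"] = io.size.to_s
  end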
@@ -73,13 +73,13 @@ class LLM::OpenAI
  # @param [LLM::File] file The input audio
  # @param [String] model The model to use
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::AudioTranslation]
  def create_translation(file:, model: "whisper-1", **params)
  multi = LLM::Multipart.new(params.merge!(file:, model:))
  req = Net::HTTP::Post.new("/v1/audio/translations", headers)
  req["content-type"] = multi.content_type
- req.body = multi.body
+ set_body_stream(req, multi.body)
  res = request(http, req)
  LLM::Response::AudioTranslation.new(res).tap { _1.text = _1.body["text"] }
  end
@@ -90,7 +90,7 @@ class LLM::OpenAI
  @provider.instance_variable_get(:@http)
  end

- [:headers, :request].each do |m|
+ [:headers, :request, :set_body_stream].each do |m|
  define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
  end
  end
@@ -27,7 +27,7 @@ class LLM::OpenAI
  when Net::HTTPTooManyRequests
  raise LLM::Error::RateLimit.new { _1.response = res }, "Too many requests"
  else
- raise LLM::Error::BadResponse.new { _1.response = res }, "Unexpected response"
+ raise LLM::Error::ResponseError.new { _1.response = res }, "Unexpected response"
  end
  end
  end
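Note: 0.3.2 renames LLM::Error::BadResponse to LLM::Error::ResponseError, here and in the VoyageAI error handler further below. A usage sketch for client code that rescues the renamed class (the response attribute is populated by the raising code above):

  begin
    llm.complete("Hello", :user)
  rescue LLM::Error::ResponseError => e  # was LLM::Error::BadResponse in 0.3.0
    warn "unexpected response: #{e.response.inspect}"
  end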
@@ -46,7 +46,7 @@ class LLM::OpenAI
  # end
  # @see https://platform.openai.com/docs/api-reference/files/list OpenAI docs
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::FileList]
  def all(**params)
  query = URI.encode_www_form(params)
@@ -67,13 +67,13 @@ class LLM::OpenAI
  # @param [File] file The file
  # @param [String] purpose The purpose of the file (see OpenAI docs)
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::File]
  def create(file:, purpose: "assistants", **params)
  multi = LLM::Multipart.new(params.merge!(file:, purpose:))
  req = Net::HTTP::Post.new("/v1/files", headers)
  req["content-type"] = multi.content_type
- req.body = multi.body
+ set_body_stream(req, multi.body)
  res = request(http, req)
  LLM::Response::File.new(res)
  end
@@ -87,7 +87,7 @@ class LLM::OpenAI
  # @see https://platform.openai.com/docs/api-reference/files/get OpenAI docs
  # @param [#id, #to_s] file The file ID
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::File]
  def get(file:, **params)
  file_id = file.respond_to?(:id) ? file.id : file
@@ -107,7 +107,7 @@ class LLM::OpenAI
  # @see https://platform.openai.com/docs/api-reference/files/content OpenAI docs
  # @param [#id, #to_s] file The file ID
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::DownloadFile]
  def download(file:, **params)
  query = URI.encode_www_form(params)
@@ -126,7 +126,7 @@ class LLM::OpenAI
  # print res.deleted, "\n"
  # @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
  # @param [#id, #to_s] file The file ID
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [OpenStruct] Response body
  def delete(file:)
  file_id = file.respond_to?(:id) ? file.id : file
@@ -141,7 +141,7 @@ class LLM::OpenAI
  @provider.instance_variable_get(:@http)
  end

- [:headers, :request].each do |m|
+ [:headers, :request, :set_body_stream].each do |m|
  define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
  end
  end
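Note: set_body_stream joins the list of provider methods that the Audio/Files/Images helper objects forward to. The forwarding idiom used above, shown in isolation (the class name is illustrative):

  class Facade
    def initialize(provider)
      @provider = provider
    end

    # Each listed method becomes a thin wrapper that forwards its
    # arguments and block to the wrapped provider object.
    [:headers, :request, :set_body_stream].each do |m|
      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
    end
  end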
@@ -29,18 +29,50 @@ class LLM::OpenAI
  # The formatted content
  def format_content(content, mode)
  if mode == :complete
- case content
- when Array then content.flat_map { format_content(_1, mode) }
- when URI then [{type: :image_url, image_url: {url: content.to_s}}]
- else [{type: :text, text: content.to_s}]
- end
+ format_complete(content)
  elsif mode == :response
- case content
- when Array then content.flat_map { format_content(_1, mode) }
- when URI then [{type: :image_url, image_url: {url: content.to_s}}]
- when LLM::Response::File then [{type: :input_file, file_id: content.id}]
- else [{type: :input_text, text: content.to_s}]
+ format_response(content)
+ end
+ end
+
+ def format_complete(content)
+ case content
+ when Array
+ content.flat_map { format_complete(_1) }
+ when URI
+ [{type: :image_url, image_url: {url: content.to_s}}]
+ when LLM::File
+ [{type: :image_url, image_url: {url: content.to_data_uri}}]
+ when LLM::Response::File
+ [{type: :file, file: {file_id: content.id}}]
+ when String
+ [{type: :text, text: content.to_s}]
+ when LLM::Message
+ format_complete(content.content)
+ else
+ raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+ "is not supported by the OpenAI chat completions API"
+ end
+ end
+
+ def format_response(content)
+ case content
+ when Array
+ content.flat_map { format_response(_1) }
+ when LLM::Response::File
+ file = LLM::File(content.filename)
+ if file.image?
+ [{type: :input_image, file_id: content.id}]
+ else
+ [{type: :input_file, file_id: content.id}]
  end
+ when String
+ [{type: :input_text, text: content.to_s}]
+ when LLM::Message
+ format_response(content.content)
+ else
+ raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+ "is not supported by the OpenAI responses API"
  end
  end
  end
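Note: the format_content refactor splits formatting into per-API helpers and adds support for LLM::File, LLM::Message and, for the responses API, image file IDs. To show the shapes produced, here is a runnable re-statement of two branches of format_complete; it is not the gem's actual module, just an illustration:

  require "uri"

  # Mirrors the URI and String branches of format_complete above.
  def demo_format(content)
    case content
    when URI then [{type: :image_url, image_url: {url: content.to_s}}]
    when String then [{type: :text, text: content.to_s}]
    end
  end

  p demo_format("Hello")
  #=> [{:type=>:text, :text=>"Hello"}]
  p demo_format(URI("https://example.com/cat.png"))
  #=> [{:type=>:image_url, :image_url=>{:url=>"https://example.com/cat.png"}}]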
@@ -44,7 +44,7 @@ class LLM::OpenAI
  # @param [String] prompt The prompt
  # @param [String] model The model to use
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::Image]
  def create(prompt:, model: "dall-e-3", **params)
  req = Net::HTTP::Post.new("/v1/images/generations", headers)
@@ -63,13 +63,13 @@ class LLM::OpenAI
  # @param [File] image The image to create variations from
  # @param [String] model The model to use
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::Image]
  def create_variation(image:, model: "dall-e-2", **params)
  multi = LLM::Multipart.new(params.merge!(image:, model:))
  req = Net::HTTP::Post.new("/v1/images/variations", headers)
  req["content-type"] = multi.content_type
- req.body = multi.body
+ set_body_stream(req, multi.body)
  res = request(http, req)
  LLM::Response::Image.new(res).extend(response_parser)
  end
@@ -85,13 +85,13 @@ class LLM::OpenAI
  # @param [String] prompt The prompt
  # @param [String] model The model to use
  # @param [Hash] params Other parameters (see OpenAI docs)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::Image]
  def edit(image:, prompt:, model: "dall-e-2", **params)
  multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
  req = Net::HTTP::Post.new("/v1/images/edits", headers)
  req["content-type"] = multi.content_type
- req.body = multi.body
+ set_body_stream(req, multi.body)
  res = request(http, req)
  LLM::Response::Image.new(res).extend(response_parser)
  end
@@ -102,7 +102,7 @@ class LLM::OpenAI
  @provider.instance_variable_get(:@http)
  end

- [:response_parser, :headers, :request].each do |m|
+ [:response_parser, :headers, :request, :set_body_stream].each do |m|
  define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
  end
  end
@@ -4,11 +4,32 @@ class LLM::OpenAI
  ##
  # The {LLM::OpenAI::Responses LLM::OpenAI::Responses} class provides a responses
  # object for interacting with [OpenAI's response API](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
+ # The responses API is similar to the chat completions API but it can maintain
+ # conversation state across multiple requests. This is useful when you want to
+ # save bandwidth and/or not maintain the message thread by yourself.
+ #
  # @example
+ # #!/usr/bin/env ruby
+ # require "llm"
+ #
  # llm = LLM.openai(ENV["KEY"])
  # res1 = llm.responses.create "Your task is to help me with math", :developer
  # res2 = llm.responses.create "5 + 5 = ?", :user, previous_response_id: res1.id
  # [res1,res2].each { llm.responses.delete(_1) }
+ # @example
+ # #!/usr/bin/env ruby
+ # require "llm"
+ #
+ # llm = LLM.openai(ENV["KEY"])
+ # file = llm.files.create file: LLM::File("/images/hat.png")
+ # res = llm.responses.create ["Describe the image", file]
+ # @example
+ # #!/usr/bin/env ruby
+ # require "llm"
+ #
+ # llm = LLM.openai(ENV["KEY"])
+ # file = llm.files.create file: LLM::File("/documents/freebsd.pdf")
+ # res = llm.responses.create ["Describe the document", file]
  class Responses
  include Format

@@ -27,7 +48,7 @@ class LLM::OpenAI
  # @param role (see LLM::Provider#complete)
  # @param model (see LLM::Provider#complete)
  # @param [Hash] params Response params
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::Output]
  def create(prompt, role = :user, model: "gpt-4o-mini", **params)
  params = {model:}.merge!(params)
@@ -42,7 +63,7 @@ class LLM::OpenAI
  # Get a response
  # @see https://platform.openai.com/docs/api-reference/responses/get OpenAI docs
  # @param [#id, #to_s] response Response ID
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [LLM::Response::Output]
  def get(response, **params)
  response_id = response.respond_to?(:id) ? response.id : response
@@ -56,7 +77,7 @@ class LLM::OpenAI
  # Deletes a response
  # @see https://platform.openai.com/docs/api-reference/responses/delete OpenAI docs
  # @param [#id, #to_s] response Response ID
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return [OpenStruct] Response body
  def delete(response)
  response_id = response.respond_to?(:id) ? response.id : response
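Note: as the respond_to?(:id) checks show, get and delete accept either a response object or a raw ID. A usage sketch (the "resp_abc123" ID is made up for illustration):

  llm = LLM.openai(ENV["KEY"])
  res = llm.responses.create "5 + 5 = ?", :user
  llm.responses.get(res)               # object form: #id is extracted
  llm.responses.delete("resp_abc123")  # a raw response ID string works too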
@@ -28,7 +28,7 @@ module LLM
  # @param input (see LLM::Provider#embed)
  # @param model (see LLM::Provider#embed)
  # @param params (see LLM::Provider#embed)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return (see LLM::Provider#embed)
  def embed(input, model: "text-embedding-3-small", **params)
  req = Net::HTTP::Post.new("/v1/embeddings", headers)
@@ -45,7 +45,7 @@ module LLM
  # @param model (see LLM::Provider#complete)
  # @param params (see LLM::Provider#complete)
  # @example (see LLM::Provider#complete)
- # @raise (see LLM::HTTPClient#request)
+ # @raise (see LLM::Provider#request)
  # @return (see LLM::Provider#complete)
  def complete(prompt, role = :user, model: "gpt-4o-mini", **params)
  params = {model:}.merge!(params)
@@ -25,7 +25,7 @@ class LLM::VoyageAI
  when Net::HTTPTooManyRequests
  raise LLM::Error::RateLimit.new { _1.response = res }, "Too many requests"
  else
- raise LLM::Error::BadResponse.new { _1.response = res }, "Unexpected response"
+ raise LLM::Error::ResponseError.new { _1.response = res }, "Unexpected response"
  end
  end
  end
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
- VERSION = "0.3.0"
+ VERSION = "0.3.2"
  end
@@ -66,7 +66,7 @@ RSpec.describe "LLM::Gemini::Files" do
  end

  it "translates the audio clip" do
- is_expected.to eq("In the name of God, the Most Gracious, the Most Merciful.\n")
+ is_expected.to eq("In the name of Allah, the Most Gracious, the Most Merciful.\n")
  end
  end

@@ -86,7 +86,7 @@ RSpec.describe "LLM::Gemini::Files" do
  end

  it "translates the audio clip" do
- is_expected.to eq("All praise is due to Allah, Lord of the Worlds.\n")
+ is_expected.to eq("All praise is due to Allah, Lord of the worlds.\n")
  end
  end

@@ -60,7 +60,12 @@ end
  RSpec.describe "LLM::Chat: lazy" do
  let(:described_class) { LLM::Chat }
  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
- let(:prompt) { "Keep your answers short and concise, and provide three answers to the three questions" }
+ let(:prompt) do
+ "Keep your answers short and concise, and provide three answers to the three questions" \
+ "There should be one answer per line" \
+ "An answer should be a number, for example: 5" \
+ "Nothing else"
+ end

  context "when given completions" do
  context "with gemini",
@@ -105,7 +110,7 @@ RSpec.describe "LLM::Chat: lazy" do
  it "maintains a conversation" do
  is_expected.to have_attributes(
  role: "assistant",
- content: "1. 5 \n2. 10 \n3. 12 "
+ content: %r|5\s*\n10\s*\n12\s*|
  )
  end
  end
@@ -167,7 +172,7 @@ RSpec.describe "LLM::Chat: lazy" do
  it "maintains a conversation" do
  is_expected.to have_attributes(
  role: "assistant",
- content: "1. 3 + 2 = 5 \n2. 5 + 5 = 10 \n3. 5 + 7 = 12"
+ content: %r|5\s*\n10\s*\n12\s*|
  )
  end
  end
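Note: the exact-string expectations above become regular expressions, which makes these live-API specs tolerant of whitespace drift in model output. What the new pattern does and does not accept, for example:

  pattern = %r|5\s*\n10\s*\n12\s*|
  pattern.match?("5\n10\n12")           #=> true
  pattern.match?("5  \n10 \n12 ")       #=> true (trailing whitespace tolerated)
  pattern.match?("1. 5\n2. 10\n3. 12")  #=> false (the old numbered format would fail)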
@@ -47,7 +47,8 @@ RSpec.describe "LLM::OpenAI: completions" do
  subject(:response) do
  openai.complete "What is your name? What age are you?", :user, messages: [
  {role: "system", content: "Answer all of my questions"},
- {role: "system", content: "Your name is Pablo, you are 25 years old and you are my amigo"}
+ {role: "system", content: "Answer in the format: My name is <name> and I am <age> years old"},
+ {role: "system", content: "Your name is Pablo and you are 25 years old"}
  ]
  end

@@ -56,7 +57,7 @@ RSpec.describe "LLM::OpenAI: completions" do
  choices: [
  have_attributes(
  role: "assistant",
- content: "My name is Pablo, and I'm 25 years old! How can I help you today, amigo?"
+ content: %r|\AMy name is Pablo and I am 25 years old|
  )
  ]
  )
@@ -68,7 +69,7 @@ RSpec.describe "LLM::OpenAI: completions" do
  subject(:response) { openai.complete(URI("/foobar.exe"), :user) }

  it "raises an error" do
- expect { response }.to raise_error(LLM::Error::BadResponse)
+ expect { response }.to raise_error(LLM::Error::ResponseError)
  end

  it "includes the response" do
@@ -80,7 +81,7 @@ RSpec.describe "LLM::OpenAI: completions" do

  context "when given an unauthorized response",
  vcr: {cassette_name: "openai/completions/unauthorized_response"} do
- subject(:response) { openai.complete(LLM::Message.new("Hello!", :user)) }
+ subject(:response) { openai.complete(LLM::Message.new(:user, "Hello!")) }
  let(:token) { "BADTOKEN" }

  it "raises an error" do
@@ -9,10 +9,11 @@ RSpec.describe "LLM::OpenAI::Files" do
  context "when given a successful create operation (haiku1.txt)",
  vcr: {cassette_name: "openai/files/successful_create_haiku1"} do
  subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku1.txt")) }
- after { provider.files.delete(file:) }

  it "is successful" do
  expect(file).to be_instance_of(LLM::Response::File)
+ ensure
+ provider.files.delete(file:)
  end

  it "returns a file object" do
@@ -21,16 +22,19 @@ RSpec.describe "LLM::OpenAI::Files" do
  filename: "haiku1.txt",
  purpose: "assistants"
  )
+ ensure
+ provider.files.delete(file:)
  end
  end

  context "when given a successful create operation (haiku2.txt)",
  vcr: {cassette_name: "openai/files/successful_create_haiku2"} do
  subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku2.txt")) }
- after { provider.files.delete(file:) }

  it "is successful" do
  expect(file).to be_instance_of(LLM::Response::File)
+ ensure
+ provider.files.delete(file:)
  end

  it "returns a file object" do
@@ -39,6 +43,8 @@ RSpec.describe "LLM::OpenAI::Files" do
  filename: "haiku2.txt",
  purpose: "assistants"
  )
+ ensure
+ provider.files.delete(file:)
  end
  end

@@ -62,10 +68,11 @@ RSpec.describe "LLM::OpenAI::Files" do
  vcr: {cassette_name: "openai/files/successful_get_haiku4"} do
  let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku4.txt")) }
  subject { provider.files.get(file:) }
- after { provider.files.delete(file:) }

  it "is successful" do
  is_expected.to be_instance_of(LLM::Response::File)
+ ensure
+ provider.files.delete(file:)
  end

  it "returns a file object" do
@@ -74,6 +81,8 @@ RSpec.describe "LLM::OpenAI::Files" do
  filename: "haiku4.txt",
  purpose: "assistants"
  )
+ ensure
+ provider.files.delete(file:)
  end
  end

@@ -85,15 +94,16 @@ RSpec.describe "LLM::OpenAI::Files" do
  provider.files.create(file: LLM::File("spec/fixtures/documents/haiku2.txt"))
  ]
  end
- subject(:file) { provider.files.all }
- after { files.each { |file| provider.files.delete(file:) } }
+ subject(:filelist) { provider.files.all }

  it "is successful" do
- expect(file).to be_instance_of(LLM::Response::FileList)
+ expect(filelist).to be_instance_of(LLM::Response::FileList)
+ ensure
+ files.each { |file| provider.files.delete(file:) }
  end

  it "returns an array of file objects" do
- expect(file).to match_array(
+ expect(filelist.files[0..1]).to match_array(
  [
  have_attributes(
  id: instance_of(String),
@@ -107,44 +117,88 @@ RSpec.describe "LLM::OpenAI::Files" do
  )
  ]
  )
+ ensure
+ files.each { |file| provider.files.delete(file:) }
  end
  end

  context "when asked to describe the contents of a file",
  vcr: {cassette_name: "openai/files/describe_freebsd.sysctl.pdf"} do
- subject { bot.last_message.content }
+ subject { bot.last_message.content.downcase[0..2] }
  let(:bot) { LLM::Chat.new(provider).lazy }
  let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
- after { provider.files.delete(file:) }

  before do
  bot.respond(file)
- bot.respond("Describe the contents of the file to me")
- bot.respond("Your summary should be no more than ten words")
+ bot.respond("Is this PDF document about FreeBSD?")
+ bot.respond("Answer with yes or no. Nothing else.")
  end

  it "describes the document" do
- is_expected.to eq("FreeBSD system control nodes implementation and usage overview.")
+ is_expected.to eq("yes")
+ ensure
+ provider.files.delete(file:)
  end
  end

  context "when asked to describe the contents of a file",
  vcr: {cassette_name: "openai/files/describe_freebsd.sysctl_2.pdf"} do
- subject { bot.last_message.content }
+ subject { bot.last_message.content.downcase[0..2] }
  let(:bot) { LLM::Chat.new(provider).lazy }
  let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
- after { provider.files.delete(file:) }

  before do
  bot.respond([
- "Describe the contents of the file to me",
- "Your summary should be no more than ten words",
+ "Is this PDF document about FreeBSD?",
+ "Answer with yes or no. Nothing else.",
  file
  ])
  end

  it "describes the document" do
- is_expected.to eq("FreeBSD kernel system control nodes overview and implementation.")
+ is_expected.to eq("yes")
+ ensure
+ provider.files.delete(file:)
+ end
+ end
+
+ context "when asked to describe the contents of a file",
+ vcr: {cassette_name: "openai/files/describe_freebsd.sysctl_3.pdf"} do
+ subject { bot.last_message.content.downcase[0..2] }
+ let(:bot) { LLM::Chat.new(provider).lazy }
+ let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
+
+ before do
+ bot.chat(file)
+ bot.chat("Is this PDF document about FreeBSD?")
+ bot.chat("Answer with yes or no. Nothing else.")
+ end
+
+ it "describes the document" do
+ is_expected.to eq("yes")
+ ensure
+ provider.files.delete(file:)
+ end
+ end
+
+ context "when asked to describe the contents of a file",
+ vcr: {cassette_name: "openai/files/describe_freebsd.sysctl_4.pdf"} do
+ subject { bot.last_message.content.downcase[0..2] }
+ let(:bot) { LLM::Chat.new(provider).lazy }
+ let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
+
+ before do
+ bot.chat([
+ "Is this PDF document about FreeBSD?",
+ "Answer with yes or no. Nothing else.",
+ file
+ ])
+ end
+
+ it "describes the document" do
+ is_expected.to eq("yes")
+ ensure
+ provider.files.delete(file:)
  end
  end
  end
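Note: a recurring change across these specs is that cleanup moves from RSpec after hooks into the example body via ensure, so the delete request runs inside the example itself, plausibly so it is recorded in the same VCR cassette as the assertions; the diff does not state a motivation. The pattern, distilled:

  it "is successful" do
    expect(file).to be_instance_of(LLM::Response::File)
  ensure
    provider.files.delete(file:)  # runs whether or not the expectation fails
  end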
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
- version: 0.3.0
+ version: 0.3.2
  platform: ruby
  authors:
  - Antar Azri
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-04-25 00:00:00.000000000 Z
+ date: 2025-04-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: webmock
@@ -154,7 +154,6 @@ files:
  - lib/llm/core_ext/ostruct.rb
  - lib/llm/error.rb
  - lib/llm/file.rb
- - lib/llm/http_client.rb
  - lib/llm/message.rb
  - lib/llm/mime.rb
  - lib/llm/model.rb
@@ -1,34 +0,0 @@
- # frozen_string_literal: true
-
- module LLM
- ##
- # @private
- module HTTPClient
- require "net/http"
- ##
- # Initiates a HTTP request
- # @param [Net::HTTP] http
- # The HTTP object to use for the request
- # @param [Net::HTTPRequest] req
- # The request to send
- # @param [Proc] b
- # A block to yield the response to (optional)
- # @return [Net::HTTPResponse]
- # The response from the server
- # @raise [LLM::Error::Unauthorized]
- # When authentication fails
- # @raise [LLM::Error::RateLimit]
- # When the rate limit is exceeded
- # @raise [LLM::Error::BadResponse]
- # When any other unsuccessful status code is returned
- # @raise [SystemCallError]
- # When there is a network error at the operating system level
- def request(http, req, &b)
- res = http.request(req, &b)
- case res
- when Net::HTTPOK then res
- else error_handler.new(res).raise_error!
- end
- end
- end
- end