llm.rb 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. checksums.yaml +4 -4
  2. data/README.md +264 -110
  3. data/lib/llm/buffer.rb +83 -0
  4. data/lib/llm/chat.rb +131 -0
  5. data/lib/llm/file.rb +26 -40
  6. data/lib/llm/http_client.rb +10 -5
  7. data/lib/llm/message.rb +14 -8
  8. data/lib/llm/mime.rb +54 -0
  9. data/lib/llm/multipart.rb +98 -0
  10. data/lib/llm/provider.rb +96 -19
  11. data/lib/llm/providers/anthropic/error_handler.rb +2 -0
  12. data/lib/llm/providers/anthropic/format.rb +2 -0
  13. data/lib/llm/providers/anthropic/response_parser.rb +3 -1
  14. data/lib/llm/providers/anthropic.rb +14 -5
  15. data/lib/llm/providers/gemini/audio.rb +77 -0
  16. data/lib/llm/providers/gemini/error_handler.rb +2 -0
  17. data/lib/llm/providers/gemini/files.rb +160 -0
  18. data/lib/llm/providers/gemini/format.rb +12 -6
  19. data/lib/llm/providers/gemini/images.rb +99 -0
  20. data/lib/llm/providers/gemini/response_parser.rb +27 -1
  21. data/lib/llm/providers/gemini.rb +62 -6
  22. data/lib/llm/providers/ollama/error_handler.rb +2 -0
  23. data/lib/llm/providers/ollama/format.rb +13 -5
  24. data/lib/llm/providers/ollama/response_parser.rb +3 -1
  25. data/lib/llm/providers/ollama.rb +30 -7
  26. data/lib/llm/providers/openai/audio.rb +97 -0
  27. data/lib/llm/providers/openai/error_handler.rb +2 -0
  28. data/lib/llm/providers/openai/files.rb +148 -0
  29. data/lib/llm/providers/openai/format.rb +21 -8
  30. data/lib/llm/providers/openai/images.rb +109 -0
  31. data/lib/llm/providers/openai/response_parser.rb +58 -5
  32. data/lib/llm/providers/openai/responses.rb +78 -0
  33. data/lib/llm/providers/openai.rb +52 -6
  34. data/lib/llm/providers/voyageai.rb +2 -2
  35. data/lib/llm/response/audio.rb +13 -0
  36. data/lib/llm/response/audio_transcription.rb +14 -0
  37. data/lib/llm/response/audio_translation.rb +14 -0
  38. data/lib/llm/response/download_file.rb +15 -0
  39. data/lib/llm/response/file.rb +42 -0
  40. data/lib/llm/response/filelist.rb +18 -0
  41. data/lib/llm/response/image.rb +29 -0
  42. data/lib/llm/response/output.rb +56 -0
  43. data/lib/llm/response.rb +18 -6
  44. data/lib/llm/utils.rb +19 -0
  45. data/lib/llm/version.rb +1 -1
  46. data/lib/llm.rb +5 -2
  47. data/llm.gemspec +1 -6
  48. data/spec/anthropic/completion_spec.rb +1 -1
  49. data/spec/gemini/completion_spec.rb +1 -1
  50. data/spec/gemini/conversation_spec.rb +31 -0
  51. data/spec/gemini/files_spec.rb +124 -0
  52. data/spec/gemini/images_spec.rb +47 -0
  53. data/spec/llm/conversation_spec.rb +101 -61
  54. data/spec/ollama/completion_spec.rb +1 -1
  55. data/spec/ollama/conversation_spec.rb +31 -0
  56. data/spec/openai/audio_spec.rb +55 -0
  57. data/spec/openai/completion_spec.rb +1 -1
  58. data/spec/openai/files_spec.rb +150 -0
  59. data/spec/openai/images_spec.rb +95 -0
  60. data/spec/openai/responses_spec.rb +51 -0
  61. data/spec/setup.rb +8 -0
  62. metadata +31 -49
  63. data/LICENSE.txt +0 -21
  64. data/lib/llm/conversation.rb +0 -90
  65. data/lib/llm/message_queue.rb +0 -54
data/lib/llm/response/filelist.rb ADDED
@@ -0,0 +1,18 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Response::FileList LLM::Response::FileList} class represents a
+   # list of file objects that are returned by a provider. It is an Enumerable
+   # object, and can be used to iterate over the file objects in a way that is
+   # similar to an array. Each element is an instance of OpenStruct.
+   class Response::FileList < Response
+     include Enumerable
+
+     attr_accessor :files
+
+     def each(&)
+       @files.each(&)
+     end
+   end
+ end
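Because the class includes Enumerable and delegates #each to @files, the full Enumerable API is available on a listing. A minimal usage sketch, assuming a Gemini provider and the files.all method exercised by the specs later in this diff:

  require "llm"

  llm  = LLM.gemini(ENV["LLM_SECRET"])
  list = llm.files.all                      # => LLM::Response::FileList
  list.each { |file| puts file.display_name }
  names = list.map(&:display_name)          # any Enumerable method works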
data/lib/llm/response/image.rb ADDED
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Response::Image LLM::Response::Image} class represents
+   # an image response. An image response might encapsulate one or more
+   # URLs, or a base64 encoded image -- depending on the provider.
+   class Response::Image < Response
+     ##
+     # Returns one or more image objects, or nil
+     # @return [Array<OpenStruct>, nil]
+     def images
+       parsed[:images].any? ? parsed[:images] : nil
+     end
+
+     ##
+     # Returns one or more image URLs, or nil
+     # @return [Array<String>, nil]
+     def urls
+       parsed[:urls].any? ? parsed[:urls] : nil
+     end
+
+     private
+
+     def parsed
+       @parsed ||= parse_image(body)
+     end
+   end
+ end
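Which accessor is populated depends on the provider: the Gemini image specs below return base64-encoded image objects, while other providers may return hosted URLs. A sketch of handling both cases (the output filename is illustrative):

  res = llm.images.create(prompt: "A dog on a rocket to the moon")
  if res.images                              # base64-encoded objects (e.g. Gemini)
    File.binwrite("rocket.png", res.images[0].binary)
  elsif res.urls                             # hosted URLs, provider-dependent
    res.urls.each { |url| puts url }
  end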
data/lib/llm/response/output.rb ADDED
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module LLM
+   class Response::Output < Response
+     ##
+     # @return [String]
+     #  Returns the id of the response
+     def id
+       parsed[:id]
+     end
+
+     ##
+     # @return [String]
+     #  Returns the model name
+     def model
+       parsed[:model]
+     end
+
+     ##
+     # @return [Array<LLM::Message>]
+     def outputs
+       parsed[:outputs]
+     end
+
+     ##
+     # @return [Integer]
+     #  Returns the input token count
+     def input_tokens
+       parsed[:input_tokens]
+     end
+
+     ##
+     # @return [Integer]
+     #  Returns the output token count
+     def output_tokens
+       parsed[:output_tokens]
+     end
+
+     ##
+     # @return [Integer]
+     #  Returns the total count of tokens
+     def total_tokens
+       parsed[:total_tokens]
+     end
+
+     private
+
+     ##
+     # @private
+     # @return [Hash]
+     #  Returns the parsed response from the provider
+     def parsed
+       @parsed ||= parse_output_response(body)
+     end
+   end
+ end
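LLM::Response::Output is the counterpart to the new OpenAI responses API (data/lib/llm/providers/openai/responses.rb in the file list). A sketch of reading usage data off one, assuming — as the conversation specs below suggest — that a lazy chat stores the provider response under extra[:response]:

  bot = LLM::Chat.new(llm).lazy
  bot.respond "Hello!"
  res = bot.last_message.extra[:response]    # assumed wiring, per the specs
  puts res.model
  puts "#{res.input_tokens} in, #{res.output_tokens} out, #{res.total_tokens} total"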
data/lib/llm/response.rb CHANGED
@@ -5,11 +5,14 @@ module LLM
     require "json"
     require_relative "response/completion"
     require_relative "response/embedding"
-
-    ##
-    # @return [Hash]
-    #  Returns the response body
-    attr_reader :body
+    require_relative "response/output"
+    require_relative "response/image"
+    require_relative "response/audio"
+    require_relative "response/audio_transcription"
+    require_relative "response/audio_translation"
+    require_relative "response/file"
+    require_relative "response/filelist"
+    require_relative "response/download_file"

     ##
     # @param [Net::HTTPResponse] res
@@ -18,7 +21,16 @@ module LLM
     #  Returns an instance of LLM::Response
     def initialize(res)
       @res = res
-      @body = JSON.parse(res.body)
+    end
+
+    ##
+    # Returns the response body
+    # @return [Hash, String]
+    def body
+      @body ||= case @res["content-type"]
+      when %r|\Aapplication/json\s*| then JSON.parse(@res.body)
+      else @res.body
+      end
     end
   end
 end
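The upshot of the new lazy #body: JSON responses still parse into a Hash, while anything else (an audio clip, a file download) passes through as a raw String. A standalone restatement of the dispatch logic:

  require "json"

  def parse_body(content_type, raw)
    case content_type
    when %r|\Aapplication/json\s*| then JSON.parse(raw)
    else raw
    end
  end

  parse_body("application/json", %({"ok": true})) # => {"ok"=>true}
  parse_body("audio/mpeg", "binary-bytes")        # => "binary-bytes", unparsed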
data/lib/llm/utils.rb ADDED
@@ -0,0 +1,19 @@
+ # frozen_string_literal: true
+
+ ##
+ # @private
+ module LLM::Utils
+   def camelcase(key)
+     key.to_s
+       .split("_")
+       .map.with_index { (_2 > 0) ? _1.capitalize : _1 }
+       .join
+   end
+
+   def snakecase(key)
+     key
+       .split(/([A-Z])/)
+       .map { (_1.size == 1) ? "_#{_1.downcase}" : _1 }
+       .join
+   end
+ end
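These helpers translate between Ruby's snake_case and the camelCase keys found in provider payloads (the Gemini format changes in this release touch the same area). The round trip works like this:

  require "llm"
  include LLM::Utils

  camelcase(:max_output_tokens)   # => "maxOutputTokens" (accepts Symbol or String)
  snakecase("maxOutputTokens")    # => "max_output_tokens" (expects a String)
  camelcase(snakecase("topK"))    # => "topK"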
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
-   VERSION = "0.2.1"
+   VERSION = "0.3.0"
  end
data/lib/llm.rb CHANGED
@@ -2,14 +2,17 @@

  module LLM
    require_relative "llm/version"
+   require_relative "llm/utils"
    require_relative "llm/error"
    require_relative "llm/message"
    require_relative "llm/response"
+   require_relative "llm/mime"
+   require_relative "llm/multipart"
    require_relative "llm/file"
    require_relative "llm/model"
    require_relative "llm/provider"
-   require_relative "llm/conversation"
-   require_relative "llm/message_queue"
+   require_relative "llm/chat"
+   require_relative "llm/buffer"
    require_relative "llm/core_ext/ostruct"

    module_function
data/llm.gemspec CHANGED
@@ -14,12 +14,11 @@ Gem::Specification.new do |spec|
      "flexible, and easy to use."
    spec.description = spec.summary
    spec.homepage = "https://github.com/llmrb/llm"
-   spec.license = "MIT"
+   spec.license = "0BSDL"
    spec.required_ruby_version = ">= 3.0.0"

    spec.metadata["homepage_uri"] = spec.homepage
    spec.metadata["source_code_uri"] = "https://github.com/llmrb/llm"
-   spec.metadata["changelog_uri"] = "https://github.com/llmrb/llm/blob/main/CHANGELOG.md"

    spec.files = Dir[
      "README.md", "LICENSE.txt",
@@ -29,10 +28,6 @@ Gem::Specification.new do |spec|
    ]
    spec.require_paths = ["lib"]

-   spec.add_runtime_dependency "net-http", "~> 0.6.0"
-   spec.add_runtime_dependency "json"
-   spec.add_runtime_dependency "yaml"
-
    spec.add_development_dependency "webmock", "~> 3.24.0"
    spec.add_development_dependency "yard", "~> 0.9.37"
    spec.add_development_dependency "kramdown", "~> 2.4"
data/spec/anthropic/completion_spec.rb CHANGED
@@ -37,7 +37,7 @@ RSpec.describe "LLM::Anthropic: completions" do
      end

      it "includes the response" do
-       expect(choice.extra[:completion]).to eq(response)
+       expect(choice.extra[:response]).to eq(response)
      end
    end
  end
data/spec/gemini/completion_spec.rb CHANGED
@@ -41,7 +41,7 @@ RSpec.describe "LLM::Gemini: completions" do
      end

      it "includes the response" do
-       expect(choice.extra[:completion]).to eq(response)
+       expect(choice.extra[:response]).to eq(response)
      end
    end
  end
data/spec/gemini/conversation_spec.rb ADDED
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Chat: gemini" do
+   let(:described_class) { LLM::Chat }
+   let(:provider) { LLM.gemini(token) }
+   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+   let(:conversation) { described_class.new(provider, **params).lazy }
+
+   context "when asked to describe an image",
+           vcr: {cassette_name: "gemini/conversations/multimodal_response"} do
+     subject { conversation.last_message }
+
+     let(:params) { {} }
+     let(:image) { LLM::File("spec/fixtures/images/bluebook.png") }
+
+     before do
+       conversation.chat(image, :user)
+       conversation.chat("Describe the image with a short sentance", :user)
+     end
+
+     it "describes the image" do
+       is_expected.to have_attributes(
+         role: "model",
+         content: "That's a simple illustration of a book " \
+                  "resting on a blue, X-shaped book stand.\n"
+       )
+     end
+   end
+ end
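The spec above shows the shape of multimodal chat in 0.3.0: file objects and strings go through the same #chat method. A condensed sketch outside RSpec, reusing the spec's fixture and token variable:

  bot = LLM::Chat.new(LLM.gemini(ENV["LLM_SECRET"])).lazy
  bot.chat LLM::File("spec/fixtures/images/bluebook.png"), :user
  bot.chat "Describe the image with a short sentence", :user
  puts bot.last_message.content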
data/spec/gemini/files_spec.rb ADDED
@@ -0,0 +1,124 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Gemini::Files" do
+   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+   let(:provider) { LLM.gemini(token) }
+
+   context "when given a successful create operation (bismillah.mp3)",
+           vcr: {cassette_name: "gemini/files/successful_create_bismillah"} do
+     subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
+     after { provider.files.delete(file:) }
+
+     it "is successful" do
+       expect(file).to be_instance_of(LLM::Response::File)
+     end
+
+     it "returns a file object" do
+       expect(file).to have_attributes(
+         name: instance_of(String),
+         display_name: "bismillah.mp3"
+       )
+     end
+   end
+
+   context "when given a successful delete operation (bismillah.mp3)",
+           vcr: {cassette_name: "gemini/files/successful_delete_bismillah"} do
+     let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
+     subject { provider.files.delete(file:) }
+
+     it "is successful" do
+       is_expected.to be_instance_of(Net::HTTPOK)
+     end
+   end
+
+   context "when given a successful get operation (bismillah.mp3)",
+           vcr: {cassette_name: "gemini/files/successful_get_bismillah"} do
+     let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
+     subject { provider.files.get(file:) }
+     after { provider.files.delete(file:) }
+
+     it "is successful" do
+       is_expected.to be_instance_of(LLM::Response::File)
+     end
+
+     it "returns a file object" do
+       is_expected.to have_attributes(
+         name: instance_of(String),
+         display_name: "bismillah.mp3"
+       )
+     end
+   end
+
+   context "when given a successful translation operation (bismillah.mp3)",
+           vcr: {cassette_name: "gemini/files/successful_translation_bismillah"} do
+     subject { bot.last_message.content }
+     let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
+     let(:bot) { LLM::Chat.new(provider).lazy }
+     after { provider.files.delete(file:) }
+
+     before do
+       bot.chat file
+       bot.chat "Translate the contents of the audio file into English"
+       bot.chat "The audio is referenced in the first message I sent to you"
+       bot.chat "Provide no other content except the translation"
+     end
+
+     it "translates the audio clip" do
+       is_expected.to eq("In the name of God, the Most Gracious, the Most Merciful.\n")
+     end
+   end
+
+   context "when given a successful translation operation (alhamdullilah.mp3)",
+           vcr: {cassette_name: "gemini/files/successful_translation_alhamdullilah"} do
+     subject { bot.last_message.content }
+     let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/alhamdullilah.mp3")) }
+     let(:bot) { LLM::Chat.new(provider).lazy }
+     after { provider.files.delete(file:) }
+
+     before do
+       bot.chat [
+         "Translate the contents of the audio file into English",
+         "Provide no other content except the translation",
+         file
+       ]
+     end
+
+     it "translates the audio clip" do
+       is_expected.to eq("All praise is due to Allah, Lord of the Worlds.\n")
+     end
+   end
+
+   context "when given a successful all operation",
+           vcr: {cassette_name: "gemini/files/successful_all"} do
+     let!(:files) do
+       [
+         provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")),
+         provider.files.create(file: LLM::File("spec/fixtures/audio/alhamdullilah.mp3"))
+       ]
+     end
+
+     subject(:response) { provider.files.all }
+     after { files.each { |file| provider.files.delete(file:) } }
+
+     it "is successful" do
+       expect(response).to be_instance_of(LLM::Response::FileList)
+     end
+
+     it "returns an array of file objects" do
+       expect(response).to match_array(
+         [
+           have_attributes(
+             name: instance_of(String),
+             display_name: "bismillah.mp3"
+           ),
+           have_attributes(
+             name: instance_of(String),
+             display_name: "alhamdullilah.mp3"
+           )
+         ]
+       )
+     end
+   end
+ end
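Read together, these specs cover the whole lifecycle of the new files API. Condensed into a plain script (a sketch using the same fixture paths and return types asserted above):

  llm  = LLM.gemini(ENV["LLM_SECRET"])
  file = llm.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3"))
  llm.files.get(file:)               # => LLM::Response::File
  llm.files.all                      # => LLM::Response::FileList

  bot = LLM::Chat.new(llm).lazy
  bot.chat file                      # reference the uploaded file in a chat
  bot.chat "Translate the contents of the audio file into English"
  puts bot.last_message.content

  llm.files.delete(file:)            # => Net::HTTPOK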
data/spec/gemini/images_spec.rb ADDED
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ require "setup"
+
+ RSpec.describe "LLM::Gemini::Images" do
+   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+   let(:provider) { LLM.gemini(token) }
+
+   context "when given a successful create operation",
+           vcr: {cassette_name: "gemini/images/successful_create"} do
+     subject(:response) { provider.images.create(prompt: "A dog on a rocket to the moon") }
+
+     it "is successful" do
+       expect(response).to be_instance_of(LLM::Response::Image)
+     end
+
+     it "returns an encoded string" do
+       expect(response.images[0].encoded).to be_instance_of(String)
+     end
+
+     it "returns a binary string" do
+       expect(response.images[0].binary).to be_instance_of(String)
+     end
+   end
+
+   context "when given a successful edit operation",
+           vcr: {cassette_name: "gemini/images/successful_edit"} do
+     subject(:response) do
+       provider.images.edit(
+         image: LLM::File("spec/fixtures/images/bluebook.png"),
+         prompt: "Book is floating in the clouds"
+       )
+     end
+
+     it "is successful" do
+       expect(response).to be_instance_of(LLM::Response::Image)
+     end
+
+     it "returns data" do
+       expect(response.images[0].encoded).to be_instance_of(String)
+     end
+
+     it "returns a url" do
+       expect(response.images[0].binary).to be_instance_of(String)
+     end
+   end
+ end
data/spec/llm/conversation_spec.rb CHANGED
@@ -2,7 +2,7 @@

  require "setup"

- RSpec.describe "LLM::Conversation: non-lazy" do
+ RSpec.describe "LLM::Chat: non-lazy" do
    shared_examples "a multi-turn conversation" do
      context "when given a thread of messages" do
        let(:inputs) do
@@ -57,91 +57,131 @@
    end
  end

- RSpec.describe "LLM::Conversation: lazy" do
-   let(:described_class) { LLM::Conversation }
+ RSpec.describe "LLM::Chat: lazy" do
+   let(:described_class) { LLM::Chat }
    let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
    let(:prompt) { "Keep your answers short and concise, and provide three answers to the three questions" }

-   context "with gemini",
-           vcr: {cassette_name: "gemini/lazy_conversation/successful_response"} do
-     let(:provider) { LLM.gemini(token) }
-     let(:conversation) { described_class.new(provider).lazy }
+   context "when given completions" do
+     context "with gemini",
+             vcr: {cassette_name: "gemini/lazy_conversation/successful_response"} do
+       let(:provider) { LLM.gemini(token) }
+       let(:conversation) { described_class.new(provider).lazy }

-     context "when given a thread of messages" do
-       subject(:message) { conversation.messages.to_a[-1] }
+       context "when given a thread of messages" do
+         subject(:message) { conversation.messages.to_a[-1] }

-       before do
-         conversation.chat prompt
-         conversation.chat "What is 3+2 ?"
-         conversation.chat "What is 5+5 ?"
-         conversation.chat "What is 5+7 ?"
-       end
+         before do
+           conversation.chat prompt
+           conversation.chat "What is 3+2 ?"
+           conversation.chat "What is 5+5 ?"
+           conversation.chat "What is 5+7 ?"
+         end

-       it "maintains a conversation" do
-         is_expected.to have_attributes(
-           role: "model",
-           content: "5\n10\n12\n"
-         )
+         it "maintains a conversation" do
+           is_expected.to have_attributes(
+             role: "model",
+             content: "5\n10\n12\n"
+           )
+         end
        end
      end
-   end

-   context "with openai" do
-     let(:provider) { LLM.openai(token) }
-     let(:conversation) { described_class.new(provider).lazy }
+     context "with openai" do
+       let(:provider) { LLM.openai(token) }
+       let(:conversation) { described_class.new(provider).lazy }

-     context "when given a thread of messages",
-             vcr: {cassette_name: "openai/lazy_conversation/successful_response"} do
-       subject(:message) { conversation.recent_message }
+       context "when given a thread of messages",
+               vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response"} do
+         subject(:message) { conversation.recent_message }

-       before do
-         conversation.chat prompt, :system
-         conversation.chat "What is 3+2 ?"
-         conversation.chat "What is 5+5 ?"
-         conversation.chat "What is 5+7 ?"
+         before do
+           conversation.chat prompt, :system
+           conversation.chat "What is 3+2 ?"
+           conversation.chat "What is 5+5 ?"
+           conversation.chat "What is 5+7 ?"
+         end
+
+         it "maintains a conversation" do
+           is_expected.to have_attributes(
+             role: "assistant",
+             content: "1. 5 \n2. 10 \n3. 12 "
+           )
+         end
        end

-       it "maintains a conversation" do
-         is_expected.to have_attributes(
-           role: "assistant",
-           content: "1. 5 \n2. 10 \n3. 12 "
-         )
+       context "when given a specific model",
+               vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_o3_mini"} do
+         let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+
+         it "maintains the model throughout a conversation" do
+           conversation.chat(prompt, :system)
+           expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+           conversation.chat("What is 5+5?")
+           expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+         end
        end
      end

-     context "when given a specific model",
-             vcr: {cassette_name: "openai/lazy_conversation/successful_response_o3_mini"} do
-       let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+     context "with ollama",
+             vcr: {cassette_name: "ollama/lazy_conversation/successful_response"} do
+       let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+       let(:conversation) { described_class.new(provider).lazy }

-       it "maintains the model throughout a conversation" do
-         conversation.chat(prompt, :system)
-         expect(conversation.recent_message.extra[:completion].model).to eq("o3-mini-2025-01-31")
-         conversation.chat("What is 5+5?")
-         expect(conversation.recent_message.extra[:completion].model).to eq("o3-mini-2025-01-31")
+       context "when given a thread of messages" do
+         subject(:message) { conversation.recent_message }
+
+         before do
+           conversation.chat prompt, :system
+           conversation.chat "What is 3+2 ?"
+           conversation.chat "What is 5+5 ?"
+           conversation.chat "What is 5+7 ?"
+         end
+
+         it "maintains a conversation" do
+           is_expected.to have_attributes(
+             role: "assistant",
+             content: "Here are the calculations:\n\n1. 3 + 2 = 5\n2. 5 + 5 = 10\n3. 5 + 7 = 12"
+           )
+         end
        end
      end
    end

-   context "with ollama",
-           vcr: {cassette_name: "ollama/lazy_conversation/successful_response"} do
-     let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-     let(:conversation) { described_class.new(provider).lazy }
+   context "when given responses" do
+     context "with openai" do
+       let(:provider) { LLM.openai(token) }
+       let(:conversation) { described_class.new(provider).lazy }

-     context "when given a thread of messages" do
-       subject(:message) { conversation.recent_message }
+       context "when given a thread of messages",
+               vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response"} do
+         subject(:message) { conversation.recent_message }

-       before do
-         conversation.chat prompt, :system
-         conversation.chat "What is 3+2 ?"
-         conversation.chat "What is 5+5 ?"
-         conversation.chat "What is 5+7 ?"
+         before do
+           conversation.respond prompt, :developer
+           conversation.respond "What is 3+2 ?"
+           conversation.respond "What is 5+5 ?"
+           conversation.respond "What is 5+7 ?"
+         end
+
+         it "maintains a conversation" do
+           is_expected.to have_attributes(
+             role: "assistant",
+             content: "1. 3 + 2 = 5 \n2. 5 + 5 = 10 \n3. 5 + 7 = 12"
+           )
+         end
        end

-       it "maintains a conversation" do
-         is_expected.to have_attributes(
-           role: "assistant",
-           content: "Here are the calculations:\n\n1. 3 + 2 = 5\n2. 5 + 5 = 10\n3. 5 + 7 = 12"
-         )
+       context "when given a specific model",
+               vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response_o3_mini"} do
+         let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+
+         it "maintains the model throughout a conversation" do
+           conversation.respond(prompt, :developer)
+           expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+           conversation.respond("What is 5+5?")
+           expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+         end
        end
      end
    end
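The reorganised spec makes the split visible: #chat drives the completions API while the new #respond drives the OpenAI responses API, with identical lazy-conversation mechanics. A side-by-side sketch of the two entry points, using only calls exercised above:

  llm = LLM.openai(ENV["LLM_SECRET"])

  completions = LLM::Chat.new(llm).lazy
  completions.chat "You are my math tutor", :system   # completions API
  completions.chat "What is 5+5?"
  puts completions.recent_message.content

  responses = LLM::Chat.new(llm).lazy
  responses.respond "You are my math tutor", :developer  # responses API
  responses.respond "What is 5+7?"
  puts responses.last_message.content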
data/spec/ollama/completion_spec.rb CHANGED
@@ -36,7 +36,7 @@ RSpec.describe "LLM::Ollama: completions" do
      end

      it "includes the response" do
-       expect(choice.extra[:completion]).to eq(response)
+       expect(choice.extra[:response]).to eq(response)
      end
    end
  end