llm.rb 0.2.0 → 0.3.0

This diff shows the changes between two package versions as published to their public registry. It is provided for informational purposes only and reflects the publicly released contents of each version.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/README.md +264 -110
  3. data/lib/llm/buffer.rb +83 -0
  4. data/lib/llm/chat.rb +131 -0
  5. data/lib/llm/file.rb +26 -40
  6. data/lib/llm/http_client.rb +10 -5
  7. data/lib/llm/message.rb +14 -8
  8. data/lib/llm/mime.rb +54 -0
  9. data/lib/llm/multipart.rb +98 -0
  10. data/lib/llm/provider.rb +116 -12
  11. data/lib/llm/providers/anthropic/error_handler.rb +2 -0
  12. data/lib/llm/providers/anthropic/format.rb +9 -1
  13. data/lib/llm/providers/anthropic/response_parser.rb +3 -1
  14. data/lib/llm/providers/anthropic.rb +14 -5
  15. data/lib/llm/providers/gemini/audio.rb +77 -0
  16. data/lib/llm/providers/gemini/error_handler.rb +2 -0
  17. data/lib/llm/providers/gemini/files.rb +160 -0
  18. data/lib/llm/providers/gemini/format.rb +19 -7
  19. data/lib/llm/providers/gemini/images.rb +99 -0
  20. data/lib/llm/providers/gemini/response_parser.rb +27 -1
  21. data/lib/llm/providers/gemini.rb +62 -6
  22. data/lib/llm/providers/ollama/error_handler.rb +2 -0
  23. data/lib/llm/providers/ollama/format.rb +18 -4
  24. data/lib/llm/providers/ollama/response_parser.rb +3 -1
  25. data/lib/llm/providers/ollama.rb +30 -7
  26. data/lib/llm/providers/openai/audio.rb +97 -0
  27. data/lib/llm/providers/openai/error_handler.rb +2 -0
  28. data/lib/llm/providers/openai/files.rb +148 -0
  29. data/lib/llm/providers/openai/format.rb +26 -7
  30. data/lib/llm/providers/openai/images.rb +109 -0
  31. data/lib/llm/providers/openai/response_parser.rb +58 -5
  32. data/lib/llm/providers/openai/responses.rb +78 -0
  33. data/lib/llm/providers/openai.rb +52 -6
  34. data/lib/llm/providers/voyageai.rb +2 -2
  35. data/lib/llm/response/audio.rb +13 -0
  36. data/lib/llm/response/audio_transcription.rb +14 -0
  37. data/lib/llm/response/audio_translation.rb +14 -0
  38. data/lib/llm/response/download_file.rb +15 -0
  39. data/lib/llm/response/file.rb +42 -0
  40. data/lib/llm/response/filelist.rb +18 -0
  41. data/lib/llm/response/image.rb +29 -0
  42. data/lib/llm/response/output.rb +56 -0
  43. data/lib/llm/response.rb +18 -6
  44. data/lib/llm/utils.rb +19 -0
  45. data/lib/llm/version.rb +1 -1
  46. data/lib/llm.rb +5 -2
  47. data/llm.gemspec +1 -6
  48. data/spec/anthropic/completion_spec.rb +1 -1
  49. data/spec/gemini/completion_spec.rb +22 -1
  50. data/spec/gemini/conversation_spec.rb +31 -0
  51. data/spec/gemini/files_spec.rb +124 -0
  52. data/spec/gemini/images_spec.rb +47 -0
  53. data/spec/llm/conversation_spec.rb +133 -1
  54. data/spec/ollama/completion_spec.rb +1 -1
  55. data/spec/ollama/conversation_spec.rb +31 -0
  56. data/spec/openai/audio_spec.rb +55 -0
  57. data/spec/openai/completion_spec.rb +22 -1
  58. data/spec/openai/files_spec.rb +150 -0
  59. data/spec/openai/images_spec.rb +95 -0
  60. data/spec/openai/responses_spec.rb +51 -0
  61. data/spec/setup.rb +8 -0
  62. metadata +31 -51
  63. data/LICENSE.txt +0 -21
  64. data/lib/llm/conversation.rb +0 -50
  65. data/lib/llm/lazy_conversation.rb +0 -51
  66. data/lib/llm/message_queue.rb +0 -47
  67. data/spec/llm/lazy_conversation_spec.rb +0 -92

data/spec/llm/conversation_spec.rb CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 
-RSpec.describe LLM::Conversation do
+require "setup"
+
+RSpec.describe "LLM::Chat: non-lazy" do
   shared_examples "a multi-turn conversation" do
     context "when given a thread of messages" do
       let(:inputs) do
@@ -54,3 +56,133 @@ RSpec.describe LLM::Conversation do
     include_examples "a multi-turn conversation"
   end
 end
+
+RSpec.describe "LLM::Chat: lazy" do
+  let(:described_class) { LLM::Chat }
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:prompt) { "Keep your answers short and concise, and provide three answers to the three questions" }
+
+  context "when given completions" do
+    context "with gemini",
+            vcr: {cassette_name: "gemini/lazy_conversation/successful_response"} do
+      let(:provider) { LLM.gemini(token) }
+      let(:conversation) { described_class.new(provider).lazy }
+
+      context "when given a thread of messages" do
+        subject(:message) { conversation.messages.to_a[-1] }
+
+        before do
+          conversation.chat prompt
+          conversation.chat "What is 3+2 ?"
+          conversation.chat "What is 5+5 ?"
+          conversation.chat "What is 5+7 ?"
+        end
+
+        it "maintains a conversation" do
+          is_expected.to have_attributes(
+            role: "model",
+            content: "5\n10\n12\n"
+          )
+        end
+      end
+    end
+
+    context "with openai" do
+      let(:provider) { LLM.openai(token) }
+      let(:conversation) { described_class.new(provider).lazy }
+
+      context "when given a thread of messages",
+              vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response"} do
+        subject(:message) { conversation.recent_message }
+
+        before do
+          conversation.chat prompt, :system
+          conversation.chat "What is 3+2 ?"
+          conversation.chat "What is 5+5 ?"
+          conversation.chat "What is 5+7 ?"
+        end
+
+        it "maintains a conversation" do
+          is_expected.to have_attributes(
+            role: "assistant",
+            content: "1. 5 \n2. 10 \n3. 12 "
+          )
+        end
+      end
+
+      context "when given a specific model",
+              vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_o3_mini"} do
+        let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+
+        it "maintains the model throughout a conversation" do
+          conversation.chat(prompt, :system)
+          expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+          conversation.chat("What is 5+5?")
+          expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+        end
+      end
+    end
+
+    context "with ollama",
+            vcr: {cassette_name: "ollama/lazy_conversation/successful_response"} do
+      let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+      let(:conversation) { described_class.new(provider).lazy }
+
+      context "when given a thread of messages" do
+        subject(:message) { conversation.recent_message }
+
+        before do
+          conversation.chat prompt, :system
+          conversation.chat "What is 3+2 ?"
+          conversation.chat "What is 5+5 ?"
+          conversation.chat "What is 5+7 ?"
+        end
+
+        it "maintains a conversation" do
+          is_expected.to have_attributes(
+            role: "assistant",
+            content: "Here are the calculations:\n\n1. 3 + 2 = 5\n2. 5 + 5 = 10\n3. 5 + 7 = 12"
+          )
+        end
+      end
+    end
+  end
+
+  context "when given responses" do
+    context "with openai" do
+      let(:provider) { LLM.openai(token) }
+      let(:conversation) { described_class.new(provider).lazy }
+
+      context "when given a thread of messages",
+              vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response"} do
+        subject(:message) { conversation.recent_message }
+
+        before do
+          conversation.respond prompt, :developer
+          conversation.respond "What is 3+2 ?"
+          conversation.respond "What is 5+5 ?"
+          conversation.respond "What is 5+7 ?"
+        end
+
+        it "maintains a conversation" do
+          is_expected.to have_attributes(
+            role: "assistant",
+            content: "1. 3 + 2 = 5 \n2. 5 + 5 = 10 \n3. 5 + 7 = 12"
+          )
+        end
+      end
+
+      context "when given a specific model",
+              vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response_o3_mini"} do
+        let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]).lazy }
+
+        it "maintains the model throughout a conversation" do
+          conversation.respond(prompt, :developer)
+          expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+          conversation.respond("What is 5+5?")
+          expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
+        end
+      end
+    end
+  end
+end
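
The spec above doubles as the best available documentation for LLM::Chat, the class that replaces 0.2.0's LLM::Conversation and LLM::LazyConversation. A minimal sketch, assuming only the calls exercised by the spec (LLM_SECRET is the env var the specs themselves read):

    require "llm"

    llm  = LLM.openai(ENV["LLM_SECRET"])
    chat = LLM::Chat.new(llm).lazy   # lazy: requests are deferred until the thread is read

    chat.chat "Keep your answers short and concise", :system
    chat.chat "What is 3+2 ?"
    chat.chat "What is 5+5 ?"
    puts chat.recent_message.content # reading the thread flushes the buffered messages

Per the "when given responses" contexts, substituting #respond for #chat drives the same thread through OpenAI's Responses API instead of chat completions.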

data/spec/ollama/completion_spec.rb CHANGED
@@ -36,7 +36,7 @@ RSpec.describe "LLM::Ollama: completions" do
     end
 
     it "includes the response" do
-      expect(choice.extra[:completion]).to eq(response)
+      expect(choice.extra[:response]).to eq(response)
     end
   end
 end
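
The one-line change above is a small breaking rename worth calling out: the raw provider response attached to each choice now lives under extra[:response] rather than extra[:completion]. Illustratively:

    choice = llm.complete("Hello!", :user).choices[0]
    choice.extra[:completion]  # 0.2.0
    choice.extra[:response]    # 0.3.0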

data/spec/ollama/conversation_spec.rb ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::Chat: ollama" do
+  let(:described_class) { LLM::Chat }
+  let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+  let(:conversation) { described_class.new(provider, **params).lazy }
+
+  context "when asked to describe an image",
+          vcr: {cassette_name: "ollama/conversations/multimodal_response"} do
+    subject { conversation.last_message }
+
+    let(:params) { {model: "llava"} }
+    let(:image) { LLM::File("spec/fixtures/images/bluebook.png") }
+
+    before do
+      conversation.chat(image, :user)
+      conversation.chat("Describe the image with a short sentance", :user)
+    end
+
+    it "describes the image" do
+      is_expected.to have_attributes(
+        role: "assistant",
+        content: " The image is a graphic illustration of a book" \
+                 " with its pages spread out, symbolizing openness" \
+                 " or knowledge. "
+      )
+    end
+  end
+end
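
This new spec is also a recipe for multimodal chat: an LLM::File is passed to #chat as a message in its own right, followed by a text prompt about it. A sketch under the same assumptions as the spec, except that the Ollama host (the spec uses the author's private "eel.home.network") is swapped for a hypothetical localhost:

    llm  = LLM.ollama(nil, host: "localhost")
    chat = LLM::Chat.new(llm, model: "llava").lazy

    chat.chat LLM::File("spec/fixtures/images/bluebook.png"), :user
    chat.chat "Describe the image with a short sentence", :user
    puts chat.last_message.content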

data/spec/openai/audio_spec.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Audio" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation",
+          vcr: {cassette_name: "openai/audio/successful_create"} do
+    subject(:response) { provider.audio.create_speech(input: "A dog on a rocket to the moon") }
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Audio)
+    end
+
+    it "returns an audio" do
+      expect(response.audio).to be_instance_of(StringIO)
+    end
+  end
+
+  context "when given a successful transcription operation",
+          vcr: {cassette_name: "openai/audio/successful_transcription"} do
+    subject(:response) do
+      provider.audio.create_transcription(
+        file: LLM::File("spec/fixtures/audio/rocket.mp3")
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::AudioTranscription)
+    end
+
+    it "returns a transcription" do
+      expect(response.text).to eq("A dog on a rocket to the moon.")
+    end
+  end
+
+  context "when given a successful translation operation",
+          vcr: {cassette_name: "openai/audio/successful_translation"} do
+    subject(:response) do
+      provider.audio.create_translation(
+        file: LLM::File("spec/fixtures/audio/bismillah.mp3")
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::AudioTranslation)
+    end
+
+    it "returns a translation (Arabic => English)" do
+      expect(response.text).to eq("In the name of Allah, the Beneficent, the Merciful.")
+    end
+  end
+end
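
The three contexts above cover the entire new LLM::OpenAI::Audio surface: text-to-speech, transcription, and translation. A sketch using only the methods and keywords the spec exercises (the output file name and format are assumptions; the spec only asserts that response.audio is a StringIO):

    llm = LLM.openai(ENV["LLM_SECRET"])

    # Text => speech
    speech = llm.audio.create_speech(input: "A dog on a rocket to the moon")
    File.binwrite "speech.mp3", speech.audio.string

    # Speech => text, and speech => English text
    puts llm.audio.create_transcription(file: LLM::File("speech.mp3")).text
    puts llm.audio.create_translation(file: LLM::File("arabic.mp3")).text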

data/spec/openai/completion_spec.rb CHANGED
@@ -37,11 +37,32 @@ RSpec.describe "LLM::OpenAI: completions" do
       end
 
       it "includes the response" do
-        expect(choice.extra[:completion]).to eq(response)
+        expect(choice.extra[:response]).to eq(response)
       end
     end
   end
 
+  context "when given a thread of messages",
+          vcr: {cassette_name: "openai/completions/successful_response_thread"} do
+    subject(:response) do
+      openai.complete "What is your name? What age are you?", :user, messages: [
+        {role: "system", content: "Answer all of my questions"},
+        {role: "system", content: "Your name is Pablo, you are 25 years old and you are my amigo"}
+      ]
+    end
+
+    it "has choices" do
+      expect(response).to have_attributes(
+        choices: [
+          have_attributes(
+            role: "assistant",
+            content: "My name is Pablo, and I'm 25 years old! How can I help you today, amigo?"
+          )
+        ]
+      )
+    end
+  end
+
   context "when given a 'bad request' response",
           vcr: {cassette_name: "openai/completions/bad_request"} do
     subject(:response) { openai.complete(URI("/foobar.exe"), :user) }
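
Besides the extra[:completion] => extra[:response] rename, the added context documents a new messages: keyword on #complete that seeds a one-shot completion with prior messages, without building up an LLM::Chat. Straight from the spec:

    llm = LLM.openai(ENV["LLM_SECRET"])
    res = llm.complete "What is your name? What age are you?", :user, messages: [
      {role: "system", content: "Answer all of my questions"},
      {role: "system", content: "Your name is Pablo, you are 25 years old and you are my amigo"}
    ]
    puts res.choices[0].content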

data/spec/openai/files_spec.rb ADDED
@@ -0,0 +1,150 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Files" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation (haiku1.txt)",
+          vcr: {cassette_name: "openai/files/successful_create_haiku1"} do
+    subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku1.txt")) }
+    after { provider.files.delete(file:) }
+
+    it "is successful" do
+      expect(file).to be_instance_of(LLM::Response::File)
+    end
+
+    it "returns a file object" do
+      expect(file).to have_attributes(
+        id: instance_of(String),
+        filename: "haiku1.txt",
+        purpose: "assistants"
+      )
+    end
+  end
+
+  context "when given a successful create operation (haiku2.txt)",
+          vcr: {cassette_name: "openai/files/successful_create_haiku2"} do
+    subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku2.txt")) }
+    after { provider.files.delete(file:) }
+
+    it "is successful" do
+      expect(file).to be_instance_of(LLM::Response::File)
+    end
+
+    it "returns a file object" do
+      expect(file).to have_attributes(
+        id: instance_of(String),
+        filename: "haiku2.txt",
+        purpose: "assistants"
+      )
+    end
+  end
+
+  context "when given a successful delete operation (haiku3.txt)",
+          vcr: {cassette_name: "openai/files/successful_delete_haiku3"} do
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku3.txt")) }
+    subject { provider.files.delete(file:) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(OpenStruct)
+    end
+
+    it "returns deleted status" do
+      is_expected.to have_attributes(
+        deleted: true
+      )
+    end
+  end
+
+  context "when given a successful get operation (haiku4.txt)",
+          vcr: {cassette_name: "openai/files/successful_get_haiku4"} do
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku4.txt")) }
+    subject { provider.files.get(file:) }
+    after { provider.files.delete(file:) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::File)
+    end
+
+    it "returns a file object" do
+      is_expected.to have_attributes(
+        id: instance_of(String),
+        filename: "haiku4.txt",
+        purpose: "assistants"
+      )
+    end
+  end
+
+  context "when given a successful all operation",
+          vcr: {cassette_name: "openai/files/successful_all"} do
+    let!(:files) do
+      [
+        provider.files.create(file: LLM::File("spec/fixtures/documents/haiku1.txt")),
+        provider.files.create(file: LLM::File("spec/fixtures/documents/haiku2.txt"))
+      ]
+    end
+    subject(:file) { provider.files.all }
+    after { files.each { |file| provider.files.delete(file:) } }
+
+    it "is successful" do
+      expect(file).to be_instance_of(LLM::Response::FileList)
+    end
+
+    it "returns an array of file objects" do
+      expect(file).to match_array(
+        [
+          have_attributes(
+            id: instance_of(String),
+            filename: "haiku1.txt",
+            purpose: "assistants"
+          ),
+          have_attributes(
+            id: instance_of(String),
+            filename: "haiku2.txt",
+            purpose: "assistants"
+          )
+        ]
+      )
+    end
+  end
+
+  context "when asked to describe the contents of a file",
+          vcr: {cassette_name: "openai/files/describe_freebsd.sysctl.pdf"} do
+    subject { bot.last_message.content }
+    let(:bot) { LLM::Chat.new(provider).lazy }
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
+    after { provider.files.delete(file:) }
+
+    before do
+      bot.respond(file)
+      bot.respond("Describe the contents of the file to me")
+      bot.respond("Your summary should be no more than ten words")
+    end
+
+    it "describes the document" do
+      is_expected.to eq("FreeBSD system control nodes implementation and usage overview.")
+    end
+  end
+
+  context "when asked to describe the contents of a file",
+          vcr: {cassette_name: "openai/files/describe_freebsd.sysctl_2.pdf"} do
+    subject { bot.last_message.content }
+    let(:bot) { LLM::Chat.new(provider).lazy }
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
+    after { provider.files.delete(file:) }
+
+    before do
+      bot.respond([
+        "Describe the contents of the file to me",
+        "Your summary should be no more than ten words",
+        file
+      ])
+    end
+
+    it "describes the document" do
+      is_expected.to eq("FreeBSD kernel system control nodes overview and implementation.")
+    end
+  end
+end
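
Read top to bottom, the files spec walks the full lifecycle of the new LLM::OpenAI::Files class: create, get, all, delete, plus feeding an uploaded file into a chat. A condensed sketch built from those calls (fixture paths are the spec's own):

    llm  = LLM.openai(ENV["LLM_SECRET"])
    file = llm.files.create(file: LLM::File("spec/fixtures/documents/haiku1.txt"))
    file.id                  # => String; purpose defaults to "assistants"
    llm.files.get(file:)     # => LLM::Response::File
    llm.files.all            # => LLM::Response::FileList, matched like an array above

    # An uploaded file can itself be a message in a (lazy) chat
    bot = LLM::Chat.new(llm).lazy
    bot.respond(file)
    bot.respond("Describe the contents of the file to me")
    puts bot.last_message.content

    llm.files.delete(file:)  # => OpenStruct with deleted: true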

data/spec/openai/images_spec.rb ADDED
@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Images" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation (urls)",
+          vcr: {cassette_name: "openai/images/successful_create_urls"} do
+    subject(:response) { provider.images.create(prompt: "A dog on a rocket to the moon") }
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns an array of urls" do
+      expect(response.urls).to be_instance_of(Array)
+    end
+
+    it "returns a url" do
+      expect(response.urls[0]).to be_instance_of(String)
+    end
+  end
+
+  context "when given a successful create operation (base64)",
+          vcr: {cassette_name: "openai/images/successful_create_base64"} do
+    subject(:response) do
+      provider.images.create(
+        prompt: "A dog on a rocket to the moon",
+        response_format: "b64_json"
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns an array of images" do
+      expect(response.images).to be_instance_of(Array)
+    end
+
+    it "returns an encoded string" do
+      expect(response.images[0].encoded).to be_instance_of(String)
+    end
+
+    it "returns an binary string" do
+      expect(response.images[0].binary).to be_instance_of(String)
+    end
+  end
+
+  context "when given a successful variation operation",
+          vcr: {cassette_name: "openai/images/successful_variation"} do
+    subject(:response) do
+      provider.images.create_variation(
+        image: LLM::File("spec/fixtures/images/bluebook.png"),
+        n: 5
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns data" do
+      expect(response.urls.size).to eq(5)
+    end
+
+    it "returns multiple variations" do
+      response.urls.each { expect(_1).to be_instance_of(String) }
+    end
+  end
+
+  context "when given a successful edit",
+          vcr: {cassette_name: "openai/images/successful_edit"} do
+    subject(:response) do
+      provider.images.edit(
+        image: LLM::File("spec/fixtures/images/bluebook.png"),
+        prompt: "Add white background"
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns data" do
+      expect(response.urls).to be_instance_of(Array)
+    end
+
+    it "returns a url" do
+      expect(response.urls[0]).to be_instance_of(String)
+    end
+  end
+end
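
The images spec likewise maps the whole of LLM::OpenAI::Images: generation, variations, and edits, with two delivery modes for generated images. A sketch from the calls above (output file name is an assumption):

    llm = LLM.openai(ENV["LLM_SECRET"])

    # Generation returns URL strings by default ...
    llm.images.create(prompt: "A dog on a rocket to the moon").urls[0]

    # ... or base64 payloads when asked
    res = llm.images.create(prompt: "A dog on a rocket to the moon",
                            response_format: "b64_json")
    File.binwrite "dog.png", res.images[0].binary

    # Variations and edits both start from an existing image file
    llm.images.create_variation(image: LLM::File("spec/fixtures/images/bluebook.png"), n: 5).urls
    llm.images.edit(image: LLM::File("spec/fixtures/images/bluebook.png"), prompt: "Add white background").urls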

data/spec/openai/responses_spec.rb ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Responses" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation",
+          vcr: {cassette_name: "openai/responses/successful_create"} do
+    subject { provider.responses.create("Hello", :developer) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::Output)
+    end
+
+    it "has outputs" do
+      is_expected.to have_attributes(
+        outputs: [instance_of(LLM::Message)]
+      )
+    end
+  end
+
+  context "when given a successful get operation",
+          vcr: {cassette_name: "openai/responses/successful_get"} do
+    let(:response) { provider.responses.create("Hello", :developer) }
+    subject { provider.responses.get(response) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::Output)
+    end
+
+    it "has outputs" do
+      is_expected.to have_attributes(
+        outputs: [instance_of(LLM::Message)]
+      )
+    end
+  end
+
+  context "when given a successful delete operation",
+          vcr: {cassette_name: "openai/responses/successful_delete"} do
+    let(:response) { provider.responses.create("Hello", :developer) }
+    subject { provider.responses.delete(response) }
+
+    it "is successful" do
+      is_expected.to have_attributes(
+        deleted: true
+      )
+    end
+  end
+end
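
LLM::OpenAI::Responses gives OpenAI's Responses API the same create/get/delete shape as the files class, returning LLM::Response::Output objects whose outputs are LLM::Message values. A sketch:

    llm = LLM.openai(ENV["LLM_SECRET"])

    res = llm.responses.create("Hello", :developer)  # => LLM::Response::Output
    puts res.outputs[0].content                      # outputs hold LLM::Message objects

    llm.responses.get(res)     # re-fetch the stored response
    llm.responses.delete(res)  # => responds to #deleted with true
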
data/spec/setup.rb CHANGED
@@ -16,5 +16,13 @@ VCR.configure do |config|
   config.cassette_library_dir = "spec/fixtures/cassettes"
   config.hook_into :webmock
   config.configure_rspec_metadata!
+
+  ##
+  # scrub
   config.filter_sensitive_data("TOKEN") { ENV["LLM_SECRET"] }
+  config.before_record do
+    body = _1.response.body
+    body.gsub! %r|#{Regexp.escape("https://oaidalleapiprodscus.blob.core.windows.net/")}[^"]+|,
+               "https://openai.com/generated/image.png"
+  end
 end
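
Beyond filtering the LLM_SECRET token, the new before_record hook rewrites ephemeral, signed DALL·E asset URLs (oaidalleapiprodscus.blob.core.windows.net) to a stable placeholder before a cassette is written, so the recorded image responses neither leak signed query strings nor churn on re-record.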