llm.rb 0.4.0 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,261 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::Chat: non-lazy" do
-   shared_examples "a multi-turn conversation" do
-     context "when given a thread of messages" do
-       let(:inputs) do
-         [
-           LLM::Message.new(:system, "Provide concise, short answers about The Netherlands"),
-           LLM::Message.new(:user, "What is the capital of The Netherlands?"),
-           LLM::Message.new(:user, "How many people live in the capital?")
-         ]
-       end
-
-       let(:outputs) do
-         [
-           LLM::Message.new(:assistant, "Ok, got it"),
-           LLM::Message.new(:assistant, "The capital of The Netherlands is Amsterdam"),
-           LLM::Message.new(:assistant, "The population of Amsterdam is about 900,000")
-         ]
-       end
-
-       let(:messages) { [] }
-
-       it "maintains a conversation" do
-         bot = nil
-         inputs.zip(outputs).each_with_index do |(input, output), index|
-           expect(provider).to receive(:complete)
-             .with(input.content, instance_of(Symbol), messages:, model: provider.default_model, schema: nil)
-             .and_return(OpenStruct.new(choices: [output]))
-           bot = index.zero? ? provider.chat!(input.content, :system) : bot.chat(input.content)
-           messages.concat([input, output])
-         end
-       end
-     end
-   end
-
-   context "with openai" do
-     subject(:provider) { LLM.openai("") }
-     include_examples "a multi-turn conversation"
-   end
-
-   context "with gemini" do
-     subject(:provider) { LLM.gemini("") }
-     include_examples "a multi-turn conversation"
-   end
-
-   context "with anthropic" do
-     subject(:provider) { LLM.anthropic("") }
-     include_examples "a multi-turn conversation"
-   end
-
-   context "with ollama" do
-     subject(:provider) { LLM.ollama("") }
-     include_examples "a multi-turn conversation"
-   end
- end
-
- RSpec.describe "LLM::Chat: lazy" do
-   let(:described_class) { LLM::Chat }
-   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-   let(:prompt) do
-     "Keep your answers short and concise, and provide three answers to the three questions" \
-     "There should be one answer per line" \
-     "An answer should be a number, for example: 5" \
-     "Nothing else"
-   end
-
-   context "when given completions" do
-     context "with gemini",
-       vcr: {cassette_name: "gemini/lazy_conversation/successful_response"} do
-       let(:provider) { LLM.gemini(token) }
-       let(:conversation) { described_class.new(provider).lazy }
-
-       context "when given a thread of messages" do
-         subject(:message) { conversation.messages.to_a[-1] }
-
-         before do
-           conversation.chat prompt
-           conversation.chat "What is 3+2 ?"
-           conversation.chat "What is 5+5 ?"
-           conversation.chat "What is 5+7 ?"
-         end
-
-         it "maintains a conversation" do
-           is_expected.to have_attributes(
-             role: "model",
-             content: "5\n10\n12\n"
-           )
-         end
-       end
-     end
-
-     context "with openai" do
-       let(:provider) { LLM.openai(token) }
-       let(:conversation) { described_class.new(provider).lazy }
-
-       context "when given a thread of messages",
-         vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response"} do
-         subject(:message) { conversation.recent_message }
-
-         before do
-           conversation.chat prompt, :system
-           conversation.chat "What is 3+2 ?"
-           conversation.chat "What is 5+5 ?"
-           conversation.chat "What is 5+7 ?"
-         end
-
-         it "maintains a conversation" do
-           is_expected.to have_attributes(
-             role: "assistant",
-             content: %r|5\s*\n10\s*\n12\s*|
-           )
-         end
-       end
-
-       context "when given a specific model",
-         vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_o3_mini"} do
-         let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
-         let(:conversation) { described_class.new(provider, model:).lazy }
-
-         it "maintains the model throughout a conversation" do
-           conversation.chat(prompt, :system)
-           expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-           conversation.chat("What is 5+5?")
-           expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-         end
-       end
-     end
-
-     context "with ollama",
-       vcr: {cassette_name: "ollama/lazy_conversation/successful_response"} do
-       let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-       let(:conversation) { described_class.new(provider).lazy }
-
-       context "when given a thread of messages" do
-         subject(:message) { conversation.recent_message }
-
-         before do
-           conversation.chat prompt, :system
-           conversation.chat "What is 3+2 ?"
-           conversation.chat "What is 5+5 ?"
-           conversation.chat "What is 5+7 ?"
-         end
-
-         it "maintains a conversation" do
-           is_expected.to have_attributes(
-             role: "assistant",
-             content: "Here are the calculations:\n\n1. 3 + 2 = 5\n2. 5 + 5 = 10\n3. 5 + 7 = 12"
-           )
-         end
-       end
-     end
-   end
-
-   context "when given responses" do
-     context "with openai" do
-       let(:provider) { LLM.openai(token) }
-       let(:conversation) { described_class.new(provider).lazy }
-
-       context "when given a thread of messages",
-         vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response"} do
-         subject(:message) { conversation.recent_message }
-
-         before do
-           conversation.respond prompt, :developer
-           conversation.respond "What is 3+2 ?"
-           conversation.respond "What is 5+5 ?"
-           conversation.respond "What is 5+7 ?"
-         end
-
-         it "maintains a conversation" do
-           is_expected.to have_attributes(
-             role: "assistant",
-             content: %r|5\s*\n10\s*\n12\s*|
-           )
-         end
-       end
-
-       context "when given a specific model",
-         vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response_o3_mini"} do
-         let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
-         let(:conversation) { described_class.new(provider, model:).lazy }
-
-         it "maintains the model throughout a conversation" do
-           conversation.respond(prompt, :developer)
-           expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-           conversation.respond("What is 5+5?")
-           expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-         end
-       end
-     end
-   end
-
-   context "when given a schema as JSON" do
-     context "with openai" do
-       let(:provider) { LLM.openai(token) }
-       let(:conversation) { described_class.new(provider, schema:).lazy }
-
-       context "when given a schema",
-         vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_schema_netbsd"} do
-         subject(:message) { conversation.recent_message.content! }
-         let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
-
-         before do
-           conversation.chat "You secretly love NetBSD", :system
-           conversation.chat "What operating system is the best?", :user
-         end
-
-         it "formats the response" do
-           is_expected.to eq("os" => "NetBSD")
-         end
-       end
-     end
-
-     context "with gemini" do
-       let(:provider) { LLM.gemini(token) }
-       let(:conversation) { described_class.new(provider, schema:).lazy }
-
-       context "when given a schema",
-         vcr: {cassette_name: "gemini/lazy_conversation/completions/successful_response_schema_netbsd"} do
-         subject(:message) { conversation.recent_message.content! }
-         let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
-
-         before do
-           conversation.chat "You secretly love NetBSD", :user
-           conversation.chat "What operating system is the best?", :user
-         end
-
-         it "formats the response" do
-           is_expected.to eq("os" => "NetBSD")
-         end
-       end
-     end
-
-     context "with ollama" do
-       let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-       let(:conversation) { described_class.new(provider, schema:).lazy }
-
-       context "when given a schema",
-         vcr: {cassette_name: "ollama/lazy_conversation/completions/successful_response_schema_netbsd"} do
-         subject(:message) { conversation.recent_message.content! }
-         let(:schema) do
-           provider.schema.object({
-             os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required
-           })
-         end
-
-         before do
-           conversation.chat "You secretly love NetBSD", :system
-           conversation.chat "What operating system is the best?", :user
-         end
-
-         it "formats the response" do
-           is_expected.to eq("os" => "NetBSD")
-         end
-       end
-     end
-   end
- end
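For orientation: the removed spec above exercises LLM::Chat's lazy mode, which, judging from the spec's structure, queues messages and defers the request until the thread is read. A minimal sketch of that flow, reconstructed from the calls the spec makes; the require path, API key, and the exact buffering behavior are assumptions, and the prompts are placeholders:

require "llm"  # assumed require path for the llm.rb gem

provider     = LLM.openai(ENV["LLM_SECRET"])  # any provider from the spec works here
conversation = LLM::Chat.new(provider).lazy   # queue messages instead of sending eagerly

conversation.chat "Keep your answers short and concise", :system
conversation.chat "What is 5+5 ?"

# Reading the conversation is what forces the completion request
message = conversation.recent_message
message.role    #=> "assistant" ("model" for Gemini)
message.content #=> "10"

The schema contexts follow the same pattern, passing schema: to the constructor and reading the parsed result with recent_message.content!.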
@@ -1,43 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::Ollama: completions" do
-   let(:ollama) { LLM.ollama(nil, host: "eel.home.network") }
-
-   context "when given a successful response",
-     vcr: {cassette_name: "ollama/completions/successful_response"} do
-     subject(:response) { ollama.complete("Hello!", :user) }
-
-     it "returns a completion" do
-       expect(response).to be_a(LLM::Response::Completion)
-     end
-
-     it "returns a model" do
-       expect(response.model).to eq("llama3.2")
-     end
-
-     it "includes token usage" do
-       expect(response).to have_attributes(
-         prompt_tokens: 27,
-         completion_tokens: 26,
-         total_tokens: 53
-       )
-     end
-
-     context "with a choice" do
-       subject(:choice) { response.choices[0] }
-
-       it "has choices" do
-         expect(choice).to have_attributes(
-           role: "assistant",
-           content: "Hello! It's nice to meet you. Is there something I can help you with, or would you like to chat?"
-         )
-       end
-
-       it "includes the response" do
-         expect(choice.extra[:response]).to eq(response)
-       end
-     end
-   end
- end
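For reference, the completion object these assertions describe bundles the model name, token usage, and an array of choices on one response. A rough sketch using only calls the spec makes; the host is illustrative:

ollama   = LLM.ollama(nil, host: "localhost")  # illustrative host
response = ollama.complete("Hello!", :user)

response.model                       #=> e.g. "llama3.2"
response.total_tokens                # usage counters live directly on the response
response.choices[0].content          #=> the assistant's reply
response.choices[0].extra[:response] #=> each choice links back to its response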
@@ -1,31 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::Chat: ollama" do
-   let(:described_class) { LLM::Chat }
-   let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-   let(:conversation) { described_class.new(provider, **params).lazy }
-
-   context "when asked to describe an image",
-     vcr: {cassette_name: "ollama/conversations/multimodal_response"} do
-     subject { conversation.last_message }
-
-     let(:params) { {model: "llava"} }
-     let(:image) { LLM::File("spec/fixtures/images/bluebook.png") }
-
-     before do
-       conversation.chat(image, :user)
-       conversation.chat("Describe the image with a short sentance", :user)
-     end
-
-     it "describes the image" do
-       is_expected.to have_attributes(
-         role: "assistant",
-         content: " The image is a graphic illustration of a book" \
-           " with its pages spread out, symbolizing openness" \
-           " or knowledge. "
-       )
-     end
-   end
- end
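The multimodal flow above sends the image as its own user message before the textual prompt. A short sketch under the same assumptions; the host and file path are placeholders:

provider     = LLM.ollama(nil, host: "localhost")
conversation = LLM::Chat.new(provider, model: "llava").lazy

conversation.chat(LLM::File("path/to/image.png"), :user)  # attach the image first
conversation.chat("Describe the image", :user)            # then ask about it
puts conversation.last_message.content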
@@ -1,24 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::Ollama: embeddings" do
-   let(:ollama) { LLM.ollama(nil, host: "eel.home.network") }
-
-   context "when given a successful response",
-     vcr: {cassette_name: "ollama/embeddings/successful_response"} do
-     subject(:response) { ollama.embed(["This is a paragraph", "This is another one"]) }
-
-     it "returns an embedding" do
-       expect(response).to be_instance_of(LLM::Response::Embedding)
-     end
-
-     it "returns a model" do
-       expect(response.model).to eq("llama3.2")
-     end
-
-     it "has embeddings" do
-       expect(response.embeddings.size).to eq(2)
-     end
-   end
- end
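As the spec shows, embed accepts an array of strings and returns one vector per input. A minimal sketch; the host is illustrative:

ollama   = LLM.ollama(nil, host: "localhost")
response = ollama.embed(["This is a paragraph", "This is another one"])

response.embeddings.size #=> 2, one vector per input string
response.model           #=> e.g. "llama3.2"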
@@ -1,20 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::Ollama::Models" do
-   let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-
-   context "when given a successful list operation",
-     vcr: {cassette_name: "ollama/models/successful_list"} do
-     subject { provider.models.all }
-
-     it "is successful" do
-       is_expected.to be_instance_of(LLM::Response::ModelList)
-     end
-
-     it "returns a list of models" do
-       expect(subject.models).to all(be_a(LLM::Model))
-     end
-   end
- end
@@ -1,55 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::OpenAI::Audio" do
-   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-   let(:provider) { LLM.openai(token) }
-
-   context "when given a successful create operation",
-     vcr: {cassette_name: "openai/audio/successful_create"} do
-     subject(:response) { provider.audio.create_speech(input: "A dog on a rocket to the moon") }
-
-     it "is successful" do
-       expect(response).to be_instance_of(LLM::Response::Audio)
-     end
-
-     it "returns an audio" do
-       expect(response.audio).to be_instance_of(StringIO)
-     end
-   end
-
-   context "when given a successful transcription operation",
-     vcr: {cassette_name: "openai/audio/successful_transcription"} do
-     subject(:response) do
-       provider.audio.create_transcription(
-         file: LLM::File("spec/fixtures/audio/rocket.mp3")
-       )
-     end
-
-     it "is successful" do
-       expect(response).to be_instance_of(LLM::Response::AudioTranscription)
-     end
-
-     it "returns a transcription" do
-       expect(response.text).to eq("A dog on a rocket to the moon.")
-     end
-   end
-
-   context "when given a successful translation operation",
-     vcr: {cassette_name: "openai/audio/successful_translation"} do
-     subject(:response) do
-       provider.audio.create_translation(
-         file: LLM::File("spec/fixtures/audio/bismillah.mp3")
-       )
-     end
-
-     it "is successful" do
-       expect(response).to be_instance_of(LLM::Response::AudioTranslation)
-     end
-
-     it "returns a translation (Arabic => English)" do
-       expect(response.text).to eq("In the name of Allah, the Beneficent, the Merciful.")
-     end
-   end
- end
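Taken together, the three audio operations above map to three calls on provider.audio. A sketch reusing the spec's fixtures; the file paths and key are placeholders:

provider = LLM.openai(ENV["LLM_SECRET"])

speech = provider.audio.create_speech(input: "A dog on a rocket to the moon")
speech.audio #=> StringIO holding the generated audio

transcription = provider.audio.create_transcription(file: LLM::File("rocket.mp3"))
transcription.text #=> "A dog on a rocket to the moon."

translation = provider.audio.create_translation(file: LLM::File("bismillah.mp3"))
translation.text #=> the English rendering of the source audio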
@@ -1,116 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::OpenAI: completions" do
-   subject(:openai) { LLM.openai(token) }
-   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-
-   context "when given a successful response",
-     vcr: {cassette_name: "openai/completions/successful_response"} do
-     subject(:response) { openai.complete("Hello!", :user) }
-
-     it "returns a completion" do
-       expect(response).to be_a(LLM::Response::Completion)
-     end
-
-     it "returns a model" do
-       expect(response.model).to eq("gpt-4o-mini-2024-07-18")
-     end
-
-     it "includes token usage" do
-       expect(response).to have_attributes(
-         prompt_tokens: 9,
-         completion_tokens: 10,
-         total_tokens: 19
-       )
-     end
-
-     context "with a choice" do
-       subject(:choice) { response.choices[0] }
-
-       it "has choices" do
-         expect(choice).to have_attributes(
-           role: "assistant",
-           content: "Hello! How can I assist you today?"
-         )
-       end
-
-       it "includes the response" do
-         expect(choice.extra[:response]).to eq(response)
-       end
-     end
-   end
-
-   context "when given a thread of messages",
-     vcr: {cassette_name: "openai/completions/successful_response_thread"} do
-     subject(:response) do
-       openai.complete "What is your name? What age are you?", :user, messages: [
-         {role: "system", content: "Answer all of my questions"},
-         {role: "system", content: "Answer in the format: My name is <name> and I am <age> years old"},
-         {role: "system", content: "Your name is Pablo and you are 25 years old"}
-       ]
-     end
-
-     it "has choices" do
-       expect(response).to have_attributes(
-         choices: [
-           have_attributes(
-             role: "assistant",
-             content: %r|\AMy name is Pablo and I am 25 years old|
-           )
-         ]
-       )
-     end
-   end
-
-   context "when asked to describe a PDF document",
-     vcr: {cassette_name: "openai/completions/describe_pdf_document"} do
-     let(:file) { LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf") }
-     let(:response) do
-       openai.complete([
-         "This PDF document describes sysctl nodes on FreeBSD",
-         "Answer yes or no.",
-         "Nothing else",
-         file
-       ], :user)
-     end
-
-     subject { response.choices[0].content.downcase[0..2] }
-
-     it "is successful" do
-       is_expected.to eq("yes")
-     end
-   end
-
-   context "when given a 'bad request' response",
-     vcr: {cassette_name: "openai/completions/bad_request"} do
-     subject(:response) { openai.complete(URI("/foobar.exe"), :user) }
-
-     it "raises an error" do
-       expect { response }.to raise_error(LLM::Error::ResponseError)
-     end
-
-     it "includes the response" do
-       response
-     rescue LLM::Error => ex
-       expect(ex.response).to be_instance_of(Net::HTTPBadRequest)
-     end
-   end
-
-   context "when given an unauthorized response",
-     vcr: {cassette_name: "openai/completions/unauthorized_response"} do
-     subject(:response) { openai.complete(LLM::Message.new(:user, "Hello!")) }
-     let(:token) { "BADTOKEN" }
-
-     it "raises an error" do
-       expect { response }.to raise_error(LLM::Error::Unauthorized)
-     end
-
-     it "includes the response" do
-       response
-     rescue LLM::Error::Unauthorized => ex
-       expect(ex.response).to be_kind_of(Net::HTTPResponse)
-     end
-   end
- end
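The two failure contexts show the error contract these specs rely on: a failed request raises an LLM::Error subclass that carries the raw Net::HTTPResponse. A sketch of how a caller might use that, with a deliberately invalid key:

openai = LLM.openai("BADTOKEN")
begin
  openai.complete("Hello!", :user)
rescue LLM::Error::Unauthorized => ex
  ex.response #=> the underlying Net::HTTPResponse
end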
@@ -1,25 +0,0 @@
- # frozen_string_literal: true
-
- require "setup"
-
- RSpec.describe "LLM::OpenAI: embeddings" do
-   let(:openai) { LLM.openai(token) }
-   let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-
-   context "when given a successful response",
-     vcr: {cassette_name: "openai/embeddings/successful_response"} do
-     subject(:response) { openai.embed("Hello, world") }
-
-     it "returns an embedding" do
-       expect(response).to be_instance_of(LLM::Response::Embedding)
-     end
-
-     it "returns a model" do
-       expect(response.model).to eq("text-embedding-3-small")
-     end
-
-     it "has embeddings" do
-       expect(response.embeddings).to be_instance_of(Array)
-     end
-   end
- end