llm.rb 0.4.0 → 0.4.2
This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/LICENSE +17 -0
- data/README.md +3 -5
- data/lib/json/schema/array.rb +1 -2
- data/lib/json/schema/leaf.rb +27 -0
- data/lib/json/schema/version.rb +6 -0
- data/lib/json/schema.rb +24 -13
- data/lib/llm/version.rb +1 -1
- data/llm.gemspec +2 -3
- metadata +4 -25
- data/spec/anthropic/completion_spec.rb +0 -96
- data/spec/anthropic/embedding_spec.rb +0 -25
- data/spec/anthropic/models_spec.rb +0 -21
- data/spec/gemini/completion_spec.rb +0 -85
- data/spec/gemini/conversation_spec.rb +0 -31
- data/spec/gemini/embedding_spec.rb +0 -25
- data/spec/gemini/files_spec.rb +0 -124
- data/spec/gemini/images_spec.rb +0 -39
- data/spec/gemini/models_spec.rb +0 -21
- data/spec/llm/conversation_spec.rb +0 -261
- data/spec/ollama/completion_spec.rb +0 -43
- data/spec/ollama/conversation_spec.rb +0 -31
- data/spec/ollama/embedding_spec.rb +0 -24
- data/spec/ollama/models_spec.rb +0 -20
- data/spec/openai/audio_spec.rb +0 -55
- data/spec/openai/completion_spec.rb +0 -116
- data/spec/openai/embedding_spec.rb +0 -25
- data/spec/openai/files_spec.rb +0 -204
- data/spec/openai/images_spec.rb +0 -91
- data/spec/openai/models_spec.rb +0 -21
- data/spec/openai/responses_spec.rb +0 -51
- data/spec/readme_spec.rb +0 -61
- data/spec/setup.rb +0 -28
data/spec/gemini/files_spec.rb
DELETED
@@ -1,124 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Gemini::Files" do
-  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-  let(:provider) { LLM.gemini(token) }
-
-  context "when given a successful create operation (bismillah.mp3)",
-          vcr: {cassette_name: "gemini/files/successful_create_bismillah"} do
-    subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
-    after { provider.files.delete(file:) }
-
-    it "is successful" do
-      expect(file).to be_instance_of(LLM::Response::File)
-    end
-
-    it "returns a file object" do
-      expect(file).to have_attributes(
-        name: instance_of(String),
-        display_name: "bismillah.mp3"
-      )
-    end
-  end
-
-  context "when given a successful delete operation (bismillah.mp3)",
-          vcr: {cassette_name: "gemini/files/successful_delete_bismillah"} do
-    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
-    subject { provider.files.delete(file:) }
-
-    it "is successful" do
-      is_expected.to be_instance_of(Net::HTTPOK)
-    end
-  end
-
-  context "when given a successful get operation (bismillah.mp3)",
-          vcr: {cassette_name: "gemini/files/successful_get_bismillah"} do
-    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
-    subject { provider.files.get(file:) }
-    after { provider.files.delete(file:) }
-
-    it "is successful" do
-      is_expected.to be_instance_of(LLM::Response::File)
-    end
-
-    it "returns a file object" do
-      is_expected.to have_attributes(
-        name: instance_of(String),
-        display_name: "bismillah.mp3"
-      )
-    end
-  end
-
-  context "when given a successful translation operation (bismillah.mp3)",
-          vcr: {cassette_name: "gemini/files/successful_translation_bismillah"} do
-    subject { bot.last_message.content }
-    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")) }
-    let(:bot) { LLM::Chat.new(provider).lazy }
-    after { provider.files.delete(file:) }
-
-    before do
-      bot.chat file
-      bot.chat "Translate the contents of the audio file into English"
-      bot.chat "The audio is referenced in the first message I sent to you"
-      bot.chat "Provide no other content except the translation"
-    end
-
-    it "translates the audio clip" do
-      is_expected.to eq("In the name of Allah, the Most Gracious, the Most Merciful.\n")
-    end
-  end
-
-  context "when given a successful translation operation (alhamdullilah.mp3)",
-          vcr: {cassette_name: "gemini/files/successful_translation_alhamdullilah"} do
-    subject { bot.last_message.content }
-    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/audio/alhamdullilah.mp3")) }
-    let(:bot) { LLM::Chat.new(provider).lazy }
-    after { provider.files.delete(file:) }
-
-    before do
-      bot.chat [
-        "Translate the contents of the audio file into English",
-        "Provide no other content except the translation",
-        file
-      ]
-    end
-
-    it "translates the audio clip" do
-      is_expected.to eq("All praise is due to Allah, Lord of the worlds.\n")
-    end
-  end
-
-  context "when given a successful all operation",
-          vcr: {cassette_name: "gemini/files/successful_all"} do
-    let!(:files) do
-      [
-        provider.files.create(file: LLM::File("spec/fixtures/audio/bismillah.mp3")),
-        provider.files.create(file: LLM::File("spec/fixtures/audio/alhamdullilah.mp3"))
-      ]
-    end
-
-    subject(:response) { provider.files.all }
-    after { files.each { |file| provider.files.delete(file:) } }
-
-    it "is successful" do
-      expect(response).to be_instance_of(LLM::Response::FileList)
-    end
-
-    it "returns an array of file objects" do
-      expect(response).to match_array(
-        [
-          have_attributes(
-            name: instance_of(String),
-            display_name: "bismillah.mp3"
-          ),
-          have_attributes(
-            name: instance_of(String),
-            display_name: "alhamdullilah.mp3"
-          )
-        ]
-      )
-    end
-  end
-end
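
The deleted spec above is the clearest record of the Gemini file API as it shipped in 0.4.0: files are created from an LLM::File, carry a server-assigned name plus a display_name, can be listed and deleted, and can be passed directly into a chat. A minimal usage sketch reconstructed from those calls (the key handling and file path are illustrative, and the API may have moved on by 0.4.2):

    require "llm"

    # Assumes a Gemini key in the LLM_SECRET environment variable.
    provider = LLM.gemini(ENV["LLM_SECRET"])

    # Upload a local file; the response exposes #name and #display_name.
    file = provider.files.create(file: LLM::File("audio/bismillah.mp3"))

    # List every uploaded file (an LLM::Response::FileList).
    provider.files.all.each { |f| puts f.display_name }

    # A file can be sent into a conversation like any other message.
    bot = LLM::Chat.new(provider).lazy
    bot.chat file
    bot.chat "Translate the contents of the audio file into English"
    puts bot.last_message.content

    # Clean up; delete returns Net::HTTPOK on success.
    provider.files.delete(file:)
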
data/spec/gemini/images_spec.rb
DELETED
@@ -1,39 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Gemini::Images" do
-  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-  let(:provider) { LLM.gemini(token) }
-
-  context "when given a successful create operation",
-          vcr: {cassette_name: "gemini/images/successful_create"} do
-    subject(:response) { provider.images.create(prompt: "A dog on a rocket to the moon") }
-
-    it "is successful" do
-      expect(response).to be_instance_of(LLM::Response::Image)
-    end
-
-    it "returns an IO-like object" do
-      expect(response.images[0]).to be_instance_of(StringIO)
-    end
-  end
-
-  context "when given a successful edit operation",
-          vcr: {cassette_name: "gemini/images/successful_edit"} do
-    subject(:response) do
-      provider.images.edit(
-        image: LLM::File("spec/fixtures/images/bluebook.png"),
-        prompt: "Book is floating in the clouds"
-      )
-    end
-
-    it "is successful" do
-      expect(response).to be_instance_of(LLM::Response::Image)
-    end
-
-    it "returns an IO-like object" do
-      expect(response.images[0]).to be_instance_of(StringIO)
-    end
-  end
-end
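
The images spec exercised two operations, create and edit, both returning an LLM::Response::Image whose #images array holds StringIO objects. A short sketch based on those 0.4.0 calls (the output filenames are hypothetical):

    provider = LLM.gemini(ENV["LLM_SECRET"])

    # Generate an image from a text prompt.
    response = provider.images.create(prompt: "A dog on a rocket to the moon")
    File.binwrite("dog.png", response.images[0].string)

    # Edit an existing image according to a prompt.
    edited = provider.images.edit(
      image: LLM::File("images/bluebook.png"),
      prompt: "Book is floating in the clouds"
    )
    File.binwrite("bluebook-clouds.png", edited.images[0].string)
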
data/spec/gemini/models_spec.rb
DELETED
@@ -1,21 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Gemini::Models" do
-  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-  let(:provider) { LLM.gemini(token) }
-
-  context "when given a successful list operation",
-          vcr: {cassette_name: "gemini/models/successful_list"} do
-    subject { provider.models.all }
-
-    it "is successful" do
-      is_expected.to be_instance_of(LLM::Response::ModelList)
-    end
-
-    it "returns a list of models" do
-      expect(subject.models).to all(be_a(LLM::Model))
-    end
-  end
-end
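
Model listing is uniform across providers; the Ollama spec further below uses the identical pattern. A sketch, assuming LLM::Model exposes #id as the conversation spec below relies on:

    provider = LLM.gemini(ENV["LLM_SECRET"])
    list = provider.models.all            # LLM::Response::ModelList
    list.models.each { |model| puts model.id }
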
data/spec/llm/conversation_spec.rb
DELETED
@@ -1,261 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Chat: non-lazy" do
-  shared_examples "a multi-turn conversation" do
-    context "when given a thread of messages" do
-      let(:inputs) do
-        [
-          LLM::Message.new(:system, "Provide concise, short answers about The Netherlands"),
-          LLM::Message.new(:user, "What is the capital of The Netherlands?"),
-          LLM::Message.new(:user, "How many people live in the capital?")
-        ]
-      end
-
-      let(:outputs) do
-        [
-          LLM::Message.new(:assistant, "Ok, got it"),
-          LLM::Message.new(:assistant, "The capital of The Netherlands is Amsterdam"),
-          LLM::Message.new(:assistant, "The population of Amsterdam is about 900,000")
-        ]
-      end
-
-      let(:messages) { [] }
-
-      it "maintains a conversation" do
-        bot = nil
-        inputs.zip(outputs).each_with_index do |(input, output), index|
-          expect(provider).to receive(:complete)
-            .with(input.content, instance_of(Symbol), messages:, model: provider.default_model, schema: nil)
-            .and_return(OpenStruct.new(choices: [output]))
-          bot = index.zero? ? provider.chat!(input.content, :system) : bot.chat(input.content)
-          messages.concat([input, output])
-        end
-      end
-    end
-  end
-
-  context "with openai" do
-    subject(:provider) { LLM.openai("") }
-    include_examples "a multi-turn conversation"
-  end
-
-  context "with gemini" do
-    subject(:provider) { LLM.gemini("") }
-    include_examples "a multi-turn conversation"
-  end
-
-  context "with anthropic" do
-    subject(:provider) { LLM.anthropic("") }
-    include_examples "a multi-turn conversation"
-  end
-
-  context "with ollama" do
-    subject(:provider) { LLM.ollama("") }
-    include_examples "a multi-turn conversation"
-  end
-end
-
-RSpec.describe "LLM::Chat: lazy" do
-  let(:described_class) { LLM::Chat }
-  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-  let(:prompt) do
-    "Keep your answers short and concise, and provide three answers to the three questions" \
-    "There should be one answer per line" \
-    "An answer should be a number, for example: 5" \
-    "Nothing else"
-  end
-
-  context "when given completions" do
-    context "with gemini",
-            vcr: {cassette_name: "gemini/lazy_conversation/successful_response"} do
-      let(:provider) { LLM.gemini(token) }
-      let(:conversation) { described_class.new(provider).lazy }
-
-      context "when given a thread of messages" do
-        subject(:message) { conversation.messages.to_a[-1] }
-
-        before do
-          conversation.chat prompt
-          conversation.chat "What is 3+2 ?"
-          conversation.chat "What is 5+5 ?"
-          conversation.chat "What is 5+7 ?"
-        end
-
-        it "maintains a conversation" do
-          is_expected.to have_attributes(
-            role: "model",
-            content: "5\n10\n12\n"
-          )
-        end
-      end
-    end
-
-    context "with openai" do
-      let(:provider) { LLM.openai(token) }
-      let(:conversation) { described_class.new(provider).lazy }
-
-      context "when given a thread of messages",
-              vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response"} do
-        subject(:message) { conversation.recent_message }
-
-        before do
-          conversation.chat prompt, :system
-          conversation.chat "What is 3+2 ?"
-          conversation.chat "What is 5+5 ?"
-          conversation.chat "What is 5+7 ?"
-        end
-
-        it "maintains a conversation" do
-          is_expected.to have_attributes(
-            role: "assistant",
-            content: %r|5\s*\n10\s*\n12\s*|
-          )
-        end
-      end
-
-      context "when given a specific model",
-              vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_o3_mini"} do
-        let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
-        let(:conversation) { described_class.new(provider, model:).lazy }
-
-        it "maintains the model throughout a conversation" do
-          conversation.chat(prompt, :system)
-          expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-          conversation.chat("What is 5+5?")
-          expect(conversation.recent_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-        end
-      end
-    end
-
-    context "with ollama",
-            vcr: {cassette_name: "ollama/lazy_conversation/successful_response"} do
-      let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-      let(:conversation) { described_class.new(provider).lazy }
-
-      context "when given a thread of messages" do
-        subject(:message) { conversation.recent_message }
-
-        before do
-          conversation.chat prompt, :system
-          conversation.chat "What is 3+2 ?"
-          conversation.chat "What is 5+5 ?"
-          conversation.chat "What is 5+7 ?"
-        end
-
-        it "maintains a conversation" do
-          is_expected.to have_attributes(
-            role: "assistant",
-            content: "Here are the calculations:\n\n1. 3 + 2 = 5\n2. 5 + 5 = 10\n3. 5 + 7 = 12"
-          )
-        end
-      end
-    end
-  end
-
-  context "when given responses" do
-    context "with openai" do
-      let(:provider) { LLM.openai(token) }
-      let(:conversation) { described_class.new(provider).lazy }
-
-      context "when given a thread of messages",
-              vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response"} do
-        subject(:message) { conversation.recent_message }
-
-        before do
-          conversation.respond prompt, :developer
-          conversation.respond "What is 3+2 ?"
-          conversation.respond "What is 5+5 ?"
-          conversation.respond "What is 5+7 ?"
-        end
-
-        it "maintains a conversation" do
-          is_expected.to have_attributes(
-            role: "assistant",
-            content: %r|5\s*\n10\s*\n12\s*|
-          )
-        end
-      end
-
-      context "when given a specific model",
-              vcr: {cassette_name: "openai/lazy_conversation/responses/successful_response_o3_mini"} do
-        let(:model) { provider.models.all.find { _1.id == "o3-mini" } }
-        let(:conversation) { described_class.new(provider, model:).lazy }
-
-        it "maintains the model throughout a conversation" do
-          conversation.respond(prompt, :developer)
-          expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-          conversation.respond("What is 5+5?")
-          expect(conversation.last_message.extra[:response].model).to eq("o3-mini-2025-01-31")
-        end
-      end
-    end
-  end
-
-  context "when given a schema as JSON" do
-    context "with openai" do
-      let(:provider) { LLM.openai(token) }
-      let(:conversation) { described_class.new(provider, schema:).lazy }
-
-      context "when given a schema",
-              vcr: {cassette_name: "openai/lazy_conversation/completions/successful_response_schema_netbsd"} do
-        subject(:message) { conversation.recent_message.content! }
-        let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
-
-        before do
-          conversation.chat "You secretly love NetBSD", :system
-          conversation.chat "What operating system is the best?", :user
-        end
-
-        it "formats the response" do
-          is_expected.to eq("os" => "NetBSD")
-        end
-      end
-    end
-
-    context "with gemini" do
-      let(:provider) { LLM.gemini(token) }
-      let(:conversation) { described_class.new(provider, schema:).lazy }
-
-      context "when given a schema",
-              vcr: {cassette_name: "gemini/lazy_conversation/completions/successful_response_schema_netbsd"} do
-        subject(:message) { conversation.recent_message.content! }
-        let(:schema) { provider.schema.object({os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")}) }
-
-        before do
-          conversation.chat "You secretly love NetBSD", :user
-          conversation.chat "What operating system is the best?", :user
-        end
-
-        it "formats the response" do
-          is_expected.to eq("os" => "NetBSD")
-        end
-      end
-    end
-
-    context "with ollama" do
-      let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-      let(:conversation) { described_class.new(provider, schema:).lazy }
-
-      context "when given a schema",
-              vcr: {cassette_name: "ollama/lazy_conversation/completions/successful_response_schema_netbsd"} do
-        subject(:message) { conversation.recent_message.content! }
-        let(:schema) do
-          provider.schema.object({
-            os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD").required
-          })
-        end
-
-        before do
-          conversation.chat "You secretly love NetBSD", :system
-          conversation.chat "What operating system is the best?", :user
-        end
-
-        it "formats the response" do
-          is_expected.to eq("os" => "NetBSD")
-        end
-      end
-    end
-  end
-end
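
This spec documents three conversation styles in 0.4.0: eager chat via provider.chat!, lazy chat via LLM::Chat#lazy (messages are buffered and sent on demand), and schema-constrained replies. A condensed sketch of the schema flow, reconstructed from the calls above (key handling is illustrative):

    provider = LLM.openai(ENV["LLM_SECRET"])

    # Constrain replies to a JSON object whose "os" field is one of three values.
    schema = provider.schema.object({
      os: provider.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")
    })

    bot = LLM::Chat.new(provider, schema:).lazy
    bot.chat "You secretly love NetBSD", :system
    bot.chat "What operating system is the best?", :user

    # #content! parses the structured reply, e.g. {"os" => "NetBSD"}.
    puts bot.recent_message.content!
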
data/spec/ollama/completion_spec.rb
DELETED
@@ -1,43 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Ollama: completions" do
-  let(:ollama) { LLM.ollama(nil, host: "eel.home.network") }
-
-  context "when given a successful response",
-          vcr: {cassette_name: "ollama/completions/successful_response"} do
-    subject(:response) { ollama.complete("Hello!", :user) }
-
-    it "returns a completion" do
-      expect(response).to be_a(LLM::Response::Completion)
-    end
-
-    it "returns a model" do
-      expect(response.model).to eq("llama3.2")
-    end
-
-    it "includes token usage" do
-      expect(response).to have_attributes(
-        prompt_tokens: 27,
-        completion_tokens: 26,
-        total_tokens: 53
-      )
-    end
-
-    context "with a choice" do
-      subject(:choice) { response.choices[0] }
-
-      it "has choices" do
-        expect(choice).to have_attributes(
-          role: "assistant",
-          content: "Hello! It's nice to meet you. Is there something I can help you with, or would you like to chat?"
-        )
-      end
-
-      it "includes the response" do
-        expect(choice.extra[:response]).to eq(response)
-      end
-    end
-  end
-end
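
The completion spec shows the low-level #complete call and the shape of its return value: a model name, token counts, and an array of choices. A sketch against a local Ollama host (the spec recorded its cassettes against a private host; substitute your own):

    ollama = LLM.ollama(nil, host: "localhost")
    response = ollama.complete("Hello!", :user)  # LLM::Response::Completion

    puts response.model          # e.g. "llama3.2"
    puts response.total_tokens   # prompt_tokens + completion_tokens

    choice = response.choices[0]
    puts choice.role             # "assistant"
    puts choice.content          # the generated reply
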
data/spec/ollama/conversation_spec.rb
DELETED
@@ -1,31 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Chat: ollama" do
-  let(:described_class) { LLM::Chat }
-  let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-  let(:conversation) { described_class.new(provider, **params).lazy }
-
-  context "when asked to describe an image",
-          vcr: {cassette_name: "ollama/conversations/multimodal_response"} do
-    subject { conversation.last_message }
-
-    let(:params) { {model: "llava"} }
-    let(:image) { LLM::File("spec/fixtures/images/bluebook.png") }
-
-    before do
-      conversation.chat(image, :user)
-      conversation.chat("Describe the image with a short sentance", :user)
-    end
-
-    it "describes the image" do
-      is_expected.to have_attributes(
-        role: "assistant",
-        content: " The image is a graphic illustration of a book" \
-                 " with its pages spread out, symbolizing openness" \
-                 " or knowledge. "
-      )
-    end
-  end
-end
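
This spec is the gem's multimodal example: an image goes into the conversation as an ordinary message and a vision model (llava) describes it. A sketch with an illustrative host and image path:

    provider = LLM.ollama(nil, host: "localhost")
    bot = LLM::Chat.new(provider, model: "llava").lazy
    bot.chat(LLM::File("images/bluebook.png"), :user)
    bot.chat("Describe the image with a short sentence", :user)
    puts bot.last_message.content
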
data/spec/ollama/embedding_spec.rb
DELETED
@@ -1,24 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Ollama: embeddings" do
-  let(:ollama) { LLM.ollama(nil, host: "eel.home.network") }
-
-  context "when given a successful response",
-          vcr: {cassette_name: "ollama/embeddings/successful_response"} do
-    subject(:response) { ollama.embed(["This is a paragraph", "This is another one"]) }
-
-    it "returns an embedding" do
-      expect(response).to be_instance_of(LLM::Response::Embedding)
-    end
-
-    it "returns a model" do
-      expect(response.model).to eq("llama3.2")
-    end
-
-    it "has embeddings" do
-      expect(response.embeddings.size).to eq(2)
-    end
-  end
-end
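
The embeddings spec passes an array of strings and receives one vector per input. A sketch (host is illustrative):

    ollama = LLM.ollama(nil, host: "localhost")
    response = ollama.embed(["This is a paragraph", "This is another one"])
    puts response.model            # e.g. "llama3.2"
    puts response.embeddings.size  # => 2, one vector per input string
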
data/spec/ollama/models_spec.rb
DELETED
@@ -1,20 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::Ollama::Models" do
-  let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
-
-  context "when given a successful list operation",
-          vcr: {cassette_name: "ollama/models/successful_list"} do
-    subject { provider.models.all }
-
-    it "is successful" do
-      is_expected.to be_instance_of(LLM::Response::ModelList)
-    end
-
-    it "returns a list of models" do
-      expect(subject.models).to all(be_a(LLM::Model))
-    end
-  end
-end
data/spec/openai/audio_spec.rb
DELETED
@@ -1,55 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe "LLM::OpenAI::Audio" do
-  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
-  let(:provider) { LLM.openai(token) }
-
-  context "when given a successful create operation",
-          vcr: {cassette_name: "openai/audio/successful_create"} do
-    subject(:response) { provider.audio.create_speech(input: "A dog on a rocket to the moon") }
-
-    it "is successful" do
-      expect(response).to be_instance_of(LLM::Response::Audio)
-    end
-
-    it "returns an audio" do
-      expect(response.audio).to be_instance_of(StringIO)
-    end
-  end
-
-  context "when given a successful transcription operation",
-          vcr: {cassette_name: "openai/audio/successful_transcription"} do
-    subject(:response) do
-      provider.audio.create_transcription(
-        file: LLM::File("spec/fixtures/audio/rocket.mp3")
-      )
-    end
-
-    it "is successful" do
-      expect(response).to be_instance_of(LLM::Response::AudioTranscription)
-    end
-
-    it "returns a transcription" do
-      expect(response.text).to eq("A dog on a rocket to the moon.")
-    end
-  end
-
-  context "when given a successful translation operation",
-          vcr: {cassette_name: "openai/audio/successful_translation"} do
-    subject(:response) do
-      provider.audio.create_translation(
-        file: LLM::File("spec/fixtures/audio/bismillah.mp3")
-      )
-    end
-
-    it "is successful" do
-      expect(response).to be_instance_of(LLM::Response::AudioTranslation)
-    end
-
-    it "returns a translation (Arabic => English)" do
-      expect(response.text).to eq("In the name of Allah, the Beneficent, the Merciful.")
-    end
-  end
-end
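
The audio spec covers the three OpenAI audio operations in 0.4.0: speech synthesis, transcription, and translation to English. A sketch from those calls (the output filename is hypothetical):

    provider = LLM.openai(ENV["LLM_SECRET"])

    # Text -> speech; #audio is a StringIO holding the encoded audio.
    speech = provider.audio.create_speech(input: "A dog on a rocket to the moon")
    File.binwrite("speech.mp3", speech.audio.string)

    # Speech -> text, in the source language.
    transcription = provider.audio.create_transcription(
      file: LLM::File("audio/rocket.mp3")
    )
    puts transcription.text

    # Speech -> English text, regardless of the source language.
    translation = provider.audio.create_translation(
      file: LLM::File("audio/bismillah.mp3")
    )
    puts translation.text
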