llm.rb 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. data/README.md +85 -24
  3. data/lib/llm/conversation.rb +62 -10
  4. data/lib/llm/core_ext/ostruct.rb +0 -0
  5. data/lib/llm/error.rb +0 -0
  6. data/lib/llm/file.rb +0 -0
  7. data/lib/llm/http_client.rb +0 -0
  8. data/lib/llm/message.rb +1 -1
  9. data/lib/llm/message_queue.rb +18 -11
  10. data/lib/llm/model.rb +7 -0
  11. data/lib/llm/provider.rb +144 -98
  12. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  13. data/lib/llm/providers/anthropic/format.rb +7 -1
  14. data/lib/llm/providers/anthropic/response_parser.rb +0 -0
  15. data/lib/llm/providers/anthropic.rb +31 -15
  16. data/lib/llm/providers/gemini/error_handler.rb +0 -0
  17. data/lib/llm/providers/gemini/format.rb +7 -1
  18. data/lib/llm/providers/gemini/response_parser.rb +0 -0
  19. data/lib/llm/providers/gemini.rb +25 -14
  20. data/lib/llm/providers/ollama/error_handler.rb +0 -0
  21. data/lib/llm/providers/ollama/format.rb +7 -1
  22. data/lib/llm/providers/ollama/response_parser.rb +13 -0
  23. data/lib/llm/providers/ollama.rb +32 -8
  24. data/lib/llm/providers/openai/error_handler.rb +0 -0
  25. data/lib/llm/providers/openai/format.rb +7 -1
  26. data/lib/llm/providers/openai/response_parser.rb +5 -3
  27. data/lib/llm/providers/openai.rb +22 -12
  28. data/lib/llm/providers/voyageai/error_handler.rb +32 -0
  29. data/lib/llm/providers/voyageai/response_parser.rb +13 -0
  30. data/lib/llm/providers/voyageai.rb +44 -0
  31. data/lib/llm/response/completion.rb +0 -0
  32. data/lib/llm/response/embedding.rb +0 -0
  33. data/lib/llm/response.rb +0 -0
  34. data/lib/llm/version.rb +1 -1
  35. data/lib/llm.rb +19 -9
  36. data/llm.gemspec +6 -1
  37. data/share/llm/models/anthropic.yml +35 -0
  38. data/share/llm/models/gemini.yml +35 -0
  39. data/share/llm/models/ollama.yml +155 -0
  40. data/share/llm/models/openai.yml +46 -0
  41. data/spec/anthropic/completion_spec.rb +11 -27
  42. data/spec/anthropic/embedding_spec.rb +25 -0
  43. data/spec/gemini/completion_spec.rb +34 -29
  44. data/spec/gemini/embedding_spec.rb +4 -12
  45. data/spec/llm/conversation_spec.rb +93 -1
  46. data/spec/ollama/completion_spec.rb +7 -16
  47. data/spec/ollama/embedding_spec.rb +14 -5
  48. data/spec/openai/completion_spec.rb +40 -43
  49. data/spec/openai/embedding_spec.rb +4 -12
  50. data/spec/readme_spec.rb +9 -12
  51. data/spec/setup.rb +7 -16
  52. metadata +81 -4
  53. data/lib/llm/lazy_conversation.rb +0 -39
  54. data/spec/llm/lazy_conversation_spec.rb +0 -110
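
Among the new files, the VoyageAI provider (items 28–30) stands out: it ships with only an error handler and a response parser, which suggests an embeddings-only provider. A hedged sketch of how it is presumably reached, mirroring the LLM.openai constructor used in the specs below — the LLM.voyageai method name and the env variable are assumptions, not confirmed by this diff:

    require "llm"

    # Assumed entry point, by analogy with LLM.openai(token) below.
    voyage = LLM.voyageai(ENV["VOYAGEAI_SECRET"])
    voyage.embed("Hello, world") # embeddings-only, judging by the shipped files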
data/spec/openai/completion_spec.rb CHANGED
@@ -3,38 +3,11 @@
 require "setup"
 
 RSpec.describe "LLM::OpenAI: completions" do
-  subject(:openai) { LLM.openai("") }
-
-  before(:each, :success) do
-    stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .with(headers: {"Content-Type" => "application/json"})
-      .to_return(
-        status: 200,
-        body: fixture("openai/completions/ok_completion.json"),
-        headers: {"Content-Type" => "application/json"}
-      )
-  end
-
-  before(:each, :unauthorized) do
-    stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .with(headers: {"Content-Type" => "application/json"})
-      .to_return(
-        status: 401,
-        body: fixture("openai/completions/unauthorized_completion.json"),
-        headers: {"Content-Type" => "application/json"}
-      )
-  end
-
-  before(:each, :bad_request) do
-    stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .with(headers: {"Content-Type" => "application/json"})
-      .to_return(
-        status: 400,
-        body: fixture("openai/completions/badrequest_completion.json")
-      )
-  end
+  subject(:openai) { LLM.openai(token) }
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
 
-  context "when given a successful response", :success do
+  context "when given a successful response",
+    vcr: {cassette_name: "openai/completions/successful_response"} do
     subject(:response) { openai.complete("Hello!", :user) }
 
     it "returns a completion" do
@@ -48,8 +21,8 @@ RSpec.describe "LLM::OpenAI: completions" do
     it "includes token usage" do
       expect(response).to have_attributes(
         prompt_tokens: 9,
-        completion_tokens: 9,
-        total_tokens: 18
+        completion_tokens: 10,
+        total_tokens: 19
       )
     end
 
@@ -69,21 +42,29 @@ RSpec.describe "LLM::OpenAI: completions" do
     end
   end
 
-  context "when given an unauthorized response", :unauthorized do
-    subject(:response) { openai.complete(LLM::Message.new("Hello!", :user)) }
-
-    it "raises an error" do
-      expect { response }.to raise_error(LLM::Error::Unauthorized)
+  context "when given a thread of messages",
+    vcr: {cassette_name: "openai/completions/successful_response_thread"} do
+    subject(:response) do
+      openai.complete "What is your name? What age are you?", :user, messages: [
+        {role: "system", content: "Answer all of my questions"},
+        {role: "system", content: "Your name is Pablo, you are 25 years old and you are my amigo"}
+      ]
     end
 
-    it "includes the response" do
-      response
-    rescue LLM::Error::Unauthorized => ex
-      expect(ex.response).to be_kind_of(Net::HTTPResponse)
+    it "has choices" do
+      expect(response).to have_attributes(
+        choices: [
+          have_attributes(
+            role: "assistant",
+            content: "My name is Pablo, and I'm 25 years old! How can I help you today, amigo?"
+          )
+        ]
+      )
     end
   end
 
-  context "when given a 'bad request' response", :bad_request do
+  context "when given a 'bad request' response",
+    vcr: {cassette_name: "openai/completions/bad_request"} do
     subject(:response) { openai.complete(URI("/foobar.exe"), :user) }
 
     it "raises an error" do
@@ -96,4 +77,20 @@ RSpec.describe "LLM::OpenAI: completions" do
       expect(ex.response).to be_instance_of(Net::HTTPBadRequest)
     end
   end
+
+  context "when given an unauthorized response",
+    vcr: {cassette_name: "openai/completions/unauthorized_response"} do
+    subject(:response) { openai.complete(LLM::Message.new("Hello!", :user)) }
+    let(:token) { "BADTOKEN" }
+
+    it "raises an error" do
+      expect { response }.to raise_error(LLM::Error::Unauthorized)
+    end
+
+    it "includes the response" do
+      response
+    rescue LLM::Error::Unauthorized => ex
+      expect(ex.response).to be_kind_of(Net::HTTPResponse)
+    end
+  end
 end
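
The new thread-of-messages context doubles as usage documentation for the completion API. A minimal standalone sketch, assuming only what the spec above shows (LLM.openai, #complete with a messages: keyword, and a response whose #choices expose role and content):

    require "llm"

    # Names taken from the spec above; LLM_SECRET as used in spec/setup.rb.
    llm = LLM.openai(ENV["LLM_SECRET"] || "TOKEN")
    response = llm.complete "What is your name? What age are you?", :user, messages: [
      {role: "system", content: "Answer all of my questions"},
      {role: "system", content: "Your name is Pablo, you are 25 years old and you are my amigo"}
    ]
    response.choices.each { |choice| puts "[#{choice.role}] #{choice.content}" }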
data/spec/openai/embedding_spec.rb CHANGED
@@ -3,19 +3,11 @@
 require "setup"
 
 RSpec.describe "LLM::OpenAI: embeddings" do
-  let(:openai) { LLM.openai("") }
+  let(:openai) { LLM.openai(token) }
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
 
-  before(:each, :success) do
-    stub_request(:post, "https://api.openai.com/v1/embeddings")
-      .with(headers: {"Content-Type" => "application/json"})
-      .to_return(
-        status: 200,
-        body: fixture("openai/embeddings/hello_world_embedding.json"),
-        headers: {"Content-Type" => "application/json"}
-      )
-  end
-
-  context "when given a successful response", :success do
+  context "when given a successful response",
+    vcr: {cassette_name: "openai/embeddings/successful_response"} do
     subject(:response) { openai.embed("Hello, world") }
 
     it "returns an embedding" do
data/spec/readme_spec.rb CHANGED
@@ -22,22 +22,19 @@ RSpec.describe "The README examples" do
 
   let(:expected_conversation) do
     [
-      "[system] You are a friendly chatbot. Sometimes, you like to tell a joke.",
-      "But the joke must be based on the given inputs.",
+      "[system] You are my math assistant.",
+      "I will provide you with (simple) equations.",
+      "You will provide answers in the format \"The answer to <equation> is <answer>\".",
       "I will provide you a set of messages. Reply to all of them.",
       "A message is considered unanswered if there is no corresponding assistant response.",
 
-      "[user] What color is the sky?",
-      "[user] What color is an orange?",
-      "[user] I like Ruby",
+      "[user] Tell me the answer to 5 + 15",
+      "[user] Tell me the answer to (5 + 15) * 2",
+      "[user] Tell me the answer to ((5 + 15) * 2) / 10",
 
-      "[assistant] The sky is typically blue during the day, but it can have beautiful",
-      "hues of pink, orange, and purple during sunset! As for an orange,",
-      "it's typically orange in color - funny how that works, right?",
-      "I love Ruby too! Did you know that a Ruby is not only a beautiful",
-      "gemstone, but it's also a programming language that's both elegant",
-      "and powerful! Speaking of colors, why did the orange stop?",
-      "Because it ran out of juice!"
+      "[assistant] The answer to 5 + 15 is 20.",
+      "The answer to (5 + 15) * 2 is 40.",
+      "The answer to ((5 + 15) * 2) / 10 is 4."
     ].map(&:strip)
   end
 
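The new expected transcript points at a reworked README example: a math assistant whose answers are deterministic enough to compare as strings. A hedged reconstruction of the conversation the README presumably builds — wording lifted from the expected strings above, llm.chat as in the LazyConversation example removed further down:

    llm = LLM.openai(ENV["LLM_SECRET"])
    bot = llm.chat <<~PROMPT, :system
      You are my math assistant.
      I will provide you with (simple) equations.
      You will provide answers in the format "The answer to <equation> is <answer>".
      I will provide you a set of messages. Reply to all of them.
      A message is considered unanswered if there is no corresponding assistant response.
    PROMPT
    bot.chat "Tell me the answer to 5 + 15"
    bot.chat "Tell me the answer to (5 + 15) * 2"
    bot.chat "Tell me the answer to ((5 + 15) * 2) / 10"
    bot.messages.each { |message| puts "[#{message.role}] #{message.content}" }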
data/spec/setup.rb CHANGED
@@ -2,6 +2,7 @@
 
 require "llm"
 require "webmock/rspec"
+require "vcr"
 
 RSpec.configure do |config|
   config.disable_monkey_patching!
@@ -9,21 +10,11 @@ RSpec.configure do |config|
   config.expect_with :rspec do |c|
     c.syntax = :expect
   end
+end
 
-  config.include Module.new {
-    def request_fixture(file)
-      path = File.join(fixtures, "requests", file)
-      File.read(path).chomp
-    end
-
-    def response_fixture(file)
-      path = File.join(fixtures, "responses", file)
-      File.read(path).chomp
-    end
-    alias_method :fixture, :response_fixture
-
-    def fixtures
-      File.join(__dir__, "fixtures")
-    end
-  }
+VCR.configure do |config|
+  config.cassette_library_dir = "spec/fixtures/cassettes"
+  config.hook_into :webmock
+  config.configure_rspec_metadata!
+  config.filter_sensitive_data("TOKEN") { ENV["LLM_SECRET"] }
 end
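
configure_rspec_metadata! is what ties this configuration to the specs above: any example group tagged with vcr: metadata is wrapped in a cassette automatically, so no explicit VCR.use_cassette calls appear anywhere. For instance (cassette path derived from the configured cassette_library_dir; the .yml extension is VCR's default):

    RSpec.describe "LLM::OpenAI: completions" do
      context "when given a successful response",
        vcr: {cassette_name: "openai/completions/successful_response"} do
        # HTTP traffic in these examples is recorded to, and replayed from,
        # spec/fixtures/cassettes/openai/completions/successful_response.yml
      end
    end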
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.2.1
 platform: ruby
 authors:
 - Antar Azri
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-03-25 00:00:00.000000000 Z
+date: 2025-04-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: net-http
@@ -39,6 +39,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: yaml
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: webmock
   requirement: !ruby/object:Gem::Requirement
@@ -109,6 +123,62 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 0.12.0
+- !ruby/object:Gem::Dependency
+  name: rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+- !ruby/object:Gem::Dependency
+  name: rspec
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.0'
+- !ruby/object:Gem::Dependency
+  name: standard
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.40'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.40'
+- !ruby/object:Gem::Dependency
+  name: vcr
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '6.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '6.0'
 description: llm.rb is a lightweight Ruby library that provides a common interface
   and set of functionality for multple Large Language Models (LLMs). It is designed
   to be simple, flexible, and easy to use.
@@ -127,9 +197,9 @@ files:
 - lib/llm/error.rb
 - lib/llm/file.rb
 - lib/llm/http_client.rb
-- lib/llm/lazy_conversation.rb
 - lib/llm/message.rb
 - lib/llm/message_queue.rb
+- lib/llm/model.rb
 - lib/llm/provider.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
@@ -147,16 +217,23 @@ files:
 - lib/llm/providers/openai/error_handler.rb
 - lib/llm/providers/openai/format.rb
 - lib/llm/providers/openai/response_parser.rb
+- lib/llm/providers/voyageai.rb
+- lib/llm/providers/voyageai/error_handler.rb
+- lib/llm/providers/voyageai/response_parser.rb
 - lib/llm/response.rb
 - lib/llm/response/completion.rb
 - lib/llm/response/embedding.rb
 - lib/llm/version.rb
 - llm.gemspec
+- share/llm/models/anthropic.yml
+- share/llm/models/gemini.yml
+- share/llm/models/ollama.yml
+- share/llm/models/openai.yml
 - spec/anthropic/completion_spec.rb
+- spec/anthropic/embedding_spec.rb
 - spec/gemini/completion_spec.rb
 - spec/gemini/embedding_spec.rb
 - spec/llm/conversation_spec.rb
-- spec/llm/lazy_conversation_spec.rb
 - spec/ollama/completion_spec.rb
 - spec/ollama/embedding_spec.rb
 - spec/openai/completion_spec.rb
data/lib/llm/lazy_conversation.rb DELETED
@@ -1,39 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  require_relative "message_queue"
-
-  ##
-  # {LLM::LazyConversation LLM::LazyConversation} provides a
-  # conversation object that allows input prompts to be queued
-  # and only sent to the LLM when a response is needed.
-  #
-  # @example
-  #   llm = LLM.openai(key)
-  #   bot = llm.chat("Be a helpful weather assistant", :system)
-  #   bot.chat("What's the weather like in Rio?")
-  #   bot.chat("What's the weather like in Algiers?")
-  #   bot.messages.each do |message|
-  #     # A single request is made at this point
-  #   end
-  class LazyConversation
-    ##
-    # @return [LLM::MessageQueue]
-    attr_reader :messages
-
-    ##
-    # @param [LLM::Provider] provider
-    #  A provider
-    def initialize(provider)
-      @provider = provider
-      @messages = LLM::MessageQueue.new(provider)
-    end
-
-    ##
-    # @param prompt (see LLM::Provider#prompt)
-    # @return [LLM::Conversation]
-    def chat(prompt, role = :user, **params)
-      tap { @messages << [prompt, role, params] }
-    end
-  end
-end
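
LLM::LazyConversation is deleted rather than renamed; given conversation.rb's +62/-10 and the expanded conversation_spec, its queue-until-read behavior was presumably folded into LLM::Conversation. The removed file's own doc comment shows the pattern that still has to hold somewhere in 0.2.1:

    llm = LLM.openai(key)
    bot = llm.chat("Be a helpful weather assistant", :system) # was lazy in 0.1.0
    bot.chat("What's the weather like in Rio?")
    bot.chat("What's the weather like in Algiers?")
    bot.messages.each do |message|
      # A single request is made at this point, when messages are first read
    end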
data/spec/llm/lazy_conversation_spec.rb DELETED
@@ -1,110 +0,0 @@
-# frozen_string_literal: true
-
-require "setup"
-
-RSpec.describe LLM::LazyConversation do
-  context "with gemini" do
-    let(:provider) { LLM.gemini("") }
-    let(:conversation) { described_class.new(provider) }
-
-    context "when given a thread of messages" do
-      subject(:message) { conversation.messages.to_a[-1] }
-
-      before do
-        stub_request(:post, "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=")
-          .with(
-            headers: {"Content-Type" => "application/json"},
-            body: request_fixture("gemini/completions/ok_completion.json")
-          )
-          .to_return(
-            status: 200,
-            body: response_fixture("gemini/completions/ok_completion.json"),
-            headers: {"Content-Type" => "application/json"}
-          )
-      end
-
-      before do
-        conversation.chat "Hello"
-        conversation.chat "I have a question"
-        conversation.chat "How are you?"
-      end
-
-      it "maintains a conversation" do
-        expect(message).to have_attributes(
-          role: "model",
-          content: "Hello! How can I help you today? \n"
-        )
-      end
-    end
-  end
-
-  context "with openai" do
-    let(:provider) { LLM.openai("") }
-    let(:conversation) { described_class.new(provider) }
-
-    context "when given a thread of messages" do
-      subject(:message) { conversation.messages.to_a[-1] }
-
-      before do
-        stub_request(:post, "https://api.openai.com/v1/chat/completions")
-          .with(
-            headers: {"Content-Type" => "application/json"},
-            body: request_fixture("openai/completions/ok_completion.json")
-          )
-          .to_return(
-            status: 200,
-            body: response_fixture("openai/completions/ok_completion.json"),
-            headers: {"Content-Type" => "application/json"}
-          )
-      end
-
-      before do
-        conversation.chat "Hello"
-        conversation.chat "I have a question"
-        conversation.chat "How are you?"
-      end
-
-      it "maintains a conversation" do
-        expect(message).to have_attributes(
-          role: "assistant",
-          content: "Hello! How can I assist you today?"
-        )
-      end
-    end
-  end
-
-  context "with ollama" do
-    let(:provider) { LLM.ollama("") }
-    let(:conversation) { described_class.new(provider) }
-
-    context "when given a thread of messages" do
-      subject(:message) { conversation.messages.to_a[-1] }
-
-      before do
-        stub_request(:post, "http://localhost:11434/api/chat")
-          .with(
-            headers: {"Content-Type" => "application/json"},
-            body: request_fixture("ollama/completions/ok_completion.json")
-          )
-          .to_return(
-            status: 200,
-            body: response_fixture("ollama/completions/ok_completion.json"),
-            headers: {"Content-Type" => "application/json"}
-          )
-      end
-
-      before do
-        conversation.chat "Hello"
-        conversation.chat "I have a question"
-        conversation.chat "How are you?"
-      end
-
-      it "maintains a conversation" do
-        expect(message).to have_attributes(
-          role: "assistant",
-          content: "Hello! How are you today?"
-        )
-      end
-    end
-  end
-end