llm.rb 0.2.1 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +318 -110
- data/lib/llm/buffer.rb +83 -0
- data/lib/llm/chat.rb +131 -0
- data/lib/llm/error.rb +3 -3
- data/lib/llm/file.rb +36 -40
- data/lib/llm/message.rb +21 -8
- data/lib/llm/mime.rb +54 -0
- data/lib/llm/multipart.rb +100 -0
- data/lib/llm/provider.rb +123 -21
- data/lib/llm/providers/anthropic/error_handler.rb +3 -1
- data/lib/llm/providers/anthropic/format.rb +2 -0
- data/lib/llm/providers/anthropic/response_parser.rb +3 -1
- data/lib/llm/providers/anthropic.rb +14 -5
- data/lib/llm/providers/gemini/audio.rb +77 -0
- data/lib/llm/providers/gemini/error_handler.rb +4 -2
- data/lib/llm/providers/gemini/files.rb +162 -0
- data/lib/llm/providers/gemini/format.rb +12 -6
- data/lib/llm/providers/gemini/images.rb +99 -0
- data/lib/llm/providers/gemini/response_parser.rb +27 -1
- data/lib/llm/providers/gemini.rb +62 -6
- data/lib/llm/providers/ollama/error_handler.rb +3 -1
- data/lib/llm/providers/ollama/format.rb +13 -5
- data/lib/llm/providers/ollama/response_parser.rb +3 -1
- data/lib/llm/providers/ollama.rb +30 -7
- data/lib/llm/providers/openai/audio.rb +97 -0
- data/lib/llm/providers/openai/error_handler.rb +3 -1
- data/lib/llm/providers/openai/files.rb +148 -0
- data/lib/llm/providers/openai/format.rb +22 -8
- data/lib/llm/providers/openai/images.rb +109 -0
- data/lib/llm/providers/openai/response_parser.rb +58 -5
- data/lib/llm/providers/openai/responses.rb +85 -0
- data/lib/llm/providers/openai.rb +52 -6
- data/lib/llm/providers/voyageai/error_handler.rb +1 -1
- data/lib/llm/providers/voyageai.rb +2 -2
- data/lib/llm/response/audio.rb +13 -0
- data/lib/llm/response/audio_transcription.rb +14 -0
- data/lib/llm/response/audio_translation.rb +14 -0
- data/lib/llm/response/download_file.rb +15 -0
- data/lib/llm/response/file.rb +42 -0
- data/lib/llm/response/filelist.rb +18 -0
- data/lib/llm/response/image.rb +29 -0
- data/lib/llm/response/output.rb +56 -0
- data/lib/llm/response.rb +18 -6
- data/lib/llm/utils.rb +19 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +5 -2
- data/llm.gemspec +1 -6
- data/spec/anthropic/completion_spec.rb +1 -1
- data/spec/gemini/completion_spec.rb +1 -1
- data/spec/gemini/conversation_spec.rb +31 -0
- data/spec/gemini/files_spec.rb +124 -0
- data/spec/gemini/images_spec.rb +47 -0
- data/spec/llm/conversation_spec.rb +107 -62
- data/spec/ollama/completion_spec.rb +1 -1
- data/spec/ollama/conversation_spec.rb +31 -0
- data/spec/openai/audio_spec.rb +55 -0
- data/spec/openai/completion_spec.rb +5 -4
- data/spec/openai/files_spec.rb +204 -0
- data/spec/openai/images_spec.rb +95 -0
- data/spec/openai/responses_spec.rb +51 -0
- data/spec/setup.rb +8 -0
- metadata +31 -50
- data/LICENSE.txt +0 -21
- data/lib/llm/conversation.rb +0 -90
- data/lib/llm/http_client.rb +0 -29
- data/lib/llm/message_queue.rb +0 -54
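Highlights, reading the file list: LLM::Conversation and LLM::MessageQueue (both deleted below) give way to the new lib/llm/chat.rb and lib/llm/buffer.rb; OpenAI and Gemini gain audio, files, and images namespaces, and OpenAI additionally gains a client for its Responses API; lib/llm/multipart.rb and lib/llm/mime.rb supply what is apparently the upload plumbing behind those features; and the MIT LICENSE.txt leaves the gem, with the gemspec's license field updated to match.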
data/spec/openai/responses_spec.rb
ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Responses" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation",
+          vcr: {cassette_name: "openai/responses/successful_create"} do
+    subject { provider.responses.create("Hello", :developer) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::Output)
+    end
+
+    it "has outputs" do
+      is_expected.to have_attributes(
+        outputs: [instance_of(LLM::Message)]
+      )
+    end
+  end
+
+  context "when given a successful get operation",
+          vcr: {cassette_name: "openai/responses/successful_get"} do
+    let(:response) { provider.responses.create("Hello", :developer) }
+    subject { provider.responses.get(response) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::Output)
+    end
+
+    it "has outputs" do
+      is_expected.to have_attributes(
+        outputs: [instance_of(LLM::Message)]
+      )
+    end
+  end
+
+  context "when given a successful delete operation",
+          vcr: {cassette_name: "openai/responses/successful_delete"} do
+    let(:response) { provider.responses.create("Hello", :developer) }
+    subject { provider.responses.delete(response) }
+
+    it "is successful" do
+      is_expected.to have_attributes(
+        deleted: true
+      )
+    end
+  end
+end
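The spec above exercises the full lifecycle of the new provider.responses namespace. Outside RSpec, the same round trip might look like the sketch below; the method names, arguments, and LLM::Response::Output come from the spec itself, while the output handling is an assumption:

  # Sketch based on spec/openai/responses_spec.rb above; not authoritative.
  require "llm"

  provider = LLM.openai(ENV["LLM_SECRET"])
  response = provider.responses.create("Hello", :developer) # => LLM::Response::Output
  response.outputs.each { |message| puts message.content }  # assumes LLM::Message#content
  provider.responses.get(response)    # re-fetches the stored response
  provider.responses.delete(response) # returns an object with deleted: true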
data/spec/setup.rb
CHANGED
@@ -16,5 +16,13 @@ VCR.configure do |config|
   config.cassette_library_dir = "spec/fixtures/cassettes"
   config.hook_into :webmock
   config.configure_rspec_metadata!
+
+  ##
+  # scrub
   config.filter_sensitive_data("TOKEN") { ENV["LLM_SECRET"] }
+  config.before_record do
+    body = _1.response.body
+    body.gsub! %r|#{Regexp.escape("https://oaidalleapiprodscus.blob.core.windows.net/")}[^"]+|,
+      "https://openai.com/generated/image.png"
+  end
 end
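The new before_record hook complements filter_sensitive_data: VCR yields each interaction before writing it to the cassette, and the block rewrites the signed, expiring Azure blob URLs returned by the image-generation API into a single stable placeholder, so re-recording neither churns the cassettes nor stores signed URLs in them. The same hook suits any volatile response field; for example (a sketch, with an illustrative header name):

  VCR.configure do |config|
    config.before_record do |interaction|
      # Per-request identifiers would change on every re-record.
      interaction.response.headers.delete("X-Request-Id")
    end
  end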
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.2.1
+  version: 0.3.1
 platform: ruby
 authors:
 - Antar Azri
@@ -9,50 +9,8 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-04-
+date: 2025-04-26 00:00:00.000000000 Z
 dependencies:
-- !ruby/object:Gem::Dependency
-  name: net-http
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.6.0
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.6.0
-- !ruby/object:Gem::Dependency
-  name: json
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-- !ruby/object:Gem::Dependency
-  name: yaml
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: webmock
   requirement: !ruby/object:Gem::Requirement
@@ -189,40 +147,56 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
-- LICENSE.txt
 - README.md
 - lib/llm.rb
-- lib/llm/conversation.rb
+- lib/llm/buffer.rb
+- lib/llm/chat.rb
 - lib/llm/core_ext/ostruct.rb
 - lib/llm/error.rb
 - lib/llm/file.rb
-- lib/llm/http_client.rb
 - lib/llm/message.rb
-- lib/llm/message_queue.rb
+- lib/llm/mime.rb
 - lib/llm/model.rb
+- lib/llm/multipart.rb
 - lib/llm/provider.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
 - lib/llm/providers/anthropic/format.rb
 - lib/llm/providers/anthropic/response_parser.rb
 - lib/llm/providers/gemini.rb
+- lib/llm/providers/gemini/audio.rb
 - lib/llm/providers/gemini/error_handler.rb
+- lib/llm/providers/gemini/files.rb
 - lib/llm/providers/gemini/format.rb
+- lib/llm/providers/gemini/images.rb
 - lib/llm/providers/gemini/response_parser.rb
 - lib/llm/providers/ollama.rb
 - lib/llm/providers/ollama/error_handler.rb
 - lib/llm/providers/ollama/format.rb
 - lib/llm/providers/ollama/response_parser.rb
 - lib/llm/providers/openai.rb
+- lib/llm/providers/openai/audio.rb
 - lib/llm/providers/openai/error_handler.rb
+- lib/llm/providers/openai/files.rb
 - lib/llm/providers/openai/format.rb
+- lib/llm/providers/openai/images.rb
 - lib/llm/providers/openai/response_parser.rb
+- lib/llm/providers/openai/responses.rb
 - lib/llm/providers/voyageai.rb
 - lib/llm/providers/voyageai/error_handler.rb
 - lib/llm/providers/voyageai/response_parser.rb
 - lib/llm/response.rb
+- lib/llm/response/audio.rb
+- lib/llm/response/audio_transcription.rb
+- lib/llm/response/audio_translation.rb
 - lib/llm/response/completion.rb
+- lib/llm/response/download_file.rb
 - lib/llm/response/embedding.rb
+- lib/llm/response/file.rb
+- lib/llm/response/filelist.rb
+- lib/llm/response/image.rb
+- lib/llm/response/output.rb
+- lib/llm/utils.rb
 - lib/llm/version.rb
 - llm.gemspec
 - share/llm/models/anthropic.yml
@@ -232,21 +206,28 @@ files:
 - spec/anthropic/completion_spec.rb
 - spec/anthropic/embedding_spec.rb
 - spec/gemini/completion_spec.rb
+- spec/gemini/conversation_spec.rb
 - spec/gemini/embedding_spec.rb
+- spec/gemini/files_spec.rb
+- spec/gemini/images_spec.rb
 - spec/llm/conversation_spec.rb
 - spec/ollama/completion_spec.rb
+- spec/ollama/conversation_spec.rb
 - spec/ollama/embedding_spec.rb
+- spec/openai/audio_spec.rb
 - spec/openai/completion_spec.rb
 - spec/openai/embedding_spec.rb
+- spec/openai/files_spec.rb
+- spec/openai/images_spec.rb
+- spec/openai/responses_spec.rb
 - spec/readme_spec.rb
 - spec/setup.rb
 homepage: https://github.com/llmrb/llm
 licenses:
--
+- 0BSDL
 metadata:
   homepage_uri: https://github.com/llmrb/llm
   source_code_uri: https://github.com/llmrb/llm
-  changelog_uri: https://github.com/llmrb/llm/blob/main/CHANGELOG.md
 post_install_message:
 rdoc_options: []
 require_paths:
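Two things stand out in the metadata diff: the runtime dependencies on net-http, json, and yaml are dropped, and since all three ship with the Ruby standard library the gem appears to be left with no runtime dependencies at all (the surviving entries, starting with webmock, are test tooling); and the changelog_uri link is removed alongside the license change, which matches the LICENSE.txt deletion below.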
data/LICENSE.txt
DELETED
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2024 Antar Azri
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
data/lib/llm/conversation.rb
DELETED
@@ -1,90 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # {LLM::Conversation LLM::Conversation} provides a conversation
-  # object that maintains a thread of messages that acts as context
-  # throughout the conversation.
-  # @example
-  #   llm = LLM.openai(ENV["KEY"])
-  #   convo = llm.chat("You are my climate expert", :system)
-  #   convo.chat("What's the climate like in Rio de Janerio?", :user)
-  #   convo.chat("What's the climate like in Algiers?", :user)
-  #   convo.chat("What's the climate like in Tokyo?", :user)
-  #   p bot.messages.map { [_1.role, _1.content] }
-  class Conversation
-    ##
-    # @return [Array<LLM::Message>]
-    attr_reader :messages
-
-    ##
-    # @param [LLM::Provider] provider
-    #  A provider
-    # @param [Hash] params
-    #  The parameters to maintain throughout the conversation
-    def initialize(provider, params = {})
-      @provider = provider
-      @params = params
-      @lazy = false
-      @messages = []
-    end
-
-    ##
-    # @param prompt (see LLM::Provider#prompt)
-    # @return [LLM::Conversation]
-    def chat(prompt, role = :user, **params)
-      tap do
-        if lazy?
-          @messages << [LLM::Message.new(role, prompt), @params.merge(params)]
-        else
-          completion = complete(prompt, role, params)
-          @messages.concat [Message.new(role, prompt), completion.choices[0]]
-        end
-      end
-    end
-
-    ##
-    # @note
-    #  The `read_response` and `recent_message` methods are aliases of
-    #  the `last_message` method, and you can choose the name that best
-    #  fits your context or code style.
-    # @param [#to_s] role
-    #  The role of the last message.
-    #  Defaults to the LLM's assistant role (eg "assistant" or "model")
-    # @return [LLM::Message]
-    #  The last message for the given role
-    def last_message(role: @provider.assistant_role)
-      messages.reverse_each.find { _1.role == role.to_s }
-    end
-    alias_method :recent_message, :last_message
-    alias_method :read_response, :last_message
-
-    ##
-    # Enables lazy mode for the conversation.
-    # @return [LLM::Conversation]
-    def lazy
-      tap do
-        next if lazy?
-        @lazy = true
-        @messages = LLM::MessageQueue.new(@provider)
-      end
-    end
-
-    ##
-    # @return [Boolean]
-    #  Returns true if the conversation is lazy
-    def lazy?
-      @lazy
-    end
-
-    private
-
-    def complete(prompt, role, params)
-      @provider.complete(
-        prompt,
-        role,
-        **@params.merge(params.merge(messages:))
-      )
-    end
-  end
-end
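This class is superseded rather than dropped: the file list adds lib/llm/chat.rb (+131 lines) and reworks spec/llm/conversation_spec.rb, pointing to LLM::Chat as the replacement. Assuming the provider-level entry point carried over, the deleted docstring's example still reads the same, with its two slips fixed (it printed `bot` where it assigned `convo`, and misspelled Rio de Janeiro):

  llm = LLM.openai(ENV["KEY"])
  convo = llm.chat("You are my climate expert", :system)
  convo.chat("What's the climate like in Rio de Janeiro?", :user)
  p convo.messages.map { [_1.role, _1.content] }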
data/lib/llm/http_client.rb
DELETED
@@ -1,29 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  module HTTPClient
-    require "net/http"
-    ##
-    # Initiates a HTTP request
-    # @param [Net::HTTP] http
-    #  The HTTP object to use for the request
-    # @param [Net::HTTPRequest] req
-    #  The request to send
-    # @return [Net::HTTPResponse]
-    #  The response from the server
-    # @raise [LLM::Error::Unauthorized]
-    #  When authentication fails
-    # @raise [LLM::Error::RateLimit]
-    #  When the rate limit is exceeded
-    # @raise [LLM::Error::BadResponse]
-    #  When any other unsuccessful status code is returned
-    # @raise [SystemCallError]
-    #  When there is a network error at the operating system level
-    def request(http, req)
-      res = http.request(req)
-      res.tap(&:value)
-    rescue Net::HTTPClientException
-      error_handler.new(res).raise_error!
-    end
-  end
-end
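A note for readers of the deleted mixin: res.tap(&:value) works because Net::HTTPResponse#value raises (Net::HTTPClientException among others) for any non-2xx status and returns nil otherwise, so the line amounts to "return the response or raise". Judging by lib/llm/provider.rb growing by 123 lines in this release, that request-or-raise logic appears to have been folded into the provider itself, though the diff here only shows the deletion.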
data/lib/llm/message_queue.rb
DELETED
@@ -1,54 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # {LLM::MessageQueue LLM::MessageQueue} provides an Enumerable
-  # object that yields each message in a conversation on-demand,
-  # and only sends a request to the LLM when a response is needed.
-  class MessageQueue
-    include Enumerable
-
-    ##
-    # @param [LLM::Provider] provider
-    # @return [LLM::MessageQueue]
-    def initialize(provider)
-      @provider = provider
-      @pending = []
-      @completed = []
-    end
-
-    ##
-    # @yield [LLM::Message]
-    #  Yields each message in the conversation thread
-    # @raise (see LLM::Provider#complete)
-    # @return [void]
-    def each
-      complete! unless @pending.empty?
-      @completed.each { yield(_1) }
-    end
-
-    ##
-    # @param [[LLM::Message, Hash]] item
-    #  A message and its parameters
-    # @return [void]
-    def <<(item)
-      @pending << item
-      self
-    end
-    alias_method :push, :<<
-
-    private
-
-    def complete!
-      message, params = @pending[-1]
-      messages = @pending[0..-2].map { _1[0] }
-      completion = @provider.complete(
-        message.content,
-        message.role,
-        **params.merge(messages:)
-      )
-      @completed.concat([*messages, message, completion.choices[0]])
-      @pending.clear
-    end
-  end
-end
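According to the file list, lib/llm/buffer.rb (+83 lines) takes over this role. The mechanism above is worth spelling out: messages accumulate as (message, params) pairs and nothing is sent until the queue is enumerated, at which point complete! issues one request that carries every earlier pending message as context and appends the completion's first choice. A condensed usage sketch of the deleted class (not the new Buffer API):

  # provider is any LLM::Provider, e.g. LLM.openai(token)
  queue = LLM::MessageQueue.new(provider)
  queue << [LLM::Message.new(:system, "You are my climate expert"), {}]
  queue << [LLM::Message.new(:user, "What's the climate like in Tokyo?"), {}]
  # No HTTP request has happened yet; enumeration forces one:
  queue.each { |message| puts "#{message.role}: #{message.content}" }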