llm.rb 0.5.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +45 -35
- data/lib/llm/buffer.rb +4 -3
- data/lib/llm/chat.rb +30 -28
- data/lib/llm/function.rb +5 -0
- data/lib/llm/message.rb +24 -17
- data/lib/llm/provider.rb +32 -46
- data/lib/llm/providers/anthropic/format/completion_format.rb +1 -1
- data/lib/llm/providers/anthropic/format.rb +3 -2
- data/lib/llm/providers/anthropic.rb +12 -13
- data/lib/llm/providers/gemini/audio.rb +5 -5
- data/lib/llm/providers/gemini/files.rb +6 -6
- data/lib/llm/providers/gemini/format/completion_format.rb +1 -1
- data/lib/llm/providers/gemini/format.rb +8 -16
- data/lib/llm/providers/gemini/images.rb +4 -4
- data/lib/llm/providers/gemini/models.rb +3 -3
- data/lib/llm/providers/gemini.rb +9 -10
- data/lib/llm/providers/ollama/format.rb +3 -2
- data/lib/llm/providers/ollama.rb +8 -8
- data/lib/llm/providers/openai/format/completion_format.rb +2 -0
- data/lib/llm/providers/openai/format.rb +6 -4
- data/lib/llm/providers/openai/responses.rb +5 -5
- data/lib/llm/providers/openai.rb +8 -9
- data/lib/llm/providers/voyageai.rb +4 -4
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +10 -10
- metadata +2 -2
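
The change that runs through most of these files is a move from positional to keyword arguments: provider constructors now take key: and the chat methods take role:. A minimal sketch of the new calling convention, based on the README changes below (the prompt text is illustrative):

    #!/usr/bin/env ruby
    require "llm"

    # 0.5.0: LLM.openai("yourapikey") and bot.chat "...", :user
    # 0.6.0: the API key and the message role are keyword arguments
    llm = LLM.openai(key: ENV["KEY"])
    bot = LLM::Chat.new(llm).lazy
    bot.chat "Provide short and concise answers", role: :system
    bot.chat "Tell me the answer to 5 + 15", role: :user
    bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }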
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3452ff48dff867c48be888eb5ae2fff97624b8e51029cd13a26844d67a7824cf
+  data.tar.gz: 51a65baeff8b026c6ea9fdda2063d14a7961dd902f94cccd34c7591f606a586f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a548f97a9019529146f0e6f7414239aaee5b0b6695c924f28f602de7a1ef2f390e1b0053545dab2ee6725f7dabd7a2c83e3b9181027a492630f12dda77870637
+  data.tar.gz: f71fbd16d3fb22a0ad37d59e93fc7b9e6093fc2b77ebf0e6b7eeca69cb227fd84867ab02c594c4b608ea9017eb6cc40d3fa5c2ff76ffb09fbc1a1b573823a84b
data/README.md
CHANGED
@@ -42,11 +42,11 @@ using an API key (if required) and an optional set of configuration options via
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai("yourapikey")
-llm = LLM.gemini("yourapikey")
-llm = LLM.anthropic("yourapikey")
-llm = LLM.ollama(nil)
-llm = LLM.voyageai("yourapikey")
+llm = LLM.openai(key: "yourapikey")
+llm = LLM.gemini(key: "yourapikey")
+llm = LLM.anthropic(key: "yourapikey")
+llm = LLM.ollama(key: nil)
+llm = LLM.voyageai(key: "yourapikey")
 ```
 
 ### Conversations
@@ -66,12 +66,12 @@ all LLM providers support:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 bot = LLM::Chat.new(llm).lazy
-bot.chat File.read("./share/llm/prompts/system.txt"), :system
-bot.chat "Tell me the answer to 5 + 15", :user
-bot.chat "Tell me the answer to (5 + 15) * 2", :user
-bot.chat "Tell me the answer to ((5 + 15) * 2) / 10", :user
+bot.chat File.read("./share/llm/prompts/system.txt"), role: :system
+bot.chat "Tell me the answer to 5 + 15", role: :user
+bot.chat "Tell me the answer to (5 + 15) * 2", role: :user
+bot.chat "Tell me the answer to ((5 + 15) * 2) / 10", role: :user
 bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
 
 ##
@@ -106,12 +106,12 @@ for the OpenAI provider:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 bot = LLM::Chat.new(llm).lazy
-bot.respond File.read("./share/llm/prompts/system.txt"), :developer
-bot.respond "Tell me the answer to 5 + 15", :user
-bot.respond "Tell me the answer to (5 + 15) * 2", :user
-bot.respond "Tell me the answer to ((5 + 15) * 2) / 10", :user
+bot.respond File.read("./share/llm/prompts/system.txt"), role: :developer
+bot.respond "Tell me the answer to 5 + 15", role: :user
+bot.respond "Tell me the answer to (5 + 15) * 2", role: :user
+bot.respond "Tell me the answer to ((5 + 15) * 2) / 10", role: :user
 bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
 
 ##
@@ -152,21 +152,21 @@ The interface is designed so you could drop in any other library in its place:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 schema = llm.schema.object({os: llm.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")})
 bot = LLM::Chat.new(llm, schema:)
-bot.chat "You secretly love NetBSD", :system
-bot.chat "What operating system is the best?", :user
+bot.chat "You secretly love NetBSD", role: :system
+bot.chat "What operating system is the best?", role: :user
 bot.messages.find(&:assistant?).content! # => {os: "NetBSD"}
 
 schema = llm.schema.object({answer: llm.schema.integer.required})
 bot = LLM::Chat.new(llm, schema:)
-bot.chat "Tell me the answer to ((5 + 5) / 2)", :user
+bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
 bot.messages.find(&:assistant?).content! # => {answer: 5}
 
 schema = llm.schema.object({probability: llm.schema.number.required})
 bot = LLM::Chat.new(llm, schema:)
-bot.chat "Does the earth orbit the sun?", :user
+bot.chat "Does the earth orbit the sun?", role: :user
 bot.messages.find(&:assistant?).content! # => {probability: 1}
 ```
 
@@ -195,7 +195,7 @@ arbitrary commands from a LLM without sanitizing the input first :) Without furt
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 tool = LLM.function(:system) do |fn|
   fn.description "Run a shell command"
   fn.params do |schema|
@@ -207,12 +207,12 @@ tool = LLM.function(:system) do |fn|
 end
 
 bot = LLM::Chat.new(llm, tools: [tool]).lazy
-bot.chat "Your task is to run shell commands via a tool.", :system
+bot.chat "Your task is to run shell commands via a tool.", role: :system
 
-bot.chat "What is the current date?", :user
+bot.chat "What is the current date?", role: :user
 bot.chat bot.functions.map(&:call) # report return value to the LLM
 
-bot.chat "What operating system am I running? (short version please!)", :user
+bot.chat "What operating system am I running? (short version please!)", role: :user
 bot.chat bot.functions.map(&:call) # report return value to the LLM
 
 ##
@@ -235,7 +235,7 @@ documentation for more information on how to use the audio generation API:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.audio.create_speech(input: "Hello world")
 IO.copy_stream res.audio, File.join(Dir.home, "hello.mp3")
 ```
@@ -252,7 +252,7 @@ documentation for more information on how to use the audio transcription API:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.audio.create_transcription(
   file: File.join(Dir.home, "hello.mp3")
 )
@@ -272,7 +272,7 @@ the audio translation API:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.audio.create_translation(
   file: File.join(Dir.home, "bomdia.mp3")
 )
@@ -295,7 +295,7 @@ require "llm"
 require "open-uri"
 require "fileutils"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.images.create(prompt: "a dog on a rocket to the moon")
 res.urls.each do |url|
   FileUtils.mv OpenURI.open_uri(url).path,
@@ -320,7 +320,7 @@ require "llm"
 require "open-uri"
 require "fileutils"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.images.edit(
   image: "/images/cat.png",
   prompt: "a cat with a hat",
@@ -345,7 +345,7 @@ require "llm"
 require "open-uri"
 require "fileutils"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.images.create_variation(
   image: "/images/cat.png",
   n: 5
@@ -373,7 +373,7 @@ can be given to the chat method:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 bot = LLM::Chat.new(llm).lazy
 file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
 bot.chat(file)
@@ -404,7 +404,7 @@ to a prompt:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 bot = LLM::Chat.new(llm).lazy
 
 bot.chat [URI("https://example.com/path/to/image.png"), "Describe the image in the link"]
@@ -439,7 +439,7 @@ which will go on to generate a response:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 res = llm.embed(["programming is fun", "ruby is a programming language", "sushi is art"])
 print res.class, "\n"
 print res.embeddings.size, "\n"
@@ -470,7 +470,7 @@ require "llm"
 
 ##
 # List all models
-llm = LLM.openai(ENV["KEY"])
+llm = LLM.openai(key: ENV["KEY"])
 llm.models.all.each do |model|
   print "model: ", model.id, "\n"
 end
@@ -501,7 +501,7 @@ demonstrates how that might look like in practice:
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.gemini(ENV["KEY"])
+llm = LLM.gemini(key: ENV["KEY"])
 fork do
   %w[dog cat sheep goat capybara].each do |animal|
     res = llm.images.create(prompt: "a #{animal} on a rocket to the moon")
@@ -545,6 +545,16 @@ llm.rb can be installed via rubygems.org:
 
     gem install llm.rb
 
+## See also
+
+**[llmrb/llm-shell](https://github.com/llmrb/llm-shell)**
+
+An extensible, developer-oriented command line utility that is powered by
+llm.rb and serves as a demonstration of the library's capabilities. The
+[demo](https://github.com/llmrb/llm-shell#demos) section has a number of GIF
+previews might be especially interesting!
+
+
 ## Philosophy
 
 llm.rb provides a clean, dependency-free interface to Large Language Models,
data/lib/llm/buffer.rb
CHANGED
@@ -77,10 +77,10 @@ module LLM
     def complete!(message, params)
       pendings = @pending.map { _1[0] }
       messages = [*@completed, *pendings]
+      role = message.role
       completion = @provider.complete(
         message.content,
-
-        **params.merge(messages:)
+        params.merge(role:, messages:)
       )
       @completed.concat([*pendings, message, completion.choices[0]])
       @pending.clear
@@ -89,11 +89,12 @@ module LLM
     def respond!(message, params)
       pendings = @pending.map { _1[0] }
       input = [*pendings]
+      role = message.role
       params = [
         params.merge(input:),
         @response ? {previous_response_id: @response.id} : {}
       ].inject({}, &:merge!)
-      @response = @provider.responses.create(message.content,
+      @response = @provider.responses.create(message.content, params.merge(role:))
       @completed.concat([*pendings, message, @response.outputs[0]])
       @pending.clear
     end
data/lib/llm/chat.rb
CHANGED
@@ -13,11 +13,10 @@ module LLM
   #
   #  llm = LLM.openai(ENV["KEY"])
   #  bot = LLM::Chat.new(llm).lazy
-  #  bot.chat("
-  #  bot.chat("
-  #  bot.chat("
-  #  bot.chat("Why
-  #  bot.chat("Why did the chicken cross the road ?", :user)
+  #  bot.chat("Provide short and concise answers", role: :system)
+  #  bot.chat("What is 5 + 7 ?", role: :user)
+  #  bot.chat("Why is the sky blue ?", role: :user)
+  #  bot.chat("Why did the chicken cross the road ?", role: :user)
   #  bot.messages.map { print "[#{_1.role}]", _1.content, "\n" }
   class Chat
     ##
@@ -27,31 +26,34 @@ module LLM
     ##
     # @param [LLM::Provider] provider
     #  A provider
-    # @param [to_json] schema
-    #  The JSON schema to maintain throughout the conversation
-    # @param [String] model
-    #  The model to maintain throughout the conversation
     # @param [Hash] params
-    #
-
+    #  The parameters to maintain throughout the conversation.
+    #  Any parameter the provider supports can be included and
+    #  not only those listed here.
+    # @option params [String] :model Defaults to the provider's default model
+    # @option params [#to_json, nil] :schema Defaults to nil
+    # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
+    def initialize(provider, params = {})
       @provider = provider
-      @params =
+      @params = {model: provider.default_model, schema: nil}.compact.merge!(params)
       @lazy = false
       @messages = [].extend(Array)
     end
 
     ##
     # Maintain a conversation via the chat completions API
-    # @param prompt (see LLM::Provider#
-    # @param
-    # @param params (see LLM::Provider#prompt)
+    # @param prompt (see LLM::Provider#complete)
+    # @param params (see LLM::Provider#complete)
    # @return [LLM::Chat]
-    def chat(prompt,
+    def chat(prompt, params = {})
+      params = {role: :user}.merge!(params)
      if lazy?
+        role = params.delete(:role)
        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :complete]
        self
      else
-
+        role = params[:role]
+        completion = complete!(prompt, params)
        @messages.concat [Message.new(role, prompt), completion.choices[0]]
        self
      end
@@ -60,16 +62,18 @@ module LLM
    ##
    # Maintain a conversation via the responses API
    # @note Not all LLM providers support this API
-    # @param prompt (see LLM::Provider#
-    # @param
-    # @param params (see LLM::Provider#prompt)
+    # @param prompt (see LLM::Provider#complete)
+    # @param params (see LLM::Provider#complete)
    # @return [LLM::Chat]
-    def respond(prompt,
+    def respond(prompt, params = {})
+      params = {role: :user}.merge!(params)
      if lazy?
+        role = params.delete(:role)
        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
        self
      else
-
+        role = params[:role]
+        @response = respond!(prompt, params)
        @messages.concat [Message.new(role, prompt), @response.outputs[0]]
        self
      end
@@ -141,19 +145,17 @@ module LLM
    end
    private_constant :Array
 
-    def respond!(prompt,
+    def respond!(prompt, params)
      @provider.responses.create(
        prompt,
-
-        **@params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
+        @params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
      )
    end
 
-    def complete!(prompt,
+    def complete!(prompt, params)
      @provider.complete(
        prompt,
-
-        **@params.merge(params.merge(messages:))
+        @params.merge(params.merge(messages:))
      )
    end
  end
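
LLM::Chat now carries a single params hash (model, schema, tools) for the whole conversation and takes the message role as a keyword, as the hunks above show. A short usage sketch based on the README's structured-output example:

    llm = LLM.openai(key: ENV["KEY"])
    schema = llm.schema.object({answer: llm.schema.integer.required})
    bot = LLM::Chat.new(llm, schema:)                 # params persist across the conversation
    bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
    bot.messages.find(&:assistant?).content!          # => {answer: 5}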
data/lib/llm/function.rb
CHANGED
data/lib/llm/message.rb
CHANGED
@@ -57,13 +57,6 @@ module LLM
      JSON.parse(content)
    end
 
-    ##
-    # Returns true when the message is from the LLM
-    # @return [Boolean]
-    def assistant?
-      role == "assistant" || role == "model"
-    end
-
    ##
    # @return [Array<LLM::Function>]
    def functions
@@ -75,10 +68,24 @@ module LLM
    end
 
    ##
+    # Marks the message as read
+    # @return [void]
+    def read!
+      @read = true
+    end
+
+    ##
+    # Returns true when the message has been read
    # @return [Boolean]
-
-
-
+    def read?
+      @read
+    end
+
+    ##
+    # Returns true when the message is an assistant message
+    # @return [Boolean]
+    def assistant?
+      role == "assistant" || role == "model"
    end
 
    ##
@@ -89,17 +96,17 @@ module LLM
    end
 
    ##
-    #
-    # @return [
-    def
-
+    # Returns true when the message is a user message
+    # @return [Boolean]
+    def user?
+      role == "user"
    end
 
    ##
-    # Returns true when the message has been read
    # @return [Boolean]
-
-
+    # Returns true when the message requests a function call
+    def tool_call?
+      tool_calls.any?
    end
 
    ##
data/lib/llm/provider.rb
CHANGED
@@ -9,7 +9,7 @@ class LLM::Provider
  require "net/http"
 
  ##
-  # @param [String]
+  # @param [String, nil] key
  #  The secret key for authentication
  # @param [String] host
  #  The host address of the LLM provider
@@ -17,8 +17,10 @@ class LLM::Provider
  #  The port number
  # @param [Integer] timeout
  #  The number of seconds to wait for a response
-
-
+  # @param [Boolean] ssl
+  #  Whether to use SSL for the connection
+  def initialize(key:, host:, port: 443, timeout: 60, ssl: true)
+    @key = key
    @http = Net::HTTP.new(host, port).tap do |http|
      http.use_ssl = ssl
      http.read_timeout = timeout
@@ -30,7 +32,7 @@ class LLM::Provider
  # @return [String]
  # @note The secret key is redacted in inspect for security reasons
  def inspect
-    "#<#{self.class.name}:0x#{object_id.to_s(16)} @
+    "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @http=#{@http.inspect}>"
  end
 
  ##
@@ -52,26 +54,23 @@ class LLM::Provider
  # Provides an interface to the chat completions API
  # @example
  #   llm = LLM.openai(ENV["KEY"])
-  #   messages = [
-  #
-  #     {role: "system", content: "Your answers should be short and concise"},
-  #   ]
-  #   res = llm.complete("Hello. What is the answer to 5 + 2 ?", :user, messages:)
+  #   messages = [{role: "system", content: "Your task is to answer all of my questions"}]
+  #   res = llm.complete("5 + 2 ?", messages:)
  #   print "[#{res.choices[0].role}]", res.choices[0].content, "\n"
  # @param [String] prompt
  #  The input prompt to be completed
-  # @param [Symbol] role
-  #  The role of the prompt (e.g. :user, :system)
-  # @param [String] model
-  #  The model to use for the completion
-  # @param [#to_json, nil] schema
-  #  The schema that describes the expected response format
  # @param [Hash] params
-  #
+  #  The parameters to maintain throughout the conversation.
+  #  Any parameter the provider supports can be included and
+  #  not only those listed here.
+  # @option params [Symbol] :role Defaults to the provider's default role
+  # @option params [String] :model Defaults to the provider's default model
+  # @option params [#to_json, nil] :schema Defaults to nil
+  # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
  # @raise [NotImplementedError]
  #  When the method is not implemented by a subclass
  # @return [LLM::Response::Completion]
-  def complete(prompt,
+  def complete(prompt, params = {})
    raise NotImplementedError
  end
 
@@ -81,15 +80,11 @@ class LLM::Provider
  # This method creates a lazy version of a
  # {LLM::Chat LLM::Chat} object.
  # @param prompt (see LLM::Provider#complete)
-  # @param
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
-  # @raise (see LLM::Provider#complete)
+  # @param params (see LLM::Provider#complete)
  # @return [LLM::Chat]
-  def chat(prompt,
-
+  def chat(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).lazy.chat(prompt, role:)
  end
 
  ##
@@ -98,15 +93,12 @@ class LLM::Provider
  # This method creates a non-lazy version of a
  # {LLM::Chat LLM::Chat} object.
  # @param prompt (see LLM::Provider#complete)
-  # @param
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
+  # @param params (see LLM::Provider#complete)
  # @raise (see LLM::Provider#complete)
  # @return [LLM::Chat]
-  def chat!(prompt,
-
+  def chat!(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).chat(prompt, role:)
  end
 
  ##
@@ -115,15 +107,12 @@ class LLM::Provider
  # This method creates a lazy variant of a
  # {LLM::Chat LLM::Chat} object.
  # @param prompt (see LLM::Provider#complete)
-  # @param
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
+  # @param params (see LLM::Provider#complete)
  # @raise (see LLM::Provider#complete)
  # @return [LLM::Chat]
-  def respond(prompt,
-
+  def respond(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).lazy.respond(prompt, role:)
  end
 
  ##
@@ -132,15 +121,12 @@ class LLM::Provider
  # This method creates a non-lazy variant of a
  # {LLM::Chat LLM::Chat} object.
  # @param prompt (see LLM::Provider#complete)
-  # @param
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
+  # @param params (see LLM::Provider#complete)
  # @raise (see LLM::Provider#complete)
  # @return [LLM::Chat]
-  def respond!(prompt,
-
+  def respond!(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).respond(prompt, role:)
  end
 
  ##
data/lib/llm/providers/anthropic/format/completion_format.rb
CHANGED
@@ -60,7 +60,7 @@ module LLM::Anthropic::Format
      when LLM::Message
        format_content(content.content)
      when LLM::Function::Return
-        {type: "tool_result", tool_use_id: content.id, content: content.value}
+        [{type: "tool_result", tool_use_id: content.id, content: [{type: :text, text: JSON.dump(content.value)}]}]
      else
        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
          "is not supported by the Anthropic API"
data/lib/llm/providers/anthropic.rb
CHANGED
@@ -15,25 +15,25 @@ module LLM
    HOST = "api.anthropic.com"
 
    ##
-    # @param
-    def initialize(
-      super(
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
    end
 
    ##
    # Provides an embedding via VoyageAI per
    # [Anthropic's recommendation](https://docs.anthropic.com/en/docs/build-with-claude/embeddings)
    # @param input (see LLM::Provider#embed)
-    # @param [String]
-    #  Valid
+    # @param [String] key
+    #  Valid key for the VoyageAI API
    # @param [String] model
    #  The embedding model to use
    # @param [Hash] params
    #  Other embedding parameters
    # @raise (see LLM::Provider#request)
    # @return (see LLM::Provider#embed)
-    def embed(input,
-      llm = LLM.voyageai(
+    def embed(input, key:, model: "voyage-2", **params)
+      llm = LLM.voyageai(key:)
      llm.embed(input, **params.merge(model:))
    end
 
@@ -41,17 +41,16 @@ module LLM
    # Provides an interface to the chat completions API
    # @see https://docs.anthropic.com/en/api/messages Anthropic docs
    # @param prompt (see LLM::Provider#complete)
-    # @param role (see LLM::Provider#complete)
-    # @param model (see LLM::Provider#complete)
-    # @param max_tokens The maximum number of tokens to generate
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
    # @raise [LLM::Error::PromptError]
    #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-    def complete(prompt,
-      params =
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model, max_tokens: 1024}.merge!(params)
+      params = [params, format_tools(params)].inject({}, &:merge!).compact
+      role = params.delete(:role)
      req = Net::HTTP::Post.new("/v1/messages", headers)
      messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
      body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
@@ -87,7 +86,7 @@ module LLM
    def headers
      {
        "Content-Type" => "application/json",
-        "x-api-key" => @
+        "x-api-key" => @key,
        "anthropic-version" => "2023-06-01"
      }
    end
data/lib/llm/providers/gemini/audio.rb
CHANGED
@@ -9,7 +9,7 @@ class LLM::Gemini
  #  require "llm"
  #
  #  llm = LLM.gemini(ENV["KEY"])
-  #  res = llm.audio.create_transcription(input:
+  #  res = llm.audio.create_transcription(input: "/audio/rocket.mp3")
  #  res.text # => "A dog on a rocket to the moon"
  class Audio
    ##
@@ -31,7 +31,7 @@ class LLM::Gemini
    # Create an audio transcription
    # @example
    #   llm = LLM.gemini(ENV["KEY"])
-    #   res = llm.audio.create_transcription(file:
+    #   res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
    #   res.text # => "A dog on a rocket to the moon"
    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
    # @param [String, LLM::File, LLM::Response::File] file The input audio
@@ -44,7 +44,7 @@ class LLM::Gemini
        "Your task is to transcribe the contents of an audio file",
        "Your response should include the transcription, and nothing else",
        LLM.File(file)
-      ], :user, model
+      ], params.merge(role: :user, model:)
      LLM::Response::AudioTranscription
        .new(res)
        .tap { _1.text = res.choices[0].content }
@@ -55,7 +55,7 @@ class LLM::Gemini
    # @example
    #   # Arabic => English
    #   llm = LLM.gemini(ENV["KEY"])
-    #   res = llm.audio.create_translation(file:
+    #   res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
    #   res.text # => "In the name of Allah, the Beneficent, the Merciful."
    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
    # @param [String, LLM::File, LLM::Response::File] file The input audio
@@ -68,7 +68,7 @@ class LLM::Gemini
        "Your task is to translate the contents of an audio file into English",
        "Your response should include the translation, and nothing else",
        LLM.File(file)
-      ], :user, model
+      ], params.merge(role: :user, model:)
      LLM::Response::AudioTranslation
        .new(res)
        .tap { _1.text = res.choices[0].content }
data/lib/llm/providers/gemini/files.rb
CHANGED
@@ -55,7 +55,7 @@ class LLM::Gemini
    # @raise (see LLM::Provider#request)
    # @return [LLM::Response::FileList]
    def all(**params)
-      query = URI.encode_www_form(params.merge!(key:
+      query = URI.encode_www_form(params.merge!(key: key))
      req = Net::HTTP::Get.new("/v1beta/files?#{query}", headers)
      res = request(http, req)
      LLM::Response::FileList.new(res).tap { |filelist|
@@ -103,7 +103,7 @@ class LLM::Gemini
    # @return [LLM::Response::File]
    def get(file:, **params)
      file_id = file.respond_to?(:name) ? file.name : file.to_s
-      query = URI.encode_www_form(params.merge!(key:
+      query = URI.encode_www_form(params.merge!(key: key))
      req = Net::HTTP::Get.new("/v1beta/#{file_id}?#{query}", headers)
      res = request(http, req)
      LLM::Response::File.new(res)
@@ -121,7 +121,7 @@ class LLM::Gemini
    # @return [LLM::Response::File]
    def delete(file:, **params)
      file_id = file.respond_to?(:name) ? file.name : file.to_s
-      query = URI.encode_www_form(params.merge!(key:
+      query = URI.encode_www_form(params.merge!(key: key))
      req = Net::HTTP::Delete.new("/v1beta/#{file_id}?#{query}", headers)
      request(http, req)
    end
@@ -138,7 +138,7 @@ class LLM::Gemini
    include LLM::Utils
 
    def request_upload_url(file:)
-      req = Net::HTTP::Post.new("/upload/v1beta/files?key=#{
+      req = Net::HTTP::Post.new("/upload/v1beta/files?key=#{key}", headers)
      req["X-Goog-Upload-Protocol"] = "resumable"
      req["X-Goog-Upload-Command"] = "start"
      req["X-Goog-Upload-Header-Content-Length"] = file.bytesize
@@ -152,8 +152,8 @@ class LLM::Gemini
      @provider.instance_variable_get(:@http)
    end
 
-    def
-      @provider.instance_variable_get(:@
+    def key
+      @provider.instance_variable_get(:@key)
    end
 
    [:headers, :request, :set_body_stream].each do |m|
data/lib/llm/providers/gemini/format/completion_format.rb
CHANGED
@@ -41,7 +41,7 @@ module LLM::Gemini::Format
      when LLM::Message
        format_content(content.content)
      when LLM::Function::Return
-        [{text: content.value}]
+        [{text: JSON.dump(content.value)}]
      else
        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
          "is not supported by the Gemini API"
data/lib/llm/providers/gemini/format.rb
CHANGED
@@ -22,28 +22,20 @@ class LLM::Gemini
    # @param [JSON::Schema] schema
    #  The schema to format
    # @return [Hash]
-    def format_schema(
-      return {} unless schema
-
-
-        "response_mime_type" => "application/json",
-        "response_schema" => schema
-      }
-      }
+    def format_schema(params)
+      return {} unless params and params[:schema]
+      schema = params.delete(:schema)
+      {generationConfig: {response_mime_type: "application/json", response_schema: schema}}
    end
 
    ##
    # @param [Array<LLM::Function>] tools
    #  The tools to format
    # @return [Hash]
-    def format_tools(
-      return {} unless tools
-      functions = tools.grep(LLM::Function)
-      {
-        "tools" => {
-          "functionDeclarations" => functions.map { _1.format(self) }
-        }
-      }
+    def format_tools(params)
+      return {} unless params and params[:tools]&.any?
+      functions = params.delete(:tools).grep(LLM::Function)
+      {tools: {functionDeclarations: functions.map { _1.format(self) }}}
    end
  end
end
data/lib/llm/providers/gemini/images.rb
CHANGED
@@ -41,7 +41,7 @@ class LLM::Gemini
    # Gemini implements image generation under the hood.
    # @return [LLM::Response::Image]
    def create(prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
-      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{
+      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
      body = JSON.dump({
        contents: [{parts: {text: prompt}}],
        generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
@@ -65,7 +65,7 @@ class LLM::Gemini
    # @note (see LLM::Gemini::Images#create)
    # @return [LLM::Response::Image]
    def edit(image:, prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
-      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{
+      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
      image = LLM.File(image)
      body = JSON.dump({
        contents: [{parts: [{text: prompt}, format.format_content(image)]}],
@@ -89,8 +89,8 @@ class LLM::Gemini
      @format ||= CompletionFormat.new(nil)
    end
 
-    def
-      @provider.instance_variable_get(:@
+    def key
+      @provider.instance_variable_get(:@key)
    end
 
    def http
data/lib/llm/providers/gemini/models.rb
CHANGED
@@ -40,7 +40,7 @@ class LLM::Gemini
    # @raise (see LLM::Provider#request)
    # @return [LLM::Response::ModelList]
    def all(**params)
-      query = URI.encode_www_form(params.merge!(key:
+      query = URI.encode_www_form(params.merge!(key: key))
      req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
      res = request(http, req)
      LLM::Response::ModelList.new(res).tap { |modellist|
@@ -58,8 +58,8 @@ class LLM::Gemini
      @provider.instance_variable_get(:@http)
    end
 
-    def
-      @provider.instance_variable_get(:@
+    def key
+      @provider.instance_variable_get(:@key)
    end
 
    [:headers, :request].each do |m|
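
With the signature change in data/lib/llm/provider.rb above, #complete and the #chat/#respond helpers take a single params hash and pull :role out of it (each concrete provider defaults the role to :user). A hedged sketch of the call shown in the provider.rb @example; the second prompt is illustrative:

    llm = LLM.openai(key: ENV["KEY"])
    messages = [{role: "system", content: "Your task is to answer all of my questions"}]
    res = llm.complete("5 + 2 ?", messages:)          # role: defaults to :user
    print "[#{res.choices[0].role}] ", res.choices[0].content, "\n"

    bot = llm.chat("What is 3 * 3 ?", role: :user)    # returns a lazy LLM::Chat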
data/lib/llm/providers/gemini.rb
CHANGED
@@ -40,9 +40,9 @@ module LLM
    HOST = "generativelanguage.googleapis.com"
 
    ##
-    # @param
-    def initialize(
-      super(
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
    end
 
    ##
@@ -54,7 +54,7 @@ module LLM
    # @return (see LLM::Provider#embed)
    def embed(input, model: "text-embedding-004", **params)
      model = model.respond_to?(:id) ? model.id : model
-      path = ["/v1beta/models/#{model}", "embedContent?key=#{@
+      path = ["/v1beta/models/#{model}", "embedContent?key=#{@key}"].join(":")
      req = Net::HTTP::Post.new(path, headers)
      req.body = JSON.dump({content: {parts: [{text: input}]}})
      res = request(@http, req)
@@ -65,19 +65,18 @@ module LLM
    # Provides an interface to the chat completions API
    # @see https://ai.google.dev/api/generate-content#v1beta.models.generateContent Gemini docs
    # @param prompt (see LLM::Provider#complete)
-    # @param role (see LLM::Provider#complete)
-    # @param model (see LLM::Provider#complete)
-    # @param schema (see LLM::Provider#complete)
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
    # @raise [LLM::Error::PromptError]
    #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-    def complete(prompt,
-      params =
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model}.merge!(params)
+      params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
+      role, model = [:role, :model].map { params.delete(_1) }
      model.respond_to?(:id) ? model.id : model
-      path = ["/v1beta/models/#{model}", "generateContent?key=#{@
+      path = ["/v1beta/models/#{model}", "generateContent?key=#{@key}"].join(":")
      req = Net::HTTP::Post.new(path, headers)
      messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
      body = JSON.dump({contents: format(messages)}.merge!(params))
data/lib/llm/providers/ollama/format.rb
CHANGED
@@ -22,8 +22,9 @@ class LLM::Ollama
    # @param [Array<LLM::Function>] tools
    #  The tools to format
    # @return [Hash]
-    def format_tools(
-      return {} unless tools
+    def format_tools(params)
+      return {} unless params and params[:tools]&.any?
+      tools = params[:tools]
      {tools: tools.map { _1.format(self) }}
    end
  end
data/lib/llm/providers/ollama.rb
CHANGED
@@ -28,9 +28,9 @@ module LLM
    HOST = "localhost"
 
    ##
-    # @param
-    def initialize(
-      super(
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, port: 11434, ssl: false, **)
    end
 
    ##
@@ -52,16 +52,16 @@ module LLM
    # Provides an interface to the chat completions API
    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion Ollama docs
    # @param prompt (see LLM::Provider#complete)
-    # @param role (see LLM::Provider#complete)
-    # @param model (see LLM::Provider#complete)
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
    # @raise [LLM::Error::PromptError]
    #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-    def complete(prompt,
-      params =
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model, stream: false}.merge!(params)
+      params = [params, {format: params[:schema]}, format_tools(params)].inject({}, &:merge!).compact
+      role = params.delete(:role)
      req = Net::HTTP::Post.new("/api/chat", headers)
      messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
      body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
@@ -97,7 +97,7 @@ module LLM
    def headers
      {
        "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@
+        "Authorization" => "Bearer #{@key}"
      }
    end
 
data/lib/llm/providers/openai/format/completion_format.rb
CHANGED
@@ -40,6 +40,8 @@ module LLM::OpenAI::Format
        [{type: :text, text: content.to_s}]
      when LLM::Message
        format_content(content.content)
+      when LLM::Function::Return
+        throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
      else
        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
          "is not supported by the OpenAI chat completions API"
data/lib/llm/providers/openai/format.rb
CHANGED
@@ -29,8 +29,9 @@ class LLM::OpenAI
    # @param [JSON::Schema] schema
    #  The schema to format
    # @return [Hash]
-    def format_schema(
-      return {} unless schema
+    def format_schema(params)
+      return {} unless params and params[:schema]
+      schema = params.delete(:schema)
      {
        response_format: {
          type: "json_schema",
@@ -43,8 +44,9 @@ class LLM::OpenAI
    # @param [Array<LLM::Function>] tools
    #  The tools to format
    # @return [Hash]
-    def format_tools(
-      return {} unless tools
+    def format_tools(params)
+      return {} unless params and params[:tools]&.any?
+      tools = params[:tools]
      {tools: tools.map { _1.format(self) }}
    end
  end
data/lib/llm/providers/openai/responses.rb
CHANGED
@@ -45,15 +45,15 @@ class LLM::OpenAI
    # Create a response
    # @see https://platform.openai.com/docs/api-reference/responses/create OpenAI docs
    # @param prompt (see LLM::Provider#complete)
-    # @param
-    # @param model (see LLM::Provider#complete)
-    # @param [Hash] params Response params
+    # @param params (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
    # @raise [LLM::Error::PromptError]
    #  When given an object a provider does not understand
    # @return [LLM::Response::Output]
-    def create(prompt,
-      params =
+    def create(prompt, params = {})
+      params = {role: :user, model: @provider.default_model}.merge!(params)
+      params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
+      role = params.delete(:role)
      req = Net::HTTP::Post.new("/v1/responses", headers)
      messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
      body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
data/lib/llm/providers/openai.rb
CHANGED
@@ -20,9 +20,9 @@ module LLM
    HOST = "api.openai.com"
 
    ##
-    # @param
-    def initialize(
-      super(
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
    end
 
    ##
@@ -44,17 +44,16 @@ module LLM
    # Provides an interface to the chat completions API
    # @see https://platform.openai.com/docs/api-reference/chat/create OpenAI docs
    # @param prompt (see LLM::Provider#complete)
-    # @param role (see LLM::Provider#complete)
-    # @param model (see LLM::Provider#complete)
-    # @param schema (see LLM::Provider#complete)
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
    # @raise [LLM::Error::PromptError]
    #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
-    def complete(prompt,
-      params =
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model}.merge!(params)
+      params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
+      role = params.delete(:role)
      req = Net::HTTP::Post.new("/v1/chat/completions", headers)
      messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
      body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
@@ -122,7 +121,7 @@ module LLM
    def headers
      {
        "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@
+        "Authorization" => "Bearer #{@key}"
      }
    end
 
data/lib/llm/providers/voyageai.rb
CHANGED
@@ -7,9 +7,9 @@ module LLM
    HOST = "api.voyageai.com"
 
    ##
-    # @param
-    def initialize(
-      super(
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
    end
 
    ##
@@ -29,7 +29,7 @@ module LLM
    def headers
      {
        "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@
+        "Authorization" => "Bearer #{@key}"
      }
    end
 
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -23,42 +23,42 @@ module LLM
  ##
  # @param secret (see LLM::Anthropic#initialize)
  # @return (see LLM::Anthropic#initialize)
-  def anthropic(
+  def anthropic(**)
    require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
    require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
-    LLM::Anthropic.new(
+    LLM::Anthropic.new(**)
  end
 
  ##
  # @param secret (see LLM::VoyageAI#initialize)
  # @return (see LLM::VoyageAI#initialize)
-  def voyageai(
+  def voyageai(**)
    require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
-    LLM::VoyageAI.new(
+    LLM::VoyageAI.new(**)
  end
 
  ##
  # @param secret (see LLM::Gemini#initialize)
  # @return (see LLM::Gemini#initialize)
-  def gemini(
+  def gemini(**)
    require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
-    LLM::Gemini.new(
+    LLM::Gemini.new(**)
  end
 
  ##
  # @param host (see LLM::Ollama#initialize)
  # @return (see LLM::Ollama#initialize)
-  def ollama(
+  def ollama(key: nil, **)
    require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
-    LLM::Ollama.new(
+    LLM::Ollama.new(key:, **)
  end
 
  ##
  # @param secret (see LLM::OpenAI#initialize)
  # @return (see LLM::OpenAI#initialize)
-  def openai(
+  def openai(**)
    require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
-    LLM::OpenAI.new(
+    LLM::OpenAI.new(**)
  end
 
  ##
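
The module-level constructors above now forward keyword arguments straight to each provider, and LLM.ollama defaults key: to nil since a local Ollama server does not require one. For example:

    llm = LLM.openai(key: ENV["KEY"])   # hosted providers require key:
    llm = LLM.ollama                    # key: defaults to nil for the local server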
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.6.0
 platform: ruby
 authors:
 - Antar Azri
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-05-
+date: 2025-05-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webmock