llm.rb 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 50bc0de3950bfbc8dd1a9ff3b211915fff5ae510f6fd0f0fc277e5cbf689e5ea
4
- data.tar.gz: 3e48e23abefeb652429cb145be5fa1166e78cb24f3ddfb7440daff2e4a461c5d
3
+ metadata.gz: 3452ff48dff867c48be888eb5ae2fff97624b8e51029cd13a26844d67a7824cf
4
+ data.tar.gz: 51a65baeff8b026c6ea9fdda2063d14a7961dd902f94cccd34c7591f606a586f
5
5
  SHA512:
6
- metadata.gz: 8b6ad1955541753ba991a0f9b55e261cb4cc32a3a85470cee7312359cafce7d4689757a2f5efe2f641c9813dca5b7722b47ff8dc12d90f8a600eff1e3c7b5b4d
7
- data.tar.gz: b807543c44e12d4a251370a84cda59714a9b62cf3bf031203d723a8740167aab5a4f9348ae29856b6324384f2042d1851c05c272ba3a6510b33d99d6fddf4fdc
6
+ metadata.gz: a548f97a9019529146f0e6f7414239aaee5b0b6695c924f28f602de7a1ef2f390e1b0053545dab2ee6725f7dabd7a2c83e3b9181027a492630f12dda77870637
7
+ data.tar.gz: f71fbd16d3fb22a0ad37d59e93fc7b9e6093fc2b77ebf0e6b7eeca69cb227fd84867ab02c594c4b608ea9017eb6cc40d3fa5c2ff76ffb09fbc1a1b573823a84b
data/README.md CHANGED
@@ -42,11 +42,11 @@ using an API key (if required) and an optional set of configuration options via
42
42
  #!/usr/bin/env ruby
43
43
  require "llm"
44
44
 
45
- llm = LLM.openai("yourapikey")
46
- llm = LLM.gemini("yourapikey")
47
- llm = LLM.anthropic("yourapikey")
48
- llm = LLM.ollama(nil)
49
- llm = LLM.voyageai("yourapikey")
45
+ llm = LLM.openai(key: "yourapikey")
46
+ llm = LLM.gemini(key: "yourapikey")
47
+ llm = LLM.anthropic(key: "yourapikey")
48
+ llm = LLM.ollama(key: nil)
49
+ llm = LLM.voyageai(key: "yourapikey")
50
50
  ```
51
51
 
52
52
  ### Conversations
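A minimal before/after sketch of the constructor change shown in the hunk above; the key values are placeholders:

```
#!/usr/bin/env ruby
require "llm"

# 0.5.0 (old): llm = LLM.openai("yourapikey")
# 0.6.0 (new): the key is a keyword argument
llm = LLM.openai(key: "yourapikey")
llm = LLM.ollama(key: nil) # Ollama accepts a nil key
```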
@@ -66,12 +66,12 @@ all LLM providers support:
66
66
  #!/usr/bin/env ruby
67
67
  require "llm"
68
68
 
69
- llm = LLM.openai(ENV["KEY"])
69
+ llm = LLM.openai(key: ENV["KEY"])
70
70
  bot = LLM::Chat.new(llm).lazy
71
- bot.chat File.read("./share/llm/prompts/system.txt"), :system
72
- bot.chat "Tell me the answer to 5 + 15", :user
73
- bot.chat "Tell me the answer to (5 + 15) * 2", :user
74
- bot.chat "Tell me the answer to ((5 + 15) * 2) / 10", :user
71
+ bot.chat File.read("./share/llm/prompts/system.txt"), role: :system
72
+ bot.chat "Tell me the answer to 5 + 15", role: :user
73
+ bot.chat "Tell me the answer to (5 + 15) * 2", role: :user
74
+ bot.chat "Tell me the answer to ((5 + 15) * 2) / 10", role: :user
75
75
  bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
76
76
 
77
77
  ##
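The message role moves to a keyword as well: `LLM::Chat#chat` (and `#respond`, shown in the next hunk) now take `role:` as an option and default to `:user`. A small sketch of the new call style:

```
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Chat.new(llm).lazy
bot.chat "Provide short and concise answers", role: :system # was: bot.chat "...", :system
bot.chat "Tell me the answer to 5 + 15"                     # role: :user is the default
bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
```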
@@ -106,12 +106,12 @@ for the OpenAI provider:
106
106
  #!/usr/bin/env ruby
107
107
  require "llm"
108
108
 
109
- llm = LLM.openai(ENV["KEY"])
109
+ llm = LLM.openai(key: ENV["KEY"])
110
110
  bot = LLM::Chat.new(llm).lazy
111
- bot.respond File.read("./share/llm/prompts/system.txt"), :developer
112
- bot.respond "Tell me the answer to 5 + 15", :user
113
- bot.respond "Tell me the answer to (5 + 15) * 2", :user
114
- bot.respond "Tell me the answer to ((5 + 15) * 2) / 10", :user
111
+ bot.respond File.read("./share/llm/prompts/system.txt"), role: :developer
112
+ bot.respond "Tell me the answer to 5 + 15", role: :user
113
+ bot.respond "Tell me the answer to (5 + 15) * 2", role: :user
114
+ bot.respond "Tell me the answer to ((5 + 15) * 2) / 10", role: :user
115
115
  bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
116
116
 
117
117
  ##
@@ -152,21 +152,21 @@ The interface is designed so you could drop in any other library in its place:
152
152
  #!/usr/bin/env ruby
153
153
  require "llm"
154
154
 
155
- llm = LLM.openai(ENV["KEY"])
155
+ llm = LLM.openai(key: ENV["KEY"])
156
156
  schema = llm.schema.object({os: llm.schema.string.enum("OpenBSD", "FreeBSD", "NetBSD")})
157
157
  bot = LLM::Chat.new(llm, schema:)
158
- bot.chat "You secretly love NetBSD", :system
159
- bot.chat "What operating system is the best?", :user
158
+ bot.chat "You secretly love NetBSD", role: :system
159
+ bot.chat "What operating system is the best?", role: :user
160
160
  bot.messages.find(&:assistant?).content! # => {os: "NetBSD"}
161
161
 
162
162
  schema = llm.schema.object({answer: llm.schema.integer.required})
163
163
  bot = LLM::Chat.new(llm, schema:)
164
- bot.chat "Tell me the answer to ((5 + 5) / 2)", :user
164
+ bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
165
165
  bot.messages.find(&:assistant?).content! # => {answer: 5}
166
166
 
167
167
  schema = llm.schema.object({probability: llm.schema.number.required})
168
168
  bot = LLM::Chat.new(llm, schema:)
169
- bot.chat "Does the earth orbit the sun?", :user
169
+ bot.chat "Does the earth orbit the sun?", role: :user
170
170
  bot.messages.find(&:assistant?).content! # => {probability: 1}
171
171
  ```
172
172
 
@@ -195,7 +195,7 @@ arbitrary commands from a LLM without sanitizing the input first :) Without furt
195
195
  #!/usr/bin/env ruby
196
196
  require "llm"
197
197
 
198
- llm = LLM.openai(ENV["KEY"])
198
+ llm = LLM.openai(key: ENV["KEY"])
199
199
  tool = LLM.function(:system) do |fn|
200
200
  fn.description "Run a shell command"
201
201
  fn.params do |schema|
@@ -207,12 +207,12 @@ tool = LLM.function(:system) do |fn|
207
207
  end
208
208
 
209
209
  bot = LLM::Chat.new(llm, tools: [tool]).lazy
210
- bot.chat "Your task is to run shell commands via a tool.", :system
210
+ bot.chat "Your task is to run shell commands via a tool.", role: :system
211
211
 
212
- bot.chat "What is the current date?", :user
212
+ bot.chat "What is the current date?", role: :user
213
213
  bot.chat bot.functions.map(&:call) # report return value to the LLM
214
214
 
215
- bot.chat "What operating system am I running? (short version please!)", :user
215
+ bot.chat "What operating system am I running? (short version please!)", role: :user
216
216
  bot.chat bot.functions.map(&:call) # report return value to the LLM
217
217
 
218
218
  ##
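A related convenience: `LLM::Function` now exposes a `name` reader (see the lib/llm/function.rb hunk further down), so pending tool calls can be inspected before their return values are reported back. A sketch that reuses the `tool` and `bot` objects set up above:

```
bot.chat "What is the current date?", role: :user
bot.functions.each { print "pending tool call: ", _1.name, "\n" }
bot.chat bot.functions.map(&:call) # report the return values to the LLM
```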
@@ -235,7 +235,7 @@ documentation for more information on how to use the audio generation API:
235
235
  #!/usr/bin/env ruby
236
236
  require "llm"
237
237
 
238
- llm = LLM.openai(ENV["KEY"])
238
+ llm = LLM.openai(key: ENV["KEY"])
239
239
  res = llm.audio.create_speech(input: "Hello world")
240
240
  IO.copy_stream res.audio, File.join(Dir.home, "hello.mp3")
241
241
  ```
@@ -252,7 +252,7 @@ documentation for more information on how to use the audio transcription API:
252
252
  #!/usr/bin/env ruby
253
253
  require "llm"
254
254
 
255
- llm = LLM.openai(ENV["KEY"])
255
+ llm = LLM.openai(key: ENV["KEY"])
256
256
  res = llm.audio.create_transcription(
257
257
  file: File.join(Dir.home, "hello.mp3")
258
258
  )
@@ -272,7 +272,7 @@ the audio translation API:
272
272
  #!/usr/bin/env ruby
273
273
  require "llm"
274
274
 
275
- llm = LLM.openai(ENV["KEY"])
275
+ llm = LLM.openai(key: ENV["KEY"])
276
276
  res = llm.audio.create_translation(
277
277
  file: File.join(Dir.home, "bomdia.mp3")
278
278
  )
@@ -295,7 +295,7 @@ require "llm"
295
295
  require "open-uri"
296
296
  require "fileutils"
297
297
 
298
- llm = LLM.openai(ENV["KEY"])
298
+ llm = LLM.openai(key: ENV["KEY"])
299
299
  res = llm.images.create(prompt: "a dog on a rocket to the moon")
300
300
  res.urls.each do |url|
301
301
  FileUtils.mv OpenURI.open_uri(url).path,
@@ -320,7 +320,7 @@ require "llm"
320
320
  require "open-uri"
321
321
  require "fileutils"
322
322
 
323
- llm = LLM.openai(ENV["KEY"])
323
+ llm = LLM.openai(key: ENV["KEY"])
324
324
  res = llm.images.edit(
325
325
  image: "/images/cat.png",
326
326
  prompt: "a cat with a hat",
@@ -345,7 +345,7 @@ require "llm"
345
345
  require "open-uri"
346
346
  require "fileutils"
347
347
 
348
- llm = LLM.openai(ENV["KEY"])
348
+ llm = LLM.openai(key: ENV["KEY"])
349
349
  res = llm.images.create_variation(
350
350
  image: "/images/cat.png",
351
351
  n: 5
@@ -373,7 +373,7 @@ can be given to the chat method:
373
373
  #!/usr/bin/env ruby
374
374
  require "llm"
375
375
 
376
- llm = LLM.openai(ENV["KEY"])
376
+ llm = LLM.openai(key: ENV["KEY"])
377
377
  bot = LLM::Chat.new(llm).lazy
378
378
  file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
379
379
  bot.chat(file)
@@ -404,7 +404,7 @@ to a prompt:
404
404
  #!/usr/bin/env ruby
405
405
  require "llm"
406
406
 
407
- llm = LLM.openai(ENV["KEY"])
407
+ llm = LLM.openai(key: ENV["KEY"])
408
408
  bot = LLM::Chat.new(llm).lazy
409
409
 
410
410
  bot.chat [URI("https://example.com/path/to/image.png"), "Describe the image in the link"]
@@ -439,7 +439,7 @@ which will go on to generate a response:
439
439
  #!/usr/bin/env ruby
440
440
  require "llm"
441
441
 
442
- llm = LLM.openai(ENV["KEY"])
442
+ llm = LLM.openai(key: ENV["KEY"])
443
443
  res = llm.embed(["programming is fun", "ruby is a programming language", "sushi is art"])
444
444
  print res.class, "\n"
445
445
  print res.embeddings.size, "\n"
@@ -470,7 +470,7 @@ require "llm"
470
470
 
471
471
  ##
472
472
  # List all models
473
- llm = LLM.openai(ENV["KEY"])
473
+ llm = LLM.openai(key: ENV["KEY"])
474
474
  llm.models.all.each do |model|
475
475
  print "model: ", model.id, "\n"
476
476
  end
@@ -501,7 +501,7 @@ demonstrates how that might look like in practice:
501
501
  #!/usr/bin/env ruby
502
502
  require "llm"
503
503
 
504
- llm = LLM.gemini(ENV["KEY"])
504
+ llm = LLM.gemini(key: ENV["KEY"])
505
505
  fork do
506
506
  %w[dog cat sheep goat capybara].each do |animal|
507
507
  res = llm.images.create(prompt: "a #{animal} on a rocket to the moon")
@@ -545,6 +545,16 @@ llm.rb can be installed via rubygems.org:
545
545
 
546
546
  gem install llm.rb
547
547
 
548
+ ## See also
549
+
550
+ **[llmrb/llm-shell](https://github.com/llmrb/llm-shell)**
551
+
552
+ An extensible, developer-oriented command line utility that is powered by
553
+ llm.rb and serves as a demonstration of the library's capabilities. The
554
+ [demo](https://github.com/llmrb/llm-shell#demos) section has a number of GIF
555
+ previews that might be especially interesting!
556
+
557
+
548
558
  ## Philosophy
549
559
 
550
560
  llm.rb provides a clean, dependency-free interface to Large Language Models,
data/lib/llm/buffer.rb CHANGED
@@ -77,10 +77,10 @@ module LLM
77
77
  def complete!(message, params)
78
78
  pendings = @pending.map { _1[0] }
79
79
  messages = [*@completed, *pendings]
80
+ role = message.role
80
81
  completion = @provider.complete(
81
82
  message.content,
82
- message.role,
83
- **params.merge(messages:)
83
+ params.merge(role:, messages:)
84
84
  )
85
85
  @completed.concat([*pendings, message, completion.choices[0]])
86
86
  @pending.clear
@@ -89,11 +89,12 @@ module LLM
89
89
  def respond!(message, params)
90
90
  pendings = @pending.map { _1[0] }
91
91
  input = [*pendings]
92
+ role = message.role
92
93
  params = [
93
94
  params.merge(input:),
94
95
  @response ? {previous_response_id: @response.id} : {}
95
96
  ].inject({}, &:merge!)
96
- @response = @provider.responses.create(message.content, message.role, **params)
97
+ @response = @provider.responses.create(message.content, params.merge(role:))
97
98
  @completed.concat([*pendings, message, @response.outputs[0]])
98
99
  @pending.clear
99
100
  end
data/lib/llm/chat.rb CHANGED
@@ -13,11 +13,10 @@ module LLM
13
13
  #
14
14
  # llm = LLM.openai(ENV["KEY"])
15
15
  # bot = LLM::Chat.new(llm).lazy
16
- # bot.chat("Your task is to answer all of my questions", :system)
17
- # bot.chat("Your answers should be short and concise", :system)
18
- # bot.chat("What is 5 + 7 ?", :user)
19
- # bot.chat("Why is the sky blue ?", :user)
20
- # bot.chat("Why did the chicken cross the road ?", :user)
16
+ # bot.chat("Provide short and concise answers", role: :system)
17
+ # bot.chat("What is 5 + 7 ?", role: :user)
18
+ # bot.chat("Why is the sky blue ?", role: :user)
19
+ # bot.chat("Why did the chicken cross the road ?", role: :user)
21
20
  # bot.messages.map { print "[#{_1.role}]", _1.content, "\n" }
22
21
  class Chat
23
22
  ##
@@ -27,31 +26,34 @@ module LLM
27
26
  ##
28
27
  # @param [LLM::Provider] provider
29
28
  # A provider
30
- # @param [to_json] schema
31
- # The JSON schema to maintain throughout the conversation
32
- # @param [String] model
33
- # The model to maintain throughout the conversation
34
29
  # @param [Hash] params
35
- # Other parameters to maintain throughout the conversation
36
- def initialize(provider, model: provider.default_model, schema: nil, **params)
30
+ # The parameters to maintain throughout the conversation.
31
+ # Any parameter the provider supports can be included and
32
+ # not only those listed here.
33
+ # @option params [String] :model Defaults to the provider's default model
34
+ # @option params [#to_json, nil] :schema Defaults to nil
35
+ # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
36
+ def initialize(provider, params = {})
37
37
  @provider = provider
38
- @params = params.merge!(model:, schema:)
38
+ @params = {model: provider.default_model, schema: nil}.compact.merge!(params)
39
39
  @lazy = false
40
40
  @messages = [].extend(Array)
41
41
  end
42
42
 
43
43
  ##
44
44
  # Maintain a conversation via the chat completions API
45
- # @param prompt (see LLM::Provider#prompt)
46
- # @param role (see LLM::Provider#prompt)
47
- # @param params (see LLM::Provider#prompt)
45
+ # @param prompt (see LLM::Provider#complete)
46
+ # @param params (see LLM::Provider#complete)
48
47
  # @return [LLM::Chat]
49
- def chat(prompt, role = :user, **params)
48
+ def chat(prompt, params = {})
49
+ params = {role: :user}.merge!(params)
50
50
  if lazy?
51
+ role = params.delete(:role)
51
52
  @messages << [LLM::Message.new(role, prompt), @params.merge(params), :complete]
52
53
  self
53
54
  else
54
- completion = complete!(prompt, role, params)
55
+ role = params[:role]
56
+ completion = complete!(prompt, params)
55
57
  @messages.concat [Message.new(role, prompt), completion.choices[0]]
56
58
  self
57
59
  end
@@ -60,16 +62,18 @@ module LLM
60
62
  ##
61
63
  # Maintain a conversation via the responses API
62
64
  # @note Not all LLM providers support this API
63
- # @param prompt (see LLM::Provider#prompt)
64
- # @param role (see LLM::Provider#prompt)
65
- # @param params (see LLM::Provider#prompt)
65
+ # @param prompt (see LLM::Provider#complete)
66
+ # @param params (see LLM::Provider#complete)
66
67
  # @return [LLM::Chat]
67
- def respond(prompt, role = :user, **params)
68
+ def respond(prompt, params = {})
69
+ params = {role: :user}.merge!(params)
68
70
  if lazy?
71
+ role = params.delete(:role)
69
72
  @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
70
73
  self
71
74
  else
72
- @response = respond!(prompt, role, params)
75
+ role = params[:role]
76
+ @response = respond!(prompt, params)
73
77
  @messages.concat [Message.new(role, prompt), @response.outputs[0]]
74
78
  self
75
79
  end
@@ -141,19 +145,17 @@ module LLM
141
145
  end
142
146
  private_constant :Array
143
147
 
144
- def respond!(prompt, role, params)
148
+ def respond!(prompt, params)
145
149
  @provider.responses.create(
146
150
  prompt,
147
- role,
148
- **@params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
151
+ @params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
149
152
  )
150
153
  end
151
154
 
152
- def complete!(prompt, role, params)
155
+ def complete!(prompt, params)
153
156
  @provider.complete(
154
157
  prompt,
155
- role,
156
- **@params.merge(params.merge(messages:))
158
+ @params.merge(params.merge(messages:))
157
159
  )
158
160
  end
159
161
  end
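Taken together, the chat.rb changes above replace the dedicated `model:`/`schema:` keywords with a single params hash that is kept for the whole conversation; per the new docstring, any parameter the provider supports can ride along in it. A sketch, where `temperature` stands in for an arbitrary provider-supported option:

```
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
schema = llm.schema.object({answer: llm.schema.integer.required})

# :schema persists for the conversation, and so does any other provider option
bot = LLM::Chat.new(llm, schema: schema, temperature: 0)
bot.chat "Tell me the answer to ((5 + 5) / 2)", role: :user
bot.messages.find(&:assistant?).content! # => {answer: 5}
```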
data/lib/llm/function.rb CHANGED
@@ -4,6 +4,11 @@ class LLM::Function
4
4
  class Return < Struct.new(:id, :value)
5
5
  end
6
6
 
7
+ ##
8
+ # Returns the function name
9
+ # @return [String]
10
+ attr_reader :name
11
+
7
12
  ##
8
13
  # Returns function arguments
9
14
  # @return [Array, nil]
data/lib/llm/message.rb CHANGED
@@ -57,13 +57,6 @@ module LLM
57
57
  JSON.parse(content)
58
58
  end
59
59
 
60
- ##
61
- # Returns true when the message is from the LLM
62
- # @return [Boolean]
63
- def assistant?
64
- role == "assistant" || role == "model"
65
- end
66
-
67
60
  ##
68
61
  # @return [Array<LLM::Function>]
69
62
  def functions
@@ -75,10 +68,24 @@ module LLM
75
68
  end
76
69
 
77
70
  ##
71
+ # Marks the message as read
72
+ # @return [void]
73
+ def read!
74
+ @read = true
75
+ end
76
+
77
+ ##
78
+ # Returns true when the message has been read
78
79
  # @return [Boolean]
79
- # Returns true when the message requests a function call
80
- def tool_call?
81
- tool_calls.any?
80
+ def read?
81
+ @read
82
+ end
83
+
84
+ ##
85
+ # Returns true when the message is an assistant message
86
+ # @return [Boolean]
87
+ def assistant?
88
+ role == "assistant" || role == "model"
82
89
  end
83
90
 
84
91
  ##
@@ -89,17 +96,17 @@ module LLM
89
96
  end
90
97
 
91
98
  ##
92
- # Marks the message as read
93
- # @return [void]
94
- def read!
95
- @read = true
99
+ # Returns true when the message is a user message
100
+ # @return [Boolean]
101
+ def user?
102
+ role == "user"
96
103
  end
97
104
 
98
105
  ##
99
- # Returns true when the message has been read
100
106
  # @return [Boolean]
101
- def read?
102
- @read
107
+ # Returns true when the message requests a function call
108
+ def tool_call?
109
+ tool_calls.any?
103
110
  end
104
111
 
105
112
  ##
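Besides reordering, the message.rb hunks above add a `user?` predicate next to `assistant?`, `read!`/`read?`, and `tool_call?`. A small sketch of how the predicates read when walking a conversation:

```
bot.messages.each do |message|
  if message.user?
    print "you: ", message.content, "\n"
  elsif message.assistant?
    print "llm: ", message.content, "\n"
  end
end
```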
data/lib/llm/provider.rb CHANGED
@@ -9,7 +9,7 @@ class LLM::Provider
9
9
  require "net/http"
10
10
 
11
11
  ##
12
- # @param [String] secret
12
+ # @param [String, nil] key
13
13
  # The secret key for authentication
14
14
  # @param [String] host
15
15
  # The host address of the LLM provider
@@ -17,8 +17,10 @@ class LLM::Provider
17
17
  # The port number
18
18
  # @param [Integer] timeout
19
19
  # The number of seconds to wait for a response
20
- def initialize(secret, host:, port: 443, timeout: 60, ssl: true)
21
- @secret = secret
20
+ # @param [Boolean] ssl
21
+ # Whether to use SSL for the connection
22
+ def initialize(key:, host:, port: 443, timeout: 60, ssl: true)
23
+ @key = key
22
24
  @http = Net::HTTP.new(host, port).tap do |http|
23
25
  http.use_ssl = ssl
24
26
  http.read_timeout = timeout
@@ -30,7 +32,7 @@ class LLM::Provider
30
32
  # @return [String]
31
33
  # @note The secret key is redacted in inspect for security reasons
32
34
  def inspect
33
- "#<#{self.class.name}:0x#{object_id.to_s(16)} @secret=[REDACTED] @http=#{@http.inspect}>"
35
+ "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @http=#{@http.inspect}>"
34
36
  end
35
37
 
36
38
  ##
@@ -52,26 +54,23 @@ class LLM::Provider
52
54
  # Provides an interface to the chat completions API
53
55
  # @example
54
56
  # llm = LLM.openai(ENV["KEY"])
55
- # messages = [
56
- # {role: "system", content: "Your task is to answer all of my questions"},
57
- # {role: "system", content: "Your answers should be short and concise"},
58
- # ]
59
- # res = llm.complete("Hello. What is the answer to 5 + 2 ?", :user, messages:)
57
+ # messages = [{role: "system", content: "Your task is to answer all of my questions"}]
58
+ # res = llm.complete("5 + 2 ?", messages:)
60
59
  # print "[#{res.choices[0].role}]", res.choices[0].content, "\n"
61
60
  # @param [String] prompt
62
61
  # The input prompt to be completed
63
- # @param [Symbol] role
64
- # The role of the prompt (e.g. :user, :system)
65
- # @param [String] model
66
- # The model to use for the completion
67
- # @param [#to_json, nil] schema
68
- # The schema that describes the expected response format
69
62
  # @param [Hash] params
70
- # Other completion parameters
63
+ # The parameters to maintain throughout the conversation.
64
+ # Any parameter the provider supports can be included and
65
+ # not only those listed here.
66
+ # @option params [Symbol] :role Defaults to the provider's default role
67
+ # @option params [String] :model Defaults to the provider's default model
68
+ # @option params [#to_json, nil] :schema Defaults to nil
69
+ # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
71
70
  # @raise [NotImplementedError]
72
71
  # When the method is not implemented by a subclass
73
72
  # @return [LLM::Response::Completion]
74
- def complete(prompt, role = :user, model: default_model, schema: nil, **params)
73
+ def complete(prompt, params = {})
75
74
  raise NotImplementedError
76
75
  end
77
76
 
@@ -81,15 +80,11 @@ class LLM::Provider
81
80
  # This method creates a lazy version of a
82
81
  # {LLM::Chat LLM::Chat} object.
83
82
  # @param prompt (see LLM::Provider#complete)
84
- # @param role (see LLM::Provider#complete)
85
- # @param model (see LLM::Provider#complete)
86
- # @param schema (see LLM::Provider#complete)
87
- # @param [Hash] params
88
- # Other completion parameters to maintain throughout a chat
89
- # @raise (see LLM::Provider#complete)
83
+ # @param params (see LLM::Provider#complete)
90
84
  # @return [LLM::Chat]
91
- def chat(prompt, role = :user, model: default_model, schema: nil, **params)
92
- LLM::Chat.new(self, **params.merge(model:, schema:)).lazy.chat(prompt, role)
85
+ def chat(prompt, params = {})
86
+ role = params.delete(:role)
87
+ LLM::Chat.new(self, params).lazy.chat(prompt, role:)
93
88
  end
94
89
 
95
90
  ##
@@ -98,15 +93,12 @@ class LLM::Provider
98
93
  # This method creates a non-lazy version of a
99
94
  # {LLM::Chat LLM::Chat} object.
100
95
  # @param prompt (see LLM::Provider#complete)
101
- # @param role (see LLM::Provider#complete)
102
- # @param model (see LLM::Provider#complete)
103
- # @param schema (see LLM::Provider#complete)
104
- # @param [Hash] params
105
- # Other completion parameters to maintain throughout a chat
96
+ # @param params (see LLM::Provider#complete)
106
97
  # @raise (see LLM::Provider#complete)
107
98
  # @return [LLM::Chat]
108
- def chat!(prompt, role = :user, model: default_model, schema: nil, **params)
109
- LLM::Chat.new(self, **params.merge(model:, schema:)).chat(prompt, role)
99
+ def chat!(prompt, params = {})
100
+ role = params.delete(:role)
101
+ LLM::Chat.new(self, params).chat(prompt, role:)
110
102
  end
111
103
 
112
104
  ##
@@ -115,15 +107,12 @@ class LLM::Provider
115
107
  # This method creates a lazy variant of a
116
108
  # {LLM::Chat LLM::Chat} object.
117
109
  # @param prompt (see LLM::Provider#complete)
118
- # @param role (see LLM::Provider#complete)
119
- # @param model (see LLM::Provider#complete)
120
- # @param schema (see LLM::Provider#complete)
121
- # @param [Hash] params
122
- # Other completion parameters to maintain throughout a chat
110
+ # @param params (see LLM::Provider#complete)
123
111
  # @raise (see LLM::Provider#complete)
124
112
  # @return [LLM::Chat]
125
- def respond(prompt, role = :user, model: default_model, schema: nil, **params)
126
- LLM::Chat.new(self, **params.merge(model:, schema:)).lazy.respond(prompt, role)
113
+ def respond(prompt, params = {})
114
+ role = params.delete(:role)
115
+ LLM::Chat.new(self, params).lazy.respond(prompt, role:)
127
116
  end
128
117
 
129
118
  ##
@@ -132,15 +121,12 @@ class LLM::Provider
132
121
  # This method creates a non-lazy variant of a
133
122
  # {LLM::Chat LLM::Chat} object.
134
123
  # @param prompt (see LLM::Provider#complete)
135
- # @param role (see LLM::Provider#complete)
136
- # @param model (see LLM::Provider#complete)
137
- # @param schema (see LLM::Provider#complete)
138
- # @param [Hash] params
139
- # Other completion parameters to maintain throughout a chat
124
+ # @param params (see LLM::Provider#complete)
140
125
  # @raise (see LLM::Provider#complete)
141
126
  # @return [LLM::Chat]
142
- def respond!(prompt, role = :user, model: default_model, schema: nil, **params)
143
- LLM::Chat.new(self, **params.merge(model:, schema:)).respond(prompt, role)
127
+ def respond!(prompt, params = {})
128
+ role = params.delete(:role)
129
+ LLM::Chat.new(self, params).respond(prompt, role:)
144
130
  end
145
131
 
146
132
  ##
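At the provider level the same consolidation applies: `LLM::Provider#complete` and the chat/respond helpers now take one params hash, from which `:role` is extracted. A minimal one-shot completion, following the updated docstring above:

```
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
messages = [{role: "system", content: "Your task is to answer all of my questions"}]
res = llm.complete("5 + 2 ?", role: :user, messages: messages)
print "[#{res.choices[0].role}] ", res.choices[0].content, "\n"
```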
@@ -60,7 +60,7 @@ module LLM::Anthropic::Format
60
60
  when LLM::Message
61
61
  format_content(content.content)
62
62
  when LLM::Function::Return
63
- {type: "tool_result", tool_use_id: content.id, content: content.value}
63
+ [{type: "tool_result", tool_use_id: content.id, content: [{type: :text, text: JSON.dump(content.value)}]}]
64
64
  else
65
65
  raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
66
66
  "is not supported by the Anthropic API"
@@ -18,8 +18,9 @@ class LLM::Anthropic
18
18
 
19
19
  private
20
20
 
21
- def format_tools(tools)
22
- return {} unless tools
21
+ def format_tools(params)
22
+ return {} unless params and params[:tools]&.any?
23
+ tools = params[:tools]
23
24
  {tools: tools.map { _1.format(self) }}
24
25
  end
25
26
  end
@@ -15,25 +15,25 @@ module LLM
15
15
  HOST = "api.anthropic.com"
16
16
 
17
17
  ##
18
- # @param secret (see LLM::Provider#initialize)
19
- def initialize(secret, **)
20
- super(secret, host: HOST, **)
18
+ # @param key (see LLM::Provider#initialize)
19
+ def initialize(**)
20
+ super(host: HOST, **)
21
21
  end
22
22
 
23
23
  ##
24
24
  # Provides an embedding via VoyageAI per
25
25
  # [Anthropic's recommendation](https://docs.anthropic.com/en/docs/build-with-claude/embeddings)
26
26
  # @param input (see LLM::Provider#embed)
27
- # @param [String] token
28
- # Valid token for the VoyageAI API
27
+ # @param [String] key
28
+ # Valid key for the VoyageAI API
29
29
  # @param [String] model
30
30
  # The embedding model to use
31
31
  # @param [Hash] params
32
32
  # Other embedding parameters
33
33
  # @raise (see LLM::Provider#request)
34
34
  # @return (see LLM::Provider#embed)
35
- def embed(input, token:, model: "voyage-2", **params)
36
- llm = LLM.voyageai(token)
35
+ def embed(input, key:, model: "voyage-2", **params)
36
+ llm = LLM.voyageai(key:)
37
37
  llm.embed(input, **params.merge(model:))
38
38
  end
39
39
 
@@ -41,17 +41,16 @@ module LLM
41
41
  # Provides an interface to the chat completions API
42
42
  # @see https://docs.anthropic.com/en/api/messages Anthropic docs
43
43
  # @param prompt (see LLM::Provider#complete)
44
- # @param role (see LLM::Provider#complete)
45
- # @param model (see LLM::Provider#complete)
46
- # @param max_tokens The maximum number of tokens to generate
47
44
  # @param params (see LLM::Provider#complete)
48
45
  # @example (see LLM::Provider#complete)
49
46
  # @raise (see LLM::Provider#request)
50
47
  # @raise [LLM::Error::PromptError]
51
48
  # When given an object a provider does not understand
52
49
  # @return (see LLM::Provider#complete)
53
- def complete(prompt, role = :user, model: default_model, max_tokens: 1024, tools: nil, **params)
54
- params = [{max_tokens:, model:}, format_tools(tools), params].inject({}, &:merge!).compact
50
+ def complete(prompt, params = {})
51
+ params = {role: :user, model: default_model, max_tokens: 1024}.merge!(params)
52
+ params = [params, format_tools(params)].inject({}, &:merge!).compact
53
+ role = params.delete(:role)
55
54
  req = Net::HTTP::Post.new("/v1/messages", headers)
56
55
  messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
57
56
  body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
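Because everything now travels in the params hash, provider-specific options such as Anthropic's `max_tokens` (defaulted to 1024 above) are plain hash entries rather than dedicated keyword arguments. A sketch:

```
llm = LLM.anthropic(key: ENV["KEY"])
res = llm.complete("Hello, Claude", max_tokens: 512) # overrides the 1024 default
print res.choices[0].content, "\n"
```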
@@ -87,7 +86,7 @@ module LLM
87
86
  def headers
88
87
  {
89
88
  "Content-Type" => "application/json",
90
- "x-api-key" => @secret,
89
+ "x-api-key" => @key,
91
90
  "anthropic-version" => "2023-06-01"
92
91
  }
93
92
  end
@@ -9,7 +9,7 @@ class LLM::Gemini
9
9
  # require "llm"
10
10
  #
11
11
  # llm = LLM.gemini(ENV["KEY"])
12
- # res = llm.audio.create_transcription(input: LLM::File("/rocket.mp3"))
12
+ # res = llm.audio.create_transcription(input: "/audio/rocket.mp3")
13
13
  # res.text # => "A dog on a rocket to the moon"
14
14
  class Audio
15
15
  ##
@@ -31,7 +31,7 @@ class LLM::Gemini
31
31
  # Create an audio transcription
32
32
  # @example
33
33
  # llm = LLM.gemini(ENV["KEY"])
34
- # res = llm.audio.create_transcription(file: LLM::File("/rocket.mp3"))
34
+ # res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
35
35
  # res.text # => "A dog on a rocket to the moon"
36
36
  # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
37
37
  # @param [String, LLM::File, LLM::Response::File] file The input audio
@@ -44,7 +44,7 @@ class LLM::Gemini
44
44
  "Your task is to transcribe the contents of an audio file",
45
45
  "Your response should include the transcription, and nothing else",
46
46
  LLM.File(file)
47
- ], :user, model:, **params
47
+ ], params.merge(role: :user, model:)
48
48
  LLM::Response::AudioTranscription
49
49
  .new(res)
50
50
  .tap { _1.text = res.choices[0].content }
@@ -55,7 +55,7 @@ class LLM::Gemini
55
55
  # @example
56
56
  # # Arabic => English
57
57
  # llm = LLM.gemini(ENV["KEY"])
58
- # res = llm.audio.create_translation(file: LLM::File("/bismillah.mp3"))
58
+ # res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
59
59
  # res.text # => "In the name of Allah, the Beneficent, the Merciful."
60
60
  # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
61
61
  # @param [String, LLM::File, LLM::Response::File] file The input audio
@@ -68,7 +68,7 @@ class LLM::Gemini
68
68
  "Your task is to translate the contents of an audio file into English",
69
69
  "Your response should include the translation, and nothing else",
70
70
  LLM.File(file)
71
- ], :user, model:, **params
71
+ ], params.merge(role: :user, model:)
72
72
  LLM::Response::AudioTranslation
73
73
  .new(res)
74
74
  .tap { _1.text = res.choices[0].content }
@@ -55,7 +55,7 @@ class LLM::Gemini
55
55
  # @raise (see LLM::Provider#request)
56
56
  # @return [LLM::Response::FileList]
57
57
  def all(**params)
58
- query = URI.encode_www_form(params.merge!(key: secret))
58
+ query = URI.encode_www_form(params.merge!(key: key))
59
59
  req = Net::HTTP::Get.new("/v1beta/files?#{query}", headers)
60
60
  res = request(http, req)
61
61
  LLM::Response::FileList.new(res).tap { |filelist|
@@ -103,7 +103,7 @@ class LLM::Gemini
103
103
  # @return [LLM::Response::File]
104
104
  def get(file:, **params)
105
105
  file_id = file.respond_to?(:name) ? file.name : file.to_s
106
- query = URI.encode_www_form(params.merge!(key: secret))
106
+ query = URI.encode_www_form(params.merge!(key: key))
107
107
  req = Net::HTTP::Get.new("/v1beta/#{file_id}?#{query}", headers)
108
108
  res = request(http, req)
109
109
  LLM::Response::File.new(res)
@@ -121,7 +121,7 @@ class LLM::Gemini
121
121
  # @return [LLM::Response::File]
122
122
  def delete(file:, **params)
123
123
  file_id = file.respond_to?(:name) ? file.name : file.to_s
124
- query = URI.encode_www_form(params.merge!(key: secret))
124
+ query = URI.encode_www_form(params.merge!(key: key))
125
125
  req = Net::HTTP::Delete.new("/v1beta/#{file_id}?#{query}", headers)
126
126
  request(http, req)
127
127
  end
@@ -138,7 +138,7 @@ class LLM::Gemini
138
138
  include LLM::Utils
139
139
 
140
140
  def request_upload_url(file:)
141
- req = Net::HTTP::Post.new("/upload/v1beta/files?key=#{secret}", headers)
141
+ req = Net::HTTP::Post.new("/upload/v1beta/files?key=#{key}", headers)
142
142
  req["X-Goog-Upload-Protocol"] = "resumable"
143
143
  req["X-Goog-Upload-Command"] = "start"
144
144
  req["X-Goog-Upload-Header-Content-Length"] = file.bytesize
@@ -152,8 +152,8 @@ class LLM::Gemini
152
152
  @provider.instance_variable_get(:@http)
153
153
  end
154
154
 
155
- def secret
156
- @provider.instance_variable_get(:@secret)
155
+ def key
156
+ @provider.instance_variable_get(:@key)
157
157
  end
158
158
 
159
159
  [:headers, :request, :set_body_stream].each do |m|
@@ -41,7 +41,7 @@ module LLM::Gemini::Format
41
41
  when LLM::Message
42
42
  format_content(content.content)
43
43
  when LLM::Function::Return
44
- [{text: content.value}]
44
+ [{text: JSON.dump(content.value)}]
45
45
  else
46
46
  raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
47
47
  "is not supported by the Gemini API"
@@ -22,28 +22,20 @@ class LLM::Gemini
22
22
  # @param [JSON::Schema] schema
23
23
  # The schema to format
24
24
  # @return [Hash]
25
- def format_schema(schema)
26
- return {} unless schema
27
- {
28
- "generationConfig" => {
29
- "response_mime_type" => "application/json",
30
- "response_schema" => schema
31
- }
32
- }
25
+ def format_schema(params)
26
+ return {} unless params and params[:schema]
27
+ schema = params.delete(:schema)
28
+ {generationConfig: {response_mime_type: "application/json", response_schema: schema}}
33
29
  end
34
30
 
35
31
  ##
36
32
  # @param [Array<LLM::Function>] tools
37
33
  # The tools to format
38
34
  # @return [Hash]
39
- def format_tools(tools)
40
- return {} unless tools
41
- functions = tools.grep(LLM::Function)
42
- {
43
- "tools" => {
44
- "functionDeclarations" => functions.map { _1.format(self) }
45
- }
46
- }
35
+ def format_tools(params)
36
+ return {} unless params and params[:tools]&.any?
37
+ functions = params.delete(:tools).grep(LLM::Function)
38
+ {tools: {functionDeclarations: functions.map { _1.format(self) }}}
47
39
  end
48
40
  end
49
41
  end
@@ -41,7 +41,7 @@ class LLM::Gemini
41
41
  # Gemini implements image generation under the hood.
42
42
  # @return [LLM::Response::Image]
43
43
  def create(prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
44
- req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{secret}", headers)
44
+ req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
45
45
  body = JSON.dump({
46
46
  contents: [{parts: {text: prompt}}],
47
47
  generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
@@ -65,7 +65,7 @@ class LLM::Gemini
65
65
  # @note (see LLM::Gemini::Images#create)
66
66
  # @return [LLM::Response::Image]
67
67
  def edit(image:, prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
68
- req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{secret}", headers)
68
+ req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
69
69
  image = LLM.File(image)
70
70
  body = JSON.dump({
71
71
  contents: [{parts: [{text: prompt}, format.format_content(image)]}],
@@ -89,8 +89,8 @@ class LLM::Gemini
89
89
  @format ||= CompletionFormat.new(nil)
90
90
  end
91
91
 
92
- def secret
93
- @provider.instance_variable_get(:@secret)
92
+ def key
93
+ @provider.instance_variable_get(:@key)
94
94
  end
95
95
 
96
96
  def http
@@ -40,7 +40,7 @@ class LLM::Gemini
40
40
  # @raise (see LLM::Provider#request)
41
41
  # @return [LLM::Response::ModelList]
42
42
  def all(**params)
43
- query = URI.encode_www_form(params.merge!(key: secret))
43
+ query = URI.encode_www_form(params.merge!(key: key))
44
44
  req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
45
45
  res = request(http, req)
46
46
  LLM::Response::ModelList.new(res).tap { |modellist|
@@ -58,8 +58,8 @@ class LLM::Gemini
58
58
  @provider.instance_variable_get(:@http)
59
59
  end
60
60
 
61
- def secret
62
- @provider.instance_variable_get(:@secret)
61
+ def key
62
+ @provider.instance_variable_get(:@key)
63
63
  end
64
64
 
65
65
  [:headers, :request].each do |m|
@@ -40,9 +40,9 @@ module LLM
40
40
  HOST = "generativelanguage.googleapis.com"
41
41
 
42
42
  ##
43
- # @param secret (see LLM::Provider#initialize)
44
- def initialize(secret, **)
45
- super(secret, host: HOST, **)
43
+ # @param key (see LLM::Provider#initialize)
44
+ def initialize(**)
45
+ super(host: HOST, **)
46
46
  end
47
47
 
48
48
  ##
@@ -54,7 +54,7 @@ module LLM
54
54
  # @return (see LLM::Provider#embed)
55
55
  def embed(input, model: "text-embedding-004", **params)
56
56
  model = model.respond_to?(:id) ? model.id : model
57
- path = ["/v1beta/models/#{model}", "embedContent?key=#{@secret}"].join(":")
57
+ path = ["/v1beta/models/#{model}", "embedContent?key=#{@key}"].join(":")
58
58
  req = Net::HTTP::Post.new(path, headers)
59
59
  req.body = JSON.dump({content: {parts: [{text: input}]}})
60
60
  res = request(@http, req)
@@ -65,19 +65,18 @@ module LLM
65
65
  # Provides an interface to the chat completions API
66
66
  # @see https://ai.google.dev/api/generate-content#v1beta.models.generateContent Gemini docs
67
67
  # @param prompt (see LLM::Provider#complete)
68
- # @param role (see LLM::Provider#complete)
69
- # @param model (see LLM::Provider#complete)
70
- # @param schema (see LLM::Provider#complete)
71
68
  # @param params (see LLM::Provider#complete)
72
69
  # @example (see LLM::Provider#complete)
73
70
  # @raise (see LLM::Provider#request)
74
71
  # @raise [LLM::Error::PromptError]
75
72
  # When given an object a provider does not understand
76
73
  # @return (see LLM::Provider#complete)
77
- def complete(prompt, role = :user, model: default_model, schema: nil, tools: nil, **params)
78
- params = [format_schema(schema), format_tools(tools), params].inject({}, &:merge!).compact
74
+ def complete(prompt, params = {})
75
+ params = {role: :user, model: default_model}.merge!(params)
76
+ params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
77
+ role, model = [:role, :model].map { params.delete(_1) }
79
78
  model.respond_to?(:id) ? model.id : model
80
- path = ["/v1beta/models/#{model}", "generateContent?key=#{@secret}"].join(":")
79
+ path = ["/v1beta/models/#{model}", "generateContent?key=#{@key}"].join(":")
81
80
  req = Net::HTTP::Post.new(path, headers)
82
81
  messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
83
82
  body = JSON.dump({contents: format(messages)}.merge!(params))
@@ -22,8 +22,9 @@ class LLM::Ollama
22
22
  # @param [Array<LLM::Function>] tools
23
23
  # The tools to format
24
24
  # @return [Hash]
25
- def format_tools(tools)
26
- return {} unless tools
25
+ def format_tools(params)
26
+ return {} unless params and params[:tools]&.any?
27
+ tools = params[:tools]
27
28
  {tools: tools.map { _1.format(self) }}
28
29
  end
29
30
  end
@@ -28,9 +28,9 @@ module LLM
28
28
  HOST = "localhost"
29
29
 
30
30
  ##
31
- # @param secret (see LLM::Provider#initialize)
32
- def initialize(secret, **)
33
- super(secret, host: HOST, port: 11434, ssl: false, **)
31
+ # @param key (see LLM::Provider#initialize)
32
+ def initialize(**)
33
+ super(host: HOST, port: 11434, ssl: false, **)
34
34
  end
35
35
 
36
36
  ##
@@ -52,16 +52,16 @@ module LLM
52
52
  # Provides an interface to the chat completions API
53
53
  # @see https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion Ollama docs
54
54
  # @param prompt (see LLM::Provider#complete)
55
- # @param role (see LLM::Provider#complete)
56
- # @param model (see LLM::Provider#complete)
57
55
  # @param params (see LLM::Provider#complete)
58
56
  # @example (see LLM::Provider#complete)
59
57
  # @raise (see LLM::Provider#request)
60
58
  # @raise [LLM::Error::PromptError]
61
59
  # When given an object a provider does not understand
62
60
  # @return (see LLM::Provider#complete)
63
- def complete(prompt, role = :user, model: default_model, schema: nil, tools: nil, **params)
64
- params = [{model:, stream: false, format: schema}, format_tools(tools), params].inject({}, &:merge!).compact
61
+ def complete(prompt, params = {})
62
+ params = {role: :user, model: default_model, stream: false}.merge!(params)
63
+ params = [params, {format: params[:schema]}, format_tools(params)].inject({}, &:merge!).compact
64
+ role = params.delete(:role)
65
65
  req = Net::HTTP::Post.new("/api/chat", headers)
66
66
  messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
67
67
  body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
@@ -97,7 +97,7 @@ module LLM
97
97
  def headers
98
98
  {
99
99
  "Content-Type" => "application/json",
100
- "Authorization" => "Bearer #{@secret}"
100
+ "Authorization" => "Bearer #{@key}"
101
101
  }
102
102
  end
103
103
 
@@ -40,6 +40,8 @@ module LLM::OpenAI::Format
40
40
  [{type: :text, text: content.to_s}]
41
41
  when LLM::Message
42
42
  format_content(content.content)
43
+ when LLM::Function::Return
44
+ throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
43
45
  else
44
46
  raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
45
47
  "is not supported by the OpenAI chat completions API"
@@ -29,8 +29,9 @@ class LLM::OpenAI
29
29
  # @param [JSON::Schema] schema
30
30
  # The schema to format
31
31
  # @return [Hash]
32
- def format_schema(schema)
33
- return {} unless schema
32
+ def format_schema(params)
33
+ return {} unless params and params[:schema]
34
+ schema = params.delete(:schema)
34
35
  {
35
36
  response_format: {
36
37
  type: "json_schema",
@@ -43,8 +44,9 @@ class LLM::OpenAI
43
44
  # @param [Array<LLM::Function>] tools
44
45
  # The tools to format
45
46
  # @return [Hash]
46
- def format_tools(tools)
47
- return {} unless tools
47
+ def format_tools(params)
48
+ return {} unless params and params[:tools]&.any?
49
+ tools = params[:tools]
48
50
  {tools: tools.map { _1.format(self) }}
49
51
  end
50
52
  end
@@ -45,15 +45,15 @@ class LLM::OpenAI
45
45
  # Create a response
46
46
  # @see https://platform.openai.com/docs/api-reference/responses/create OpenAI docs
47
47
  # @param prompt (see LLM::Provider#complete)
48
- # @param role (see LLM::Provider#complete)
49
- # @param model (see LLM::Provider#complete)
50
- # @param [Hash] params Response params
48
+ # @param params (see LLM::Provider#complete)
51
49
  # @raise (see LLM::Provider#request)
52
50
  # @raise [LLM::Error::PromptError]
53
51
  # When given an object a provider does not understand
54
52
  # @return [LLM::Response::Output]
55
- def create(prompt, role = :user, model: @provider.default_model, schema: nil, tools: nil, **params)
56
- params = [{model:}, format_schema(schema), format_tools(tools), params].inject({}, &:merge!).compact
53
+ def create(prompt, params = {})
54
+ params = {role: :user, model: @provider.default_model}.merge!(params)
55
+ params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
56
+ role = params.delete(:role)
57
57
  req = Net::HTTP::Post.new("/v1/responses", headers)
58
58
  messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
59
59
  body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
@@ -20,9 +20,9 @@ module LLM
20
20
  HOST = "api.openai.com"
21
21
 
22
22
  ##
23
- # @param secret (see LLM::Provider#initialize)
24
- def initialize(secret, **)
25
- super(secret, host: HOST, **)
23
+ # @param key (see LLM::Provider#initialize)
24
+ def initialize(**)
25
+ super(host: HOST, **)
26
26
  end
27
27
 
28
28
  ##
@@ -44,17 +44,16 @@ module LLM
44
44
  # Provides an interface to the chat completions API
45
45
  # @see https://platform.openai.com/docs/api-reference/chat/create OpenAI docs
46
46
  # @param prompt (see LLM::Provider#complete)
47
- # @param role (see LLM::Provider#complete)
48
- # @param model (see LLM::Provider#complete)
49
- # @param schema (see LLM::Provider#complete)
50
47
  # @param params (see LLM::Provider#complete)
51
48
  # @example (see LLM::Provider#complete)
52
49
  # @raise (see LLM::Provider#request)
53
50
  # @raise [LLM::Error::PromptError]
54
51
  # When given an object a provider does not understand
55
52
  # @return (see LLM::Provider#complete)
56
- def complete(prompt, role = :user, model: default_model, schema: nil, tools: nil, **params)
57
- params = [{model:}, format_schema(schema), format_tools(tools), params].inject({}, &:merge!).compact
53
+ def complete(prompt, params = {})
54
+ params = {role: :user, model: default_model}.merge!(params)
55
+ params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
56
+ role = params.delete(:role)
58
57
  req = Net::HTTP::Post.new("/v1/chat/completions", headers)
59
58
  messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
60
59
  body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
@@ -122,7 +121,7 @@ module LLM
122
121
  def headers
123
122
  {
124
123
  "Content-Type" => "application/json",
125
- "Authorization" => "Bearer #{@secret}"
124
+ "Authorization" => "Bearer #{@key}"
126
125
  }
127
126
  end
128
127
 
@@ -7,9 +7,9 @@ module LLM
7
7
  HOST = "api.voyageai.com"
8
8
 
9
9
  ##
10
- # @param secret (see LLM::Provider#initialize)
11
- def initialize(secret, **)
12
- super(secret, host: HOST, **)
10
+ # @param key (see LLM::Provider#initialize)
11
+ def initialize(**)
12
+ super(host: HOST, **)
13
13
  end
14
14
 
15
15
  ##
@@ -29,7 +29,7 @@ module LLM
29
29
  def headers
30
30
  {
31
31
  "Content-Type" => "application/json",
32
- "Authorization" => "Bearer #{@secret}"
32
+ "Authorization" => "Bearer #{@key}"
33
33
  }
34
34
  end
35
35
 
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LLM
4
- VERSION = "0.5.0"
4
+ VERSION = "0.6.0"
5
5
  end
data/lib/llm.rb CHANGED
@@ -23,42 +23,42 @@ module LLM
23
23
  ##
24
24
  # @param secret (see LLM::Anthropic#initialize)
25
25
  # @return (see LLM::Anthropic#initialize)
26
- def anthropic(secret, options = {})
26
+ def anthropic(**)
27
27
  require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
28
28
  require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
29
- LLM::Anthropic.new(secret, **options)
29
+ LLM::Anthropic.new(**)
30
30
  end
31
31
 
32
32
  ##
33
33
  # @param secret (see LLM::VoyageAI#initialize)
34
34
  # @return (see LLM::VoyageAI#initialize)
35
- def voyageai(secret, options = {})
35
+ def voyageai(**)
36
36
  require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
37
- LLM::VoyageAI.new(secret, **options)
37
+ LLM::VoyageAI.new(**)
38
38
  end
39
39
 
40
40
  ##
41
41
  # @param secret (see LLM::Gemini#initialize)
42
42
  # @return (see LLM::Gemini#initialize)
43
- def gemini(secret, options = {})
43
+ def gemini(**)
44
44
  require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
45
- LLM::Gemini.new(secret, **options)
45
+ LLM::Gemini.new(**)
46
46
  end
47
47
 
48
48
  ##
49
49
  # @param host (see LLM::Ollama#initialize)
50
50
  # @return (see LLM::Ollama#initialize)
51
- def ollama(secret, options = {})
51
+ def ollama(key: nil, **)
52
52
  require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
53
- LLM::Ollama.new(secret, **options)
53
+ LLM::Ollama.new(key:, **)
54
54
  end
55
55
 
56
56
  ##
57
57
  # @param secret (see LLM::OpenAI#initialize)
58
58
  # @return (see LLM::OpenAI#initialize)
59
- def openai(secret, options = {})
59
+ def openai(**)
60
60
  require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
61
- LLM::OpenAI.new(secret, **options)
61
+ LLM::OpenAI.new(**)
62
62
  end
63
63
 
64
64
  ##
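As the llm.rb hunk above shows, the module-level constructors are keyword-only in 0.6.0, and only `LLM.ollama` supplies a default key. A short sketch; the old positional form now raises an `ArgumentError`:

```
require "llm"

llm = LLM.openai(key: ENV["KEY"]) # 0.6.0 style
llm = LLM.ollama                  # key: defaults to nil for Ollama
# llm = LLM.openai(ENV["KEY"])    # 0.5.0 style; raises ArgumentError in 0.6.0
```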
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llm.rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.5.0
4
+ version: 0.6.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Antar Azri
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2025-05-04 00:00:00.000000000 Z
12
+ date: 2025-05-06 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: webmock