llm.rb 0.3.1 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +89 -20
- data/lib/llm/chat.rb +5 -3
- data/lib/llm/core_ext/ostruct.rb +1 -1
- data/lib/llm/error.rb +6 -1
- data/lib/llm/file.rb +15 -1
- data/lib/llm/model.rb +27 -2
- data/lib/llm/provider.rb +28 -32
- data/lib/llm/providers/anthropic/format.rb +19 -6
- data/lib/llm/providers/anthropic/models.rb +62 -0
- data/lib/llm/providers/anthropic.rb +23 -8
- data/lib/llm/providers/gemini/files.rb +2 -2
- data/lib/llm/providers/gemini/format.rb +6 -1
- data/lib/llm/providers/gemini/images.rb +5 -5
- data/lib/llm/providers/gemini/models.rb +69 -0
- data/lib/llm/providers/gemini/response_parser.rb +1 -5
- data/lib/llm/providers/gemini.rb +24 -8
- data/lib/llm/providers/ollama/format.rb +11 -3
- data/lib/llm/providers/ollama/models.rb +66 -0
- data/lib/llm/providers/ollama.rb +23 -8
- data/lib/llm/providers/openai/audio.rb +3 -5
- data/lib/llm/providers/openai/files.rb +2 -2
- data/lib/llm/providers/openai/format.rb +47 -11
- data/lib/llm/providers/openai/images.rb +4 -4
- data/lib/llm/providers/openai/models.rb +62 -0
- data/lib/llm/providers/openai/response_parser.rb +1 -5
- data/lib/llm/providers/openai/responses.rb +24 -6
- data/lib/llm/providers/openai.rb +24 -7
- data/lib/llm/response/modellist.rb +18 -0
- data/lib/llm/response.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +2 -1
- data/spec/anthropic/completion_spec.rb +36 -0
- data/spec/anthropic/models_spec.rb +21 -0
- data/spec/gemini/images_spec.rb +4 -12
- data/spec/gemini/models_spec.rb +21 -0
- data/spec/llm/conversation_spec.rb +5 -3
- data/spec/ollama/models_spec.rb +20 -0
- data/spec/openai/completion_spec.rb +21 -2
- data/spec/openai/files_spec.rb +3 -3
- data/spec/openai/images_spec.rb +2 -6
- data/spec/openai/models_spec.rb +21 -0
- metadata +11 -6
- data/share/llm/models/anthropic.yml +0 -35
- data/share/llm/models/gemini.yml +0 -35
- data/share/llm/models/ollama.yml +0 -155
- data/share/llm/models/openai.yml +0 -46
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c8ce8caa7c769da9197528a864c153071f3c4aca15718efc985e543911c04ce2
+  data.tar.gz: 389ff41e9e2b35782b1484048b7597f5573bf2a86cf9eaff8cfd7c4cb2b19be3
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 39c8f71eae878b5787ca839138de07ce06cba4fdee0bafb1bd75a71f3b3e59ee08fa05f5d9f280522ec751722c9a8a15430d1b999b05e14052d85c745bf9781c
+  data.tar.gz: fdb5268b0095f09b41481e6c7071a2dae66cf9a3fd21755834b76040e13a236bc18122dfe95230dd49a827e774487a1add1aa35c8976f284a03afad881321b46
data/README.md
CHANGED
@@ -26,6 +26,7 @@ llm = LLM.openai("yourapikey")
 llm = LLM.gemini("yourapikey")
 llm = LLM.anthropic("yourapikey")
 llm = LLM.ollama(nil)
+llm = LLM.voyageai("yourapikey")
 ```
 
 ### Conversations
@@ -37,7 +38,9 @@ The following example enables lazy mode for a
 object by entering into a "lazy" conversation where messages are buffered and
 sent to the provider only when necessary. Both lazy and non-lazy conversations
 maintain a message thread that can be reused as context throughout a conversation.
-The example
+The example captures the spirit of llm.rb by demonstrating how objects cooperate
+together through composition, and it uses the stateless chat completions API that
+all LLM providers support:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -122,13 +125,10 @@ for more information on how to use the audio generation API:
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
-require "open-uri"
-require "fileutils"
 
 llm = LLM.openai(ENV["KEY"])
 res = llm.audio.create_speech(input: "Hello world")
-
-res.audio.string
+IO.copy_stream res.audio, File.join(Dir.home, "hello.mp3")
 ```
 
 #### Transcribe
@@ -151,8 +151,6 @@ examples and documentation
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
-require "open-uri"
-require "fileutils"
 
 llm = LLM.openai(ENV["KEY"])
 res = llm.audio.create_transcription(
@@ -180,9 +178,8 @@ examples and documentation
 
 
 ```ruby
+#!/usr/bin/env ruby
 require "llm"
-require "open-uri"
-require "fileutils"
 
 llm = LLM.openai(ENV["KEY"])
 res = llm.audio.create_translation(
@@ -320,6 +317,48 @@ bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n"
 # contains information about the features, installation, and usage of OpenBSD.
 ```
 
+### Prompts
+
+#### Multimodal
+
+Generally all providers accept text prompts but some providers can
+also understand URLs, and various file types (eg images, audio, video,
+etc). The llm.rb approach to multimodal prompts is to let you pass `URI`
+objects to describe links, `LLM::File` / `LLM::Response::File` objects
+to describe files, `String` objects to describe text blobs, or an array
+of the forementioned objects to describe multiple objects in a single
+prompt. Each object is a first class citizen that can be passed directly
+to a prompt.
+
+For more depth and examples on how to use the multimodal API, please see
+the [provider-specific documentation](https://0x1eef.github.io/x/llm.rb/)
+for more provider-specific examples – there can be subtle differences
+between providers and even between APIs from the same provider that are
+not covered in the README:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(ENV["KEY"])
+bot = LLM::Chat.new(llm).lazy
+
+bot.chat URI("https://example.com/path/to/image.png")
+bot.chat "Describe the above image"
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+
+file = bot.files.create(file: LLM::File("/documents/openbsd_is_awesome.pdf"))
+bot.chat file
+bot.chat "What is this file about?"
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+
+bot.chat [LLM::File("/images/puffy.png"), "What is this image about?"]
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+
+bot.chat [LLM::File("/images/beastie.png"), "What is this image about?"]
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+```
+
 ### Embeddings
 
 #### Text
@@ -350,6 +389,38 @@ print res.embeddings[0].size, "\n"
 # 1536
 ```
 
+### Models
+
+#### List
+
+Almost all LLM providers provide a models endpoint that allows a client to
+query the list of models that are available to use. The list is dynamic,
+maintained by LLM providers, and it is independent of a specific llm.rb release.
+True to the llm.rb spirit of small, composable objects that cooperate with
+each other, a
+[LLM::Model](https://0x1eef.github.io/x/llm.rb/LLM/Model.html)
+object can be used instead of a string that describes a model name (although
+either works). Let's take a look at an example:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+##
+# List all models
+llm = LLM.openai(ENV["KEY"])
+llm.models.all.each do |model|
+  print "model: ", model.id, "\n"
+end
+
+##
+# Select a model
+model = llm.models.all.find { |m| m.id == "gpt-3.5-turbo" }
+bot = LLM::Chat.new(llm, model:)
+bot.chat "Hello #{model.id} :)"
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+```
+
 ### Memory
 
 #### Child process
@@ -372,7 +443,7 @@ llm = LLM.gemini(ENV["KEY"])
 fork do
   %w[dog cat sheep goat capybara].each do |animal|
     res = llm.images.create(prompt: "a #{animal} on a rocket to the moon")
-
+    IO.copy_stream res.images[0], "#{animal}.png"
   end
 end
 Process.wait
@@ -394,19 +465,17 @@ llm.rb can be installed via rubygems.org:
 
 ## Philosophy
 
-llm.rb was built for developers who believe that simplicity
-It provides a clean, dependency-free interface to
-treating Ruby itself as the primary platform –
-specific framework or library. There is no hidden
-metaprogramming.
+llm.rb was built for developers who believe that simplicity can be challenging
+but it is always worth it. It provides a clean, dependency-free interface to
+Large Language Models, treating Ruby itself as the primary platform –
+not Rails or any other specific framework or library. There is no hidden
+magic or complex metaprogramming.
 
 Every part of llm.rb is designed to be explicit, composable, memory-safe,
 and production-ready without compromise. No unnecessary abstractions,
-no global configuration, and no dependencies that aren't
-Ruby. It has been inspired in part by other languages such
-it is not a port of any other library.
-
-Good software doesn’t need marketing. It just needs to work. :)
+no global configuration, no global state, and no dependencies that aren't
+part of standard Ruby. It has been inspired in part by other languages such
+as Python, but it is not a port of any other library.
 
 ## License
 
data/lib/llm/chat.rb
CHANGED
@@ -27,11 +27,13 @@ module LLM
     ##
     # @param [LLM::Provider] provider
     #  A provider
+    # @param [String] model
+    #  The model to maintain throughout the conversation
     # @param [Hash] params
-    #
-    def initialize(provider,
+    #  Other parameters to maintain throughout the conversation
+    def initialize(provider, model: provider.default_model, **params)
       @provider = provider
-      @params = params
+      @params = params.merge!(model:)
       @lazy = false
       @messages = []
     end
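The `LLM::Chat` constructor now takes a `model:` keyword that defaults to `provider.default_model` and is merged into the params kept for the whole conversation. A minimal usage sketch (the API key and model name are placeholders):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(ENV["KEY"])
# model: defaults to provider.default_model; passing it explicitly pins
# the conversation to one model for every buffered request
bot = LLM::Chat.new(llm, model: "gpt-3.5-turbo").lazy
bot.chat "Hello"
bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
```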
data/lib/llm/core_ext/ostruct.rb
CHANGED
data/lib/llm/error.rb
CHANGED
@@ -4,8 +4,9 @@ module LLM
   ##
   # The superclass of all LLM errors
   class Error < RuntimeError
-    def initialize
+    def initialize(...)
       block_given? ? yield(self) : nil
+      super
     end
 
   ##
@@ -17,6 +18,10 @@ module LLM
     attr_accessor :response
   end
 
+  ##
+  # When a prompt is given an object that's not understood
+  PromptError = Class.new(Error)
+
   ##
   # HTTPUnauthorized
   Unauthorized = Class.new(ResponseError)
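The new `LLM::Error::PromptError` is raised by the provider formatters when a prompt contains an object they do not understand. A sketch of how a caller might rescue it, assuming the Anthropic provider and an intentionally unsupported prompt object:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.anthropic(ENV["KEY"])
begin
  # Object.new is not a String, URI, LLM::File, or LLM::Message,
  # so the formatter raises LLM::Error::PromptError
  llm.complete Object.new
rescue LLM::Error::PromptError => e
  warn "unsupported prompt object: #{e.message}"
end
```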
data/lib/llm/file.rb
CHANGED
@@ -7,13 +7,20 @@
 class LLM::File
   ##
   # @return [String]
-  #  Returns the path to
+  #  Returns the path to the file
   attr_reader :path
 
   def initialize(path)
     @path = path
   end
 
+  ##
+  # @return [String]
+  #  Returns basename of the file
+  def basename
+    File.basename(path)
+  end
+
   ##
   # @return [String]
   #  Returns the MIME type of the file
@@ -42,6 +49,13 @@ class LLM::File
     [File.binread(path)].pack("m0")
   end
 
+  ##
+  # @return [String]
+  #  Returns the file contents in base64 URL format
+  def to_data_uri
+    "data:#{mime_type};base64,#{to_b64}"
+  end
+
   ##
   # @return [File]
   #  Yields an IO object suitable to be streamed
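A quick sketch of the new `LLM::File` helpers above; the path is hypothetical:

```ruby
#!/usr/bin/env ruby
require "llm"

file = LLM::File("/images/puffy.png")
print file.basename, "\n"     # => "puffy.png"
print file.to_data_uri, "\n"  # => "data:image/png;base64,..."
```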
data/lib/llm/model.rb
CHANGED
@@ -1,7 +1,32 @@
 # frozen_string_literal: true
 
-
+##
+# The {LLM::Model LLM::Model} class represents an LLM model that
+# is available to use. Its properties are delegated to the underlying
+# response body, and vary by provider.
+class LLM::Model < OpenStruct
+  ##
+  # Returns a subclass of {LLM::Provider LLM::Provider}
+  # @return [LLM::Provider]
+  attr_accessor :provider
+
+  ##
+  # Returns the model ID
+  # @return [String]
+  def id
+    case @provider.class.to_s
+    when "LLM::Ollama"
+      self["name"]
+    when "LLM::Gemini"
+      self["name"].sub(%r|\Amodels/|, "")
+    else
+      self["id"]
+    end
+  end
+
+  ##
+  # @return [String]
   def to_json(*)
-
+    id.to_json(*)
   end
 end
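A sketch of how `LLM::Model#id` resolves across providers: Gemini reports names such as `models/<name>` and the `models/` prefix is stripped, Ollama reads the `name` property, and other providers fall back to `id`. Assuming a Gemini key in `ENV["KEY"]` and that the illustrative model name appears in the listing:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
model = llm.models.all.find { |m| m.id == "gemini-1.5-flash" }
# The response body stores "models/gemini-1.5-flash"; LLM::Model#id strips
# the prefix, so the object can stand in wherever a model name is expected
bot = LLM::Chat.new(llm, model:)
bot.chat "Hello #{model.id} :)"
```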
data/lib/llm/provider.rb
CHANGED
@@ -4,16 +4,7 @@
 # The Provider class represents an abstract class for
 # LLM (Language Model) providers.
 #
-# @note
-#  This class is not meant to be instantiated directly.
-#  Instead, use one of the subclasses that implement
-#  the methods defined here.
-#
 # @abstract
-# @see LLM::Provider::OpenAI
-# @see LLM::Provider::Anthropic
-# @see LLM::Provider::Gemini
-# @see LLM::Provider::Ollama
 class LLM::Provider
   require "net/http"
 
@@ -53,7 +44,7 @@ class LLM::Provider
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
   # @return [LLM::Response::Embedding]
-  def embed(input, model
+  def embed(input, model: nil, **params)
     raise NotImplementedError
   end
 
@@ -78,7 +69,7 @@ class LLM::Provider
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
   # @return [LLM::Response::Completion]
-  def complete(prompt, role = :user, model
+  def complete(prompt, role = :user, model: default_model, **params)
     raise NotImplementedError
   end
 
@@ -94,8 +85,8 @@ class LLM::Provider
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def chat(prompt, role = :user, model:
-    LLM::Chat.new(self, params).lazy.chat(prompt, role)
+  def chat(prompt, role = :user, model: default_model, **params)
+    LLM::Chat.new(self, **params.merge(model:)).lazy.chat(prompt, role)
   end
 
   ##
@@ -110,8 +101,8 @@ class LLM::Provider
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def chat!(prompt, role = :user, model:
-    LLM::Chat.new(self, params).chat(prompt, role)
+  def chat!(prompt, role = :user, model: default_model, **params)
+    LLM::Chat.new(self, **params.merge(model:)).chat(prompt, role)
   end
 
   ##
@@ -126,8 +117,8 @@ class LLM::Provider
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def respond(prompt, role = :user, model:
-    LLM::Chat.new(self, params).lazy.respond(prompt, role)
+  def respond(prompt, role = :user, model: default_model, **params)
+    LLM::Chat.new(self, **params.merge(model:)).lazy.respond(prompt, role)
   end
 
   ##
@@ -142,8 +133,8 @@ class LLM::Provider
   #  Other completion parameters to maintain throughout a chat
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def respond!(prompt, role = :user, model:
-    LLM::Chat.new(self, params).respond(prompt, role)
+  def respond!(prompt, role = :user, model: default_model, **params)
+    LLM::Chat.new(self, **params.merge(model:)).respond(prompt, role)
   end
 
   ##
@@ -178,6 +169,13 @@ class LLM::Provider
     raise NotImplementedError
   end
 
+  ##
+  # @return [LLM::OpenAI::Models]
+  #  Returns an interface to the models API
+  def models
+    raise NotImplementedError
+  end
+
   ##
   # @return [String]
   #  Returns the role of the assistant in the conversation.
@@ -187,9 +185,9 @@ class LLM::Provider
   end
 
   ##
-  # @return [
-  #  Returns
-  def
+  # @return [String]
+  #  Returns the default model for chat completions
+  def default_model
     raise NotImplementedError
   end
 
@@ -248,15 +246,13 @@ class LLM::Provider
   end
 
   ##
-  # @param [
-  #  The
-  # @
-
-
-
-
-
-    yaml = File.join(sharedir, "models", "#{provider}.yml")
-    YAML.safe_load_file(yaml).transform_values { LLM::Model.new(_1) }
+  # @param [Net::HTTPRequest] req
+  #  The request to set the body stream for
+  # @param [IO] io
+  #  The IO object to set as the body stream
+  # @return [void]
+  def set_body_stream(req, io)
+    req.body_stream = io
+    req["transfer-encoding"] = "chunked" unless req["content-length"]
  end
 end
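The new `set_body_stream` helper makes the request body an IO that Net::HTTP reads as it writes, falling back to chunked transfer-encoding when no content-length is known. A rough standalone sketch of the same idea (paths and payload are placeholders):

```ruby
require "net/http"
require "stringio"

req = Net::HTTP::Post.new("/v1/messages", {"content-type" => "application/json"})
io  = StringIO.new(%({"greeting":"hello"}))
# Net::HTTP streams body_stream instead of buffering a body string;
# without a content-length the request must declare chunked encoding
req.body_stream = io
req["transfer-encoding"] = "chunked" unless req["content-length"]
```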
data/lib/llm/providers/anthropic/format.rb
CHANGED
@@ -26,13 +26,26 @@ class LLM::Anthropic
     # @return [String, Hash]
     #  The formatted content
     def format_content(content)
-
-
-
-
-      }]
+      case content
+      when Array
+        content.flat_map { format_content(_1) }
+      when URI
+        [{type: :image, source: {type: "url", url: content.to_s}}]
+      when LLM::File
+        if content.image?
+          [{type: :image, source: {type: "base64", media_type: content.mime_type, data: content.to_b64}}]
+        else
+          raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+            "is not an image, and therefore not supported by the " \
+            "Anthropic API"
+        end
+      when String
+        [{type: :text, text: content}]
+      when LLM::Message
+        format_content(content.content)
       else
-        content
+        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+          "is not supported by the Anthropic API"
       end
     end
   end
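With the formatter above, multimodal prompts map onto Anthropic content parts: a `URI` becomes a URL image source, an image `LLM::File` becomes a base64 source, and a `String` becomes a text part. A short sketch, assuming the URL and file path are placeholders:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.anthropic(ENV["KEY"])
bot = LLM::Chat.new(llm).lazy
bot.chat URI("https://example.com/path/to/image.png") # url image source
bot.chat LLM::File("/images/puffy.png")                # base64 image source
bot.chat "Describe both images"                        # text part
bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
```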
data/lib/llm/providers/anthropic/models.rb
ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+class LLM::Anthropic
+  ##
+  # The {LLM::Anthropic::Models LLM::Anthropic::Models} class provides a model
+  # object for interacting with [Anthropic's models API](https://platform.anthropic.com/docs/api-reference/models/list).
+  # The models API allows a client to query Anthropic for a list of models
+  # that are available for use with the Anthropic API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.anthropic(ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Anthropic::Files]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.anthropic(ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://docs.anthropic.com/en/api/models-list Anthropic docs
+    # @param [Hash] params Other parameters (see Anthropic docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::FileList]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["data"].map do |model|
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:headers, :request].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
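A usage sketch of the new Anthropic models endpoint; it assumes a claude-3-5 model appears in the listing:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.anthropic(ENV["KEY"])
res = llm.models.all
# res is an LLM::Response::ModelList whose #models are LLM::Model objects
model = res.models.find { |m| m.id.start_with?("claude-3-5") }
bot = LLM::Chat.new(llm, model:)
bot.chat "Hello #{model.id} :)"
```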
data/lib/llm/providers/anthropic.rb
CHANGED
@@ -8,6 +8,7 @@ module LLM
     require_relative "anthropic/error_handler"
     require_relative "anthropic/response_parser"
     require_relative "anthropic/format"
+    require_relative "anthropic/models"
     include Format
 
     HOST = "api.anthropic.com"
@@ -45,16 +46,28 @@ module LLM
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model:
-      params
-      req
+    def complete(prompt, role = :user, model: default_model, max_tokens: 1024, **params)
+      params = {max_tokens:, model:}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/messages", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-
-
+      body = JSON.dump({messages: format(messages)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # Provides an interface to Anthropic's models API
+    # @see https://docs.anthropic.com/en/api/models-list
+    # @return [LLM::Anthropic::Models]
+    def models
+      LLM::Anthropic::Models.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role
@@ -62,9 +75,11 @@ module LLM
     end
 
     ##
-    #
-
-
+    # Returns the default model for chat completions
+    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-3-5-sonnet-20240620
+    # @return [String]
+    def default_model
+      "claude-3-5-sonnet-20240620"
     end
 
     private
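Anthropic's `complete` now defaults `max_tokens` to 1024 and `model` to claude-3-5-sonnet-20240620; both remain overridable per call. A brief sketch of overriding them through the chat interface:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.anthropic(ENV["KEY"])
# Extra keywords are kept as completion params for the whole conversation
bot = llm.chat("Hello, world", max_tokens: 512)
bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
```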
data/lib/llm/providers/gemini/files.rb
CHANGED
@@ -83,7 +83,7 @@ class LLM::Gemini
       req["X-Goog-Upload-Offset"] = 0
       req["X-Goog-Upload-Command"] = "upload, finalize"
       file.with_io do |io|
-        req
+        set_body_stream(req, io)
         res = request(http, req)
         LLM::Response::File.new(res)
       end
@@ -155,7 +155,7 @@ class LLM::Gemini
       @provider.instance_variable_get(:@secret)
     end
 
-    [:headers, :request].each do |m|
+    [:headers, :request, :set_body_stream].each do |m|
       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
     end
   end
data/lib/llm/providers/gemini/format.rb
CHANGED
@@ -35,8 +35,13 @@ class LLM::Gemini
       when LLM::File
         file = content
         {inline_data: {mime_type: file.mime_type, data: file.to_b64}}
-
+      when String
         {text: content}
+      when LLM::Message
+        format_content(content.content)
+      else
+        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+          "is not supported by the Gemini API"
       end
     end
   end
data/lib/llm/providers/gemini/images.rb
CHANGED
@@ -13,7 +13,7 @@ class LLM::Gemini
   #
   #  llm = LLM.gemini(ENV["KEY"])
   #  res = llm.images.create prompt: "A dog on a rocket to the moon"
-  #
+  #  IO.copy_stream res.images[0], "rocket.png"
   class Images
     include Format
 
@@ -30,7 +30,7 @@ class LLM::Gemini
     # @example
     #  llm = LLM.gemini(ENV["KEY"])
     #  res = llm.images.create prompt: "A dog on a rocket to the moon"
-    #
+    #  IO.copy_stream res.images[0], "rocket.png"
     # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
     # @param [String] prompt The prompt
     # @param [Hash] params Other parameters (see Gemini docs)
@@ -56,7 +56,7 @@ class LLM::Gemini
     # @example
     #  llm = LLM.gemini(ENV["KEY"])
     #  res = llm.images.edit image: LLM::File("cat.png"), prompt: "Add a hat to the cat"
-    #
+    #  IO.copy_stream res.images[0], "hatoncat.png"
     # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
     # @param [LLM::File] image The image to edit
     # @param [String] prompt The prompt
@@ -70,7 +70,7 @@ class LLM::Gemini
         contents: [{parts: [{text: prompt}, format_content(image)]}],
         generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
       }.merge!(params)).b
-      req
+      set_body_stream(req, StringIO.new(body))
       res = request(http, req)
       LLM::Response::Image.new(res).extend(response_parser)
     end
@@ -92,7 +92,7 @@ class LLM::Gemini
       @provider.instance_variable_get(:@http)
     end
 
-    [:response_parser, :headers, :request].each do |m|
+    [:response_parser, :headers, :request, :set_body_stream].each do |m|
       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
     end
   end