llm.rb 0.11.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +0 -0
- data/README.md +51 -14
- data/lib/llm/bot/builder.rb +0 -0
- data/lib/llm/bot/conversable.rb +0 -0
- data/lib/llm/bot/prompt/completion.rb +0 -0
- data/lib/llm/bot/prompt/respond.rb +0 -0
- data/lib/llm/bot.rb +10 -17
- data/lib/llm/buffer.rb +7 -0
- data/lib/llm/error.rb +0 -0
- data/lib/llm/{event_handler.rb → eventhandler.rb} +0 -0
- data/lib/llm/eventstream/event.rb +0 -0
- data/lib/llm/eventstream/parser.rb +0 -0
- data/lib/llm/eventstream.rb +0 -0
- data/lib/llm/file.rb +4 -3
- data/lib/llm/function.rb +2 -2
- data/lib/llm/json/schema/array.rb +0 -0
- data/lib/llm/json/schema/boolean.rb +0 -0
- data/lib/llm/json/schema/integer.rb +0 -0
- data/lib/llm/json/schema/leaf.rb +0 -0
- data/lib/llm/json/schema/null.rb +0 -0
- data/lib/llm/json/schema/number.rb +0 -0
- data/lib/llm/json/schema/object.rb +0 -0
- data/lib/llm/json/schema/string.rb +0 -0
- data/lib/llm/json/schema/version.rb +0 -0
- data/lib/llm/json/schema.rb +0 -0
- data/lib/llm/message.rb +0 -0
- data/lib/llm/mime.rb +0 -0
- data/lib/llm/multipart.rb +0 -1
- data/lib/llm/object/builder.rb +0 -0
- data/lib/llm/object/kernel.rb +0 -0
- data/lib/llm/object.rb +2 -3
- data/lib/llm/provider.rb +1 -1
- data/lib/llm/providers/anthropic/error_handler.rb +0 -0
- data/lib/llm/providers/anthropic/format/completion_format.rb +0 -0
- data/lib/llm/providers/anthropic/format.rb +0 -0
- data/lib/llm/providers/anthropic/models.rb +2 -2
- data/lib/llm/providers/anthropic/response/completion.rb +0 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
- data/lib/llm/providers/anthropic.rb +10 -1
- data/lib/llm/providers/deepseek/format/completion_format.rb +0 -0
- data/lib/llm/providers/deepseek/format.rb +0 -0
- data/lib/llm/providers/deepseek.rb +10 -1
- data/lib/llm/providers/gemini/audio.rb +3 -3
- data/lib/llm/providers/gemini/error_handler.rb +0 -0
- data/lib/llm/providers/gemini/files.rb +8 -20
- data/lib/llm/providers/gemini/format/completion_format.rb +2 -2
- data/lib/llm/providers/gemini/format.rb +0 -0
- data/lib/llm/providers/gemini/images.rb +4 -4
- data/lib/llm/providers/gemini/models.rb +2 -2
- data/lib/llm/providers/gemini/response/completion.rb +0 -0
- data/lib/llm/providers/gemini/response/embedding.rb +1 -1
- data/lib/llm/providers/gemini/response/file.rb +0 -0
- data/lib/llm/providers/gemini/response/image.rb +0 -0
- data/lib/llm/providers/gemini/stream_parser.rb +0 -0
- data/lib/llm/providers/gemini.rb +13 -21
- data/lib/llm/providers/llamacpp.rb +12 -1
- data/lib/llm/providers/ollama/error_handler.rb +0 -0
- data/lib/llm/providers/ollama/format/completion_format.rb +0 -0
- data/lib/llm/providers/ollama/format.rb +0 -0
- data/lib/llm/providers/ollama/models.rb +0 -0
- data/lib/llm/providers/ollama/response/completion.rb +0 -0
- data/lib/llm/providers/ollama/response/embedding.rb +1 -2
- data/lib/llm/providers/ollama/stream_parser.rb +0 -0
- data/lib/llm/providers/ollama.rb +8 -11
- data/lib/llm/providers/openai/audio.rb +4 -4
- data/lib/llm/providers/openai/error_handler.rb +0 -0
- data/lib/llm/providers/openai/files.rb +8 -19
- data/lib/llm/providers/openai/format/completion_format.rb +0 -0
- data/lib/llm/providers/openai/format/moderation_format.rb +0 -0
- data/lib/llm/providers/openai/format/respond_format.rb +0 -0
- data/lib/llm/providers/openai/format.rb +0 -0
- data/lib/llm/providers/openai/images.rb +10 -10
- data/lib/llm/providers/openai/models.rb +2 -2
- data/lib/llm/providers/openai/moderations.rb +0 -0
- data/lib/llm/providers/openai/response/audio.rb +0 -0
- data/lib/llm/providers/openai/response/completion.rb +2 -2
- data/lib/llm/providers/openai/response/embedding.rb +3 -3
- data/lib/llm/providers/openai/response/file.rb +0 -0
- data/lib/llm/providers/openai/response/image.rb +0 -0
- data/lib/llm/providers/openai/response/moderations.rb +0 -0
- data/lib/llm/providers/openai/response/responds.rb +0 -1
- data/lib/llm/providers/openai/responses.rb +6 -25
- data/lib/llm/providers/openai/stream_parser.rb +1 -0
- data/lib/llm/providers/openai/vector_stores.rb +85 -3
- data/lib/llm/providers/openai.rb +10 -1
- data/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/response.rb +5 -0
- data/lib/llm/utils.rb +0 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +10 -1
- data/llm.gemspec +4 -4
- metadata +12 -10
data/lib/llm/providers/gemini.rb
CHANGED
@@ -3,30 +3,19 @@
|
|
3
3
|
module LLM
|
4
4
|
##
|
5
5
|
# The Gemini class implements a provider for
|
6
|
-
# [Gemini](https://ai.google.dev/).
|
6
|
+
# [Gemini](https://ai.google.dev/). The Gemini provider
|
7
|
+
# can accept multiple inputs (text, images, audio, and video).
|
8
|
+
# The inputs can be provided inline via the prompt for files
|
9
|
+
# under 20MB or via the Gemini Files API for files
|
10
|
+
# that are over 20MB.
|
7
11
|
#
|
8
|
-
#
|
9
|
-
# audio, and video). The inputs can be provided inline via the
|
10
|
-
# prompt for files under 20MB or via the Gemini Files API for
|
11
|
-
# files that are over 20MB
|
12
|
-
#
|
13
|
-
# @example example #1
|
14
|
-
# #!/usr/bin/env ruby
|
15
|
-
# require "llm"
|
16
|
-
#
|
17
|
-
# llm = LLM.gemini(ENV["KEY"])
|
18
|
-
# bot = LLM::Bot.new(llm)
|
19
|
-
# bot.chat LLM.File("/images/capybara.png")
|
20
|
-
# bot.chat "Describe the image"
|
21
|
-
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
22
|
-
#
|
23
|
-
# @example example #2
|
12
|
+
# @example
|
24
13
|
# #!/usr/bin/env ruby
|
25
14
|
# require "llm"
|
26
15
|
#
|
27
|
-
# llm = LLM.gemini(ENV["KEY"])
|
16
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
28
17
|
# bot = LLM::Bot.new(llm)
|
29
|
-
# bot.chat ["
|
18
|
+
# bot.chat ["Tell me about this photo", File.open("/images/horse.jpg", "rb")]
|
30
19
|
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
31
20
|
class Gemini < Provider
|
32
21
|
require_relative "gemini/response/embedding"
|
@@ -55,7 +44,7 @@ module LLM
|
|
55
44
|
# @param model (see LLM::Provider#embed)
|
56
45
|
# @param params (see LLM::Provider#embed)
|
57
46
|
# @raise (see LLM::Provider#request)
|
58
|
-
# @return
|
47
|
+
# @return [LLM::Response]
|
59
48
|
def embed(input, model: "text-embedding-004", **params)
|
60
49
|
model = model.respond_to?(:id) ? model.id : model
|
61
50
|
path = ["/v1beta/models/#{model}", "embedContent?key=#{@key}"].join(":")
|
@@ -74,7 +63,7 @@ module LLM
|
|
74
63
|
# @raise (see LLM::Provider#request)
|
75
64
|
# @raise [LLM::PromptError]
|
76
65
|
# When given an object a provider does not understand
|
77
|
-
# @return
|
66
|
+
# @return [LLM::Response]
|
78
67
|
def complete(prompt, params = {})
|
79
68
|
params = {role: :user, model: default_model}.merge!(params)
|
80
69
|
params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
|
@@ -93,6 +82,7 @@ module LLM
|
|
93
82
|
##
|
94
83
|
# Provides an interface to Gemini's audio API
|
95
84
|
# @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
|
85
|
+
# @return [LLM::Gemini::Audio]
|
96
86
|
def audio
|
97
87
|
LLM::Gemini::Audio.new(self)
|
98
88
|
end
|
@@ -108,6 +98,7 @@ module LLM
|
|
108
98
|
##
|
109
99
|
# Provides an interface to Gemini's file management API
|
110
100
|
# @see https://ai.google.dev/gemini-api/docs/files Gemini docs
|
101
|
+
# @return [LLM::Gemini::Files]
|
111
102
|
def files
|
112
103
|
LLM::Gemini::Files.new(self)
|
113
104
|
end
|
@@ -115,6 +106,7 @@ module LLM
|
|
115
106
|
##
|
116
107
|
# Provides an interface to Gemini's models API
|
117
108
|
# @see https://ai.google.dev/gemini-api/docs/models Gemini docs
|
109
|
+
# @return [LLM::Gemini::Models]
|
118
110
|
def models
|
119
111
|
LLM::Gemini::Models.new(self)
|
120
112
|
end
|
@@ -7,7 +7,18 @@ module LLM
|
|
7
7
|
# The LlamaCpp class implements a provider for
|
8
8
|
# [llama.cpp](https://github.com/ggml-org/llama.cpp)
|
9
9
|
# through the OpenAI-compatible API provided by the
|
10
|
-
# llama-server binary.
|
10
|
+
# llama-server binary. Similar to the ollama provider,
|
11
|
+
# this provider supports a wide range of models and
|
12
|
+
# is straightforward to run on your own hardware.
|
13
|
+
#
|
14
|
+
# @example
|
15
|
+
# #!/usr/bin/env ruby
|
16
|
+
# require "llm"
|
17
|
+
#
|
18
|
+
# llm = LLM.llamacpp(key: nil)
|
19
|
+
# bot = LLM::Bot.new(llm)
|
20
|
+
# bot.chat ["Tell me about this photo", File.open("/images/frog.jpg", "rb")]
|
21
|
+
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
11
22
|
class LlamaCpp < OpenAI
|
12
23
|
##
|
13
24
|
# @param (see LLM::Provider#initialize)
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
data/lib/llm/providers/ollama.rb
CHANGED
@@ -2,21 +2,18 @@
|
|
2
2
|
|
3
3
|
module LLM
|
4
4
|
##
|
5
|
-
# The Ollama class implements a provider for [Ollama](https://ollama.ai/)
|
6
|
-
#
|
7
|
-
#
|
8
|
-
#
|
9
|
-
# models that can process images and text. See the example for a demonstration
|
10
|
-
# of a multi-modal model by the name `llava`
|
5
|
+
# The Ollama class implements a provider for [Ollama](https://ollama.ai/) –
|
6
|
+
# and the provider supports a wide range of models. It is straightforward
|
7
|
+
# to run on your own hardware, and there are a number of multi-modal models
|
8
|
+
# that can process both images and text.
|
11
9
|
#
|
12
10
|
# @example
|
13
11
|
# #!/usr/bin/env ruby
|
14
12
|
# require "llm"
|
15
13
|
#
|
16
|
-
# llm = LLM.ollama(nil)
|
14
|
+
# llm = LLM.ollama(key: nil)
|
17
15
|
# bot = LLM::Bot.new(llm, model: "llava")
|
18
|
-
# bot.chat
|
19
|
-
# bot.chat "Describe the image"
|
16
|
+
# bot.chat ["Tell me about this image", File.open("/images/parrot.png", "rb")]
|
20
17
|
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
21
18
|
class Ollama < Provider
|
22
19
|
require_relative "ollama/response/embedding"
|
@@ -42,7 +39,7 @@ module LLM
|
|
42
39
|
# @param model (see LLM::Provider#embed)
|
43
40
|
# @param params (see LLM::Provider#embed)
|
44
41
|
# @raise (see LLM::Provider#request)
|
45
|
-
# @return
|
42
|
+
# @return [LLM::Response]
|
46
43
|
def embed(input, model: default_model, **params)
|
47
44
|
params = {model:}.merge!(params)
|
48
45
|
req = Net::HTTP::Post.new("/v1/embeddings", headers)
|
@@ -60,7 +57,7 @@ module LLM
|
|
60
57
|
# @raise (see LLM::Provider#request)
|
61
58
|
# @raise [LLM::PromptError]
|
62
59
|
# When given an object a provider does not understand
|
63
|
-
# @return
|
60
|
+
# @return [LLM::Response]
|
64
61
|
def complete(prompt, params = {})
|
65
62
|
params = {role: :user, model: default_model, stream: true}.merge!(params)
|
66
63
|
params = [params, {format: params[:schema]}, format_tools(params)].inject({}, &:merge!).compact
|
@@ -5,7 +5,7 @@ class LLM::OpenAI
|
|
5
5
|
# The {LLM::OpenAI::Audio LLM::OpenAI::Audio} class provides an audio
|
6
6
|
# object for interacting with [OpenAI's audio API](https://platform.openai.com/docs/api-reference/audio/createSpeech).
|
7
7
|
# @example
|
8
|
-
# llm = LLM.openai(ENV["KEY"])
|
8
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
9
9
|
# res = llm.audio.create_speech(input: "A dog on a rocket to the moon")
|
10
10
|
# IO.copy_stream res.audio, "rocket.mp3"
|
11
11
|
class Audio
|
@@ -20,7 +20,7 @@ class LLM::OpenAI
|
|
20
20
|
##
|
21
21
|
# Create an audio track
|
22
22
|
# @example
|
23
|
-
# llm = LLM.openai(ENV["KEY"])
|
23
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
24
24
|
# res = llm.images.create_speech(input: "A dog on a rocket to the moon")
|
25
25
|
# File.binwrite("rocket.mp3", res.audio.string)
|
26
26
|
# @see https://platform.openai.com/docs/api-reference/audio/createSpeech OpenAI docs
|
@@ -42,7 +42,7 @@ class LLM::OpenAI
|
|
42
42
|
##
|
43
43
|
# Create an audio transcription
|
44
44
|
# @example
|
45
|
-
# llm = LLM.openai(ENV["KEY"])
|
45
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
46
46
|
# res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
|
47
47
|
# res.text # => "A dog on a rocket to the moon"
|
48
48
|
# @see https://platform.openai.com/docs/api-reference/audio/createTranscription OpenAI docs
|
@@ -64,7 +64,7 @@ class LLM::OpenAI
|
|
64
64
|
# Create an audio translation (in English)
|
65
65
|
# @example
|
66
66
|
# # Arabic => English
|
67
|
-
# llm = LLM.openai(ENV["KEY"])
|
67
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
68
68
|
# res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
|
69
69
|
# res.text # => "In the name of Allah, the Beneficent, the Merciful."
|
70
70
|
# @see https://platform.openai.com/docs/api-reference/audio/createTranslation OpenAI docs
|
File without changes
|
@@ -12,21 +12,10 @@ class LLM::OpenAI
|
|
12
12
|
# #!/usr/bin/env ruby
|
13
13
|
# require "llm"
|
14
14
|
#
|
15
|
-
# llm = LLM.openai(ENV["KEY"])
|
15
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
16
16
|
# bot = LLM::Bot.new(llm)
|
17
|
-
# file = llm.files.create file: "/
|
18
|
-
# bot.chat
|
19
|
-
# bot.chat("Describe the document")
|
20
|
-
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
21
|
-
#
|
22
|
-
# @example example #2
|
23
|
-
# #!/usr/bin/env ruby
|
24
|
-
# require "llm"
|
25
|
-
#
|
26
|
-
# llm = LLM.openai(ENV["KEY"])
|
27
|
-
# bot = LLM::Bot.new(llm)
|
28
|
-
# file = llm.files.create file: "/documents/openbsd.pdf"
|
29
|
-
# bot.chat(["Describe the document I sent to you", file])
|
17
|
+
# file = llm.files.create file: "/books/goodread.pdf"
|
18
|
+
# bot.chat ["Tell me about this PDF", file]
|
30
19
|
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
31
20
|
class Files
|
32
21
|
require_relative "response/file"
|
@@ -42,7 +31,7 @@ class LLM::OpenAI
|
|
42
31
|
##
|
43
32
|
# List all files
|
44
33
|
# @example
|
45
|
-
# llm = LLM.openai(ENV["KEY"])
|
34
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
46
35
|
# res = llm.files.all
|
47
36
|
# res.each do |file|
|
48
37
|
# print "id: ", file.id, "\n"
|
@@ -61,7 +50,7 @@ class LLM::OpenAI
|
|
61
50
|
##
|
62
51
|
# Create a file
|
63
52
|
# @example
|
64
|
-
# llm = LLM.openai(ENV["KEY"])
|
53
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
65
54
|
# res = llm.files.create file: "/documents/haiku.txt"
|
66
55
|
# @see https://platform.openai.com/docs/api-reference/files/create OpenAI docs
|
67
56
|
# @param [File, LLM::File, String] file The file
|
@@ -81,7 +70,7 @@ class LLM::OpenAI
|
|
81
70
|
##
|
82
71
|
# Get a file
|
83
72
|
# @example
|
84
|
-
# llm = LLM.openai(ENV["KEY"])
|
73
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
85
74
|
# res = llm.files.get(file: "file-1234567890")
|
86
75
|
# print "id: ", res.id, "\n"
|
87
76
|
# @see https://platform.openai.com/docs/api-reference/files/get OpenAI docs
|
@@ -100,7 +89,7 @@ class LLM::OpenAI
|
|
100
89
|
##
|
101
90
|
# Download the content of a file
|
102
91
|
# @example
|
103
|
-
# llm = LLM.openai(ENV["KEY"])
|
92
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
104
93
|
# res = llm.files.download(file: "file-1234567890")
|
105
94
|
# File.binwrite "haiku1.txt", res.file.read
|
106
95
|
# print res.file.read, "\n"
|
@@ -121,7 +110,7 @@ class LLM::OpenAI
|
|
121
110
|
##
|
122
111
|
# Delete a file
|
123
112
|
# @example
|
124
|
-
# llm = LLM.openai(ENV["KEY"])
|
113
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
125
114
|
# res = llm.files.delete(file: "file-1234567890")
|
126
115
|
# print res.deleted, "\n"
|
127
116
|
# @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
@@ -2,27 +2,27 @@
|
|
2
2
|
|
3
3
|
class LLM::OpenAI
|
4
4
|
##
|
5
|
-
# The {LLM::OpenAI::Images LLM::OpenAI::Images} class provides an
|
6
|
-
#
|
5
|
+
# The {LLM::OpenAI::Images LLM::OpenAI::Images} class provides an interface
|
6
|
+
# for [OpenAI's images API](https://platform.openai.com/docs/api-reference/images).
|
7
7
|
# OpenAI supports multiple response formats: temporary URLs, or binary strings
|
8
8
|
# encoded in base64. The default is to return temporary URLs.
|
9
9
|
#
|
10
|
-
# @example
|
10
|
+
# @example Temporary URLs
|
11
11
|
# #!/usr/bin/env ruby
|
12
12
|
# require "llm"
|
13
13
|
# require "open-uri"
|
14
14
|
# require "fileutils"
|
15
15
|
#
|
16
|
-
# llm = LLM.openai(ENV["KEY"])
|
16
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
17
17
|
# res = llm.images.create prompt: "A dog on a rocket to the moon"
|
18
18
|
# FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
|
19
19
|
# "rocket.png"
|
20
20
|
#
|
21
|
-
# @example
|
21
|
+
# @example Binary strings
|
22
22
|
# #!/usr/bin/env ruby
|
23
23
|
# require "llm"
|
24
24
|
#
|
25
|
-
# llm = LLM.openai(ENV["KEY"])
|
25
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
26
26
|
# res = llm.images.create prompt: "A dog on a rocket to the moon",
|
27
27
|
# response_format: "b64_json"
|
28
28
|
# IO.copy_stream res.images[0], "rocket.png"
|
@@ -39,9 +39,9 @@ class LLM::OpenAI
|
|
39
39
|
##
|
40
40
|
# Create an image
|
41
41
|
# @example
|
42
|
-
# llm = LLM.openai(ENV["KEY"])
|
42
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
43
43
|
# res = llm.images.create prompt: "A dog on a rocket to the moon"
|
44
|
-
#
|
44
|
+
# res.urls.each { print _1, "\n" }
|
45
45
|
# @see https://platform.openai.com/docs/api-reference/images/create OpenAI docs
|
46
46
|
# @param [String] prompt The prompt
|
47
47
|
# @param [String] model The model to use
|
@@ -58,7 +58,7 @@ class LLM::OpenAI
|
|
58
58
|
##
|
59
59
|
# Create image variations
|
60
60
|
# @example
|
61
|
-
# llm = LLM.openai(ENV["KEY"])
|
61
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
62
62
|
# res = llm.images.create_variation(image: "/images/hat.png", n: 5)
|
63
63
|
# p res.urls
|
64
64
|
# @see https://platform.openai.com/docs/api-reference/images/createVariation OpenAI docs
|
@@ -80,7 +80,7 @@ class LLM::OpenAI
|
|
80
80
|
##
|
81
81
|
# Edit an image
|
82
82
|
# @example
|
83
|
-
# llm = LLM.openai(ENV["KEY"])
|
83
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
84
84
|
# res = llm.images.edit(image: "/images/hat.png", prompt: "A cat wearing this hat")
|
85
85
|
# p res.urls
|
86
86
|
# @see https://platform.openai.com/docs/api-reference/images/createEdit OpenAI docs
|
@@ -11,7 +11,7 @@ class LLM::OpenAI
|
|
11
11
|
# #!/usr/bin/env ruby
|
12
12
|
# require "llm"
|
13
13
|
#
|
14
|
-
# llm = LLM.openai(ENV["KEY"])
|
14
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
15
15
|
# res = llm.models.all
|
16
16
|
# res.each do |model|
|
17
17
|
# print "id: ", model.id, "\n"
|
@@ -28,7 +28,7 @@ class LLM::OpenAI
|
|
28
28
|
##
|
29
29
|
# List all models
|
30
30
|
# @example
|
31
|
-
# llm = LLM.openai(ENV["KEY"])
|
31
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
32
32
|
# res = llm.models.all
|
33
33
|
# res.each do |model|
|
34
34
|
# print "id: ", model.id, "\n"
|
File without changes
|
File without changes
|
@@ -16,7 +16,7 @@ module LLM::OpenAI::Response
|
|
16
16
|
end
|
17
17
|
end
|
18
18
|
alias_method :messages, :choices
|
19
|
-
|
19
|
+
|
20
20
|
def model = body.model
|
21
21
|
def prompt_tokens = body.usage&.prompt_tokens
|
22
22
|
def completion_tokens = body.usage&.completion_tokens
|
@@ -36,4 +36,4 @@ module LLM::OpenAI::Response
|
|
36
36
|
end
|
37
37
|
end
|
38
38
|
end
|
39
|
-
end
|
39
|
+
end
|
@@ -1,9 +1,9 @@
|
|
1
|
-
|
1
|
+
# frozen_string_literal: true
|
2
2
|
|
3
|
-
|
3
|
+
module LLM::OpenAI::Response
|
4
4
|
module Embedding
|
5
5
|
def embeddings = data.map { _1["embedding"] }
|
6
6
|
def prompt_tokens = data.dig(0, "usage", "prompt_tokens")
|
7
7
|
def total_tokens = data.dig(0, "usage", "total_tokens")
|
8
8
|
end
|
9
|
-
end
|
9
|
+
end
|
File without changes
|
File without changes
|
File without changes
|
@@ -2,36 +2,17 @@
|
|
2
2
|
|
3
3
|
class LLM::OpenAI
|
4
4
|
##
|
5
|
-
# The {LLM::OpenAI::Responses LLM::OpenAI::Responses} class provides
|
6
|
-
#
|
7
|
-
# The responses API is similar to the chat completions API but it can maintain
|
8
|
-
# conversation state across multiple requests. This is useful when you want to
|
9
|
-
# save bandwidth and/or not maintain the message thread by yourself.
|
5
|
+
# The {LLM::OpenAI::Responses LLM::OpenAI::Responses} class provides
|
6
|
+
# an interface for [OpenAI's response API](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
|
10
7
|
#
|
11
8
|
# @example example #1
|
12
9
|
# #!/usr/bin/env ruby
|
13
10
|
# require "llm"
|
14
11
|
#
|
15
|
-
# llm = LLM.openai(ENV["KEY"])
|
16
|
-
# res1 = llm.responses.create "Your task is to
|
17
|
-
# res2 = llm.responses.create "5 + 5
|
18
|
-
# [res1,res2].each { llm.responses.delete(_1) }
|
19
|
-
#
|
20
|
-
# @example example #2
|
21
|
-
# #!/usr/bin/env ruby
|
22
|
-
# require "llm"
|
23
|
-
#
|
24
|
-
# llm = LLM.openai(ENV["KEY"])
|
25
|
-
# file = llm.files.create file: "/images/hat.png"
|
26
|
-
# res = llm.responses.create ["Describe the image", file]
|
27
|
-
#
|
28
|
-
# @example example #3
|
29
|
-
# #!/usr/bin/env ruby
|
30
|
-
# require "llm"
|
31
|
-
#
|
32
|
-
# llm = LLM.openai(ENV["KEY"])
|
33
|
-
# file = llm.files.create file: "/documents/freebsd.pdf"
|
34
|
-
# res = llm.responses.create ["Describe the document, file]
|
12
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
13
|
+
# res1 = llm.responses.create "Your task is to answer the user's questions", role: :developer
|
14
|
+
# res2 = llm.responses.create "5 + 5 = X ?", role: :user, previous_response_id: res1.id
|
15
|
+
# [res1, res2].each { llm.responses.delete(_1) }
|
35
16
|
class Responses
|
36
17
|
require_relative "response/responds"
|
37
18
|
include Format
|
@@ -2,9 +2,8 @@
|
|
2
2
|
|
3
3
|
class LLM::OpenAI
|
4
4
|
##
|
5
|
-
# The {LLM::OpenAI::
|
6
|
-
# an interface
|
7
|
-
# @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
|
5
|
+
# The {LLM::OpenAI::VectorStores LLM::OpenAI::VectorStores} class provides
|
6
|
+
# an interface for [OpenAI's vector stores API](https://platform.openai.com/docs/api-reference/vector_stores/create)
|
8
7
|
class VectorStores
|
9
8
|
##
|
10
9
|
# @param [LLM::Provider] provider
|
@@ -97,6 +96,89 @@ class LLM::OpenAI
|
|
97
96
|
LLM::Response.new(res)
|
98
97
|
end
|
99
98
|
|
99
|
+
##
|
100
|
+
# List all files in a vector store
|
101
|
+
# @param [String, #id] vector The ID of the vector store
|
102
|
+
# @param [Hash] params Other parameters (see OpenAI docs)
|
103
|
+
# @raise (see LLM::Provider#request)
|
104
|
+
# @return [LLM::Response]
|
105
|
+
# @see https://platform.openai.com/docs/api-reference/vector_stores_files/listFiles OpenAI docs
|
106
|
+
def all_files(vector:, **params)
|
107
|
+
vector_id = vector.respond_to?(:id) ? vector.id : vector
|
108
|
+
query = URI.encode_www_form(params)
|
109
|
+
req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files?#{query}", headers)
|
110
|
+
res = execute(request: req)
|
111
|
+
LLM::Response.new(res)
|
112
|
+
end
|
113
|
+
|
114
|
+
##
|
115
|
+
# Add a file to a vector store
|
116
|
+
# @param [String, #id] vector The ID of the vector store
|
117
|
+
# @param [String, #id] file The ID of the file to add
|
118
|
+
# @param [Hash] attributes Attributes to associate with the file (optional)
|
119
|
+
# @param [Hash] params Other parameters (see OpenAI docs)
|
120
|
+
# @raise (see LLM::Provider#request)
|
121
|
+
# @return [LLM::Response]
|
122
|
+
# @see https://platform.openai.com/docs/api-reference/vector_stores_files/createFile OpenAI docs
|
123
|
+
def add_file(vector:, file:, attributes: nil, **params)
|
124
|
+
vector_id = vector.respond_to?(:id) ? vector.id : vector
|
125
|
+
file_id = file.respond_to?(:id) ? file.id : file
|
126
|
+
req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files", headers)
|
127
|
+
req.body = JSON.dump(params.merge({file_id:, attributes:}).compact)
|
128
|
+
res = execute(request: req)
|
129
|
+
LLM::Response.new(res)
|
130
|
+
end
|
131
|
+
alias_method :create_file, :add_file
|
132
|
+
|
133
|
+
##
|
134
|
+
# Update a file in a vector store
|
135
|
+
# @param [String, #id] vector The ID of the vector store
|
136
|
+
# @param [String, #id] file The ID of the file to update
|
137
|
+
# @param [Hash] attributes Attributes to associate with the file
|
138
|
+
# @param [Hash] params Other parameters (see OpenAI docs)
|
139
|
+
# @raise (see LLM::Provider#request)
|
140
|
+
# @return [LLM::Response]
|
141
|
+
# @see https://platform.openai.com/docs/api-reference/vector_stores_files/updateAttributes OpenAI docs
|
142
|
+
def update_file(vector:, file:, attributes:, **params)
|
143
|
+
vector_id = vector.respond_to?(:id) ? vector.id : vector
|
144
|
+
file_id = file.respond_to?(:id) ? file.id : file
|
145
|
+
req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
|
146
|
+
req.body = JSON.dump(params.merge({attributes:}).compact)
|
147
|
+
res = execute(request: req)
|
148
|
+
LLM::Response.new(res)
|
149
|
+
end
|
150
|
+
|
151
|
+
##
|
152
|
+
# Get a file from a vector store
|
153
|
+
# @param [String, #id] vector The ID of the vector store
|
154
|
+
# @param [String, #id] file The ID of the file to retrieve
|
155
|
+
# @raise (see LLM::Provider#request)
|
156
|
+
# @return [LLM::Response]
|
157
|
+
# @see https://platform.openai.com/docs/api-reference/vector_stores_files/getFile OpenAI docs
|
158
|
+
def get_file(vector:, file:, **params)
|
159
|
+
vector_id = vector.respond_to?(:id) ? vector.id : vector
|
160
|
+
file_id = file.respond_to?(:id) ? file.id : file
|
161
|
+
query = URI.encode_www_form(params)
|
162
|
+
req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files/#{file_id}?#{query}", headers)
|
163
|
+
res = execute(request: req)
|
164
|
+
LLM::Response.new(res)
|
165
|
+
end
|
166
|
+
|
167
|
+
##
|
168
|
+
# Delete a file from a vector store
|
169
|
+
# @param [String, #id] vector The ID of the vector store
|
170
|
+
# @param [String, #id] file The ID of the file to delete
|
171
|
+
# @raise (see LLM::Provider#request)
|
172
|
+
# @return [LLM::Response]
|
173
|
+
# @see https://platform.openai.com/docs/api-reference/vector_stores_files/deleteFile OpenAI docs
|
174
|
+
def delete_file(vector:, file:)
|
175
|
+
vector_id = vector.respond_to?(:id) ? vector.id : vector
|
176
|
+
file_id = file.respond_to?(:id) ? file.id : file
|
177
|
+
req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
|
178
|
+
res = execute(request: req)
|
179
|
+
LLM::Response.new(res)
|
180
|
+
end
|
181
|
+
|
100
182
|
private
|
101
183
|
|
102
184
|
[:headers, :execute, :set_body_stream].each do |m|
|
data/lib/llm/providers/openai.rb
CHANGED
@@ -3,7 +3,16 @@
|
|
3
3
|
module LLM
|
4
4
|
##
|
5
5
|
# The OpenAI class implements a provider for
|
6
|
-
# [OpenAI](https://platform.openai.com/)
|
6
|
+
# [OpenAI](https://platform.openai.com/).
|
7
|
+
#
|
8
|
+
# @example
|
9
|
+
# #!/usr/bin/env ruby
|
10
|
+
# require "llm"
|
11
|
+
#
|
12
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
13
|
+
# bot = LLM::Bot.new(llm)
|
14
|
+
# bot.chat ["Tell me about this photo", File.open("/images/capybara.jpg", "rb")]
|
15
|
+
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
7
16
|
class OpenAI < Provider
|
8
17
|
require_relative "openai/response/embedding"
|
9
18
|
require_relative "openai/response/completion"
|
@@ -0,0 +1,58 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
class LLM::XAI
|
4
|
+
##
|
5
|
+
# The {LLM::XAI::Images LLM::XAI::Images} class provides an interface
|
6
|
+
# for [xAI's images API](https://docs.x.ai/docs/guides/image-generations).
|
7
|
+
# xAI supports multiple response formats: temporary URLs, or binary strings
|
8
|
+
# encoded in base64. The default is to return temporary URLs.
|
9
|
+
#
|
10
|
+
# @example Temporary URLs
|
11
|
+
# #!/usr/bin/env ruby
|
12
|
+
# require "llm"
|
13
|
+
# require "open-uri"
|
14
|
+
# require "fileutils"
|
15
|
+
#
|
16
|
+
# llm = LLM.xai(key: ENV["KEY"])
|
17
|
+
# res = llm.images.create prompt: "A dog on a rocket to the moon"
|
18
|
+
# FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
|
19
|
+
# "rocket.png"
|
20
|
+
#
|
21
|
+
# @example Binary strings
|
22
|
+
# #!/usr/bin/env ruby
|
23
|
+
# require "llm"
|
24
|
+
#
|
25
|
+
# llm = LLM.xai(key: ENV["KEY"])
|
26
|
+
# res = llm.images.create prompt: "A dog on a rocket to the moon",
|
27
|
+
# response_format: "b64_json"
|
28
|
+
# IO.copy_stream res.images[0], "rocket.png"
|
29
|
+
class Images < LLM::OpenAI::Images
|
30
|
+
##
|
31
|
+
# Create an image
|
32
|
+
# @example
|
33
|
+
# llm = LLM.xai(key: ENV["KEY"])
|
34
|
+
# res = llm.images.create prompt: "A dog on a rocket to the moon"
|
35
|
+
# res.urls.each { print _1, "\n" }
|
36
|
+
# @see https://docs.x.ai/docs/guides/image-generations xAI docs
|
37
|
+
# @param [String] prompt The prompt
|
38
|
+
# @param [String] model The model to use
|
39
|
+
# @param [Hash] params Other parameters (see xAI docs)
|
40
|
+
# @raise (see LLM::Provider#request)
|
41
|
+
# @return [LLM::Response]
|
42
|
+
def create(model: "grok-2-image-1212", **)
|
43
|
+
super
|
44
|
+
end
|
45
|
+
|
46
|
+
##
|
47
|
+
# @raise [NotImplementedError]
|
48
|
+
def edit(model: "grok-2-image-1212", **)
|
49
|
+
raise NotImplementedError
|
50
|
+
end
|
51
|
+
|
52
|
+
##
|
53
|
+
# @raise [NotImplementedError]
|
54
|
+
def create_variation(model: "grok-2-image-1212", **)
|
55
|
+
raise NotImplementedError
|
56
|
+
end
|
57
|
+
end
|
58
|
+
end
|