llm.rb 0.7.2 → 0.9.0
This diff compares publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +93 -63
- data/lib/llm/{chat → bot}/builder.rb +1 -1
- data/lib/llm/bot/conversable.rb +31 -0
- data/lib/llm/{chat → bot}/prompt/completion.rb +14 -4
- data/lib/llm/{chat → bot}/prompt/respond.rb +16 -5
- data/lib/llm/{chat.rb → bot.rb} +48 -66
- data/lib/llm/buffer.rb +2 -2
- data/lib/llm/error.rb +24 -16
- data/lib/llm/event_handler.rb +44 -0
- data/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/eventstream.rb +8 -0
- data/lib/llm/function.rb +9 -12
- data/lib/{json → llm/json}/schema/array.rb +1 -1
- data/lib/llm/message.rb +1 -1
- data/lib/llm/model.rb +1 -1
- data/lib/llm/object/builder.rb +38 -0
- data/lib/llm/object/kernel.rb +45 -0
- data/lib/llm/object.rb +77 -0
- data/lib/llm/provider.rb +68 -26
- data/lib/llm/providers/anthropic/error_handler.rb +3 -3
- data/lib/llm/providers/anthropic/models.rb +3 -7
- data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +5 -5
- data/lib/llm/providers/anthropic/response_parser.rb +1 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/providers/anthropic.rb +9 -4
- data/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/providers/deepseek/format.rb +28 -0
- data/lib/llm/providers/deepseek.rb +60 -0
- data/lib/llm/providers/gemini/error_handler.rb +4 -4
- data/lib/llm/providers/gemini/files.rb +13 -16
- data/lib/llm/providers/gemini/images.rb +4 -8
- data/lib/llm/providers/gemini/models.rb +3 -7
- data/lib/llm/providers/gemini/response_parser/completion_parser.rb +2 -2
- data/lib/llm/providers/gemini/stream_parser.rb +69 -0
- data/lib/llm/providers/gemini.rb +19 -11
- data/lib/llm/providers/llamacpp.rb +16 -2
- data/lib/llm/providers/ollama/error_handler.rb +3 -3
- data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
- data/lib/llm/providers/ollama/models.rb +3 -7
- data/lib/llm/providers/ollama/response_parser/completion_parser.rb +2 -2
- data/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/providers/ollama.rb +16 -9
- data/lib/llm/providers/openai/audio.rb +5 -9
- data/lib/llm/providers/openai/error_handler.rb +3 -3
- data/lib/llm/providers/openai/files.rb +15 -18
- data/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/providers/openai/format.rb +3 -3
- data/lib/llm/providers/openai/images.rb +8 -11
- data/lib/llm/providers/openai/models.rb +3 -7
- data/lib/llm/providers/openai/moderations.rb +67 -0
- data/lib/llm/providers/openai/response_parser/completion_parser.rb +5 -5
- data/lib/llm/providers/openai/response_parser/respond_parser.rb +2 -2
- data/lib/llm/providers/openai/response_parser.rb +15 -0
- data/lib/llm/providers/openai/responses.rb +14 -16
- data/lib/llm/providers/openai/stream_parser.rb +77 -0
- data/lib/llm/providers/openai.rb +22 -7
- data/lib/llm/providers/voyageai/error_handler.rb +3 -3
- data/lib/llm/providers/voyageai.rb +1 -1
- data/lib/llm/response/filelist.rb +1 -1
- data/lib/llm/response/image.rb +1 -1
- data/lib/llm/response/modellist.rb +1 -1
- data/lib/llm/response/moderationlist/moderation.rb +47 -0
- data/lib/llm/response/moderationlist.rb +51 -0
- data/lib/llm/response.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +13 -4
- data/llm.gemspec +2 -2
- metadata +42 -28
- data/lib/llm/chat/conversable.rb +0 -53
- data/lib/llm/core_ext/ostruct.rb +0 -43
- /data/lib/{json → llm/json}/schema/boolean.rb +0 -0
- /data/lib/{json → llm/json}/schema/integer.rb +0 -0
- /data/lib/{json → llm/json}/schema/leaf.rb +0 -0
- /data/lib/{json → llm/json}/schema/null.rb +0 -0
- /data/lib/{json → llm/json}/schema/number.rb +0 -0
- /data/lib/{json → llm/json}/schema/object.rb +0 -0
- /data/lib/{json → llm/json}/schema/string.rb +0 -0
- /data/lib/{json → llm/json}/schema/version.rb +0 -0
- /data/lib/{json → llm/json}/schema.rb +0 -0
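
The headline changes: LLM::Chat is renamed to LLM::Bot, a new event-stream layer adds streaming support, OpenAI gains a moderations API, DeepSeek arrives as a provider, and LLM::Object replaces the OpenStruct core extension. A minimal sketch of the renamed chat interface, assembled from the doc comments that appear further down in this diff (the nil key and model name are illustrative):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.ollama(nil)
    bot = LLM::Bot.new(llm, model: "llava")  # formerly LLM::Chat
    bot.chat LLM::File("/images/capybara.png")
    bot.chat "Describe the image"
    bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }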
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require_relative "openai" unless defined?(LLM::OpenAI)
+
 module LLM
   ##
   # The LlamaCpp class implements a provider for
@@ -32,12 +34,24 @@ module LLM
       raise NotImplementedError
     end
 
+    ##
+    # @raise [NotImplementedError]
+    def moderations
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def responses
+      raise NotImplementedError
+    end
+
     ##
     # Returns the default model for chat completions
-    # @see https://ollama.com/library
+    # @see https://ollama.com/library/qwen3 qwen3
     # @return [String]
     def default_model
-      "
+      "qwen3"
     end
   end
 end
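
LlamaCpp now loads the OpenAI-compatible code path but stubs out endpoints llama.cpp does not serve. A hedged sketch of the effect (the LLM.llamacpp constructor is assumed from the provider naming convention used elsewhere in the gem):

    llm = LLM.llamacpp(nil)
    llm.moderations    # raises NotImplementedError
    llm.responses      # raises NotImplementedError
    llm.default_model  # => "qwen3"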
@@ -23,11 +23,11 @@ class LLM::Ollama
     def raise_error!
       case res
       when Net::HTTPUnauthorized
-        raise LLM::
+        raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
       when Net::HTTPTooManyRequests
-        raise LLM::
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
       else
-        raise LLM::
+        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
       end
     end
   end
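
The truncated `- raise LLM::` lines removed the old nested error constants; the replacements use the flat names introduced in data/lib/llm/error.rb. A sketch of how calling code might rescue them (the rescue site is illustrative, not part of this diff):

    begin
      llm.complete("Hello")
    rescue LLM::RateLimitError => e
      retry_after_backoff(e.response)  # hypothetical handler; e.response is the Net::HTTP response
    rescue LLM::ResponseError => e
      warn "unexpected response: #{e.response.code}"
    end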
@@ -63,7 +63,7 @@ module LLM::Ollama::Format
     elsif returns.any?
       returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
     else
-
+      content.flat_map { {role: message.role}.merge(format_content(_1)) }
     end
   end
 
@@ -43,7 +43,7 @@ class LLM::Ollama
     def all(**params)
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/api/tags?#{query}", headers)
-      res = request
+      res = execute(request: req)
       LLM::Response::ModelList.new(res).tap { |modellist|
         models = modellist.body["models"].map do |model|
           model = model.transform_keys { snakecase(_1) }
@@ -55,12 +55,8 @@ class LLM::Ollama
 
     private
 
-
-      @provider.
-    end
-
-    [:headers, :request].each do |m|
-      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    [:headers, :execute].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
 end
@@ -5,7 +5,7 @@ module LLM::Ollama::ResponseParser
   # @private
   class CompletionParser
     def initialize(body)
-      @body =
+      @body = LLM::Object.from_hash(body)
     end
 
     def format(response)
@@ -29,7 +29,7 @@ module LLM::Ollama::ResponseParser
       return [] unless tools
       tools.filter_map do |tool|
         next unless tool["function"]
-
+        LLM::Object.new(tool["function"])
       end
     end
 
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # @private
+  class StreamParser
+    ##
+    # Returns the fully constructed response body
+    # @return [LLM::Object]
+    attr_reader :body
+
+    ##
+    # @return [LLM::OpenAI::Chunk]
+    def initialize(io)
+      @body = LLM::Object.new
+      @io = io
+    end
+
+    ##
+    # @param [Hash] chunk
+    # @return [LLM::OpenAI::Chunk]
+    def parse!(chunk)
+      tap { merge!(chunk) }
+    end
+
+    private
+
+    def merge!(chunk)
+      chunk.each do |key, value|
+        if key == "message"
+          if @body[key]
+            @body[key]["content"] << value["content"]
+            @io << value["content"] if @io.respond_to?(:<<)
+          else
+            @body[key] = value
+            @io << value["content"] if @io.respond_to?(:<<)
+          end
+        else
+          @body[key] = value
+        end
+      end
+    end
+  end
+end
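
The parser's merge! concatenates the "content" of each streamed "message" chunk into a single body, echoing the fragments to the IO as they arrive. A hypothetical two-chunk walk-through (the chunk hashes are invented for illustration):

    parser = LLM::Ollama::StreamParser.new($stdout)
    parser.parse!({"model" => "qwen3", "message" => {"role" => "assistant", "content" => "Hel"}})
    parser.parse!({"message" => {"role" => "assistant", "content" => "lo"}})
    # $stdout receives "Hel" then "lo"; parser.body["message"]["content"] is now "Hello"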
data/lib/llm/providers/ollama.rb CHANGED
@@ -14,15 +14,17 @@ module LLM
   #   require "llm"
   #
   #   llm = LLM.ollama(nil)
-  #   bot = LLM::
+  #   bot = LLM::Bot.new(llm, model: "llava")
   #   bot.chat LLM::File("/images/capybara.png")
   #   bot.chat "Describe the image"
   #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class Ollama < Provider
     require_relative "ollama/error_handler"
-    require_relative "ollama/response_parser"
     require_relative "ollama/format"
+    require_relative "ollama/stream_parser"
+    require_relative "ollama/response_parser"
     require_relative "ollama/models"
+
     include Format
 
     HOST = "localhost"
@@ -40,11 +42,11 @@ module LLM
     # @param params (see LLM::Provider#embed)
     # @raise (see LLM::Provider#request)
     # @return (see LLM::Provider#embed)
-    def embed(input, model:
+    def embed(input, model: default_model, **params)
       params = {model:}.merge!(params)
       req = Net::HTTP::Post.new("/v1/embeddings", headers)
       req.body = JSON.dump({input:}.merge!(params))
-      res = request
+      res = execute(request: req)
       Response::Embedding.new(res).extend(response_parser)
     end
 
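
With the new signature, the model argument is optional and falls back to default_model. A minimal usage sketch:

    llm = LLM.ollama(nil)
    llm.embed("Hello, world")                         # embeds with the default model
    llm.embed("Hello, world", model: "qwen3:latest")  # or name one explicitly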
@@ -59,14 +61,15 @@ module LLM
     # When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
     def complete(prompt, params = {})
-      params = {role: :user, model: default_model, stream:
+      params = {role: :user, model: default_model, stream: true}.merge!(params)
       params = [params, {format: params[:schema]}, format_tools(params)].inject({}, &:merge!).compact
-      role = params.delete(:role)
+      role, stream = params.delete(:role), params.delete(:stream)
+      params[:stream] = true if stream.respond_to?(:<<) || stream == true
       req = Net::HTTP::Post.new("/api/chat", headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
       body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-      res = request
+      res = execute(request: req, stream:)
       Response::Completion.new(res).extend(response_parser)
     end
 
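
The interesting addition is the `stream:` option: `true` requests streaming, and any object that responds to `<<` (an IO, a String buffer) both enables streaming and receives the content fragments via the stream parser. A hedged sketch:

    llm = LLM.ollama(nil)
    llm.complete("Tell me a short story", stream: $stdout)  # fragments printed as they arrive
    buffer = String.new
    llm.complete("Tell me a short story", stream: buffer)   # or collected into a buffer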
@@ -86,10 +89,10 @@ module LLM
 
     ##
     # Returns the default model for chat completions
-    # @see https://ollama.com/library
+    # @see https://ollama.com/library/qwen3 qwen3
     # @return [String]
     def default_model
-      "
+      "qwen3:latest"
     end
 
     private
@@ -105,6 +108,10 @@ module LLM
       LLM::Ollama::ResponseParser
     end
 
+    def stream_parser
+      LLM::Ollama::StreamParser
+    end
+
     def error_handler
       LLM::Ollama::ErrorHandler
     end
@@ -35,7 +35,7 @@ class LLM::OpenAI
       req = Net::HTTP::Post.new("/v1/audio/speech", headers)
       req.body = JSON.dump({input:, voice:, model:, response_format:}.merge!(params))
       io = StringIO.new("".b)
-      res = request
+      res = execute(request: req) { _1.read_body { |chunk| io << chunk } }
       LLM::Response::Audio.new(res).tap { _1.audio = io }
     end
 
@@ -56,7 +56,7 @@ class LLM::OpenAI
       req = Net::HTTP::Post.new("/v1/audio/transcriptions", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
-      res = request
+      res = execute(request: req)
       LLM::Response::AudioTranscription.new(res).tap { _1.text = _1.body["text"] }
     end
 
@@ -78,18 +78,14 @@ class LLM::OpenAI
       req = Net::HTTP::Post.new("/v1/audio/translations", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
-      res = request
+      res = execute(request: req)
       LLM::Response::AudioTranslation.new(res).tap { _1.text = _1.body["text"] }
     end
 
     private
 
-
-      @provider.
-    end
-
-    [:headers, :request, :set_body_stream].each do |m|
-      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
 end
@@ -23,11 +23,11 @@ class LLM::OpenAI
     def raise_error!
       case res
       when Net::HTTPUnauthorized
-        raise LLM::
+        raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
      when Net::HTTPTooManyRequests
-        raise LLM::
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
      else
-        raise LLM::
+        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
      end
    end
  end
@@ -8,22 +8,23 @@ class LLM::OpenAI
   # and API endpoints. OpenAI supports multiple file formats, including text
   # files, CSV files, JSON files, and more.
   #
-  # @example
+  # @example example #1
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   bot = LLM::
+  #   bot = LLM::Bot.new(llm)
   #   file = llm.files.create file: "/documents/freebsd.pdf"
   #   bot.chat(file)
   #   bot.chat("Describe the document")
   #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
-  #
+  #
+  # @example example #2
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   bot = LLM::
+  #   bot = LLM::Bot.new(llm)
   #   file = llm.files.create file: "/documents/openbsd.pdf"
   #   bot.chat(["Describe the document I sent to you", file])
   #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
@@ -51,9 +52,9 @@ class LLM::OpenAI
     def all(**params)
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
-      res = request
+      res = execute(request: req)
       LLM::Response::FileList.new(res).tap { |filelist|
-        files = filelist.body["data"].map {
+        files = filelist.body["data"].map { LLM::Object.from_hash(_1) }
         filelist.files = files
       }
     end
@@ -74,7 +75,7 @@ class LLM::OpenAI
       req = Net::HTTP::Post.new("/v1/files", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
-      res = request
+      res = execute(request: req)
       LLM::Response::File.new(res)
     end
 
@@ -93,7 +94,7 @@ class LLM::OpenAI
       file_id = file.respond_to?(:id) ? file.id : file
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/files/#{file_id}?#{query}", headers)
-      res = request
+      res = execute(request: req)
       LLM::Response::File.new(res)
     end
 
@@ -114,7 +115,7 @@ class LLM::OpenAI
       file_id = file.respond_to?(:id) ? file.id : file
       req = Net::HTTP::Get.new("/v1/files/#{file_id}/content?#{query}", headers)
       io = StringIO.new("".b)
-      res = request
+      res = execute(request: req) { |res| res.read_body { |chunk| io << chunk } }
       LLM::Response::DownloadFile.new(res).tap { _1.file = io }
     end
 
@@ -127,22 +128,18 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
     # @param [#id, #to_s] file The file ID
     # @raise (see LLM::Provider#request)
-    # @return [
+    # @return [LLM::Object] Response body
     def delete(file:)
       file_id = file.respond_to?(:id) ? file.id : file
       req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
-      res = request
-
+      res = execute(request: req)
+      LLM::Object.from_hash JSON.parse(res.body)
     end
 
     private
 
-
-      @provider.
-    end
-
-    [:headers, :request, :set_body_stream].each do |m|
-      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
 end
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Format
+  ##
+  # @private
+  class ModerationFormat
+    ##
+    # @param [String, URI, Array<String, URI>] inputs
+    #  The inputs to format
+    # @return [LLM::OpenAI::Format::ModerationFormat]
+    def initialize(inputs)
+      @inputs = inputs
+    end
+
+    ##
+    # Formats the inputs for the OpenAI moderations API
+    # @return [Array<Hash>]
+    def format
+      [*inputs].flat_map do |input|
+        if String === input
+          {type: :text, text: input}
+        elsif URI === input
+          {type: :image_url, url: input.to_s}
+        else
+          raise LLM::Error::FormatError, "The given object (an instance of #{input.class}) " \
+                                         "is not supported by OpenAI moderations API"
+        end
+      end
+    end
+
+    private
+
+    attr_reader :inputs
+  end
+end
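
ModerationFormat normalizes plain strings and URIs into the shapes the endpoint expects; anything else raises. Roughly:

    inputs = ["I hate you", URI.parse("https://example.com/image.png")]
    LLM::OpenAI::Format::ModerationFormat.new(inputs).format
    # => [{type: :text, text: "I hate you"},
    #     {type: :image_url, url: "https://example.com/image.png"}]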
@@ -6,6 +6,7 @@ class LLM::OpenAI
   module Format
     require_relative "format/completion_format"
     require_relative "format/respond_format"
+    require_relative "format/moderation_format"
 
     ##
     # @param [Array<LLM::Message>] messages
@@ -43,9 +44,8 @@ class LLM::OpenAI
     # @param [Hash] params
     # @return [Hash]
     def format_tools(params)
-
-      tools
-      {tools: tools.map { _1.format(self) }}
+      tools = params.delete(:tools)
+      (tools.nil? || tools.empty?) ? {} : {tools: tools.map { _1.format(self) }}
     end
   end
 end
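
The rewrite makes a missing or empty :tools key a no-op rather than an error. In effect (the tool object is invented for illustration):

    format_tools({})                       # => {}
    format_tools({tools: []})              # => {}
    format_tools({tools: [weather_tool]})  # => {tools: [weather_tool.format(self)]}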
@@ -7,7 +7,7 @@ class LLM::OpenAI
   # OpenAI supports multiple response formats: temporary URLs, or binary strings
   # encoded in base64. The default is to return temporary URLs.
   #
-  # @example
+  # @example example #1
   #   #!/usr/bin/env ruby
   #   require "llm"
   #   require "open-uri"
@@ -17,7 +17,8 @@ class LLM::OpenAI
   #   res = llm.images.create prompt: "A dog on a rocket to the moon"
   #   FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
   #                "rocket.png"
-  #
+  #
+  # @example example #2
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
@@ -49,7 +50,7 @@ class LLM::OpenAI
     def create(prompt:, model: "dall-e-3", **params)
       req = Net::HTTP::Post.new("/v1/images/generations", headers)
       req.body = JSON.dump({prompt:, n: 1, model:}.merge!(params))
-      res = request
+      res = execute(request: req)
       LLM::Response::Image.new(res).extend(response_parser)
     end
 
@@ -71,7 +72,7 @@ class LLM::OpenAI
       req = Net::HTTP::Post.new("/v1/images/variations", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
-      res = request
+      res = execute(request: req)
       LLM::Response::Image.new(res).extend(response_parser)
     end
 
@@ -94,18 +95,14 @@ class LLM::OpenAI
       req = Net::HTTP::Post.new("/v1/images/edits", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
-      res = request
+      res = execute(request: req)
       LLM::Response::Image.new(res).extend(response_parser)
     end
 
     private
 
-
-      @provider.
-    end
-
-    [:response_parser, :headers, :request, :set_body_stream].each do |m|
-      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    [:response_parser, :headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
 end
@@ -40,7 +40,7 @@ class LLM::OpenAI
     def all(**params)
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
-      res = request
+      res = execute(request: req)
       LLM::Response::ModelList.new(res).tap { |modellist|
         models = modellist.body["data"].map do |model|
           LLM::Model.from_hash(model).tap { _1.provider = @provider }
@@ -51,12 +51,8 @@ class LLM::OpenAI
 
     private
 
-
-      @provider.
-    end
-
-    [:headers, :request, :set_body_stream].each do |m|
-      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
 end
@@ -0,0 +1,67 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # The {LLM::OpenAI::Moderations LLM::OpenAI::Moderations} class provides a moderations
+  # object for interacting with [OpenAI's moderations API](https://platform.openai.com/docs/api-reference/moderations).
+  # The moderations API can categorize content into different categories, such as
+  # hate speech, self-harm, and sexual content. It can also provide a confidence score
+  # for each category.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   mod = llm.moderations.create input: "I hate you"
+  #   print "categories: #{mod.categories}", "\n"
+  #   print "scores: #{mod.scores}", "\n"
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   mod = llm.moderations.create input: URI.parse("https://example.com/image.png")
+  #   print "categories: #{mod.categories}", "\n"
+  #   print "scores: #{mod.scores}", "\n"
+  #
+  # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+  # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+  class Moderations
+    ##
+    # Returns a new Moderations object
+    # @param [LLM::Provider] provider
+    # @return [LLM::OpenAI::Moderations]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # Create a moderation
+    # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+    # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+    # @note
+    #  Although OpenAI mentions an array as a valid input, and that it can return one
+    #  or more moderations, in practice the API only returns one moderation object. We
+    #  recommend using a single input string or URI, and to keep in mind that llm.rb
+    #  returns a Moderation object but has code in place to return multiple objects in
+    #  the future (in case OpenAI documentation ever matches the actual API).
+    # @param [String, URI, Array<String, URI>] input
+    # @param [String, LLM::Model] model The model to use
+    # @return [LLM::Response::ModerationList::Moderation]
+    def create(input:, model: "omni-moderation-latest", **params)
+      req = Net::HTTP::Post.new("/v1/moderations", headers)
+      input = Format::ModerationFormat.new(input).format
+      req.body = JSON.dump({input:, model:}.merge!(params))
+      res = execute(request: req)
+      LLM::Response::ModerationList.new(res).extend(response_parser).first
+    end
+
+    private
+
+    [:response_parser, :headers, :execute].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
@@ -5,7 +5,7 @@ module LLM::OpenAI::ResponseParser
   # @private
   class CompletionParser
     def initialize(body)
-      @body =
+      @body = LLM::Object.from_hash(body)
     end
 
     def format(response)
@@ -41,15 +41,15 @@ module LLM::OpenAI::ResponseParser
           name: tool.function.name,
           arguments: JSON.parse(tool.function.arguments)
         }
-
+        LLM::Object.new(tool)
       end
     end
 
     def body = @body
     def model = body.model
-    def prompt_tokens = body.usage
-    def completion_tokens = body.usage
-    def total_tokens = body.usage
+    def prompt_tokens = body.usage&.prompt_tokens
+    def completion_tokens = body.usage&.completion_tokens
+    def total_tokens = body.usage&.total_tokens
     def choices = body.choices
   end
 end
@@ -5,7 +5,7 @@ module LLM::OpenAI::ResponseParser
   # @private
   class RespondParser
     def initialize(body)
-      @body =
+      @body = LLM::Object.from_hash(body)
     end
 
     def format(response)
@@ -37,7 +37,7 @@ module LLM::OpenAI::ResponseParser
     end
 
     def format_tool(tool)
-
+      LLM::Object.new(
         id: tool.call_id,
         name: tool.name,
         arguments: JSON.parse(tool.arguments)
@@ -4,6 +4,9 @@ class LLM::OpenAI
   ##
   # @private
   module ResponseParser
+    require_relative "response_parser/completion_parser"
+    require_relative "response_parser/respond_parser"
+
     ##
     # @param [Hash] body
     # The response body from the LLM provider
@@ -20,6 +23,18 @@ class LLM::OpenAI
       RespondParser.new(body).format(self)
     end
 
+    ##
+    # @param [Hash] body
+    #  The response body from the LLM provider
+    # @return [Hash]
+    def parse_moderation_list(body)
+      {
+        id: body["id"],
+        model: body["model"],
+        moderations: body["results"].map { LLM::Response::ModerationList::Moderation.new(_1) }
+      }
+    end
+
     ##
     # @param [Hash] body
     # The response body from the LLM provider