llm.rb 0.7.2 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +93 -63
- data/lib/llm/{chat → bot}/builder.rb +1 -1
- data/lib/llm/bot/conversable.rb +31 -0
- data/lib/llm/{chat → bot}/prompt/completion.rb +14 -4
- data/lib/llm/{chat → bot}/prompt/respond.rb +16 -5
- data/lib/llm/{chat.rb → bot.rb} +48 -66
- data/lib/llm/buffer.rb +2 -2
- data/lib/llm/error.rb +24 -16
- data/lib/llm/event_handler.rb +44 -0
- data/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/eventstream.rb +8 -0
- data/lib/llm/function.rb +9 -12
- data/lib/{json → llm/json}/schema/array.rb +1 -1
- data/lib/llm/message.rb +1 -1
- data/lib/llm/model.rb +1 -1
- data/lib/llm/object/builder.rb +38 -0
- data/lib/llm/object/kernel.rb +45 -0
- data/lib/llm/object.rb +77 -0
- data/lib/llm/provider.rb +68 -26
- data/lib/llm/providers/anthropic/error_handler.rb +3 -3
- data/lib/llm/providers/anthropic/models.rb +3 -7
- data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +5 -5
- data/lib/llm/providers/anthropic/response_parser.rb +1 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/providers/anthropic.rb +9 -4
- data/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/providers/deepseek/format.rb +28 -0
- data/lib/llm/providers/deepseek.rb +60 -0
- data/lib/llm/providers/gemini/error_handler.rb +4 -4
- data/lib/llm/providers/gemini/files.rb +13 -16
- data/lib/llm/providers/gemini/images.rb +4 -8
- data/lib/llm/providers/gemini/models.rb +3 -7
- data/lib/llm/providers/gemini/response_parser/completion_parser.rb +2 -2
- data/lib/llm/providers/gemini/stream_parser.rb +69 -0
- data/lib/llm/providers/gemini.rb +19 -11
- data/lib/llm/providers/llamacpp.rb +16 -2
- data/lib/llm/providers/ollama/error_handler.rb +3 -3
- data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
- data/lib/llm/providers/ollama/models.rb +3 -7
- data/lib/llm/providers/ollama/response_parser/completion_parser.rb +2 -2
- data/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/providers/ollama.rb +16 -9
- data/lib/llm/providers/openai/audio.rb +5 -9
- data/lib/llm/providers/openai/error_handler.rb +3 -3
- data/lib/llm/providers/openai/files.rb +15 -18
- data/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/providers/openai/format.rb +3 -3
- data/lib/llm/providers/openai/images.rb +8 -11
- data/lib/llm/providers/openai/models.rb +3 -7
- data/lib/llm/providers/openai/moderations.rb +67 -0
- data/lib/llm/providers/openai/response_parser/completion_parser.rb +5 -5
- data/lib/llm/providers/openai/response_parser/respond_parser.rb +2 -2
- data/lib/llm/providers/openai/response_parser.rb +15 -0
- data/lib/llm/providers/openai/responses.rb +14 -16
- data/lib/llm/providers/openai/stream_parser.rb +77 -0
- data/lib/llm/providers/openai.rb +22 -7
- data/lib/llm/providers/voyageai/error_handler.rb +3 -3
- data/lib/llm/providers/voyageai.rb +1 -1
- data/lib/llm/response/filelist.rb +1 -1
- data/lib/llm/response/image.rb +1 -1
- data/lib/llm/response/modellist.rb +1 -1
- data/lib/llm/response/moderationlist/moderation.rb +47 -0
- data/lib/llm/response/moderationlist.rb +51 -0
- data/lib/llm/response.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +13 -4
- data/llm.gemspec +2 -2
- metadata +42 -28
- data/lib/llm/chat/conversable.rb +0 -53
- data/lib/llm/core_ext/ostruct.rb +0 -43
- /data/lib/{json → llm/json}/schema/boolean.rb +0 -0
- /data/lib/{json → llm/json}/schema/integer.rb +0 -0
- /data/lib/{json → llm/json}/schema/leaf.rb +0 -0
- /data/lib/{json → llm/json}/schema/null.rb +0 -0
- /data/lib/{json → llm/json}/schema/number.rb +0 -0
- /data/lib/{json → llm/json}/schema/object.rb +0 -0
- /data/lib/{json → llm/json}/schema/string.rb +0 -0
- /data/lib/{json → llm/json}/schema/version.rb +0 -0
- /data/lib/{json → llm/json}/schema.rb +0 -0
@@ -8,22 +8,24 @@ class LLM::OpenAI
 # conversation state across multiple requests. This is useful when you want to
 # save bandwidth and/or not maintain the message thread by yourself.
 #
-# @example
+# @example example #1
 #  #!/usr/bin/env ruby
 #  require "llm"
 #
 #  llm = LLM.openai(ENV["KEY"])
-#  res1 = llm.responses.create "Your task is to help me with math", :developer
-#  res2 = llm.responses.create "5 + 5 = ?", :user, previous_response_id: res1.id
+#  res1 = llm.responses.create "Your task is to help me with math", role: :developer
+#  res2 = llm.responses.create "5 + 5 = ?", role: :user, previous_response_id: res1.id
 #  [res1,res2].each { llm.responses.delete(_1) }
-#
+#
+# @example example #2
 #  #!/usr/bin/env ruby
 #  require "llm"
 #
 #  llm = LLM.openai(ENV["KEY"])
 #  file = llm.files.create file: "/images/hat.png"
 #  res = llm.responses.create ["Describe the image", file]
-#
+#
+# @example example #3
 #  #!/usr/bin/env ruby
 #  require "llm"
 #
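The rewritten @example blocks above track a breaking change in 0.9.0: the message role moves from a positional symbol to a role: keyword. A minimal sketch of the new call style, mirroring the updated examples (the key name and prompts are placeholders):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.openai(ENV["KEY"])
    # role: is now a keyword argument; 0.7.2 accepted a positional :developer
    res1 = llm.responses.create "Your task is to help me with math", role: :developer
    res2 = llm.responses.create "5 + 5 = ?", role: :user, previous_response_id: res1.id
    [res1, res2].each { llm.responses.delete(_1) }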
@@ -58,7 +60,7 @@ class LLM::OpenAI
       messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
       body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-      res = request
+      res = execute(request: req)
       LLM::Response::Respond.new(res).extend(response_parser)
     end
 
@@ -72,7 +74,7 @@ class LLM::OpenAI
       response_id = response.respond_to?(:id) ? response.id : response
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
-      res = request
+      res = execute(request: req)
       LLM::Response::Respond.new(res).extend(response_parser)
     end
 
@@ -81,24 +83,20 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/responses/delete OpenAI docs
     # @param [#id, #to_s] response Response ID
     # @raise (see LLM::Provider#request)
-    # @return [
+    # @return [LLM::Object] Response body
     def delete(response)
       response_id = response.respond_to?(:id) ? response.id : response
       req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
-      res = request
-
+      res = execute(request: req)
+      LLM::Object.from_hash JSON.parse(res.body)
     end
 
     private
 
-    def http
-      @provider.instance_variable_get(:@http)
-    end
-
     [:response_parser, :headers,
-     :
+     :execute, :set_body_stream,
      :format_schema, :format_tools].each do |m|
-      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
 end
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # @private
+  class StreamParser
+    ##
+    # Returns the fully constructed response body
+    # @return [LLM::Object]
+    attr_reader :body
+
+    ##
+    # @return [LLM::OpenAI::Chunk]
+    def initialize(io)
+      @body = LLM::Object.new
+      @io = io
+    end
+
+    ##
+    # @param [Hash] chunk
+    # @return [LLM::OpenAI::Chunk]
+    def parse!(chunk)
+      tap { merge!(chunk) }
+    end
+
+    private
+
+    def merge!(chunk)
+      chunk.each do |key, value|
+        if key == "choices"
+          @body["choices"] ||= []
+          merge_choices!(value)
+        else
+          @body[key] = value
+        end
+      end
+    end
+
+    def merge_choices!(choices)
+      choices.each do |choice|
+        if @body.choices[choice["index"]]
+          target = @body["choices"][choice["index"]]["message"]
+          delta = choice["delta"]
+          delta.each do |key, value|
+            if target[key]
+              if key == "content"
+                target[key] << value
+                @io << value if @io.respond_to?(:<<)
+              elsif key == "tool_calls"
+                merge_tools!(target, value)
+              else
+                target[key] = value
+              end
+            else
+              target[key] = value
+            end
+          end
+        else
+          target = {"message" => {"role" => "assistant"}}
+          @body["choices"][choice["index"]] = target
+          target["message"].merge!(choice["delta"])
+        end
+      end
+    end
+
+    def merge_tools!(target, tools)
+      tools.each.with_index do |toola, index|
+        toolb = target["tool_calls"][index]
+        if toolb
+          toola["function"].each { toolb["function"][_1] << _2 }
+        else
+          target["tool_calls"][index] = toola
+        end
+      end
+    end
+  end
+end
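A quick sketch of how this parser folds streamed deltas into one body, using only the methods defined above. The chunk hashes are hypothetical examples of OpenAI's SSE payload shape, and method-style access such as body.choices assumes LLM::Object delegates to its underlying hash:

    parser = LLM::OpenAI::StreamParser.new($stdout)
    # The first chunk creates choices[0] via the else-branch in
    # merge_choices!; nothing is echoed to the IO yet.
    parser.parse!({"id" => "chatcmpl-123",
                   "choices" => [{"index" => 0,
                                  "delta" => {"role" => "assistant", "content" => "Hel"}}]})
    # Subsequent chunks append to the existing "content" string and echo
    # each fragment ("lo") to the IO passed to the constructor.
    parser.parse!({"choices" => [{"index" => 0, "delta" => {"content" => "lo"}}]})
    parser.body["choices"][0]["message"]["content"] #=> "Hello"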
data/lib/llm/providers/openai.rb
CHANGED
@@ -5,16 +5,17 @@ module LLM
   # The OpenAI class implements a provider for
   # [OpenAI](https://platform.openai.com/)
   class OpenAI < Provider
-    require_relative "openai/format"
     require_relative "openai/error_handler"
+    require_relative "openai/format"
+    require_relative "openai/stream_parser"
     require_relative "openai/response_parser"
-    require_relative "openai/
-    require_relative "openai/response_parser/respond_parser"
+    require_relative "openai/models"
     require_relative "openai/responses"
     require_relative "openai/images"
     require_relative "openai/audio"
     require_relative "openai/files"
-    require_relative "openai/
+    require_relative "openai/moderations"
+
     include Format
 
     HOST = "api.openai.com"
@@ -36,7 +37,7 @@ module LLM
     def embed(input, model: "text-embedding-3-small", **params)
       req = Net::HTTP::Post.new("/v1/embeddings", headers)
       req.body = JSON.dump({input:, model:}.merge!(params))
-      res = request
+      res = execute(request: req)
       Response::Embedding.new(res).extend(response_parser)
     end
 
@@ -53,12 +54,13 @@ module LLM
     def complete(prompt, params = {})
       params = {role: :user, model: default_model}.merge!(params)
       params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
-      role = params.delete(:role)
+      role, stream = params.delete(:role), params.delete(:stream)
+      params[:stream] = true if stream.respond_to?(:<<) || stream == true
       req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
       body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
-      res = request
+      res = execute(request: req, stream:)
       Response::Completion.new(res).extend(response_parser)
     end
 
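Per the two added lines above, streaming is opt-in: params[:stream] is set when the stream: value responds to #<< or is literally true. A short usage sketch based on that logic:

    llm = LLM.openai(ENV["KEY"])
    # Any #<<-capable sink (an IO, a String, etc.) receives content
    # fragments as they arrive via the new stream parser.
    res = llm.complete "Tell me a short story", stream: $stdout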
@@ -102,6 +104,15 @@ module LLM
       LLM::OpenAI::Models.new(self)
     end
 
+    ##
+    # Provides an interface to OpenAI's moderation API
+    # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+    # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+    # @return [LLM::OpenAI::Moderations]
+    def moderations
+      LLM::OpenAI::Moderations.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role
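The new accessor mirrors the existing models accessor. LLM::OpenAI::Moderations#create is not shown in this diff, so the input: keyword below is an assumption based on the linked OpenAI endpoint:

    llm = LLM.openai(ENV["KEY"])
    list = llm.moderations.create input: "I hate you"  # assumed signature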
@@ -129,6 +140,10 @@ module LLM
       LLM::OpenAI::ResponseParser
     end
 
+    def stream_parser
+      LLM::OpenAI::StreamParser
+    end
+
     def error_handler
       LLM::OpenAI::ErrorHandler
     end
@@ -21,11 +21,11 @@ class LLM::VoyageAI
     def raise_error!
       case res
       when Net::HTTPUnauthorized
-        raise LLM::
+        raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
       when Net::HTTPTooManyRequests
-        raise LLM::
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
       else
-        raise LLM::
+        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
       end
     end
   end
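This hunk shows the 0.9.0 error renames (error.rb is +24 -16 in the file list): UnauthorizedError, RateLimitError, and ResponseError, each carrying the HTTP response. Assuming a #response reader matches the #response= writer used here, rescuing looks roughly like:

    begin
      llm.embed("hello, world")
    rescue LLM::RateLimitError => e
      warn "rate limited (HTTP #{e.response.code})"
    rescue LLM::ResponseError => e
      warn "unexpected response (HTTP #{e.response.code})"
    end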
@@ -20,7 +20,7 @@ module LLM
     def embed(input, model: "voyage-2", **params)
       req = Net::HTTP::Post.new("/v1/embeddings", headers)
       req.body = JSON.dump({input:, model:}.merge!(params))
-      res = request
+      res = execute(request: req)
       Response::Embedding.new(res).extend(response_parser)
     end
 
@@ -5,7 +5,7 @@ module LLM
   # The {LLM::Response::FileList LLM::Response::FileList} class represents a
   # list of file objects that are returned by a provider. It is an Enumerable
   # object, and can be used to iterate over the file objects in a way that is
-  # similar to an array. Each element is an instance of
+  # similar to an array. Each element is an instance of LLM::Object.
   class Response::FileList < Response
     include Enumerable
 
data/lib/llm/response/image.rb
CHANGED
@@ -5,7 +5,7 @@ module LLM
   # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
   # list of model objects that are returned by a provider. It is an Enumerable
   # object, and can be used to iterate over the model objects in a way that is
-  # similar to an array. Each element is an instance of
+  # similar to an array. Each element is an instance of LLM::Object.
   class Response::ModelList < Response
     include Enumerable
 
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+class LLM::Response::ModerationList
+  ##
+  # The {LLM::Response::ModerationList::Moderation Moderation}
+  # class represents a moderation object that is returned by
+  # the moderations interface.
+  # @see LLM::Response::ModerationList
+  # @see LLM::OpenAI::Moderations
+  class Moderation
+    ##
+    # @param [Hash] moderation
+    # @return [LLM::Response::ModerationList::Moderation]
+    def initialize(moderation)
+      @moderation = moderation
+    end
+
+    ##
+    # Returns true if the moderation is flagged
+    # @return [Boolean]
+    def flagged?
+      @moderation["flagged"]
+    end
+
+    ##
+    # Returns the moderation categories
+    # @return [Array<String>]
+    def categories
+      @moderation["categories"].filter_map { _2 ? _1 : nil }
+    end
+
+    ##
+    # Returns the moderation scores
+    # @return [Hash]
+    def scores
+      @moderation["category_scores"].select { categories.include?(_1) }
+    end
+
+    ##
+    # @return [String]
+    def inspect
+      "#<#{self.class}:0x#{object_id.to_s(16)} " \
+      "categories=#{categories} " \
+      "scores=#{scores}>"
+    end
+  end
+end
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # The {LLM::Response::ModerationList LLM::Response::ModerationList} class
+  # represents a response from the moderations API. It is an Enumerable that
+  # yields an instance of {LLM::Response::ModerationList::Moderation LLM::Response::ModerationList::Moderation},
+  # and each moderation object contains the categories and scores for a given
+  # input.
+  # @see LLM::OpenAI::Moderations LLM::OpenAI::Moderations
+  class Response::ModerationList < Response
+    require_relative "moderationlist/moderation"
+    include Enumerable
+
+    ##
+    # Returns the moderation ID
+    # @return [String]
+    def id
+      parsed[:id]
+    end
+
+    ##
+    # Returns the moderation model
+    # @return [String]
+    def model
+      parsed[:model]
+    end
+
+    ##
+    # Yields each moderation object
+    # @yieldparam [OpenStruct] moderation
+    # @yieldreturn [void]
+    # @return [void]
+    def each(&)
+      moderations.each(&)
+    end
+
+    private
+
+    def parsed
+      @parsed ||= parse_moderation_list(body)
+    end
+
+    ##
+    # Returns an array of moderation objects
+    # @return [Array<OpenStruct>]
+    def moderations
+      parsed[:moderations]
+    end
+  end
+end
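Putting the two new classes together, a sketch of iterating a moderation response, with the create signature assumed as above:

    res = llm.moderations.create input: "..."  # assumed signature
    res.each do |moderation|
      next unless moderation.flagged?
      puts "flagged: #{moderation.categories.join(", ")} #{moderation.scores}"
    end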
data/lib/llm/response.rb
CHANGED
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -2,8 +2,8 @@
 
 module LLM
   require "stringio"
-  require_relative "json/schema"
-  require_relative "llm/
+  require_relative "llm/json/schema"
+  require_relative "llm/object"
   require_relative "llm/version"
   require_relative "llm/utils"
   require_relative "llm/error"
@@ -14,9 +14,11 @@ module LLM
   require_relative "llm/file"
   require_relative "llm/model"
   require_relative "llm/provider"
-  require_relative "llm/
+  require_relative "llm/bot"
   require_relative "llm/buffer"
   require_relative "llm/function"
+  require_relative "llm/eventstream"
+  require_relative "llm/event_handler"
 
   module_function
 
@@ -57,11 +59,18 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::LlamaCpp#initialize)
   def llamacpp(key: nil, **)
-    require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
     require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp)
     LLM::LlamaCpp.new(key:, **)
   end
 
+  ##
+  # @param key (see LLM::Provider#initialize)
+  # @return (see LLM::DeepSeek#initialize)
+  def deepseek(**)
+    require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek)
+    LLM::DeepSeek.new(**)
+  end
+
   ##
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::OpenAI#initialize)
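With the new entry point, constructing the DeepSeek provider should look like the other providers; key: is presumed to be forwarded to LLM::Provider#initialize via the ** splat:

    llm = LLM.deepseek(key: ENV["DEEPSEEK_KEY"])
    res = llm.complete "Hello!"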
data/llm.gemspec
CHANGED
@@ -10,8 +10,8 @@ Gem::Specification.new do |spec|
 
   spec.summary = "llm.rb is a zero-dependency Ruby toolkit for " \
                  "Large Language Models that includes OpenAI, Gemini, " \
-                 "Anthropic, Ollama, and LlamaCpp. It’s fast, simple " \
-                 "and composable – with full support for chat, tool calling, audio, " \
+                 "Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple " \
+                 "and composable – with full support for chat, streaming, tool calling, audio, " \
                  "images, files, and JSON Schema generation."
   spec.description = spec.summary
   spec.homepage = "https://github.com/llmrb/llm"
metadata
CHANGED
@@ -1,15 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.7.2
+  version: 0.9.0
 platform: ruby
 authors:
 - Antar Azri
 - '0x1eef'
-autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webmock
@@ -152,9 +151,9 @@ dependencies:
   - !ruby/object:Gem::Version
     version: '2.8'
 description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
-  includes OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and
-  composable – with full support for chat, tool calling, audio, images, files, and
-  JSON Schema generation.
+  includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple
+  and composable – with full support for chat, streaming, tool calling, audio, images,
+  files, and JSON Schema generation.
 email:
 - azantar@proton.me
 - 0x1eef@proton.me
@@ -164,31 +163,37 @@ extra_rdoc_files: []
 files:
 - LICENSE
 - README.md
-- lib/json/schema.rb
-- lib/json/schema/array.rb
-- lib/json/schema/boolean.rb
-- lib/json/schema/integer.rb
-- lib/json/schema/leaf.rb
-- lib/json/schema/null.rb
-- lib/json/schema/number.rb
-- lib/json/schema/object.rb
-- lib/json/schema/string.rb
-- lib/json/schema/version.rb
 - lib/llm.rb
+- lib/llm/bot.rb
+- lib/llm/bot/builder.rb
+- lib/llm/bot/conversable.rb
+- lib/llm/bot/prompt/completion.rb
+- lib/llm/bot/prompt/respond.rb
 - lib/llm/buffer.rb
-- lib/llm/chat.rb
-- lib/llm/chat/builder.rb
-- lib/llm/chat/conversable.rb
-- lib/llm/chat/prompt/completion.rb
-- lib/llm/chat/prompt/respond.rb
-- lib/llm/core_ext/ostruct.rb
 - lib/llm/error.rb
+- lib/llm/event_handler.rb
+- lib/llm/eventstream.rb
+- lib/llm/eventstream/event.rb
+- lib/llm/eventstream/parser.rb
 - lib/llm/file.rb
 - lib/llm/function.rb
+- lib/llm/json/schema.rb
+- lib/llm/json/schema/array.rb
+- lib/llm/json/schema/boolean.rb
+- lib/llm/json/schema/integer.rb
+- lib/llm/json/schema/leaf.rb
+- lib/llm/json/schema/null.rb
+- lib/llm/json/schema/number.rb
+- lib/llm/json/schema/object.rb
+- lib/llm/json/schema/string.rb
+- lib/llm/json/schema/version.rb
 - lib/llm/message.rb
 - lib/llm/mime.rb
 - lib/llm/model.rb
 - lib/llm/multipart.rb
+- lib/llm/object.rb
+- lib/llm/object/builder.rb
+- lib/llm/object/kernel.rb
 - lib/llm/provider.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
@@ -197,6 +202,10 @@ files:
 - lib/llm/providers/anthropic/models.rb
 - lib/llm/providers/anthropic/response_parser.rb
 - lib/llm/providers/anthropic/response_parser/completion_parser.rb
+- lib/llm/providers/anthropic/stream_parser.rb
+- lib/llm/providers/deepseek.rb
+- lib/llm/providers/deepseek/format.rb
+- lib/llm/providers/deepseek/format/completion_format.rb
 - lib/llm/providers/gemini.rb
 - lib/llm/providers/gemini/audio.rb
 - lib/llm/providers/gemini/error_handler.rb
@@ -207,6 +216,7 @@ files:
 - lib/llm/providers/gemini/models.rb
 - lib/llm/providers/gemini/response_parser.rb
 - lib/llm/providers/gemini/response_parser/completion_parser.rb
+- lib/llm/providers/gemini/stream_parser.rb
 - lib/llm/providers/llamacpp.rb
 - lib/llm/providers/ollama.rb
 - lib/llm/providers/ollama/error_handler.rb
@@ -215,19 +225,23 @@ files:
 - lib/llm/providers/ollama/models.rb
 - lib/llm/providers/ollama/response_parser.rb
 - lib/llm/providers/ollama/response_parser/completion_parser.rb
+- lib/llm/providers/ollama/stream_parser.rb
 - lib/llm/providers/openai.rb
 - lib/llm/providers/openai/audio.rb
 - lib/llm/providers/openai/error_handler.rb
 - lib/llm/providers/openai/files.rb
 - lib/llm/providers/openai/format.rb
 - lib/llm/providers/openai/format/completion_format.rb
+- lib/llm/providers/openai/format/moderation_format.rb
 - lib/llm/providers/openai/format/respond_format.rb
 - lib/llm/providers/openai/images.rb
 - lib/llm/providers/openai/models.rb
+- lib/llm/providers/openai/moderations.rb
 - lib/llm/providers/openai/response_parser.rb
 - lib/llm/providers/openai/response_parser/completion_parser.rb
 - lib/llm/providers/openai/response_parser/respond_parser.rb
 - lib/llm/providers/openai/responses.rb
+- lib/llm/providers/openai/stream_parser.rb
 - lib/llm/providers/voyageai.rb
 - lib/llm/providers/voyageai/error_handler.rb
 - lib/llm/providers/voyageai/response_parser.rb
@@ -242,6 +256,8 @@ files:
 - lib/llm/response/filelist.rb
 - lib/llm/response/image.rb
 - lib/llm/response/modellist.rb
+- lib/llm/response/moderationlist.rb
+- lib/llm/response/moderationlist/moderation.rb
 - lib/llm/response/respond.rb
 - lib/llm/utils.rb
 - lib/llm/version.rb
@@ -252,7 +268,6 @@ licenses:
 metadata:
   homepage_uri: https://github.com/llmrb/llm
   source_code_uri: https://github.com/llmrb/llm
-post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -267,11 +282,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
-signing_key:
+rubygems_version: 3.6.8
 specification_version: 4
 summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
-  OpenAI, Gemini, Anthropic, Ollama, and LlamaCpp. It’s fast, simple and composable
-  – with full support for chat, tool calling, audio, images, files, and JSON Schema
-  generation.
+  OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. It’s fast, simple and
+  composable – with full support for chat, streaming, tool calling, audio, images,
+  files, and JSON Schema generation.
 test_files: []
data/lib/llm/chat/conversable.rb
DELETED
@@ -1,53 +0,0 @@
-# frozen_string_literal: true
-
-class LLM::Chat
-  ##
-  # @private
-  module Conversable
-    private
-
-    ##
-    # Queues a response to be sent to the provider.
-    # @param [String] prompt The prompt
-    # @param [Hash] params
-    # @return [void]
-    def async_response(prompt, params = {})
-      role = params.delete(:role)
-      @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
-    end
-
-    ##
-    # Sends a response to the provider and returns the response.
-    # @param [String] prompt The prompt
-    # @param [Hash] params
-    # @return [LLM::Response::Respond]
-    def sync_response(prompt, params = {})
-      role = params[:role]
-      @response = create_response!(prompt, params)
-      @messages.concat [Message.new(role, prompt), @response.outputs[0]]
-    end
-
-    ##
-    # Queues a completion to be sent to the provider.
-    # @param [String] prompt The prompt
-    # @param [Hash] params
-    # @return [void]
-    def async_completion(prompt, params = {})
-      role = params.delete(:role)
-      @messages.push [LLM::Message.new(role, prompt), @params.merge(params), :complete]
-    end
-
-    ##
-    # Sends a completion to the provider and returns the completion.
-    # @param [String] prompt The prompt
-    # @param [Hash] params
-    # @return [LLM::Response::Completion]
-    def sync_completion(prompt, params = {})
-      role = params[:role]
-      completion = create_completion!(prompt, params)
-      @messages.concat [Message.new(role, prompt), completion.choices[0]]
-    end
-
-    include LLM
-  end
-end
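This module's queueing logic moves to the renamed lib/llm/bot/conversable.rb (+31 in the file list above). Assuming LLM::Bot keeps the old LLM::Chat construction and chat method under the new name, migrated usage looks roughly like:

    llm = LLM.openai(ENV["KEY"])
    bot = LLM::Bot.new(llm)            # formerly LLM::Chat.new(llm)
    bot.chat "You are my math assistant", role: :system
    bot.chat "5 + 5 = ?", role: :user
    bot.messages.each { |m| puts "[#{m.role}] #{m.content}" }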