llm.rb 0.16.1 → 0.16.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/llm/bot.rb +11 -0
- data/lib/llm/buffer.rb +10 -2
- data/lib/llm/error.rb +4 -0
- data/lib/llm/providers/anthropic/response/completion.rb +2 -2
- data/lib/llm/providers/gemini/images.rb +11 -4
- data/lib/llm/providers/gemini/response/image.rb +6 -1
- data/lib/llm/providers/openai/response/completion.rb +3 -3
- data/lib/llm/providers/openai.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +9 -7
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 696893f9ef5355ed4433b265dc7771e76aa9c96c78036a70e698615a0c4d7bdd
+  data.tar.gz: 02f48b464173823d0696ea8d2d21ab5a147cdf946f1c3287dedddc1760a93c38
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 15c5549e9165a854814c853c381b36d3de6409a9260acb7a4bd2adddffd08520e3ea8f3dea999b111c6bd7706d9f241eddbcd2d103122ea64482a12ca8fa6879
+  data.tar.gz: 971c404ada0cf5ac1af10ffbd4aabd8654fa11fe69beceb6ef3e3fd8ae68ed3f9426ebc4af829beff2610aded2a77224ebe2b4f3e4c3ec4e7dfd5fe21e0fb90b
data/lib/llm/bot.rb
CHANGED
@@ -135,5 +135,16 @@ module LLM
       messages.drain
     end
     alias_method :flush, :drain
+
+    ##
+    # Returns token usage for the conversation
+    # @note
+    #  This method returns token usage for the latest
+    #  assistant message, and it returns an empty object
+    #  if there are no assistant messages
+    # @return [LLM::Object]
+    def usage
+      messages.find(&:assistant?)&.usage || LLM::Object.from_hash({})
+    end
   end
 end
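A minimal usage sketch of the new Bot#usage reader. The constructor, chat, and drain calls are assumptions based on the surrounding code, and the usage keys vary by provider:

    bot = LLM::Bot.new(LLM.openai(key: ENV["OPENAI_API_KEY"]))
    bot.chat "Hello!"
    bot.drain      # force the pending request to complete
    p bot.usage    # usage of the latest assistant message; an empty LLM::Object if there is none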
data/lib/llm/buffer.rb
CHANGED
@@ -65,12 +65,20 @@ module LLM
     alias_method :push, :<<

     ##
-    # @param [Integer, #to_i] index
+    # @param [Integer, Range, #to_i] index
     #  The message index
     # @return [LLM::Message, nil]
     #  Returns a message, or nil
     def [](index)
-
+      if index.respond_to?(:to_i)
+        @completed[index.to_i] || to_a[index.to_i]
+      elsif Range === index
+        slice = @completed[index]
+        invalidate = slice.nil? || slice.size < index.size
+        invalidate ? to_a[index] : slice
+      else
+        raise TypeError, "index must be an Integer or Range"
+      end
     end

     ##
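A short sketch of the extended Buffer#[] behaviour (hypothetical calls; note that the Range form falls back to draining the buffer when the completed slice is shorter than the requested range):

    bot.messages[0]           # Integer, or anything responding to #to_i, as before
    bot.messages[0..1]        # Range, served from the completed messages when possible
    bot.messages[Object.new]  # raises TypeError (neither Integer-like nor a Range)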
data/lib/llm/error.rb
CHANGED
@@ -35,6 +35,10 @@ module LLM
   # HTTPServerError
   ServerError = Class.new(ResponseError)

+  ##
+  # When no images are found in a response
+  NoImageError = Class.new(ResponseError)
+
   ##
   # When an given an input object that is not understood
   FormatError = Class.new(Error)
data/lib/llm/providers/anthropic/response/completion.rb
CHANGED
@@ -5,8 +5,8 @@ module LLM::Anthropic::Response
   def choices = format_choices
   def role = body.role
   def model = body.model
-  def prompt_tokens = body.usage
-  def completion_tokens = body.usage
+  def prompt_tokens = body.usage["input_tokens"] || 0
+  def completion_tokens = body.usage["output_tokens"] || 0
   def total_tokens = prompt_tokens + completion_tokens

   private
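A hedged sketch of the corrected accessors in use, assuming LLM.anthropic and Provider#complete behave as elsewhere in the gem:

    llm = LLM.anthropic(key: ENV["ANTHROPIC_API_KEY"])
    res = llm.complete("Hello, Claude")
    res.prompt_tokens      # body.usage["input_tokens"], 0 when absent
    res.completion_tokens  # body.usage["output_tokens"], 0 when absent
    res.total_tokens       # the sum of the two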
data/lib/llm/providers/gemini/images.rb
CHANGED
@@ -36,12 +36,13 @@ class LLM::Gemini
   # @param [String] prompt The prompt
   # @param [Hash] params Other parameters (see Gemini docs)
   # @raise (see LLM::Provider#request)
+  # @raise [LLM::NoImageError] when no images are returned
   # @note
   #  The prompt should make it clear you want to generate an image, or you
   #  might unexpectedly receive a purely textual response. This is due to how
   #  Gemini implements image generation under the hood.
   # @return [LLM::Response]
-  def create(prompt:, model: "gemini-2.
+  def create(prompt:, model: "gemini-2.5-flash-image-preview", **params)
     req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
     body = JSON.dump({
       contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
@@ -49,7 +50,7 @@ class LLM::Gemini
     }.merge!(params))
     req.body = body
     res = execute(request: req)
-    LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+    validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
   end

   ##
@@ -63,9 +64,10 @@ class LLM::Gemini
   # @param [String] prompt The prompt
   # @param [Hash] params Other parameters (see Gemini docs)
   # @raise (see LLM::Provider#request)
+  # @raise [LLM::NoImageError] when no images are returned
   # @note (see LLM::Gemini::Images#create)
   # @return [LLM::Response]
-  def edit(image:, prompt:, model: "gemini-2.
+  def edit(image:, prompt:, model: "gemini-2.5-flash-image-preview", **params)
     req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
     image = LLM.File(image)
     body = JSON.dump({
@@ -74,7 +76,7 @@ class LLM::Gemini
     }.merge!(params)).b
     set_body_stream(req, StringIO.new(body))
     res = execute(request: req)
-    LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+    validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
   end

   ##
@@ -119,6 +121,11 @@ class LLM::Gemini
     PROMPT
   end

+  def validate(res)
+    return res unless res.images.empty?
+    raise LLM::NoImageError.new { _1.response = res.res }, "no images found in response"
+  end
+
   [:headers, :execute, :set_body_stream].each do |m|
     define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
   end
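A hedged usage sketch of the validated image calls; the images accessor, the environment variable, and the output file name are assumptions:

    llm = LLM.gemini(key: ENV["GEMINI_API_KEY"])
    begin
      res = llm.images.create(prompt: "a watercolor fox")
      io  = res.images.first              # StringIO holding the decoded image bytes
      File.binwrite("fox.png", io.read)   # the extension is a guess
    rescue LLM::NoImageError => e
      warn "Gemini returned no image: #{e.message}"
    end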
data/lib/llm/providers/gemini/response/image.rb
CHANGED
@@ -6,7 +6,7 @@ module LLM::Gemini::Response
   # @return [Array<StringIO>]
   def images
     candidates.flat_map do |candidate|
-      parts = candidate
+      parts = candidate&.dig(:content, :parts) || []
       parts.filter_map do
         data = _1.dig(:inlineData, :data)
         next unless data
@@ -22,5 +22,10 @@ module LLM::Gemini::Response
   #  will always return an empty array.
   # @return [Array<String>]
   def urls = []
+
+  ##
+  # Returns one or more candidates, or an empty array
+  # @return [Array<Hash>]
+  def candidates = body.candidates || []
   end
 end
data/lib/llm/providers/openai/response/completion.rb
CHANGED
@@ -18,9 +18,9 @@ module LLM::OpenAI::Response
   alias_method :messages, :choices

   def model = body.model
-  def prompt_tokens = body.usage
-  def completion_tokens = body.usage
-  def total_tokens = body.usage
+  def prompt_tokens = body.usage["prompt_tokens"]
+  def completion_tokens = body.usage["completion_tokens"]
+  def total_tokens = body.usage["total_tokens"]

   private

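The analogous OpenAI accessors, sketched under the same assumptions as the Anthropic example above:

    res = LLM.openai(key: ENV["OPENAI_API_KEY"]).complete("Hello")
    res.prompt_tokens      # body.usage["prompt_tokens"]
    res.completion_tokens  # body.usage["completion_tokens"]
    res.total_tokens       # body.usage["total_tokens"]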
data/lib/llm/providers/openai.rb
CHANGED
@@ -68,6 +68,7 @@ module LLM
       params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
       role, stream = params.delete(:role), params.delete(:stream)
       params[:stream] = true if stream.respond_to?(:<<) || stream == true
+      params[:stream_options] = {include_usage: true}.merge!(params[:stream_options] || {}) if params[:stream]
       req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
       body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
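A hedged sketch of the new stream_options default; the override behaviour follows from the merge! order above, while the complete signature and key handling are assumptions:

    llm = LLM.openai(key: ENV["OPENAI_API_KEY"])
    llm.complete("Hello", stream: $stdout)                 # stream_options defaults to {include_usage: true}
    llm.complete("Hello", stream: true,
                 stream_options: {include_usage: false})   # caller-supplied keys win the merge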
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -20,13 +20,15 @@ module LLM
   require_relative "llm/eventhandler"
   require_relative "llm/tool"

+  @mutex = Mutex.new
+
   module_function

   ##
   # @param (see LLM::Provider#initialize)
   # @return (see LLM::Anthropic#initialize)
   def anthropic(**)
-    require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
+    @mutex.synchronize { require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic) }
     LLM::Anthropic.new(**)
   end

@@ -34,7 +36,7 @@ module LLM
   # @param (see LLM::Provider#initialize)
   # @return (see LLM::Gemini#initialize)
   def gemini(**)
-    require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
+    @mutex.synchronize { require_relative "llm/providers/gemini" unless defined?(LLM::Gemini) }
     LLM::Gemini.new(**)
   end

@@ -42,7 +44,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::Ollama#initialize)
   def ollama(key: nil, **)
-    require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
+    @mutex.synchronize { require_relative "llm/providers/ollama" unless defined?(LLM::Ollama) }
     LLM::Ollama.new(key:, **)
   end

@@ -50,7 +52,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::LlamaCpp#initialize)
   def llamacpp(key: nil, **)
-    require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp)
+    @mutex.synchronize { require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp) }
     LLM::LlamaCpp.new(key:, **)
   end

@@ -58,7 +60,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::DeepSeek#initialize)
   def deepseek(**)
-    require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek)
+    @mutex.synchronize { require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek) }
     LLM::DeepSeek.new(**)
   end

@@ -66,7 +68,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::OpenAI#initialize)
   def openai(**)
-    require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
+    @mutex.synchronize { require_relative "llm/providers/openai" unless defined?(LLM::OpenAI) }
     LLM::OpenAI.new(**)
   end

@@ -75,7 +77,7 @@ module LLM
   # @param host (see LLM::XAI#initialize)
   # @return (see LLM::XAI#initialize)
   def xai(**)
-    require_relative "llm/providers/xai" unless defined?(LLM::XAI)
+    @mutex.synchronize { require_relative "llm/providers/xai" unless defined?(LLM::XAI) }
     LLM::XAI.new(**)
   end

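A small sketch of what the module-level mutex buys: provider constructors can now be called from several threads without racing on the lazy require_relative calls (the key handling is an assumption):

    threads = 4.times.map do
      Thread.new { LLM.openai(key: ENV["OPENAI_API_KEY"]) }
    end
    threads.each(&:join)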