llm.rb 0.16.2 → 0.16.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 01f2984f551757482fbf590485875bef064060d27eb84d3d4325e307db1029c5
-  data.tar.gz: a492d121a5dc1916412296269cf0055ce14512855fecb94e2b018dcefb9b404d
+  metadata.gz: 696893f9ef5355ed4433b265dc7771e76aa9c96c78036a70e698615a0c4d7bdd
+  data.tar.gz: 02f48b464173823d0696ea8d2d21ab5a147cdf946f1c3287dedddc1760a93c38
 SHA512:
-  metadata.gz: 341d074d9d732056599d6f27865f8fcf3fecd0f6062de73ec5dffcd056031e727c43f8058c472d69b598ae3fa4232bb47b960b331b20b8f7cc9323a86e9beeb7
-  data.tar.gz: aa9c19a4b51b8c379f6d8a8f486172e332d6ca1fb4a3f65fa2b32ff7685a3f48fd6d5db0092bcc3a97fca7f36b4f4214df1dfd675ad1215a172e5ae9ce76cc00
+  metadata.gz: 15c5549e9165a854814c853c381b36d3de6409a9260acb7a4bd2adddffd08520e3ea8f3dea999b111c6bd7706d9f241eddbcd2d103122ea64482a12ca8fa6879
+  data.tar.gz: 971c404ada0cf5ac1af10ffbd4aabd8654fa11fe69beceb6ef3e3fd8ae68ed3f9426ebc4af829beff2610aded2a77224ebe2b4f3e4c3ec4e7dfd5fe21e0fb90b
data/lib/llm/bot.rb CHANGED
@@ -135,5 +135,16 @@ module LLM
       messages.drain
     end
     alias_method :flush, :drain
+
+    ##
+    # Returns token usage for the conversation
+    # @note
+    #  This method returns token usage for the latest
+    #  assistant message, and it returns an empty object
+    #  if there are no assistant messages
+    # @return [LLM::Object]
+    def usage
+      messages.find(&:assistant?)&.usage || LLM::Object.from_hash({})
+    end
   end
 end
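
The new Bot#usage accessor can be exercised as follows. This is a hedged sketch: LLM.openai appears later in this diff, but the bot constructor, the chat call, and the key handling are assumptions drawn from the gem's general style rather than from this diff.

    require "llm"

    llm = LLM.openai(key: ENV["OPENAI_API_KEY"])  # entry point shown in llm.rb below
    bot = LLM::Bot.new(llm)                       # assumed constructor
    bot.chat "Hello!"                             # assumed chat API
    bot.flush                                     # drain buffered messages
    bot.usage                                     # usage of the latest assistant message,
                                                  # or an empty LLM::Object if none exist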
@@ -5,8 +5,8 @@ module LLM::Anthropic::Response
   def choices = format_choices
   def role = body.role
   def model = body.model
-  def prompt_tokens = body.usage&.input_tokens || 0
-  def completion_tokens = body.usage&.output_tokens || 0
+  def prompt_tokens = body.usage["input_tokens"] || 0
+  def completion_tokens = body.usage["output_tokens"] || 0
   def total_tokens = prompt_tokens + completion_tokens

   private
@@ -6,7 +6,7 @@ module LLM::Gemini::Response
   # @return [Array<StringIO>]
   def images
     candidates.flat_map do |candidate|
-      parts = candidate["content"]["parts"]
+      parts = candidate&.dig(:content, :parts) || []
       parts.filter_map do
         data = _1.dig(:inlineData, :data)
         next unless data
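
The Gemini change above guards against candidates that are nil or missing content. A minimal illustration of the failure mode, using plain Ruby values as stand-ins for the parsed response:

    candidate = nil
    # candidate["content"]["parts"]          # old form: NoMethodError on a nil candidate
    candidate&.dig(:content, :parts) || []   # new form: safely returns []

    candidate = {}                           # a candidate without :content behaves the same
    candidate&.dig(:content, :parts) || []   # => []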
@@ -18,9 +18,9 @@ module LLM::OpenAI::Response
   alias_method :messages, :choices

   def model = body.model
-  def prompt_tokens = body.usage&.prompt_tokens
-  def completion_tokens = body.usage&.completion_tokens
-  def total_tokens = body.usage&.total_tokens
+  def prompt_tokens = body.usage["prompt_tokens"]
+  def completion_tokens = body.usage["completion_tokens"]
+  def total_tokens = body.usage["total_tokens"]

   private

@@ -68,6 +68,7 @@ module LLM
     params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
     role, stream = params.delete(:role), params.delete(:stream)
     params[:stream] = true if stream.respond_to?(:<<) || stream == true
+    params[:stream_options] = {include_usage: true}.merge!(params[:stream_options] || {}) if params[:stream]
     req = Net::HTTP::Post.new("/v1/chat/completions", headers)
     messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
     body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
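
The added stream_options line defaults include_usage to true on streaming requests, so usage data arrives in the final chunk, while still letting caller-supplied options win. The merge order can be verified in isolation:

    params = {stream: true, stream_options: {include_usage: false}}
    params[:stream_options] = {include_usage: true}.merge!(params[:stream_options] || {}) if params[:stream]
    params[:stream_options]   # => {include_usage: false}; the caller's value takes precedence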
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module LLM
-  VERSION = "0.16.2"
+  VERSION = "0.16.3"
 end
data/lib/llm.rb CHANGED
@@ -20,13 +20,15 @@ module LLM
   require_relative "llm/eventhandler"
   require_relative "llm/tool"

+  @mutex = Mutex.new
+
   module_function

   ##
   # @param (see LLM::Provider#initialize)
   # @return (see LLM::Anthropic#initialize)
   def anthropic(**)
-    require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
+    @mutex.synchronize { require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic) }
     LLM::Anthropic.new(**)
   end

@@ -34,7 +36,7 @@ module LLM
   # @param (see LLM::Provider#initialize)
   # @return (see LLM::Gemini#initialize)
   def gemini(**)
-    require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
+    @mutex.synchronize { require_relative "llm/providers/gemini" unless defined?(LLM::Gemini) }
     LLM::Gemini.new(**)
   end

@@ -42,7 +44,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::Ollama#initialize)
   def ollama(key: nil, **)
-    require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
+    @mutex.synchronize { require_relative "llm/providers/ollama" unless defined?(LLM::Ollama) }
     LLM::Ollama.new(key:, **)
   end

@@ -50,7 +52,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::LlamaCpp#initialize)
   def llamacpp(key: nil, **)
-    require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp)
+    @mutex.synchronize { require_relative "llm/providers/llamacpp" unless defined?(LLM::LlamaCpp) }
     LLM::LlamaCpp.new(key:, **)
   end

@@ -58,7 +60,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::DeepSeek#initialize)
   def deepseek(**)
-    require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek)
+    @mutex.synchronize { require_relative "llm/providers/deepseek" unless defined?(LLM::DeepSeek) }
     LLM::DeepSeek.new(**)
   end

@@ -66,7 +68,7 @@ module LLM
   # @param key (see LLM::Provider#initialize)
   # @return (see LLM::OpenAI#initialize)
   def openai(**)
-    require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
+    @mutex.synchronize { require_relative "llm/providers/openai" unless defined?(LLM::OpenAI) }
     LLM::OpenAI.new(**)
   end

@@ -75,7 +77,7 @@ module LLM
   # @param host (see LLM::XAI#initialize)
   # @return (see LLM::XAI#initialize)
   def xai(**)
-    require_relative "llm/providers/xai" unless defined?(LLM::XAI)
+    @mutex.synchronize { require_relative "llm/providers/xai" unless defined?(LLM::XAI) }
     LLM::XAI.new(**)
   end

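Every provider entry point above now wraps its lazy require_relative in a module-level Mutex, serializing the defined? check and the require so concurrent callers cannot race. The pattern in isolation (all names here are illustrative, not part of the gem):

    module Lazy
      MUTEX = Mutex.new

      def self.feature
        # Without the mutex, two threads could interleave the defined?
        # check and the require_relative that follows it.
        MUTEX.synchronize { require_relative "feature" unless defined?(Feature) }
        Feature.new
      end
    end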
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llm.rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.16.2
4
+ version: 0.16.3
5
5
  platform: ruby
6
6
  authors:
7
7
  - Antar Azri