open_ai_bot 0.3.3 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 2aafb706ad49054fb0e35aab308ea6486ab89ea7e6740f081033b49a972b4dfb
-  data.tar.gz: 2b6b9c143e1465bc2e1f984d8ec611b234644b6f99bf3a033ca160a558871625
+  metadata.gz: b2e5ee8b986bfaff8105b3ae2415e3e5dc0401ad836f4a7b3a3ae4f402c72f07
+  data.tar.gz: 781bef28b9a896f40e7c07933bc9ba180875fc242b992749cc6c5eff82c2cc85
 SHA512:
-  metadata.gz: f486692224acc9d6897a426b37e56673779d2631bd2577e9a26ed0ff99715a1e8b14972649cfbbaab02db47233a878bd85f00b1a4cb0fb966a169d3a651a56f9
-  data.tar.gz: 6d0c11a1f76f4edf83ec4a011ded1a88bfc73d48ff7a7567c57525d519ec568fc458f5c262787422ac5f35fad97fe91e2f4d0b5c6564a7d5058ec08b3fd3458b
+  metadata.gz: c1f0c896c0d569751ceb554502918ecbd6521db9d734b3725a152686c343137b4b3f28c8062da9655eaa2a80fdb85b7320bc8bfea2573d90f5e0a1c522bb49c5
+  data.tar.gz: 5bf24970ad9796510f9de792978529d4413a6d39bd83277c1ab82da296cbb8cc61a2e534194182f3697350e1e8f76b70a5bab1c0a3c855c51ebe204eb5354e7c
data/Gemfile CHANGED
@@ -4,4 +4,4 @@ source "https://rubygems.org"
 
 gem "down", "~> 5.4"
 gem "rubydium", ">= 0.2.5"
-gem "ruby-openai", "~> 5.1"
+gem "ruby-openai", "~> 8.1"
data/Gemfile.lock CHANGED
@@ -35,26 +35,29 @@ GEM
       dry-inflector (~> 1.0)
       dry-logic (~> 1.4)
       zeitwerk (~> 2.6)
-    event_stream_parser (0.3.0)
-    faraday (2.9.1)
-      faraday-net_http (>= 2.0, < 3.2)
-    faraday-multipart (1.0.4)
-      multipart-post (~> 2)
-    faraday-net_http (3.1.0)
-      net-http
+    event_stream_parser (1.0.0)
+    faraday (2.13.1)
+      faraday-net_http (>= 2.0, < 3.5)
+      json
+      logger
+    faraday-multipart (1.1.0)
+      multipart-post (~> 2.0)
+    faraday-net_http (3.4.1)
+      net-http (>= 0.5.0)
     fiber-annotation (0.2.0)
     fiber-local (1.1.0)
       fiber-storage
     fiber-storage (0.1.1)
     ice_nine (0.11.2)
     io-event (1.6.0)
-    json (2.7.2)
+    json (2.12.2)
+    logger (1.7.0)
     multipart-post (2.4.1)
-    net-http (0.4.1)
+    net-http (0.6.0)
       uri
     public_suffix (5.0.5)
-    ruby-openai (5.2.0)
-      event_stream_parser (>= 0.3.0, < 1.0.0)
+    ruby-openai (8.1.0)
+      event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
     rubydium (0.4.1)
@@ -65,7 +68,7 @@ GEM
       faraday (~> 2.0)
       faraday-multipart (~> 1.0)
       zeitwerk (~> 2.6)
-    uri (0.13.0)
+    uri (1.0.3)
     zeitwerk (2.6.15)
 
 PLATFORMS
@@ -74,7 +77,7 @@ PLATFORMS
 
 DEPENDENCIES
   down (~> 5.4)
-  ruby-openai (~> 5.1)
+  ruby-openai (~> 8.1)
   rubydium (>= 0.2.5)
 
 BUNDLED WITH
@@ -145,8 +145,11 @@ module OpenAI
     def get_tokens_info!(response)
       completion_tokens = response.dig("usage", "completion_tokens")
       prompt_tokens = response.dig("usage", "prompt_tokens")
+      cached_prompt_tokens = response.dig("usage", "prompt_tokens_details", "cached_tokens")
 
-      result = current_thread.model.request_cost(completion_tokens:, prompt_tokens:, current_thread:)
+      current_thread.model.request_cost(
+        completion_tokens:, prompt_tokens:, cached_prompt_tokens:, current_thread:
+      )
     end
 
     def send_chat_gpt_response(text, tokens_info)
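
For context, the usage block these dig calls read has the following shape in chat-completion responses (abridged; the token counts are made up for illustration):

    # Illustrative response["usage"] hash, as returned by the OpenAI API:
    {
      "prompt_tokens" => 12_000,
      "completion_tokens" => 500,
      "prompt_tokens_details" => { "cached_tokens" => 8_000 }
    }
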
@@ -9,7 +9,7 @@ module OpenAI
     end
 
     attr_reader :history
-    attr_reader :model
+    attr_accessor :model
 
     alias_method :messages, :history
 
data/lib/open_ai/model.rb CHANGED
@@ -1,23 +1,59 @@
 module OpenAI
   class Model
-    # All prices are per 1K tokens
+    # All prices are in USD per 1M tokens
     MODEL_INFO = {
+      "gpt-4.1": {
+        max_context: 1_047_576,
+        input_price: 2.00,
+        cached_input_price: 0.50,
+        output_price: 8.00,
+        vision: true
+      },
+      "gpt-4.1-mini": {
+        max_context: 1_047_576,
+        input_price: 0.40,
+        cached_input_price: 0.10,
+        output_price: 1.60,
+        vision: true
+      },
+      "gpt-4.1-nano": {
+        max_context: 1_047_576,
+        input_price: 0.10,
+        cached_input_price: 0.025,
+        output_price: 0.40,
+        vision: true
+      },
       "gpt-4o": {
         max_context: 128_000,
-        prompt_price: 0.005,
-        completion_price: 0.015,
+        input_price: 5.00,
+        cached_input_price: 2.50,
+        output_price: 20.00,
         vision: true
       },
-      "gpt-3.5-turbo": {
-        max_context: 16385,
-        prompt_price: 0.0005,
-        completion_price: 0.0015
+      "gpt-4o-mini": {
+        max_context: 128_000,
+        input_price: 0.60,
+        cached_input_price: 0.30,
+        output_price: 2.40,
+        vision: true
+      },
+      "o3": {
+        max_context: 200_000,
+        input_price: 2.00,
+        cached_input_price: 0.50,
+        output_price: 8.00,
+        vision: true
+      },
+      "o4-mini": {
+        max_context: 200_000,
+        input_price: 1.10,
+        cached_input_price: 0.275,
+        output_price: 4.40,
+        vision: true
       }
     }
 
-    attr_accessor :max_context, :prompt_price, :completion_price
-
-    [:max_context, :prompt_price, :completion_price].each do |attr|
+    [:max_context, :input_price, :cached_input_price, :output_price].each do |attr|
       define_method(attr) do
         MODEL_INFO[@model][attr]
       end
@@ -29,9 +65,14 @@ module OpenAI
       end
 
       @model = model
+      puts "Using #{@model}"
     end
 
     def to_s
+      @model.to_s
+    end
+
+    def to_sym
       @model
     end
 
@@ -39,15 +80,17 @@ module OpenAI
       MODEL_INFO[@model][:vision]
     end
 
-    def request_cost(prompt_tokens:, completion_tokens:, current_thread:)
-      prompt_cost = prompt_tokens * prompt_price / 1000
-      completion_cost = completion_tokens * completion_price / 1000
+    def request_cost(prompt_tokens:, cached_prompt_tokens:, completion_tokens:, current_thread:)
+      prompt_cost = prompt_tokens * input_price / 1_000_000
+      cached_prompt_cost = cached_prompt_tokens * cached_input_price / 1_000_000
+      completion_cost = completion_tokens * output_price / 1_000_000
 
-      total = prompt_cost + completion_cost
+      total = prompt_cost + cached_prompt_cost + completion_cost
       thread_total = current_thread.total_cost
 
       info = "\n\n" + {
-        prompt: "#{prompt_tokens} tokens (#{prompt_cost.round(5)}$)",
+        cached_prompt: "#{cached_prompt_tokens} tokens (#{cached_prompt_cost.round(5)}$)",
+        uncached_prompt: "#{prompt_tokens} tokens (#{prompt_cost.round(5)}$)",
         completion: "#{completion_tokens} tokens (#{completion_cost.round(5)}$)",
         total: "#{total.round(5)}$",
         total_for_this_conversation: "#{(thread_total + total).round(5)}$",
@@ -59,4 +102,4 @@ module OpenAI
       { info:, total: }
     end
   end
-end
+end
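
To make the new per-1M-token accounting in request_cost concrete, here is a small worked example (illustrative only, not code from the gem), priced at the gpt-4.1-mini rates from the table above, for a response whose usage reported 12_000 prompt tokens, 8_000 cached prompt tokens, and 500 completion tokens:

    # Illustrative cost calculation mirroring the new request_cost formula.
    input_price        = 0.40   # USD per 1M prompt tokens
    cached_input_price = 0.10   # USD per 1M cached prompt tokens
    output_price       = 1.60   # USD per 1M completion tokens

    prompt_cost        = 12_000 * input_price / 1_000_000        # => 0.0048
    cached_prompt_cost = 8_000 * cached_input_price / 1_000_000  # => 0.0008
    completion_cost    = 500 * output_price / 1_000_000          # => 0.0008

    total = prompt_cost + cached_prompt_cost + completion_cost   # => 0.0064
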
data/lib/open_ai_bot.rb CHANGED
@@ -19,6 +19,7 @@ class OpenAIBot < Rubydium::Bot
  include OpenAI::Whisper
 
  on_every_message :handle_gpt_command
+ on_every_message :handle_model_query
  on_every_message :transcribe
 
  on_command "/restart", :init_session, description: "Resets ChatGPT session"
@@ -27,6 +28,47 @@ class OpenAIBot < Rubydium::Bot
   on_command "/help", description: "Sends useful help info" do
     reply(self.class.help_message)
   end
+  on_command "/d" do
+    return unless @user.username == config.owner_username
+    return unless @target&.id.in? [config.bot_id, @user.id]
+
+    current_thread.delete(@replies_to.message_id)
+    safe_delete(@replies_to)
+    safe_delete(@msg)
+  end
+
+  on_command "/model" do
+    options = []
+    OpenAI::Model::MODEL_INFO.each do |model, info|
+      options << [
+        Telegram::Bot::Types::InlineKeyboardButton.new(
+          text: "#{model} - #{sprintf('%.2f', info[:output_price])}$",
+          callback_data: "/set #{model}"
+        )
+      ]
+    end
+    markup = Telegram::Bot::Types::InlineKeyboardMarkup.new(inline_keyboard: options)
+    reply("Select a model:", reply_markup: markup)
+  end
+
+  def handle_model_query
+    return unless @update.is_a? Telegram::Bot::Types::CallbackQuery
+    return unless @update.data.start_with? "/set "
+    return unless @user.username == config.owner_username
+
+    model = @update.data.delete_prefix("/set ").to_sym
+    return if OpenAI::Model::MODEL_INFO[model].nil?
+
+    text =
+      if current_thread.model.to_sym == model
+        "Already set to `#{model}`"
+      else
+        current_thread.model = OpenAI::Model.new(model)
+        "Was `#{current_thread.model.to_s}`, now `#{model}`"
+      end
+
+    reply(text, parse_mode: "Markdown")
+  end
 
   def allowed_chat?
     return true if @user.username == config.owner_username
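
The /model command above builds one inline-keyboard row per MODEL_INFO entry, labelling each button with the model name and its output price and packing the chosen model into callback_data, which handle_model_query later parses back out with delete_prefix("/set ").to_sym. A rough sketch (plain Ruby, no Telegram objects, two hand-copied entries) of the label/callback pairs it would generate:

    # Illustrative only: label and callback_data strings for two of the
    # MODEL_INFO entries shown earlier.
    model_info = {
      "gpt-4.1-mini": { output_price: 1.60 },
      "o4-mini": { output_price: 4.40 }
    }

    model_info.each do |model, info|
      text          = "#{model} - #{sprintf('%.2f', info[:output_price])}$"
      callback_data = "/set #{model}"
      puts [text, callback_data].inspect
    end
    # ["gpt-4.1-mini - 1.60$", "/set gpt-4.1-mini"]
    # ["o4-mini - 4.40$", "/set o4-mini"]
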
data/main.rb CHANGED
@@ -24,7 +24,7 @@ bot = bots[bot_name]
 bot.config = YAML.load_file("#{__dir__}/config.yaml")
 bot.configure do |config|
   config.open_ai_client = OpenAI::Client.new(
-    access_token: config.open_ai_token
+    access_token: config.open_ai['token']
     # organization_id: config.open_ai_organization_id
   )
 end
data/open_ai_bot.gemspec CHANGED
@@ -8,7 +8,7 @@ require_relative "lib/ext/in"
 
 Gem::Specification.new do |spec|
   spec.name = "open_ai_bot"
-  spec.version = "0.3.3"
+  spec.version = "0.3.5"
   spec.authors = ["bulgakke"]
   spec.email = ["vvp835@yandex.ru"]
 
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: open_ai_bot
 version: !ruby/object:Gem::Version
-  version: 0.3.3
+  version: 0.3.5
 platform: ruby
 authors:
 - bulgakke
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-06-08 00:00:00.000000000 Z
+date: 2025-06-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: down