translation_api 1.1.1 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 18aab159f877af05a07c70b365d022ac0531868db5a00dbcbd7e861d299137a9
4
- data.tar.gz: d8fe14a44a14753e1b3c8ea22239dce5c845d1b97914c16305501caaf1ba1af0
3
+ metadata.gz: fef1087b2a2ddc29d3a942318b98fee14c24d941250665e69e7a875eba931b56
4
+ data.tar.gz: 4104acc87346c85fc82f0dd6da55475ad308535f63b8ec0f09db6afa73e341bb
5
5
  SHA512:
6
- metadata.gz: 2d8f18647569f3b6d7f11be1f554efe08a23e834d7a626e2420c15119f29bdb526f0b73d6f888525b2f03c3e1f4f757c1ae099014807f2dd7ed11f63de28fdb2
7
- data.tar.gz: ba509a11536d06ea663b43dee3c81615d1a4a8bf8ebcf496aa7f709b6523aaa710b22d4baef92f63347ffe6d4a04e43e9461a2f0933ae7a0bd7c064cedae6f38
6
+ metadata.gz: 8bf6cac81b1c2e6153194bcd7ec173f3333a19972c07f66a425121c5c2276fa982c10f0ce6e3d81ff7c31457b63e657a0e6046f2e566a08705e38108bc739278
7
+ data.tar.gz: '06609efa3d55a13d4cd8eb54a6ea6580cfa94382bb6cc81c272fad94ee78ce029fb18bb7d9317324b575e7b611d8ea2b1d4f3c1845bcf74d027e2c95101fcb42'
data/CHANGELOG.md CHANGED
@@ -48,3 +48,11 @@
48
48
  ## [1.1.1] - 2025-11-23
49
49
 
50
50
  - configの設定順序を逆に変更
51
+
52
+ ## [1.2.0] - 2025-11-23
53
+
54
+ - Gemini対応
55
+
56
+ ## [1.3.0] - 2026-04-11
57
+
58
+ - OpenAIのモデルを更新
data/README.md CHANGED
@@ -6,22 +6,28 @@ Requires api key.
6
6
  ## For OpenAI
7
7
 
8
8
  1. `touch .env`
9
- 2. Add `OPENAI_API_KEY=YOUR_API_KEY`
10
- 3. Optional: `ENV["OPENAI_MODEL"]`
9
+ 2. Add `OPENAI_API_KEY=YOUR_API_KEY` or `GEMINI_API_KEY=YOUR_API_KEY` or `DEEPL_API_KEY=YOUR_API_KEY` to `.env`
10
+ 3. Optional: `ENV["OPENAI_MODEL"]` or `ENV["GEMINI_MODEL"]`
11
11
  4. `TranslationAPI.translate("text")`
12
12
 
13
13
  ### Configuration Options
14
14
 
15
- * output_logs (default: true)
16
15
  * language (default: "japanese")
17
16
  * provider (default: :openai)
17
+ * output_logs (default: true)
18
18
  * except_words (default: [])
19
+ * custom_prompt (default: nil)
20
+ * Only for OpenAI and Gemini
21
+ * deepl_pro (default: false)
22
+ * Only for DeepL
19
23
 
20
- ### Output
24
+ ### Output (Only for OpenAI and Gemini)
21
25
 
22
26
  * Translated_text
23
27
  * Used Tokens
24
- * Cost Spent(https://openai.com/api/pricing/)
28
+ * Cost Spent
29
+ * https://openai.com/api/pricing/
30
+ * https://ai.google.dev/gemini-api/docs/pricing/
25
31
 
26
32
  ## Example
27
33
 
@@ -29,10 +35,11 @@ Exec `ruby example.rb "text"`
29
35
 
30
36
  ```ruby
31
37
  TranslationAPI.configure do |config|
32
- config.language = "english"
33
- config.provider = :deepl
34
- config.output_logs = false
35
- config.except_words = %w[hoge fuga]
38
+ config.language = "english"
39
+ config.provider = :gemini
40
+ config.output_logs = false
41
+ config.except_words = %w[hoge fuga]
42
+ config.custom_prompt = "Please Samurai style."
36
43
  end
37
44
 
38
45
  TranslationAPI.translate("text")
data/Rakefile CHANGED
@@ -1,39 +1,3 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "bundler/gem_tasks"
4
- require "rspec/core/rake_task"
5
- require "rubocop/rake_task"
6
- require "yard"
7
- require_relative "rake_helper"
8
-
9
- desc "analysis"
10
- task :analysis do
11
- sh "bundle install"
12
-
13
- RakeHelper.init_rake_tasks
14
-
15
- puts "--- rspec ---"
16
- Rake::Task[:spec].invoke
17
-
18
- puts "--- rubocop ---"
19
- Rake::Task[:rubocop].invoke
20
-
21
- puts "--- yard ---"
22
- Rake::Task[:yard].invoke
23
- end
24
-
25
- desc "push to github packages and rubygems"
26
- task :push do
27
- sh "bundle install"
28
-
29
- puts "--- build ---"
30
- RakeHelper.build_gem
31
-
32
- puts "--- push to github packages ---"
33
- RakeHelper.push_to_github_packages
34
-
35
- puts "--- push to rubygems ---"
36
- RakeHelper.push_to_rubygems
37
- end
38
-
39
- task default: :analysis
3
+ require "push_gem"
data/example.rb CHANGED
@@ -16,5 +16,10 @@ TranslationAPI.configure do |config|
16
16
  config.custom_prompt = "Please Samurai style."
17
17
  end
18
18
 
19
- translated_text = TranslationAPI.translate(text)
20
- p translated_text
19
+ begin
20
+ translated_text = TranslationAPI.translate(text)
21
+ p translated_text
22
+ rescue StandardError => e
23
+ puts e
24
+ puts e.backtrace
25
+ end
data/how_to_publish.txt CHANGED
@@ -2,9 +2,9 @@ some code change
2
2
  version.rb change
3
3
  changelog.md change
4
4
 
5
- $rake analysis
5
+ $rake gem:check
6
6
 
7
7
  $git commit
8
8
  $git push
9
9
 
10
- $rake push
10
+ $rake gem:push
@@ -0,0 +1,97 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "fileutils"
4
+
5
+ class TranslationAPI
6
+ module Llm
7
+ class Log
8
+ def initialize(provider:, response:, cost:)
9
+ @provider = provider
10
+ @response = response
11
+ @cost = cost
12
+ end
13
+
14
+ def write
15
+ write_translated_text
16
+ write_used_tokens
17
+ write_total_cost
18
+ end
19
+
20
+ private
21
+
22
+ def write_translated_text
23
+ log_file_path = text_path("translated_text.txt")
24
+
25
+ File.open(log_file_path, "a") do |file|
26
+ file.puts(@response.translated_text)
27
+ end
28
+ end
29
+
30
+ def write_used_tokens
31
+ log_file_path = text_path("tokens.txt")
32
+
33
+ existing_input_tokens, existing_output_tokens = read_existing_tokens
34
+ tokens => { input_tokens:, output_tokens: }
35
+
36
+ total_input_tokens = existing_input_tokens + input_tokens
37
+ total_output_tokens = existing_output_tokens + output_tokens
38
+
39
+ File.open(log_file_path, "w") do |file|
40
+ file.puts("input: #{total_input_tokens}")
41
+ file.puts("output: #{total_output_tokens}")
42
+ end
43
+ end
44
+
45
+ def read_existing_tokens
46
+ log_file_path = text_path("tokens.txt")
47
+ input_tokens, output_tokens = 0, 0
48
+
49
+ if File.exist?(log_file_path)
50
+ File.readlines(log_file_path).each do |line|
51
+ tokens = line.split(":").last.strip.to_i
52
+ input_tokens = tokens if line.start_with?("input:")
53
+ output_tokens = tokens if line.start_with?("output:")
54
+ end
55
+ end
56
+
57
+ [input_tokens, output_tokens]
58
+ end
59
+
60
+ def write_total_cost
61
+ log_file_path = text_path("cost.txt")
62
+ tokens => { input_tokens:, output_tokens: }
63
+
64
+ this_cost = @cost.input_cost(input_tokens) + @cost.output_cost(output_tokens)
65
+ total_cost = this_cost + existing_cost
66
+
67
+ File.open(log_file_path, "w") do |file|
68
+ file.puts(format_cost(total_cost))
69
+ end
70
+ end
71
+
72
+ def format_cost(cost)
73
+ "$#{format("%.8f", cost)}"
74
+ end
75
+
76
+ def existing_cost
77
+ log_file_path = text_path("cost.txt")
78
+
79
+ File.exist?(log_file_path) ? File.read(log_file_path).gsub("$", "").to_f : 0.0
80
+ end
81
+
82
+ def tokens
83
+ {
84
+ input_tokens: @response.dig_used_tokens(type: :input),
85
+ output_tokens: @response.dig_used_tokens(type: :output)
86
+ }
87
+ end
88
+
89
+ def text_path(under_logs_path)
90
+ output_dir = "translator_logs/#{@provider.name}"
91
+ FileUtils.mkdir_p(output_dir) unless File.directory?(output_dir)
92
+
93
+ File.join(output_dir, under_logs_path)
94
+ end
95
+ end
96
+ end
97
+ end
@@ -0,0 +1,26 @@
1
+ # frozen_string_literal: true
2
+
3
+ class TranslationAPI
4
+ module Llm
5
+ class Model
6
+ MODEL_ERROR_MESSAGE =
7
+ "Specified model is not supported. Please check the model name."
8
+
9
+ attr_reader :name
10
+
11
+ def initialize(name)
12
+ @name = name
13
+ validate_model!
14
+ end
15
+
16
+ private
17
+
18
+ def validate_model!
19
+ supported_models =
20
+ self.class.const_defined?(:SUPPORTED_MODELS) ? self.class::SUPPORTED_MODELS : []
21
+
22
+ raise ArgumentError, MODEL_ERROR_MESSAGE unless supported_models.include?(@name)
23
+ end
24
+ end
25
+ end
26
+ end
@@ -0,0 +1,40 @@
1
+ # frozen_string_literal: true
2
+
3
+ class TranslationAPI
4
+ module Llm
5
+ class Prompt
6
+ SYSTEM_PROMPT_BASE = <<~TEXT
7
+ Translate only.
8
+ Return result only, no extra info
9
+ Keep symbols
10
+ TEXT
11
+
12
+ def initialize(except_words:, language:, custom_prompt:)
13
+ @except_words = except_words
14
+ @language = language
15
+ @custom_prompt = custom_prompt
16
+ end
17
+
18
+ def system_prompt
19
+ SYSTEM_PROMPT_BASE + except_option_text
20
+ end
21
+
22
+ def user_prompt
23
+ <<~TEXT
24
+ #{@custom_prompt || ""}
25
+ Please translate this text to #{@language}:
26
+ TEXT
27
+ end
28
+
29
+ private
30
+
31
+ def except_option_text
32
+ return "" if @except_words.empty?
33
+
34
+ <<~TEXT
35
+ Words listed next are not translated: [#{@except_words.join(", ")}]
36
+ TEXT
37
+ end
38
+ end
39
+ end
40
+ end
@@ -43,11 +43,11 @@ class TranslationAPI
43
43
  end
44
44
 
45
45
  def validate_api_key!
46
- raise API_KEY_ERROR_MESSAGE unless ENV["DEEPL_API_KEY"] || ENV["DEEPL_AUTH_KEY"]
46
+ raise ArgumentError, API_KEY_ERROR_MESSAGE unless ENV["DEEPL_API_KEY"] || ENV["DEEPL_AUTH_KEY"]
47
47
  end
48
48
 
49
49
  def validate_supported_language!
50
- raise LANGUAGE_UNSUPPORTED_MESSAGE unless supported_language?
50
+ raise ArgumentError, LANGUAGE_UNSUPPORTED_MESSAGE unless supported_language?
51
51
  end
52
52
 
53
53
  def supported_languages
@@ -0,0 +1,62 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "faraday"
4
+ require "json"
5
+ require_relative "response"
6
+
7
+ class TranslationAPI
8
+ module Provider
9
+ class Gemini
10
+ class Chat
11
+ API_KEY_ERROR_MESSAGE = "API key is not found."
12
+
13
+ def initialize(model:, prompt:)
14
+ validate_api_key!
15
+
16
+ @model = model
17
+ @prompt = prompt
18
+ end
19
+
20
+ def call(text)
21
+ Response.new(request(text))
22
+ end
23
+
24
+ private
25
+
26
+ def request(text)
27
+ connection.post("/v1beta/models/#{@model.name}:generateContent") do |request|
28
+ request.body = body(text)
29
+ end
30
+ end
31
+
32
+ def connection
33
+ Faraday.new(
34
+ url: "https://generativelanguage.googleapis.com",
35
+ headers: {
36
+ "Content-Type" => "application/json",
37
+ "x-goog-api-key" => ENV["GEMINI_API_KEY"]
38
+ }
39
+ )
40
+ end
41
+
42
+ def body(text)
43
+ {
44
+ contents: [
45
+ {
46
+ parts: [
47
+ {
48
+ text: @prompt.system_prompt + @prompt.user_prompt + text
49
+ }
50
+ ]
51
+ }
52
+ ]
53
+ }.to_json
54
+ end
55
+
56
+ def validate_api_key!
57
+ raise ArgumentError, API_KEY_ERROR_MESSAGE unless ENV["GEMINI_API_KEY"]
58
+ end
59
+ end
60
+ end
61
+ end
62
+ end
@@ -0,0 +1,73 @@
1
+ # frozen_string_literal: true
2
+
3
+ class TranslationAPI
4
+ module Provider
5
+ class Gemini
6
+ class Cost
7
+ ONE_MILLION = 1_000_000
8
+
9
+ def initialize(model)
10
+ @model = model
11
+ end
12
+
13
+ def input_cost(used_tokens)
14
+ calculate_cost(used_tokens, :input)
15
+ end
16
+
17
+ def output_cost(used_tokens)
18
+ calculate_cost(used_tokens, :output)
19
+ end
20
+
21
+ private
22
+
23
+ def calculate_cost(used_tokens, type)
24
+ used_tokens * token_rates[@model.name][type]
25
+ end
26
+
27
+ def token_rates
28
+ three_pro.merge(two_five_pro).merge(two_five_flash).merge(two_five_flash_lite)
29
+ end
30
+
31
+ def three_pro
32
+ {
33
+ @model.class.three_pro => {
34
+ input: 2.0 / ONE_MILLION,
35
+ output: (2.0 * pro_cost_diff_ratio) / ONE_MILLION
36
+ }
37
+ }
38
+ end
39
+
40
+ def two_five_pro
41
+ {
42
+ @model.class.two_five_pro => {
43
+ input: 1.25 / ONE_MILLION,
44
+ output: (1.25 * pro_cost_diff_ratio) / ONE_MILLION
45
+ }
46
+ }
47
+ end
48
+
49
+ def two_five_flash
50
+ {
51
+ @model.class.two_five_flash => {
52
+ input: 0.3 / ONE_MILLION,
53
+ output: 2.5 / ONE_MILLION
54
+ }
55
+ }
56
+ end
57
+
58
+ def two_five_flash_lite
59
+ {
60
+ @model.class.two_five_flash_lite => {
61
+ input: 0.1 / ONE_MILLION,
62
+ output: 0.4 / ONE_MILLION
63
+ }
64
+ }
65
+ end
66
+
67
+ def pro_cost_diff_ratio
68
+ 2.0
69
+ end
70
+ end
71
+ end
72
+ end
73
+ end
@@ -0,0 +1,34 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "../../llm/model"
4
+
5
+ class TranslationAPI
6
+ module Provider
7
+ class Gemini
8
+ class Model < Llm::Model
9
+ SUPPORTED_MODELS = [
10
+ "gemini-3-pro-preview",
11
+ "gemini-2.5-pro",
12
+ "gemini-2.5-flash",
13
+ "gemini-2.5-flash-lite"
14
+ ].freeze
15
+
16
+ def self.three_pro
17
+ SUPPORTED_MODELS[0]
18
+ end
19
+
20
+ def self.two_five_pro
21
+ SUPPORTED_MODELS[1]
22
+ end
23
+
24
+ def self.two_five_flash
25
+ SUPPORTED_MODELS[2]
26
+ end
27
+
28
+ def self.two_five_flash_lite
29
+ SUPPORTED_MODELS[3]
30
+ end
31
+ end
32
+ end
33
+ end
34
+ end
@@ -0,0 +1,43 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+
5
+ class TranslationAPI
6
+ module Provider
7
+ class Gemini
8
+ class Response
9
+ REQUEST_FAILED_MESSAGE = "Request failed with status"
10
+
11
+ attr_reader :response
12
+
13
+ def initialize(response)
14
+ @response = response
15
+ end
16
+
17
+ def translated_text
18
+ failed_message = "#{REQUEST_FAILED_MESSAGE} #{@response.status}"
19
+ raise ArgumentError, failed_message unless @response.status == 200
20
+
21
+ body_json.dig("candidates", 0, "content", "parts", 0, "text")
22
+ end
23
+
24
+ def dig_used_tokens(type:)
25
+ case type
26
+ when :input
27
+ body_json.dig("usageMetadata", "promptTokenCount")
28
+ when :output
29
+ body_json.dig("usageMetadata", "candidatesTokenCount")
30
+ else
31
+ raise ArgumentError, "Invalid token type: #{type}"
32
+ end
33
+ end
34
+
35
+ private
36
+
37
+ def body_json
38
+ @body_json ||= JSON.parse(@response.body)
39
+ end
40
+ end
41
+ end
42
+ end
43
+ end
@@ -0,0 +1,39 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "../llm/prompt"
4
+ require_relative "../llm/log"
5
+ require_relative "gemini/model"
6
+ require_relative "gemini/chat"
7
+ require_relative "gemini/cost"
8
+
9
+ class TranslationAPI
10
+ module Provider
11
+ class Gemini
12
+ def initialize(output_logs:, except_words:, language:, custom_prompt: nil)
13
+ @model = Model.new(ENV["GEMINI_MODEL"] || Model.two_five_flash)
14
+ @prompt = Llm::Prompt.new(except_words:, language:, custom_prompt:)
15
+ @chat = Chat.new(model: @model, prompt: @prompt)
16
+ @output_logs = output_logs
17
+ end
18
+
19
+ def translate(text)
20
+ return text if text.strip.empty?
21
+
22
+ @response = @chat.call(text)
23
+ log.write if @output_logs
24
+
25
+ @response.translated_text
26
+ end
27
+
28
+ def name
29
+ "gemini"
30
+ end
31
+
32
+ private
33
+
34
+ def log
35
+ Llm::Log.new(provider: self, response: @response, cost: Cost.new(@model))
36
+ end
37
+ end
38
+ end
39
+ end
@@ -0,0 +1,46 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "openai"
4
+ require_relative "response"
5
+
6
+ class TranslationAPI
7
+ module Provider
8
+ class OpenAI
9
+ class Chat
10
+ API_KEY_ERROR_MESSAGE = "API key is not found."
11
+
12
+ def initialize(model:, prompt:)
13
+ validate_api_key!
14
+
15
+ @model = model
16
+ @prompt = prompt
17
+ @client = init_client
18
+ end
19
+
20
+ def call(text)
21
+ Response.new(request(text))
22
+ end
23
+
24
+ private
25
+
26
+ def request(text)
27
+ @client.chat.completions.create(
28
+ model: @model.name,
29
+ messages: [
30
+ { role: "system", content: @prompt.system_prompt },
31
+ { role: "user", content: @prompt.user_prompt + text }
32
+ ]
33
+ )
34
+ end
35
+
36
+ def init_client
37
+ ::OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
38
+ end
39
+
40
+ def validate_api_key!
41
+ raise ArgumentError, API_KEY_ERROR_MESSAGE unless ENV["OPENAI_API_KEY"]
42
+ end
43
+ end
44
+ end
45
+ end
46
+ end
@@ -4,12 +4,10 @@ class TranslationAPI
4
4
  module Provider
5
5
  class OpenAI
6
6
  class Cost
7
- BASE_MODEL_NAME = "gpt-5"
8
7
  ONE_MILLION = 1_000_000
9
- BASE_MODEL_COST = 1.25 / ONE_MILLION
10
8
 
11
- def initialize(provider)
12
- @provider = provider
9
+ def initialize(model)
10
+ @model = model
13
11
  end
14
12
 
15
13
  def input_cost(used_tokens)
@@ -23,62 +21,39 @@ class TranslationAPI
23
21
  private
24
22
 
25
23
  def calculate_cost(used_tokens, type)
26
- used_tokens * token_rates[@provider.using_model][type]
24
+ used_tokens * token_rates[@model.name][type]
27
25
  end
28
26
 
29
27
  def token_rates
30
- normal_models = base_model.merge(mini_model).merge(nano_model)
31
- normal_models.merge(other_models)
28
+ base.merge(mini).merge(nano)
32
29
  end
33
30
 
34
- def base_model
31
+ def base
35
32
  {
36
- BASE_MODEL_NAME => {
37
- input: BASE_MODEL_COST,
38
- output: BASE_MODEL_COST * normal_io_ratio[:output]
33
+ @model.class.base => {
34
+ input: 2.50 / ONE_MILLION,
35
+ output: 15.00 / ONE_MILLION
39
36
  }
40
37
  }
41
38
  end
42
39
 
43
- def mini_model
40
+ def mini
44
41
  {
45
- "#{BASE_MODEL_NAME}-mini" => {
46
- input: BASE_MODEL_COST / normal_cost_diff_ratio,
47
- output: (BASE_MODEL_COST * normal_io_ratio[:output]) / normal_cost_diff_ratio
42
+ @model.class.mini => {
43
+ input: 0.75 / ONE_MILLION,
44
+ output: 4.50 / ONE_MILLION
48
45
  }
49
46
  }
50
47
  end
51
48
 
52
- def nano_model
53
- mini_model_cost = mini_model.values[0][:input]
54
-
55
- {
56
- "#{BASE_MODEL_NAME}-nano" => {
57
- input: mini_model_cost / normal_cost_diff_ratio,
58
- output: (mini_model_cost * normal_io_ratio[:output]) / normal_cost_diff_ratio
59
- }
60
- }
61
- end
62
-
63
- def other_models
49
+ def nano
64
50
  {
65
- "#{BASE_MODEL_NAME}-chat-latest" => {
66
- input: 1.25 / ONE_MILLION,
67
- output: (1.25 * normal_io_ratio[:output]) / ONE_MILLION
51
+ @model.class.nano => {
52
+ input: 0.20 / ONE_MILLION,
53
+ output: 1.25 / ONE_MILLION
68
54
  }
69
55
  }
70
56
  end
71
-
72
- def normal_io_ratio
73
- {
74
- input: 1.0,
75
- output: 8.0
76
- }
77
- end
78
-
79
- def normal_cost_diff_ratio
80
- 5.0
81
- end
82
57
  end
83
58
  end
84
59
  end
@@ -0,0 +1,29 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "../../llm/model"
4
+
5
+ class TranslationAPI
6
+ module Provider
7
+ class OpenAI
8
+ class Model < Llm::Model
9
+ SUPPORTED_MODELS = [
10
+ "gpt-5.4",
11
+ "gpt-5.4-mini",
12
+ "gpt-5.4-nano"
13
+ ].freeze
14
+
15
+ def self.base
16
+ SUPPORTED_MODELS[0]
17
+ end
18
+
19
+ def self.mini
20
+ SUPPORTED_MODELS[1]
21
+ end
22
+
23
+ def self.nano
24
+ SUPPORTED_MODELS[2]
25
+ end
26
+ end
27
+ end
28
+ end
29
+ end
@@ -0,0 +1,28 @@
1
+ # frozen_string_literal: true
2
+
3
+ class TranslationAPI
4
+ module Provider
5
+ class OpenAI
6
+ class Response
7
+ def initialize(response)
8
+ @response = response
9
+ end
10
+
11
+ def translated_text
12
+ @response.choices[0].message.content
13
+ end
14
+
15
+ def dig_used_tokens(type:)
16
+ case type
17
+ when :input
18
+ @response.usage.prompt_tokens
19
+ when :output
20
+ @response.usage.completion_tokens
21
+ else
22
+ raise ArgumentError, "Invalid token type: #{type}"
23
+ end
24
+ end
25
+ end
26
+ end
27
+ end
28
+ end
@@ -1,97 +1,38 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "openai"
4
- require_relative "openai/log"
3
+ require_relative "../llm/prompt"
4
+ require_relative "../llm/log"
5
+ require_relative "openai/model"
6
+ require_relative "openai/chat"
7
+ require_relative "openai/cost"
5
8
 
6
9
  class TranslationAPI
7
10
  module Provider
8
11
  class OpenAI
9
- SYSTEM_PROMPT_BASE = <<~TEXT
10
- Translate only.
11
- Return result only, no extra info
12
- Keep symbols
13
- TEXT
14
-
15
- API_KEY_ERROR_MESSAGE = "API key is not found."
16
-
17
- MODEL_ERROR_MESSAGE =
18
- "Specified model is not supported. Please check the model name."
19
-
20
12
  def initialize(output_logs:, except_words:, language:, custom_prompt: nil)
21
- validate_api_key!
22
-
23
- @client = init_client
24
- @output_logs = output_logs
25
- @system_prompt = SYSTEM_PROMPT_BASE + except_option_text(except_words)
26
- @user_prompt = user_prompt_text(language, custom_prompt)
13
+ @model = Model.new(ENV["OPENAI_MODEL"] || Model.nano)
14
+ @prompt = Llm::Prompt.new(except_words:, language:, custom_prompt:)
15
+ @chat = Chat.new(model: @model, prompt: @prompt)
16
+ @output_logs = output_logs
27
17
  end
28
18
 
29
19
  def translate(text)
30
20
  return text if text.strip.empty?
31
21
 
32
- @response = chat_to_api(text)
33
- Log.new(self).write if @output_logs
22
+ @response = @chat.call(text)
23
+ log.write if @output_logs
34
24
 
35
- translated_text
25
+ @response.translated_text
36
26
  end
37
27
 
38
- def translated_text
39
- @response["choices"][0]["message"]["content"]
40
- end
41
-
42
- def using_model
43
- ENV["OPENAI_MODEL"] || "gpt-5-mini"
44
- end
45
-
46
- def dig_used_tokens(type:)
47
- case type
48
- when :input
49
- @response["usage"]["prompt_tokens"]
50
- when :output
51
- @response["usage"]["completion_tokens"]
52
- else
53
- raise ArgumentError, "Invalid token type: #{type}"
54
- end
28
+ def name
29
+ "openai"
55
30
  end
56
31
 
57
32
  private
58
33
 
59
- def validate_api_key!
60
- raise API_KEY_ERROR_MESSAGE unless ENV["OPENAI_API_KEY"]
61
- end
62
-
63
- def init_client
64
- ::OpenAI::Client.new(
65
- access_token: ENV["OPENAI_API_KEY"],
66
- log_errors: true
67
- )
68
- end
69
-
70
- def chat_to_api(text)
71
- @client.chat(
72
- parameters: {
73
- model: using_model,
74
- messages: [
75
- { role: "system", content: @system_prompt },
76
- { role: "user", content: @user_prompt + text }
77
- ]
78
- }
79
- )
80
- end
81
-
82
- def except_option_text(except_words)
83
- return "" if except_words.empty?
84
-
85
- <<~TEXT
86
- Words listed next are not translated: [#{except_words.join(", ")}]
87
- TEXT
88
- end
89
-
90
- def user_prompt_text(language, custom_prompt)
91
- <<~TEXT
92
- #{custom_prompt || ""}
93
- Please translate this text to #{language}:
94
- TEXT
34
+ def log
35
+ Llm::Log.new(provider: self, response: @response, cost: Cost.new(@model))
95
36
  end
96
37
  end
97
38
  end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  class TranslationAPI
4
- VERSION = "1.1.1"
4
+ VERSION = "1.3.0"
5
5
  end
@@ -4,6 +4,7 @@ require "dotenv"
4
4
  require_relative "translation_api/version"
5
5
  require_relative "translation_api/config"
6
6
  require_relative "translation_api/provider/openai"
7
+ require_relative "translation_api/provider/gemini"
7
8
  require_relative "translation_api/provider/deepl"
8
9
 
9
10
  class TranslationAPI
@@ -62,6 +63,13 @@ class TranslationAPI
62
63
  language: @language,
63
64
  custom_prompt: @custom_prompt
64
65
  )
66
+ when :gemini
67
+ Provider::Gemini.new(
68
+ output_logs: @output_logs,
69
+ except_words: @except_words,
70
+ language: @language,
71
+ custom_prompt: @custom_prompt
72
+ )
65
73
  when :deepl
66
74
  Provider::DeepL.new(
67
75
  pro: config.deepl_pro,
@@ -69,7 +77,7 @@ class TranslationAPI
69
77
  language: @language
70
78
  )
71
79
  else
72
- raise UNSUPPORTED_PROVIDER_MESSAGE
80
+ raise ArgumentError, UNSUPPORTED_PROVIDER_MESSAGE
73
81
  end
74
82
  end
75
83
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: translation_api
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.1.1
4
+ version: 1.3.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - milkeclair
@@ -38,7 +38,21 @@ dependencies:
38
38
  - !ruby/object:Gem::Version
39
39
  version: '0'
40
40
  - !ruby/object:Gem::Dependency
41
- name: ruby-openai
41
+ name: faraday
42
+ requirement: !ruby/object:Gem::Requirement
43
+ requirements:
44
+ - - ">="
45
+ - !ruby/object:Gem::Version
46
+ version: '0'
47
+ type: :runtime
48
+ prerelease: false
49
+ version_requirements: !ruby/object:Gem::Requirement
50
+ requirements:
51
+ - - ">="
52
+ - !ruby/object:Gem::Version
53
+ version: '0'
54
+ - !ruby/object:Gem::Dependency
55
+ name: openai
42
56
  requirement: !ruby/object:Gem::Requirement
43
57
  requirements:
44
58
  - - ">="
@@ -68,12 +82,21 @@ files:
68
82
  - how_to_publish.txt
69
83
  - lib/translation_api.rb
70
84
  - lib/translation_api/config.rb
85
+ - lib/translation_api/llm/log.rb
86
+ - lib/translation_api/llm/model.rb
87
+ - lib/translation_api/llm/prompt.rb
71
88
  - lib/translation_api/provider/deepl.rb
89
+ - lib/translation_api/provider/gemini.rb
90
+ - lib/translation_api/provider/gemini/chat.rb
91
+ - lib/translation_api/provider/gemini/cost.rb
92
+ - lib/translation_api/provider/gemini/model.rb
93
+ - lib/translation_api/provider/gemini/response.rb
72
94
  - lib/translation_api/provider/openai.rb
95
+ - lib/translation_api/provider/openai/chat.rb
73
96
  - lib/translation_api/provider/openai/cost.rb
74
- - lib/translation_api/provider/openai/log.rb
97
+ - lib/translation_api/provider/openai/model.rb
98
+ - lib/translation_api/provider/openai/response.rb
75
99
  - lib/translation_api/version.rb
76
- - rake_helper.rb
77
100
  homepage: https://github.com/milkeclair/translation_api
78
101
  licenses:
79
102
  - MIT
@@ -1,99 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- require "fileutils"
4
- require_relative "cost"
5
-
6
- class TranslationAPI
7
- module Provider
8
- class OpenAI
9
- class Log
10
- def initialize(provider)
11
- @provider = provider
12
- @cost = Cost.new(@provider)
13
- end
14
-
15
- def write
16
- write_translated_text
17
- write_used_tokens
18
- write_total_cost
19
- end
20
-
21
- private
22
-
23
- def write_translated_text
24
- log_file_path = text_path("translated_text.txt")
25
-
26
- File.open(log_file_path, "a") do |file|
27
- file.puts(@provider.translated_text)
28
- end
29
- end
30
-
31
- def write_used_tokens
32
- log_file_path = text_path("tokens.txt")
33
-
34
- existing_input_tokens, existing_output_tokens = read_existing_tokens
35
- tokens => { input_tokens:, output_tokens: }
36
-
37
- total_input_tokens = existing_input_tokens + input_tokens
38
- total_output_tokens = existing_output_tokens + output_tokens
39
-
40
- File.open(log_file_path, "w") do |file|
41
- file.puts("input: #{total_input_tokens}")
42
- file.puts("output: #{total_output_tokens}")
43
- end
44
- end
45
-
46
- def read_existing_tokens
47
- log_file_path = text_path("tokens.txt")
48
- input_tokens, output_tokens = 0, 0
49
-
50
- if File.exist?(log_file_path)
51
- File.readlines(log_file_path).each do |line|
52
- tokens = line.split(":").last.strip.to_i
53
- input_tokens = tokens if line.start_with?("input:")
54
- output_tokens = tokens if line.start_with?("output:")
55
- end
56
- end
57
-
58
- [input_tokens, output_tokens]
59
- end
60
-
61
- def write_total_cost
62
- log_file_path = text_path("cost.txt")
63
- tokens => { input_tokens:, output_tokens: }
64
-
65
- this_cost = @cost.input_cost(input_tokens) + @cost.output_cost(output_tokens)
66
- total_cost = this_cost + existing_cost
67
-
68
- File.open(log_file_path, "w") do |file|
69
- file.puts(format_cost(total_cost))
70
- end
71
- end
72
-
73
- def format_cost(cost)
74
- "$#{format("%.8f", cost)}"
75
- end
76
-
77
- def existing_cost
78
- log_file_path = text_path("cost.txt")
79
-
80
- File.exist?(log_file_path) ? File.read(log_file_path).gsub("$", "").to_f : 0.0
81
- end
82
-
83
- def tokens
84
- {
85
- input_tokens: @provider.dig_used_tokens(type: :input),
86
- output_tokens: @provider.dig_used_tokens(type: :output)
87
- }
88
- end
89
-
90
- def text_path(under_logs_path)
91
- output_dir = "translator_logs/openai"
92
- FileUtils.mkdir_p(output_dir) unless File.directory?(output_dir)
93
-
94
- File.join(output_dir, under_logs_path)
95
- end
96
- end
97
- end
98
- end
99
- end
data/rake_helper.rb DELETED
@@ -1,29 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- class RakeHelper
4
- GITHUB_PACKAGES_PUSH_COMMAND =
5
- "gem push --key github --host https://rubygems.pkg.github.com/milkeclair " \
6
- "pkg/translation_api-#{TranslationAPI::VERSION}.gem".freeze
7
-
8
- RUBYGEMS_PUSH_COMMAND =
9
- "gem push --host https://rubygems.org " \
10
- "pkg/translation_api-#{TranslationAPI::VERSION}.gem".freeze
11
-
12
- def self.init_rake_tasks
13
- RSpec::Core::RakeTask.new(:spec) { |task| task.verbose = false }
14
- RuboCop::RakeTask.new
15
- YARD::Rake::YardocTask.new
16
- end
17
-
18
- def self.build_gem
19
- abort("gemのビルドに失敗しました") unless system("rake build")
20
- end
21
-
22
- def self.push_to_github_packages
23
- abort("githubへのgemのpushに失敗しました") unless system(GITHUB_PACKAGES_PUSH_COMMAND)
24
- end
25
-
26
- def self.push_to_rubygems
27
- abort("rubygemsへのgemのpushに失敗しました") unless system(RUBYGEMS_PUSH_COMMAND)
28
- end
29
- end