translation_api 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9d36a40c0d987797c40e7ab34cf32d270a5b26b0b2d9372d62bbbd0ede73ca5e
- data.tar.gz: d05a8271c9fa12105651eacbbfbfd04253da239c89932cdec9a2e64d7b5d7895
+ metadata.gz: fd0bf92d0abd7ee5ece885abaf35110441d86e35c5b7c56126928781aafe4ca3
+ data.tar.gz: 394ffe1c9ad32ca3e7806e1ae6e14236fe17b3dcb021dc628141b6851590ebe4
  SHA512:
- metadata.gz: bcc16101336a524fd635c23f4028202a2f4fed6215936df5787da62ea07a902298b4cc63eb41f3351555f41ac72c293ea3498bcbc6f699fbc0605311f4288a69
- data.tar.gz: 5d13abeeb461a92952f1c34715ae0e0b84dd43d65136584e409520b6021d392a849a5853cced542ab613aca7e6c8d93c8b110ee58d9b82006bbf22f826cc6c3e
+ metadata.gz: eaa97abc650985f580c6de3cdbf8eb872cca14e78d7625875b004c1b7e6ca4a2b9090444086c93c353eea0ec4b2ba695eef4e7b22e0c6ac1fae6c7cbbb5af5c9
+ data.tar.gz: 79e67af130d3d290e01c89392e1abe094b5eab77ca0a71f845d1f2c6590a5b78a70c1873fa246ea6a1b41aaa59a76390b992c4396bf681006c3a011a6872007b
data/CHANGELOG.md CHANGED
@@ -44,3 +44,11 @@
  ## [1.1.0] - 2025-11-23

  - Added support for setting a custom prompt
+
+ ## [1.1.1] - 2025-11-23
+
+ - Reversed the order in which config options are applied
+
+ ## [1.2.0] - 2025-11-23
+
+ - Added Gemini support
data/README.md CHANGED
@@ -6,22 +6,28 @@ Requires api key.
  ## For OpenAI

  1. `touch .env`
- 2. Add `OPENAI_API_KEY=YOUR_API_KEY`
- 3. Optional: `ENV["OPENAI_MODEL"]`
+ 2. Add `OPENAI_API_KEY=YOUR_API_KEY`, `GEMINI_API_KEY=YOUR_API_KEY`, or `DEEPL_API_KEY=YOUR_API_KEY` to `.env`
+ 3. Optional: `ENV["OPENAI_MODEL"]` or `ENV["GEMINI_MODEL"]`
  4. `TranslationAPI.translate("text")`

  ### Configuration Options

- * output_logs (default: true)
  * language (default: "japanese")
  * provider (default: :openai)
+ * output_logs (default: true)
  * except_words (default: [])
+ * custom_prompt (default: nil)
+   * Only for OpenAI and Gemini
+ * deepl_pro (default: false)
+   * Only for DeepL

- ### Output
+ ### Output (Only for OpenAI and Gemini)

  * Translated_text
  * Used Tokens
- * Cost Spent(https://openai.com/api/pricing/)
+ * Cost Spent
+   * https://openai.com/api/pricing/
+   * https://ai.google.dev/gemini-api/docs/pricing/

  ## Example

@@ -29,10 +35,11 @@ Exec `ruby example.rb "text"`
  ```ruby
  TranslationAPI.configure do |config|
-   config.language = "english"
-   config.provider = :deepl
-   config.output_logs = false
-   config.except_words = %w[hoge fuga]
+   config.language = "english"
+   config.provider = :gemini
+   config.output_logs = false
+   config.except_words = %w[hoge fuga]
+   config.custom_prompt = "Please Samurai style."
  end

  TranslationAPI.translate("text")
data/example.rb CHANGED
@@ -16,5 +16,10 @@ TranslationAPI.configure do |config|
    config.custom_prompt = "Please Samurai style."
  end

- translated_text = TranslationAPI.translate(text)
- p translated_text
+ begin
+   translated_text = TranslationAPI.translate(text)
+   p translated_text
+ rescue StandardError => e
+   puts e
+   puts e.backtrace
+ end
data/lib/translation_api/llm/log.rb ADDED
@@ -0,0 +1,97 @@
+ # frozen_string_literal: true
+
+ require "fileutils"
+
+ class TranslationAPI
+   module Llm
+     class Log
+       def initialize(provider:, response:, cost:)
+         @provider = provider
+         @response = response
+         @cost = cost
+       end
+
+       def write
+         write_translated_text
+         write_used_tokens
+         write_total_cost
+       end
+
+       private
+
+       def write_translated_text
+         log_file_path = text_path("translated_text.txt")
+
+         File.open(log_file_path, "a") do |file|
+           file.puts(@response.translated_text)
+         end
+       end
+
+       def write_used_tokens
+         log_file_path = text_path("tokens.txt")
+
+         existing_input_tokens, existing_output_tokens = read_existing_tokens
+         tokens => { input_tokens:, output_tokens: }
+
+         total_input_tokens = existing_input_tokens + input_tokens
+         total_output_tokens = existing_output_tokens + output_tokens
+
+         File.open(log_file_path, "w") do |file|
+           file.puts("input: #{total_input_tokens}")
+           file.puts("output: #{total_output_tokens}")
+         end
+       end
+
+       def read_existing_tokens
+         log_file_path = text_path("tokens.txt")
+         input_tokens, output_tokens = 0, 0
+
+         if File.exist?(log_file_path)
+           File.readlines(log_file_path).each do |line|
+             tokens = line.split(":").last.strip.to_i
+             input_tokens = tokens if line.start_with?("input:")
+             output_tokens = tokens if line.start_with?("output:")
+           end
+         end
+
+         [input_tokens, output_tokens]
+       end
+
+       def write_total_cost
+         log_file_path = text_path("cost.txt")
+         tokens => { input_tokens:, output_tokens: }
+
+         this_cost = @cost.input_cost(input_tokens) + @cost.output_cost(output_tokens)
+         total_cost = this_cost + existing_cost
+
+         File.open(log_file_path, "w") do |file|
+           file.puts(format_cost(total_cost))
+         end
+       end
+
+       def format_cost(cost)
+         "$#{format("%.8f", cost)}"
+       end
+
+       def existing_cost
+         log_file_path = text_path("cost.txt")
+
+         File.exist?(log_file_path) ? File.read(log_file_path).gsub("$", "").to_f : 0.0
+       end
+
+       def tokens
+         {
+           input_tokens: @response.dig_used_tokens(type: :input),
+           output_tokens: @response.dig_used_tokens(type: :output)
+         }
+       end
+
+       def text_path(under_logs_path)
+         output_dir = "translator_logs/#{@provider.name}"
+         FileUtils.mkdir_p(output_dir) unless File.directory?(output_dir)
+
+         File.join(output_dir, under_logs_path)
+       end
+     end
+   end
+ end
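
`Llm::Log` is deliberately duck-typed: it only needs a provider that responds to `name`, a response that responds to `translated_text` and `dig_used_tokens(type:)`, and a cost object with `input_cost`/`output_cost`. A minimal sketch exercising it without any API call (the `Fake*` names are hypothetical stand-ins, not part of the gem):

```ruby
require "translation_api/llm/log"

# Hypothetical stand-ins satisfying Log's three expected interfaces.
FakeProvider = Struct.new(:name)
FakeCost = Struct.new(:rate) do
  def input_cost(tokens) = tokens * rate
  def output_cost(tokens) = tokens * rate
end
fake_response = Struct.new(:text) do
  def translated_text = text
  def dig_used_tokens(type:) = type == :input ? 10 : 5
end.new("hola")

TranslationAPI::Llm::Log.new(
  provider: FakeProvider.new("fake"),
  response: fake_response,
  cost: FakeCost.new(1e-6)
).write # writes translated_text.txt, tokens.txt, cost.txt under translator_logs/fake/
```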
data/lib/translation_api/llm/model.rb ADDED
@@ -0,0 +1,26 @@
+ # frozen_string_literal: true
+
+ class TranslationAPI
+   module Llm
+     class Model
+       MODEL_ERROR_MESSAGE =
+         "Specified model is not supported. Please check the model name."
+
+       attr_reader :name
+
+       def initialize(name)
+         @name = name
+         validate_model!
+       end
+
+       private
+
+       def validate_model!
+         supported_models =
+           self.class.const_defined?(:SUPPORTED_MODELS) ? self.class::SUPPORTED_MODELS : []
+
+         raise ArgumentError, MODEL_ERROR_MESSAGE unless supported_models.include?(@name)
+       end
+     end
+   end
+ end
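
The base class reads `SUPPORTED_MODELS` from the concrete subclass via `const_defined?`, so a provider opts in simply by declaring the constant, exactly as the Gemini and OpenAI `Model` classes below do. A toy illustration (hypothetical subclass, not part of the gem):

```ruby
require "translation_api/llm/model"

class ToyModel < TranslationAPI::Llm::Model
  SUPPORTED_MODELS = ["toy-1"].freeze
end

ToyModel.new("toy-1").name # => "toy-1"
ToyModel.new("toy-x")      # raises ArgumentError (MODEL_ERROR_MESSAGE)
```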
data/lib/translation_api/llm/prompt.rb ADDED
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ class TranslationAPI
+   module Llm
+     class Prompt
+       SYSTEM_PROMPT_BASE = <<~TEXT
+         Translate only.
+         Return result only, no extra info
+         Keep symbols
+       TEXT
+
+       def initialize(except_words:, language:, custom_prompt:)
+         @except_words = except_words
+         @language = language
+         @custom_prompt = custom_prompt
+       end
+
+       def system_prompt
+         SYSTEM_PROMPT_BASE + except_option_text
+       end
+
+       def user_prompt
+         <<~TEXT
+           #{@custom_prompt || ""}
+           Please translate this text to #{@language}:
+         TEXT
+       end
+
+       private
+
+       def except_option_text
+         return "" if @except_words.empty?
+
+         <<~TEXT
+           Words listed next are not translated: [#{@except_words.join(", ")}]
+         TEXT
+       end
+     end
+   end
+ end
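
For a concrete sense of the assembled prompts, here is what the class produces for a sample configuration (expected output shown in comments):

```ruby
require "translation_api/llm/prompt"

prompt = TranslationAPI::Llm::Prompt.new(
  except_words: %w[hoge fuga],
  language: "english",
  custom_prompt: nil
)

puts prompt.system_prompt
# Translate only.
# Return result only, no extra info
# Keep symbols
# Words listed next are not translated: [hoge, fuga]

puts prompt.user_prompt
# (leading blank line from the nil custom prompt)
# Please translate this text to english:
```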
data/lib/translation_api/provider/deepl.rb CHANGED
@@ -43,11 +43,11 @@ class TranslationAPI
        end

        def validate_api_key!
-         raise API_KEY_ERROR_MESSAGE unless ENV["DEEPL_API_KEY"] || ENV["DEEPL_AUTH_KEY"]
+         raise ArgumentError, API_KEY_ERROR_MESSAGE unless ENV["DEEPL_API_KEY"] || ENV["DEEPL_AUTH_KEY"]
        end

        def validate_supported_language!
-         raise LANGUAGE_UNSUPPORTED_MESSAGE unless supported_language?
+         raise ArgumentError, LANGUAGE_UNSUPPORTED_MESSAGE unless supported_language?
        end

        def supported_languages
data/lib/translation_api/provider/gemini/chat.rb ADDED
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ require "faraday"
+ require "json"
+ require_relative "response"
+
+ class TranslationAPI
+   module Provider
+     class Gemini
+       class Chat
+         API_KEY_ERROR_MESSAGE = "API key is not found."
+
+         def initialize(model:, prompt:)
+           validate_api_key!
+
+           @model = model
+           @prompt = prompt
+         end
+
+         def call(text)
+           Response.new(request(text))
+         end
+
+         private
+
+         def request(text)
+           connection.post("/v1beta/models/#{@model.name}:generateContent") do |request|
+             request.body = body(text)
+           end
+         end
+
+         def connection
+           Faraday.new(
+             url: "https://generativelanguage.googleapis.com",
+             headers: {
+               "Content-Type" => "application/json",
+               "x-goog-api-key" => ENV["GEMINI_API_KEY"]
+             }
+           )
+         end
+
+         def body(text)
+           {
+             contents: [
+               {
+                 parts: [
+                   {
+                     text: @prompt.system_prompt + @prompt.user_prompt + text
+                   }
+                 ]
+               }
+             ]
+           }.to_json
+         end
+
+         def validate_api_key!
+           raise ArgumentError, API_KEY_ERROR_MESSAGE unless ENV["GEMINI_API_KEY"]
+         end
+       end
+     end
+   end
+ end
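
Note that unlike the OpenAI chat payload, which keeps separate system and user messages, `generateContent` is called here with a single user part, so `Chat#body` concatenates system prompt, user prompt, and input text. A sketch of the resulting payload shape (the placeholder string stands in for the concatenated prompts and input):

```ruby
require "json"

payload = {
  contents: [
    { parts: [{ text: "<system prompt + user prompt + input text>" }] }
  ]
}.to_json
```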
data/lib/translation_api/provider/gemini/cost.rb ADDED
@@ -0,0 +1,73 @@
+ # frozen_string_literal: true
+
+ class TranslationAPI
+   module Provider
+     class Gemini
+       class Cost
+         ONE_MILLION = 1_000_000
+
+         def initialize(model)
+           @model = model
+         end
+
+         def input_cost(used_tokens)
+           calculate_cost(used_tokens, :input)
+         end
+
+         def output_cost(used_tokens)
+           calculate_cost(used_tokens, :output)
+         end
+
+         private
+
+         def calculate_cost(used_tokens, type)
+           used_tokens * token_rates[@model.name][type]
+         end
+
+         def token_rates
+           three_pro.merge(two_five_pro).merge(two_five_flash).merge(two_five_flash_lite)
+         end
+
+         def three_pro
+           {
+             @model.class.three_pro => {
+               input: 2.0 / ONE_MILLION,
+               output: (2.0 * pro_cost_diff_ratio) / ONE_MILLION
+             }
+           }
+         end
+
+         def two_five_pro
+           {
+             @model.class.two_five_pro => {
+               input: 1.25 / ONE_MILLION,
+               output: (1.25 * pro_cost_diff_ratio) / ONE_MILLION
+             }
+           }
+         end
+
+         def two_five_flash
+           {
+             @model.class.two_five_flash => {
+               input: 0.3 / ONE_MILLION,
+               output: 2.5 / ONE_MILLION
+             }
+           }
+         end
+
+         def two_five_flash_lite
+           {
+             @model.class.two_five_flash_lite => {
+               input: 0.1 / ONE_MILLION,
+               output: 0.4 / ONE_MILLION
+             }
+           }
+         end
+
+         def pro_cost_diff_ratio
+           2.0
+         end
+       end
+     end
+   end
+ end
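
The rates are flat per-token prices, so a cost can be checked by hand. For example, a gemini-2.5-flash call that used 1,000 input and 500 output tokens:

```ruby
require "translation_api/provider/gemini/model"
require "translation_api/provider/gemini/cost"

model = TranslationAPI::Provider::Gemini::Model.new("gemini-2.5-flash")
cost  = TranslationAPI::Provider::Gemini::Cost.new(model)

cost.input_cost(1_000) # => 1000 * 0.3 / 1_000_000 = $0.0003
cost.output_cost(500)  # => 500  * 2.5 / 1_000_000 = $0.00125
# Total $0.00155, which Llm::Log would record as "$0.00155000".
```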
data/lib/translation_api/provider/gemini/model.rb ADDED
@@ -0,0 +1,34 @@
+ # frozen_string_literal: true
+
+ require_relative "../../llm/model"
+
+ class TranslationAPI
+   module Provider
+     class Gemini
+       class Model < Llm::Model
+         SUPPORTED_MODELS = [
+           "gemini-3-pro-preview",
+           "gemini-2.5-pro",
+           "gemini-2.5-flash",
+           "gemini-2.5-flash-lite"
+         ].freeze
+
+         def self.three_pro
+           SUPPORTED_MODELS[0]
+         end
+
+         def self.two_five_pro
+           SUPPORTED_MODELS[1]
+         end
+
+         def self.two_five_flash
+           SUPPORTED_MODELS[2]
+         end
+
+         def self.two_five_flash_lite
+           SUPPORTED_MODELS[3]
+         end
+       end
+     end
+   end
+ end
data/lib/translation_api/provider/gemini/response.rb ADDED
@@ -0,0 +1,43 @@
+ # frozen_string_literal: true
+
+ require "json"
+
+ class TranslationAPI
+   module Provider
+     class Gemini
+       class Response
+         REQUEST_FAILED_MESSAGE = "Request failed with status"
+
+         attr_reader :response
+
+         def initialize(response)
+           @response = response
+         end
+
+         def translated_text
+           failed_message = "#{REQUEST_FAILED_MESSAGE} #{@response.status}"
+           raise ArgumentError, failed_message unless @response.status == 200
+
+           body_json.dig("candidates", 0, "content", "parts", 0, "text")
+         end
+
+         def dig_used_tokens(type:)
+           case type
+           when :input
+             body_json.dig("usageMetadata", "promptTokenCount")
+           when :output
+             body_json.dig("usageMetadata", "candidatesTokenCount")
+           else
+             raise ArgumentError, "Invalid token type: #{type}"
+           end
+         end
+
+         private
+
+         def body_json
+           @body_json ||= JSON.parse(@response.body)
+         end
+       end
+     end
+   end
+ end
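
`Gemini::Response` only needs an object with `#status` and `#body`, so a plain `Struct` (hypothetical, for illustration only) can stand in for the Faraday response:

```ruby
require "json"
require "translation_api/provider/gemini/response"

FakeHTTP = Struct.new(:status, :body) # hypothetical stand-in
body = {
  "candidates" => [{ "content" => { "parts" => [{ "text" => "こんにちは" }] } }],
  "usageMetadata" => { "promptTokenCount" => 12, "candidatesTokenCount" => 3 }
}.to_json

response = TranslationAPI::Provider::Gemini::Response.new(FakeHTTP.new(200, body))
response.translated_text                # => "こんにちは"
response.dig_used_tokens(type: :input)  # => 12
response.dig_used_tokens(type: :output) # => 3
```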
data/lib/translation_api/provider/gemini.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ require_relative "../llm/prompt"
+ require_relative "../llm/log"
+ require_relative "gemini/model"
+ require_relative "gemini/chat"
+ require_relative "gemini/cost"
+
+ class TranslationAPI
+   module Provider
+     class Gemini
+       def initialize(output_logs:, except_words:, language:, custom_prompt: nil)
+         @model = Model.new(ENV["GEMINI_MODEL"] || Model.two_five_flash)
+         @prompt = Llm::Prompt.new(except_words:, language:, custom_prompt:)
+         @chat = Chat.new(model: @model, prompt: @prompt)
+         @output_logs = output_logs
+       end
+
+       def translate(text)
+         return text if text.strip.empty?
+
+         @response = @chat.call(text)
+         log.write if @output_logs
+
+         @response.translated_text
+       end
+
+       def name
+         "gemini"
+       end
+
+       private
+
+       def log
+         Llm::Log.new(provider: self, response: @response, cost: Cost.new(@model))
+       end
+     end
+   end
+ end
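
Wired together, the provider is selected through the existing configure flow. A minimal end-to-end sketch (assumes `GEMINI_API_KEY` is set in the environment; `GEMINI_MODEL` is optional and falls back to gemini-2.5-flash per the initializer above):

```ruby
require "translation_api"

TranslationAPI.configure do |config|
  config.provider = :gemini
  config.language = "english"
end

TranslationAPI.translate("こんにちは") # => e.g. "Hello"
```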
data/lib/translation_api/provider/openai/chat.rb ADDED
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+
+ require "openai"
+ require_relative "response"
+
+ class TranslationAPI
+   module Provider
+     class OpenAI
+       class Chat
+         API_KEY_ERROR_MESSAGE = "API key is not found."
+
+         def initialize(model:, prompt:)
+           validate_api_key!
+
+           @model = model
+           @prompt = prompt
+           @client = init_client
+         end
+
+         def call(text)
+           Response.new(request(text))
+         end
+
+         private
+
+         def request(text)
+           @client.chat(
+             parameters: {
+               model: @model.name,
+               messages: [
+                 { role: "system", content: @prompt.system_prompt },
+                 { role: "user", content: @prompt.user_prompt + text }
+               ]
+             }
+           )
+         end
+
+         def init_client
+           ::OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])
+         end
+
+         def validate_api_key!
+           raise ArgumentError, API_KEY_ERROR_MESSAGE unless ENV["OPENAI_API_KEY"]
+         end
+       end
+     end
+   end
+ end
data/lib/translation_api/provider/openai/cost.rb CHANGED
@@ -4,12 +4,11 @@ class TranslationAPI
  module Provider
    class OpenAI
      class Cost
-       BASE_MODEL_NAME = "gpt-5"
        ONE_MILLION = 1_000_000
        BASE_MODEL_COST = 1.25 / ONE_MILLION

-       def initialize(provider)
-         @provider = provider
+       def initialize(model)
+         @model = model
        end

        def input_cost(used_tokens)
@@ -23,48 +22,38 @@ class TranslationAPI
        private

        def calculate_cost(used_tokens, type)
-         used_tokens * token_rates[@provider.using_model][type]
+         used_tokens * token_rates[@model.name][type]
        end

        def token_rates
-         normal_models = base_model.merge(mini_model).merge(nano_model)
-         normal_models.merge(other_models)
+         base.merge(mini).merge(nano)
        end

-       def base_model
+       def base
          {
-           BASE_MODEL_NAME => {
+           @model.class.base => {
              input: BASE_MODEL_COST,
              output: BASE_MODEL_COST * normal_io_ratio[:output]
            }
          }
        end

-       def mini_model
+       def mini
          {
-           "#{BASE_MODEL_NAME}-mini" => {
+           @model.class.mini => {
              input: BASE_MODEL_COST / normal_cost_diff_ratio,
              output: (BASE_MODEL_COST * normal_io_ratio[:output]) / normal_cost_diff_ratio
            }
          }
        end

-       def nano_model
-         mini_model_cost = mini_model.values[0][:input]
+       def nano
+         mini_cost = mini.values[0][:input]

          {
-           "#{BASE_MODEL_NAME}-nano" => {
-             input: mini_model_cost / normal_cost_diff_ratio,
-             output: (mini_model_cost * normal_io_ratio[:output]) / normal_cost_diff_ratio
-           }
-         }
-       end
-
-       def other_models
-         {
-           "#{BASE_MODEL_NAME}-chat-latest" => {
-             input: 1.25 / ONE_MILLION,
-             output: (1.25 * normal_io_ratio[:output]) / ONE_MILLION
+           @model.class.nano => {
+             input: mini_cost / normal_cost_diff_ratio,
+             output: (mini_cost * normal_io_ratio[:output]) / normal_cost_diff_ratio
            }
          }
        end
data/lib/translation_api/provider/openai/model.rb ADDED
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ require_relative "../../llm/model"
+
+ class TranslationAPI
+   module Provider
+     class OpenAI
+       class Model < Llm::Model
+         SUPPORTED_MODELS = [
+           "gpt-5",
+           "gpt-5-mini",
+           "gpt-5-nano"
+         ].freeze
+
+         def self.base
+           SUPPORTED_MODELS[0]
+         end
+
+         def self.mini
+           SUPPORTED_MODELS[1]
+         end
+
+         def self.nano
+           SUPPORTED_MODELS[2]
+         end
+       end
+     end
+   end
+ end
data/lib/translation_api/provider/openai/response.rb ADDED
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ class TranslationAPI
+   module Provider
+     class OpenAI
+       class Response
+         def initialize(response)
+           @response = response
+         end
+
+         def translated_text
+           @response.dig("choices", 0, "message", "content")
+         end
+
+         def dig_used_tokens(type:)
+           case type
+           when :input
+             @response.dig("usage", "prompt_tokens")
+           when :output
+             @response.dig("usage", "completion_tokens")
+           else
+             raise ArgumentError, "Invalid token type: #{type}"
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/translation_api/provider/openai.rb CHANGED
@@ -1,97 +1,38 @@
  # frozen_string_literal: true

- require "openai"
- require_relative "openai/log"
+ require_relative "../llm/prompt"
+ require_relative "../llm/log"
+ require_relative "openai/model"
+ require_relative "openai/chat"
+ require_relative "openai/cost"

  class TranslationAPI
    module Provider
      class OpenAI
-       SYSTEM_PROMPT_BASE = <<~TEXT
-         Translate only.
-         Return result only, no extra info
-         Keep symbols
-       TEXT
-
-       API_KEY_ERROR_MESSAGE = "API key is not found."
-
-       MODEL_ERROR_MESSAGE =
-         "Specified model is not supported. Please check the model name."
-
        def initialize(output_logs:, except_words:, language:, custom_prompt: nil)
-         validate_api_key!
-
-         @client = init_client
-         @output_logs = output_logs
-         @system_prompt = SYSTEM_PROMPT_BASE + except_option_text(except_words)
-         @user_prompt = user_prompt_text(language, custom_prompt)
+         @model = Model.new(ENV["OPENAI_MODEL"] || Model.nano)
+         @prompt = Llm::Prompt.new(except_words:, language:, custom_prompt:)
+         @chat = Chat.new(model: @model, prompt: @prompt)
+         @output_logs = output_logs
        end

        def translate(text)
          return text if text.strip.empty?

-         @response = chat_to_api(text)
-         Log.new(self).write if @output_logs
+         @response = @chat.call(text)
+         log.write if @output_logs

-         translated_text
+         @response.translated_text
        end

-       def translated_text
-         @response["choices"][0]["message"]["content"]
-       end
-
-       def using_model
-         ENV["OPENAI_MODEL"] || "gpt-5-mini"
-       end
-
-       def dig_used_tokens(type:)
-         case type
-         when :input
-           @response["usage"]["prompt_tokens"]
-         when :output
-           @response["usage"]["completion_tokens"]
-         else
-           raise ArgumentError, "Invalid token type: #{type}"
-         end
+       def name
+         "openai"
        end

        private

-       def validate_api_key!
-         raise API_KEY_ERROR_MESSAGE unless ENV["OPENAI_API_KEY"]
-       end
-
-       def init_client
-         ::OpenAI::Client.new(
-           access_token: ENV["OPENAI_API_KEY"],
-           log_errors: true
-         )
-       end
-
-       def chat_to_api(text)
-         @client.chat(
-           parameters: {
-             model: using_model,
-             messages: [
-               { role: "system", content: @system_prompt },
-               { role: "user", content: @user_prompt + text }
-             ]
-           }
-         )
-       end
-
-       def except_option_text(except_words)
-         return "" if except_words.empty?
-
-         <<~TEXT
-           Words listed next are not translated: [#{except_words.join(", ")}]
-         TEXT
-       end
-
-       def user_prompt_text(language, custom_prompt)
-         <<~TEXT
-           #{custom_prompt || ""}
-           Please translate this text to #{language}:
-         TEXT
+       def log
+         Llm::Log.new(provider: self, response: @response, cost: Cost.new(@model))
        end
      end
    end
data/lib/translation_api/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  class TranslationAPI
-   VERSION = "1.1.0"
+   VERSION = "1.2.0"
  end
data/lib/translation_api.rb CHANGED
@@ -4,6 +4,7 @@ require "dotenv"
  require_relative "translation_api/version"
  require_relative "translation_api/config"
  require_relative "translation_api/provider/openai"
+ require_relative "translation_api/provider/gemini"
  require_relative "translation_api/provider/deepl"

  class TranslationAPI
@@ -24,8 +25,8 @@ class TranslationAPI
    end

    def initialize(**options)
-     use_provided_options(options)
      use_default_options
+     use_provided_options(options)

      @provider = init_provider(@provider)
    end
@@ -62,6 +63,13 @@ class TranslationAPI
        language: @language,
        custom_prompt: @custom_prompt
      )
+   when :gemini
+     Provider::Gemini.new(
+       output_logs: @output_logs,
+       except_words: @except_words,
+       language: @language,
+       custom_prompt: @custom_prompt
+     )
    when :deepl
      Provider::DeepL.new(
        pro: config.deepl_pro,
@@ -69,7 +77,7 @@ class TranslationAPI
        language: @language
      )
    else
-     raise UNSUPPORTED_PROVIDER_MESSAGE
+     raise ArgumentError, UNSUPPORTED_PROVIDER_MESSAGE
    end
  end
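
The `initialize` reordering is the CHANGELOG's 1.1.1 entry: defaults are now applied before caller-provided options, presumably so that user settings win instead of being overwritten by the defaults pass. In sketch form (method names from this diff, comments are an assumed reading of the unchanged method bodies):

```ruby
def initialize(**options)
  use_default_options           # e.g. sets @provider = :openai
  use_provided_options(options) # a caller's provider: :gemini now takes effect
  @provider = init_provider(@provider)
end
```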
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: translation_api
  version: !ruby/object:Gem::Version
-   version: 1.1.0
+   version: 1.2.0
  platform: ruby
  authors:
  - milkeclair
@@ -37,6 +37,20 @@ dependencies:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
+ - !ruby/object:Gem::Dependency
+   name: faraday
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  - !ruby/object:Gem::Dependency
    name: ruby-openai
    requirement: !ruby/object:Gem::Requirement
@@ -68,10 +82,20 @@ files:
  - how_to_publish.txt
  - lib/translation_api.rb
  - lib/translation_api/config.rb
+ - lib/translation_api/llm/log.rb
+ - lib/translation_api/llm/model.rb
+ - lib/translation_api/llm/prompt.rb
  - lib/translation_api/provider/deepl.rb
+ - lib/translation_api/provider/gemini.rb
+ - lib/translation_api/provider/gemini/chat.rb
+ - lib/translation_api/provider/gemini/cost.rb
+ - lib/translation_api/provider/gemini/model.rb
+ - lib/translation_api/provider/gemini/response.rb
  - lib/translation_api/provider/openai.rb
+ - lib/translation_api/provider/openai/chat.rb
  - lib/translation_api/provider/openai/cost.rb
- - lib/translation_api/provider/openai/log.rb
+ - lib/translation_api/provider/openai/model.rb
+ - lib/translation_api/provider/openai/response.rb
  - lib/translation_api/version.rb
  - rake_helper.rb
  homepage: https://github.com/milkeclair/translation_api
data/lib/translation_api/provider/openai/log.rb DELETED
@@ -1,99 +0,0 @@
- # frozen_string_literal: true
-
- require "fileutils"
- require_relative "cost"
-
- class TranslationAPI
-   module Provider
-     class OpenAI
-       class Log
-         def initialize(provider)
-           @provider = provider
-           @cost = Cost.new(@provider)
-         end
-
-         def write
-           write_translated_text
-           write_used_tokens
-           write_total_cost
-         end
-
-         private
-
-         def write_translated_text
-           log_file_path = text_path("translated_text.txt")
-
-           File.open(log_file_path, "a") do |file|
-             file.puts(@provider.translated_text)
-           end
-         end
-
-         def write_used_tokens
-           log_file_path = text_path("tokens.txt")
-
-           existing_input_tokens, existing_output_tokens = read_existing_tokens
-           tokens => { input_tokens:, output_tokens: }
-
-           total_input_tokens = existing_input_tokens + input_tokens
-           total_output_tokens = existing_output_tokens + output_tokens
-
-           File.open(log_file_path, "w") do |file|
-             file.puts("input: #{total_input_tokens}")
-             file.puts("output: #{total_output_tokens}")
-           end
-         end
-
-         def read_existing_tokens
-           log_file_path = text_path("tokens.txt")
-           input_tokens, output_tokens = 0, 0
-
-           if File.exist?(log_file_path)
-             File.readlines(log_file_path).each do |line|
-               tokens = line.split(":").last.strip.to_i
-               input_tokens = tokens if line.start_with?("input:")
-               output_tokens = tokens if line.start_with?("output:")
-             end
-           end
-
-           [input_tokens, output_tokens]
-         end
-
-         def write_total_cost
-           log_file_path = text_path("cost.txt")
-           tokens => { input_tokens:, output_tokens: }
-
-           this_cost = @cost.input_cost(input_tokens) + @cost.output_cost(output_tokens)
-           total_cost = this_cost + existing_cost
-
-           File.open(log_file_path, "w") do |file|
-             file.puts(format_cost(total_cost))
-           end
-         end
-
-         def format_cost(cost)
-           "$#{format("%.8f", cost)}"
-         end
-
-         def existing_cost
-           log_file_path = text_path("cost.txt")
-
-           File.exist?(log_file_path) ? File.read(log_file_path).gsub("$", "").to_f : 0.0
-         end
-
-         def tokens
-           {
-             input_tokens: @provider.dig_used_tokens(type: :input),
-             output_tokens: @provider.dig_used_tokens(type: :output)
-           }
-         end
-
-         def text_path(under_logs_path)
-           output_dir = "translator_logs/openai"
-           FileUtils.mkdir_p(output_dir) unless File.directory?(output_dir)
-
-           File.join(output_dir, under_logs_path)
-         end
-       end
-     end
-   end
- end