fastlane-plugin-translate_gpt_release_notes 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9292f61608ec4eabd904b13013fc6ca8c58b601db08b82110eef89834b52031f
4
- data.tar.gz: 5a4fce7ea1f6b687931d6c048ad24e679501da4181deaa34c9a1cb34cd5fa79a
3
+ metadata.gz: e58b5b90b41cfb4c9865a1749550d3ebc9a5af135fca5ca666131a94021e2f8a
4
+ data.tar.gz: f847030d39a98cd54fdf50d19c86b4b37e64e2eb19366145cf3e8bfd5110f4f4
5
5
  SHA512:
6
- metadata.gz: 02f219ddcc5415f1e9a4cd71ead1cdc5ba9b0210492ae68d934ed77fa400cffc4161af39432b97f1e9fe6a1fbb041845ebcbfd233a6f749dee68d5c23754f6c7
7
- data.tar.gz: d2774d04548657ee0975cafb89b5f6711c5e3aef7d2355ce0c5dc7c26e59897b43b8193dc5cb8241d0df9c1527eb2d8a717288ee59f5f70a7d18cf36ce1ec063
6
+ metadata.gz: df687f2d1dc2d26c7bc9364eb870ac06f8ae89faf38371cc8981e973d599350b21de5d88e256424ad89bd6439aa9c24349b8e3f80beb6b31c29cb32bcc7f7662
7
+ data.tar.gz: 6c5f4587ef5620db5e1027591753b8c2596857f3aac1b5717610ac16cd77e439f3787517851ed0fecd5e2d46aa25a2ba1383b879aa111fa7968d01ea437175e7
data/README.md CHANGED
@@ -37,7 +37,10 @@ The following example demonstrates how to use `translate-gpt-release-notes` in a
37
37
  translate_gpt_release_notes(
38
38
  master_locale: 'en-US',
39
39
  platform: 'ios',
40
- context: 'This is an app about cute kittens'
40
+ context: 'This is an app about cute kittens',
41
+ model_name: 'gpt-5.2',
42
+ service_tier: 'flex',
43
+ request_timeout: 900
41
44
  # other parameters...
42
45
  )
43
46
  end
@@ -50,9 +53,10 @@ The following options are available for `translate-gpt-release-notes`:
50
53
  | Key | Description | Environment Variable |
51
54
  | --- | --- | --- |
52
55
  | `api_token` | The API key for your OpenAI GPT account. | `GPT_API_KEY` |
53
- | `model_name` | Name of the ChatGPT model to use (default: gpt-4-1106-preview) | `GPT_MODEL_NAME` |
56
+ | `model_name` | Name of the ChatGPT model to use (default: gpt-5.2) | `GPT_MODEL_NAME` |
57
+ | `service_tier` | OpenAI service tier to use (auto, default, flex, or priority). | `GPT_SERVICE_TIER` |
54
58
  | `temperature` | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Defaults to 0.5 | `GPT_TEMPERATURE` |
55
- | `request_timeout` | Timeout for the request in seconds. Defaults to 30 seconds | `GPT_REQUEST_TIMEOUT` |
59
+ | `request_timeout` | Timeout for the request in seconds. Defaults to 30 seconds. If `service_tier` is `flex` and this is lower than 900, the plugin increases it to 900. | `GPT_REQUEST_TIMEOUT` |
56
60
  | `master_locale` | Master language/locale for the source texts | `MASTER_LOCALE` |
57
61
  | `context` | Context for translation to improve accuracy | `GPT_CONTEXT` |
58
62
  | `platform` | Platform for which to translate (ios or android, defaults to ios).| `PLATFORM` |
@@ -70,6 +74,7 @@ translate_gpt_release_notes(
70
74
  api_token: 'YOUR_API_KEY',
71
75
  master_locale: 'en-US',
72
76
  platform: 'ios',
77
+ model_name: 'gpt-5.2',
73
78
  context: 'This is an app about cute kittens'
74
79
 
75
80
  )
@@ -89,6 +94,7 @@ And then call `translate-gpt-release-notes` without specifying an API key:
89
94
  translate_gpt_release_notes(
90
95
  master_locale: 'en-US',
91
96
  platform: 'ios',
97
+ model_name: 'gpt-5.2',
92
98
  context: 'This is an app about cute kittens'
93
99
  )
94
100
  ```
@@ -96,6 +102,8 @@ translate_gpt_release_notes(
96
102
 
97
103
  1. Android has a limit of 500 symbols for changelogs and sometimes translations can exceed this number, which leads to Google API errors when submitting the app. Plugin **tries** to handle this, however errors happen. Reducing the length of master_locale changelog usually helps. iOS has a limit of 4000 symbols, which is plenty.
98
104
  2. OpenAI API usage cost money, keep it in mind.
105
+ 3. If you use `service_tier: 'flex'`, the plugin increases `request_timeout` to 900s when it is set lower.
106
+ 4. Hint: Flex processing trades higher latency for lower prices, which can reduce costs for non-urgent translations.
99
107
 
100
108
  ## Issues and Feedback
101
109
 
@@ -115,12 +115,26 @@ module Fastlane
115
115
  key: :model_name,
116
116
  env_name: "GPT_MODEL_NAME",
117
117
  description: "Name of the ChatGPT model to use",
118
- default_value: "gpt-4-turbo-preview"
118
+ default_value: "gpt-5.2"
119
+ ),
120
+ FastlaneCore::ConfigItem.new(
121
+ key: :service_tier,
122
+ env_name: "GPT_SERVICE_TIER",
123
+ description: "OpenAI service tier to use (auto, default, flex, or priority)",
124
+ type: String,
125
+ optional: true,
126
+ verify_block: proc do |value|
127
+ next if value.nil? || value.to_s.strip.empty?
128
+ allowed_values = %w[auto default flex priority]
129
+ unless allowed_values.include?(value)
130
+ UI.user_error!("Invalid service_tier '#{value}'. Allowed values: #{allowed_values.join(', ')}")
131
+ end
132
+ end
119
133
  ),
120
134
  FastlaneCore::ConfigItem.new(
121
135
  key: :request_timeout,
122
136
  env_name: "GPT_REQUEST_TIMEOUT",
123
- description: "Timeout for the request in seconds",
137
+ description: "Timeout for the request in seconds (auto-bumped to 900s for flex if lower)",
124
138
  type: Integer,
125
139
  default_value: 30
126
140
  ),
@@ -9,9 +9,10 @@ module Fastlane
9
9
  class TranslateGptReleaseNotesHelper
10
10
  def initialize(params)
11
11
  @params = params
12
+ @params[:request_timeout] = normalize_request_timeout(@params)
12
13
  @client = OpenAI::Client.new(
13
14
  access_token: params[:api_token],
14
- request_timeout: params[:request_timeout]
15
+ request_timeout: @params[:request_timeout]
15
16
  )
16
17
  end
17
18
 
@@ -32,13 +33,16 @@ module Fastlane
32
33
  end
33
34
 
34
35
  # Updated API call with max_tokens
35
- response = @client.chat(
36
- parameters: {
37
- model: @params[:model_name] || 'gpt-4-1106-preview',
38
- messages: [{ role: "user", content: prompt }],
39
- temperature: @params[:temperature] || 0.5
40
- }
41
- )
36
+ parameters = {
37
+ model: @params[:model_name] || 'gpt-5.2',
38
+ messages: [{ role: "user", content: prompt }],
39
+ temperature: @params[:temperature] || 0.5
40
+ }
41
+
42
+ service_tier = @params[:service_tier].to_s.strip
43
+ parameters[:service_tier] = service_tier unless service_tier.empty?
44
+
45
+ response = @client.chat(parameters: parameters)
42
46
 
43
47
 
44
48
  error = response.dig("error", "message")
@@ -51,6 +55,18 @@ module Fastlane
51
55
  return translated_text
52
56
  end
53
57
  end
58
+
59
+ def normalize_request_timeout(params)
60
+ service_tier = params[:service_tier].to_s.strip
61
+ raw_timeout = params[:request_timeout]
62
+ return nil if raw_timeout.nil?
63
+ timeout = raw_timeout.to_i
64
+ if service_tier == "flex" && timeout > 0 && timeout < 900
65
+ UI.message("Flex processing detected; increasing request_timeout to 900s.")
66
+ return 900
67
+ end
68
+ timeout
69
+ end
54
70
 
55
71
  # Sleep for a specified number of seconds, displaying a progress bar
56
72
  def wait(seconds = @params[:request_timeout])
@@ -1,5 +1,5 @@
1
1
  module Fastlane
2
2
  module TranslateGptReleaseNotes
3
- VERSION = "0.0.3"
3
+ VERSION = "0.1.1"
4
4
  end
5
5
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: fastlane-plugin-translate_gpt_release_notes
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.0
4
+ version: 0.1.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Anton Karliner
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2025-09-11 00:00:00.000000000 Z
11
+ date: 2026-01-07 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: ruby-openai