fastlane-plugin-translate_gpt_release_notes 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +352 -40
- data/lib/fastlane/plugin/translate_gpt_release_notes/actions/translate_gpt_release_notes_action.rb +80 -5
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/credential_resolver.rb +118 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/providers/anthropic_provider.rb +119 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/providers/base_provider.rb +161 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/providers/deepl_provider.rb +145 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/providers/gemini_provider.rb +153 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/providers/openai_provider.rb +136 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/providers/provider_factory.rb +148 -0
- data/lib/fastlane/plugin/translate_gpt_release_notes/helper/translate_gpt_release_notes_helper.rb +19 -40
- data/lib/fastlane/plugin/translate_gpt_release_notes/version.rb +1 -1
- metadata +52 -4
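
The new `helper/providers` files and `credential_resolver.rb` listed above introduce a provider abstraction, but their bodies are not reproduced on this page. Purely as orientation, a layer with these file names usually follows the shape sketched below; every class and method in the sketch is an assumption inferred from the file list, not the plugin's actual code.

```ruby
# Hypothetical sketch only: the shipped base_provider.rb and concrete providers are not shown in this diff.
module Fastlane
  module Helper
    module Providers
      # Presumed shared interface for the four concrete providers.
      class BaseProvider
        def initialize(api_key:, options: {})
          @api_key = api_key
          @options = options
        end

        # Presumed contract: return the translated text for a single target locale.
        def translate(text, to:)
          raise NotImplementedError, "#{self.class} must implement #translate"
        end
      end

      # Illustrative stand-in for a concrete provider such as openai_provider.rb.
      class DummyProvider < BaseProvider
        def translate(text, to:)
          "[#{to}] #{text}" # placeholder instead of a real API call
        end
      end
    end
  end
end
```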
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 409b1bcd2c73ee2accc11399a91ffb090bbecd3ae523950e1e15583327702d9e
+  data.tar.gz: 1ee83d987e74e9dbbb2e903940cdc74eff26476ab436f0637b2ff5681dddb455
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e64e5272e48df8a01e1a28b625d31a4c859ce7c1e3998a6116ddbb4eed11f9d157eb12e801090d3ee84e0a7175c632f86515f270a55c1e792777c0f8425236cb
+  data.tar.gz: 8f5c8d58a9ffc296c035946d303c3439877d24d39262f17231d7fd1d771054a247202ecddd91b1e6a6fc9877db3613c325bc59eec0e673e1c1f10be33fef6c87
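
The checksums above cover the `metadata.gz` and `data.tar.gz` members inside the published `.gem`, which is a plain tar archive. A minimal Ruby sketch to reproduce the SHA256 values locally, assuming the 0.2.0 gem has already been fetched into the current directory (for example with `gem fetch fastlane-plugin-translate_gpt_release_notes --version 0.2.0`):

```ruby
# Minimal verification sketch; assumes the .gem file is present in the current directory.
require 'digest'
require 'rubygems/package'

File.open('fastlane-plugin-translate_gpt_release_notes-0.2.0.gem', 'rb') do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
      # Compare these digests against the SHA256 entries in checksums.yaml above.
      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end
```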
data/README.md
CHANGED
@@ -6,7 +6,7 @@

 ## Getting Started

-This project is a [fastlane](https://github.com/fastlane/fastlane) plugin. To get started with `fastlane-plugin-
+This project is a [fastlane](https://github.com/fastlane/fastlane) plugin. To get started with `fastlane-plugin-translate_gpt_release_notes`, add it to your project by running:

 ```bash
 fastlane add_plugin translate_gpt_release_notes
@@ -15,87 +15,399 @@ fastlane add_plugin translate_gpt_release_notes
 ### Requirements

 - Ruby >= 3.1
-
+- API key for at least one supported translation provider

 **Note**: This plugin requires Ruby 3.1 or higher to ensure compatibility with the latest security patches in nokogiri.

 ## About translate-gpt-release-notes

-`translate-gpt-release-notes` is a fastlane plugin that allows you to translate release notes or changelogs for iOS and Android apps using
+`translate-gpt-release-notes` is a fastlane plugin that allows you to translate release notes or changelogs for iOS and Android apps using multiple AI translation providers. Based on [translate-gpt by ftp27](https://github.com/ftp27/fastlane-plugin-translate_gpt).

+### Supported Translation Providers

-
+The plugin now supports **4 translation providers**, giving you flexibility to choose based on cost, quality, and availability:

-
+| Provider | Best For | Quality | Cost | Speed |
+|----------|----------|---------|------|-------|
+| **OpenAI GPT** | General purpose, flexible translations | ⭐⭐⭐⭐⭐ | $$$ | Fast |
+| **Anthropic Claude** | High-quality, nuanced translations | ⭐⭐⭐⭐⭐ | $$$ | Medium |
+| **Google Gemini** | Cost-effective, high-volume translations | ⭐⭐⭐⭐ | $ | Fast |
+| **DeepL** | European languages, specialized translation | ⭐⭐⭐⭐⭐ | $$ | Fast |

-##
+## How it works

-
+`translate-gpt-release-notes` takes the changelog file for the master locale (default: en-US), detects other locales based on the fastlane metadata folder structure, translates the changelog to all other languages using your chosen AI provider, and creates localized `.txt` changelog files in their respective folders.
+
+## Quick Start
+
+### 1. Configure your API key
+
+Choose your preferred provider and set the corresponding environment variable:
+
+```bash
+# Option 1: OpenAI (default)
+export OPENAI_API_KEY='your-openai-api-key'
+# Or use the legacy variable (still supported)
+export GPT_API_KEY='your-openai-api-key'
+
+# Option 2: Anthropic Claude
+export ANTHROPIC_API_KEY='your-anthropic-api-key'
+
+# Option 3: Google Gemini
+export GEMINI_API_KEY='your-gemini-api-key'
+
+# Option 4: DeepL
+export DEEPL_API_KEY='your-deepl-api-key'
+```
+
+### 2. Use in your Fastfile

 ```ruby
-
-
-
-
-
-
-)
+lane :translate_release_notes do
+  translate_gpt_release_notes(
+    master_locale: 'en-US',
+    platform: 'ios',
+    context: 'This is an app about cute kittens'
+  )
 end
 ```

-##
+## Provider Selection

-
+### Default Provider

-
-| --- | --- | --- |
-| `api_token` | The API key for your OpenAI GPT account. | `GPT_API_KEY` |
-| `model_name` | Name of the ChatGPT model to use (default: gpt-4-1106-preview) | `GPT_MODEL_NAME` |
-| `temperature` | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Defaults to 0.5 | `GPT_TEMPERATURE` |
-| `request_timeout` | Timeout for the request in seconds. Defaults to 30 seconds | `GPT_REQUEST_TIMEOUT` |
-| `master_locale` | Master language/locale for the source texts | `MASTER_LOCALE` |
-| `context` | Context for translation to improve accuracy | `GPT_CONTEXT` |
-| `platform` | Platform for which to translate (ios or android, defaults to ios).| `PLATFORM` |
+By default, the plugin uses **OpenAI** as the translation provider. This ensures backward compatibility with existing setups.

-
+### Selecting a Provider
+
+You can explicitly select a provider using the `provider` parameter:
+
+```ruby
+# Use Anthropic Claude
+translate_gpt_release_notes(
+  provider: 'anthropic',
+  master_locale: 'en-US',
+  platform: 'ios'
+)

-
+# Use Google Gemini
+translate_gpt_release_notes(
+  provider: 'gemini',
+  master_locale: 'en-US',
+  platform: 'ios'
+)

-
+# Use DeepL
+translate_gpt_release_notes(
+  provider: 'deepl',
+  master_locale: 'en-US',
+  platform: 'ios'
+)
+```

-
+Or set the default provider via environment variable:
+
+```bash
+export TRANSLATION_PROVIDER='anthropic'
+```
+
+## Usage Examples by Provider
+
+### OpenAI (Default)

 ```ruby
 translate_gpt_release_notes(
-
+  provider: 'openai', # Optional, this is the default
+  openai_api_key: 'sk-...', # Or use OPENAI_API_KEY env var
+  model_name: 'gpt-5.2', # Default model
+  service_tier: 'flex', # Options: auto, default, flex, priority
+  temperature: 0.5, # 0-2, lower = more deterministic
   master_locale: 'en-US',
   platform: 'ios',
-  context: '
+  context: 'Fitness tracking app'
+)
+```
+
+### Anthropic Claude

+```ruby
+translate_gpt_release_notes(
+  provider: 'anthropic',
+  anthropic_api_key: 'sk-ant-...', # Or use ANTHROPIC_API_KEY env var
+  model_name: 'claude-sonnet-4.5', # Default model
+  temperature: 0.5, # 0-1 for Anthropic
+  master_locale: 'en-US',
+  platform: 'ios',
+  context: 'Finance management app'
 )
 ```

-###
+### Google Gemini
+
+```ruby
+translate_gpt_release_notes(
+  provider: 'gemini',
+  gemini_api_key: '...', # Or use GEMINI_API_KEY env var
+  model_name: 'gemini-2.5-flash', # Default model
+  temperature: 0.5, # 0-1 for Gemini
+  master_locale: 'en-US',
+  platform: 'android',
+  context: 'Social media app'
+)
+```
+
+### DeepL
+
+```ruby
+translate_gpt_release_notes(
+  provider: 'deepl',
+  deepl_api_key: '...', # Or use DEEPL_API_KEY env var
+  formality: 'less', # Options: default, more, less
+  master_locale: 'en-US',
+  platform: 'ios',
+  context: 'Casual gaming app'
+)
+```
+
+**Note**: DeepL automatically detects free vs paid API keys (free keys end with `:fx`) and uses the appropriate endpoint.
+
+## Options
+
+### Core Options
+
+| Key | Description | Environment Variable | Default |
+|-----|-------------|---------------------|---------|
+| `provider` | Translation provider to use (`openai`, `anthropic`, `gemini`, `deepl`) | `TRANSLATION_PROVIDER` | `openai` |
+| `master_locale` | Master language/locale for the source texts | `MASTER_LOCALE` | `en-US` |
+| `platform` | Platform (`ios` or `android`) | `PLATFORM` | `ios` |
+| `context` | Context for translation to improve accuracy | `GPT_CONTEXT` | - |
+
+### Provider-Specific API Keys
+
+| Key | Description | Environment Variable |
+|-----|-------------|---------------------|
+| `openai_api_key` | OpenAI API key | `OPENAI_API_KEY` or `GPT_API_KEY` |
+| `anthropic_api_key` | Anthropic API key | `ANTHROPIC_API_KEY` |
+| `gemini_api_key` | Google Gemini API key | `GEMINI_API_KEY` |
+| `deepl_api_key` | DeepL API key | `DEEPL_API_KEY` |
+
+### OpenAI-Specific Options
+
+| Key | Description | Environment Variable | Default |
+|-----|-------------|---------------------|---------|
+| `model_name` | OpenAI model to use | `GPT_MODEL_NAME` | `gpt-5.2` |
+| `service_tier` | Service tier: `auto`, `default`, `flex`, `priority` | `GPT_SERVICE_TIER` | - |
+| `temperature` | Sampling temperature (0-2) | `GPT_TEMPERATURE` | `0.5` |
+| `request_timeout` | Timeout in seconds (auto-bumped to 900s for flex) | `GPT_REQUEST_TIMEOUT` | `30` |
+
+### Anthropic-Specific Options
+
+| Key | Description | Environment Variable | Default |
+|-----|-------------|---------------------|---------|
+| `model_name` | Anthropic model to use | `ANTHROPIC_MODEL_NAME` | `claude-sonnet-4.5` |
+| `temperature` | Sampling temperature (0-1) | `ANTHROPIC_TEMPERATURE` | `0.5` |
+| `request_timeout` | Timeout in seconds | `ANTHROPIC_REQUEST_TIMEOUT` | `60` |
+
+### Google Gemini-Specific Options
+
+| Key | Description | Environment Variable | Default |
+|-----|-------------|---------------------|---------|
+| `model_name` | Gemini model to use | `GEMINI_MODEL_NAME` | `gemini-2.5-flash` |
+| `temperature` | Sampling temperature (0-1) | `GEMINI_TEMPERATURE` | `0.5` |
+| `request_timeout` | Timeout in seconds | `GEMINI_REQUEST_TIMEOUT` | `60` |
+
+### DeepL-Specific Options

-
+| Key | Description | Environment Variable | Default |
+|-----|-------------|---------------------|---------|
+| `formality` | Formality level: `default`, `more`, `less` | `DEEPL_FORMALITY` | `default` |
+| `request_timeout` | Timeout in seconds | `DEEPL_REQUEST_TIMEOUT` | `30` |
+
+## Authentication
+
+### Environment Variables (Recommended)
+
+The recommended approach is to set API keys via environment variables:

 ```bash
-export
+export OPENAI_API_KEY='sk-...'
+export ANTHROPIC_API_KEY='sk-ant-...'
+export GEMINI_API_KEY='...'
+export DEEPL_API_KEY='...'
 ```

-
+### Direct Parameters
+
+Alternatively, pass API keys directly (useful for CI/CD with secrets):

 ```ruby
 translate_gpt_release_notes(
+  provider: 'anthropic',
+  anthropic_api_key: ENV['ANTHROPIC_API_KEY'],
   master_locale: 'en-US',
-  platform: 'ios'
-
+  platform: 'ios'
+)
+```
+
+### Multiple Providers Configuration
+
+You can configure multiple providers simultaneously and switch between them:
+
+```bash
+# Set up all providers
+export OPENAI_API_KEY='sk-...'
+export ANTHROPIC_API_KEY='sk-ant-...'
+export GEMINI_API_KEY='...'
+
+# Default to Gemini for cost savings
+export TRANSLATION_PROVIDER='gemini'
+```
+
+## Migration Guide
+
+### From Single-Provider Setup (v0.1.x)
+
+If you're upgrading from a previous version that only supported OpenAI:
+
+1. **No breaking changes** - Your existing setup will continue to work
+2. **Existing `GPT_API_KEY` still works** - No need to rename your environment variable
+3. **Default provider is OpenAI** - All existing configurations work unchanged
+
+Optional improvements you can make:
+- Rename `GPT_API_KEY` to `OPENAI_API_KEY` for clarity (both work)
+- Set `TRANSLATION_PROVIDER` if you want to experiment with other providers
+- Try different providers for different lanes (e.g., Gemini for development, Claude for production)
+
+### Example Migration
+
+**Before:**
+```ruby
+translate_gpt_release_notes(
+  api_token: ENV['GPT_API_KEY'],
+  model_name: 'gpt-5.2',
+  master_locale: 'en-US'
 )
 ```
-## Important notes:

-
-
+**After** (still works, but cleaner):
+```ruby
+translate_gpt_release_notes(
+  provider: 'openai',
+  master_locale: 'en-US'
+)
+```
+
+## Important Notes
+
+### Android 500 Character Limit
+
+Android has a limit of 500 characters for changelogs. The plugin handles this in two ways:
+
+1. **AI Providers (OpenAI, Anthropic, Gemini)**: The character limit is included in the translation prompt, asking the AI to stay within the limit
+2. **DeepL**: Translations are truncated to 500 characters with a warning if they exceed the limit
+
+If you frequently hit the limit, consider shortening your master locale changelog.
+
+### iOS Character Limit
+
+iOS has a limit of 4000 characters, which is rarely an issue for release notes.
+
+### Cost Considerations
+
+All AI translation APIs cost money. Consider these tips:
+
+- Use `service_tier: 'flex'` with OpenAI for lower prices (trades latency for cost)
+- Google Gemini is generally the most cost-effective option
+- DeepL offers competitive pricing for European languages
+- The plugin skips translation if the source file hasn't changed (tracked via `last_successful_run.txt`)
+
+### Service Tiers (OpenAI)
+
+| Tier | Description | Use Case |
+|------|-------------|----------|
+| `auto` | Automatic tier selection | General use |
+| `default` | Standard processing | Urgent translations |
+| `flex` | Lower cost, higher latency | Non-urgent translations |
+| `priority` | Premium processing | Critical releases |
+
+**Note**: When using `flex`, the plugin automatically increases `request_timeout` to 900 seconds if set lower.
+
+## Troubleshooting
+
+### "No translation provider credentials configured"
+
+**Cause**: No API keys are set for any provider.
+
+**Solution**: Set at least one provider's API key:
+```bash
+export OPENAI_API_KEY='your-key-here'
+```
+
+### "Provider 'X' has no credentials"
+
+**Cause**: You specified a provider but haven't configured its API key.
+
+**Solution**: Either configure the provider's API key or switch to a provider with configured credentials.
+
+### "Invalid provider 'X'"
+
+**Cause**: The provider name is not recognized.
+
+**Solution**: Use one of the valid provider names: `openai`, `anthropic`, `gemini`, `deepl`.
+
+### Translations Exceed Android Character Limit
+
+**Cause**: The translated text is longer than 500 characters.
+
+**Solutions**:
+1. Shorten your source changelog
+2. For DeepL, translations are automatically truncated
+3. For AI providers, the prompt includes the limit but compliance isn't guaranteed
+
+### API Timeout Errors
+
+**Cause**: The translation request is taking too long.
+
+**Solutions**:
+1. Increase `request_timeout` parameter
+2. For OpenAI flex tier, timeout is automatically increased to 900s
+3. Consider using a faster provider (Gemini or DeepL)
+
+### Slow Translations with Flex Tier
+
+**Cause**: Flex tier trades latency for lower cost.
+
+**Solution**: This is expected behavior. If speed is critical, use `service_tier: 'default'` or `service_tier: 'priority'`.
+
+## Provider Comparison Details
+
+### When to Use Each Provider
+
+**OpenAI GPT**
+- ✅ Best for general-purpose translations
+- ✅ Flexible and customizable
+- ✅ Supports service tiers for cost control
+- ❌ Can be expensive for high volume
+
+**Anthropic Claude**
+- ✅ Highest quality nuanced translations
+- ✅ Excellent for complex or technical content
+- ✅ Strong reasoning capabilities
+- ❌ Slower than other options
+- ❌ Higher cost
+
+**Google Gemini**
+- ✅ Most cost-effective
+- ✅ Fast response times
+- ✅ Good quality for standard content
+- ❌ May struggle with very nuanced content
+
+**DeepL**
+- ✅ Best for European languages
+- ✅ Purpose-built for translation
+- ✅ Formality control
+- ❌ Limited language support compared to AI providers
+- ❌ May not handle app-specific context as well

 ## Issues and Feedback

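
The migration guide above suggests using different providers for different lanes but leaves the lane definitions to the reader. A minimal Fastfile sketch using only the documented `provider`, `master_locale`, and `platform` options (the lane names here are made up):

```ruby
# Fastfile sketch: per-lane provider choice. Lane names are illustrative only.
platform :ios do
  lane :beta_release_notes do
    # Cheaper provider while iterating on beta builds.
    translate_gpt_release_notes(provider: 'gemini', master_locale: 'en-US', platform: 'ios')
  end

  lane :store_release_notes do
    # Higher-quality provider for the App Store release.
    translate_gpt_release_notes(provider: 'anthropic', master_locale: 'en-US', platform: 'ios')
  end
end
```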
data/lib/fastlane/plugin/translate_gpt_release_notes/actions/translate_gpt_release_notes_action.rb
CHANGED
@@ -1,12 +1,24 @@
 require 'fastlane/action'
-require 'openai'
 require_relative '../helper/translate_gpt_release_notes_helper'
+require_relative '../helper/credential_resolver'
+require_relative '../helper/providers/provider_factory'
 require 'fileutils'

 module Fastlane
   module Actions
     class TranslateGptReleaseNotesAction < Action
       def self.run(params)
+        provider_name = params[:provider] || 'openai'
+
+        unless Helper::CredentialResolver.credentials_exist?(provider_name, params)
+          available = Helper::CredentialResolver.available_providers(params)
+          if available.empty?
+            UI.user_error!("No translation provider credentials configured. Set one of: OPENAI_API_KEY, ANTHROPIC_API_KEY, GEMINI_API_KEY, or DEEPL_API_KEY")
+          else
+            UI.user_error!("Provider '#{provider_name}' has no credentials. Available providers: #{available.join(', ')}")
+          end
+        end
+
         # Define the path for the last run time file
         last_run_file = "last_successful_run.txt"

@@ -98,11 +110,60 @@ module Fastlane
       end

       def self.description
-        "Translate release notes
+        "Translate release notes using AI providers: OpenAI, Claude, Gemini, or DeepL"
       end

       def self.available_options
         [
+          FastlaneCore::ConfigItem.new(
+            key: :provider,
+            env_name: 'TRANSLATION_PROVIDER',
+            description: "Translation provider to use (#{Helper::Providers::ProviderFactory.available_provider_names.join(', ')})",
+            type: String,
+            default_value: 'openai',
+            verify_block: proc do |value|
+              unless Helper::Providers::ProviderFactory.valid_provider?(value)
+                available = Helper::Providers::ProviderFactory.available_provider_names.join(', ')
+                UI.user_error!("Invalid provider '#{value}'. Available: #{available}")
+              end
+            end
+          ),
+          FastlaneCore::ConfigItem.new(
+            key: :openai_api_key,
+            env_name: 'OPENAI_API_KEY',
+            description: 'OpenAI API key (alternative to environment variable)',
+            sensitive: true,
+            code_gen_sensitive: true,
+            optional: true,
+            default_value: nil
+          ),
+          FastlaneCore::ConfigItem.new(
+            key: :anthropic_api_key,
+            env_name: 'ANTHROPIC_API_KEY',
+            description: 'Anthropic API key (alternative to environment variable)',
+            sensitive: true,
+            code_gen_sensitive: true,
+            optional: true,
+            default_value: nil
+          ),
+          FastlaneCore::ConfigItem.new(
+            key: :gemini_api_key,
+            env_name: 'GEMINI_API_KEY',
+            description: 'Google Gemini API key (alternative to environment variable)',
+            sensitive: true,
+            code_gen_sensitive: true,
+            optional: true,
+            default_value: nil
+          ),
+          FastlaneCore::ConfigItem.new(
+            key: :deepl_api_key,
+            env_name: 'DEEPL_API_KEY',
+            description: 'DeepL API key (alternative to environment variable)',
+            sensitive: true,
+            code_gen_sensitive: true,
+            optional: true,
+            default_value: nil
+          ),
           FastlaneCore::ConfigItem.new(
             key: :api_token,
             env_name: "GPT_API_KEY",
@@ -114,13 +175,27 @@ module Fastlane
           FastlaneCore::ConfigItem.new(
             key: :model_name,
             env_name: "GPT_MODEL_NAME",
-            description: "Name of the
-            default_value: "gpt-
+            description: "Name of the AI model to use (provider-specific)",
+            default_value: "gpt-5.2"
+          ),
+          FastlaneCore::ConfigItem.new(
+            key: :service_tier,
+            env_name: "GPT_SERVICE_TIER",
+            description: "OpenAI service tier to use (auto, default, flex, or priority)",
+            type: String,
+            optional: true,
+            verify_block: proc do |value|
+              next if value.nil? || value.to_s.strip.empty?
+              allowed_values = %w[auto default flex priority]
+              unless allowed_values.include?(value)
+                UI.user_error!("Invalid service_tier '#{value}'. Allowed values: #{allowed_values.join(', ')}")
+              end
+            end
           ),
           FastlaneCore::ConfigItem.new(
             key: :request_timeout,
             env_name: "GPT_REQUEST_TIMEOUT",
-            description: "Timeout for the request in seconds",
+            description: "Timeout for the request in seconds (auto-bumped to 900s for flex if lower)",
             type: Integer,
             default_value: 30
           ),
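
The action above calls into `Helper::CredentialResolver` and `Helper::Providers::ProviderFactory`, but those helper files (+118 and +148 lines) are not reproduced in this diff. A hedged sketch of the minimal behavior the action appears to rely on, with the environment-variable mapping taken from the README and everything else assumed:

```ruby
# Hypothetical sketch only: the shipped credential_resolver.rb and provider_factory.rb are not shown here.
module Fastlane
  module Helper
    module CredentialResolver
      # Env var names come from the README; the param keys match the ConfigItems above.
      ENV_KEYS = {
        'openai'    => %w[OPENAI_API_KEY GPT_API_KEY],
        'anthropic' => %w[ANTHROPIC_API_KEY],
        'gemini'    => %w[GEMINI_API_KEY],
        'deepl'     => %w[DEEPL_API_KEY]
      }.freeze

      PARAM_KEYS = {
        'openai'    => :openai_api_key,
        'anthropic' => :anthropic_api_key,
        'gemini'    => :gemini_api_key,
        'deepl'     => :deepl_api_key
      }.freeze

      def self.credentials_exist?(provider, params)
        key = params[PARAM_KEYS[provider]] ||
              ENV_KEYS.fetch(provider, []).map { |name| ENV[name] }.compact.first
        !key.to_s.strip.empty?
      end

      def self.available_providers(params)
        ENV_KEYS.keys.select { |provider| credentials_exist?(provider, params) }
      end
    end

    module Providers
      module ProviderFactory
        PROVIDER_NAMES = %w[openai anthropic gemini deepl].freeze

        def self.available_provider_names
          PROVIDER_NAMES
        end

        def self.valid_provider?(name)
          PROVIDER_NAMES.include?(name.to_s.downcase)
        end
      end
    end
  end
end
```

The documented flex-tier behavior (raising `request_timeout` to 900 seconds when it is set lower) would live in the OpenAI provider itself, which is likewise not part of this diff.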