@llm-translate/cli 1.0.0-next.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.dockerignore +51 -0
- package/.env.example +33 -0
- package/.github/workflows/docs-pages.yml +57 -0
- package/.github/workflows/release.yml +49 -0
- package/.translaterc.json +44 -0
- package/CLAUDE.md +243 -0
- package/Dockerfile +55 -0
- package/README.md +371 -0
- package/RFC.md +1595 -0
- package/dist/cli/index.d.ts +2 -0
- package/dist/cli/index.js +4494 -0
- package/dist/cli/index.js.map +1 -0
- package/dist/index.d.ts +1152 -0
- package/dist/index.js +3841 -0
- package/dist/index.js.map +1 -0
- package/docker-compose.yml +56 -0
- package/docs/.vitepress/config.ts +161 -0
- package/docs/api/agent.md +262 -0
- package/docs/api/engine.md +274 -0
- package/docs/api/index.md +171 -0
- package/docs/api/providers.md +304 -0
- package/docs/changelog.md +64 -0
- package/docs/cli/dir.md +243 -0
- package/docs/cli/file.md +213 -0
- package/docs/cli/glossary.md +273 -0
- package/docs/cli/index.md +129 -0
- package/docs/cli/init.md +158 -0
- package/docs/cli/serve.md +211 -0
- package/docs/glossary.json +235 -0
- package/docs/guide/chunking.md +272 -0
- package/docs/guide/configuration.md +139 -0
- package/docs/guide/cost-optimization.md +237 -0
- package/docs/guide/docker.md +371 -0
- package/docs/guide/getting-started.md +150 -0
- package/docs/guide/glossary.md +241 -0
- package/docs/guide/index.md +86 -0
- package/docs/guide/ollama.md +515 -0
- package/docs/guide/prompt-caching.md +221 -0
- package/docs/guide/providers.md +232 -0
- package/docs/guide/quality-control.md +206 -0
- package/docs/guide/vitepress-integration.md +265 -0
- package/docs/index.md +63 -0
- package/docs/ja/api/agent.md +262 -0
- package/docs/ja/api/engine.md +274 -0
- package/docs/ja/api/index.md +171 -0
- package/docs/ja/api/providers.md +304 -0
- package/docs/ja/changelog.md +64 -0
- package/docs/ja/cli/dir.md +243 -0
- package/docs/ja/cli/file.md +213 -0
- package/docs/ja/cli/glossary.md +273 -0
- package/docs/ja/cli/index.md +111 -0
- package/docs/ja/cli/init.md +158 -0
- package/docs/ja/guide/chunking.md +271 -0
- package/docs/ja/guide/configuration.md +139 -0
- package/docs/ja/guide/cost-optimization.md +30 -0
- package/docs/ja/guide/getting-started.md +150 -0
- package/docs/ja/guide/glossary.md +214 -0
- package/docs/ja/guide/index.md +32 -0
- package/docs/ja/guide/ollama.md +410 -0
- package/docs/ja/guide/prompt-caching.md +221 -0
- package/docs/ja/guide/providers.md +232 -0
- package/docs/ja/guide/quality-control.md +137 -0
- package/docs/ja/guide/vitepress-integration.md +265 -0
- package/docs/ja/index.md +58 -0
- package/docs/ko/api/agent.md +262 -0
- package/docs/ko/api/engine.md +274 -0
- package/docs/ko/api/index.md +171 -0
- package/docs/ko/api/providers.md +304 -0
- package/docs/ko/changelog.md +64 -0
- package/docs/ko/cli/dir.md +243 -0
- package/docs/ko/cli/file.md +213 -0
- package/docs/ko/cli/glossary.md +273 -0
- package/docs/ko/cli/index.md +111 -0
- package/docs/ko/cli/init.md +158 -0
- package/docs/ko/guide/chunking.md +271 -0
- package/docs/ko/guide/configuration.md +139 -0
- package/docs/ko/guide/cost-optimization.md +30 -0
- package/docs/ko/guide/getting-started.md +150 -0
- package/docs/ko/guide/glossary.md +214 -0
- package/docs/ko/guide/index.md +32 -0
- package/docs/ko/guide/ollama.md +410 -0
- package/docs/ko/guide/prompt-caching.md +221 -0
- package/docs/ko/guide/providers.md +232 -0
- package/docs/ko/guide/quality-control.md +137 -0
- package/docs/ko/guide/vitepress-integration.md +265 -0
- package/docs/ko/index.md +58 -0
- package/docs/zh/api/agent.md +262 -0
- package/docs/zh/api/engine.md +274 -0
- package/docs/zh/api/index.md +171 -0
- package/docs/zh/api/providers.md +304 -0
- package/docs/zh/changelog.md +64 -0
- package/docs/zh/cli/dir.md +243 -0
- package/docs/zh/cli/file.md +213 -0
- package/docs/zh/cli/glossary.md +273 -0
- package/docs/zh/cli/index.md +111 -0
- package/docs/zh/cli/init.md +158 -0
- package/docs/zh/guide/chunking.md +271 -0
- package/docs/zh/guide/configuration.md +139 -0
- package/docs/zh/guide/cost-optimization.md +30 -0
- package/docs/zh/guide/getting-started.md +150 -0
- package/docs/zh/guide/glossary.md +214 -0
- package/docs/zh/guide/index.md +32 -0
- package/docs/zh/guide/ollama.md +410 -0
- package/docs/zh/guide/prompt-caching.md +221 -0
- package/docs/zh/guide/providers.md +232 -0
- package/docs/zh/guide/quality-control.md +137 -0
- package/docs/zh/guide/vitepress-integration.md +265 -0
- package/docs/zh/index.md +58 -0
- package/package.json +91 -0
- package/release.config.mjs +15 -0
- package/schemas/glossary.schema.json +110 -0
- package/src/cli/commands/dir.ts +469 -0
- package/src/cli/commands/file.ts +291 -0
- package/src/cli/commands/glossary.ts +221 -0
- package/src/cli/commands/init.ts +68 -0
- package/src/cli/commands/serve.ts +60 -0
- package/src/cli/index.ts +64 -0
- package/src/cli/options.ts +59 -0
- package/src/core/agent.ts +1119 -0
- package/src/core/chunker.ts +391 -0
- package/src/core/engine.ts +634 -0
- package/src/errors.ts +188 -0
- package/src/index.ts +147 -0
- package/src/integrations/vitepress.ts +549 -0
- package/src/parsers/markdown.ts +383 -0
- package/src/providers/claude.ts +259 -0
- package/src/providers/interface.ts +109 -0
- package/src/providers/ollama.ts +379 -0
- package/src/providers/openai.ts +308 -0
- package/src/providers/registry.ts +153 -0
- package/src/server/index.ts +152 -0
- package/src/server/middleware/auth.ts +93 -0
- package/src/server/middleware/logger.ts +90 -0
- package/src/server/routes/health.ts +84 -0
- package/src/server/routes/translate.ts +210 -0
- package/src/server/types.ts +138 -0
- package/src/services/cache.ts +899 -0
- package/src/services/config.ts +217 -0
- package/src/services/glossary.ts +247 -0
- package/src/types/analysis.ts +164 -0
- package/src/types/index.ts +265 -0
- package/src/types/modes.ts +121 -0
- package/src/types/mqm.ts +157 -0
- package/src/utils/logger.ts +141 -0
- package/src/utils/tokens.ts +116 -0
- package/tests/fixtures/glossaries/ml-glossary.json +53 -0
- package/tests/fixtures/input/lynq-installation.ko.md +350 -0
- package/tests/fixtures/input/lynq-installation.md +350 -0
- package/tests/fixtures/input/simple.ko.md +27 -0
- package/tests/fixtures/input/simple.md +27 -0
- package/tests/unit/chunker.test.ts +229 -0
- package/tests/unit/glossary.test.ts +146 -0
- package/tests/unit/markdown.test.ts +205 -0
- package/tests/unit/tokens.test.ts +81 -0
- package/tsconfig.json +28 -0
- package/tsup.config.ts +34 -0
- package/vitest.config.ts +16 -0
|
@@ -0,0 +1,304 @@
|
|
|
1
|
+
# Providers
|
|
2
|
+
|
|
3
|
+
::: info Translations
|
|
4
|
+
All non-English documentation is automatically translated using Claude Sonnet 4.5.
|
|
5
|
+
:::
|
|
6
|
+
|
|
7
|
+
LLM provider implementations for different AI services.
|
|
8
|
+
|
|
9
|
+
## Overview
|
|
10
|
+
|
|
11
|
+
All providers implement the `LLMProvider` interface:
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
interface LLMProvider {
|
|
15
|
+
readonly name: ProviderName;
|
|
16
|
+
readonly defaultModel: string;
|
|
17
|
+
|
|
18
|
+
chat(request: ChatRequest): Promise<ChatResponse>;
|
|
19
|
+
stream(request: ChatRequest): AsyncIterable<string>;
|
|
20
|
+
countTokens(text: string): number;
|
|
21
|
+
getModelInfo(model?: string): ModelInfo;
|
|
22
|
+
}
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
## Claude Provider
|
|
26
|
+
|
|
27
|
+
The recommended provider, with full support for prompt caching.
|
|
28
|
+
|
|
29
|
+
### Setup
|
|
30
|
+
|
|
31
|
+
```typescript
|
|
32
|
+
import { createClaudeProvider } from '@llm-translate/cli';
|
|
33
|
+
|
|
34
|
+
const provider = createClaudeProvider({
|
|
35
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
36
|
+
defaultModel: 'claude-haiku-4-5-20251001',
|
|
37
|
+
});
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
### Configuration
|
|
41
|
+
|
|
42
|
+
```typescript
|
|
43
|
+
interface ClaudeProviderConfig {
|
|
44
|
+
apiKey?: string; // Defaults to ANTHROPIC_API_KEY env
|
|
45
|
+
baseUrl?: string; // Custom API endpoint
|
|
46
|
+
defaultModel?: string; // Default: claude-haiku-4-5-20251001
|
|
47
|
+
}
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
### Available Models
|
|
51
|
+
|
|
52
|
+
| Model | Context | Input Cost | Output Cost |
|
|
53
|
+
|-------|---------|------------|-------------|
|
|
54
|
+
| `claude-haiku-4-5-20251001` | 200K | $0.001/1K | $0.005/1K |
|
|
55
|
+
| `claude-sonnet-4-5-20250929` | 200K | $0.003/1K | $0.015/1K |
|
|
56
|
+
| `claude-opus-4-5-20251101` | 200K | $0.015/1K | $0.075/1K |
|
|
57
|
+
|
|
58
|
+
### Prompt Caching
|
|
59
|
+
|
|
60
|
+
Claude provider supports prompt caching automatically:
|
|
61
|
+
|
|
62
|
+
```typescript
|
|
63
|
+
const response = await provider.chat({
|
|
64
|
+
messages: [
|
|
65
|
+
{
|
|
66
|
+
role: 'user',
|
|
67
|
+
content: [
|
|
68
|
+
{
|
|
69
|
+
type: 'text',
|
|
70
|
+
text: 'System instructions...',
|
|
71
|
+
cacheControl: { type: 'ephemeral' }, // Cache this
|
|
72
|
+
},
|
|
73
|
+
{
|
|
74
|
+
type: 'text',
|
|
75
|
+
text: 'User content...', // Don't cache
|
|
76
|
+
},
|
|
77
|
+
],
|
|
78
|
+
},
|
|
79
|
+
],
|
|
80
|
+
});
|
|
81
|
+
|
|
82
|
+
console.log(response.usage);
|
|
83
|
+
// {
|
|
84
|
+
// inputTokens: 100,
|
|
85
|
+
// outputTokens: 200,
|
|
86
|
+
// cacheReadTokens: 500, // Tokens read from cache
|
|
87
|
+
// cacheWriteTokens: 0, // Tokens written to cache
|
|
88
|
+
// }
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
## OpenAI Provider
|
|
92
|
+
|
|
93
|
+
### Setup
|
|
94
|
+
|
|
95
|
+
```typescript
|
|
96
|
+
import { createOpenAIProvider } from '@llm-translate/cli';
|
|
97
|
+
|
|
98
|
+
const provider = createOpenAIProvider({
|
|
99
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
100
|
+
defaultModel: 'gpt-4o-mini',
|
|
101
|
+
});
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### Configuration
|
|
105
|
+
|
|
106
|
+
```typescript
|
|
107
|
+
interface OpenAIProviderConfig {
|
|
108
|
+
apiKey?: string; // Defaults to OPENAI_API_KEY env
|
|
109
|
+
baseUrl?: string; // Custom API endpoint
|
|
110
|
+
defaultModel?: string; // Default: gpt-4o-mini
|
|
111
|
+
organization?: string; // OpenAI organization ID
|
|
112
|
+
}
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
### Available Models
|
|
116
|
+
|
|
117
|
+
| Model | Context | Input Cost | Output Cost |
|
|
118
|
+
|-------|---------|------------|-------------|
|
|
119
|
+
| `gpt-4o-mini` | 128K | $0.00015/1K | $0.0006/1K |
|
|
120
|
+
| `gpt-4o` | 128K | $0.0025/1K | $0.01/1K |
|
|
121
|
+
| `gpt-4-turbo` | 128K | $0.01/1K | $0.03/1K |
|
|
122
|
+
|
|
123
|
+
### Automatic Caching
|
|
124
|
+
|
|
125
|
+
OpenAI handles caching automatically for prompts of 1024 tokens or more.
|
|
126
|
+
|
|
127
|
+
## Ollama Provider
|
|
128
|
+
|
|
129
|
+
For local, self-hosted models.
|
|
130
|
+
|
|
131
|
+
### Setup
|
|
132
|
+
|
|
133
|
+
```typescript
|
|
134
|
+
import { createOllamaProvider } from '@llm-translate/cli';
|
|
135
|
+
|
|
136
|
+
const provider = createOllamaProvider({
|
|
137
|
+
baseUrl: 'http://localhost:11434',
|
|
138
|
+
defaultModel: 'llama3.1',
|
|
139
|
+
});
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
### Configuration
|
|
143
|
+
|
|
144
|
+
```typescript
|
|
145
|
+
interface OllamaProviderConfig {
|
|
146
|
+
baseUrl?: string; // Default: http://localhost:11434
|
|
147
|
+
defaultModel?: string; // Default: llama3.1
|
|
148
|
+
}
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
### Available Models
|
|
152
|
+
|
|
153
|
+
Any model available in your Ollama installation:
|
|
154
|
+
|
|
155
|
+
```bash
|
|
156
|
+
# List available models
|
|
157
|
+
ollama list
|
|
158
|
+
|
|
159
|
+
# Pull a model
|
|
160
|
+
ollama pull llama3.1
|
|
161
|
+
ollama pull mistral
|
|
162
|
+
ollama pull codellama
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
### Limitations
|
|
166
|
+
|
|
167
|
+
- No prompt caching support
|
|
168
|
+
- Quality varies by model
|
|
169
|
+
- Limited context window (model-dependent)
|
|
170
|
+
|
|
171
|
+
## Provider Interface
|
|
172
|
+
|
|
173
|
+
### ChatRequest
|
|
174
|
+
|
|
175
|
+
```typescript
|
|
176
|
+
interface ChatRequest {
|
|
177
|
+
messages: ChatMessage[];
|
|
178
|
+
model?: string;
|
|
179
|
+
temperature?: number; // Default: 0.3
|
|
180
|
+
maxTokens?: number; // Default: 4096
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
interface ChatMessage {
|
|
184
|
+
role: 'system' | 'user' | 'assistant';
|
|
185
|
+
content: string | CacheableTextPart[];
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
interface CacheableTextPart {
|
|
189
|
+
type: 'text';
|
|
190
|
+
text: string;
|
|
191
|
+
cacheControl?: { type: 'ephemeral' };
|
|
192
|
+
}
|
|
193
|
+
```
|
|
194
|
+
|
|
195
|
+
### ChatResponse
|
|
196
|
+
|
|
197
|
+
```typescript
|
|
198
|
+
interface ChatResponse {
|
|
199
|
+
content: string;
|
|
200
|
+
usage: {
|
|
201
|
+
inputTokens: number;
|
|
202
|
+
outputTokens: number;
|
|
203
|
+
cacheReadTokens?: number;
|
|
204
|
+
cacheWriteTokens?: number;
|
|
205
|
+
};
|
|
206
|
+
model: string;
|
|
207
|
+
finishReason: 'stop' | 'length' | 'error';
|
|
208
|
+
}
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
### ModelInfo
|
|
212
|
+
|
|
213
|
+
```typescript
|
|
214
|
+
interface ModelInfo {
|
|
215
|
+
maxContextTokens: number;
|
|
216
|
+
supportsStreaming: boolean;
|
|
217
|
+
costPer1kInput?: number;
|
|
218
|
+
costPer1kOutput?: number;
|
|
219
|
+
}
|
|
220
|
+
```
|
|
221
|
+
|
|
222
|
+
## Custom Provider
|
|
223
|
+
|
|
224
|
+
Implement your own provider:
|
|
225
|
+
|
|
226
|
+
```typescript
|
|
227
|
+
import type { LLMProvider, ChatRequest, ChatResponse } from '@llm-translate/cli';
|
|
228
|
+
|
|
229
|
+
class CustomProvider implements LLMProvider {
|
|
230
|
+
readonly name = 'custom' as const;
|
|
231
|
+
readonly defaultModel = 'custom-model';
|
|
232
|
+
|
|
233
|
+
async chat(request: ChatRequest): Promise<ChatResponse> {
|
|
234
|
+
// Your implementation
|
|
235
|
+
const response = await callYourAPI(request);
|
|
236
|
+
|
|
237
|
+
return {
|
|
238
|
+
content: response.text,
|
|
239
|
+
usage: {
|
|
240
|
+
inputTokens: response.promptTokens,
|
|
241
|
+
outputTokens: response.completionTokens,
|
|
242
|
+
},
|
|
243
|
+
model: request.model ?? this.defaultModel,
|
|
244
|
+
finishReason: 'stop',
|
|
245
|
+
};
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
async *stream(request: ChatRequest): AsyncIterable<string> {
|
|
249
|
+
// Streaming implementation
|
|
250
|
+
for await (const chunk of streamYourAPI(request)) {
|
|
251
|
+
yield chunk.text;
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
countTokens(text: string): number {
|
|
256
|
+
// Token estimation
|
|
257
|
+
return Math.ceil(text.length / 4);
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
getModelInfo(model?: string): ModelInfo {
|
|
261
|
+
return {
|
|
262
|
+
maxContextTokens: 100000,
|
|
263
|
+
supportsStreaming: true,
|
|
264
|
+
};
|
|
265
|
+
}
|
|
266
|
+
}
|
|
267
|
+
```
|
|
268
|
+
|
|
269
|
+
## Provider Selection Guide
|
|
270
|
+
|
|
271
|
+
| Use Case | Recommended Provider | Model |
|
|
272
|
+
|----------|---------------------|-------|
|
|
273
|
+
| Cost-effective | Claude | Haiku 4.5 |
|
|
274
|
+
| High quality | Claude | Sonnet 4.5 |
|
|
275
|
+
| OpenAI ecosystem | OpenAI | GPT-4o |
|
|
276
|
+
| Budget constrained | OpenAI | GPT-4o-mini |
|
|
277
|
+
| Privacy/offline | Ollama | Llama 3.1 |
|
|
278
|
+
| Enterprise | Claude/OpenAI | Varies |
|
|
279
|
+
|
|
280
|
+
## Error Handling
|
|
281
|
+
|
|
282
|
+
All providers throw `TranslationError`:
|
|
283
|
+
|
|
284
|
+
```typescript
|
|
285
|
+
import { TranslationError, ErrorCode } from '@llm-translate/cli';
|
|
286
|
+
|
|
287
|
+
try {
|
|
288
|
+
await provider.chat(request);
|
|
289
|
+
} catch (error) {
|
|
290
|
+
if (error instanceof TranslationError) {
|
|
291
|
+
switch (error.code) {
|
|
292
|
+
case ErrorCode.PROVIDER_AUTH_FAILED:
|
|
293
|
+
console.error('Invalid API key');
|
|
294
|
+
break;
|
|
295
|
+
case ErrorCode.PROVIDER_RATE_LIMITED:
|
|
296
|
+
console.error('Rate limited, retry later');
|
|
297
|
+
break;
|
|
298
|
+
case ErrorCode.PROVIDER_ERROR:
|
|
299
|
+
console.error('Provider error:', error.message);
|
|
300
|
+
break;
|
|
301
|
+
}
|
|
302
|
+
}
|
|
303
|
+
}
|
|
304
|
+
```
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
::: info Translations
|
|
4
|
+
All non-English documentation is automatically translated using Claude Sonnet 4.5.
|
|
5
|
+
:::
|
|
6
|
+
|
|
7
|
+
All notable changes to llm-translate will be documented in this file.
|
|
8
|
+
|
|
9
|
+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
|
10
|
+
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
11
|
+
|
|
12
|
+
## [Unreleased]
|
|
13
|
+
|
|
14
|
+
### Added
|
|
15
|
+
|
|
16
|
+
- Prompt caching support for Claude models (40-50% cost reduction)
|
|
17
|
+
- Cache token usage tracking in translation results
|
|
18
|
+
- `enableCaching` option in TranslationAgent
|
|
19
|
+
- `cacheRead` and `cacheWrite` fields in token usage metadata
|
|
20
|
+
- MQM (Multidimensional Quality Metrics) based quality evaluation system
|
|
21
|
+
- MAPS-style pre-translation analysis step
|
|
22
|
+
- Translation mode support (`--mode fast|balanced|quality`)
|
|
23
|
+
|
|
24
|
+
### Changed
|
|
25
|
+
|
|
26
|
+
- `ChatMessage.content` now supports cacheable text parts
|
|
27
|
+
- `ChatResponse.usage` includes cache token metrics
|
|
28
|
+
- Default model updated to `claude-haiku-4-5-20251001`
|
|
29
|
+
|
|
30
|
+
### Documentation
|
|
31
|
+
|
|
32
|
+
- Added Ollama quality warning: 14B+ models required for reliable translation
|
|
33
|
+
|
|
34
|
+
## [0.1.0] - 2025-12-12
|
|
35
|
+
|
|
36
|
+
### Added
|
|
37
|
+
|
|
38
|
+
- Initial release
|
|
39
|
+
- Single file translation (`llm-translate file`)
|
|
40
|
+
- Directory batch translation (`llm-translate dir`)
|
|
41
|
+
- Configuration initialization (`llm-translate init`)
|
|
42
|
+
- Glossary management (`llm-translate glossary`)
|
|
43
|
+
- Claude, OpenAI, and Ollama provider support
|
|
44
|
+
- Self-Refine quality control loop
|
|
45
|
+
- Markdown AST-based chunking
|
|
46
|
+
- Glossary enforcement
|
|
47
|
+
- Quality threshold configuration
|
|
48
|
+
- Verbose output mode
|
|
49
|
+
|
|
50
|
+
### Providers
|
|
51
|
+
|
|
52
|
+
- Claude (claude-haiku-4-5, claude-sonnet-4-5, claude-opus-4-5)
|
|
53
|
+
- OpenAI (gpt-4o-mini, gpt-4o, gpt-4-turbo)
|
|
54
|
+
- Ollama (any local model)
|
|
55
|
+
|
|
56
|
+
### Documentation
|
|
57
|
+
|
|
58
|
+
- CLI reference documentation
|
|
59
|
+
- API reference documentation
|
|
60
|
+
- Getting started guide
|
|
61
|
+
- Configuration guide
|
|
62
|
+
- Glossary guide
|
|
63
|
+
- Quality control guide
|
|
64
|
+
- Cost optimization guide
|
package/docs/cli/dir.md
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
# llm-translate dir
|
|
2
|
+
|
|
3
|
+
::: info Translations
|
|
4
|
+
All non-English documentation is automatically translated using Claude Sonnet 4.5.
|
|
5
|
+
:::
|
|
6
|
+
|
|
7
|
+
Translate all files in a directory.
|
|
8
|
+
|
|
9
|
+
## Synopsis
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
llm-translate dir <input> <output> [options]
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
## Arguments
|
|
16
|
+
|
|
17
|
+
| Argument | Description |
|
|
18
|
+
|----------|-------------|
|
|
19
|
+
| `<input>` | Input directory path (required) |
|
|
20
|
+
| `<output>` | Output directory path (required) |
|
|
21
|
+
|
|
22
|
+
## Options
|
|
23
|
+
|
|
24
|
+
### Language Options
|
|
25
|
+
|
|
26
|
+
| Option | Default | Description |
|
|
27
|
+
|--------|---------|-------------|
|
|
28
|
+
| `-s, --source-lang <lang>` | config default | Source language code |
|
|
29
|
+
| `-t, --target-lang <lang>` | required | Target language code |
|
|
30
|
+
|
|
31
|
+
### Translation Options
|
|
32
|
+
|
|
33
|
+
| Option | Default | Description |
|
|
34
|
+
|--------|---------|-------------|
|
|
35
|
+
| `-g, --glossary <path>` | none | Path to glossary file |
|
|
36
|
+
| `-p, --provider <name>` | `claude` | LLM provider (claude\|openai\|ollama) |
|
|
37
|
+
| `-m, --model <name>` | provider default | Model name |
|
|
38
|
+
| `--context <text>` | none | Additional context for translation |
|
|
39
|
+
|
|
40
|
+
### Quality Options
|
|
41
|
+
|
|
42
|
+
| Option | Default | Description |
|
|
43
|
+
|--------|---------|-------------|
|
|
44
|
+
| `--quality <0-100>` | 85 | Quality threshold |
|
|
45
|
+
| `--max-iterations <n>` | 4 | Maximum refinement iterations |
|
|
46
|
+
|
|
47
|
+
### File Selection
|
|
48
|
+
|
|
49
|
+
| Option | Default | Description |
|
|
50
|
+
|--------|---------|-------------|
|
|
51
|
+
| `--include <patterns>` | `*.md,*.markdown` | File patterns to include (comma-separated) |
|
|
52
|
+
| `--exclude <patterns>` | none | File patterns to exclude (comma-separated) |
|
|
53
|
+
|
|
54
|
+
### Processing Options
|
|
55
|
+
|
|
56
|
+
| Option | Default | Description |
|
|
57
|
+
|--------|---------|-------------|
|
|
58
|
+
| `--parallel <n>` | 3 | Parallel file processing |
|
|
59
|
+
| `--chunk-size <tokens>` | 1024 | Max tokens per chunk |
|
|
60
|
+
| `--no-cache` | false | Disable translation cache |
|
|
61
|
+
|
|
62
|
+
### Output Options
|
|
63
|
+
|
|
64
|
+
| Option | Default | Description |
|
|
65
|
+
|--------|---------|-------------|
|
|
66
|
+
| `-f, --format <fmt>` | auto | Force output format (md\|html\|txt) |
|
|
67
|
+
| `--dry-run` | false | Show what would be translated |
|
|
68
|
+
| `--json` | false | Output results as JSON |
|
|
69
|
+
| `-v, --verbose` | false | Enable verbose logging |
|
|
70
|
+
| `-q, --quiet` | false | Suppress non-error output |
|
|
71
|
+
|
|
72
|
+
## Examples
|
|
73
|
+
|
|
74
|
+
### Basic Usage
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
# Translate all markdown files
|
|
78
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko
|
|
79
|
+
|
|
80
|
+
# With glossary
|
|
81
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko -g glossary.json
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
### File Selection
|
|
85
|
+
|
|
86
|
+
```bash
|
|
87
|
+
# Custom include pattern
|
|
88
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --include "**/*.md"
|
|
89
|
+
|
|
90
|
+
# Multiple patterns
|
|
91
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --include "*.md,*.markdown,*.mdx"
|
|
92
|
+
|
|
93
|
+
# Exclude certain directories
|
|
94
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko \
|
|
95
|
+
--exclude "node_modules/**,dist/**,drafts/**"
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
### Parallel Processing
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
# Process 5 files in parallel
|
|
102
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --parallel 5
|
|
103
|
+
|
|
104
|
+
# Sequential processing (for rate-limited APIs)
|
|
105
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --parallel 1
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
### Quality Settings
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
# High quality for important docs
|
|
112
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --quality 95 --max-iterations 6
|
|
113
|
+
|
|
114
|
+
# Faster processing with lower threshold
|
|
115
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --quality 70 --max-iterations 2
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
### Preview Mode
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
# Show what would be translated
|
|
122
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --dry-run
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
Output:
|
|
126
|
+
```
|
|
127
|
+
Dry run mode - no translation will be performed
|
|
128
|
+
|
|
129
|
+
Files to translate:
|
|
130
|
+
getting-started.md → docs-ko/getting-started.md
|
|
131
|
+
guide/setup.md → docs-ko/guide/setup.md
|
|
132
|
+
api/reference.md → docs-ko/api/reference.md
|
|
133
|
+
|
|
134
|
+
Total: 3 file(s)
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
## Output Structure
|
|
138
|
+
|
|
139
|
+
Directory structure is preserved by default:
|
|
140
|
+
|
|
141
|
+
```
|
|
142
|
+
Input: Output:
|
|
143
|
+
docs/ docs-ko/
|
|
144
|
+
├── getting-started.md ├── getting-started.md
|
|
145
|
+
├── guide/ ├── guide/
|
|
146
|
+
│ ├── setup.md │ ├── setup.md
|
|
147
|
+
│ └── advanced.md │ └── advanced.md
|
|
148
|
+
└── api/ └── api/
|
|
149
|
+
└── reference.md └── reference.md
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
## Progress Reporting
|
|
153
|
+
|
|
154
|
+
### Normal Mode
|
|
155
|
+
|
|
156
|
+
```
|
|
157
|
+
ℹ Found 5 file(s) to translate
|
|
158
|
+
ℹ Input: ./docs
|
|
159
|
+
ℹ Output: ./docs-ko
|
|
160
|
+
ℹ Target language: ko
|
|
161
|
+
ℹ Parallel processing: 3 file(s) at a time
|
|
162
|
+
[1/5] getting-started.md ✓
|
|
163
|
+
[2/5] guide/setup.md ✓
|
|
164
|
+
[3/5] guide/advanced.md ✓
|
|
165
|
+
[4/5] api/reference.md ✓
|
|
166
|
+
[5/5] api/types.md ✓
|
|
167
|
+
|
|
168
|
+
────────────────────────────────────────────────────────
|
|
169
|
+
Translation Summary
|
|
170
|
+
────────────────────────────────────────────────────────
|
|
171
|
+
Files: 5 succeeded, 0 failed
|
|
172
|
+
Duration: 45.2s
|
|
173
|
+
Tokens: 12,450 input / 8,320 output
|
|
174
|
+
Cache: 5,200 read / 2,100 write
|
|
175
|
+
────────────────────────────────────────────────────────
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
### JSON Output
|
|
179
|
+
|
|
180
|
+
```bash
|
|
181
|
+
llm-translate dir ./docs ./docs-ko -t ko --json
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
```json
|
|
185
|
+
{
|
|
186
|
+
"success": true,
|
|
187
|
+
"totalFiles": 5,
|
|
188
|
+
"successCount": 5,
|
|
189
|
+
"failCount": 0,
|
|
190
|
+
"totalDuration": 45234,
|
|
191
|
+
"tokensUsed": {
|
|
192
|
+
"input": 12450,
|
|
193
|
+
"output": 8320,
|
|
194
|
+
"cacheRead": 5200,
|
|
195
|
+
"cacheWrite": 2100
|
|
196
|
+
},
|
|
197
|
+
"files": [...]
|
|
198
|
+
}
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
## Best Practices
|
|
202
|
+
|
|
203
|
+
### 1. Preview First
|
|
204
|
+
|
|
205
|
+
```bash
|
|
206
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko --dry-run
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
### 2. Use Appropriate Parallelism
|
|
210
|
+
|
|
211
|
+
- Rate-limited APIs: `--parallel 1-2`
|
|
212
|
+
- High limits: `--parallel 5-10`
|
|
213
|
+
- Local (Ollama): `--parallel 1` (model limited)
|
|
214
|
+
|
|
215
|
+
### 3. Handle Large Projects
|
|
216
|
+
|
|
217
|
+
```bash
|
|
218
|
+
# Split by subdirectory for better control
|
|
219
|
+
llm-translate dir ./docs/guide ./docs-ko/guide -s en -t ko
|
|
220
|
+
llm-translate dir ./docs/api ./docs-ko/api -s en -t ko
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
### 4. Leverage Caching
|
|
224
|
+
|
|
225
|
+
Cache allows skipping unchanged content:
|
|
226
|
+
|
|
227
|
+
```bash
|
|
228
|
+
# First run: translates all
|
|
229
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko
|
|
230
|
+
|
|
231
|
+
# Second run: uses cache for unchanged content
|
|
232
|
+
llm-translate dir ./docs ./docs-ko -s en -t ko
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
### 5. Quality by Content Type
|
|
236
|
+
|
|
237
|
+
```bash
|
|
238
|
+
# High quality for user-facing docs
|
|
239
|
+
llm-translate dir ./docs/public ./docs-ko/public -s en -t ko --quality 95
|
|
240
|
+
|
|
241
|
+
# Standard quality for internal docs
|
|
242
|
+
llm-translate dir ./docs/internal ./docs-ko/internal -s en -t ko --quality 80
|
|
243
|
+
```
|