@llm-translate/cli 1.0.0-next.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. package/.dockerignore +51 -0
  2. package/.env.example +33 -0
  3. package/.github/workflows/docs-pages.yml +57 -0
  4. package/.github/workflows/release.yml +49 -0
  5. package/.translaterc.json +44 -0
  6. package/CLAUDE.md +243 -0
  7. package/Dockerfile +55 -0
  8. package/README.md +371 -0
  9. package/RFC.md +1595 -0
  10. package/dist/cli/index.d.ts +2 -0
  11. package/dist/cli/index.js +4494 -0
  12. package/dist/cli/index.js.map +1 -0
  13. package/dist/index.d.ts +1152 -0
  14. package/dist/index.js +3841 -0
  15. package/dist/index.js.map +1 -0
  16. package/docker-compose.yml +56 -0
  17. package/docs/.vitepress/config.ts +161 -0
  18. package/docs/api/agent.md +262 -0
  19. package/docs/api/engine.md +274 -0
  20. package/docs/api/index.md +171 -0
  21. package/docs/api/providers.md +304 -0
  22. package/docs/changelog.md +64 -0
  23. package/docs/cli/dir.md +243 -0
  24. package/docs/cli/file.md +213 -0
  25. package/docs/cli/glossary.md +273 -0
  26. package/docs/cli/index.md +129 -0
  27. package/docs/cli/init.md +158 -0
  28. package/docs/cli/serve.md +211 -0
  29. package/docs/glossary.json +235 -0
  30. package/docs/guide/chunking.md +272 -0
  31. package/docs/guide/configuration.md +139 -0
  32. package/docs/guide/cost-optimization.md +237 -0
  33. package/docs/guide/docker.md +371 -0
  34. package/docs/guide/getting-started.md +150 -0
  35. package/docs/guide/glossary.md +241 -0
  36. package/docs/guide/index.md +86 -0
  37. package/docs/guide/ollama.md +515 -0
  38. package/docs/guide/prompt-caching.md +221 -0
  39. package/docs/guide/providers.md +232 -0
  40. package/docs/guide/quality-control.md +206 -0
  41. package/docs/guide/vitepress-integration.md +265 -0
  42. package/docs/index.md +63 -0
  43. package/docs/ja/api/agent.md +262 -0
  44. package/docs/ja/api/engine.md +274 -0
  45. package/docs/ja/api/index.md +171 -0
  46. package/docs/ja/api/providers.md +304 -0
  47. package/docs/ja/changelog.md +64 -0
  48. package/docs/ja/cli/dir.md +243 -0
  49. package/docs/ja/cli/file.md +213 -0
  50. package/docs/ja/cli/glossary.md +273 -0
  51. package/docs/ja/cli/index.md +111 -0
  52. package/docs/ja/cli/init.md +158 -0
  53. package/docs/ja/guide/chunking.md +271 -0
  54. package/docs/ja/guide/configuration.md +139 -0
  55. package/docs/ja/guide/cost-optimization.md +30 -0
  56. package/docs/ja/guide/getting-started.md +150 -0
  57. package/docs/ja/guide/glossary.md +214 -0
  58. package/docs/ja/guide/index.md +32 -0
  59. package/docs/ja/guide/ollama.md +410 -0
  60. package/docs/ja/guide/prompt-caching.md +221 -0
  61. package/docs/ja/guide/providers.md +232 -0
  62. package/docs/ja/guide/quality-control.md +137 -0
  63. package/docs/ja/guide/vitepress-integration.md +265 -0
  64. package/docs/ja/index.md +58 -0
  65. package/docs/ko/api/agent.md +262 -0
  66. package/docs/ko/api/engine.md +274 -0
  67. package/docs/ko/api/index.md +171 -0
  68. package/docs/ko/api/providers.md +304 -0
  69. package/docs/ko/changelog.md +64 -0
  70. package/docs/ko/cli/dir.md +243 -0
  71. package/docs/ko/cli/file.md +213 -0
  72. package/docs/ko/cli/glossary.md +273 -0
  73. package/docs/ko/cli/index.md +111 -0
  74. package/docs/ko/cli/init.md +158 -0
  75. package/docs/ko/guide/chunking.md +271 -0
  76. package/docs/ko/guide/configuration.md +139 -0
  77. package/docs/ko/guide/cost-optimization.md +30 -0
  78. package/docs/ko/guide/getting-started.md +150 -0
  79. package/docs/ko/guide/glossary.md +214 -0
  80. package/docs/ko/guide/index.md +32 -0
  81. package/docs/ko/guide/ollama.md +410 -0
  82. package/docs/ko/guide/prompt-caching.md +221 -0
  83. package/docs/ko/guide/providers.md +232 -0
  84. package/docs/ko/guide/quality-control.md +137 -0
  85. package/docs/ko/guide/vitepress-integration.md +265 -0
  86. package/docs/ko/index.md +58 -0
  87. package/docs/zh/api/agent.md +262 -0
  88. package/docs/zh/api/engine.md +274 -0
  89. package/docs/zh/api/index.md +171 -0
  90. package/docs/zh/api/providers.md +304 -0
  91. package/docs/zh/changelog.md +64 -0
  92. package/docs/zh/cli/dir.md +243 -0
  93. package/docs/zh/cli/file.md +213 -0
  94. package/docs/zh/cli/glossary.md +273 -0
  95. package/docs/zh/cli/index.md +111 -0
  96. package/docs/zh/cli/init.md +158 -0
  97. package/docs/zh/guide/chunking.md +271 -0
  98. package/docs/zh/guide/configuration.md +139 -0
  99. package/docs/zh/guide/cost-optimization.md +30 -0
  100. package/docs/zh/guide/getting-started.md +150 -0
  101. package/docs/zh/guide/glossary.md +214 -0
  102. package/docs/zh/guide/index.md +32 -0
  103. package/docs/zh/guide/ollama.md +410 -0
  104. package/docs/zh/guide/prompt-caching.md +221 -0
  105. package/docs/zh/guide/providers.md +232 -0
  106. package/docs/zh/guide/quality-control.md +137 -0
  107. package/docs/zh/guide/vitepress-integration.md +265 -0
  108. package/docs/zh/index.md +58 -0
  109. package/package.json +91 -0
  110. package/release.config.mjs +15 -0
  111. package/schemas/glossary.schema.json +110 -0
  112. package/src/cli/commands/dir.ts +469 -0
  113. package/src/cli/commands/file.ts +291 -0
  114. package/src/cli/commands/glossary.ts +221 -0
  115. package/src/cli/commands/init.ts +68 -0
  116. package/src/cli/commands/serve.ts +60 -0
  117. package/src/cli/index.ts +64 -0
  118. package/src/cli/options.ts +59 -0
  119. package/src/core/agent.ts +1119 -0
  120. package/src/core/chunker.ts +391 -0
  121. package/src/core/engine.ts +634 -0
  122. package/src/errors.ts +188 -0
  123. package/src/index.ts +147 -0
  124. package/src/integrations/vitepress.ts +549 -0
  125. package/src/parsers/markdown.ts +383 -0
  126. package/src/providers/claude.ts +259 -0
  127. package/src/providers/interface.ts +109 -0
  128. package/src/providers/ollama.ts +379 -0
  129. package/src/providers/openai.ts +308 -0
  130. package/src/providers/registry.ts +153 -0
  131. package/src/server/index.ts +152 -0
  132. package/src/server/middleware/auth.ts +93 -0
  133. package/src/server/middleware/logger.ts +90 -0
  134. package/src/server/routes/health.ts +84 -0
  135. package/src/server/routes/translate.ts +210 -0
  136. package/src/server/types.ts +138 -0
  137. package/src/services/cache.ts +899 -0
  138. package/src/services/config.ts +217 -0
  139. package/src/services/glossary.ts +247 -0
  140. package/src/types/analysis.ts +164 -0
  141. package/src/types/index.ts +265 -0
  142. package/src/types/modes.ts +121 -0
  143. package/src/types/mqm.ts +157 -0
  144. package/src/utils/logger.ts +141 -0
  145. package/src/utils/tokens.ts +116 -0
  146. package/tests/fixtures/glossaries/ml-glossary.json +53 -0
  147. package/tests/fixtures/input/lynq-installation.ko.md +350 -0
  148. package/tests/fixtures/input/lynq-installation.md +350 -0
  149. package/tests/fixtures/input/simple.ko.md +27 -0
  150. package/tests/fixtures/input/simple.md +27 -0
  151. package/tests/unit/chunker.test.ts +229 -0
  152. package/tests/unit/glossary.test.ts +146 -0
  153. package/tests/unit/markdown.test.ts +205 -0
  154. package/tests/unit/tokens.test.ts +81 -0
  155. package/tsconfig.json +28 -0
  156. package/tsup.config.ts +34 -0
  157. package/vitest.config.ts +16 -0
package/docs/zh/api/providers.md
@@ -0,0 +1,304 @@
# Providers

::: info Translation note
All non-English documentation is translated automatically with Claude Sonnet 4.
:::

LLM provider implementations for the different AI services.

## Overview

All providers implement the `LLMProvider` interface:

```typescript
interface LLMProvider {
  readonly name: ProviderName;
  readonly defaultModel: string;

  chat(request: ChatRequest): Promise<ChatResponse>;
  stream(request: ChatRequest): AsyncIterable<string>;
  countTokens(text: string): number;
  getModelInfo(model?: string): ModelInfo;
}
```

## Claude Provider

The recommended provider, with full prompt-caching support.

### Setup

```typescript
import { createClaudeProvider } from '@llm-translate/cli';

const provider = createClaudeProvider({
  apiKey: process.env.ANTHROPIC_API_KEY,
  defaultModel: 'claude-haiku-4-5-20251001',
});
```

### Configuration

```typescript
interface ClaudeProviderConfig {
  apiKey?: string;        // Defaults to ANTHROPIC_API_KEY env
  baseUrl?: string;       // Custom API endpoint
  defaultModel?: string;  // Default: claude-haiku-4-5-20251001
}
```

### Available Models

| Model | Context | Input cost | Output cost |
|-------|---------|------------|-------------|
| `claude-haiku-4-5-20251001` | 200K | $0.001/1K | $0.005/1K |
| `claude-sonnet-4-5-20250929` | 200K | $0.003/1K | $0.015/1K |
| `claude-opus-4-5-20251101` | 200K | $0.015/1K | $0.075/1K |

### Prompt Caching

The Claude provider supports prompt caching automatically:

```typescript
const response = await provider.chat({
  messages: [
    {
      role: 'user',
      content: [
        {
          type: 'text',
          text: 'System instructions...',
          cacheControl: { type: 'ephemeral' }, // Cache this
        },
        {
          type: 'text',
          text: 'User content...', // Don't cache
        },
      ],
    },
  ],
});

console.log(response.usage);
// {
//   inputTokens: 100,
//   outputTokens: 200,
//   cacheReadTokens: 500,  // Tokens read from cache
//   cacheWriteTokens: 0,   // Tokens written to cache
// }
```

## OpenAI Provider

### Setup

```typescript
import { createOpenAIProvider } from '@llm-translate/cli';

const provider = createOpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
  defaultModel: 'gpt-4o-mini',
});
```

### Configuration

```typescript
interface OpenAIProviderConfig {
  apiKey?: string;        // Defaults to OPENAI_API_KEY env
  baseUrl?: string;       // Custom API endpoint
  defaultModel?: string;  // Default: gpt-4o-mini
  organization?: string;  // OpenAI organization ID
}
```

### Available Models

| Model | Context | Input cost | Output cost |
|-------|---------|------------|-------------|
| `gpt-4o-mini` | 128K | $0.00015/1K | $0.0006/1K |
| `gpt-4o` | 128K | $0.0025/1K | $0.01/1K |
| `gpt-4-turbo` | 128K | $0.01/1K | $0.03/1K |

### Automatic Caching

OpenAI caches prompts longer than 1,024 tokens automatically.

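No `cacheControl` markers are needed, since caching happens on OpenAI's side. A minimal sketch, assuming the OpenAI provider reports cache hits through the same optional `usage.cacheReadTokens` field documented under the provider interface below (the prompt text is illustrative):

```typescript
// Illustrative only: reuse a long, stable prefix across two requests and compare usage.
// Whether cacheReadTokens is populated depends on the provider implementation.
const longPrefix =
  'You are a technical translator. Preserve code, links, and terminology.\n'.repeat(60);

const first = await provider.chat({
  messages: [
    { role: 'system', content: longPrefix },
    { role: 'user', content: 'Translate the first chunk...' },
  ],
});

const second = await provider.chat({
  messages: [
    { role: 'system', content: longPrefix },
    { role: 'user', content: 'Translate the second chunk...' },
  ],
});

console.log(first.usage.cacheReadTokens ?? 0);  // typically 0 on a cold cache
console.log(second.usage.cacheReadTokens ?? 0); // may report reused prefix tokens
```
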
## Ollama Provider

For local, self-hosted models.

### Setup

```typescript
import { createOllamaProvider } from '@llm-translate/cli';

const provider = createOllamaProvider({
  baseUrl: 'http://localhost:11434',
  defaultModel: 'llama3.1',
});
```

### Configuration

```typescript
interface OllamaProviderConfig {
  baseUrl?: string;       // Default: http://localhost:11434
  defaultModel?: string;  // Default: llama3.1
}
```

### Available Models

Any model available in your Ollama installation:

```bash
# List available models
ollama list

# Pull a model
ollama pull llama3.1
ollama pull mistral
ollama pull codellama
```

### Limitations

- No prompt-caching support
- Quality varies by model
- Limited context windows (model-dependent)

## Provider Interface

### ChatRequest

```typescript
interface ChatRequest {
  messages: ChatMessage[];
  model?: string;
  temperature?: number; // Default: 0.3
  maxTokens?: number;   // Default: 4096
}

interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  content: string | CacheableTextPart[];
}

interface CacheableTextPart {
  type: 'text';
  text: string;
  cacheControl?: { type: 'ephemeral' };
}
```

### ChatResponse

```typescript
interface ChatResponse {
  content: string;
  usage: {
    inputTokens: number;
    outputTokens: number;
    cacheReadTokens?: number;
    cacheWriteTokens?: number;
  };
  model: string;
  finishReason: 'stop' | 'length' | 'error';
}
```

### ModelInfo

```typescript
interface ModelInfo {
  maxContextTokens: number;
  supportsStreaming: boolean;
  costPer1kInput?: number;
  costPer1kOutput?: number;
}
```

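Taken together, `countTokens()`, `getModelInfo()`, and `stream()` are enough to budget a request and stream the reply. A minimal sketch against the interface above (the prompt and the cost arithmetic are illustrative; `provider` is any provider created earlier):

```typescript
import type { ChatRequest } from '@llm-translate/cli';

const model = provider.defaultModel;
const info = provider.getModelInfo(model);
const prompt = 'Translate this paragraph into Korean: ...';

// Rough pre-flight cost estimate from the provider's token counter and pricing metadata.
const inputTokens = provider.countTokens(prompt);
const estimatedInputCost = (inputTokens / 1000) * (info.costPer1kInput ?? 0);
console.log(`~${inputTokens} input tokens, ≈ $${estimatedInputCost.toFixed(4)}`);

const request: ChatRequest = {
  messages: [{ role: 'user', content: prompt }],
  model,
  temperature: 0.3,
};

// Stream if the model supports it; otherwise fall back to a single chat() call.
if (info.supportsStreaming) {
  for await (const chunk of provider.stream(request)) {
    process.stdout.write(chunk);
  }
} else {
  const response = await provider.chat(request);
  console.log(response.content);
}
```
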
## Custom Providers

Implement your own provider:

```typescript
import type { LLMProvider, ChatRequest, ChatResponse, ModelInfo } from '@llm-translate/cli';

class CustomProvider implements LLMProvider {
  readonly name = 'custom' as const;
  readonly defaultModel = 'custom-model';

  async chat(request: ChatRequest): Promise<ChatResponse> {
    // Your implementation
    const response = await callYourAPI(request);

    return {
      content: response.text,
      usage: {
        inputTokens: response.promptTokens,
        outputTokens: response.completionTokens,
      },
      model: request.model ?? this.defaultModel,
      finishReason: 'stop',
    };
  }

  async *stream(request: ChatRequest): AsyncIterable<string> {
    // Streaming implementation
    for await (const chunk of streamYourAPI(request)) {
      yield chunk.text;
    }
  }

  countTokens(text: string): number {
    // Token estimation
    return Math.ceil(text.length / 4);
  }

  getModelInfo(model?: string): ModelInfo {
    return {
      maxContextTokens: 100000,
      supportsStreaming: true,
    };
  }
}
```

## Provider Selection Guide

| Use case | Recommended provider | Model |
|----------|----------------------|-------|
| Cost-effectiveness | Claude | Haiku 4.5 |
| High quality | Claude | Sonnet 4.5 |
| OpenAI ecosystem | OpenAI | GPT-4o |
| Tight budget | OpenAI | GPT-4o-mini |
| Privacy / offline | Ollama | Llama 3.1 |
| Enterprise | Claude/OpenAI | As appropriate |

## Error Handling

All providers throw `TranslationError`:

```typescript
import { TranslationError, ErrorCode } from '@llm-translate/cli';

try {
  await provider.chat(request);
} catch (error) {
  if (error instanceof TranslationError) {
    switch (error.code) {
      case ErrorCode.PROVIDER_AUTH_FAILED:
        console.error('Invalid API key');
        break;
      case ErrorCode.PROVIDER_RATE_LIMITED:
        console.error('Rate limited, retry later');
        break;
      case ErrorCode.PROVIDER_ERROR:
        console.error('Provider error:', error.message);
        break;
    }
  }
}
```
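For transient failures such as rate limits, callers typically wrap `chat()` in a retry loop. A minimal sketch (the backoff policy is an assumption, not something the package ships):

```typescript
import { TranslationError, ErrorCode } from '@llm-translate/cli';
import type { ChatRequest, ChatResponse } from '@llm-translate/cli';

async function chatWithRetry(request: ChatRequest, maxAttempts = 3): Promise<ChatResponse> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await provider.chat(request);
    } catch (error) {
      const rateLimited =
        error instanceof TranslationError && error.code === ErrorCode.PROVIDER_RATE_LIMITED;
      if (!rateLimited || attempt >= maxAttempts) throw error;

      // Exponential backoff: 1s, 2s, 4s, ...
      await new Promise((resolve) => setTimeout(resolve, 1000 * 2 ** (attempt - 1)));
    }
  }
}
```
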
package/docs/zh/changelog.md
@@ -0,0 +1,64 @@
# Changelog

::: info Translation note
All non-English documentation is translated automatically with Claude Sonnet 4.
:::

All notable changes to llm-translate are documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

### Added

- Prompt-caching support for Claude models (40-50% cost reduction)
- Cache token usage tracking in translation results
- `enableCaching` option in TranslationAgent
- `cacheRead` and `cacheWrite` fields in token usage metadata
- MQM-based (Multidimensional Quality Metrics) quality evaluation system
- MAPS-style pre-translation analysis step
- Translation mode support (`--mode fast|balanced|quality`)

### Changed

- `ChatMessage.content` now accepts cacheable text parts
- `ChatResponse.usage` includes cache token metrics
- Default model updated to `claude-haiku-4-5-20251001`

### Documentation

- Added Ollama quality warning: 14B+ models are required for reliable translation

## [0.1.0] - 2025-12-12

### Added

- Initial release
- Single-file translation (`llm-translate file`)
- Directory batch translation (`llm-translate dir`)
- Configuration initialization (`llm-translate init`)
- Glossary management (`llm-translate glossary`)
- Claude, OpenAI, and Ollama provider support
- Self-Refine quality-control loop
- Markdown AST-based chunking
- Glossary enforcement
- Quality threshold configuration
- Verbose output mode

### Providers

- Claude (claude-haiku-4-5, claude-sonnet-4-5, claude-opus-4-5)
- OpenAI (gpt-4o-mini, gpt-4o, gpt-4-turbo)
- Ollama (any local model)

### Documentation

- CLI reference
- API reference
- Getting-started guide
- Configuration guide
- Glossary guide
- Quality-control guide
- Cost-optimization guide

package/docs/zh/cli/dir.md
@@ -0,0 +1,243 @@
# llm-translate dir

::: info Translation note
All non-English documentation is translated automatically with Claude Sonnet 4.
:::

Translates every file in a directory.

## Syntax

```bash
llm-translate dir <input> <output> [options]
```

## Arguments

| Argument | Description |
|----------|-------------|
| `<input>` | Input directory path (required) |
| `<output>` | Output directory path (required) |

## Options

### Language Options

| Option | Default | Description |
|--------|---------|-------------|
| `-s, --source-lang <lang>` | Config default | Source language code |
| `-t, --target-lang <lang>` | Required | Target language code |

### Translation Options

| Option | Default | Description |
|--------|---------|-------------|
| `-g, --glossary <path>` | None | Path to glossary file |
| `-p, --provider <name>` | `claude` | LLM provider (claude\|openai\|ollama) |
| `-m, --model <name>` | Provider default | Model name |
| `--context <text>` | None | Additional context for the translation |

### Quality Options

| Option | Default | Description |
|--------|---------|-------------|
| `--quality <0-100>` | 85 | Quality threshold |
| `--max-iterations <n>` | 4 | Maximum refinement iterations |

### File Selection

| Option | Default | Description |
|--------|---------|-------------|
| `--include <patterns>` | `*.md,*.markdown` | File patterns to include (comma-separated) |
| `--exclude <patterns>` | None | File patterns to exclude (comma-separated) |

### Processing Options

| Option | Default | Description |
|--------|---------|-------------|
| `--parallel <n>` | 3 | Number of files processed in parallel |
| `--chunk-size <tokens>` | 1024 | Maximum tokens per chunk |
| `--no-cache` | false | Disable the translation cache |

### Output Options

| Option | Default | Description |
|--------|---------|-------------|
| `-f, --format <fmt>` | auto | Force output format (md\|html\|txt) |
| `--dry-run` | false | Show what would be translated |
| `--json` | false | Output results as JSON |
| `-v, --verbose` | false | Enable verbose logging |
| `-q, --quiet` | false | Suppress non-error output |

## Examples

### Basic Usage

```bash
# Translate all markdown files
llm-translate dir ./docs ./docs-ko -s en -t ko

# With glossary
llm-translate dir ./docs ./docs-ko -s en -t ko -g glossary.json
```

### File Selection

```bash
# Custom include pattern
llm-translate dir ./docs ./docs-ko -s en -t ko --include "**/*.md"

# Multiple patterns
llm-translate dir ./docs ./docs-ko -s en -t ko --include "*.md,*.markdown,*.mdx"

# Exclude certain directories
llm-translate dir ./docs ./docs-ko -s en -t ko \
  --exclude "node_modules/**,dist/**,drafts/**"
```

### Parallel Processing

```bash
# Process 5 files in parallel
llm-translate dir ./docs ./docs-ko -s en -t ko --parallel 5

# Sequential processing (for rate-limited APIs)
llm-translate dir ./docs ./docs-ko -s en -t ko --parallel 1
```

### Quality Settings

```bash
# High quality for important docs
llm-translate dir ./docs ./docs-ko -s en -t ko --quality 95 --max-iterations 6

# Faster processing with lower threshold
llm-translate dir ./docs ./docs-ko -s en -t ko --quality 70 --max-iterations 2
```

### Preview Mode

```bash
# Show what would be translated
llm-translate dir ./docs ./docs-ko -s en -t ko --dry-run
```

Output:
```
Dry run mode - no translation will be performed

Files to translate:
  getting-started.md → docs-ko/getting-started.md
  guide/setup.md → docs-ko/guide/setup.md
  api/reference.md → docs-ko/api/reference.md

Total: 3 file(s)
```

## Output Structure

The directory structure is preserved by default:

```
Input:                      Output:
docs/                       docs-ko/
├── getting-started.md      ├── getting-started.md
├── guide/                  ├── guide/
│   ├── setup.md            │   ├── setup.md
│   └── advanced.md         │   └── advanced.md
└── api/                    └── api/
    └── reference.md            └── reference.md
```

## Progress Reporting

### Normal Mode

```
ℹ Found 5 file(s) to translate
ℹ Input: ./docs
ℹ Output: ./docs-ko
ℹ Target language: ko
ℹ Parallel processing: 3 file(s) at a time
[1/5] getting-started.md ✓
[2/5] guide/setup.md ✓
[3/5] guide/advanced.md ✓
[4/5] api/reference.md ✓
[5/5] api/types.md ✓

────────────────────────────────────────────────────────
 Translation Summary
────────────────────────────────────────────────────────
 Files:    5 succeeded, 0 failed
 Duration: 45.2s
 Tokens:   12,450 input / 8,320 output
 Cache:    5,200 read / 2,100 write
────────────────────────────────────────────────────────
```

### JSON Output

```bash
llm-translate dir ./docs ./docs-ko -t ko --json
```

```json
{
  "success": true,
  "totalFiles": 5,
  "successCount": 5,
  "failCount": 0,
  "totalDuration": 45234,
  "tokensUsed": {
    "input": 12450,
    "output": 8320,
    "cacheRead": 5200,
    "cacheWrite": 2100
  },
  "files": [...]
}
```

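The JSON output is convenient for CI gates. A minimal sketch that consumes the fields shown above (the result file name and the checks are illustrative), assuming the output was saved to `translate-result.json`:

```typescript
import { readFileSync } from 'node:fs';

// Fail the build if any file failed to translate.
const result = JSON.parse(readFileSync('translate-result.json', 'utf8'));

console.log(`${result.successCount}/${result.totalFiles} files translated in ${result.totalDuration} ms`);
console.log(`Tokens: ${result.tokensUsed.input} in / ${result.tokensUsed.output} out`);

if (!result.success || result.failCount > 0) {
  console.error(`${result.failCount} file(s) failed`);
  process.exit(1);
}
```
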
## Best Practices

### 1. Preview First

```bash
llm-translate dir ./docs ./docs-ko -s en -t ko --dry-run
```

### 2. Use Appropriate Parallelism

- Rate-limited APIs: `--parallel 1-2`
- Generous limits: `--parallel 5-10`
- Local (Ollama): `--parallel 1` (limited by the model)

### 3. Handling Large Projects

```bash
# Split by subdirectory for better control
llm-translate dir ./docs/guide ./docs-ko/guide -s en -t ko
llm-translate dir ./docs/api ./docs-ko/api -s en -t ko
```

### 4. Leverage Caching

The cache lets unchanged content be skipped:

```bash
# First run: translates all
llm-translate dir ./docs ./docs-ko -s en -t ko

# Second run: uses cache for unchanged content
llm-translate dir ./docs ./docs-ko -s en -t ko
```

### 5. Adjust Quality by Content Type

```bash
# High quality for user-facing docs
llm-translate dir ./docs/public ./docs-ko/public -s en -t ko --quality 95

# Standard quality for internal docs
llm-translate dir ./docs/internal ./docs-ko/internal -s en -t ko --quality 80
```