@llm-translate/cli 1.0.0-next.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. package/.dockerignore +51 -0
  2. package/.env.example +33 -0
  3. package/.github/workflows/docs-pages.yml +57 -0
  4. package/.github/workflows/release.yml +49 -0
  5. package/.translaterc.json +44 -0
  6. package/CLAUDE.md +243 -0
  7. package/Dockerfile +55 -0
  8. package/README.md +371 -0
  9. package/RFC.md +1595 -0
  10. package/dist/cli/index.d.ts +2 -0
  11. package/dist/cli/index.js +4494 -0
  12. package/dist/cli/index.js.map +1 -0
  13. package/dist/index.d.ts +1152 -0
  14. package/dist/index.js +3841 -0
  15. package/dist/index.js.map +1 -0
  16. package/docker-compose.yml +56 -0
  17. package/docs/.vitepress/config.ts +161 -0
  18. package/docs/api/agent.md +262 -0
  19. package/docs/api/engine.md +274 -0
  20. package/docs/api/index.md +171 -0
  21. package/docs/api/providers.md +304 -0
  22. package/docs/changelog.md +64 -0
  23. package/docs/cli/dir.md +243 -0
  24. package/docs/cli/file.md +213 -0
  25. package/docs/cli/glossary.md +273 -0
  26. package/docs/cli/index.md +129 -0
  27. package/docs/cli/init.md +158 -0
  28. package/docs/cli/serve.md +211 -0
  29. package/docs/glossary.json +235 -0
  30. package/docs/guide/chunking.md +272 -0
  31. package/docs/guide/configuration.md +139 -0
  32. package/docs/guide/cost-optimization.md +237 -0
  33. package/docs/guide/docker.md +371 -0
  34. package/docs/guide/getting-started.md +150 -0
  35. package/docs/guide/glossary.md +241 -0
  36. package/docs/guide/index.md +86 -0
  37. package/docs/guide/ollama.md +515 -0
  38. package/docs/guide/prompt-caching.md +221 -0
  39. package/docs/guide/providers.md +232 -0
  40. package/docs/guide/quality-control.md +206 -0
  41. package/docs/guide/vitepress-integration.md +265 -0
  42. package/docs/index.md +63 -0
  43. package/docs/ja/api/agent.md +262 -0
  44. package/docs/ja/api/engine.md +274 -0
  45. package/docs/ja/api/index.md +171 -0
  46. package/docs/ja/api/providers.md +304 -0
  47. package/docs/ja/changelog.md +64 -0
  48. package/docs/ja/cli/dir.md +243 -0
  49. package/docs/ja/cli/file.md +213 -0
  50. package/docs/ja/cli/glossary.md +273 -0
  51. package/docs/ja/cli/index.md +111 -0
  52. package/docs/ja/cli/init.md +158 -0
  53. package/docs/ja/guide/chunking.md +271 -0
  54. package/docs/ja/guide/configuration.md +139 -0
  55. package/docs/ja/guide/cost-optimization.md +30 -0
  56. package/docs/ja/guide/getting-started.md +150 -0
  57. package/docs/ja/guide/glossary.md +214 -0
  58. package/docs/ja/guide/index.md +32 -0
  59. package/docs/ja/guide/ollama.md +410 -0
  60. package/docs/ja/guide/prompt-caching.md +221 -0
  61. package/docs/ja/guide/providers.md +232 -0
  62. package/docs/ja/guide/quality-control.md +137 -0
  63. package/docs/ja/guide/vitepress-integration.md +265 -0
  64. package/docs/ja/index.md +58 -0
  65. package/docs/ko/api/agent.md +262 -0
  66. package/docs/ko/api/engine.md +274 -0
  67. package/docs/ko/api/index.md +171 -0
  68. package/docs/ko/api/providers.md +304 -0
  69. package/docs/ko/changelog.md +64 -0
  70. package/docs/ko/cli/dir.md +243 -0
  71. package/docs/ko/cli/file.md +213 -0
  72. package/docs/ko/cli/glossary.md +273 -0
  73. package/docs/ko/cli/index.md +111 -0
  74. package/docs/ko/cli/init.md +158 -0
  75. package/docs/ko/guide/chunking.md +271 -0
  76. package/docs/ko/guide/configuration.md +139 -0
  77. package/docs/ko/guide/cost-optimization.md +30 -0
  78. package/docs/ko/guide/getting-started.md +150 -0
  79. package/docs/ko/guide/glossary.md +214 -0
  80. package/docs/ko/guide/index.md +32 -0
  81. package/docs/ko/guide/ollama.md +410 -0
  82. package/docs/ko/guide/prompt-caching.md +221 -0
  83. package/docs/ko/guide/providers.md +232 -0
  84. package/docs/ko/guide/quality-control.md +137 -0
  85. package/docs/ko/guide/vitepress-integration.md +265 -0
  86. package/docs/ko/index.md +58 -0
  87. package/docs/zh/api/agent.md +262 -0
  88. package/docs/zh/api/engine.md +274 -0
  89. package/docs/zh/api/index.md +171 -0
  90. package/docs/zh/api/providers.md +304 -0
  91. package/docs/zh/changelog.md +64 -0
  92. package/docs/zh/cli/dir.md +243 -0
  93. package/docs/zh/cli/file.md +213 -0
  94. package/docs/zh/cli/glossary.md +273 -0
  95. package/docs/zh/cli/index.md +111 -0
  96. package/docs/zh/cli/init.md +158 -0
  97. package/docs/zh/guide/chunking.md +271 -0
  98. package/docs/zh/guide/configuration.md +139 -0
  99. package/docs/zh/guide/cost-optimization.md +30 -0
  100. package/docs/zh/guide/getting-started.md +150 -0
  101. package/docs/zh/guide/glossary.md +214 -0
  102. package/docs/zh/guide/index.md +32 -0
  103. package/docs/zh/guide/ollama.md +410 -0
  104. package/docs/zh/guide/prompt-caching.md +221 -0
  105. package/docs/zh/guide/providers.md +232 -0
  106. package/docs/zh/guide/quality-control.md +137 -0
  107. package/docs/zh/guide/vitepress-integration.md +265 -0
  108. package/docs/zh/index.md +58 -0
  109. package/package.json +91 -0
  110. package/release.config.mjs +15 -0
  111. package/schemas/glossary.schema.json +110 -0
  112. package/src/cli/commands/dir.ts +469 -0
  113. package/src/cli/commands/file.ts +291 -0
  114. package/src/cli/commands/glossary.ts +221 -0
  115. package/src/cli/commands/init.ts +68 -0
  116. package/src/cli/commands/serve.ts +60 -0
  117. package/src/cli/index.ts +64 -0
  118. package/src/cli/options.ts +59 -0
  119. package/src/core/agent.ts +1119 -0
  120. package/src/core/chunker.ts +391 -0
  121. package/src/core/engine.ts +634 -0
  122. package/src/errors.ts +188 -0
  123. package/src/index.ts +147 -0
  124. package/src/integrations/vitepress.ts +549 -0
  125. package/src/parsers/markdown.ts +383 -0
  126. package/src/providers/claude.ts +259 -0
  127. package/src/providers/interface.ts +109 -0
  128. package/src/providers/ollama.ts +379 -0
  129. package/src/providers/openai.ts +308 -0
  130. package/src/providers/registry.ts +153 -0
  131. package/src/server/index.ts +152 -0
  132. package/src/server/middleware/auth.ts +93 -0
  133. package/src/server/middleware/logger.ts +90 -0
  134. package/src/server/routes/health.ts +84 -0
  135. package/src/server/routes/translate.ts +210 -0
  136. package/src/server/types.ts +138 -0
  137. package/src/services/cache.ts +899 -0
  138. package/src/services/config.ts +217 -0
  139. package/src/services/glossary.ts +247 -0
  140. package/src/types/analysis.ts +164 -0
  141. package/src/types/index.ts +265 -0
  142. package/src/types/modes.ts +121 -0
  143. package/src/types/mqm.ts +157 -0
  144. package/src/utils/logger.ts +141 -0
  145. package/src/utils/tokens.ts +116 -0
  146. package/tests/fixtures/glossaries/ml-glossary.json +53 -0
  147. package/tests/fixtures/input/lynq-installation.ko.md +350 -0
  148. package/tests/fixtures/input/lynq-installation.md +350 -0
  149. package/tests/fixtures/input/simple.ko.md +27 -0
  150. package/tests/fixtures/input/simple.md +27 -0
  151. package/tests/unit/chunker.test.ts +229 -0
  152. package/tests/unit/glossary.test.ts +146 -0
  153. package/tests/unit/markdown.test.ts +205 -0
  154. package/tests/unit/tokens.test.ts +81 -0
  155. package/tsconfig.json +28 -0
  156. package/tsup.config.ts +34 -0
  157. package/vitest.config.ts +16 -0
package/src/providers/openai.ts
@@ -0,0 +1,308 @@
+ import { createOpenAI } from '@ai-sdk/openai';
+ import { generateText, streamText } from 'ai';
+ import type { ProviderName } from '../types/index.js';
+ import type {
+   LLMProvider,
+   ProviderConfig,
+   ChatRequest,
+   ChatResponse,
+   ModelInfo,
+ } from './interface.js';
+ import { TranslationError, ErrorCode } from '../errors.js';
+ import { estimateTokens } from '../utils/tokens.js';
+
+ // ============================================================================
+ // Model Information
+ // ============================================================================
+
+ const MODEL_INFO: Record<string, ModelInfo> = {
+   // GPT-4o models (latest)
+   'gpt-4o': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.0025,
+     costPer1kOutput: 0.01,
+   },
+   'gpt-4o-2024-11-20': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.0025,
+     costPer1kOutput: 0.01,
+   },
+   'gpt-4o-2024-08-06': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.0025,
+     costPer1kOutput: 0.01,
+   },
+   // GPT-4o mini (cost-effective)
+   'gpt-4o-mini': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.00015,
+     costPer1kOutput: 0.0006,
+   },
+   'gpt-4o-mini-2024-07-18': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.00015,
+     costPer1kOutput: 0.0006,
+   },
+   // GPT-4 Turbo
+   'gpt-4-turbo': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.01,
+     costPer1kOutput: 0.03,
+   },
+   'gpt-4-turbo-2024-04-09': {
+     maxContextTokens: 128000,
+     supportsStreaming: true,
+     costPer1kInput: 0.01,
+     costPer1kOutput: 0.03,
+   },
+   // GPT-4 (original)
+   'gpt-4': {
+     maxContextTokens: 8192,
+     supportsStreaming: true,
+     costPer1kInput: 0.03,
+     costPer1kOutput: 0.06,
+   },
+   // GPT-3.5 Turbo
+   'gpt-3.5-turbo': {
+     maxContextTokens: 16385,
+     supportsStreaming: true,
+     costPer1kInput: 0.0005,
+     costPer1kOutput: 0.0015,
+   },
+   // o1 models (reasoning)
+   'o1': {
+     maxContextTokens: 200000,
+     supportsStreaming: false,
+     costPer1kInput: 0.015,
+     costPer1kOutput: 0.06,
+   },
+   'o1-preview': {
+     maxContextTokens: 128000,
+     supportsStreaming: false,
+     costPer1kInput: 0.015,
+     costPer1kOutput: 0.06,
+   },
+   'o1-mini': {
+     maxContextTokens: 128000,
+     supportsStreaming: false,
+     costPer1kInput: 0.003,
+     costPer1kOutput: 0.012,
+   },
+ };
+
+ // Use GPT-4o mini as default for cost-efficiency
+ const DEFAULT_MODEL = 'gpt-4o-mini';
+
+ // ============================================================================
+ // OpenAI Provider Implementation
+ // ============================================================================
+
+ export class OpenAIProvider implements LLMProvider {
+   readonly name: ProviderName = 'openai';
+   readonly defaultModel: string;
+   private readonly client: ReturnType<typeof createOpenAI>;
+
+   constructor(config: ProviderConfig = {}) {
+     const apiKey = config.apiKey ?? process.env['OPENAI_API_KEY'];
+
+     if (!apiKey) {
+       throw new TranslationError(ErrorCode.PROVIDER_AUTH_FAILED, {
+         provider: 'openai',
+         message: 'OPENAI_API_KEY environment variable is not set',
+       });
+     }
+
+     this.client = createOpenAI({
+       apiKey,
+       baseURL: config.baseUrl,
+     });
+
+     this.defaultModel = config.defaultModel ?? DEFAULT_MODEL;
+   }
+
+   async chat(request: ChatRequest): Promise<ChatResponse> {
+     const model = request.model ?? this.defaultModel;
+
+     try {
+       const messages = this.convertMessages(request.messages);
+
+       const result = await generateText({
+         model: this.client(model),
+         messages,
+         temperature: request.temperature ?? 0,
+         maxTokens: request.maxTokens ?? 4096,
+       });
+
+       return {
+         content: result.text,
+         usage: {
+           inputTokens: result.usage?.promptTokens ?? 0,
+           outputTokens: result.usage?.completionTokens ?? 0,
+         },
+         model,
+         finishReason: mapFinishReason(result.finishReason),
+       };
+     } catch (error) {
+       throw this.handleError(error);
+     }
+   }
+
+   /**
+    * Convert messages to Vercel AI SDK format
+    * OpenAI doesn't support cache control like Claude, so we simplify content
+    */
+   private convertMessages(
+     messages: Array<{
+       role: 'system' | 'user' | 'assistant';
+       content: string | Array<{ type: 'text'; text: string }>;
+     }>
+   ) {
+     return messages.map((msg) => {
+       // If content is an array of parts, concatenate text
+       if (Array.isArray(msg.content)) {
+         return {
+           role: msg.role,
+           content: msg.content.map((part) => part.text).join(''),
+         };
+       }
+       return { role: msg.role, content: msg.content };
+     });
+   }
+
+   async *stream(request: ChatRequest): AsyncIterable<string> {
+     const model = request.model ?? this.defaultModel;
+     const modelInfo = this.getModelInfo(model);
+
+     // o1 models don't support streaming
+     if (!modelInfo.supportsStreaming) {
+       const response = await this.chat(request);
+       yield response.content;
+       return;
+     }
+
+     try {
+       const messages = this.convertMessages(request.messages);
+
+       const result = streamText({
+         model: this.client(model),
+         messages,
+         temperature: request.temperature ?? 0,
+         maxTokens: request.maxTokens ?? 4096,
+       });
+
+       for await (const chunk of result.textStream) {
+         yield chunk;
+       }
+     } catch (error) {
+       throw this.handleError(error);
+     }
+   }
+
+   countTokens(text: string): number {
+     // Use estimation since exact counting requires tiktoken
+     return estimateTokens(text);
+   }
+
+   getModelInfo(model?: string): ModelInfo {
+     const modelName = model ?? this.defaultModel;
+     return (
+       MODEL_INFO[modelName] ?? {
+         maxContextTokens: 128000,
+         supportsStreaming: true,
+       }
+     );
+   }
+
+   private handleError(error: unknown): TranslationError {
+     if (error instanceof TranslationError) {
+       return error;
+     }
+
+     const errorMessage =
+       error instanceof Error ? error.message : String(error);
+
+     // Check for rate limiting
+     if (
+       errorMessage.includes('rate_limit') ||
+       errorMessage.includes('429') ||
+       errorMessage.includes('Rate limit')
+     ) {
+       return new TranslationError(ErrorCode.PROVIDER_RATE_LIMITED, {
+         provider: 'openai',
+         message: errorMessage,
+       });
+     }
+
+     // Check for auth errors
+     if (
+       errorMessage.includes('authentication') ||
+       errorMessage.includes('401') ||
+       errorMessage.includes('invalid_api_key') ||
+       errorMessage.includes('Incorrect API key')
+     ) {
+       return new TranslationError(ErrorCode.PROVIDER_AUTH_FAILED, {
+         provider: 'openai',
+         message: errorMessage,
+       });
+     }
+
+     // Check for quota exceeded
+     if (
+       errorMessage.includes('quota') ||
+       errorMessage.includes('insufficient_quota')
+     ) {
+       return new TranslationError(ErrorCode.PROVIDER_ERROR, {
+         provider: 'openai',
+         message: 'API quota exceeded. Please check your billing settings.',
+       });
+     }
+
+     // Check for context length errors
+     if (
+       errorMessage.includes('context_length_exceeded') ||
+       errorMessage.includes('maximum context length')
+     ) {
+       return new TranslationError(ErrorCode.CHUNK_TOO_LARGE, {
+         provider: 'openai',
+         message: errorMessage,
+       });
+     }
+
+     return new TranslationError(ErrorCode.PROVIDER_ERROR, {
+       provider: 'openai',
+       message: errorMessage,
+     });
+   }
+ }
+
+ // ============================================================================
+ // Helper Functions
+ // ============================================================================
+
+ function mapFinishReason(
+   reason: string | null | undefined
+ ): 'stop' | 'length' | 'error' {
+   switch (reason) {
+     case 'stop':
+       return 'stop';
+     case 'length':
+     case 'max_tokens':
+       return 'length';
+     default:
+       return 'error';
+   }
+ }
+
+ // ============================================================================
+ // Factory Function
+ // ============================================================================
+
+ export function createOpenAIProvider(config: ProviderConfig = {}): LLMProvider {
+   return new OpenAIProvider(config);
+ }
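
For orientation, a minimal usage sketch of the provider above, written as if it lived inside the package source (hence the relative import path). The `ChatRequest` fields used here (`messages`, `temperature`, `maxTokens`) are inferred from this diff; the authoritative shape is in `package/src/providers/interface.ts`, which is not shown in this excerpt.

```ts
import { createOpenAIProvider } from './providers/openai.js';

// Assumes OPENAI_API_KEY is set; otherwise the constructor throws a
// TranslationError with ErrorCode.PROVIDER_AUTH_FAILED.
const provider = createOpenAIProvider({ defaultModel: 'gpt-4o-mini' });

const response = await provider.chat({
  messages: [
    { role: 'system', content: 'Translate the user message into Korean.' },
    { role: 'user', content: 'Hello, world!' },
  ],
  temperature: 0,
  maxTokens: 1024,
});
console.log(response.content, response.usage);

// Streaming yields incremental chunks; for o1 models, which MODEL_INFO marks
// as non-streaming, stream() falls back to a single chat() call.
for await (const chunk of provider.stream({
  messages: [{ role: 'user', content: 'Hello again' }],
})) {
  process.stdout.write(chunk);
}
```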
package/src/providers/registry.ts
@@ -0,0 +1,153 @@
+ import type { ProviderName } from '../types/index.js';
+ import type { LLMProvider, ProviderConfig, ProviderFactory } from './interface.js';
+ import { TranslationError, ErrorCode } from '../errors.js';
+ import { createClaudeProvider } from './claude.js';
+ import { createOpenAIProvider } from './openai.js';
+ import { createOllamaProvider } from './ollama.js';
+
+ // ============================================================================
+ // Provider Registry
+ // ============================================================================
+
+ const providers = new Map<ProviderName, ProviderFactory>();
+
+ export function registerProvider(
+   name: ProviderName,
+   factory: ProviderFactory
+ ): void {
+   providers.set(name, factory);
+ }
+
+ export function getProvider(
+   name: ProviderName,
+   config: ProviderConfig = {}
+ ): LLMProvider {
+   const factory = providers.get(name);
+
+   if (!factory) {
+     throw new TranslationError(ErrorCode.PROVIDER_NOT_FOUND, {
+       provider: name,
+       available: Array.from(providers.keys()),
+     });
+   }
+
+   return factory(config);
+ }
+
+ export function hasProvider(name: ProviderName): boolean {
+   return providers.has(name);
+ }
+
+ export function getAvailableProviders(): ProviderName[] {
+   return Array.from(providers.keys());
+ }
+
+ // ============================================================================
+ // Provider Configuration from Environment
+ // ============================================================================
+
+ export function getProviderConfigFromEnv(name: ProviderName): ProviderConfig {
+   switch (name) {
+     case 'claude':
+       return {
+         apiKey: process.env['ANTHROPIC_API_KEY'],
+         // defaultModel is handled by the provider itself
+       };
+
+     case 'openai':
+       return {
+         apiKey: process.env['OPENAI_API_KEY'],
+         defaultModel: 'gpt-4o',
+       };
+
+     case 'ollama':
+       return {
+         baseUrl: process.env['OLLAMA_BASE_URL'] ?? 'http://localhost:11434',
+         defaultModel: 'llama3.2', // Better multilingual support than llama2
+       };
+
+     case 'custom':
+       return {
+         apiKey: process.env['LLM_API_KEY'],
+         baseUrl: process.env['LLM_BASE_URL'],
+       };
+
+     default:
+       return {};
+   }
+ }
+
+ // ============================================================================
+ // Create Provider with Fallback
+ // ============================================================================
+
+ export interface CreateProviderOptions {
+   primary: ProviderName;
+   fallback?: ProviderName[];
+   config?: Partial<Record<ProviderName, ProviderConfig>>;
+ }
+
+ // ============================================================================
+ // Auto-register Built-in Providers
+ // ============================================================================
+
+ registerProvider('claude', createClaudeProvider);
+ registerProvider('openai', createOpenAIProvider);
+ registerProvider('ollama', createOllamaProvider);
+
+ // ============================================================================
+ // Create Provider with Fallback
+ // ============================================================================
+
+ /**
+  * Check if a provider can be used (has required credentials)
+  * Ollama doesn't require an API key, only a running server
+  */
+ function canUseProvider(name: ProviderName, config: ProviderConfig): boolean {
+   if (!hasProvider(name)) {
+     return false;
+   }
+
+   // Ollama doesn't require an API key
+   if (name === 'ollama') {
+     return true;
+   }
+
+   // Other providers require an API key
+   return !!config.apiKey;
+ }
+
+ export function createProviderWithFallback(
+   options: CreateProviderOptions
+ ): LLMProvider {
+   const { primary, fallback = [], config = {} } = options;
+
+   // Try primary provider
+   const primaryConfig = {
+     ...getProviderConfigFromEnv(primary),
+     ...config[primary],
+   };
+
+   if (canUseProvider(primary, primaryConfig)) {
+     return getProvider(primary, primaryConfig);
+   }
+
+   // Try fallback providers
+   for (const fallbackName of fallback) {
+     const fallbackConfig = {
+       ...getProviderConfigFromEnv(fallbackName),
+       ...config[fallbackName],
+     };
+
+     if (canUseProvider(fallbackName, fallbackConfig)) {
+       return getProvider(fallbackName, fallbackConfig);
+     }
+   }
+
+   // No provider available
+   throw new TranslationError(ErrorCode.PROVIDER_AUTH_FAILED, {
+     primary,
+     fallback,
+     message: 'No API key found for any configured provider',
+   });
+ }
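
A short sketch of how the registry and fallback chain above resolve a provider, again assuming package-internal relative imports. Which branch wins depends entirely on the API keys present in the environment, since `canUseProvider` only checks registration and credentials.

```ts
import {
  createProviderWithFallback,
  getAvailableProviders,
} from './providers/registry.js';

// Importing the registry auto-registers the built-in providers.
console.log(getAvailableProviders()); // ['claude', 'openai', 'ollama']

// Uses Claude when ANTHROPIC_API_KEY is set, otherwise OpenAI when
// OPENAI_API_KEY is set, otherwise Ollama (no API key required, only a
// reachable server). PROVIDER_AUTH_FAILED is thrown only if nothing is usable.
const provider = createProviderWithFallback({
  primary: 'claude',
  fallback: ['openai', 'ollama'],
  config: {
    ollama: { baseUrl: 'http://localhost:11434' },
  },
});

console.log(`Selected provider: ${provider.name}`);
```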
package/src/server/index.ts
@@ -0,0 +1,152 @@
+ import { Hono } from 'hono';
+ import { cors } from 'hono/cors';
+ import { serve, type ServerType } from '@hono/node-server';
+ import { HTTPException } from 'hono/http-exception';
+
+ import { createAuthMiddleware } from './middleware/auth.js';
+ import { createLoggerMiddleware } from './middleware/logger.js';
+ import { healthRouter } from './routes/health.js';
+ import { translateRouter } from './routes/translate.js';
+ import type { ServerConfig, ErrorResponse, HonoVariables } from './types.js';
+
+ // ============================================================================
+ // Server Factory
+ // ============================================================================
+
+ /**
+  * Create and configure the Hono application
+  */
+ export function createApp(options: ServerConfig) {
+   const app = new Hono<{ Variables: HonoVariables }>();
+
+   // Request logging (first middleware)
+   app.use('*', createLoggerMiddleware({
+     json: options.jsonLogging ?? false,
+   }));
+
+   // CORS middleware (before auth)
+   if (options.enableCors) {
+     app.use('*', cors({
+       origin: '*',
+       allowMethods: ['GET', 'POST', 'OPTIONS'],
+       allowHeaders: ['Content-Type', 'Authorization', 'X-API-Key'],
+       exposeHeaders: ['X-Request-Id'],
+       maxAge: 86400,
+     }));
+   }
+
+   // Health endpoints (no auth required)
+   app.route('/health', healthRouter);
+
+   // Authentication middleware for /translate
+   app.use('/translate/*', createAuthMiddleware({
+     enabled: options.enableAuth,
+     apiKey: options.apiKey,
+   }));
+
+   // Also apply auth to the base /translate endpoint
+   app.use('/translate', createAuthMiddleware({
+     enabled: options.enableAuth,
+     apiKey: options.apiKey,
+   }));
+
+   // Translation endpoint
+   app.route('/translate', translateRouter);
+
+   // Global error handler
+   app.onError((error, c) => {
+     if (error instanceof HTTPException) {
+       return c.json<ErrorResponse>(
+         {
+           error: error.message,
+           code: 'HTTP_ERROR',
+         },
+         error.status
+       );
+     }
+
+     console.error('Unhandled error:', error);
+
+     return c.json<ErrorResponse>(
+       {
+         error: 'Internal server error',
+         code: 'INTERNAL_ERROR',
+       },
+       500
+     );
+   });
+
+   // 404 handler
+   app.notFound((c) => {
+     return c.json<ErrorResponse>(
+       {
+         error: 'Not found',
+         code: 'NOT_FOUND',
+       },
+       404
+     );
+   });
+
+   return app;
+ }
+
+ // ============================================================================
+ // Server Startup
+ // ============================================================================
+
+ /**
+  * Start the HTTP server with graceful shutdown
+  */
+ export function startServer(options: ServerConfig): ServerType {
+   const app = createApp(options);
+
+   const server = serve({
+     fetch: app.fetch,
+     port: options.port,
+     hostname: options.host,
+   });
+
+   // Log startup information
+   console.log(`\nllm-translate server started`);
+   console.log(` - Address: http://${options.host}:${options.port}`);
+   console.log(` - Health: http://${options.host}:${options.port}/health`);
+   console.log(` - Translate: http://${options.host}:${options.port}/translate`);
+   console.log(` - Auth: ${options.enableAuth ? 'enabled' : 'disabled'}`);
+   console.log(` - CORS: ${options.enableCors ? 'enabled' : 'disabled'}`);
+   console.log('');
+
+   // Graceful shutdown handlers
+   const shutdown = (signal: string) => {
+     console.log(`\nReceived ${signal}, shutting down gracefully...`);
+
+     server.close((err) => {
+       if (err) {
+         console.error('Error during shutdown:', err);
+         process.exit(1);
+       }
+       console.log('Server closed');
+       process.exit(0);
+     });
+
+     // Force exit after timeout
+     setTimeout(() => {
+       console.error('Forced shutdown after timeout');
+       process.exit(1);
+     }, 10000);
+   };
+
+   process.on('SIGTERM', () => shutdown('SIGTERM'));
+   process.on('SIGINT', () => shutdown('SIGINT'));
+
+   return server;
+ }
+
+ // ============================================================================
+ // Exports
+ // ============================================================================
+
+ export type { ServerConfig } from './types.js';
+ export { createAuthMiddleware } from './middleware/auth.js';
+ export { createLoggerMiddleware } from './middleware/logger.js';
+ export { healthRouter } from './routes/health.js';
+ export { translateRouter } from './routes/translate.js';
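
A sketch of starting the HTTP server defined above. Only the `ServerConfig` fields this file actually reads (`host`, `port`, `enableAuth`, `apiKey`, `enableCors`, `jsonLogging`) are set here; the full config type lives in `package/src/server/types.ts`, which is not shown, and may carry additional fields.

```ts
import { startServer } from './server/index.js';

// Minimal configuration sketch; values here are illustrative.
const server = startServer({
  host: '127.0.0.1',
  port: 8787,
  enableAuth: true,
  apiKey: process.env.TRANSLATE_API_KEY,
  enableCors: true,
  jsonLogging: false,
});

// startServer already installs SIGINT/SIGTERM handlers with a 10 s forced
// shutdown; the returned @hono/node-server handle can also be closed manually:
// server.close();
```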
package/src/server/middleware/auth.ts
@@ -0,0 +1,93 @@
+ import { createMiddleware } from 'hono/factory';
+ import { HTTPException } from 'hono/http-exception';
+ import type { Context, Next } from 'hono';
+
+ // ============================================================================
+ // Types
+ // ============================================================================
+
+ export interface AuthConfig {
+   enabled: boolean;
+   apiKey?: string;
+ }
+
+ // ============================================================================
+ // Authentication Middleware
+ // ============================================================================
+
+ /**
+  * API Key authentication middleware
+  * Supports both X-API-Key header and Authorization: Bearer token
+  */
+ export function createAuthMiddleware(config: AuthConfig) {
+   return createMiddleware(async (c: Context, next: Next) => {
+     // Skip auth if disabled
+     if (!config.enabled) {
+       return next();
+     }
+
+     // Get API key from config or environment
+     const expectedKey = config.apiKey ?? process.env.TRANSLATE_API_KEY;
+
+     if (!expectedKey) {
+       // No API key configured, skip auth (warning should be logged at startup)
+       return next();
+     }
+
+     // Check X-API-Key header first
+     let providedKey = c.req.header('X-API-Key');
+
+     // Fall back to Authorization: Bearer <token>
+     if (!providedKey) {
+       const authHeader = c.req.header('Authorization');
+       if (authHeader?.startsWith('Bearer ')) {
+         providedKey = authHeader.slice(7);
+       }
+     }
+
+     if (!providedKey) {
+       throw new HTTPException(401, {
+         message:
+           'API key required. Provide via X-API-Key header or Authorization: Bearer <token>',
+       });
+     }
+
+     // Constant-time comparison to prevent timing attacks
+     if (!timingSafeEqual(providedKey, expectedKey)) {
+       throw new HTTPException(401, {
+         message: 'Invalid API key',
+       });
+     }
+
+     return next();
+   });
+ }
+
+ // ============================================================================
+ // Helper Functions
+ // ============================================================================
+
+ /**
+  * Timing-safe string comparison to prevent timing attacks
+  * @param a First string
+  * @param b Second string
+  * @returns true if strings are equal
+  */
+ function timingSafeEqual(a: string, b: string): boolean {
+   if (a.length !== b.length) {
+     // Still perform comparison to maintain constant time
+     // even when lengths differ
+     let result = 1;
+     const maxLen = Math.max(a.length, b.length);
+     for (let i = 0; i < maxLen; i++) {
+       result |= (a.charCodeAt(i % a.length) || 0) ^ (b.charCodeAt(i % b.length) || 0);
+     }
+     return false;
+   }
+
+   let result = 0;
+   for (let i = 0; i < a.length; i++) {
+     result |= a.charCodeAt(i) ^ b.charCodeAt(i);
+   }
+   return result === 0;
+ }
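
A client-side sketch of how the middleware above expects credentials to be presented. The `/translate` request body is defined in `package/src/server/routes/translate.ts`, which is not part of this excerpt, so the payload below is only a placeholder.

```ts
const base = 'http://127.0.0.1:8787';

// Health endpoints are mounted before the auth middleware, so no key is needed.
await fetch(`${base}/health`);

// /translate requires either the X-API-Key header or an Authorization bearer
// token when enableAuth is on and a key is configured.
const res = await fetch(`${base}/translate`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'X-API-Key': process.env.TRANSLATE_API_KEY ?? '',
    // Equivalent alternative:
    // Authorization: `Bearer ${process.env.TRANSLATE_API_KEY}`,
  },
  // Placeholder payload; see routes/translate.ts for the expected fields.
  body: JSON.stringify({}),
});

if (res.status === 401) {
  console.error('Missing or invalid API key');
}
```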