@djangocfg/llm 2.1.164
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +181 -0
- package/dist/index.cjs +1164 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +164 -0
- package/dist/index.d.ts +164 -0
- package/dist/index.mjs +1128 -0
- package/dist/index.mjs.map +1 -0
- package/dist/providers/index.cjs +317 -0
- package/dist/providers/index.cjs.map +1 -0
- package/dist/providers/index.d.cts +30 -0
- package/dist/providers/index.d.ts +30 -0
- package/dist/providers/index.mjs +304 -0
- package/dist/providers/index.mjs.map +1 -0
- package/dist/sdkrouter-D8GMBmTi.d.ts +171 -0
- package/dist/sdkrouter-hlQlVd0v.d.cts +171 -0
- package/dist/text-utils-DoYqMIr6.d.ts +289 -0
- package/dist/text-utils-VXWN-8Oq.d.cts +289 -0
- package/dist/translator/index.cjs +794 -0
- package/dist/translator/index.cjs.map +1 -0
- package/dist/translator/index.d.cts +24 -0
- package/dist/translator/index.d.ts +24 -0
- package/dist/translator/index.mjs +769 -0
- package/dist/translator/index.mjs.map +1 -0
- package/dist/types-D6lazgm1.d.cts +59 -0
- package/dist/types-D6lazgm1.d.ts +59 -0
- package/package.json +82 -0
- package/src/client.ts +119 -0
- package/src/index.ts +70 -0
- package/src/providers/anthropic.ts +98 -0
- package/src/providers/base.ts +90 -0
- package/src/providers/index.ts +15 -0
- package/src/providers/openai.ts +73 -0
- package/src/providers/sdkrouter.ts +279 -0
- package/src/translator/cache.ts +237 -0
- package/src/translator/index.ts +55 -0
- package/src/translator/json-translator.ts +408 -0
- package/src/translator/prompts.ts +90 -0
- package/src/translator/text-utils.ts +148 -0
- package/src/translator/types.ts +112 -0
- package/src/translator/validator.ts +181 -0
- package/src/types.ts +85 -0
- package/src/utils/env.ts +67 -0
- package/src/utils/index.ts +2 -0
- package/src/utils/json.ts +44 -0
- package/src/utils/schema.ts +153 -0
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Base LLM provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type {
|
|
6
|
+
LLMClient,
|
|
7
|
+
LLMConfig,
|
|
8
|
+
LLMMessage,
|
|
9
|
+
LLMProvider,
|
|
10
|
+
LLMRequestOptions,
|
|
11
|
+
LLMResponse,
|
|
12
|
+
} from '../types';
|
|
13
|
+
import { extractJson } from '../utils/json';
|
|
14
|
+
|
|
15
|
+
export abstract class BaseLLMProvider implements LLMClient {
|
|
16
|
+
abstract provider: LLMProvider;
|
|
17
|
+
|
|
18
|
+
protected config: Required<
|
|
19
|
+
Pick<LLMConfig, 'model' | 'temperature' | 'maxTokens'>
|
|
20
|
+
> & LLMConfig;
|
|
21
|
+
|
|
22
|
+
constructor(config: LLMConfig) {
|
|
23
|
+
this.config = {
|
|
24
|
+
model: config.model ?? 'gpt-4o-mini',
|
|
25
|
+
temperature: config.temperature ?? 0.1,
|
|
26
|
+
maxTokens: config.maxTokens ?? 4096,
|
|
27
|
+
...config,
|
|
28
|
+
};
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* Send chat messages (implemented by provider)
|
|
33
|
+
*/
|
|
34
|
+
abstract chatMessages(
|
|
35
|
+
messages: LLMMessage[],
|
|
36
|
+
options?: LLMRequestOptions
|
|
37
|
+
): Promise<LLMResponse>;
|
|
38
|
+
|
|
39
|
+
/**
|
|
40
|
+
* Send single chat message
|
|
41
|
+
*/
|
|
42
|
+
async chat(prompt: string, options?: LLMRequestOptions): Promise<LLMResponse> {
|
|
43
|
+
const messages: LLMMessage[] = [];
|
|
44
|
+
|
|
45
|
+
if (options?.system) {
|
|
46
|
+
messages.push({ role: 'system', content: options.system });
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
messages.push({ role: 'user', content: prompt });
|
|
50
|
+
|
|
51
|
+
return this.chatMessages(messages, options);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Get JSON response
|
|
56
|
+
*/
|
|
57
|
+
async json<T = unknown>(
|
|
58
|
+
prompt: string,
|
|
59
|
+
options?: LLMRequestOptions
|
|
60
|
+
): Promise<T> {
|
|
61
|
+
const systemPrompt = `${options?.system ?? ''}\n\nRespond with valid JSON only. No markdown, no explanations.`.trim();
|
|
62
|
+
|
|
63
|
+
const response = await this.chat(prompt, {
|
|
64
|
+
...options,
|
|
65
|
+
system: systemPrompt,
|
|
66
|
+
temperature: options?.temperature ?? 0,
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
return extractJson<T>(response.content);
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
/**
|
|
73
|
+
* Get JSON response with schema hint
|
|
74
|
+
*/
|
|
75
|
+
async jsonSchema<T = unknown>(
|
|
76
|
+
prompt: string,
|
|
77
|
+
schema: string,
|
|
78
|
+
options?: LLMRequestOptions
|
|
79
|
+
): Promise<T> {
|
|
80
|
+
const systemPrompt = `${options?.system ?? ''}\n\nRespond with valid JSON matching this schema:\n${schema}\n\nNo markdown, no explanations.`.trim();
|
|
81
|
+
|
|
82
|
+
const response = await this.chat(prompt, {
|
|
83
|
+
...options,
|
|
84
|
+
system: systemPrompt,
|
|
85
|
+
temperature: options?.temperature ?? 0,
|
|
86
|
+
});
|
|
87
|
+
|
|
88
|
+
return extractJson<T>(response.content);
|
|
89
|
+
}
|
|
90
|
+
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
/**
 * Barrel exports for the providers module: the abstract base class,
 * the concrete providers, and the SDKRouter model-alias helpers/types.
 */
export { BaseLLMProvider } from './base';
export { OpenAIProvider } from './openai';
export { AnthropicProvider } from './anthropic';
export {
  SDKRouterProvider,
  Model,
  ModelPresets,
  buildModelAlias,
  SDKROUTER_BASE_URL,
  type SDKRouterConfig,
  type ModelTier,
  type ModelCapability,
  type ModelCategory,
  type ModelOptions,
} from './sdkrouter';
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI provider
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import OpenAI from 'openai';
|
|
6
|
+
import type {
|
|
7
|
+
LLMConfig,
|
|
8
|
+
LLMMessage,
|
|
9
|
+
LLMProvider,
|
|
10
|
+
LLMRequestOptions,
|
|
11
|
+
LLMResponse,
|
|
12
|
+
} from '../types';
|
|
13
|
+
import { BaseLLMProvider } from './base';
|
|
14
|
+
|
|
15
|
+
export class OpenAIProvider extends BaseLLMProvider {
|
|
16
|
+
provider: LLMProvider = 'openai';
|
|
17
|
+
private client: OpenAI;
|
|
18
|
+
|
|
19
|
+
constructor(config: LLMConfig) {
|
|
20
|
+
super({
|
|
21
|
+
model: config.model ?? 'gpt-4o-mini',
|
|
22
|
+
...config,
|
|
23
|
+
});
|
|
24
|
+
|
|
25
|
+
if (!config.apiKey) {
|
|
26
|
+
throw new Error('OpenAI API key is required');
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
this.client = new OpenAI({
|
|
30
|
+
apiKey: config.apiKey,
|
|
31
|
+
baseURL: config.baseUrl,
|
|
32
|
+
});
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
async chatMessages(
|
|
36
|
+
messages: LLMMessage[],
|
|
37
|
+
options?: LLMRequestOptions
|
|
38
|
+
): Promise<LLMResponse> {
|
|
39
|
+
const model = options?.model ?? this.config.model;
|
|
40
|
+
const temperature = options?.temperature ?? this.config.temperature;
|
|
41
|
+
const maxTokens = options?.maxTokens ?? this.config.maxTokens;
|
|
42
|
+
|
|
43
|
+
// Add system message if provided
|
|
44
|
+
const allMessages = options?.system
|
|
45
|
+
? [{ role: 'system' as const, content: options.system }, ...messages]
|
|
46
|
+
: messages;
|
|
47
|
+
|
|
48
|
+
const response = await this.client.chat.completions.create({
|
|
49
|
+
model,
|
|
50
|
+
messages: allMessages.map((m) => ({
|
|
51
|
+
role: m.role,
|
|
52
|
+
content: m.content,
|
|
53
|
+
})),
|
|
54
|
+
temperature,
|
|
55
|
+
max_tokens: maxTokens,
|
|
56
|
+
});
|
|
57
|
+
|
|
58
|
+
const choice = response.choices[0];
|
|
59
|
+
|
|
60
|
+
return {
|
|
61
|
+
content: choice.message.content ?? '',
|
|
62
|
+
model: response.model,
|
|
63
|
+
usage: response.usage
|
|
64
|
+
? {
|
|
65
|
+
promptTokens: response.usage.prompt_tokens,
|
|
66
|
+
completionTokens: response.usage.completion_tokens,
|
|
67
|
+
totalTokens: response.usage.total_tokens,
|
|
68
|
+
}
|
|
69
|
+
: undefined,
|
|
70
|
+
finishReason: choice.finish_reason ?? undefined,
|
|
71
|
+
};
|
|
72
|
+
}
|
|
73
|
+
}
|
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SDKRouter provider
|
|
3
|
+
*
|
|
4
|
+
* Uses https://llm.sdkrouter.com - OpenAI-compatible LLM router
|
|
5
|
+
* with smart model aliases like @smart, @cheap, @balanced
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import OpenAI from 'openai';
|
|
9
|
+
import type {
|
|
10
|
+
LLMConfig,
|
|
11
|
+
LLMMessage,
|
|
12
|
+
LLMProvider,
|
|
13
|
+
LLMRequestOptions,
|
|
14
|
+
LLMResponse,
|
|
15
|
+
} from '../types';
|
|
16
|
+
import { BaseLLMProvider } from './base';
|
|
17
|
+
|
|
18
|
+
/** SDKRouter base URL */
|
|
19
|
+
export const SDKROUTER_BASE_URL = 'https://llm.sdkrouter.com/v1';
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Model tier presets
|
|
23
|
+
*
|
|
24
|
+
* @example '@cheap', '@smart', '@balanced'
|
|
25
|
+
*/
|
|
26
|
+
export type ModelTier =
|
|
27
|
+
| 'cheap' // Cheapest available
|
|
28
|
+
| 'budget' // Budget-friendly
|
|
29
|
+
| 'standard' // Standard tier
|
|
30
|
+
| 'balanced' // Best quality/price ratio
|
|
31
|
+
| 'smart' // Highest quality
|
|
32
|
+
| 'fast' // Lowest latency
|
|
33
|
+
| 'premium'; // Top-tier
|
|
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
* Model capabilities (features)
|
|
37
|
+
*
|
|
38
|
+
* @example '@cheap+vision', '@smart+tools+json'
|
|
39
|
+
*/
|
|
40
|
+
export type ModelCapability =
|
|
41
|
+
| 'vision' // Image understanding
|
|
42
|
+
| 'tools' // Function/tool calling
|
|
43
|
+
| 'agents' // Agent tool calling (verified)
|
|
44
|
+
| 'json' // JSON mode
|
|
45
|
+
| 'streaming' // Streaming support
|
|
46
|
+
| 'long' // Long context (128k+)
|
|
47
|
+
| 'image'; // Image generation
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Model categories (use cases)
|
|
51
|
+
*
|
|
52
|
+
* @example '@balanced+code', '@smart+reasoning'
|
|
53
|
+
*/
|
|
54
|
+
export type ModelCategory =
|
|
55
|
+
| 'code' // Code generation
|
|
56
|
+
| 'vision' // Vision & images
|
|
57
|
+
| 'reasoning' // Reasoning & math
|
|
58
|
+
| 'agents' // Tool use & agents
|
|
59
|
+
| 'creative' // Creative writing
|
|
60
|
+
| 'chat' // Conversational
|
|
61
|
+
| 'analysis'; // Analysis & extraction
|
|
62
|
+
|
|
63
|
+
/**
|
|
64
|
+
* Model alias builder options
|
|
65
|
+
*/
|
|
66
|
+
export interface ModelOptions {
|
|
67
|
+
// Capabilities
|
|
68
|
+
vision?: boolean;
|
|
69
|
+
tools?: boolean;
|
|
70
|
+
agents?: boolean;
|
|
71
|
+
json?: boolean;
|
|
72
|
+
streaming?: boolean;
|
|
73
|
+
long?: boolean;
|
|
74
|
+
image?: boolean;
|
|
75
|
+
// Categories
|
|
76
|
+
code?: boolean;
|
|
77
|
+
reasoning?: boolean;
|
|
78
|
+
creative?: boolean;
|
|
79
|
+
chat?: boolean;
|
|
80
|
+
analysis?: boolean;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Build model alias string
|
|
85
|
+
*
|
|
86
|
+
* @example
|
|
87
|
+
* ```ts
|
|
88
|
+
* buildModelAlias('smart') // '@smart'
|
|
89
|
+
* buildModelAlias('cheap', { vision: true }) // '@cheap+vision'
|
|
90
|
+
* buildModelAlias('balanced', { code: true, tools: true }) // '@balanced+code+tools'
|
|
91
|
+
* ```
|
|
92
|
+
*/
|
|
93
|
+
export function buildModelAlias(tier: ModelTier, options?: ModelOptions): string {
|
|
94
|
+
const parts: string[] = [tier];
|
|
95
|
+
|
|
96
|
+
if (options) {
|
|
97
|
+
// Capabilities (order matters for consistency)
|
|
98
|
+
if (options.vision) parts.push('vision');
|
|
99
|
+
if (options.tools) parts.push('tools');
|
|
100
|
+
if (options.agents) parts.push('agents');
|
|
101
|
+
if (options.json) parts.push('json');
|
|
102
|
+
if (options.streaming) parts.push('streaming');
|
|
103
|
+
if (options.long) parts.push('long');
|
|
104
|
+
if (options.image) parts.push('image');
|
|
105
|
+
// Categories
|
|
106
|
+
if (options.code) parts.push('code');
|
|
107
|
+
if (options.reasoning) parts.push('reasoning');
|
|
108
|
+
if (options.creative) parts.push('creative');
|
|
109
|
+
if (options.chat) parts.push('chat');
|
|
110
|
+
if (options.analysis) parts.push('analysis');
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
return '@' + parts.join('+');
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* Model alias builder with IDE autocomplete
|
|
118
|
+
*
|
|
119
|
+
* @example
|
|
120
|
+
* ```ts
|
|
121
|
+
* import { Model } from '@djangocfg/llm'
|
|
122
|
+
*
|
|
123
|
+
* Model.smart() // '@smart'
|
|
124
|
+
* Model.cheap({ vision: true }) // '@cheap+vision'
|
|
125
|
+
* Model.balanced({ code: true }) // '@balanced+code'
|
|
126
|
+
* Model.fast({ tools: true, json: true }) // '@fast+tools+json'
|
|
127
|
+
* ```
|
|
128
|
+
*/
|
|
129
|
+
export const Model = {
|
|
130
|
+
/** Cheapest available model */
|
|
131
|
+
cheap: (options?: ModelOptions): string => buildModelAlias('cheap', options),
|
|
132
|
+
|
|
133
|
+
/** Budget-friendly with decent quality */
|
|
134
|
+
budget: (options?: ModelOptions): string => buildModelAlias('budget', options),
|
|
135
|
+
|
|
136
|
+
/** Standard tier */
|
|
137
|
+
standard: (options?: ModelOptions): string => buildModelAlias('standard', options),
|
|
138
|
+
|
|
139
|
+
/** Best quality/price ratio (recommended) */
|
|
140
|
+
balanced: (options?: ModelOptions): string => buildModelAlias('balanced', options),
|
|
141
|
+
|
|
142
|
+
/** Highest quality model */
|
|
143
|
+
smart: (options?: ModelOptions): string => buildModelAlias('smart', options),
|
|
144
|
+
|
|
145
|
+
/** Lowest latency model */
|
|
146
|
+
fast: (options?: ModelOptions): string => buildModelAlias('fast', options),
|
|
147
|
+
|
|
148
|
+
/** Top-tier premium model */
|
|
149
|
+
premium: (options?: ModelOptions): string => buildModelAlias('premium', options),
|
|
150
|
+
|
|
151
|
+
/**
|
|
152
|
+
* Build alias from raw strings (escape hatch)
|
|
153
|
+
*
|
|
154
|
+
* @example Model.alias('cheap', 'vision', 'code') // '@cheap+vision+code'
|
|
155
|
+
*/
|
|
156
|
+
alias: (tier: string, ...modifiers: string[]): string =>
|
|
157
|
+
'@' + [tier, ...modifiers].join('+'),
|
|
158
|
+
} as const;
|
|
159
|
+
|
|
160
|
+
/**
|
|
161
|
+
* Pre-built model aliases for common use cases
|
|
162
|
+
*/
|
|
163
|
+
export const ModelPresets = {
|
|
164
|
+
/** Translation: cheap + json mode */
|
|
165
|
+
translation: Model.cheap({ json: true }),
|
|
166
|
+
|
|
167
|
+
/** Code generation: balanced + code */
|
|
168
|
+
code: Model.balanced({ code: true }),
|
|
169
|
+
|
|
170
|
+
/** Code with tools: balanced + code + tools */
|
|
171
|
+
codeWithTools: Model.balanced({ code: true, tools: true }),
|
|
172
|
+
|
|
173
|
+
/** Vision: balanced + vision */
|
|
174
|
+
vision: Model.balanced({ vision: true }),
|
|
175
|
+
|
|
176
|
+
/** Reasoning: smart + reasoning */
|
|
177
|
+
reasoning: Model.smart({ reasoning: true }),
|
|
178
|
+
|
|
179
|
+
/** Creative writing: balanced + creative */
|
|
180
|
+
creative: Model.balanced({ creative: true }),
|
|
181
|
+
|
|
182
|
+
/** Fast chat: fast + chat */
|
|
183
|
+
fastChat: Model.fast({ chat: true }),
|
|
184
|
+
|
|
185
|
+
/** Analysis: balanced + analysis */
|
|
186
|
+
analysis: Model.balanced({ analysis: true }),
|
|
187
|
+
|
|
188
|
+
/** Agents: smart + agents + tools */
|
|
189
|
+
agents: Model.smart({ agents: true, tools: true }),
|
|
190
|
+
} as const;
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* SDKRouter provider config
|
|
194
|
+
*/
|
|
195
|
+
export interface SDKRouterConfig extends LLMConfig {
  /** Model tier (shortcut for building alias); ignored when `model` is set explicitly */
  tier?: ModelTier;
  /** Model options for alias building; only used together with `tier` */
  modelOptions?: ModelOptions;
}
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
* SDKRouter LLM provider
|
|
204
|
+
*
|
|
205
|
+
* Uses OpenAI-compatible API at https://llm.sdkrouter.com
|
|
206
|
+
*
|
|
207
|
+
* @example
|
|
208
|
+
* ```ts
|
|
209
|
+
* const llm = new SDKRouterProvider({
|
|
210
|
+
* apiKey: process.env.SDKROUTER_API_KEY,
|
|
211
|
+
* model: Model.balanced({ code: true })
|
|
212
|
+
* })
|
|
213
|
+
* ```
|
|
214
|
+
*/
|
|
215
|
+
export class SDKRouterProvider extends BaseLLMProvider {
|
|
216
|
+
provider: LLMProvider = 'sdkrouter';
|
|
217
|
+
private client: OpenAI;
|
|
218
|
+
|
|
219
|
+
constructor(config: SDKRouterConfig) {
|
|
220
|
+
// Build model alias if tier provided
|
|
221
|
+
const model =
|
|
222
|
+
config.model ??
|
|
223
|
+
(config.tier
|
|
224
|
+
? buildModelAlias(config.tier, config.modelOptions)
|
|
225
|
+
: '@balanced');
|
|
226
|
+
|
|
227
|
+
super({
|
|
228
|
+
model,
|
|
229
|
+
...config,
|
|
230
|
+
});
|
|
231
|
+
|
|
232
|
+
if (!config.apiKey) {
|
|
233
|
+
throw new Error('SDKRouter API key is required (SDKROUTER_API_KEY)');
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
this.client = new OpenAI({
|
|
237
|
+
apiKey: config.apiKey,
|
|
238
|
+
baseURL: config.baseUrl ?? SDKROUTER_BASE_URL,
|
|
239
|
+
});
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
async chatMessages(
|
|
243
|
+
messages: LLMMessage[],
|
|
244
|
+
options?: LLMRequestOptions
|
|
245
|
+
): Promise<LLMResponse> {
|
|
246
|
+
const model = options?.model ?? this.config.model;
|
|
247
|
+
const temperature = options?.temperature ?? this.config.temperature;
|
|
248
|
+
const maxTokens = options?.maxTokens ?? this.config.maxTokens;
|
|
249
|
+
|
|
250
|
+
const allMessages = options?.system
|
|
251
|
+
? [{ role: 'system' as const, content: options.system }, ...messages]
|
|
252
|
+
: messages;
|
|
253
|
+
|
|
254
|
+
const response = await this.client.chat.completions.create({
|
|
255
|
+
model,
|
|
256
|
+
messages: allMessages.map((m) => ({
|
|
257
|
+
role: m.role,
|
|
258
|
+
content: m.content,
|
|
259
|
+
})),
|
|
260
|
+
temperature,
|
|
261
|
+
max_tokens: maxTokens,
|
|
262
|
+
});
|
|
263
|
+
|
|
264
|
+
const choice = response.choices[0];
|
|
265
|
+
|
|
266
|
+
return {
|
|
267
|
+
content: choice.message.content ?? '',
|
|
268
|
+
model: response.model,
|
|
269
|
+
usage: response.usage
|
|
270
|
+
? {
|
|
271
|
+
promptTokens: response.usage.prompt_tokens,
|
|
272
|
+
completionTokens: response.usage.completion_tokens,
|
|
273
|
+
totalTokens: response.usage.total_tokens,
|
|
274
|
+
}
|
|
275
|
+
: undefined,
|
|
276
|
+
finishReason: choice.finish_reason ?? undefined,
|
|
277
|
+
};
|
|
278
|
+
}
|
|
279
|
+
}
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Translation Cache Manager
|
|
3
|
+
*
|
|
4
|
+
* Two-level caching: memory + localStorage/file
|
|
5
|
+
* Organized by language pairs for efficient lookup
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import crypto from 'crypto';
|
|
9
|
+
|
|
10
|
+
export interface CacheStats {
  /** Number of entries currently held in the in-memory cache. */
  memorySize: number;
  /** Lookups served from cache (memory or persistent storage). */
  hits: number;
  /** Lookups that found no cached translation anywhere. */
  misses: number;
  /** Entry counts per language pair, derived from the in-memory cache only. */
  languagePairs: Array<{
    pair: string;
    translations: number;
  }>;
}
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* Translation cache with memory + persistent storage
|
|
22
|
+
*/
|
|
23
|
+
export class TranslationCache {
|
|
24
|
+
private memoryCache = new Map<string, string>();
|
|
25
|
+
private cacheOrder: string[] = [];
|
|
26
|
+
private hits = 0;
|
|
27
|
+
private misses = 0;
|
|
28
|
+
|
|
29
|
+
constructor(
|
|
30
|
+
private maxMemorySize: number = 1000,
|
|
31
|
+
private storage?: Storage // localStorage in browser, undefined in Node
|
|
32
|
+
) {}
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* Generate hash for text
|
|
36
|
+
*/
|
|
37
|
+
private getTextHash(text: string): string {
|
|
38
|
+
return crypto.createHash('md5').update(text).digest('hex');
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
/**
|
|
42
|
+
* Get storage key for language pair
|
|
43
|
+
*/
|
|
44
|
+
private getStorageKey(sourceLang: string, targetLang: string): string {
|
|
45
|
+
return `translator:${sourceLang}-${targetLang}`;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* Load from persistent storage
|
|
50
|
+
*/
|
|
51
|
+
private loadFromStorage(sourceLang: string, targetLang: string): Record<string, string> {
|
|
52
|
+
if (!this.storage) return {};
|
|
53
|
+
|
|
54
|
+
try {
|
|
55
|
+
const key = this.getStorageKey(sourceLang, targetLang);
|
|
56
|
+
const data = this.storage.getItem(key);
|
|
57
|
+
return data ? JSON.parse(data) : {};
|
|
58
|
+
} catch {
|
|
59
|
+
return {};
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
/**
|
|
64
|
+
* Save to persistent storage
|
|
65
|
+
*/
|
|
66
|
+
private saveToStorage(
|
|
67
|
+
sourceLang: string,
|
|
68
|
+
targetLang: string,
|
|
69
|
+
cache: Record<string, string>
|
|
70
|
+
): void {
|
|
71
|
+
if (!this.storage) return;
|
|
72
|
+
|
|
73
|
+
try {
|
|
74
|
+
const key = this.getStorageKey(sourceLang, targetLang);
|
|
75
|
+
this.storage.setItem(key, JSON.stringify(cache));
|
|
76
|
+
} catch {
|
|
77
|
+
// Storage full or unavailable
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* Evict oldest entries if memory is full
|
|
83
|
+
*/
|
|
84
|
+
private evictIfNeeded(): void {
|
|
85
|
+
while (this.memoryCache.size >= this.maxMemorySize && this.cacheOrder.length > 0) {
|
|
86
|
+
const oldestKey = this.cacheOrder.shift()!;
|
|
87
|
+
this.memoryCache.delete(oldestKey);
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/**
|
|
92
|
+
* Get translation from cache
|
|
93
|
+
*/
|
|
94
|
+
get(text: string, sourceLang: string, targetLang: string): string | undefined {
|
|
95
|
+
const textHash = this.getTextHash(text);
|
|
96
|
+
const cacheKey = `${sourceLang}-${targetLang}:${textHash}`;
|
|
97
|
+
|
|
98
|
+
// Check memory cache first
|
|
99
|
+
if (this.memoryCache.has(cacheKey)) {
|
|
100
|
+
this.hits++;
|
|
101
|
+
return this.memoryCache.get(cacheKey);
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
// Check persistent storage
|
|
105
|
+
const fileCache = this.loadFromStorage(sourceLang, targetLang);
|
|
106
|
+
if (textHash in fileCache) {
|
|
107
|
+
const translation = fileCache[textHash];
|
|
108
|
+
// Store in memory cache
|
|
109
|
+
this.evictIfNeeded();
|
|
110
|
+
this.memoryCache.set(cacheKey, translation);
|
|
111
|
+
this.cacheOrder.push(cacheKey);
|
|
112
|
+
this.hits++;
|
|
113
|
+
return translation;
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
this.misses++;
|
|
117
|
+
return undefined;
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
/**
|
|
121
|
+
* Store translation in cache
|
|
122
|
+
*/
|
|
123
|
+
set(text: string, sourceLang: string, targetLang: string, translation: string): void {
|
|
124
|
+
const textHash = this.getTextHash(text);
|
|
125
|
+
const cacheKey = `${sourceLang}-${targetLang}:${textHash}`;
|
|
126
|
+
|
|
127
|
+
// Store in memory cache
|
|
128
|
+
this.evictIfNeeded();
|
|
129
|
+
this.memoryCache.set(cacheKey, translation);
|
|
130
|
+
if (!this.cacheOrder.includes(cacheKey)) {
|
|
131
|
+
this.cacheOrder.push(cacheKey);
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
// Store in persistent storage
|
|
135
|
+
const fileCache = this.loadFromStorage(sourceLang, targetLang);
|
|
136
|
+
fileCache[textHash] = translation;
|
|
137
|
+
this.saveToStorage(sourceLang, targetLang, fileCache);
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
/**
|
|
141
|
+
* Get multiple translations at once
|
|
142
|
+
*/
|
|
143
|
+
getMany(
|
|
144
|
+
texts: string[],
|
|
145
|
+
sourceLang: string,
|
|
146
|
+
targetLang: string
|
|
147
|
+
): { cached: Map<string, string>; uncached: string[] } {
|
|
148
|
+
const cached = new Map<string, string>();
|
|
149
|
+
const uncached: string[] = [];
|
|
150
|
+
|
|
151
|
+
for (const text of texts) {
|
|
152
|
+
const translation = this.get(text, sourceLang, targetLang);
|
|
153
|
+
if (translation !== undefined) {
|
|
154
|
+
cached.set(text, translation);
|
|
155
|
+
} else {
|
|
156
|
+
uncached.push(text);
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
return { cached, uncached };
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
/**
|
|
164
|
+
* Store multiple translations at once
|
|
165
|
+
*/
|
|
166
|
+
setMany(
|
|
167
|
+
translations: Map<string, string>,
|
|
168
|
+
sourceLang: string,
|
|
169
|
+
targetLang: string
|
|
170
|
+
): void {
|
|
171
|
+
for (const [text, translation] of translations) {
|
|
172
|
+
this.set(text, sourceLang, targetLang, translation);
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
/**
|
|
177
|
+
* Clear cache
|
|
178
|
+
*/
|
|
179
|
+
clear(sourceLang?: string, targetLang?: string): void {
|
|
180
|
+
if (sourceLang && targetLang) {
|
|
181
|
+
// Clear specific language pair
|
|
182
|
+
const prefix = `${sourceLang}-${targetLang}:`;
|
|
183
|
+
for (const key of [...this.memoryCache.keys()]) {
|
|
184
|
+
if (key.startsWith(prefix)) {
|
|
185
|
+
this.memoryCache.delete(key);
|
|
186
|
+
const idx = this.cacheOrder.indexOf(key);
|
|
187
|
+
if (idx !== -1) this.cacheOrder.splice(idx, 1);
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
if (this.storage) {
|
|
192
|
+
this.storage.removeItem(this.getStorageKey(sourceLang, targetLang));
|
|
193
|
+
}
|
|
194
|
+
} else {
|
|
195
|
+
// Clear all
|
|
196
|
+
this.memoryCache.clear();
|
|
197
|
+
this.cacheOrder = [];
|
|
198
|
+
this.hits = 0;
|
|
199
|
+
this.misses = 0;
|
|
200
|
+
}
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
/**
|
|
204
|
+
* Get cache statistics
|
|
205
|
+
*/
|
|
206
|
+
getStats(): CacheStats {
|
|
207
|
+
const languagePairs: CacheStats['languagePairs'] = [];
|
|
208
|
+
|
|
209
|
+
// Count by language pair from memory
|
|
210
|
+
const pairCounts = new Map<string, number>();
|
|
211
|
+
for (const key of this.memoryCache.keys()) {
|
|
212
|
+
const pair = key.split(':')[0];
|
|
213
|
+
pairCounts.set(pair, (pairCounts.get(pair) || 0) + 1);
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
for (const [pair, count] of pairCounts) {
|
|
217
|
+
languagePairs.push({ pair, translations: count });
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
return {
|
|
221
|
+
memorySize: this.memoryCache.size,
|
|
222
|
+
hits: this.hits,
|
|
223
|
+
misses: this.misses,
|
|
224
|
+
languagePairs,
|
|
225
|
+
};
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/**
 * Create translation cache.
 *
 * @param maxMemorySize - max entries kept in memory before FIFO eviction (default 1000)
 * @param storage - optional persistent store (e.g. localStorage in a browser); omit in Node
 * @returns a fresh TranslationCache instance
 */
export function createCache(
  maxMemorySize?: number,
  storage?: Storage
): TranslationCache {
  return new TranslationCache(maxMemorySize, storage);
}
|