@djangocfg/llm 2.1.164
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +181 -0
- package/dist/index.cjs +1164 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +164 -0
- package/dist/index.d.ts +164 -0
- package/dist/index.mjs +1128 -0
- package/dist/index.mjs.map +1 -0
- package/dist/providers/index.cjs +317 -0
- package/dist/providers/index.cjs.map +1 -0
- package/dist/providers/index.d.cts +30 -0
- package/dist/providers/index.d.ts +30 -0
- package/dist/providers/index.mjs +304 -0
- package/dist/providers/index.mjs.map +1 -0
- package/dist/sdkrouter-D8GMBmTi.d.ts +171 -0
- package/dist/sdkrouter-hlQlVd0v.d.cts +171 -0
- package/dist/text-utils-DoYqMIr6.d.ts +289 -0
- package/dist/text-utils-VXWN-8Oq.d.cts +289 -0
- package/dist/translator/index.cjs +794 -0
- package/dist/translator/index.cjs.map +1 -0
- package/dist/translator/index.d.cts +24 -0
- package/dist/translator/index.d.ts +24 -0
- package/dist/translator/index.mjs +769 -0
- package/dist/translator/index.mjs.map +1 -0
- package/dist/types-D6lazgm1.d.cts +59 -0
- package/dist/types-D6lazgm1.d.ts +59 -0
- package/package.json +82 -0
- package/src/client.ts +119 -0
- package/src/index.ts +70 -0
- package/src/providers/anthropic.ts +98 -0
- package/src/providers/base.ts +90 -0
- package/src/providers/index.ts +15 -0
- package/src/providers/openai.ts +73 -0
- package/src/providers/sdkrouter.ts +279 -0
- package/src/translator/cache.ts +237 -0
- package/src/translator/index.ts +55 -0
- package/src/translator/json-translator.ts +408 -0
- package/src/translator/prompts.ts +90 -0
- package/src/translator/text-utils.ts +148 -0
- package/src/translator/types.ts +112 -0
- package/src/translator/validator.ts +181 -0
- package/src/types.ts +85 -0
- package/src/utils/env.ts +67 -0
- package/src/utils/index.ts +2 -0
- package/src/utils/json.ts +44 -0
- package/src/utils/schema.ts +153 -0
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import { B as BaseLLMProvider } from '../sdkrouter-D8GMBmTi.js';
|
|
2
|
+
export { M as Model, d as ModelCapability, e as ModelCategory, f as ModelOptions, a as ModelPresets, c as ModelTier, S as SDKROUTER_BASE_URL, h as SDKRouterConfig, g as SDKRouterProvider, b as buildModelAlias } from '../sdkrouter-D8GMBmTi.js';
|
|
3
|
+
import { b as LLMProvider, L as LLMConfig, f as LLMMessage, c as LLMRequestOptions, d as LLMResponse } from '../types-D6lazgm1.js';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* OpenAI provider
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
/**
 * OpenAI chat provider declaration.
 */
declare class OpenAIProvider extends BaseLLMProvider {
    /** Provider discriminator; set to 'openai' by the implementation. */
    provider: LLMProvider;
    /** Underlying OpenAI SDK client (implementation detail). */
    private client;
    /**
     * @param config - LLM configuration; an API key is required (the
     *   implementation throws without one).
     */
    constructor(config: LLMConfig);
    /** Send a chat conversation and resolve with the normalized response. */
    chatMessages(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse>;
}
|
|
15
|
+
|
|
16
|
+
/**
|
|
17
|
+
* Anthropic provider
|
|
18
|
+
*
|
|
19
|
+
* Uses OpenAI-compatible API endpoint for simplicity.
|
|
20
|
+
* Anthropic provides OpenAI-compatible endpoint at api.anthropic.com
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
/**
 * Anthropic chat provider declaration.
 */
declare class AnthropicProvider extends BaseLLMProvider {
    /** Provider discriminator; set to 'anthropic' by the implementation. */
    provider: LLMProvider;
    /** Internal HTTP client (implementation detail). */
    private client;
    /**
     * @param config - LLM configuration; an API key is required (the
     *   implementation throws without one).
     */
    constructor(config: LLMConfig);
    /** Send a chat conversation and resolve with the normalized response. */
    chatMessages(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse>;
}
|
|
29
|
+
|
|
30
|
+
export { AnthropicProvider, BaseLLMProvider, OpenAIProvider };
|
|
@@ -0,0 +1,304 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
|
|
3
|
+
// Bundler-emitted helpers that emulate ECMAScript class-field semantics in
// down-leveled output.
var __defProp = Object.defineProperty;
// Define `value` under `key` on `obj`: uses defineProperty (enumerable,
// configurable, writable) when the key already exists, otherwise a plain
// assignment.
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
// Public class-field initializer; non-symbol keys are coerced to strings.
var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
|
|
6
|
+
|
|
7
|
+
// src/utils/json.ts
|
|
8
|
+
/**
 * Extract and parse JSON from an LLM response.
 *
 * Strips a surrounding markdown code fence, trims any prose around the
 * outermost JSON object/array, then parses the remainder.
 *
 * @param text - raw LLM reply text.
 * @returns the parsed JSON value.
 * @throws Error when the cleaned string is not valid JSON (the message
 *   includes the original response for debugging).
 */
function extractJson(text) {
  let jsonStr = text.trim();
  // Remove a markdown code fence, if present.
  if (jsonStr.startsWith("```json")) {
    jsonStr = jsonStr.slice(7);
  } else if (jsonStr.startsWith("```")) {
    jsonStr = jsonStr.slice(3);
  }
  if (jsonStr.endsWith("```")) {
    jsonStr = jsonStr.slice(0, -3);
  }
  jsonStr = jsonStr.trim();
  // Locate the outermost JSON object/array to drop surrounding prose.
  const jsonStart = jsonStr.search(/[\[{]/);
  const jsonEndBracket = jsonStr.lastIndexOf("]");
  const jsonEndBrace = jsonStr.lastIndexOf("}");
  const jsonEnd = Math.max(jsonEndBracket, jsonEndBrace);
  // FIX: require jsonEnd >= jsonStart. A closing token occurring BEFORE the
  // opening one (e.g. "] text {") previously produced a bogus/empty slice;
  // now the string is left intact and JSON.parse reports the real problem.
  if (jsonStart !== -1 && jsonEnd >= jsonStart) {
    jsonStr = jsonStr.slice(jsonStart, jsonEnd + 1);
  }
  try {
    return JSON.parse(jsonStr);
  } catch (error) {
    throw new Error(
      `Failed to parse JSON from LLM response: ${error instanceof Error ? error.message : "Unknown error"}\n\nResponse:\n${text}`
    );
  }
}
|
|
37
|
+
|
|
38
|
+
// src/providers/base.ts
|
|
39
|
+
// src/providers/base.ts
var BaseLLMProvider = class {
  /**
   * @param config - LLM configuration; `model`, `temperature` and `maxTokens`
   *   receive defaults when missing or explicitly undefined.
   */
  constructor(config) {
    __publicField(this, "config");
    // FIX: spread `config` FIRST so a key explicitly set to `undefined`
    // (e.g. `{ model: undefined }`) cannot clobber the defaults applied
    // below. The previous `{ model: ..., ...config }` order did exactly that.
    this.config = {
      ...config,
      model: config.model ?? "gpt-4o-mini",
      temperature: config.temperature ?? 0.1,
      maxTokens: config.maxTokens ?? 4096
    };
  }
  /**
   * Send a single chat message, optionally preceded by a system prompt.
   */
  async chat(prompt, options) {
    const messages = [];
    if (options?.system) {
      messages.push({ role: "system", content: options.system });
    }
    messages.push({ role: "user", content: prompt });
    return this.chatMessages(messages, options);
  }
  /**
   * Get a JSON response: appends a JSON-only instruction to the system
   * prompt, defaults temperature to 0, and parses the reply.
   */
  async json(prompt, options) {
    const systemPrompt = `${options?.system ?? ""}

Respond with valid JSON only. No markdown, no explanations.`.trim();
    const response = await this.chat(prompt, {
      ...options,
      system: systemPrompt,
      temperature: options?.temperature ?? 0
    });
    return extractJson(response.content);
  }
  /**
   * Get a JSON response constrained by a schema hint embedded in the
   * system prompt; otherwise behaves like json().
   */
  async jsonSchema(prompt, schema, options) {
    const systemPrompt = `${options?.system ?? ""}

Respond with valid JSON matching this schema:
${schema}

No markdown, no explanations.`.trim();
    const response = await this.chat(prompt, {
      ...options,
      system: systemPrompt,
      temperature: options?.temperature ?? 0
    });
    return extractJson(response.content);
  }
};
|
|
92
|
+
/**
 * OpenAI provider: wraps the OpenAI Chat Completions API.
 */
var OpenAIProvider = class extends BaseLLMProvider {
  /**
   * @param config - LLM configuration; `apiKey` is required, `baseUrl`
   *   optionally overrides the SDK default endpoint.
   */
  constructor(config) {
    // FIX: spread `config` FIRST so an explicitly-undefined `model` cannot
    // overwrite the default via the spread (old order: `{ model, ...config }`).
    super({
      ...config,
      model: config.model ?? "gpt-4o-mini"
    });
    __publicField(this, "provider", "openai");
    __publicField(this, "client");
    if (!config.apiKey) {
      throw new Error("OpenAI API key is required");
    }
    this.client = new OpenAI({
      apiKey: config.apiKey,
      baseURL: config.baseUrl
    });
  }
  /**
   * Send a conversation to the Chat Completions endpoint and normalize the
   * first choice into the shared LLMResponse shape.
   */
  async chatMessages(messages, options) {
    // Per-request options take precedence over construction-time config.
    const model = options?.model ?? this.config.model;
    const temperature = options?.temperature ?? this.config.temperature;
    const maxTokens = options?.maxTokens ?? this.config.maxTokens;
    // Prepend an ad-hoc system prompt when supplied.
    const allMessages = options?.system ? [{ role: "system", content: options.system }, ...messages] : messages;
    const response = await this.client.chat.completions.create({
      model,
      messages: allMessages.map((m) => ({
        role: m.role,
        content: m.content
      })),
      temperature,
      max_tokens: maxTokens
    });
    const choice = response.choices[0];
    return {
      content: choice.message.content ?? "",
      model: response.model,
      usage: response.usage ? {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens,
        totalTokens: response.usage.total_tokens
      } : void 0,
      finishReason: choice.finish_reason ?? void 0
    };
  }
};
|
|
135
|
+
/**
 * Anthropic provider: talks to the native Messages API via fetch.
 */
var AnthropicProvider = class extends BaseLLMProvider {
  /**
   * @param config - LLM configuration; `apiKey` is required, `baseUrl`
   *   optionally overrides https://api.anthropic.com/v1.
   */
  constructor(config) {
    // FIX: spread `config` FIRST so an explicitly-undefined `model` cannot
    // overwrite the default via the spread (old order: `{ model, ...config }`).
    super({
      ...config,
      model: config.model ?? "claude-3-5-haiku-latest"
    });
    __publicField(this, "provider", "anthropic");
    __publicField(this, "client");
    if (!config.apiKey) {
      throw new Error("Anthropic API key is required");
    }
    // NOTE(review): this OpenAI-compatible client is constructed but unused —
    // chatMessages below calls the Messages API directly with fetch. Kept to
    // preserve observable construction behavior; consider removing upstream.
    this.client = new OpenAI({
      apiKey: config.apiKey,
      baseURL: config.baseUrl ?? "https://api.anthropic.com/v1",
      defaultHeaders: {
        "anthropic-version": "2023-06-01",
        "x-api-key": config.apiKey
      }
    });
  }
  /**
   * Call the Anthropic Messages API and normalize the reply. The system
   * prompt travels in the dedicated `system` field as the Messages API
   * requires; system-role messages are filtered out of `messages`.
   *
   * @throws Error with status and body text on a non-2xx response.
   */
  async chatMessages(messages, options) {
    const model = options?.model ?? this.config.model;
    const temperature = options?.temperature ?? this.config.temperature;
    const maxTokens = options?.maxTokens ?? this.config.maxTokens;
    // Explicit option wins; otherwise lift a system-role message, if any.
    const systemMessage = options?.system ? options.system : messages.find((m) => m.role === "system")?.content;
    const userMessages = messages.filter((m) => m.role !== "system");
    // FIX: honor a configured baseUrl — the endpoint was previously
    // hard-coded here, silently ignoring config.baseUrl.
    const baseUrl = this.config.baseUrl ?? "https://api.anthropic.com/v1";
    const response = await fetch(`${baseUrl}/messages`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": this.config.apiKey,
        "anthropic-version": "2023-06-01"
      },
      body: JSON.stringify({
        model,
        max_tokens: maxTokens,
        temperature,
        system: systemMessage,
        messages: userMessages.map((m) => ({
          role: m.role,
          content: m.content
        }))
      })
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} ${error}`);
    }
    const data = await response.json();
    return {
      content: data.content?.[0]?.text ?? "",
      model: data.model,
      usage: data.usage ? {
        promptTokens: data.usage.input_tokens,
        completionTokens: data.usage.output_tokens,
        totalTokens: data.usage.input_tokens + data.usage.output_tokens
      } : void 0,
      finishReason: data.stop_reason ?? void 0
    };
  }
};
|
|
196
|
+
// src/providers/sdkrouter.ts — model alias machinery.

/** Base URL of the SDKRouter OpenAI-compatible endpoint. */
const SDKROUTER_BASE_URL = "https://llm.sdkrouter.com/v1";

/**
 * Build a model alias string such as "@cheap+vision".
 *
 * Flags are emitted in a fixed order (capabilities first, then categories)
 * so that equal option sets always yield the same alias.
 *
 * @param tier - base tier name ('cheap', 'smart', 'balanced', ...).
 * @param options - truthy flags to append to the alias.
 */
function buildModelAlias(tier, options) {
  // Capabilities, then categories — ordering is part of the contract.
  const flagOrder = [
    "vision", "tools", "agents", "json", "streaming", "long", "image",
    "code", "reasoning", "creative", "chat", "analysis"
  ];
  const segments = [tier];
  if (options) {
    for (const flag of flagOrder) {
      if (options[flag]) {
        segments.push(flag);
      }
    }
  }
  return "@" + segments.join("+");
}

/** Create an alias builder bound to a specific tier. */
const makeTierAlias = (tier) => (options) => buildModelAlias(tier, options);

/**
 * Model alias builder with IDE autocomplete, e.g. Model.cheap({ vision: true }).
 */
const Model = {
  /** Cheapest available model */
  cheap: makeTierAlias("cheap"),
  /** Budget-friendly with decent quality */
  budget: makeTierAlias("budget"),
  /** Standard tier */
  standard: makeTierAlias("standard"),
  /** Best quality/price ratio (recommended) */
  balanced: makeTierAlias("balanced"),
  /** Highest quality model */
  smart: makeTierAlias("smart"),
  /** Lowest latency model */
  fast: makeTierAlias("fast"),
  /** Top-tier premium model */
  premium: makeTierAlias("premium"),
  /**
   * Build alias from raw strings (escape hatch)
   *
   * @example Model.alias('cheap', 'vision', 'code') // '@cheap+vision+code'
   */
  alias: (tier, ...modifiers) => "@" + [tier, ...modifiers].join("+")
};

/**
 * Pre-built model aliases for common use cases.
 */
const ModelPresets = {
  /** Translation: cheap + json mode */
  translation: Model.cheap({ json: true }),
  /** Code generation: balanced + code */
  code: Model.balanced({ code: true }),
  /** Code with tools: balanced + code + tools */
  codeWithTools: Model.balanced({ code: true, tools: true }),
  /** Vision: balanced + vision */
  vision: Model.balanced({ vision: true }),
  /** Reasoning: smart + reasoning */
  reasoning: Model.smart({ reasoning: true }),
  /** Creative writing: balanced + creative */
  creative: Model.balanced({ creative: true }),
  /** Fast chat: fast + chat */
  fastChat: Model.fast({ chat: true }),
  /** Analysis: balanced + analysis */
  analysis: Model.balanced({ analysis: true }),
  /** Agents: smart + agents + tools */
  agents: Model.smart({ agents: true, tools: true })
};
|
|
257
|
+
/**
 * SDKRouter provider: OpenAI-compatible router with tier aliases
 * such as '@smart' or '@cheap+json'.
 */
var SDKRouterProvider = class extends BaseLLMProvider {
  /**
   * @param config - an explicit `model` wins; otherwise `tier` (plus
   *   `modelOptions`) builds an alias; falls back to "@balanced".
   *   `apiKey` is required.
   */
  constructor(config) {
    const model = config.model ?? (config.tier ? buildModelAlias(config.tier, config.modelOptions) : "@balanced");
    // FIX: spread `config` FIRST. With the old `{ model, ...config }` order,
    // an explicitly-undefined `config.model` re-entered via the spread and
    // wiped out the tier-derived alias computed above.
    super({
      ...config,
      model
    });
    __publicField(this, "provider", "sdkrouter");
    __publicField(this, "client");
    if (!config.apiKey) {
      throw new Error("SDKRouter API key is required (SDKROUTER_API_KEY)");
    }
    this.client = new OpenAI({
      apiKey: config.apiKey,
      baseURL: config.baseUrl ?? SDKROUTER_BASE_URL
    });
  }
  /**
   * Send a conversation through SDKRouter's Chat Completions endpoint and
   * normalize the first choice into the shared LLMResponse shape.
   */
  async chatMessages(messages, options) {
    // Per-request options take precedence over construction-time config.
    const model = options?.model ?? this.config.model;
    const temperature = options?.temperature ?? this.config.temperature;
    const maxTokens = options?.maxTokens ?? this.config.maxTokens;
    // Prepend an ad-hoc system prompt when supplied.
    const allMessages = options?.system ? [{ role: "system", content: options.system }, ...messages] : messages;
    const response = await this.client.chat.completions.create({
      model,
      messages: allMessages.map((m) => ({
        role: m.role,
        content: m.content
      })),
      temperature,
      max_tokens: maxTokens
    });
    const choice = response.choices[0];
    return {
      content: choice.message.content ?? "",
      model: response.model,
      usage: response.usage ? {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens,
        totalTokens: response.usage.total_tokens
      } : void 0,
      finishReason: choice.finish_reason ?? void 0
    };
  }
};
|
|
301
|
+
|
|
302
|
+
export { AnthropicProvider, BaseLLMProvider, Model, ModelPresets, OpenAIProvider, SDKROUTER_BASE_URL, SDKRouterProvider, buildModelAlias };
|
|
303
|
+
//# sourceMappingURL=index.mjs.map
|
|
304
|
+
//# sourceMappingURL=index.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../src/utils/json.ts","../../src/providers/base.ts","../../src/providers/openai.ts","../../src/providers/anthropic.ts","../../src/providers/sdkrouter.ts"],"names":["OpenAI"],"mappings":";;;;;;;AAOO,SAAS,YAAyB,IAAA,EAAiB;AACxD,EAAA,IAAI,OAAA,GAAU,KAAK,IAAA,EAAK;AAGxB,EAAA,IAAI,OAAA,CAAQ,UAAA,CAAW,SAAS,CAAA,EAAG;AACjC,IAAA,OAAA,GAAU,OAAA,CAAQ,MAAM,CAAC,CAAA;AAAA,EAC3B,CAAA,MAAA,IAAW,OAAA,CAAQ,UAAA,CAAW,KAAK,CAAA,EAAG;AACpC,IAAA,OAAA,GAAU,OAAA,CAAQ,MAAM,CAAC,CAAA;AAAA,EAC3B;AAEA,EAAA,IAAI,OAAA,CAAQ,QAAA,CAAS,KAAK,CAAA,EAAG;AAC3B,IAAA,OAAA,GAAU,OAAA,CAAQ,KAAA,CAAM,CAAA,EAAG,EAAE,CAAA;AAAA,EAC/B;AAEA,EAAA,OAAA,GAAU,QAAQ,IAAA,EAAK;AAGvB,EAAA,MAAM,SAAA,GAAY,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAA;AACxC,EAAA,MAAM,cAAA,GAAiB,OAAA,CAAQ,WAAA,CAAY,GAAG,CAAA;AAC9C,EAAA,MAAM,YAAA,GAAe,OAAA,CAAQ,WAAA,CAAY,GAAG,CAAA;AAC5C,EAAA,MAAM,OAAA,GAAU,IAAA,CAAK,GAAA,CAAI,cAAA,EAAgB,YAAY,CAAA;AAErD,EAAA,IAAI,SAAA,KAAc,EAAA,IAAM,OAAA,KAAY,EAAA,EAAI;AACtC,IAAA,OAAA,GAAU,OAAA,CAAQ,KAAA,CAAM,SAAA,EAAW,OAAA,GAAU,CAAC,CAAA;AAAA,EAChD;AAEA,EAAA,IAAI;AACF,IAAA,OAAO,IAAA,CAAK,MAAM,OAAO,CAAA;AAAA,EAC3B,SAAS,KAAA,EAAO;AACd,IAAA,MAAM,IAAI,KAAA;AAAA,MACR,CAAA,wCAAA,EAA2C,KAAA,YAAiB,KAAA,GAAQ,KAAA,CAAM,UAAU,eAAe;;AAAA;AAAA,EAAkB,IAAI,CAAA;AAAA,KAC3H;AAAA,EACF;AACF;;;AC1BO,IAAe,kBAAf,MAAoD;AAAA,EAOzD,YAAY,MAAA,EAAmB;AAJ/B,IAAA,aAAA,CAAA,IAAA,EAAU,QAAA,CAAA;AAKR,IAAA,IAAA,CAAK,MAAA,GAAS;AAAA,MACZ,KAAA,EAAO,OAAO,KAAA,IAAS,aAAA;AAAA,MACvB,WAAA,EAAa,OAAO,WAAA,IAAe,GAAA;AAAA,MACnC,SAAA,EAAW,OAAO,SAAA,IAAa,IAAA;AAAA,MAC/B,GAAG;AAAA,KACL;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,IAAA,CAAK,MAAA,EAAgB,OAAA,EAAmD;AAC5E,IAAA,MAAM,WAAyB,EAAC;AAEhC,IAAA,IAAI,SAAS,MAAA,EAAQ;AACnB,MAAA,QAAA,CAAS,KAAK,EAAE,IAAA,EAAM,UAAU,OAAA,EAAS,OAAA,CAAQ,QAAQ,CAAA;AAAA,IAC3D;AAEA,IAAA,QAAA,CAAS,KAAK,EAAE,IAAA,EAAM,MAAA,EAAQ,OAAA,EAAS,QAAQ,CAAA;AAE/C,IAAA,OAAO,IAAA,CAAK,YAAA,CAAa,QAAA,EAAU,OAAO,CAAA;AAAA,EAC5C;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,IAAA,CACJ,MAAA,EACA,OAAA,EACY;AACZ,IAAA,MAAM,YAAA,GAAe,CAAA,EAAG,OAAA,EAAS,MAAA,IAA
U,EAAE;;AAAA,2DAAA,CAAA,CAAkE,IAAA,EAAK;AAEpH,IAAA,MAAM,QAAA,GAAW,MAAM,IAAA,CAAK,IAAA,CAAK,MAAA,EAAQ;AAAA,MACvC,GAAG,OAAA;AAAA,MACH,MAAA,EAAQ,YAAA;AAAA,MACR,WAAA,EAAa,SAAS,WAAA,IAAe;AAAA,KACtC,CAAA;AAED,IAAA,OAAO,WAAA,CAAe,SAAS,OAAO,CAAA;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,UAAA,CACJ,MAAA,EACA,MAAA,EACA,OAAA,EACY;AACZ,IAAA,MAAM,YAAA,GAAe,CAAA,EAAG,OAAA,EAAS,MAAA,IAAU,EAAE;;AAAA;AAAA,EAAsD,MAAM;;AAAA,6BAAA,CAAA,CAAoC,IAAA,EAAK;AAElJ,IAAA,MAAM,QAAA,GAAW,MAAM,IAAA,CAAK,IAAA,CAAK,MAAA,EAAQ;AAAA,MACvC,GAAG,OAAA;AAAA,MACH,MAAA,EAAQ,YAAA;AAAA,MACR,WAAA,EAAa,SAAS,WAAA,IAAe;AAAA,KACtC,CAAA;AAED,IAAA,OAAO,WAAA,CAAe,SAAS,OAAO,CAAA;AAAA,EACxC;AACF;AC3EO,IAAM,cAAA,GAAN,cAA6B,eAAA,CAAgB;AAAA,EAIlD,YAAY,MAAA,EAAmB;AAC7B,IAAA,KAAA,CAAM;AAAA,MACJ,KAAA,EAAO,OAAO,KAAA,IAAS,aAAA;AAAA,MACvB,GAAG;AAAA,KACJ,CAAA;AAPH,IAAA,aAAA,CAAA,IAAA,EAAA,UAAA,EAAwB,QAAA,CAAA;AACxB,IAAA,aAAA,CAAA,IAAA,EAAQ,QAAA,CAAA;AAQN,IAAA,IAAI,CAAC,OAAO,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,4BAA4B,CAAA;AAAA,IAC9C;AAEA,IAAA,IAAA,CAAK,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,MACvB,QAAQ,MAAA,CAAO,MAAA;AAAA,MACf,SAAS,MAAA,CAAO;AAAA,KACjB,CAAA;AAAA,EACH;AAAA,EAEA,MAAM,YAAA,CACJ,QAAA,EACA,OAAA,EACsB;AACtB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,MAAA,CAAO,KAAA;AAC5C,IAAA,MAAM,WAAA,GAAc,OAAA,EAAS,WAAA,IAAe,IAAA,CAAK,MAAA,CAAO,WAAA;AACxD,IAAA,MAAM,SAAA,GAAY,OAAA,EAAS,SAAA,IAAa,IAAA,CAAK,MAAA,CAAO,SAAA;AAGpD,IAAA,MAAM,WAAA,GAAc,OAAA,EAAS,MAAA,GACzB,CAAC,EAAE,IAAA,EAAM,QAAA,EAAmB,OAAA,EAAS,OAAA,CAAQ,MAAA,EAAO,EAAG,GAAG,QAAQ,CAAA,GAClE,QAAA;AAEJ,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACzD,KAAA;AAAA,MACA,QAAA,EAAU,WAAA,CAAY,GAAA,CAAI,CAAC,CAAA,MAAO;AAAA,QAChC,MAAM,CAAA,CAAE,IAAA;AAAA,QACR,SAAS,CAAA,CAAE;AAAA,OACb,CAAE,CAAA;AAAA,MACF,WAAA;AAAA,MACA,UAAA,EAAY;AAAA,KACb,CAAA;AAED,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AAEjC,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,MAAA,CAAO,OAAA,CAAQ,OAAA,IAAW,EAAA;AAAA,MACnC,OAAO,QAAA,CAAS,KAAA;AAAA,MAChB,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,QACE,YAAA,EAAc,SAAS,KAAA,CAAM
,aAAA;AAAA,QAC7B,gBAAA,EAAkB,SAAS,KAAA,CAAM,iBAAA;AAAA,QACjC,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,OAC9B,GACA,MAAA;AAAA,MACJ,YAAA,EAAc,OAAO,aAAA,IAAiB;AAAA,KACxC;AAAA,EACF;AACF;ACvDO,IAAM,iBAAA,GAAN,cAAgC,eAAA,CAAgB;AAAA,EAIrD,YAAY,MAAA,EAAmB;AAC7B,IAAA,KAAA,CAAM;AAAA,MACJ,KAAA,EAAO,OAAO,KAAA,IAAS,yBAAA;AAAA,MACvB,GAAG;AAAA,KACJ,CAAA;AAPH,IAAA,aAAA,CAAA,IAAA,EAAA,UAAA,EAAwB,WAAA,CAAA;AACxB,IAAA,aAAA,CAAA,IAAA,EAAQ,QAAA,CAAA;AAQN,IAAA,IAAI,CAAC,OAAO,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,+BAA+B,CAAA;AAAA,IACjD;AAGA,IAAA,IAAA,CAAK,MAAA,GAAS,IAAIA,MAAAA,CAAO;AAAA,MACvB,QAAQ,MAAA,CAAO,MAAA;AAAA,MACf,OAAA,EAAS,OAAO,OAAA,IAAW,8BAAA;AAAA,MAC3B,cAAA,EAAgB;AAAA,QACd,mBAAA,EAAqB,YAAA;AAAA,QACrB,aAAa,MAAA,CAAO;AAAA;AACtB,KACD,CAAA;AAAA,EACH;AAAA,EAEA,MAAM,YAAA,CACJ,QAAA,EACA,OAAA,EACsB;AACtB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,MAAA,CAAO,KAAA;AAC5C,IAAA,MAAM,WAAA,GAAc,OAAA,EAAS,WAAA,IAAe,IAAA,CAAK,MAAA,CAAO,WAAA;AACxD,IAAA,MAAM,SAAA,GAAY,OAAA,EAAS,SAAA,IAAa,IAAA,CAAK,MAAA,CAAO,SAAA;AAGpD,IAAA,MAAM,aAAA,GAAgB,OAAA,EAAS,MAAA,GAC3B,OAAA,CAAQ,MAAA,GACR,QAAA,CAAS,IAAA,CAAK,CAAC,CAAA,KAAM,CAAA,CAAE,IAAA,KAAS,QAAQ,CAAA,EAAG,OAAA;AAE/C,IAAA,MAAM,eAAe,QAAA,CAAS,MAAA,CAAO,CAAC,CAAA,KAAM,CAAA,CAAE,SAAS,QAAQ,CAAA;AAG/D,IAAA,MAAM,QAAA,GAAW,MAAM,KAAA,CAAM,uCAAA,EAAyC;AAAA,MACpE,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACP,cAAA,EAAgB,kBAAA;AAAA,QAChB,WAAA,EAAa,KAAK,MAAA,CAAO,MAAA;AAAA,QACzB,mBAAA,EAAqB;AAAA,OACvB;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACnB,KAAA;AAAA,QACA,UAAA,EAAY,SAAA;AAAA,QACZ,WAAA;AAAA,QACA,MAAA,EAAQ,aAAA;AAAA,QACR,QAAA,EAAU,YAAA,CAAa,GAAA,CAAI,CAAC,CAAA,MAAO;AAAA,UACjC,MAAM,CAAA,CAAE,IAAA;AAAA,UACR,SAAS,CAAA,CAAE;AAAA,SACb,CAAE;AAAA,OACH;AAAA,KACF,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AAChB,MAAA,MAAM,KAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAClC,MAAA,MAAM,IAAI,KAAA,CAAM,CAAA,qBAAA,EAAwB,SAAS,MAAM,CAAA,CAAA,EAAI,KAAK,CAAA,CAAE,CAAA;AAAA,IACpE;AAEA,IAAA,MAAM,IAAA,GAAO,MAAM,QAAA,CAAS,IAAA,EAAK;AAEjC,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,IAAA,CAAK,OAAA,GAAU,CAAC,GAAG,IAAA,IAAQ,EAAA
;AAAA,MACpC,OAAO,IAAA,CAAK,KAAA;AAAA,MACZ,KAAA,EAAO,KAAK,KAAA,GACR;AAAA,QACE,YAAA,EAAc,KAAK,KAAA,CAAM,YAAA;AAAA,QACzB,gBAAA,EAAkB,KAAK,KAAA,CAAM,aAAA;AAAA,QAC7B,WAAA,EAAa,IAAA,CAAK,KAAA,CAAM,YAAA,GAAe,KAAK,KAAA,CAAM;AAAA,OACpD,GACA,MAAA;AAAA,MACJ,YAAA,EAAc,KAAK,WAAA,IAAe;AAAA,KACpC;AAAA,EACF;AACF;AC/EO,IAAM,kBAAA,GAAqB;AA0E3B,SAAS,eAAA,CAAgB,MAAiB,OAAA,EAAgC;AAC/E,EAAA,MAAM,KAAA,GAAkB,CAAC,IAAI,CAAA;AAE7B,EAAA,IAAI,OAAA,EAAS;AAEX,IAAA,IAAI,OAAA,CAAQ,MAAA,EAAQ,KAAA,CAAM,IAAA,CAAK,QAAQ,CAAA;AACvC,IAAA,IAAI,OAAA,CAAQ,KAAA,EAAO,KAAA,CAAM,IAAA,CAAK,OAAO,CAAA;AACrC,IAAA,IAAI,OAAA,CAAQ,MAAA,EAAQ,KAAA,CAAM,IAAA,CAAK,QAAQ,CAAA;AACvC,IAAA,IAAI,OAAA,CAAQ,IAAA,EAAM,KAAA,CAAM,IAAA,CAAK,MAAM,CAAA;AACnC,IAAA,IAAI,OAAA,CAAQ,SAAA,EAAW,KAAA,CAAM,IAAA,CAAK,WAAW,CAAA;AAC7C,IAAA,IAAI,OAAA,CAAQ,IAAA,EAAM,KAAA,CAAM,IAAA,CAAK,MAAM,CAAA;AACnC,IAAA,IAAI,OAAA,CAAQ,KAAA,EAAO,KAAA,CAAM,IAAA,CAAK,OAAO,CAAA;AAErC,IAAA,IAAI,OAAA,CAAQ,IAAA,EAAM,KAAA,CAAM,IAAA,CAAK,MAAM,CAAA;AACnC,IAAA,IAAI,OAAA,CAAQ,SAAA,EAAW,KAAA,CAAM,IAAA,CAAK,WAAW,CAAA;AAC7C,IAAA,IAAI,OAAA,CAAQ,QAAA,EAAU,KAAA,CAAM,IAAA,CAAK,UAAU,CAAA;AAC3C,IAAA,IAAI,OAAA,CAAQ,IAAA,EAAM,KAAA,CAAM,IAAA,CAAK,MAAM,CAAA;AACnC,IAAA,IAAI,OAAA,CAAQ,QAAA,EAAU,KAAA,CAAM,IAAA,CAAK,UAAU,CAAA;AAAA,EAC7C;AAEA,EAAA,OAAO,GAAA,GAAM,KAAA,CAAM,IAAA,CAAK,GAAG,CAAA;AAC7B;AAeO,IAAM,KAAA,GAAQ;AAAA;AAAA,EAEnB,KAAA,EAAO,CAAC,OAAA,KAAmC,eAAA,CAAgB,SAAS,OAAO,CAAA;AAAA;AAAA,EAG3E,MAAA,EAAQ,CAAC,OAAA,KAAmC,eAAA,CAAgB,UAAU,OAAO,CAAA;AAAA;AAAA,EAG7E,QAAA,EAAU,CAAC,OAAA,KAAmC,eAAA,CAAgB,YAAY,OAAO,CAAA;AAAA;AAAA,EAGjF,QAAA,EAAU,CAAC,OAAA,KAAmC,eAAA,CAAgB,YAAY,OAAO,CAAA;AAAA;AAAA,EAGjF,KAAA,EAAO,CAAC,OAAA,KAAmC,eAAA,CAAgB,SAAS,OAAO,CAAA;AAAA;AAAA,EAG3E,IAAA,EAAM,CAAC,OAAA,KAAmC,eAAA,CAAgB,QAAQ,OAAO,CAAA;AAAA;AAAA,EAGzE,OAAA,EAAS,CAAC,OAAA,KAAmC,eAAA,CAAgB,WAAW,OAAO,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/E,KAAA,EAAO,CAAC,IAAA,EAAA,GAAiB,SAAA,KACvB,GAAA,GAAM,CAAC,IAAA,EAAM,GAAG,SAAS,CAAA,CAAE,IAAA,CAAK,GAAG;AACvC;AAKO,IAAM,YAAA,GAAe;AAAA;AAAA,EAE1B,aAAa,KAAA,CAAM,KAAA,CAAM,EAAE,IA
AA,EAAM,MAAM,CAAA;AAAA;AAAA,EAGvC,MAAM,KAAA,CAAM,QAAA,CAAS,EAAE,IAAA,EAAM,MAAM,CAAA;AAAA;AAAA,EAGnC,aAAA,EAAe,MAAM,QAAA,CAAS,EAAE,MAAM,IAAA,EAAM,KAAA,EAAO,MAAM,CAAA;AAAA;AAAA,EAGzD,QAAQ,KAAA,CAAM,QAAA,CAAS,EAAE,MAAA,EAAQ,MAAM,CAAA;AAAA;AAAA,EAGvC,WAAW,KAAA,CAAM,KAAA,CAAM,EAAE,SAAA,EAAW,MAAM,CAAA;AAAA;AAAA,EAG1C,UAAU,KAAA,CAAM,QAAA,CAAS,EAAE,QAAA,EAAU,MAAM,CAAA;AAAA;AAAA,EAG3C,UAAU,KAAA,CAAM,IAAA,CAAK,EAAE,IAAA,EAAM,MAAM,CAAA;AAAA;AAAA,EAGnC,UAAU,KAAA,CAAM,QAAA,CAAS,EAAE,QAAA,EAAU,MAAM,CAAA;AAAA;AAAA,EAG3C,MAAA,EAAQ,MAAM,KAAA,CAAM,EAAE,QAAQ,IAAA,EAAM,KAAA,EAAO,MAAM;AACnD;AAyBO,IAAM,iBAAA,GAAN,cAAgC,eAAA,CAAgB;AAAA,EAIrD,YAAY,MAAA,EAAyB;AAEnC,IAAA,MAAM,KAAA,GACJ,MAAA,CAAO,KAAA,KACN,MAAA,CAAO,IAAA,GACJ,gBAAgB,MAAA,CAAO,IAAA,EAAM,MAAA,CAAO,YAAY,CAAA,GAChD,WAAA,CAAA;AAEN,IAAA,KAAA,CAAM;AAAA,MACJ,KAAA;AAAA,MACA,GAAG;AAAA,KACJ,CAAA;AAdH,IAAA,aAAA,CAAA,IAAA,EAAA,UAAA,EAAwB,WAAA,CAAA;AACxB,IAAA,aAAA,CAAA,IAAA,EAAQ,QAAA,CAAA;AAeN,IAAA,IAAI,CAAC,OAAO,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,mDAAmD,CAAA;AAAA,IACrE;AAEA,IAAA,IAAA,CAAK,MAAA,GAAS,IAAIA,MAAAA,CAAO;AAAA,MACvB,QAAQ,MAAA,CAAO,MAAA;AAAA,MACf,OAAA,EAAS,OAAO,OAAA,IAAW;AAAA,KAC5B,CAAA;AAAA,EACH;AAAA,EAEA,MAAM,YAAA,CACJ,QAAA,EACA,OAAA,EACsB;AACtB,IAAA,MAAM,KAAA,GAAQ,OAAA,EAAS,KAAA,IAAS,IAAA,CAAK,MAAA,CAAO,KAAA;AAC5C,IAAA,MAAM,WAAA,GAAc,OAAA,EAAS,WAAA,IAAe,IAAA,CAAK,MAAA,CAAO,WAAA;AACxD,IAAA,MAAM,SAAA,GAAY,OAAA,EAAS,SAAA,IAAa,IAAA,CAAK,MAAA,CAAO,SAAA;AAEpD,IAAA,MAAM,WAAA,GAAc,OAAA,EAAS,MAAA,GACzB,CAAC,EAAE,IAAA,EAAM,QAAA,EAAmB,OAAA,EAAS,OAAA,CAAQ,MAAA,EAAO,EAAG,GAAG,QAAQ,CAAA,GAClE,QAAA;AAEJ,IAAA,MAAM,WAAW,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,MACzD,KAAA;AAAA,MACA,QAAA,EAAU,WAAA,CAAY,GAAA,CAAI,CAAC,CAAA,MAAO;AAAA,QAChC,MAAM,CAAA,CAAE,IAAA;AAAA,QACR,SAAS,CAAA,CAAE;AAAA,OACb,CAAE,CAAA;AAAA,MACF,WAAA;AAAA,MACA,UAAA,EAAY;AAAA,KACb,CAAA;AAED,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AAEjC,IAAA,OAAO;AAAA,MACL,OAAA,EAAS,MAAA,CAAO,OAAA,CAAQ,OAAA,IAAW,EAAA;AAAA,MACnC,OAAO,QAAA,CAAS,KAAA;AAAA,MAChB,KAAA,EAAO,SAAS,
KAAA,GACZ;AAAA,QACE,YAAA,EAAc,SAAS,KAAA,CAAM,aAAA;AAAA,QAC7B,gBAAA,EAAkB,SAAS,KAAA,CAAM,iBAAA;AAAA,QACjC,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,OAC9B,GACA,MAAA;AAAA,MACJ,YAAA,EAAc,OAAO,aAAA,IAAiB;AAAA,KACxC;AAAA,EACF;AACF","file":"index.mjs","sourcesContent":["/**\n * JSON parsing utilities\n */\n\n/**\n * Extract JSON from LLM response (handles markdown code blocks)\n */\nexport function extractJson<T = unknown>(text: string): T {\n let jsonStr = text.trim();\n\n // Remove markdown code blocks\n if (jsonStr.startsWith('```json')) {\n jsonStr = jsonStr.slice(7);\n } else if (jsonStr.startsWith('```')) {\n jsonStr = jsonStr.slice(3);\n }\n\n if (jsonStr.endsWith('```')) {\n jsonStr = jsonStr.slice(0, -3);\n }\n\n jsonStr = jsonStr.trim();\n\n // Try to find JSON object or array\n const jsonStart = jsonStr.search(/[\\[{]/);\n const jsonEndBracket = jsonStr.lastIndexOf(']');\n const jsonEndBrace = jsonStr.lastIndexOf('}');\n const jsonEnd = Math.max(jsonEndBracket, jsonEndBrace);\n\n if (jsonStart !== -1 && jsonEnd !== -1) {\n jsonStr = jsonStr.slice(jsonStart, jsonEnd + 1);\n }\n\n try {\n return JSON.parse(jsonStr) as T;\n } catch (error) {\n throw new Error(\n `Failed to parse JSON from LLM response: ${error instanceof Error ? error.message : 'Unknown error'}\\n\\nResponse:\\n${text}`\n );\n }\n}\n\n// Re-export from translator validator for backwards compatibility\nexport { validateJsonKeys } from '../translator/validator';\n","/**\n * Base LLM provider\n */\n\nimport type {\n LLMClient,\n LLMConfig,\n LLMMessage,\n LLMProvider,\n LLMRequestOptions,\n LLMResponse,\n} from '../types';\nimport { extractJson } from '../utils/json';\n\nexport abstract class BaseLLMProvider implements LLMClient {\n abstract provider: LLMProvider;\n\n protected config: Required<\n Pick<LLMConfig, 'model' | 'temperature' | 'maxTokens'>\n > & LLMConfig;\n\n constructor(config: LLMConfig) {\n this.config = {\n model: config.model ?? 'gpt-4o-mini',\n temperature: config.temperature ?? 
0.1,\n maxTokens: config.maxTokens ?? 4096,\n ...config,\n };\n }\n\n /**\n * Send chat messages (implemented by provider)\n */\n abstract chatMessages(\n messages: LLMMessage[],\n options?: LLMRequestOptions\n ): Promise<LLMResponse>;\n\n /**\n * Send single chat message\n */\n async chat(prompt: string, options?: LLMRequestOptions): Promise<LLMResponse> {\n const messages: LLMMessage[] = [];\n\n if (options?.system) {\n messages.push({ role: 'system', content: options.system });\n }\n\n messages.push({ role: 'user', content: prompt });\n\n return this.chatMessages(messages, options);\n }\n\n /**\n * Get JSON response\n */\n async json<T = unknown>(\n prompt: string,\n options?: LLMRequestOptions\n ): Promise<T> {\n const systemPrompt = `${options?.system ?? ''}\\n\\nRespond with valid JSON only. No markdown, no explanations.`.trim();\n\n const response = await this.chat(prompt, {\n ...options,\n system: systemPrompt,\n temperature: options?.temperature ?? 0,\n });\n\n return extractJson<T>(response.content);\n }\n\n /**\n * Get JSON response with schema hint\n */\n async jsonSchema<T = unknown>(\n prompt: string,\n schema: string,\n options?: LLMRequestOptions\n ): Promise<T> {\n const systemPrompt = `${options?.system ?? ''}\\n\\nRespond with valid JSON matching this schema:\\n${schema}\\n\\nNo markdown, no explanations.`.trim();\n\n const response = await this.chat(prompt, {\n ...options,\n system: systemPrompt,\n temperature: options?.temperature ?? 0,\n });\n\n return extractJson<T>(response.content);\n }\n}\n","/**\n * OpenAI provider\n */\n\nimport OpenAI from 'openai';\nimport type {\n LLMConfig,\n LLMMessage,\n LLMProvider,\n LLMRequestOptions,\n LLMResponse,\n} from '../types';\nimport { BaseLLMProvider } from './base';\n\nexport class OpenAIProvider extends BaseLLMProvider {\n provider: LLMProvider = 'openai';\n private client: OpenAI;\n\n constructor(config: LLMConfig) {\n super({\n model: config.model ?? 
'gpt-4o-mini',\n ...config,\n });\n\n if (!config.apiKey) {\n throw new Error('OpenAI API key is required');\n }\n\n this.client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseUrl,\n });\n }\n\n async chatMessages(\n messages: LLMMessage[],\n options?: LLMRequestOptions\n ): Promise<LLMResponse> {\n const model = options?.model ?? this.config.model;\n const temperature = options?.temperature ?? this.config.temperature;\n const maxTokens = options?.maxTokens ?? this.config.maxTokens;\n\n // Add system message if provided\n const allMessages = options?.system\n ? [{ role: 'system' as const, content: options.system }, ...messages]\n : messages;\n\n const response = await this.client.chat.completions.create({\n model,\n messages: allMessages.map((m) => ({\n role: m.role,\n content: m.content,\n })),\n temperature,\n max_tokens: maxTokens,\n });\n\n const choice = response.choices[0];\n\n return {\n content: choice.message.content ?? '',\n model: response.model,\n usage: response.usage\n ? {\n promptTokens: response.usage.prompt_tokens,\n completionTokens: response.usage.completion_tokens,\n totalTokens: response.usage.total_tokens,\n }\n : undefined,\n finishReason: choice.finish_reason ?? undefined,\n };\n }\n}\n","/**\n * Anthropic provider\n *\n * Uses OpenAI-compatible API endpoint for simplicity.\n * Anthropic provides OpenAI-compatible endpoint at api.anthropic.com\n */\n\nimport OpenAI from 'openai';\nimport type {\n LLMConfig,\n LLMMessage,\n LLMProvider,\n LLMRequestOptions,\n LLMResponse,\n} from '../types';\nimport { BaseLLMProvider } from './base';\n\nexport class AnthropicProvider extends BaseLLMProvider {\n provider: LLMProvider = 'anthropic';\n private client: OpenAI;\n\n constructor(config: LLMConfig) {\n super({\n model: config.model ?? 
'claude-3-5-haiku-latest',\n ...config,\n });\n\n if (!config.apiKey) {\n throw new Error('Anthropic API key is required');\n }\n\n // Use native Anthropic SDK approach via fetch\n this.client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseUrl ?? 'https://api.anthropic.com/v1',\n defaultHeaders: {\n 'anthropic-version': '2023-06-01',\n 'x-api-key': config.apiKey,\n },\n });\n }\n\n async chatMessages(\n messages: LLMMessage[],\n options?: LLMRequestOptions\n ): Promise<LLMResponse> {\n const model = options?.model ?? this.config.model;\n const temperature = options?.temperature ?? this.config.temperature;\n const maxTokens = options?.maxTokens ?? this.config.maxTokens;\n\n // Anthropic uses system separately\n const systemMessage = options?.system\n ? options.system\n : messages.find((m) => m.role === 'system')?.content;\n\n const userMessages = messages.filter((m) => m.role !== 'system');\n\n // Direct API call for Anthropic\n const response = await fetch('https://api.anthropic.com/v1/messages', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'x-api-key': this.config.apiKey!,\n 'anthropic-version': '2023-06-01',\n },\n body: JSON.stringify({\n model,\n max_tokens: maxTokens,\n temperature,\n system: systemMessage,\n messages: userMessages.map((m) => ({\n role: m.role,\n content: m.content,\n })),\n }),\n });\n\n if (!response.ok) {\n const error = await response.text();\n throw new Error(`Anthropic API error: ${response.status} ${error}`);\n }\n\n const data = await response.json();\n\n return {\n content: data.content?.[0]?.text ?? '',\n model: data.model,\n usage: data.usage\n ? {\n promptTokens: data.usage.input_tokens,\n completionTokens: data.usage.output_tokens,\n totalTokens: data.usage.input_tokens + data.usage.output_tokens,\n }\n : undefined,\n finishReason: data.stop_reason ?? 
undefined,\n };\n }\n}\n","/**\n * SDKRouter provider\n *\n * Uses https://llm.sdkrouter.com - OpenAI-compatible LLM router\n * with smart model aliases like @smart, @cheap, @balanced\n */\n\nimport OpenAI from 'openai';\nimport type {\n LLMConfig,\n LLMMessage,\n LLMProvider,\n LLMRequestOptions,\n LLMResponse,\n} from '../types';\nimport { BaseLLMProvider } from './base';\n\n/** SDKRouter base URL */\nexport const SDKROUTER_BASE_URL = 'https://llm.sdkrouter.com/v1';\n\n/**\n * Model tier presets\n *\n * @example '@cheap', '@smart', '@balanced'\n */\nexport type ModelTier =\n | 'cheap' // Cheapest available\n | 'budget' // Budget-friendly\n | 'standard' // Standard tier\n | 'balanced' // Best quality/price ratio\n | 'smart' // Highest quality\n | 'fast' // Lowest latency\n | 'premium'; // Top-tier\n\n/**\n * Model capabilities (features)\n *\n * @example '@cheap+vision', '@smart+tools+json'\n */\nexport type ModelCapability =\n | 'vision' // Image understanding\n | 'tools' // Function/tool calling\n | 'agents' // Agent tool calling (verified)\n | 'json' // JSON mode\n | 'streaming' // Streaming support\n | 'long' // Long context (128k+)\n | 'image'; // Image generation\n\n/**\n * Model categories (use cases)\n *\n * @example '@balanced+code', '@smart+reasoning'\n */\nexport type ModelCategory =\n | 'code' // Code generation\n | 'vision' // Vision & images\n | 'reasoning' // Reasoning & math\n | 'agents' // Tool use & agents\n | 'creative' // Creative writing\n | 'chat' // Conversational\n | 'analysis'; // Analysis & extraction\n\n/**\n * Model alias builder options\n */\nexport interface ModelOptions {\n // Capabilities\n vision?: boolean;\n tools?: boolean;\n agents?: boolean;\n json?: boolean;\n streaming?: boolean;\n long?: boolean;\n image?: boolean;\n // Categories\n code?: boolean;\n reasoning?: boolean;\n creative?: boolean;\n chat?: boolean;\n analysis?: boolean;\n}\n\n/**\n * Build model alias string\n *\n * @example\n * ```ts\n * buildModelAlias('smart') 
// '@smart'\n * buildModelAlias('cheap', { vision: true }) // '@cheap+vision'\n * buildModelAlias('balanced', { code: true, tools: true }) // '@balanced+code+tools'\n * ```\n */\nexport function buildModelAlias(tier: ModelTier, options?: ModelOptions): string {\n const parts: string[] = [tier];\n\n if (options) {\n // Capabilities (order matters for consistency)\n if (options.vision) parts.push('vision');\n if (options.tools) parts.push('tools');\n if (options.agents) parts.push('agents');\n if (options.json) parts.push('json');\n if (options.streaming) parts.push('streaming');\n if (options.long) parts.push('long');\n if (options.image) parts.push('image');\n // Categories\n if (options.code) parts.push('code');\n if (options.reasoning) parts.push('reasoning');\n if (options.creative) parts.push('creative');\n if (options.chat) parts.push('chat');\n if (options.analysis) parts.push('analysis');\n }\n\n return '@' + parts.join('+');\n}\n\n/**\n * Model alias builder with IDE autocomplete\n *\n * @example\n * ```ts\n * import { Model } from '@djangocfg/llm'\n *\n * Model.smart() // '@smart'\n * Model.cheap({ vision: true }) // '@cheap+vision'\n * Model.balanced({ code: true }) // '@balanced+code'\n * Model.fast({ tools: true, json: true }) // '@fast+tools+json'\n * ```\n */\nexport const Model = {\n /** Cheapest available model */\n cheap: (options?: ModelOptions): string => buildModelAlias('cheap', options),\n\n /** Budget-friendly with decent quality */\n budget: (options?: ModelOptions): string => buildModelAlias('budget', options),\n\n /** Standard tier */\n standard: (options?: ModelOptions): string => buildModelAlias('standard', options),\n\n /** Best quality/price ratio (recommended) */\n balanced: (options?: ModelOptions): string => buildModelAlias('balanced', options),\n\n /** Highest quality model */\n smart: (options?: ModelOptions): string => buildModelAlias('smart', options),\n\n /** Lowest latency model */\n fast: (options?: ModelOptions): string => 
buildModelAlias('fast', options),\n\n /** Top-tier premium model */\n premium: (options?: ModelOptions): string => buildModelAlias('premium', options),\n\n /**\n * Build alias from raw strings (escape hatch)\n *\n * @example Model.alias('cheap', 'vision', 'code') // '@cheap+vision+code'\n */\n alias: (tier: string, ...modifiers: string[]): string =>\n '@' + [tier, ...modifiers].join('+'),\n} as const;\n\n/**\n * Pre-built model aliases for common use cases\n */\nexport const ModelPresets = {\n /** Translation: cheap + json mode */\n translation: Model.cheap({ json: true }),\n\n /** Code generation: balanced + code */\n code: Model.balanced({ code: true }),\n\n /** Code with tools: balanced + code + tools */\n codeWithTools: Model.balanced({ code: true, tools: true }),\n\n /** Vision: balanced + vision */\n vision: Model.balanced({ vision: true }),\n\n /** Reasoning: smart + reasoning */\n reasoning: Model.smart({ reasoning: true }),\n\n /** Creative writing: balanced + creative */\n creative: Model.balanced({ creative: true }),\n\n /** Fast chat: fast + chat */\n fastChat: Model.fast({ chat: true }),\n\n /** Analysis: balanced + analysis */\n analysis: Model.balanced({ analysis: true }),\n\n /** Agents: smart + agents + tools */\n agents: Model.smart({ agents: true, tools: true }),\n} as const;\n\n/**\n * SDKRouter provider config\n */\nexport interface SDKRouterConfig extends LLMConfig {\n /** Model tier (shortcut for building alias) */\n tier?: ModelTier;\n /** Model options for alias building */\n modelOptions?: ModelOptions;\n}\n\n/**\n * SDKRouter LLM provider\n *\n * Uses OpenAI-compatible API at https://llm.sdkrouter.com\n *\n * @example\n * ```ts\n * const llm = new SDKRouterProvider({\n * apiKey: process.env.SDKROUTER_API_KEY,\n * model: Model.balanced({ code: true })\n * })\n * ```\n */\nexport class SDKRouterProvider extends BaseLLMProvider {\n provider: LLMProvider = 'sdkrouter';\n private client: OpenAI;\n\n constructor(config: SDKRouterConfig) {\n // 
Build model alias if tier provided\n const model =\n config.model ??\n (config.tier\n ? buildModelAlias(config.tier, config.modelOptions)\n : '@balanced');\n\n super({\n model,\n ...config,\n });\n\n if (!config.apiKey) {\n throw new Error('SDKRouter API key is required (SDKROUTER_API_KEY)');\n }\n\n this.client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseUrl ?? SDKROUTER_BASE_URL,\n });\n }\n\n async chatMessages(\n messages: LLMMessage[],\n options?: LLMRequestOptions\n ): Promise<LLMResponse> {\n const model = options?.model ?? this.config.model;\n const temperature = options?.temperature ?? this.config.temperature;\n const maxTokens = options?.maxTokens ?? this.config.maxTokens;\n\n const allMessages = options?.system\n ? [{ role: 'system' as const, content: options.system }, ...messages]\n : messages;\n\n const response = await this.client.chat.completions.create({\n model,\n messages: allMessages.map((m) => ({\n role: m.role,\n content: m.content,\n })),\n temperature,\n max_tokens: maxTokens,\n });\n\n const choice = response.choices[0];\n\n return {\n content: choice.message.content ?? '',\n model: response.model,\n usage: response.usage\n ? {\n promptTokens: response.usage.prompt_tokens,\n completionTokens: response.usage.completion_tokens,\n totalTokens: response.usage.total_tokens,\n }\n : undefined,\n finishReason: choice.finish_reason ?? undefined,\n };\n }\n}\n"]}
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
import { a as LLMClient, b as LLMProvider, L as LLMConfig, f as LLMMessage, c as LLMRequestOptions, d as LLMResponse } from './types-D6lazgm1.js';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Base LLM provider
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
declare abstract class BaseLLMProvider implements LLMClient {
|
|
8
|
+
abstract provider: LLMProvider;
|
|
9
|
+
protected config: Required<Pick<LLMConfig, 'model' | 'temperature' | 'maxTokens'>> & LLMConfig;
|
|
10
|
+
constructor(config: LLMConfig);
|
|
11
|
+
/**
|
|
12
|
+
* Send chat messages (implemented by provider)
|
|
13
|
+
*/
|
|
14
|
+
abstract chatMessages(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse>;
|
|
15
|
+
/**
|
|
16
|
+
* Send single chat message
|
|
17
|
+
*/
|
|
18
|
+
chat(prompt: string, options?: LLMRequestOptions): Promise<LLMResponse>;
|
|
19
|
+
/**
|
|
20
|
+
* Get JSON response
|
|
21
|
+
*/
|
|
22
|
+
json<T = unknown>(prompt: string, options?: LLMRequestOptions): Promise<T>;
|
|
23
|
+
/**
|
|
24
|
+
* Get JSON response with schema hint
|
|
25
|
+
*/
|
|
26
|
+
jsonSchema<T = unknown>(prompt: string, schema: string, options?: LLMRequestOptions): Promise<T>;
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* SDKRouter provider
|
|
31
|
+
*
|
|
32
|
+
* Uses https://llm.sdkrouter.com - OpenAI-compatible LLM router
|
|
33
|
+
* with smart model aliases like @smart, @cheap, @balanced
|
|
34
|
+
*/
|
|
35
|
+
|
|
36
|
+
/** SDKRouter base URL */
|
|
37
|
+
declare const SDKROUTER_BASE_URL = "https://llm.sdkrouter.com/v1";
|
|
38
|
+
/**
|
|
39
|
+
* Model tier presets
|
|
40
|
+
*
|
|
41
|
+
* @example '@cheap', '@smart', '@balanced'
|
|
42
|
+
*/
|
|
43
|
+
type ModelTier = 'cheap' | 'budget' | 'standard' | 'balanced' | 'smart' | 'fast' | 'premium';
|
|
44
|
+
/**
|
|
45
|
+
* Model capabilities (features)
|
|
46
|
+
*
|
|
47
|
+
* @example '@cheap+vision', '@smart+tools+json'
|
|
48
|
+
*/
|
|
49
|
+
type ModelCapability = 'vision' | 'tools' | 'agents' | 'json' | 'streaming' | 'long' | 'image';
|
|
50
|
+
/**
|
|
51
|
+
* Model categories (use cases)
|
|
52
|
+
*
|
|
53
|
+
* @example '@balanced+code', '@smart+reasoning'
|
|
54
|
+
*/
|
|
55
|
+
type ModelCategory = 'code' | 'vision' | 'reasoning' | 'agents' | 'creative' | 'chat' | 'analysis';
|
|
56
|
+
/**
|
|
57
|
+
* Model alias builder options
|
|
58
|
+
*/
|
|
59
|
+
interface ModelOptions {
|
|
60
|
+
vision?: boolean;
|
|
61
|
+
tools?: boolean;
|
|
62
|
+
agents?: boolean;
|
|
63
|
+
json?: boolean;
|
|
64
|
+
streaming?: boolean;
|
|
65
|
+
long?: boolean;
|
|
66
|
+
image?: boolean;
|
|
67
|
+
code?: boolean;
|
|
68
|
+
reasoning?: boolean;
|
|
69
|
+
creative?: boolean;
|
|
70
|
+
chat?: boolean;
|
|
71
|
+
analysis?: boolean;
|
|
72
|
+
}
|
|
73
|
+
/**
|
|
74
|
+
* Build model alias string
|
|
75
|
+
*
|
|
76
|
+
* @example
|
|
77
|
+
* ```ts
|
|
78
|
+
* buildModelAlias('smart') // '@smart'
|
|
79
|
+
* buildModelAlias('cheap', { vision: true }) // '@cheap+vision'
|
|
80
|
+
* buildModelAlias('balanced', { code: true, tools: true }) // '@balanced+code+tools'
|
|
81
|
+
* ```
|
|
82
|
+
*/
|
|
83
|
+
declare function buildModelAlias(tier: ModelTier, options?: ModelOptions): string;
|
|
84
|
+
/**
|
|
85
|
+
* Model alias builder with IDE autocomplete
|
|
86
|
+
*
|
|
87
|
+
* @example
|
|
88
|
+
* ```ts
|
|
89
|
+
* import { Model } from '@djangocfg/llm'
|
|
90
|
+
*
|
|
91
|
+
* Model.smart() // '@smart'
|
|
92
|
+
* Model.cheap({ vision: true }) // '@cheap+vision'
|
|
93
|
+
* Model.balanced({ code: true }) // '@balanced+code'
|
|
94
|
+
* Model.fast({ tools: true, json: true }) // '@fast+tools+json'
|
|
95
|
+
* ```
|
|
96
|
+
*/
|
|
97
|
+
declare const Model: {
|
|
98
|
+
/** Cheapest available model */
|
|
99
|
+
readonly cheap: (options?: ModelOptions) => string;
|
|
100
|
+
/** Budget-friendly with decent quality */
|
|
101
|
+
readonly budget: (options?: ModelOptions) => string;
|
|
102
|
+
/** Standard tier */
|
|
103
|
+
readonly standard: (options?: ModelOptions) => string;
|
|
104
|
+
/** Best quality/price ratio (recommended) */
|
|
105
|
+
readonly balanced: (options?: ModelOptions) => string;
|
|
106
|
+
/** Highest quality model */
|
|
107
|
+
readonly smart: (options?: ModelOptions) => string;
|
|
108
|
+
/** Lowest latency model */
|
|
109
|
+
readonly fast: (options?: ModelOptions) => string;
|
|
110
|
+
/** Top-tier premium model */
|
|
111
|
+
readonly premium: (options?: ModelOptions) => string;
|
|
112
|
+
/**
|
|
113
|
+
* Build alias from raw strings (escape hatch)
|
|
114
|
+
*
|
|
115
|
+
* @example Model.alias('cheap', 'vision', 'code') // '@cheap+vision+code'
|
|
116
|
+
*/
|
|
117
|
+
readonly alias: (tier: string, ...modifiers: string[]) => string;
|
|
118
|
+
};
|
|
119
|
+
/**
|
|
120
|
+
* Pre-built model aliases for common use cases
|
|
121
|
+
*/
|
|
122
|
+
declare const ModelPresets: {
|
|
123
|
+
/** Translation: cheap + json mode */
|
|
124
|
+
readonly translation: string;
|
|
125
|
+
/** Code generation: balanced + code */
|
|
126
|
+
readonly code: string;
|
|
127
|
+
/** Code with tools: balanced + code + tools */
|
|
128
|
+
readonly codeWithTools: string;
|
|
129
|
+
/** Vision: balanced + vision */
|
|
130
|
+
readonly vision: string;
|
|
131
|
+
/** Reasoning: smart + reasoning */
|
|
132
|
+
readonly reasoning: string;
|
|
133
|
+
/** Creative writing: balanced + creative */
|
|
134
|
+
readonly creative: string;
|
|
135
|
+
/** Fast chat: fast + chat */
|
|
136
|
+
readonly fastChat: string;
|
|
137
|
+
/** Analysis: balanced + analysis */
|
|
138
|
+
readonly analysis: string;
|
|
139
|
+
/** Agents: smart + agents + tools */
|
|
140
|
+
readonly agents: string;
|
|
141
|
+
};
|
|
142
|
+
/**
|
|
143
|
+
* SDKRouter provider config
|
|
144
|
+
*/
|
|
145
|
+
interface SDKRouterConfig extends LLMConfig {
|
|
146
|
+
/** Model tier (shortcut for building alias) */
|
|
147
|
+
tier?: ModelTier;
|
|
148
|
+
/** Model options for alias building */
|
|
149
|
+
modelOptions?: ModelOptions;
|
|
150
|
+
}
|
|
151
|
+
/**
|
|
152
|
+
* SDKRouter LLM provider
|
|
153
|
+
*
|
|
154
|
+
* Uses OpenAI-compatible API at https://llm.sdkrouter.com
|
|
155
|
+
*
|
|
156
|
+
* @example
|
|
157
|
+
* ```ts
|
|
158
|
+
* const llm = new SDKRouterProvider({
|
|
159
|
+
* apiKey: process.env.SDKROUTER_API_KEY,
|
|
160
|
+
* model: Model.balanced({ code: true })
|
|
161
|
+
* })
|
|
162
|
+
* ```
|
|
163
|
+
*/
|
|
164
|
+
declare class SDKRouterProvider extends BaseLLMProvider {
|
|
165
|
+
provider: LLMProvider;
|
|
166
|
+
private client;
|
|
167
|
+
constructor(config: SDKRouterConfig);
|
|
168
|
+
chatMessages(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse>;
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
export { BaseLLMProvider as B, Model as M, SDKROUTER_BASE_URL as S, ModelPresets as a, buildModelAlias as b, type ModelTier as c, type ModelCapability as d, type ModelCategory as e, type ModelOptions as f, SDKRouterProvider as g, type SDKRouterConfig as h };
|