@almadar/llm 1.0.16 → 1.0.17
- package/dist/{chunk-56H37PN5.js → chunk-CAY6ZWBO.js} +43 -5
- package/dist/chunk-CAY6ZWBO.js.map +1 -0
- package/dist/client.d.ts +12 -2
- package/dist/client.js +5 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +5 -1
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
- package/src/client.ts +50 -4
- package/src/index.ts +2 -0
- package/dist/chunk-56H37PN5.js.map +0 -1
package/dist/{chunk-56H37PN5.js → chunk-CAY6ZWBO.js}
CHANGED
@@ -103,9 +103,23 @@ var PROVIDER_CONFIGS = {
     }
     return {
       apiKey,
-      baseUrl: "https://api.moonshot.
+      baseUrl: "https://api.moonshot.ai/v1",
       defaultModel: "kimi-k2.5"
     };
+  },
+  openrouter: () => {
+    const apiKey = process.env.OPEN_ROUTER_API_KEY;
+    if (!apiKey) {
+      throw new Error(
+        "OPEN_ROUTER_API_KEY environment variable is not set. Please set it in your .env file or environment."
+      );
+    }
+    return {
+      apiKey,
+      baseUrl: "https://openrouter.ai/api/v1",
+      defaultModel: "qwen/qwen-2.5-72b-instruct"
+      // Default to Qwen 2.5
+    };
   }
 };
 var DEEPSEEK_MODELS = {
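With the new openrouter entry, an OpenRouter-backed client is configured entirely from the environment, the same way the existing providers are. A minimal usage sketch, assuming nothing beyond what the hunk above shows (the import path is the package root):

import { LLMClient } from "@almadar/llm";

// Throws unless OPEN_ROUTER_API_KEY is set; otherwise the provider config
// pins the OpenRouter base URL and the documented default model.
const client = new LLMClient({ provider: "openrouter" });
// -> baseUrl "https://openrouter.ai/api/v1", model "qwen/qwen-2.5-72b-instruct"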
@@ -129,11 +143,22 @@ var ANTHROPIC_MODELS = {
 var KIMI_MODELS = {
   K2_5: "kimi-k2.5"
 };
+var OPENROUTER_MODELS = {
+  // Qwen models - JSON/structured data specialists
+  QWEN_2_5_72B: "qwen/qwen-2.5-72b-instruct",
+  QWEN_2_5_CODER_32B: "qwen/qwen-2.5-coder-32b-instruct",
+  QWEN_3_235B: "qwen/qwen3-235b-a22b",
+  // Llama models - agentic workhorses
+  LLAMA_3_3_70B: "meta-llama/llama-3.3-70b-instruct",
+  LLAMA_3_1_405B: "meta-llama/llama-3.1-405b-instruct",
+  LLAMA_4_MAVERICK: "meta-llama/llama-4-maverick",
+  LLAMA_4_SCOUT: "meta-llama/llama-4-scout"
+};
 var DEFAULT_TEMPERATURE = 0.3;
 var LLMClient = class {
   constructor(options = {}) {
     this.provider = options.provider || "openai";
-    this.temperature = options.temperature ?? DEFAULT_TEMPERATURE;
+    this.temperature = options.temperature ?? (this.provider === "kimi" ? 0.6 : DEFAULT_TEMPERATURE);
     this.streaming = options.streaming ?? false;
     this.providerConfig = PROVIDER_CONFIGS[this.provider]();
     this.modelName = options.model || this.providerConfig.defaultModel;
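Two behavioral changes fall out of this hunk: OPENROUTER_MODELS gives callers named constants for the supported model ids, and Kimi clients now default to temperature 0.6 rather than DEFAULT_TEMPERATURE (0.3). A sketch, grounded only in the code above:

import { LLMClient, OPENROUTER_MODELS } from "@almadar/llm";

// Pick a specific OpenRouter model by constant instead of a raw string:
const coder = new LLMClient({
  provider: "openrouter",
  model: OPENROUTER_MODELS.QWEN_2_5_CODER_32B,
});

// Provider-dependent default temperature after this change:
new LLMClient({ provider: "kimi" });    // temperature 0.6
new LLMClient({ provider: "openai" });  // temperature 0.3 (DEFAULT_TEMPERATURE)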
@@ -207,13 +232,17 @@ var LLMClient = class {
     const useCompletionTokens = this.usesMaxCompletionTokens();
     const tokenConfig = maxTokens ? useCompletionTokens ? { modelKwargs: { max_completion_tokens: maxTokens } } : { maxTokens } : {};
     const timeout = this.provider === "deepseek" ? 6e5 : void 0;
+    const isKimi = this.provider === "kimi";
+    const thinkingConfig = isKimi ? { modelKwargs: { thinking: { type: "disabled" } } } : {};
+    const effectiveTemp = isKimi ? 0.6 : temperature;
     return new ChatOpenAI({
       apiKey: this.providerConfig.apiKey,
       model: this.modelName,
-      temperature: useCompletionTokens ? void 0 :
+      temperature: useCompletionTokens ? void 0 : effectiveTemp,
       streaming: this.streaming,
       timeout,
       ...tokenConfig,
+      ...thinkingConfig,
       configuration: {
         apiKey: this.providerConfig.apiKey,
         ...this.providerConfig.baseUrl ? { baseURL: this.providerConfig.baseUrl } : {}
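Net effect of the three added lines: for the Kimi provider, createModel always disables Moonshot's thinking mode and pins temperature to 0.6, even when the caller passed its own temperature. Roughly, the arguments that reach ChatOpenAI for a Kimi client look like this (a sketch of the logic above, not output captured from the package):

// provider === "kimi"
{
  model: "kimi-k2.5",
  temperature: 0.6,                                 // effectiveTemp wins over options.temperature
  modelKwargs: { thinking: { type: "disabled" } },  // spread in via thinkingConfig
  // ...apiKey, streaming, timeout, tokenConfig as before
}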
@@ -611,12 +640,20 @@ function createKimiClient(options) {
     ...options
   });
 }
+function createOpenRouterClient(options) {
+  return new LLMClient({
+    provider: "openrouter",
+    model: OPENROUTER_MODELS.QWEN_2_5_72B,
+    ...options
+  });
+}
 
 export {
   DEEPSEEK_MODELS,
   OPENAI_MODELS,
   ANTHROPIC_MODELS,
   KIMI_MODELS,
+  OPENROUTER_MODELS,
   LLMClient,
   getSharedLLMClient,
   resetSharedLLMClient,
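createOpenRouterClient follows the same shape as the other create*Client factories: it fixes the provider, supplies QWEN_2_5_72B as the default model, and spreads caller options last so the model can still be overridden. Usage sketch:

import { createOpenRouterClient, OPENROUTER_MODELS } from "@almadar/llm";

const qwen = createOpenRouterClient();  // default qwen/qwen-2.5-72b-instruct
const llama = createOpenRouterClient({ model: OPENROUTER_MODELS.LLAMA_3_3_70B });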
@@ -628,6 +665,7 @@ export {
   createDeepSeekClient,
   createOpenAIClient,
   createAnthropicClient,
-  createKimiClient
+  createKimiClient,
+  createOpenRouterClient
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-CAY6ZWBO.js.map
package/dist/chunk-CAY6ZWBO.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/client.ts"],"sourcesContent":[…],"mappings":"…","names":[]}
[single-line generated source map; sourcesContent inlines the full updated src/client.ts, including the new openrouter provider]
package/dist/client.d.ts
CHANGED
@@ -8,7 +8,7 @@ declare class CachingChatAnthropic extends ChatAnthropic {
     invoke(input: MessageLike[] | string, options?: Partial<ChatAnthropicCallOptions>): Promise<MessageLike>;
 }
 type ChatModel = ChatOpenAI | CachingChatAnthropic;
-type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi';
+type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi' | 'openrouter';
 interface ProviderConfig {
     apiKey: string;
     baseUrl?: string;
@@ -78,6 +78,15 @@ declare const ANTHROPIC_MODELS: {
 declare const KIMI_MODELS: {
     readonly K2_5: "kimi-k2.5";
 };
+declare const OPENROUTER_MODELS: {
+    readonly QWEN_2_5_72B: "qwen/qwen-2.5-72b-instruct";
+    readonly QWEN_2_5_CODER_32B: "qwen/qwen-2.5-coder-32b-instruct";
+    readonly QWEN_3_235B: "qwen/qwen3-235b-a22b";
+    readonly LLAMA_3_3_70B: "meta-llama/llama-3.3-70b-instruct";
+    readonly LLAMA_3_1_405B: "meta-llama/llama-3.1-405b-instruct";
+    readonly LLAMA_4_MAVERICK: "meta-llama/llama-4-maverick";
+    readonly LLAMA_4_SCOUT: "meta-llama/llama-4-scout";
+};
 declare class LLMClient {
     private model;
     private rateLimiter;
@@ -132,5 +141,6 @@ declare function createDeepSeekClient(options?: Partial<Omit<LLMClientOptions, '
 declare function createOpenAIClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
 declare function createAnthropicClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
 declare function createKimiClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+declare function createOpenRouterClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
 
-export { ANTHROPIC_MODELS, type CacheAwareLLMCallOptions, type CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, type LLMCallOptions, LLMClient, type LLMClientOptions, type LLMFinishReason, type LLMProvider, type LLMResponse, type LLMUsage, OPENAI_MODELS, type ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient };
+export { ANTHROPIC_MODELS, type CacheAwareLLMCallOptions, type CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, type LLMCallOptions, LLMClient, type LLMClientOptions, type LLMFinishReason, type LLMProvider, type LLMResponse, type LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, type ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient };
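Because LLMProvider is a closed string union, downstream TypeScript picks up the new member without casts; this line would not have compiled against 1.0.16:

import type { LLMProvider } from "@almadar/llm";

const provider: LLMProvider = "openrouter"; // rejected by 1.0.16's typings, accepted in 1.0.17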
package/dist/client.js
CHANGED
@@ -4,18 +4,20 @@ import {
   KIMI_MODELS,
   LLMClient,
   OPENAI_MODELS,
+  OPENROUTER_MODELS,
   createAnthropicClient,
   createCreativeClient,
   createDeepSeekClient,
   createFixClient,
   createKimiClient,
   createOpenAIClient,
+  createOpenRouterClient,
   createRequirementsClient,
   getAvailableProvider,
   getSharedLLMClient,
   isProviderAvailable,
   resetSharedLLMClient
-} from "./chunk-
+} from "./chunk-CAY6ZWBO.js";
 import "./chunk-WM7QVK2Z.js";
 import "./chunk-MJS33AAS.js";
 export {
@@ -24,12 +26,14 @@ export {
   KIMI_MODELS,
   LLMClient,
   OPENAI_MODELS,
+  OPENROUTER_MODELS,
   createAnthropicClient,
   createCreativeClient,
   createDeepSeekClient,
   createFixClient,
   createKimiClient,
   createOpenAIClient,
+  createOpenRouterClient,
   createRequirementsClient,
   getAvailableProvider,
   getSharedLLMClient,
package/dist/index.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { LLMFinishReason, LLMClient } from './client.js';
-export { ANTHROPIC_MODELS, CacheAwareLLMCallOptions, CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, LLMCallOptions, LLMClientOptions, LLMProvider, LLMResponse, LLMUsage, OPENAI_MODELS, ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient } from './client.js';
+export { ANTHROPIC_MODELS, CacheAwareLLMCallOptions, CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, LLMCallOptions, LLMClientOptions, LLMProvider, LLMResponse, LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient } from './client.js';
 export { R as RateLimiter, a as RateLimiterOptions, T as TokenTracker, b as TokenUsage, g as getGlobalRateLimiter, c as getGlobalTokenTracker, r as resetGlobalRateLimiter, d as resetGlobalTokenTracker } from './rate-limiter-9XAWfHwe.js';
 export { autoCloseJson, extractJsonFromText, isValidJson, parseJsonResponse, safeParseJson } from './json-parser.js';
 import { z } from 'zod';
package/dist/index.js
CHANGED
@@ -4,18 +4,20 @@ import {
   KIMI_MODELS,
   LLMClient,
   OPENAI_MODELS,
+  OPENROUTER_MODELS,
   createAnthropicClient,
   createCreativeClient,
   createDeepSeekClient,
   createFixClient,
   createKimiClient,
   createOpenAIClient,
+  createOpenRouterClient,
   createRequirementsClient,
   getAvailableProvider,
   getSharedLLMClient,
   isProviderAvailable,
   resetSharedLLMClient
-} from "./chunk-
+} from "./chunk-CAY6ZWBO.js";
 import {
   autoCloseJson,
   extractJsonFromText,
@@ -439,6 +441,7 @@ export {
   KIMI_MODELS,
   LLMClient,
   OPENAI_MODELS,
+  OPENROUTER_MODELS,
   RateLimiter,
   STRUCTURED_OUTPUT_MODELS,
   StructuredOutputClient,
@@ -452,6 +455,7 @@ export {
   createFixClient,
   createKimiClient,
   createOpenAIClient,
+  createOpenRouterClient,
   createRequirementsClient,
   detectTruncation,
   extractJsonFromText,
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"sources":["../src/truncation-detector.ts","../src/continuation.ts"],"sourcesContent":[…],"mappings":"…"}
[single-line generated source map; sourcesContent inlines the full truncation-detector.ts and continuation.ts sources]
userPrompt: contPrompt,\n maxTokens,\n });\n\n console.log(\n `[Continuation] Continuation response: ${contResponse.raw.length} chars, finish_reason: ${contResponse.finishReason}`,\n );\n\n const cleanedContResponse =\n extractJsonFromText(contResponse.raw) || contResponse.raw;\n rawResponse = mergeResponses(rawResponse, cleanedContResponse);\n\n truncation = detectTruncation(rawResponse, contResponse.finishReason);\n }\n\n if (\n continuationCount >= maxContinuations &&\n truncation.isTruncated\n ) {\n console.warn(\n `[Continuation] Reached max continuations (${maxContinuations}), attempting to salvage...`,\n );\n warnings.push(\n `Reached max continuations - some content may be incomplete`,\n );\n wasSalvaged = true;\n }\n\n const cleanedResponse =\n extractJsonFromText(rawResponse) || rawResponse;\n let data: T;\n\n try {\n if (isValidJson(cleanedResponse)) {\n data = JSON.parse(cleanedResponse) as T;\n } else {\n const closed = autoCloseJson(cleanedResponse);\n data = JSON.parse(closed) as T;\n if (!wasSalvaged) {\n warnings.push('Response required auto-closing of JSON brackets');\n }\n }\n } catch (parseError) {\n const salvaged = salvagePartialResponse<T>(cleanedResponse);\n if (salvaged) {\n data = salvaged;\n wasSalvaged = true;\n warnings.push('Response was salvaged from partial data');\n } else {\n throw new Error(\n `Failed to parse response after ${continuationCount} continuations: ${parseError}`,\n );\n }\n }\n\n if (schema) {\n try {\n data = schema.parse(data);\n } catch (validationError) {\n console.warn(\n '[Continuation] Schema validation failed:',\n validationError,\n );\n warnings.push(`Schema validation issue: ${validationError}`);\n }\n }\n\n console.log(\n `[Continuation] Complete. Continuations: ${continuationCount}, Warnings: ${warnings.length}`,\n );\n\n return {\n data,\n raw: rawResponse,\n continuationCount,\n warnings,\n wasSalvaged,\n };\n } catch (error) {\n console.error('[Continuation] Error during LLM call:', error);\n throw error;\n }\n}\n\nexport function buildGenericContinuationPrompt(\n context: string,\n partialResponse: string,\n attempt: number,\n maxAttempts: number = DEFAULT_MAX_CONTINUATIONS,\n): string {\n return `## CONTINUATION REQUEST (Attempt ${attempt}/${maxAttempts})\n\nYour previous response was truncated. Continue generating from where you left off.\n\n### ORIGINAL CONTEXT\n${context}\n\n### WHAT YOU GENERATED SO FAR\n\\`\\`\\`json\n${partialResponse}\n\\`\\`\\`\n\n### INSTRUCTIONS\n1. Continue from EXACTLY where the response was cut off\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly\n4. 
Do NOT wrap your response in markdown code blocks\n\nContinue generating now:`;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCO,SAAS,iBACd,UACA,cACkB;AAClB,MAAI,iBAAiB,UAAU;AAC7B,UAAMA,eAAc,cAAc,QAAQ;AAC1C,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsBA,aAAY;AAAA,MAClC,oBAAoBA,aAAY;AAAA,IAClC;AAAA,EACF;AAEA,MAAI;AACF,SAAK,MAAM,QAAQ;AACnB,WAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,EAC9C,QAAQ;AAAA,EAER;AAEA,MAAI,iBAAiB,UAAU,iBAAiB,MAAM;AACpD,UAAM,UAAU,SAAS,KAAK;AAE9B,UAAM,eACJ,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,KAAK,KACtB,QAAQ,KAAK,OAAO,KACpB,QAAQ,KAAK,OAAO;AAEtB,QAAI,cAAc;AAChB,YAAMA,eAAc,cAAc,QAAQ;AAC1C,aAAO;AAAA,QACL,aAAa;AAAA,QACb,QAAQ;AAAA,QACR,gBAAgB;AAAA,QAChB,qBAAqB,wBAAwB,QAAQ;AAAA,QACrD,sBAAsBA,aAAY;AAAA,QAClC,oBAAoBA,aAAY;AAAA,MAClC;AAAA,IACF;AAEA,QAAI;AACF,YAAM,SAAS,cAAc,OAAO;AACpC,WAAK,MAAM,MAAM;AACjB,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C,QAAQ;AACN,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C;AAAA,EACF;AAEA,QAAM,cAAc,cAAc,QAAQ;AAC1C,MACE,YAAY,uBAAuB,KACnC,YAAY,qBAAqB,GACjC;AACA,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsB,YAAY;AAAA,MAClC,oBAAoB,YAAY;AAAA,IAClC;AAAA,EACF;AAEA,SAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAC9C;AAMA,SAAS,cAAc,MAOrB;AACA,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,eAAe;AACnB,MAAI,gBAAgB;AACpB,MAAI,aAAa;AACjB,MAAI,cAAc;AAElB,aAAW,QAAQ,MAAM;AACvB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,IACJ;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,sBAAsB,KAAK,IAAI,GAAG,eAAe,aAAa;AAAA,IAC9D,oBAAoB,KAAK,IAAI,GAAG,aAAa,WAAW;AAAA,EAC1D;AACF;AAEO,SAAS,wBAAwB,MAA8B;AACpE,QAAM,aAAa,cAAc,IAAI;AACrC,MAAI;AACF,WAAO,KAAK,MAAM,UAAU;AAAA,EAC9B,QAAQ;AAAA,EAER;AAEA,QAAM,UAAU,KAAK,KAAK;AAE1B,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,oBAAoB,6BAA6B,OAAO;AAC9D,QAAI,oBAAoB,GAAG;AACzB,YAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,UAAI;AACF,eAAO,KAAK,MAAM,MAAM;AAAA,MAC1B,QAAQ;AAAA,MAER;AAAA,IACF;AAAA,EACF;AAEA,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,SAAS,cAAc,OAAO;AACpC,QAAI;AACF,aAAO,KAAK,MAAM,MAAM;AAAA,IAC1B,QAAQ;AACN,YAAM,oBAAoB,+BAA+B,OAAO;AAChE,UAAI,oBAAoB,GAAG;AACzB,cAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,YAAI;AACF,iBAAO,KAAK,MAAM,MAAM;AAAA,QAC1B,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAEA,SAAS,6BAA6B,MAAsB;AAC1D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,yBAAyB;AAE7B,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AACA,UAAI,UAAU,GAAG;AACf,iCAAyB,IAAI;AAAA,MAC/B;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,+BAAyB;AAAA,IAC3B;AAAA,EACF;AAEA,SAAO,yBAAyB,IAAI,yBAAyB;AAC/D;AAEA,SAAS,+BAA+B,MAAsB;AAC5D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,iBAAiB;AAErB,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,uBAAiB;AAAA,IACnB;AAAA,EACF;AAEA,SAAO,iBAAiB,IAAI,iBAAiB;AAC/C;AAE
O,SAAS,kBAAkB,SAA0B;AAC1D,QAAM,UAAU,QAAQ,KAAK;AAC7B,MAAI,CAAC,QAAS,QAAO;AAErB,QAAM,WAAW,cAAc,OAAO;AACtC,MACE,SAAS,uBAAuB,KAChC,SAAS,qBAAqB,GAC9B;AACA,WAAO;AAAA,EACT;AAEA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,aAAW,WAAW,eAAe;AACnC,QAAI,QAAQ,KAAK,OAAO,EAAG,QAAO;AAAA,EACpC;AAEA,SAAO;AACT;;;ACnQA,IAAM,qBAAqB;AAC3B,IAAM,4BAA4B;AAMlC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAapC,SAAS,eACd,UACA,cACQ;AACR,QAAM,cAAc,SAAS,QAAQ;AACrC,QAAM,cAAc,aAAa,UAAU;AAE3C,MAAI,cAAc,YACf,QAAQ,iBAAiB,EAAE,EAC3B,QAAQ,YAAY,EAAE,EACtB,KAAK;AAER,MAAI,YAAY,WAAW,GAAG,GAAG;AAC/B,QAAI;AACF,YAAM,aAAa,KAAK,MAAM,cAAc,WAAW,CAAC;AACxD,YAAM,OAAO,OAAO,KAAK,UAAU;AACnC,UAAI,KAAK,WAAW,KAAK,MAAM,QAAQ,WAAW,KAAK,CAAC,CAAC,CAAC,GAAG;AAC3D,sBAAc,WAAW,KAAK,CAAC,CAAC,EAC7B,IAAI,CAAC,SAAkB,KAAK,UAAU,IAAI,CAAC,EAC3C,KAAK,KAAK;AAAA,MACf;AAAA,IACF,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,MAAI,YAAY,WAAW,GAAG,KAAK,YAAY,WAAW,GAAG,GAAG;AAC9D,WAAO,cAAc;AAAA,EACvB;AAEA,QAAM,oBAAoB,cAAc,KAAK,WAAW;AACxD,QAAM,sBAAsB,YAAY,KAAK,WAAW;AAExD,MAAI,qBAAqB,qBAAqB;AAC5C,WAAO,cAAc,QAAQ;AAAA,EAC/B;AAEA,SAAO,cAAc;AACvB;AAEO,SAAS,uBAA0B,aAA+B;AACvE,UAAQ,KAAK,uDAAuD;AAEpE,MAAI;AACF,UAAM,kBAAkB,oBAAoB,WAAW,KAAK;AAC5D,UAAM,SAAS,cAAc,eAAe;AAC5C,UAAM,SAAS,KAAK,MAAM,MAAM;AAChC,YAAQ,IAAI,uDAAuD;AACnE,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,8CAA8C,KAAK;AAAA,EACnE;AAEA,SAAO;AACT;AAMA,eAAsB,qBACpB,SACgC;AAChC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,YAAY;AAAA,IACZ,mBAAmB;AAAA,IACnB;AAAA,IACA,2BAA2B;AAAA,EAC7B,IAAI;AAEJ,MAAI,cAAc;AAClB,MAAI,oBAAoB;AACxB,QAAM,WAAqB,CAAC;AAC5B,MAAI,cAAc;AAElB,UAAQ,IAAI,4DAA4D;AACxE,UAAQ;AAAA,IACN,8BAA8B,SAAS,wBAAwB,gBAAgB;AAAA,EACjF;AAEA,MAAI;AACF,UAAM,WAAW,MAAM,OAAO,oBAAoB;AAAA,MAChD;AAAA,MACA;AAAA,MACA;AAAA,IACF,CAAC;AAED,kBAAc,oBAAoB,SAAS,GAAG,KAAK,SAAS;AAE5D,YAAQ;AAAA,MACN,oCAAoC,YAAY,MAAM,0BAA0B,SAAS,YAAY;AAAA,IACvG;AAEA,QAAI,aAAa,iBAAiB,aAAa,SAAS,YAAY;AAEpE,WAAO,WAAW,eAAe,oBAAoB,kBAAkB;AACrE;AACA,YAAM,aAAa,uBAAuB,WAAW,MAAM,0BAA0B,iBAAiB,IAAI,gBAAgB;AAC1H,cAAQ,IAAI,kBAAkB,UAAU,EAAE;AAC1C,eAAS,KAAK,UAAU;AAExB,YAAM,aAAa;AAAA,QACjB;AAAA,QACA;AAAA,MACF;AAEA,YAAM,eAAe,MAAM,OAAO,oBAAoB;AAAA,QACpD,cAAc;AAAA,QACd,YAAY;AAAA,QACZ;AAAA,MACF,CAAC;AAED,cAAQ;AAAA,QACN,yCAAyC,aAAa,IAAI,MAAM,0BAA0B,aAAa,YAAY;AAAA,MACrH;AAEA,YAAM,sBACJ,oBAAoB,aAAa,GAAG,KAAK,aAAa;AACxD,oBAAc,eAAe,aAAa,mBAAmB;AAE7D,mBAAa,iBAAiB,aAAa,aAAa,YAAY;AAAA,IACtE;AAEA,QACE,qBAAqB,oBACrB,WAAW,aACX;AACA,cAAQ;AAAA,QACN,6CAA6C,gBAAgB;AAAA,MAC/D;AACA,eAAS;AAAA,QACP;AAAA,MACF;AACA,oBAAc;AAAA,IAChB;AAEA,UAAM,kBACJ,oBAAoB,WAAW,KAAK;AACtC,QAAI;AAEJ,QAAI;AACF,UAAI,YAAY,eAAe,GAAG;AAChC,eAAO,KAAK,MAAM,eAAe;AAAA,MACnC,OAAO;AACL,cAAM,SAAS,cAAc,eAAe;AAC5C,eAAO,KAAK,MAAM,MAAM;AACxB,YAAI,CAAC,aAAa;AAChB,mBAAS,KAAK,iDAAiD;AAAA,QACjE;AAAA,MACF;AAAA,IACF,SAAS,YAAY;AACnB,YAAM,WAAW,uBAA0B,eAAe;AAC1D,UAAI,UAAU;AACZ,eAAO;AACP,sBAAc;AACd,iBAAS,KAAK,yCAAyC;AAAA,MACzD,OAAO;AACL,cAAM,IAAI;AAAA,UACR,kCAAkC,iBAAiB,mBAAmB,UAAU;AAAA,QAClF;AAAA,MACF;AAAA,IACF;AAEA,QAAI,QAAQ;AACV,UAAI;AACF,eAAO,OAAO,MAAM,IAAI;AAAA,MAC1B,SAAS,iBAAiB;AACxB,gBAAQ;AAAA,UACN;AAAA,UACA;AAAA,QACF;AACA,iBAAS,KAAK,4BAA4B,eAAe,EAAE;AAAA,MAC7D;AAAA,IACF;AAEA,YAAQ;AAAA,MACN,2CAA2C,iBAAiB,eAAe,SAAS,MAAM;AAAA,IAC5F;AAEA,WAAO;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,yCAAyC,KAAK;AAC5D,UAAM;AAAA,EACR;AACF;AAEO,SAAS,+BACd,SACA,iBACA,SACA,cAAsB,2BACd;AACR,SAAO,oCAAoC,OAAO,IAAI,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA,EAKjE,OAAO;AAAA;AAAA;AAAA;AAAA,EAIP,eAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAUjB;","names":["bracketInfo"]}
package/package.json CHANGED

package/src/client.ts CHANGED
@@ -100,7 +100,7 @@ type ChatModel = ChatOpenAI | CachingChatAnthropic;
 // Types
 // ============================================================================
 
-export type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi';
+export type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi' | 'openrouter';
 
 export interface ProviderConfig {
   apiKey: string;
@@ -215,10 +215,24 @@ const PROVIDER_CONFIGS: Record<LLMProvider, () => ProviderConfig> = {
     }
     return {
       apiKey,
-      baseUrl: 'https://api.moonshot.cn/v1',
+      baseUrl: 'https://api.moonshot.ai/v1',
       defaultModel: 'kimi-k2.5',
     };
   },
+  openrouter: () => {
+    const apiKey = process.env.OPEN_ROUTER_API_KEY;
+    if (!apiKey) {
+      throw new Error(
+        'OPEN_ROUTER_API_KEY environment variable is not set. ' +
+          'Please set it in your .env file or environment.',
+      );
+    }
+    return {
+      apiKey,
+      baseUrl: 'https://openrouter.ai/api/v1',
+      defaultModel: 'qwen/qwen-2.5-72b-instruct', // Default to Qwen 2.5
+    };
+  },
 };
 
 export const DEEPSEEK_MODELS = {
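The new `openrouter` factory mirrors the existing providers: the key is read from `OPEN_ROUTER_API_KEY`, and the config function only runs when a client is actually constructed with that provider, so a missing key fails fast at construction time rather than on the first call. A minimal usage sketch (this assumes `LLMClient` is exported from the package root, and the key value is a placeholder):

```typescript
import { LLMClient } from '@almadar/llm';

// PROVIDER_CONFIGS.openrouter() throws at construction if the key is unset.
// Placeholder value for illustration only:
process.env.OPEN_ROUTER_API_KEY = 'sk-or-v1-...';

// With no model given, the provider default 'qwen/qwen-2.5-72b-instruct' applies,
// and requests go to the OpenAI-compatible https://openrouter.ai/api/v1 base URL.
const client = new LLMClient({ provider: 'openrouter' });
```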
@@ -246,6 +260,19 @@ export const KIMI_MODELS = {
   K2_5: 'kimi-k2.5',
 } as const;
 
+export const OPENROUTER_MODELS = {
+  // Qwen models - JSON/structured data specialists
+  QWEN_2_5_72B: 'qwen/qwen-2.5-72b-instruct',
+  QWEN_2_5_CODER_32B: 'qwen/qwen-2.5-coder-32b-instruct',
+  QWEN_3_235B: 'qwen/qwen3-235b-a22b',
+
+  // Llama models - agentic workhorses
+  LLAMA_3_3_70B: 'meta-llama/llama-3.3-70b-instruct',
+  LLAMA_3_1_405B: 'meta-llama/llama-3.1-405b-instruct',
+  LLAMA_4_MAVERICK: 'meta-llama/llama-4-maverick',
+  LLAMA_4_SCOUT: 'meta-llama/llama-4-scout',
+} as const;
+
 const DEFAULT_TEMPERATURE = 0.3;
 
 // ============================================================================

@@ -264,7 +291,9 @@ export class LLMClient {
 
   constructor(options: LLMClientOptions = {}) {
     this.provider = options.provider || 'openai';
-    this.temperature = options.temperature ?? DEFAULT_TEMPERATURE;
+    // Kimi: 0.6 when thinking disabled (our default), 1.0 when thinking enabled
+    this.temperature = options.temperature ??
+      (this.provider === 'kimi' ? 0.6 : DEFAULT_TEMPERATURE);
     this.streaming = options.streaming ?? false;
 
     this.providerConfig = PROVIDER_CONFIGS[this.provider]();

@@ -381,13 +410,20 @@ export class LLMClient {
 
     const timeout = this.provider === 'deepseek' ? 600000 : undefined;
 
+    // Kimi-k2.5: disable thinking to avoid reasoning_content issues with tool calls
+    // When thinking is disabled, temperature must be 0.6 (not 1.0)
+    const isKimi = this.provider === 'kimi';
+    const thinkingConfig = isKimi ? { modelKwargs: { thinking: { type: 'disabled' } } } : {};
+    const effectiveTemp = isKimi ? 0.6 : temperature;
+
     return new ChatOpenAI({
       apiKey: this.providerConfig.apiKey,
       model: this.modelName,
-      temperature: useCompletionTokens ? undefined : temperature,
+      temperature: useCompletionTokens ? undefined : effectiveTemp,
       streaming: this.streaming,
       timeout,
       ...tokenConfig,
+      ...thinkingConfig,
       configuration: {
         apiKey: this.providerConfig.apiKey,
         ...(this.providerConfig.baseUrl

@@ -965,3 +1001,13 @@ export function createKimiClient(
     ...options,
   });
 }
+
+export function createOpenRouterClient(
+  options?: Partial<Omit<LLMClientOptions, 'provider'>>,
+): LLMClient {
+  return new LLMClient({
+    provider: 'openrouter',
+    model: OPENROUTER_MODELS.QWEN_2_5_72B,
+    ...options,
+  });
+}
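Taken together, the client.ts changes alter behavior in two places: OpenRouter becomes constructible through a dedicated factory, and Kimi's sampling temperature is pinned. A short sketch of both, as the hunks above read (imports assume the package-root re-exports shown in the src/index.ts diff below):

```typescript
import { createOpenRouterClient, OPENROUTER_MODELS, LLMClient } from '@almadar/llm';

// Defaults to OPENROUTER_MODELS.QWEN_2_5_72B when no model is passed:
const qwen = createOpenRouterClient();

// Any slug from the new table (or a raw OpenRouter model string) can be supplied:
const llama = createOpenRouterClient({ model: OPENROUTER_MODELS.LLAMA_4_MAVERICK });

// Kimi: the constructor now defaults temperature to 0.6, and createModel()
// additionally forces effectiveTemp = 0.6 and sends thinking: { type: 'disabled' }
// through modelKwargs, so even an explicit temperature is overridden at request time:
const kimi = new LLMClient({ provider: 'kimi', temperature: 0.2 }); // requests still use 0.6
```

Note the asymmetry: for other providers an explicit `temperature` option wins via `??`, but for Kimi the `effectiveTemp` computed in `createModel()` ignores the configured value entirely, matching the "must be 0.6" comment in the diff.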
package/src/index.ts CHANGED

@@ -18,12 +18,14 @@ export {
   createOpenAIClient,
   createAnthropicClient,
   createKimiClient,
+  createOpenRouterClient,
   getAvailableProvider,
   isProviderAvailable,
   DEEPSEEK_MODELS,
   OPENAI_MODELS,
   ANTHROPIC_MODELS,
   KIMI_MODELS,
+  OPENROUTER_MODELS,
   type LLMProvider,
   type ProviderConfig,
   type LLMClientOptions,
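Because `OPENROUTER_MODELS` is declared `as const` and is now re-exported from the package root, the slugs carry literal string types. A small sketch of type-safe model selection (the `OpenRouterModel` alias is illustrative, not something the package exports):

```typescript
import { OPENROUTER_MODELS } from '@almadar/llm';

// Illustrative helper type; the package does not export this alias itself.
type OpenRouterModel = (typeof OPENROUTER_MODELS)[keyof typeof OPENROUTER_MODELS];
// = 'qwen/qwen-2.5-72b-instruct' | 'qwen/qwen-2.5-coder-32b-instruct' | ...

function pickOpenRouterModel(model: OpenRouterModel): OpenRouterModel {
  return model; // accepts only the seven slugs published in this release
}

pickOpenRouterModel(OPENROUTER_MODELS.QWEN_2_5_CODER_32B); // OK
// pickOpenRouterModel('gpt-4o'); // type error: not a slug from the table
```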
package/dist/chunk-56H37PN5.js.map DELETED

@@ -1 +0,0 @@
{"version":3,"sources":["../src/client.ts"],"sourcesContent":["/**\n * Shared LLM Client\n *\n * Multi-provider LLM client with:\n * - OpenAI, DeepSeek, Anthropic, and Kimi support\n * - Anthropic prompt caching (CachingChatAnthropic)\n * - Rate limiting and retry logic\n * - Token tracking\n * - Structured output parsing with Zod\n *\n * @packageDocumentation\n */\n\nimport { ChatOpenAI } from '@langchain/openai';\nimport {\n ChatAnthropic,\n type ChatAnthropicCallOptions,\n} from '@langchain/anthropic';\nimport Anthropic from '@anthropic-ai/sdk';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\nimport { parseJsonResponse } from './json-parser.js';\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype MessageLike = any;\n\n// ============================================================================\n// Caching Chat Anthropic Wrapper\n// ============================================================================\n\nclass CachingChatAnthropic extends ChatAnthropic {\n async invoke(\n input: MessageLike[] | string,\n options?: Partial<ChatAnthropicCallOptions>,\n ): Promise<MessageLike> {\n let messages: MessageLike[];\n if (typeof input === 'string') {\n messages = [{ role: 'user', content: input }];\n } else {\n messages = input;\n }\n\n const transformedMessages = messages.map((msg: MessageLike) => {\n const msgType = msg._getType?.() || msg.role || 'unknown';\n const isSystem = msgType === 'system';\n\n if (!isSystem) return msg;\n\n if (typeof msg.content === 'string') {\n return {\n ...msg,\n content: [\n {\n type: 'text',\n text: msg.content,\n cache_control: { type: 'ephemeral' },\n },\n ],\n };\n }\n\n if (Array.isArray(msg.content)) {\n const blocks = msg.content as Array<{\n type?: string;\n text?: string;\n cache_control?: unknown;\n }>;\n const hasAnyCacheControl = blocks.some((b) => b.cache_control);\n\n if (!hasAnyCacheControl) {\n const transformedBlocks = blocks.map((block, idx) => {\n if (block.type === 'text' && idx === blocks.length - 1) {\n return {\n ...block,\n cache_control: { type: 'ephemeral' },\n };\n }\n return block;\n });\n\n return { ...msg, content: transformedBlocks };\n }\n }\n\n return msg;\n });\n\n return super.invoke(transformedMessages, options);\n }\n}\n\ntype ChatModel = ChatOpenAI | CachingChatAnthropic;\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi';\n\nexport interface ProviderConfig {\n apiKey: string;\n baseUrl?: string;\n defaultModel: string;\n}\n\nexport interface LLMClientOptions {\n provider?: LLMProvider;\n model?: string;\n temperature?: number;\n streaming?: boolean;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface LLMCallOptions<T = unknown> {\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxRetries?: number;\n retryWithContext?: boolean;\n maxTokens?: number;\n skipSchemaValidation?: boolean;\n temperature?: number;\n}\n\nexport interface CacheableBlock {\n type: 'text';\n text: string;\n cache_control?: { type: 'ephemeral' };\n}\n\nexport interface CacheAwareLLMCallOptions<T = unknown>\n extends LLMCallOptions<T> {\n systemBlocks?: CacheableBlock[];\n userBlocks?: 
CacheableBlock[];\n rawText?: boolean;\n}\n\nexport interface LLMUsage {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n}\n\nexport type LLMFinishReason =\n | 'stop'\n | 'length'\n | 'content_filter'\n | 'tool_calls'\n | null;\n\nexport interface LLMResponse<T> {\n data: T;\n raw: string;\n finishReason: LLMFinishReason;\n usage: LLMUsage | null;\n}\n\n// ============================================================================\n// Provider Configuration\n// ============================================================================\n\nconst PROVIDER_CONFIGS: Record<LLMProvider, () => ProviderConfig> = {\n openai: () => {\n const apiKey = process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return { apiKey, baseUrl: undefined, defaultModel: 'gpt-4o' };\n },\n deepseek: () => {\n const apiKey = process.env.DEEPSEEK_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'DEEPSEEK_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.deepseek.com/v1',\n defaultModel: 'deepseek-chat',\n };\n },\n anthropic: () => {\n const apiKey = process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'ANTHROPIC_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: undefined,\n defaultModel: 'claude-sonnet-4-5-20250929',\n };\n },\n kimi: () => {\n const apiKey = process.env.KIMI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'KIMI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.moonshot.cn/v1',\n defaultModel: 'kimi-k2.5',\n };\n },\n};\n\nexport const DEEPSEEK_MODELS = {\n CHAT: 'deepseek-chat',\n CODER: 'deepseek-coder',\n REASONER: 'deepseek-reasoner',\n} as const;\n\nexport const OPENAI_MODELS = {\n GPT4O: 'gpt-4o',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4_TURBO: 'gpt-4-turbo',\n GPT35_TURBO: 'gpt-3.5-turbo',\n GPT_5_1: 'gpt-5.1',\n} as const;\n\nexport const ANTHROPIC_MODELS = {\n CLAUDE_SONNET_4_5: 'claude-sonnet-4-5-20250929',\n CLAUDE_SONNET_4: 'claude-sonnet-4-20250514',\n CLAUDE_OPUS_4_5: 'claude-opus-4-5-20250929',\n CLAUDE_3_5_HAIKU: 'claude-3-5-haiku-20241022',\n} as const;\n\nexport const KIMI_MODELS = {\n K2_5: 'kimi-k2.5',\n} as const;\n\nconst DEFAULT_TEMPERATURE = 0.3;\n\n// ============================================================================\n// LLM Client\n// ============================================================================\n\nexport class LLMClient {\n private model: ChatModel;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private modelName: string;\n private provider: LLMProvider;\n private providerConfig: ProviderConfig;\n private temperature: number;\n private streaming: boolean;\n\n constructor(options: LLMClientOptions = {}) {\n this.provider = options.provider || 'openai';\n this.temperature = options.temperature ?? DEFAULT_TEMPERATURE;\n this.streaming = options.streaming ?? 
false;\n\n this.providerConfig = PROVIDER_CONFIGS[this.provider]();\n this.modelName = options.model || this.providerConfig.defaultModel;\n\n const keyPreview = this.providerConfig.apiKey.slice(-4);\n console.log(\n `[LLMClient] Provider: ${this.provider}, Model: ${this.modelName}, Key: ****${keyPreview}`,\n );\n if (this.providerConfig.baseUrl) {\n console.log(\n `[LLMClient] Using custom base URL: ${this.providerConfig.baseUrl}`,\n );\n }\n\n this.model = this.createModel();\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.modelName)\n : null;\n }\n\n private usesMaxCompletionTokens(): boolean {\n const model = this.modelName.toLowerCase();\n return (\n model.startsWith('o1') ||\n model.startsWith('gpt-5') ||\n model.includes('o1-') ||\n model.includes('o3')\n );\n }\n\n private createModel(options?: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n const maxTokens = options?.maxTokens;\n const temperature = options?.temperature ?? this.temperature;\n\n if (this.provider === 'anthropic') {\n return new CachingChatAnthropic({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature,\n streaming: this.streaming,\n maxTokens: maxTokens || 8192,\n callbacks: [\n {\n handleLLMEnd: (output) => {\n const generation = output.generations?.[0]?.[0];\n const usage = (\n generation as unknown as {\n message?: {\n usage_metadata?: {\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n input_tokens?: number;\n output_tokens?: number;\n };\n };\n }\n )?.message?.usage_metadata;\n\n if (usage) {\n const cacheCreated = usage.cache_creation_input_tokens ?? 0;\n const cacheRead = usage.cache_read_input_tokens ?? 0;\n const inputTokens = usage.input_tokens ?? 0;\n const outputTokens = usage.output_tokens ?? 0;\n\n if (cacheCreated > 0) {\n console.log(\n `[LLMClient:Anthropic] Cache WRITE: ${cacheCreated} tokens cached`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + inputTokens)) * 100,\n );\n console.log(\n `[LLMClient:Anthropic] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreated === 0 && cacheRead === 0 && inputTokens > 0) {\n if (inputTokens < 500) {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens (likely cached)`,\n );\n } else {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens`,\n );\n }\n }\n }\n },\n },\n ],\n });\n }\n\n const useCompletionTokens = this.usesMaxCompletionTokens();\n\n const tokenConfig = maxTokens\n ? useCompletionTokens\n ? { modelKwargs: { max_completion_tokens: maxTokens } }\n : { maxTokens }\n : {};\n\n const timeout = this.provider === 'deepseek' ? 600000 : undefined;\n\n return new ChatOpenAI({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature: useCompletionTokens ? undefined : temperature,\n streaming: this.streaming,\n timeout,\n ...tokenConfig,\n configuration: {\n apiKey: this.providerConfig.apiKey,\n ...(this.providerConfig.baseUrl\n ? 
{ baseURL: this.providerConfig.baseUrl }\n : {}),\n },\n });\n }\n\n private getModelWithOptions(options: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n return this.createModel(options);\n }\n\n getProvider(): LLMProvider {\n return this.provider;\n }\n\n getModelName(): string {\n return this.modelName;\n }\n\n getModel(): ChatModel {\n return this.model;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? null;\n }\n\n async call<T>(options: LLMCallOptions<T>): Promise<T> {\n const response = await this.callWithMetadata(options);\n return response.data;\n }\n\n async callWithMetadata<T>(options: LLMCallOptions<T>): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n schema,\n maxRetries = 2,\n retryWithContext = true,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n } = options;\n\n let currentPrompt = userPrompt;\n let lastError: Error | null = null;\n\n console.log(\n `[LLMClient:call] Starting call to ${this.provider}/${this.modelName}`,\n );\n console.log(`[LLMClient:call] Prompt length: ${userPrompt.length} chars`);\n if (maxTokens) {\n console.log(`[LLMClient:call] Max tokens: ${maxTokens}`);\n }\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n const attemptStartTime = Date.now();\n\n const result = await this.rateLimiter.execute(async () => {\n console.log(`[LLMClient:call] Invoking model...`);\n const invokeStartTime = Date.now();\n\n const modelToUse =\n maxTokens || temperature !== undefined\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const response = await modelToUse.invoke([\n { role: 'system', content: systemPrompt },\n { role: 'user', content: currentPrompt },\n ]);\n\n console.log(\n `[LLMClient:call] Model responded in ${Date.now() - invokeStartTime}ms`,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) +\n (usageMeta.output_tokens || 0),\n };\n console.log(\n `[LLMClient:call] Tokens used: ${usage.promptTokens} in, ${usage.completionTokens} out`,\n );\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n if (finishReason === 'length') {\n console.warn(\n `[LLMClient:call] Response truncated (finish_reason=length)`,\n );\n }\n\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n console.log(\n `[LLMClient:call] Response length: ${content.length} chars, finish_reason: ${finishReason}`,\n );\n\n return { content, finishReason, usage };\n });\n\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1} completed in ${Date.now() - attemptStartTime}ms, parsing response...`,\n );\n\n const parsed = skipSchemaValidation\n ? (parseJsonResponse(result.content, undefined) as T)\n : parseJsonResponse(result.content, schema);\n console.log(\n `[LLMClient:call] Response parsed successfully${skipSchemaValidation ? 
' (schema validation skipped)' : ''}`,\n );\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? error : new Error(String(error));\n console.error(\n `[LLMClient:call] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n console.error(`[LLMClient:call] Rate limit error, not retrying`);\n throw lastError;\n }\n\n if (attempt < maxRetries && retryWithContext) {\n console.log(`[LLMClient:call] Will retry with error context`);\n currentPrompt =\n `${userPrompt}\\n\\n` +\n `[Previous attempt failed with: ${lastError.message}]\\n` +\n `Please output valid JSON that matches the expected schema.`;\n }\n }\n }\n\n console.error(`[LLMClient:call] All attempts exhausted, throwing error`);\n throw lastError;\n }\n\n private extractFinishReason(\n response: Awaited<ReturnType<ChatOpenAI['invoke']>>,\n ): LLMFinishReason {\n const metadata = response.response_metadata as\n | Record<string, unknown>\n | undefined;\n if (metadata?.finish_reason) {\n const reason = metadata.finish_reason as string;\n if (\n reason === 'stop' ||\n reason === 'length' ||\n reason === 'content_filter' ||\n reason === 'tool_calls'\n ) {\n return reason;\n }\n }\n return null;\n }\n\n async callRaw(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<string> {\n const response = await this.callRawWithMetadata(options);\n return response.raw;\n }\n\n async callRawWithMetadata(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<Omit<LLMResponse<string>, 'data'> & { raw: string }> {\n const { systemPrompt, userPrompt, maxTokens } = options;\n\n return this.rateLimiter.execute(async () => {\n const modelToUse = maxTokens\n ? this.getModelWithOptions({ maxTokens })\n : this.model;\n\n const response = await modelToUse.invoke([\n { role: 'system', content: systemPrompt },\n { role: 'user', content: userPrompt },\n ]);\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) + (usageMeta.output_tokens || 0),\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n const content =\n typeof response.content === 'string'\n ? 
  private isRateLimitError(error: Error): boolean {
    const message = error.message.toLowerCase();
    return (
      message.includes('rate limit') ||
      message.includes('429') ||
      message.includes('quota exceeded')
    );
  }

  // ==========================================================================
  // Anthropic Cache Control Support
  // ==========================================================================

  async callWithCache<T>(
    options: CacheAwareLLMCallOptions<T>,
  ): Promise<LLMResponse<T>> {
    const {
      systemPrompt,
      userPrompt,
      systemBlocks,
      userBlocks,
      schema,
      maxRetries = 2,
      maxTokens,
      skipSchemaValidation = false,
      temperature,
      rawText = false,
    } = options;

    if (this.provider !== 'anthropic') {
      console.log(
        `[LLMClient:callWithCache] Provider ${this.provider} doesn't support caching, using regular call`,
      );
      return this.callWithMetadata(options);
    }

    const cacheableCount =
      (systemBlocks || []).filter((b) => b.cache_control).length +
      (userBlocks || []).filter((b) => b.cache_control).length;
    console.log(
      `[LLMClient:callWithCache] ${cacheableCount} cacheable block(s)`,
    );

    let lastError: Error | null = null;

    for (let attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        console.log(
          `[LLMClient:callWithCache] Attempt ${attempt + 1}/${maxRetries + 1}...`,
        );

        const result = await this.rateLimiter.execute(async () => {
          const anthropic = new Anthropic();

          const systemContent =
            systemBlocks && systemBlocks.length > 0
              ? systemBlocks.map((b) => ({
                  type: 'text' as const,
                  text: b.text,
                  ...(b.cache_control
                    ? { cache_control: b.cache_control }
                    : {}),
                }))
              : systemPrompt
                ? [{ type: 'text' as const, text: systemPrompt }]
                : [];

          const userContent =
            userBlocks && userBlocks.length > 0
              ? userBlocks.map((b) => ({
                  type: 'text' as const,
                  text: b.text,
                  ...(b.cache_control
                    ? { cache_control: b.cache_control }
                    : {}),
                }))
              : userPrompt
                ? [{ type: 'text' as const, text: userPrompt }]
                : [];

          const response = await anthropic.messages.create({
            model: this.modelName,
            max_tokens: maxTokens || 8192,
            temperature: temperature ?? 0,
            system: systemContent,
            messages: [{ role: 'user', content: userContent }],
          });

          const textContent = response.content.find((c) => c.type === 'text');
          const content =
            textContent && 'text' in textContent ? textContent.text : '';

          const apiUsage = response.usage as {
            input_tokens: number;
            output_tokens: number;
            cache_creation_input_tokens?: number;
            cache_read_input_tokens?: number;
          };

          const cacheRead = apiUsage.cache_read_input_tokens || 0;
          const cacheCreation = apiUsage.cache_creation_input_tokens || 0;

          if (cacheCreation > 0) {
            console.log(
              `[LLMClient:callWithCache] Cache WRITE: ${cacheCreation} tokens`,
            );
          }
          if (cacheRead > 0) {
            const savingsPercent = Math.round(
              (cacheRead / (cacheRead + apiUsage.input_tokens)) * 100,
            );
            console.log(
              `[LLMClient:callWithCache] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,
            );
          }
          if (cacheCreation === 0 && cacheRead === 0) {
            console.log(
              `[LLMClient:callWithCache] No caching: ${apiUsage.input_tokens} input tokens`,
            );
          }

          const usage: LLMUsage = {
            promptTokens: apiUsage.input_tokens,
            completionTokens: apiUsage.output_tokens,
            totalTokens: apiUsage.input_tokens + apiUsage.output_tokens,
          };

          if (this.tokenTracker) {
            this.tokenTracker.addUsage(
              usage.promptTokens,
              usage.completionTokens,
            );
          }

          const finishReason =
            response.stop_reason === 'end_turn'
              ? 'stop'
              : response.stop_reason;

          return {
            content,
            finishReason: finishReason as LLMFinishReason,
            usage,
          };
        });

        let parsed: T;
        if (rawText) {
          parsed = result.content as unknown as T;
        } else if (skipSchemaValidation) {
          parsed = parseJsonResponse(result.content, undefined) as T;
        } else {
          parsed = parseJsonResponse(result.content, schema);
        }

        return {
          data: parsed,
          raw: result.content,
          finishReason: result.finishReason,
          usage: result.usage,
        };
      } catch (error) {
        lastError = error instanceof Error ? error : new Error(String(error));
        console.error(
          `[LLMClient:callWithCache] Attempt ${attempt + 1} failed:`,
          lastError.message,
        );

        if (this.isRateLimitError(lastError)) {
          throw lastError;
        }
      }
    }

    throw lastError;
  }

  static cacheableBlock(text: string, cache = true): CacheableBlock {
    return cache
      ? { type: 'text', text, cache_control: { type: 'ephemeral' } }
      : { type: 'text', text };
  }
}
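// ----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the package source). Assumes
// OPENAI_API_KEY is set and that `schema` may be omitted from LLMCallOptions
// when skipSchemaValidation is true, so the example does not depend on the
// concrete schema type expected by parseJsonResponse.
// ----------------------------------------------------------------------------
async function exampleStructuredCall(): Promise<void> {
  const client = new LLMClient({ provider: 'openai', temperature: 0.2 });
  // call<T>() parses the model output as JSON and returns it as T.
  const { answer } = await client.call<{ answer: string }>({
    systemPrompt: 'Reply with a JSON object of the form {"answer": string}.',
    userPrompt: 'What is the capital of France?',
    skipSchemaValidation: true,
  });
  console.log(answer, client.getTokenUsage());
}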
// ============================================================================
// Singleton Instances
// ============================================================================

const sharedClients: Partial<Record<LLMProvider, LLMClient>> = {};

export function getSharedLLMClient(options?: LLMClientOptions): LLMClient {
  const provider = options?.provider || 'openai';
  if (!sharedClients[provider]) {
    sharedClients[provider] = new LLMClient(options);
  }
  return sharedClients[provider]!;
}

export function resetSharedLLMClient(provider?: LLMProvider): void {
  if (provider) {
    delete sharedClients[provider];
  } else {
    for (const key of Object.keys(sharedClients) as LLMProvider[]) {
      delete sharedClients[key];
    }
  }
}

// ============================================================================
// Provider Detection
// ============================================================================

export function getAvailableProvider(): LLMProvider {
  if (process.env.ANTHROPIC_API_KEY) return 'anthropic';
  if (process.env.DEEPSEEK_API_KEY) return 'deepseek';
  if (process.env.KIMI_API_KEY) return 'kimi';
  if (process.env.OPENAI_API_KEY) return 'openai';
  throw new Error(
    'No LLM API key found. Please set ANTHROPIC_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY, or KIMI_API_KEY.',
  );
}
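// Illustrative sketch (not part of the package source): resolve whichever
// provider has an API key configured, then reuse one shared client for it so
// the per-provider rate limiter and token tracker state is not recreated.
function sharedClientForEnv(): LLMClient {
  const provider = getAvailableProvider(); // throws if no key is set
  const client = getSharedLLMClient({ provider });
  console.log(`Using ${client.getProvider()}/${client.getModelName()}`);
  return client;
}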
export function isProviderAvailable(provider: LLMProvider): boolean {
  switch (provider) {
    case 'openai':
      return !!process.env.OPENAI_API_KEY;
    case 'deepseek':
      return !!process.env.DEEPSEEK_API_KEY;
    case 'anthropic':
      return !!process.env.ANTHROPIC_API_KEY;
    case 'kimi':
      return !!process.env.KIMI_API_KEY;
    default:
      return false;
  }
}

// ============================================================================
// Convenience Functions
// ============================================================================

export function createRequirementsClient(
  options?: Partial<LLMClientOptions>,
): LLMClient {
  const provider = options?.provider || getAvailableProvider();
  const defaultModel =
    provider === 'deepseek' ? DEEPSEEK_MODELS.CHAT : OPENAI_MODELS.GPT_5_1;
  return new LLMClient({
    provider,
    model: defaultModel,
    temperature: 0.3,
    ...options,
  });
}

export function createCreativeClient(
  options?: Partial<LLMClientOptions>,
): LLMClient {
  const provider = options?.provider || getAvailableProvider();
  const defaultModel =
    provider === 'deepseek' ? DEEPSEEK_MODELS.REASONER : OPENAI_MODELS.GPT4O;
  return new LLMClient({
    provider,
    model: defaultModel,
    temperature: 0.7,
    ...options,
  });
}

export function createFixClient(
  options?: Partial<LLMClientOptions>,
): LLMClient {
  const provider = options?.provider || getAvailableProvider();
  const defaultModel =
    provider === 'deepseek' ? DEEPSEEK_MODELS.CHAT : OPENAI_MODELS.GPT4O_MINI;
  return new LLMClient({
    provider,
    model: defaultModel,
    temperature: 0.2,
    ...options,
  });
}

export function createDeepSeekClient(
  options?: Partial<Omit<LLMClientOptions, 'provider'>>,
): LLMClient {
  return new LLMClient({
    provider: 'deepseek',
    model: DEEPSEEK_MODELS.CHAT,
    ...options,
  });
}

export function createOpenAIClient(
  options?: Partial<Omit<LLMClientOptions, 'provider'>>,
): LLMClient {
  return new LLMClient({
    provider: 'openai',
    model: OPENAI_MODELS.GPT4O,
    ...options,
  });
}

export function createAnthropicClient(
  options?: Partial<Omit<LLMClientOptions, 'provider'>>,
): LLMClient {
  return new LLMClient({
    provider: 'anthropic',
    model: ANTHROPIC_MODELS.CLAUDE_SONNET_4_5,
    ...options,
  });
}

export function createKimiClient(
  options?: Partial<Omit<LLMClientOptions, 'provider'>>,
): LLMClient {
  return new LLMClient({
    provider: 'kimi',
    model: KIMI_MODELS.K2_5,
    ...options,
  });
}
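// Illustrative prompt-caching sketch (not part of the package source).
// Assumes ANTHROPIC_API_KEY is set and that `schema` may be omitted when
// rawText is true; the guide text parameter is a hypothetical large, stable
// instruction block that benefits from Anthropic's ephemeral cache.
async function cachedSummary(longStableGuide: string): Promise<string> {
  const client = createAnthropicClient();
  const res = await client.callWithCache<string>({
    systemBlocks: [
      // cache_control: ephemeral, eligible for Anthropic prompt caching
      LLMClient.cacheableBlock(longStableGuide),
      // volatile content left uncached (second argument false)
      LLMClient.cacheableBlock(`Today is ${new Date().toDateString()}.`, false),
    ],
    userPrompt: 'Summarize the guide above in one sentence.',
    rawText: true, // return the raw completion text instead of parsing JSON
  });
  return res.data;
}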
});\n}\n"],"mappings":";;;;;;;;;;AAaA,SAAS,kBAAkB;AAC3B;AAAA,EACE;AAAA,OAEK;AACP,OAAO,eAAe;AAiBtB,IAAM,uBAAN,cAAmC,cAAc;AAAA,EAC/C,MAAM,OACJ,OACA,SACsB;AACtB,QAAI;AACJ,QAAI,OAAO,UAAU,UAAU;AAC7B,iBAAW,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,CAAC;AAAA,IAC9C,OAAO;AACL,iBAAW;AAAA,IACb;AAEA,UAAM,sBAAsB,SAAS,IAAI,CAAC,QAAqB;AAC7D,YAAM,UAAU,IAAI,WAAW,KAAK,IAAI,QAAQ;AAChD,YAAM,WAAW,YAAY;AAE7B,UAAI,CAAC,SAAU,QAAO;AAEtB,UAAI,OAAO,IAAI,YAAY,UAAU;AACnC,eAAO;AAAA,UACL,GAAG;AAAA,UACH,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM,IAAI;AAAA,cACV,eAAe,EAAE,MAAM,YAAY;AAAA,YACrC;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAEA,UAAI,MAAM,QAAQ,IAAI,OAAO,GAAG;AAC9B,cAAM,SAAS,IAAI;AAKnB,cAAM,qBAAqB,OAAO,KAAK,CAAC,MAAM,EAAE,aAAa;AAE7D,YAAI,CAAC,oBAAoB;AACvB,gBAAM,oBAAoB,OAAO,IAAI,CAAC,OAAO,QAAQ;AACnD,gBAAI,MAAM,SAAS,UAAU,QAAQ,OAAO,SAAS,GAAG;AACtD,qBAAO;AAAA,gBACL,GAAG;AAAA,gBACH,eAAe,EAAE,MAAM,YAAY;AAAA,cACrC;AAAA,YACF;AACA,mBAAO;AAAA,UACT,CAAC;AAED,iBAAO,EAAE,GAAG,KAAK,SAAS,kBAAkB;AAAA,QAC9C;AAAA,MACF;AAEA,aAAO;AAAA,IACT,CAAC;AAED,WAAO,MAAM,OAAO,qBAAqB,OAAO;AAAA,EAClD;AACF;AA0EA,IAAM,mBAA8D;AAAA,EAClE,QAAQ,MAAM;AACZ,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO,EAAE,QAAQ,SAAS,QAAW,cAAc,SAAS;AAAA,EAC9D;AAAA,EACA,UAAU,MAAM;AACd,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,WAAW,MAAM;AACf,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,MAAM,MAAM;AACV,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AACF;AAEO,IAAM,kBAAkB;AAAA,EAC7B,MAAM;AAAA,EACN,OAAO;AAAA,EACP,UAAU;AACZ;AAEO,IAAM,gBAAgB;AAAA,EAC3B,OAAO;AAAA,EACP,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,SAAS;AACX;AAEO,IAAM,mBAAmB;AAAA,EAC9B,mBAAmB;AAAA,EACnB,iBAAiB;AAAA,EACjB,iBAAiB;AAAA,EACjB,kBAAkB;AACpB;AAEO,IAAM,cAAc;AAAA,EACzB,MAAM;AACR;AAEA,IAAM,sBAAsB;AAMrB,IAAM,YAAN,MAAgB;AAAA,EAUrB,YAAY,UAA4B,CAAC,GAAG;AAC1C,SAAK,WAAW,QAAQ,YAAY;AACpC,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,YAAY,QAAQ,aAAa;AAEtC,SAAK,iBAAiB,iBAAiB,KAAK,QAAQ,EAAE;AACtD,SAAK,YAAY,QAAQ,SAAS,KAAK,eAAe;AAEtD,UAAM,aAAa,KAAK,eAAe,OAAO,MAAM,EAAE;AACtD,YAAQ;AAAA,MACN,yBAAyB,KAAK,QAAQ,YAAY,KAAK,SAAS,cAAc,UAAU;AAAA,IAC1F;AACA,QAAI,KAAK,eAAe,SAAS;AAC/B,cAAQ;AAAA,QACN,sCAAsC,KAAK,eAAe,OAAO;AAAA,MACnE;AAAA,IACF;AAEA,SAAK,QAAQ,KAAK,YAAY;AAE9B,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,SAAS,IACpC;AAAA,EACR;AAAA,EAEQ,0BAAmC;AACzC,UAAM,QAAQ,KAAK,UAAU,YAAY;AACzC,WACE,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,OAAO,KACxB,MAAM,SAAS,KAAK,KACpB,MAAM,SAAS,IAAI;AAAA,EAEvB;AAAA,EAEQ,YAAY,SAGN;AACZ,UAAM,YAAY,SAAS;AAC3B,UAAM,cAAc,SAAS,eAAe,KAAK;AAEjD,QAAI,KAAK,aAAa,aAAa;AACjC,aAAO,IAAI,qBAAqB;AAAA,QAC9B,QAAQ,KAAK,eAAe;AAAA,QAC5B,OAAO,KAAK;AAAA,QACZ;AAAA,QACA,WAAW,KAAK;AAAA,QAChB,WAAW,aAAa;AAAA,QACxB,WAAW;AAAA,UACT;AAAA,YACE,cAAc,CAAC,WAAW;AACxB,oBAAM,aAAa,OAAO,cAAc,CAAC,IAAI,CAAC;AAC9C,oBAAM,QACJ,YAUC,SAAS;AAEZ,kBAAI,OAAO;AACT,sBAAM,eAAe,MAAM,+BAA+B;AAC1D,sBAAM,YAAY,MAAM,2BAA2B;AACnD,sBAAM,cAAc,MAAM,gBAAgB;AAC1C,sBAAM,eAAe,MAAM,iBAAiB;AAE5C,oBAAI,eAAe,GAAG;AACpB,0BAAQ;AAAA,oBACN,sCAAsC,YAAY;AAAA,kBACpD;AAAA,gBACF;AACA,oBAAI,YAAY,GAAG;AACjB,wBAAM,iBAAiB,KAAK;AAAA,oBACzB,aAAa,YAAY,eAAgB;AAAA,kBAC5C;AACA,0BAAQ;AAAA,oBACN,oCAAoC,SAAS,aAAa,cAAc;AAAA,kBAC1E;AAAA,gBACF;AACA,oBAAI,iBAAiB,KAAK,cAAc,KAAK,cAAc
,GAAG;AAC5D,sBAAI,cAAc,KAAK;AACrB,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF,OAAO;AACL,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,KAAK,wBAAwB;AAEzD,UAAM,cAAc,YAChB,sBACE,EAAE,aAAa,EAAE,uBAAuB,UAAU,EAAE,IACpD,EAAE,UAAU,IACd,CAAC;AAEL,UAAM,UAAU,KAAK,aAAa,aAAa,MAAS;AAExD,WAAO,IAAI,WAAW;AAAA,MACpB,QAAQ,KAAK,eAAe;AAAA,MAC5B,OAAO,KAAK;AAAA,MACZ,aAAa,sBAAsB,SAAY;AAAA,MAC/C,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,GAAG;AAAA,MACH,eAAe;AAAA,QACb,QAAQ,KAAK,eAAe;AAAA,QAC5B,GAAI,KAAK,eAAe,UACpB,EAAE,SAAS,KAAK,eAAe,QAAQ,IACvC,CAAC;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,oBAAoB,SAGd;AACZ,WAAO,KAAK,YAAY,OAAO;AAAA,EACjC;AAAA,EAEA,cAA2B;AACzB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,eAAuB;AACrB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,WAAsB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AAAA,EAEA,MAAM,KAAQ,SAAwC;AACpD,UAAM,WAAW,MAAM,KAAK,iBAAiB,OAAO;AACpD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,iBAAoB,SAAqD;AAC7E,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,mBAAmB;AAAA,MACnB;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,IACF,IAAI;AAEJ,QAAI,gBAAgB;AACpB,QAAI,YAA0B;AAE9B,YAAQ;AAAA,MACN,qCAAqC,KAAK,QAAQ,IAAI,KAAK,SAAS;AAAA,IACtE;AACA,YAAQ,IAAI,mCAAmC,WAAW,MAAM,QAAQ;AACxE,QAAI,WAAW;AACb,cAAQ,IAAI,gCAAgC,SAAS,EAAE;AAAA,IACzD;AAEA,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QAC3D;AACA,cAAM,mBAAmB,KAAK,IAAI;AAElC,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,kBAAQ,IAAI,oCAAoC;AAChD,gBAAM,kBAAkB,KAAK,IAAI;AAEjC,gBAAM,aACJ,aAAa,gBAAgB,SACzB,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAEX,gBAAM,WAAW,MAAM,WAAW,OAAO;AAAA,YACvC,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,YACxC,EAAE,MAAM,QAAQ,SAAS,cAAc;AAAA,UACzC,CAAC;AAED,kBAAQ;AAAA,YACN,uCAAuC,KAAK,IAAI,IAAI,eAAe;AAAA,UACrE;AAEA,cAAI,QAAyB;AAC7B,cAAI,SAAS,gBAAgB;AAC3B,kBAAM,YAAY,SAAS;AAI3B,oBAAQ;AAAA,cACN,cAAc,UAAU,gBAAgB;AAAA,cACxC,kBAAkB,UAAU,iBAAiB;AAAA,cAC7C,cACG,UAAU,gBAAgB,MAC1B,UAAU,iBAAiB;AAAA,YAChC;AACA,oBAAQ;AAAA,cACN,iCAAiC,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,YACnF;AAEA,gBAAI,KAAK,cAAc;AACrB,mBAAK,aAAa;AAAA,gBAChB,MAAM;AAAA,gBACN,MAAM;AAAA,cACR;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,cAAI,iBAAiB,UAAU;AAC7B,oBAAQ;AAAA,cACN;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,kBAAQ;AAAA,YACN,qCAAqC,QAAQ,MAAM,0BAA0B,YAAY;AAAA,UAC3F;AAEA,iBAAO,EAAE,SAAS,cAAc,MAAM;AAAA,QACxC,CAAC;AAED,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,iBAAiB,KAAK,IAAI,IAAI,gBAAgB;AAAA,QACvF;AAEA,cAAM,SAAS,uBACV,kBAAkB,OAAO,SAAS,MAAS,IAC5C,kBAAkB,OAAO,SAAS,MAAM;AAC5C,gBAAQ;AAAA,UACN,gDAAgD,uBAAuB,iCAAiC,EAAE;AAAA,QAC5G;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC;AAAA,UACvC,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,kBAAQ,MAAM,iDAAiD;AAC/D,gBAAM;AAAA,QACR;AAEA,YAAI,UAAU,cAAc,kBAAkB;AAC5C,kBAAQ,IAAI,gDAAgD;AAC5D,0BACE,GAAG,UAAU;AAAA;AAAA,iCACqB,UAAU,OAAO;AAAA;AAAA,QAEvD;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,MAAM,yDAAyD;AACvE,UAAM;AAAA,EACR;AAAA,EAEQ,oBACN,UACiB;AACjB,UAAM,WAAW,SAAS;AAG1B,QAAI,UAAU,eAAe;AAC3B,YAAM,SAAS,SAAS;AACxB,UACE,WAAW,UACX,WAAW,YACX,WAAW,oBACX,WAAW,cACX;AACA,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAAQ,SAIM;AAClB,UAAM,WAAW,MAAM,KAAK,oBAAoB,OAAO;AACvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,oBAAoB,SAIuC;AAC/D,UAAM,EAAE,cAAc
,YAAY,UAAU,IAAI;AAEhD,WAAO,KAAK,YAAY,QAAQ,YAAY;AAC1C,YAAM,aAAa,YACf,KAAK,oBAAoB,EAAE,UAAU,CAAC,IACtC,KAAK;AAET,YAAM,WAAW,MAAM,WAAW,OAAO;AAAA,QACvC,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,WAAW;AAAA,MACtC,CAAC;AAED,UAAI,QAAyB;AAC7B,UAAI,SAAS,gBAAgB;AAC3B,cAAM,YAAY,SAAS;AAI3B,gBAAQ;AAAA,UACN,cAAc,UAAU,gBAAgB;AAAA,UACxC,kBAAkB,UAAU,iBAAiB;AAAA,UAC7C,cACG,UAAU,gBAAgB,MAAM,UAAU,iBAAiB;AAAA,QAChE;AAEA,YAAI,KAAK,cAAc;AACrB,eAAK,aAAa;AAAA,YAChB,MAAM;AAAA,YACN,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,YAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,YAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,aAAO,EAAE,KAAK,SAAS,cAAc,MAAM;AAAA,IAC7C,CAAC;AAAA,EACH;AAAA,EAEQ,iBAAiB,OAAuB;AAC9C,UAAM,UAAU,MAAM,QAAQ,YAAY;AAC1C,WACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,KAAK,KACtB,QAAQ,SAAS,gBAAgB;AAAA,EAErC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,cACJ,SACyB;AACzB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,IACZ,IAAI;AAEJ,QAAI,KAAK,aAAa,aAAa;AACjC,cAAQ;AAAA,QACN,sCAAsC,KAAK,QAAQ;AAAA,MACrD;AACA,aAAO,KAAK,iBAAiB,OAAO;AAAA,IACtC;AAEA,UAAM,kBACH,gBAAgB,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE,UACnD,cAAc,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE;AACpD,YAAQ;AAAA,MACN,6BAA6B,cAAc;AAAA,IAC7C;AAEA,QAAI,YAA0B;AAE9B,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QACpE;AAEA,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,gBAAM,YAAY,IAAI,UAAU;AAEhC,gBAAM,gBACJ,gBAAgB,aAAa,SAAS,IAClC,aAAa,IAAI,CAAC,OAAO;AAAA,YACvB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,eACE,CAAC,EAAE,MAAM,QAAiB,MAAM,aAAa,CAAC,IAC9C,CAAC;AAET,gBAAM,cACJ,cAAc,WAAW,SAAS,IAC9B,WAAW,IAAI,CAAC,OAAO;AAAA,YACrB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,aACE,CAAC,EAAE,MAAM,QAAiB,MAAM,WAAW,CAAC,IAC5C,CAAC;AAET,gBAAM,WAAW,MAAM,UAAU,SAAS,OAAO;AAAA,YAC/C,OAAO,KAAK;AAAA,YACZ,YAAY,aAAa;AAAA,YACzB,aAAa,eAAe;AAAA,YAC5B,QAAQ;AAAA,YACR,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,YAAY,CAAC;AAAA,UACnD,CAAC;AAED,gBAAM,cAAc,SAAS,QAAQ,KAAK,CAAC,MAAM,EAAE,SAAS,MAAM;AAClE,gBAAM,UACJ,eAAe,UAAU,cAAc,YAAY,OAAO;AAE5D,gBAAM,WAAW,SAAS;AAO1B,gBAAM,YAAY,SAAS,2BAA2B;AACtD,gBAAM,gBAAgB,SAAS,+BAA+B;AAE9D,cAAI,gBAAgB,GAAG;AACrB,oBAAQ;AAAA,cACN,0CAA0C,aAAa;AAAA,YACzD;AAAA,UACF;AACA,cAAI,YAAY,GAAG;AACjB,kBAAM,iBAAiB,KAAK;AAAA,cACzB,aAAa,YAAY,SAAS,gBAAiB;AAAA,YACtD;AACA,oBAAQ;AAAA,cACN,wCAAwC,SAAS,aAAa,cAAc;AAAA,YAC9E;AAAA,UACF;AACA,cAAI,kBAAkB,KAAK,cAAc,GAAG;AAC1C,oBAAQ;AAAA,cACN,yCAAyC,SAAS,YAAY;AAAA,YAChE;AAAA,UACF;AAEA,gBAAM,QAAkB;AAAA,YACtB,cAAc,SAAS;AAAA,YACvB,kBAAkB,SAAS;AAAA,YAC3B,aAAa,SAAS,eAAe,SAAS;AAAA,UAChD;AAEA,cAAI,KAAK,cAAc;AACrB,iBAAK,aAAa;AAAA,cAChB,MAAM;AAAA,cACN,MAAM;AAAA,YACR;AAAA,UACF;AAEA,gBAAM,eACJ,SAAS,gBAAgB,aACrB,SACA,SAAS;AAEf,iBAAO;AAAA,YACL;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,CAAC;AAED,YAAI;AACJ,YAAI,SAAS;AACX,mBAAS,OAAO;AAAA,QAClB,WAAW,sBAAsB;AAC/B,mBAAS,kBAAkB,OAAO,SAAS,MAAS;AAAA,QACtD,OAAO;AACL,mBAAS,kBAAkB,OAAO,SAAS,MAAM;AAAA,QACnD;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC;AAAA,UAChD,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,EACR;AAAA,EAEA,OAAO,eAAe,MAAc,QAAQ,MAAsB;AAChE,WAAO,QACH,EAAE,MAAM,QAAQ,MAAM,eAAe,EAAE,MAAM,YAAY,EAAE,IAC3D,EAAE,MAAM,QAAQ,KAAK;AAAA,EAC3B;AACF;AAMA,IAAM,gBAAyD,CAAC;AAEzD,SAAS,mBAAmB,SAAuC;A
ACxE,QAAM,WAAW,SAAS,YAAY;AACtC,MAAI,CAAC,cAAc,QAAQ,GAAG;AAC5B,kBAAc,QAAQ,IAAI,IAAI,UAAU,OAAO;AAAA,EACjD;AACA,SAAO,cAAc,QAAQ;AAC/B;AAEO,SAAS,qBAAqB,UAA8B;AACjE,MAAI,UAAU;AACZ,WAAO,cAAc,QAAQ;AAAA,EAC/B,OAAO;AACL,eAAW,OAAO,OAAO,KAAK,aAAa,GAAoB;AAC7D,aAAO,cAAc,GAAG;AAAA,IAC1B;AAAA,EACF;AACF;AAMO,SAAS,uBAAoC;AAClD,MAAI,QAAQ,IAAI,kBAAmB,QAAO;AAC1C,MAAI,QAAQ,IAAI,iBAAkB,QAAO;AACzC,MAAI,QAAQ,IAAI,aAAc,QAAO;AACrC,MAAI,QAAQ,IAAI,eAAgB,QAAO;AACvC,QAAM,IAAI;AAAA,IACR;AAAA,EACF;AACF;AAEO,SAAS,oBAAoB,UAAgC;AAClE,UAAQ,UAAU;AAAA,IAChB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB;AACE,aAAO;AAAA,EACX;AACF;AAMO,SAAS,yBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,OAAO,cAAc;AACjE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,WAAW,cAAc;AACrE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,gBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aACT,gBAAgB,OAChB,cAAc;AACpB,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,gBAAgB;AAAA,IACvB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,mBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,cAAc;AAAA,IACrB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,sBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,iBAAiB;AAAA,IACxB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,iBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,YAAY;AAAA,IACnB,GAAG;AAAA,EACL,CAAC;AACH;","names":[]}