@almadar/llm 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/client.ts"],"sourcesContent":["/**\n * Shared LLM Client\n *\n * Multi-provider LLM client with:\n * - OpenAI, DeepSeek, Anthropic, and Kimi support\n * - Anthropic prompt caching (CachingChatAnthropic)\n * - Rate limiting and retry logic\n * - Token tracking\n * - Structured output parsing with Zod\n *\n * @packageDocumentation\n */\n\nimport { ChatOpenAI } from '@langchain/openai';\nimport {\n ChatAnthropic,\n type ChatAnthropicCallOptions,\n} from '@langchain/anthropic';\nimport Anthropic from '@anthropic-ai/sdk';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\nimport { parseJsonResponse } from './json-parser.js';\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype MessageLike = any;\n\n// ============================================================================\n// Caching Chat Anthropic Wrapper\n// ============================================================================\n\nclass CachingChatAnthropic extends ChatAnthropic {\n async invoke(\n input: MessageLike[] | string,\n options?: Partial<ChatAnthropicCallOptions>,\n ): Promise<MessageLike> {\n let messages: MessageLike[];\n if (typeof input === 'string') {\n messages = [{ role: 'user', content: input }];\n } else {\n messages = input;\n }\n\n const transformedMessages = messages.map((msg: MessageLike) => {\n const msgType = msg._getType?.() || msg.role || 'unknown';\n const isSystem = msgType === 'system';\n\n if (!isSystem) return msg;\n\n if (typeof msg.content === 'string') {\n return {\n ...msg,\n content: [\n {\n type: 'text',\n text: msg.content,\n cache_control: { type: 'ephemeral' },\n },\n ],\n };\n }\n\n if (Array.isArray(msg.content)) {\n const blocks = msg.content as Array<{\n type?: string;\n text?: string;\n cache_control?: unknown;\n }>;\n const hasAnyCacheControl = 
blocks.some((b) => b.cache_control);\n\n if (!hasAnyCacheControl) {\n const transformedBlocks = blocks.map((block, idx) => {\n if (block.type === 'text' && idx === blocks.length - 1) {\n return {\n ...block,\n cache_control: { type: 'ephemeral' },\n };\n }\n return block;\n });\n\n return { ...msg, content: transformedBlocks };\n }\n }\n\n return msg;\n });\n\n return super.invoke(transformedMessages, options);\n }\n}\n\ntype ChatModel = ChatOpenAI | CachingChatAnthropic;\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi';\n\nexport interface ProviderConfig {\n apiKey: string;\n baseUrl?: string;\n defaultModel: string;\n}\n\nexport interface LLMClientOptions {\n provider?: LLMProvider;\n model?: string;\n temperature?: number;\n streaming?: boolean;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface LLMCallOptions<T = unknown> {\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxRetries?: number;\n retryWithContext?: boolean;\n maxTokens?: number;\n skipSchemaValidation?: boolean;\n temperature?: number;\n}\n\nexport interface CacheableBlock {\n type: 'text';\n text: string;\n cache_control?: { type: 'ephemeral' };\n}\n\nexport interface CacheAwareLLMCallOptions<T = unknown>\n extends LLMCallOptions<T> {\n systemBlocks?: CacheableBlock[];\n userBlocks?: CacheableBlock[];\n rawText?: boolean;\n}\n\nexport interface LLMUsage {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n}\n\nexport type LLMFinishReason =\n | 'stop'\n | 'length'\n | 'content_filter'\n | 'tool_calls'\n | null;\n\nexport interface LLMResponse<T> {\n data: T;\n raw: string;\n finishReason: LLMFinishReason;\n usage: LLMUsage | null;\n}\n\n// 
============================================================================\n// Provider Configuration\n// ============================================================================\n\nconst PROVIDER_CONFIGS: Record<LLMProvider, () => ProviderConfig> = {\n openai: () => {\n const apiKey = process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return { apiKey, baseUrl: undefined, defaultModel: 'gpt-4o' };\n },\n deepseek: () => {\n const apiKey = process.env.DEEPSEEK_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'DEEPSEEK_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.deepseek.com/v1',\n defaultModel: 'deepseek-chat',\n };\n },\n anthropic: () => {\n const apiKey = process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'ANTHROPIC_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: undefined,\n defaultModel: 'claude-sonnet-4-5-20250929',\n };\n },\n kimi: () => {\n const apiKey = process.env.KIMI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'KIMI_API_KEY environment variable is not set. 
' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.moonshot.cn/v1',\n defaultModel: 'kimi-k2.5',\n };\n },\n};\n\nexport const DEEPSEEK_MODELS = {\n CHAT: 'deepseek-chat',\n CODER: 'deepseek-coder',\n REASONER: 'deepseek-reasoner',\n} as const;\n\nexport const OPENAI_MODELS = {\n GPT4O: 'gpt-4o',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4_TURBO: 'gpt-4-turbo',\n GPT35_TURBO: 'gpt-3.5-turbo',\n GPT_5_1: 'gpt-5.1',\n} as const;\n\nexport const ANTHROPIC_MODELS = {\n CLAUDE_SONNET_4_5: 'claude-sonnet-4-5-20250929',\n CLAUDE_SONNET_4: 'claude-sonnet-4-20250514',\n CLAUDE_OPUS_4_5: 'claude-opus-4-5-20250929',\n CLAUDE_3_5_HAIKU: 'claude-3-5-haiku-20241022',\n} as const;\n\nexport const KIMI_MODELS = {\n K2_5: 'kimi-k2.5',\n} as const;\n\nconst DEFAULT_TEMPERATURE = 0.3;\n\n// ============================================================================\n// LLM Client\n// ============================================================================\n\nexport class LLMClient {\n private model: ChatModel;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private modelName: string;\n private provider: LLMProvider;\n private providerConfig: ProviderConfig;\n private temperature: number;\n private streaming: boolean;\n\n constructor(options: LLMClientOptions = {}) {\n this.provider = options.provider || 'openai';\n this.temperature = options.temperature ?? DEFAULT_TEMPERATURE;\n this.streaming = options.streaming ?? 
false;\n\n this.providerConfig = PROVIDER_CONFIGS[this.provider]();\n this.modelName = options.model || this.providerConfig.defaultModel;\n\n const keyPreview = this.providerConfig.apiKey.slice(-4);\n console.log(\n `[LLMClient] Provider: ${this.provider}, Model: ${this.modelName}, Key: ****${keyPreview}`,\n );\n if (this.providerConfig.baseUrl) {\n console.log(\n `[LLMClient] Using custom base URL: ${this.providerConfig.baseUrl}`,\n );\n }\n\n this.model = this.createModel();\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.modelName)\n : null;\n }\n\n private usesMaxCompletionTokens(): boolean {\n const model = this.modelName.toLowerCase();\n return (\n model.startsWith('o1') ||\n model.startsWith('gpt-5') ||\n model.includes('o1-') ||\n model.includes('o3')\n );\n }\n\n private createModel(options?: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n const maxTokens = options?.maxTokens;\n const temperature = options?.temperature ?? this.temperature;\n\n if (this.provider === 'anthropic') {\n return new CachingChatAnthropic({\n anthropicApiKey: this.providerConfig.apiKey,\n modelName: this.modelName,\n temperature,\n streaming: this.streaming,\n maxTokens: maxTokens || 8192,\n callbacks: [\n {\n handleLLMEnd: (output) => {\n const generation = output.generations?.[0]?.[0];\n const usage = (\n generation as unknown as {\n message?: {\n usage_metadata?: {\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n input_tokens?: number;\n output_tokens?: number;\n };\n };\n }\n )?.message?.usage_metadata;\n\n if (usage) {\n const cacheCreated = usage.cache_creation_input_tokens ?? 0;\n const cacheRead = usage.cache_read_input_tokens ?? 0;\n const inputTokens = usage.input_tokens ?? 0;\n const outputTokens = usage.output_tokens ?? 
0;\n\n if (cacheCreated > 0) {\n console.log(\n `[LLMClient:Anthropic] Cache WRITE: ${cacheCreated} tokens cached`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + inputTokens)) * 100,\n );\n console.log(\n `[LLMClient:Anthropic] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreated === 0 && cacheRead === 0 && inputTokens > 0) {\n if (inputTokens < 500) {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens (likely cached)`,\n );\n } else {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens`,\n );\n }\n }\n }\n },\n },\n ],\n });\n }\n\n const useCompletionTokens = this.usesMaxCompletionTokens();\n\n const tokenConfig = maxTokens\n ? useCompletionTokens\n ? { modelKwargs: { max_completion_tokens: maxTokens } }\n : { maxTokens }\n : {};\n\n const timeout = this.provider === 'deepseek' ? 600000 : undefined;\n\n return new ChatOpenAI({\n openAIApiKey: this.providerConfig.apiKey,\n modelName: this.modelName,\n temperature: useCompletionTokens ? undefined : temperature,\n streaming: this.streaming,\n timeout,\n ...tokenConfig,\n configuration: {\n apiKey: this.providerConfig.apiKey,\n ...(this.providerConfig.baseUrl\n ? { baseURL: this.providerConfig.baseUrl }\n : {}),\n },\n });\n }\n\n private getModelWithOptions(options: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n return this.createModel(options);\n }\n\n getProvider(): LLMProvider {\n return this.provider;\n }\n\n getModelName(): string {\n return this.modelName;\n }\n\n getModel(): ChatModel {\n return this.model;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? 
null;\n }\n\n async call<T>(options: LLMCallOptions<T>): Promise<T> {\n const response = await this.callWithMetadata(options);\n return response.data;\n }\n\n async callWithMetadata<T>(options: LLMCallOptions<T>): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n schema,\n maxRetries = 2,\n retryWithContext = true,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n } = options;\n\n let currentPrompt = userPrompt;\n let lastError: Error | null = null;\n\n console.log(\n `[LLMClient:call] Starting call to ${this.provider}/${this.modelName}`,\n );\n console.log(`[LLMClient:call] Prompt length: ${userPrompt.length} chars`);\n if (maxTokens) {\n console.log(`[LLMClient:call] Max tokens: ${maxTokens}`);\n }\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n const attemptStartTime = Date.now();\n\n const result = await this.rateLimiter.execute(async () => {\n console.log(`[LLMClient:call] Invoking model...`);\n const invokeStartTime = Date.now();\n\n const modelToUse =\n maxTokens || temperature !== undefined\n ? 
this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const response = await modelToUse.invoke([\n { role: 'system', content: systemPrompt },\n { role: 'user', content: currentPrompt },\n ]);\n\n console.log(\n `[LLMClient:call] Model responded in ${Date.now() - invokeStartTime}ms`,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) +\n (usageMeta.output_tokens || 0),\n };\n console.log(\n `[LLMClient:call] Tokens used: ${usage.promptTokens} in, ${usage.completionTokens} out`,\n );\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n if (finishReason === 'length') {\n console.warn(\n `[LLMClient:call] Response truncated (finish_reason=length)`,\n );\n }\n\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n console.log(\n `[LLMClient:call] Response length: ${content.length} chars, finish_reason: ${finishReason}`,\n );\n\n return { content, finishReason, usage };\n });\n\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1} completed in ${Date.now() - attemptStartTime}ms, parsing response...`,\n );\n\n const parsed = skipSchemaValidation\n ? (parseJsonResponse(result.content, undefined) as T)\n : parseJsonResponse(result.content, schema);\n console.log(\n `[LLMClient:call] Response parsed successfully${skipSchemaValidation ? ' (schema validation skipped)' : ''}`,\n );\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? 
error : new Error(String(error));\n console.error(\n `[LLMClient:call] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n console.error(`[LLMClient:call] Rate limit error, not retrying`);\n throw lastError;\n }\n\n if (attempt < maxRetries && retryWithContext) {\n console.log(`[LLMClient:call] Will retry with error context`);\n currentPrompt =\n `${userPrompt}\\n\\n` +\n `[Previous attempt failed with: ${lastError.message}]\\n` +\n `Please output valid JSON that matches the expected schema.`;\n }\n }\n }\n\n console.error(`[LLMClient:call] All attempts exhausted, throwing error`);\n throw lastError;\n }\n\n private extractFinishReason(\n response: Awaited<ReturnType<ChatOpenAI['invoke']>>,\n ): LLMFinishReason {\n const metadata = response.response_metadata as\n | Record<string, unknown>\n | undefined;\n if (metadata?.finish_reason) {\n const reason = metadata.finish_reason as string;\n if (\n reason === 'stop' ||\n reason === 'length' ||\n reason === 'content_filter' ||\n reason === 'tool_calls'\n ) {\n return reason;\n }\n }\n return null;\n }\n\n async callRaw(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<string> {\n const response = await this.callRawWithMetadata(options);\n return response.raw;\n }\n\n async callRawWithMetadata(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<Omit<LLMResponse<string>, 'data'> & { raw: string }> {\n const { systemPrompt, userPrompt, maxTokens } = options;\n\n return this.rateLimiter.execute(async () => {\n const modelToUse = maxTokens\n ? 
this.getModelWithOptions({ maxTokens })\n : this.model;\n\n const response = await modelToUse.invoke([\n { role: 'system', content: systemPrompt },\n { role: 'user', content: userPrompt },\n ]);\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) + (usageMeta.output_tokens || 0),\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n return { raw: content, finishReason, usage };\n });\n }\n\n private isRateLimitError(error: Error): boolean {\n const message = error.message.toLowerCase();\n return (\n message.includes('rate limit') ||\n message.includes('429') ||\n message.includes('quota exceeded')\n );\n }\n\n // ==========================================================================\n // Anthropic Cache Control Support\n // ==========================================================================\n\n async callWithCache<T>(\n options: CacheAwareLLMCallOptions<T>,\n ): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n systemBlocks,\n userBlocks,\n schema,\n maxRetries = 2,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n rawText = false,\n } = options;\n\n if (this.provider !== 'anthropic') {\n console.log(\n `[LLMClient:callWithCache] Provider ${this.provider} doesn't support caching, using regular call`,\n );\n return this.callWithMetadata(options);\n }\n\n const cacheableCount =\n (systemBlocks || []).filter((b) => b.cache_control).length +\n (userBlocks || []).filter((b) => b.cache_control).length;\n 
console.log(\n `[LLMClient:callWithCache] ${cacheableCount} cacheable block(s)`,\n );\n\n let lastError: Error | null = null;\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:callWithCache] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n\n const result = await this.rateLimiter.execute(async () => {\n const anthropic = new Anthropic();\n\n const systemContent =\n systemBlocks && systemBlocks.length > 0\n ? systemBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : systemPrompt\n ? [{ type: 'text' as const, text: systemPrompt }]\n : [];\n\n const userContent =\n userBlocks && userBlocks.length > 0\n ? userBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : userPrompt\n ? [{ type: 'text' as const, text: userPrompt }]\n : [];\n\n const response = await anthropic.messages.create({\n model: this.modelName,\n max_tokens: maxTokens || 8192,\n temperature: temperature ?? 0,\n system: systemContent,\n messages: [{ role: 'user', content: userContent }],\n });\n\n const textContent = response.content.find((c) => c.type === 'text');\n const content =\n textContent && 'text' in textContent ? 
textContent.text : '';\n\n const apiUsage = response.usage as {\n input_tokens: number;\n output_tokens: number;\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n };\n\n const cacheRead = apiUsage.cache_read_input_tokens || 0;\n const cacheCreation = apiUsage.cache_creation_input_tokens || 0;\n\n if (cacheCreation > 0) {\n console.log(\n `[LLMClient:callWithCache] Cache WRITE: ${cacheCreation} tokens`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + apiUsage.input_tokens)) * 100,\n );\n console.log(\n `[LLMClient:callWithCache] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreation === 0 && cacheRead === 0) {\n console.log(\n `[LLMClient:callWithCache] No caching: ${apiUsage.input_tokens} input tokens`,\n );\n }\n\n const usage: LLMUsage = {\n promptTokens: apiUsage.input_tokens,\n completionTokens: apiUsage.output_tokens,\n totalTokens: apiUsage.input_tokens + apiUsage.output_tokens,\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n\n const finishReason =\n response.stop_reason === 'end_turn'\n ? 'stop'\n : response.stop_reason;\n\n return {\n content,\n finishReason: finishReason as LLMFinishReason,\n usage,\n };\n });\n\n let parsed: T;\n if (rawText) {\n parsed = result.content as unknown as T;\n } else if (skipSchemaValidation) {\n parsed = parseJsonResponse(result.content, undefined) as T;\n } else {\n parsed = parseJsonResponse(result.content, schema);\n }\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? 
error : new Error(String(error));\n console.error(\n `[LLMClient:callWithCache] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n throw lastError;\n }\n }\n }\n\n throw lastError;\n }\n\n static cacheableBlock(text: string, cache = true): CacheableBlock {\n return cache\n ? { type: 'text', text, cache_control: { type: 'ephemeral' } }\n : { type: 'text', text };\n }\n}\n\n// ============================================================================\n// Singleton Instances\n// ============================================================================\n\nconst sharedClients: Partial<Record<LLMProvider, LLMClient>> = {};\n\nexport function getSharedLLMClient(options?: LLMClientOptions): LLMClient {\n const provider = options?.provider || 'openai';\n if (!sharedClients[provider]) {\n sharedClients[provider] = new LLMClient(options);\n }\n return sharedClients[provider]!;\n}\n\nexport function resetSharedLLMClient(provider?: LLMProvider): void {\n if (provider) {\n delete sharedClients[provider];\n } else {\n for (const key of Object.keys(sharedClients) as LLMProvider[]) {\n delete sharedClients[key];\n }\n }\n}\n\n// ============================================================================\n// Provider Detection\n// ============================================================================\n\nexport function getAvailableProvider(): LLMProvider {\n if (process.env.ANTHROPIC_API_KEY) return 'anthropic';\n if (process.env.DEEPSEEK_API_KEY) return 'deepseek';\n if (process.env.KIMI_API_KEY) return 'kimi';\n if (process.env.OPENAI_API_KEY) return 'openai';\n throw new Error(\n 'No LLM API key found. 
Please set ANTHROPIC_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY, or KIMI_API_KEY.',\n );\n}\n\nexport function isProviderAvailable(provider: LLMProvider): boolean {\n switch (provider) {\n case 'openai':\n return !!process.env.OPENAI_API_KEY;\n case 'deepseek':\n return !!process.env.DEEPSEEK_API_KEY;\n case 'anthropic':\n return !!process.env.ANTHROPIC_API_KEY;\n case 'kimi':\n return !!process.env.KIMI_API_KEY;\n default:\n return false;\n }\n}\n\n// ============================================================================\n// Convenience Functions\n// ============================================================================\n\nexport function createRequirementsClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.CHAT : OPENAI_MODELS.GPT_5_1;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.3,\n ...options,\n });\n}\n\nexport function createCreativeClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.REASONER : OPENAI_MODELS.GPT4O;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.7,\n ...options,\n });\n}\n\nexport function createFixClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek'\n ? 
DEEPSEEK_MODELS.CHAT\n : OPENAI_MODELS.GPT4O_MINI;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.2,\n ...options,\n });\n}\n\nexport function createDeepSeekClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'deepseek',\n model: DEEPSEEK_MODELS.CHAT,\n ...options,\n });\n}\n\nexport function createOpenAIClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openai',\n model: OPENAI_MODELS.GPT4O,\n ...options,\n });\n}\n\nexport function createAnthropicClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'anthropic',\n model: ANTHROPIC_MODELS.CLAUDE_SONNET_4_5,\n ...options,\n });\n}\n\nexport function createKimiClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'kimi',\n model: KIMI_MODELS.K2_5,\n ...options,\n 
});\n}\n"],"mappings":";;;;;;;;;;AAaA,SAAS,kBAAkB;AAC3B;AAAA,EACE;AAAA,OAEK;AACP,OAAO,eAAe;AAiBtB,IAAM,uBAAN,cAAmC,cAAc;AAAA,EAC/C,MAAM,OACJ,OACA,SACsB;AACtB,QAAI;AACJ,QAAI,OAAO,UAAU,UAAU;AAC7B,iBAAW,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,CAAC;AAAA,IAC9C,OAAO;AACL,iBAAW;AAAA,IACb;AAEA,UAAM,sBAAsB,SAAS,IAAI,CAAC,QAAqB;AAC7D,YAAM,UAAU,IAAI,WAAW,KAAK,IAAI,QAAQ;AAChD,YAAM,WAAW,YAAY;AAE7B,UAAI,CAAC,SAAU,QAAO;AAEtB,UAAI,OAAO,IAAI,YAAY,UAAU;AACnC,eAAO;AAAA,UACL,GAAG;AAAA,UACH,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM,IAAI;AAAA,cACV,eAAe,EAAE,MAAM,YAAY;AAAA,YACrC;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAEA,UAAI,MAAM,QAAQ,IAAI,OAAO,GAAG;AAC9B,cAAM,SAAS,IAAI;AAKnB,cAAM,qBAAqB,OAAO,KAAK,CAAC,MAAM,EAAE,aAAa;AAE7D,YAAI,CAAC,oBAAoB;AACvB,gBAAM,oBAAoB,OAAO,IAAI,CAAC,OAAO,QAAQ;AACnD,gBAAI,MAAM,SAAS,UAAU,QAAQ,OAAO,SAAS,GAAG;AACtD,qBAAO;AAAA,gBACL,GAAG;AAAA,gBACH,eAAe,EAAE,MAAM,YAAY;AAAA,cACrC;AAAA,YACF;AACA,mBAAO;AAAA,UACT,CAAC;AAED,iBAAO,EAAE,GAAG,KAAK,SAAS,kBAAkB;AAAA,QAC9C;AAAA,MACF;AAEA,aAAO;AAAA,IACT,CAAC;AAED,WAAO,MAAM,OAAO,qBAAqB,OAAO;AAAA,EAClD;AACF;AA0EA,IAAM,mBAA8D;AAAA,EAClE,QAAQ,MAAM;AACZ,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO,EAAE,QAAQ,SAAS,QAAW,cAAc,SAAS;AAAA,EAC9D;AAAA,EACA,UAAU,MAAM;AACd,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,WAAW,MAAM;AACf,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,MAAM,MAAM;AACV,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AACF;AAEO,IAAM,kBAAkB;AAAA,EAC7B,MAAM;AAAA,EACN,OAAO;AAAA,EACP,UAAU;AACZ;AAEO,IAAM,gBAAgB;AAAA,EAC3B,OAAO;AAAA,EACP,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,SAAS;AACX;AAEO,IAAM,mBAAmB;AAAA,EAC9B,mBAAmB;AAAA,EACnB,iBAAiB;AAAA,EA
CjB,iBAAiB;AAAA,EACjB,kBAAkB;AACpB;AAEO,IAAM,cAAc;AAAA,EACzB,MAAM;AACR;AAEA,IAAM,sBAAsB;AAMrB,IAAM,YAAN,MAAgB;AAAA,EAUrB,YAAY,UAA4B,CAAC,GAAG;AAC1C,SAAK,WAAW,QAAQ,YAAY;AACpC,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,YAAY,QAAQ,aAAa;AAEtC,SAAK,iBAAiB,iBAAiB,KAAK,QAAQ,EAAE;AACtD,SAAK,YAAY,QAAQ,SAAS,KAAK,eAAe;AAEtD,UAAM,aAAa,KAAK,eAAe,OAAO,MAAM,EAAE;AACtD,YAAQ;AAAA,MACN,yBAAyB,KAAK,QAAQ,YAAY,KAAK,SAAS,cAAc,UAAU;AAAA,IAC1F;AACA,QAAI,KAAK,eAAe,SAAS;AAC/B,cAAQ;AAAA,QACN,sCAAsC,KAAK,eAAe,OAAO;AAAA,MACnE;AAAA,IACF;AAEA,SAAK,QAAQ,KAAK,YAAY;AAE9B,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,SAAS,IACpC;AAAA,EACR;AAAA,EAEQ,0BAAmC;AACzC,UAAM,QAAQ,KAAK,UAAU,YAAY;AACzC,WACE,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,OAAO,KACxB,MAAM,SAAS,KAAK,KACpB,MAAM,SAAS,IAAI;AAAA,EAEvB;AAAA,EAEQ,YAAY,SAGN;AACZ,UAAM,YAAY,SAAS;AAC3B,UAAM,cAAc,SAAS,eAAe,KAAK;AAEjD,QAAI,KAAK,aAAa,aAAa;AACjC,aAAO,IAAI,qBAAqB;AAAA,QAC9B,iBAAiB,KAAK,eAAe;AAAA,QACrC,WAAW,KAAK;AAAA,QAChB;AAAA,QACA,WAAW,KAAK;AAAA,QAChB,WAAW,aAAa;AAAA,QACxB,WAAW;AAAA,UACT;AAAA,YACE,cAAc,CAAC,WAAW;AACxB,oBAAM,aAAa,OAAO,cAAc,CAAC,IAAI,CAAC;AAC9C,oBAAM,QACJ,YAUC,SAAS;AAEZ,kBAAI,OAAO;AACT,sBAAM,eAAe,MAAM,+BAA+B;AAC1D,sBAAM,YAAY,MAAM,2BAA2B;AACnD,sBAAM,cAAc,MAAM,gBAAgB;AAC1C,sBAAM,eAAe,MAAM,iBAAiB;AAE5C,oBAAI,eAAe,GAAG;AACpB,0BAAQ;AAAA,oBACN,sCAAsC,YAAY;AAAA,kBACpD;AAAA,gBACF;AACA,oBAAI,YAAY,GAAG;AACjB,wBAAM,iBAAiB,KAAK;AAAA,oBACzB,aAAa,YAAY,eAAgB;AAAA,kBAC5C;AACA,0BAAQ;AAAA,oBACN,oCAAoC,SAAS,aAAa,cAAc;AAAA,kBAC1E;AAAA,gBACF;AACA,oBAAI,iBAAiB,KAAK,cAAc,KAAK,cAAc,GAAG;AAC5D,sBAAI,cAAc,KAAK;AACrB,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF,OAAO;AACL,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,KAAK,wBAAwB;AAEzD,UAAM,cAAc,YAChB,sBACE,EAAE,aAAa,EAAE,uBAAuB,UAAU,EAAE,IACpD,EAAE,UAAU,IACd,CAAC;AAEL,UAAM,UAAU,KAAK,aAAa,aAAa,MAAS;AAExD,WAAO,IAAI,WAAW;AAAA,MACpB,cAAc,KAAK,eAAe;A
AAA,MAClC,WAAW,KAAK;AAAA,MAChB,aAAa,sBAAsB,SAAY;AAAA,MAC/C,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,GAAG;AAAA,MACH,eAAe;AAAA,QACb,QAAQ,KAAK,eAAe;AAAA,QAC5B,GAAI,KAAK,eAAe,UACpB,EAAE,SAAS,KAAK,eAAe,QAAQ,IACvC,CAAC;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,oBAAoB,SAGd;AACZ,WAAO,KAAK,YAAY,OAAO;AAAA,EACjC;AAAA,EAEA,cAA2B;AACzB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,eAAuB;AACrB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,WAAsB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AAAA,EAEA,MAAM,KAAQ,SAAwC;AACpD,UAAM,WAAW,MAAM,KAAK,iBAAiB,OAAO;AACpD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,iBAAoB,SAAqD;AAC7E,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,mBAAmB;AAAA,MACnB;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,IACF,IAAI;AAEJ,QAAI,gBAAgB;AACpB,QAAI,YAA0B;AAE9B,YAAQ;AAAA,MACN,qCAAqC,KAAK,QAAQ,IAAI,KAAK,SAAS;AAAA,IACtE;AACA,YAAQ,IAAI,mCAAmC,WAAW,MAAM,QAAQ;AACxE,QAAI,WAAW;AACb,cAAQ,IAAI,gCAAgC,SAAS,EAAE;AAAA,IACzD;AAEA,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QAC3D;AACA,cAAM,mBAAmB,KAAK,IAAI;AAElC,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,kBAAQ,IAAI,oCAAoC;AAChD,gBAAM,kBAAkB,KAAK,IAAI;AAEjC,gBAAM,aACJ,aAAa,gBAAgB,SACzB,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAEX,gBAAM,WAAW,MAAM,WAAW,OAAO;AAAA,YACvC,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,YACxC,EAAE,MAAM,QAAQ,SAAS,cAAc;AAAA,UACzC,CAAC;AAED,kBAAQ;AAAA,YACN,uCAAuC,KAAK,IAAI,IAAI,eAAe;AAAA,UACrE;AAEA,cAAI,QAAyB;AAC7B,cAAI,SAAS,gBAAgB;AAC3B,kBAAM,YAAY,SAAS;AAI3B,oBAAQ;AAAA,cACN,cAAc,UAAU,gBAAgB;AAAA,cACxC,kBAAkB,UAAU,iBAAiB;AAAA,cAC7C,cACG,UAAU,gBAAgB,MAC1B,UAAU,iBAAiB;AAAA,YAChC;AACA,oBAAQ;AAAA,cACN,iCAAiC,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,YACnF;AAEA,gBAAI,KAAK,cAAc;AACrB,mBAAK,aAAa;AAAA,gBAChB,MAAM;AAAA,gBACN,MAAM;AAAA,cACR;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,cAAI,iBAAiB,UAAU;AAC7B,oBAAQ;AAAA,cACN;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,kBAAQ;AAAA,YAC
N,qCAAqC,QAAQ,MAAM,0BAA0B,YAAY;AAAA,UAC3F;AAEA,iBAAO,EAAE,SAAS,cAAc,MAAM;AAAA,QACxC,CAAC;AAED,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,iBAAiB,KAAK,IAAI,IAAI,gBAAgB;AAAA,QACvF;AAEA,cAAM,SAAS,uBACV,kBAAkB,OAAO,SAAS,MAAS,IAC5C,kBAAkB,OAAO,SAAS,MAAM;AAC5C,gBAAQ;AAAA,UACN,gDAAgD,uBAAuB,iCAAiC,EAAE;AAAA,QAC5G;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC;AAAA,UACvC,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,kBAAQ,MAAM,iDAAiD;AAC/D,gBAAM;AAAA,QACR;AAEA,YAAI,UAAU,cAAc,kBAAkB;AAC5C,kBAAQ,IAAI,gDAAgD;AAC5D,0BACE,GAAG,UAAU;AAAA;AAAA,iCACqB,UAAU,OAAO;AAAA;AAAA,QAEvD;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,MAAM,yDAAyD;AACvE,UAAM;AAAA,EACR;AAAA,EAEQ,oBACN,UACiB;AACjB,UAAM,WAAW,SAAS;AAG1B,QAAI,UAAU,eAAe;AAC3B,YAAM,SAAS,SAAS;AACxB,UACE,WAAW,UACX,WAAW,YACX,WAAW,oBACX,WAAW,cACX;AACA,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAAQ,SAIM;AAClB,UAAM,WAAW,MAAM,KAAK,oBAAoB,OAAO;AACvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,oBAAoB,SAIuC;AAC/D,UAAM,EAAE,cAAc,YAAY,UAAU,IAAI;AAEhD,WAAO,KAAK,YAAY,QAAQ,YAAY;AAC1C,YAAM,aAAa,YACf,KAAK,oBAAoB,EAAE,UAAU,CAAC,IACtC,KAAK;AAET,YAAM,WAAW,MAAM,WAAW,OAAO;AAAA,QACvC,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,WAAW;AAAA,MACtC,CAAC;AAED,UAAI,QAAyB;AAC7B,UAAI,SAAS,gBAAgB;AAC3B,cAAM,YAAY,SAAS;AAI3B,gBAAQ;AAAA,UACN,cAAc,UAAU,gBAAgB;AAAA,UACxC,kBAAkB,UAAU,iBAAiB;AAAA,UAC7C,cACG,UAAU,gBAAgB,MAAM,UAAU,iBAAiB;AAAA,QAChE;AAEA,YAAI,KAAK,cAAc;AACrB,eAAK,aAAa;AAAA,YAChB,MAAM;AAAA,YACN,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,YAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,YAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,aAAO,EAAE,KAAK,SAAS,cAAc,MAAM;AAAA,IAC7C,CAAC;AAAA,EACH;AAAA,EAEQ,iBAAiB,OAAuB;AAC9C,UAAM,UAAU,MAAM,QAAQ,YAAY;AAC1C,WACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,KAAK,KACtB,QAAQ,SAAS,gBAAgB;AAAA,EAErC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,cACJ,SACyB;AACzB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AA
AA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,IACZ,IAAI;AAEJ,QAAI,KAAK,aAAa,aAAa;AACjC,cAAQ;AAAA,QACN,sCAAsC,KAAK,QAAQ;AAAA,MACrD;AACA,aAAO,KAAK,iBAAiB,OAAO;AAAA,IACtC;AAEA,UAAM,kBACH,gBAAgB,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE,UACnD,cAAc,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE;AACpD,YAAQ;AAAA,MACN,6BAA6B,cAAc;AAAA,IAC7C;AAEA,QAAI,YAA0B;AAE9B,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QACpE;AAEA,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,gBAAM,YAAY,IAAI,UAAU;AAEhC,gBAAM,gBACJ,gBAAgB,aAAa,SAAS,IAClC,aAAa,IAAI,CAAC,OAAO;AAAA,YACvB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,eACE,CAAC,EAAE,MAAM,QAAiB,MAAM,aAAa,CAAC,IAC9C,CAAC;AAET,gBAAM,cACJ,cAAc,WAAW,SAAS,IAC9B,WAAW,IAAI,CAAC,OAAO;AAAA,YACrB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,aACE,CAAC,EAAE,MAAM,QAAiB,MAAM,WAAW,CAAC,IAC5C,CAAC;AAET,gBAAM,WAAW,MAAM,UAAU,SAAS,OAAO;AAAA,YAC/C,OAAO,KAAK;AAAA,YACZ,YAAY,aAAa;AAAA,YACzB,aAAa,eAAe;AAAA,YAC5B,QAAQ;AAAA,YACR,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,YAAY,CAAC;AAAA,UACnD,CAAC;AAED,gBAAM,cAAc,SAAS,QAAQ,KAAK,CAAC,MAAM,EAAE,SAAS,MAAM;AAClE,gBAAM,UACJ,eAAe,UAAU,cAAc,YAAY,OAAO;AAE5D,gBAAM,WAAW,SAAS;AAO1B,gBAAM,YAAY,SAAS,2BAA2B;AACtD,gBAAM,gBAAgB,SAAS,+BAA+B;AAE9D,cAAI,gBAAgB,GAAG;AACrB,oBAAQ;AAAA,cACN,0CAA0C,aAAa;AAAA,YACzD;AAAA,UACF;AACA,cAAI,YAAY,GAAG;AACjB,kBAAM,iBAAiB,KAAK;AAAA,cACzB,aAAa,YAAY,SAAS,gBAAiB;AAAA,YACtD;AACA,oBAAQ;AAAA,cACN,wCAAwC,SAAS,aAAa,cAAc;AAAA,YAC9E;AAAA,UACF;AACA,cAAI,kBAAkB,KAAK,cAAc,GAAG;AAC1C,oBAAQ;AAAA,cACN,yCAAyC,SAAS,YAAY;AAAA,YAChE;AAAA,UACF;AAEA,gBAAM,QAAkB;AAAA,YACtB,cAAc,SAAS;AAAA,YACvB,kBAAkB,SAAS;AAAA,YAC3B,aAAa,SAAS,eAAe,SAAS;AAAA,UAChD;AAEA,cAAI,KAAK,cAAc;AACrB,iBAAK,aAAa;AAAA,cAChB,MAAM;AAAA,cACN,MAAM;AAAA,YACR;AAAA,UACF;AAEA,gBAAM,eACJ,SAAS,gBAAgB,aACrB,SACA,SAAS;AAEf,iBAAO;AAAA,YACL;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,CAAC;AAED
,YAAI;AACJ,YAAI,SAAS;AACX,mBAAS,OAAO;AAAA,QAClB,WAAW,sBAAsB;AAC/B,mBAAS,kBAAkB,OAAO,SAAS,MAAS;AAAA,QACtD,OAAO;AACL,mBAAS,kBAAkB,OAAO,SAAS,MAAM;AAAA,QACnD;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC;AAAA,UAChD,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,EACR;AAAA,EAEA,OAAO,eAAe,MAAc,QAAQ,MAAsB;AAChE,WAAO,QACH,EAAE,MAAM,QAAQ,MAAM,eAAe,EAAE,MAAM,YAAY,EAAE,IAC3D,EAAE,MAAM,QAAQ,KAAK;AAAA,EAC3B;AACF;AAMA,IAAM,gBAAyD,CAAC;AAEzD,SAAS,mBAAmB,SAAuC;AACxE,QAAM,WAAW,SAAS,YAAY;AACtC,MAAI,CAAC,cAAc,QAAQ,GAAG;AAC5B,kBAAc,QAAQ,IAAI,IAAI,UAAU,OAAO;AAAA,EACjD;AACA,SAAO,cAAc,QAAQ;AAC/B;AAEO,SAAS,qBAAqB,UAA8B;AACjE,MAAI,UAAU;AACZ,WAAO,cAAc,QAAQ;AAAA,EAC/B,OAAO;AACL,eAAW,OAAO,OAAO,KAAK,aAAa,GAAoB;AAC7D,aAAO,cAAc,GAAG;AAAA,IAC1B;AAAA,EACF;AACF;AAMO,SAAS,uBAAoC;AAClD,MAAI,QAAQ,IAAI,kBAAmB,QAAO;AAC1C,MAAI,QAAQ,IAAI,iBAAkB,QAAO;AACzC,MAAI,QAAQ,IAAI,aAAc,QAAO;AACrC,MAAI,QAAQ,IAAI,eAAgB,QAAO;AACvC,QAAM,IAAI;AAAA,IACR;AAAA,EACF;AACF;AAEO,SAAS,oBAAoB,UAAgC;AAClE,UAAQ,UAAU;AAAA,IAChB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB;AACE,aAAO;AAAA,EACX;AACF;AAMO,SAAS,yBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,OAAO,cAAc;AACjE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,WAAW,cAAc;AACrE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,gBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aACT,gBAAgB,OAChB,cAAc;AACpB,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,S
ACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,gBAAgB;AAAA,IACvB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,mBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,cAAc;AAAA,IACrB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,sBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,iBAAiB;AAAA,IACxB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,iBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,YAAY;AAAA,IACnB,GAAG;AAAA,EACL,CAAC;AACH;","names":[]}
@@ -0,0 +1,192 @@
1
+ // src/json-parser.ts
2
function extractBalancedBrackets(text, startIdx, openBracket, closeBracket) {
  // Return the shortest bracket-balanced substring of `text` beginning at
  // `startIdx` (which must sit on `openBracket`), or null when the brackets
  // never balance. Bracket characters inside JSON string literals are
  // ignored, and backslash escapes inside strings are honored.
  if (text[startIdx] !== openBracket) return null;

  let nesting = 0;
  let insideLiteral = false;
  let skipNext = false;

  for (let pos = startIdx; pos < text.length; pos++) {
    const ch = text[pos];
    if (skipNext) {
      // Previous char was an in-string backslash; this char is escaped.
      skipNext = false;
    } else if (ch === "\\" && insideLiteral) {
      skipNext = true;
    } else if (ch === '"') {
      insideLiteral = !insideLiteral;
    } else if (!insideLiteral) {
      if (ch === openBracket) {
        nesting += 1;
      } else if (ch === closeBracket) {
        nesting -= 1;
        if (nesting === 0) {
          return text.slice(startIdx, pos + 1);
        }
      }
    }
  }

  // Ran off the end without closing every bracket.
  return null;
}
33
function extractJsonFromText(text) {
  // Pull the most likely JSON payload out of LLM output. Checks, in order:
  // a fenced code block, the first balanced object/array, a greedy regex
  // fallback for unbalanced content, then bare JSON primitives.
  // Returns null when nothing JSON-like is found.
  const trimmed = text.trim();

  // Fenced code block first. Accept any language tag on the opening fence
  // (```json, ```javascript, bare ```); previously only the literal tag
  // "json" was stripped, so e.g. a ```javascript fence leaked the word
  // "javascript" into the extracted payload and the parse always failed.
  // The second alternative requires the tag to end the line, so inline
  // fences like ```true``` still yield their content unchanged.
  const codeBlockMatch = trimmed.match(
    /```(?:json\b)?(?:[a-zA-Z0-9_-]+[ \t]*\r?\n)?\s*([\s\S]*?)```/
  );
  if (codeBlockMatch) {
    return codeBlockMatch[1].trim();
  }

  const objectStartIdx = trimmed.indexOf("{");
  const arrayStartIdx = trimmed.indexOf("[");

  // Whichever bracket appears first decides whether we extract an object or
  // an array.
  const objectFirst = objectStartIdx !== -1 && (arrayStartIdx === -1 || objectStartIdx < arrayStartIdx);
  const arrayFirst = arrayStartIdx !== -1 && (objectStartIdx === -1 || arrayStartIdx < objectStartIdx);

  if (arrayFirst) {
    const arrayJson = extractBalancedBrackets(trimmed, arrayStartIdx, "[", "]");
    if (arrayJson) return arrayJson;
    // Fallback: greedy match in case the brackets never balance.
    const arrayMatch = trimmed.match(/\[[\s\S]*\]/);
    if (arrayMatch) return arrayMatch[0];
  }
  if (objectFirst) {
    const objectJson = extractBalancedBrackets(trimmed, objectStartIdx, "{", "}");
    if (objectJson) return objectJson;
    const objectMatch = trimmed.match(/\{[\s\S]*\}/);
    if (objectMatch) return objectMatch[0];
  }

  // Bare JSON primitives (string, number, boolean, null).
  if (trimmed.startsWith('"') && trimmed.endsWith('"')) return trimmed;
  if (/^-?\d+(\.\d+)?([eE][+-]?\d+)?$/.test(trimmed)) return trimmed;
  if (trimmed === "true" || trimmed === "false") return trimmed;
  if (trimmed === "null") return trimmed;

  return null;
}
71
function parseJsonResponse(response, schema) {
  // Extract a JSON payload from `response` (markdown fences, embedded JSON,
  // or bare primitives), parse it, and — when a Zod `schema` is supplied —
  // validate it. Throws when no payload is found, when parsing fails even
  // after repair, or when schema validation fails.
  const candidate = extractJsonFromText(response);
  if (!candidate) {
    throw new Error(
      "No valid JSON found in response. Expected a JSON value (object, array, string, number, boolean, or null), possibly wrapped in markdown code blocks."
    );
  }

  let value;
  try {
    value = JSON.parse(candidate);
  } catch (parseError) {
    // Second chance: repair common LLM formatting mistakes and retry.
    try {
      value = JSON.parse(fixCommonJsonIssues(candidate));
    } catch {
      const reason = parseError instanceof Error ? parseError.message : "Unknown error";
      throw new Error(
        `Failed to parse JSON: ${reason}. Raw text: ${candidate.substring(0, 200)}...`
      );
    }
  }

  if (!schema) return value;

  const checked = schema.safeParse(value);
  if (checked.success) return checked.data;
  const details = checked.error.errors
    .map((issue) => `${issue.path.join(".")}: ${issue.message}`)
    .join("; ");
  throw new Error(`Schema validation failed: ${details}`);
}
101
function fixCommonJsonIssues(json) {
  // Best-effort repair of frequent LLM JSON mistakes, applied only after a
  // strict parse has already failed:
  //   1. trailing commas before } or ]
  //   2. unquoted object keys
  //   3. single quotes instead of double quotes (NOTE: this also rewrites
  //      apostrophes inside values — acceptable only as a last resort)
  //   4. raw control characters, which JSON.parse rejects inside strings
  return json
    .replace(/,(\s*[}\]])/g, "$1")
    .replace(/([{,]\s*)(\w+)(\s*:)/g, '$1"$2"$3')
    .replace(/'/g, '"')
    .replace(/[\x00-\x1F\x7F]/g, " ");
}
109
function safeParseJson(response, schema) {
  // Non-throwing wrapper around parseJsonResponse: returns a discriminated
  // result object ({ success: true, data } | { success: false, error })
  // instead of raising.
  try {
    return { success: true, data: parseJsonResponse(response, schema) };
  } catch (caught) {
    const failure = caught instanceof Error ? caught : new Error(String(caught));
    return { success: false, error: failure };
  }
}
120
function isValidJson(str) {
  // Report whether `str` is a complete, parseable JSON document. Never throws.
  let parseable = true;
  try {
    JSON.parse(str);
  } catch {
    parseable = false;
  }
  return parseable;
}
128
function autoCloseJson(json) {
  // Repair truncated JSON by (1) terminating an unterminated string literal,
  // (2) dropping a dangling comma / completing a dangling "key": with null,
  // and (3) appending the closers implied by unmatched brackets/braces.
  let result = json.trim();

  // Determine whether the text ends inside a string literal. A backslash is
  // an escape only *inside* a string — the same rule buildClosingSequence
  // uses. The previous version treated any backslash as an escape, so a
  // stray backslash outside a string swallowed the following quote and an
  // unterminated string could go undetected.
  let inString = false;
  let escaped = false;
  for (const char of result) {
    if (escaped) {
      escaped = false;
      continue;
    }
    if (char === "\\" && inString) {
      escaped = true;
      continue;
    }
    if (char === '"') {
      inString = !inString;
    }
  }
  if (inString) {
    result += '"';
  }

  // Remove trailing incomplete content.
  result = result.replace(/,\s*$/, "");
  result = result.replace(/:\s*$/, ": null");

  // Close unmatched brackets/braces, innermost first.
  result += buildClosingSequence(result);

  return result;
}
154
function buildClosingSequence(json) {
  // Compute the closer characters (']' / '}') needed to balance every
  // bracket or brace opened in `json` but never closed. Brackets inside
  // string literals are ignored; backslash escapes inside strings are
  // honored. Closers are returned innermost-first.
  const pending = [];
  let insideString = false;
  let escapePending = false;

  for (const ch of json) {
    if (escapePending) {
      escapePending = false;
    } else if (ch === "\\" && insideString) {
      escapePending = true;
    } else if (ch === '"') {
      insideString = !insideString;
    } else if (!insideString) {
      switch (ch) {
        case "[":
          pending.push("]");
          break;
        case "{":
          pending.push("}");
          break;
        case "]":
        case "}":
          // Pop only when this closer matches the innermost open bracket;
          // mismatched closers are ignored, as in the original.
          if (pending[pending.length - 1] === ch) {
            pending.pop();
          }
          break;
      }
    }
  }

  return pending.reverse().join("");
}
184
+
185
+ export {
186
+ extractJsonFromText,
187
+ parseJsonResponse,
188
+ safeParseJson,
189
+ isValidJson,
190
+ autoCloseJson
191
+ };
192
+ //# sourceMappingURL=chunk-WM7QVK2Z.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/json-parser.ts"],"sourcesContent":["/**\n * JSON Parser Utilities\n *\n * Robust JSON parsing for LLM responses that may contain:\n * - Markdown code blocks\n * - Extra text before/after JSON\n * - Minor formatting issues\n *\n * @packageDocumentation\n */\n\nimport { z } from 'zod';\n\nfunction extractBalancedBrackets(\n text: string,\n startIdx: number,\n openBracket: string,\n closeBracket: string,\n): string | null {\n if (text[startIdx] !== openBracket) return null;\n\n let depth = 0;\n let inString = false;\n let escapeNext = false;\n\n for (let i = startIdx; i < text.length; i++) {\n const char = text[i];\n\n if (escapeNext) {\n escapeNext = false;\n continue;\n }\n\n if (char === '\\\\' && inString) {\n escapeNext = true;\n continue;\n }\n\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n\n if (inString) continue;\n\n if (char === openBracket) {\n depth++;\n } else if (char === closeBracket) {\n depth--;\n if (depth === 0) {\n return text.substring(startIdx, i + 1);\n }\n }\n }\n\n return null;\n}\n\n/**\n * Extract JSON from LLM response text.\n *\n * Handles markdown code blocks, raw JSON objects/arrays, and primitive values.\n */\nexport function extractJsonFromText(text: string): string | null {\n const trimmed = text.trim();\n\n // Try markdown code blocks first\n const codeBlockMatch = trimmed.match(/```(?:json)?\\s*([\\s\\S]*?)```/);\n if (codeBlockMatch) {\n return codeBlockMatch[1].trim();\n }\n\n const objectStartIdx = trimmed.indexOf('{');\n const arrayStartIdx = trimmed.indexOf('[');\n\n const objectFirst =\n objectStartIdx !== -1 &&\n (arrayStartIdx === -1 || objectStartIdx < arrayStartIdx);\n const arrayFirst =\n arrayStartIdx !== -1 &&\n (objectStartIdx === -1 || arrayStartIdx < objectStartIdx);\n\n if (arrayFirst) {\n const arrayJson = extractBalancedBrackets(\n trimmed,\n arrayStartIdx,\n '[',\n ']',\n );\n if (arrayJson) return arrayJson;\n const arrayMatch = 
trimmed.match(/\\[[\\s\\S]*\\]/);\n if (arrayMatch) return arrayMatch[0];\n }\n\n if (objectFirst) {\n const objectJson = extractBalancedBrackets(\n trimmed,\n objectStartIdx,\n '{',\n '}',\n );\n if (objectJson) return objectJson;\n const objectMatch = trimmed.match(/\\{[\\s\\S]*\\}/);\n if (objectMatch) return objectMatch[0];\n }\n\n // Primitive JSON values\n if (trimmed.startsWith('\"') && trimmed.endsWith('\"')) return trimmed;\n if (/^-?\\d+(\\.\\d+)?([eE][+-]?\\d+)?$/.test(trimmed)) return trimmed;\n if (trimmed === 'true' || trimmed === 'false') return trimmed;\n if (trimmed === 'null') return trimmed;\n\n return null;\n}\n\n/**\n * Parse JSON from LLM response with optional Zod schema validation.\n */\nexport function parseJsonResponse<T>(\n response: string,\n schema?: z.ZodSchema<T>,\n): T {\n const jsonStr = extractJsonFromText(response);\n\n if (!jsonStr) {\n throw new Error(\n 'No valid JSON found in response. ' +\n 'Expected a JSON value (object, array, string, number, boolean, or null), ' +\n 'possibly wrapped in markdown code blocks.',\n );\n }\n\n let parsed: unknown;\n try {\n parsed = JSON.parse(jsonStr);\n } catch (parseError) {\n const fixed = fixCommonJsonIssues(jsonStr);\n try {\n parsed = JSON.parse(fixed);\n } catch {\n throw new Error(\n `Failed to parse JSON: ${parseError instanceof Error ? parseError.message : 'Unknown error'}. 
` +\n `Raw text: ${jsonStr.substring(0, 200)}...`,\n );\n }\n }\n\n if (schema) {\n const result = schema.safeParse(parsed);\n if (!result.success) {\n const errors = result.error.errors\n .map((e) => `${e.path.join('.')}: ${e.message}`)\n .join('; ');\n throw new Error(`Schema validation failed: ${errors}`);\n }\n return result.data;\n }\n\n return parsed as T;\n}\n\nfunction fixCommonJsonIssues(json: string): string {\n let fixed = json;\n fixed = fixed.replace(/,(\\s*[}\\]])/g, '$1');\n fixed = fixed.replace(/([{,]\\s*)(\\w+)(\\s*:)/g, '$1\"$2\"$3');\n fixed = fixed.replace(/'/g, '\"');\n fixed = fixed.replace(/[\\x00-\\x1F\\x7F]/g, ' ');\n return fixed;\n}\n\n/**\n * Safely parse JSON without throwing.\n */\nexport function safeParseJson<T>(\n response: string,\n schema?: z.ZodSchema<T>,\n): { success: true; data: T } | { success: false; error: Error } {\n try {\n const data = parseJsonResponse(response, schema);\n return { success: true, data };\n } catch (error) {\n return {\n success: false,\n error: error instanceof Error ? 
error : new Error(String(error)),\n };\n }\n}\n\n/**\n * Check if a string is valid JSON.\n */\nexport function isValidJson(str: string): boolean {\n try {\n JSON.parse(str);\n return true;\n } catch {\n return false;\n }\n}\n\n/**\n * Attempt to auto-close unclosed JSON brackets.\n */\nexport function autoCloseJson(json: string): string {\n let result = json.trim();\n\n // Handle unclosed strings\n let inString = false;\n let escaped = false;\n for (const char of result) {\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\') {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n }\n }\n if (inString) {\n result += '\"';\n }\n\n // Remove trailing incomplete content\n result = result.replace(/,\\s*$/, '');\n result = result.replace(/:\\s*$/, ': null');\n\n // Build correct closing sequence\n const closers = buildClosingSequence(result);\n result += closers;\n\n return result;\n}\n\nfunction buildClosingSequence(json: string): string {\n const stack: string[] = [];\n let inString = false;\n let escaped = false;\n\n for (const char of json) {\n if (escaped) {\n escaped = false;\n continue;\n }\n\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n\n if (inString) continue;\n\n if (char === '[') {\n stack.push(']');\n } else if (char === '{') {\n stack.push('}');\n } else if (char === ']' || char === '}') {\n if (stack.length > 0 && stack[stack.length - 1] === char) {\n stack.pop();\n }\n }\n }\n\n return 
stack.reverse().join('');\n}\n"],"mappings":";AAaA,SAAS,wBACP,MACA,UACA,aACA,cACe;AACf,MAAI,KAAK,QAAQ,MAAM,YAAa,QAAO;AAE3C,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,aAAa;AAEjB,WAAS,IAAI,UAAU,IAAI,KAAK,QAAQ,KAAK;AAC3C,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,YAAY;AACd,mBAAa;AACb;AAAA,IACF;AAEA,QAAI,SAAS,QAAQ,UAAU;AAC7B,mBAAa;AACb;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AAEA,QAAI,SAAU;AAEd,QAAI,SAAS,aAAa;AACxB;AAAA,IACF,WAAW,SAAS,cAAc;AAChC;AACA,UAAI,UAAU,GAAG;AACf,eAAO,KAAK,UAAU,UAAU,IAAI,CAAC;AAAA,MACvC;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAOO,SAAS,oBAAoB,MAA6B;AAC/D,QAAM,UAAU,KAAK,KAAK;AAG1B,QAAM,iBAAiB,QAAQ,MAAM,8BAA8B;AACnE,MAAI,gBAAgB;AAClB,WAAO,eAAe,CAAC,EAAE,KAAK;AAAA,EAChC;AAEA,QAAM,iBAAiB,QAAQ,QAAQ,GAAG;AAC1C,QAAM,gBAAgB,QAAQ,QAAQ,GAAG;AAEzC,QAAM,cACJ,mBAAmB,OAClB,kBAAkB,MAAM,iBAAiB;AAC5C,QAAM,aACJ,kBAAkB,OACjB,mBAAmB,MAAM,gBAAgB;AAE5C,MAAI,YAAY;AACd,UAAM,YAAY;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,QAAI,UAAW,QAAO;AACtB,UAAM,aAAa,QAAQ,MAAM,aAAa;AAC9C,QAAI,WAAY,QAAO,WAAW,CAAC;AAAA,EACrC;AAEA,MAAI,aAAa;AACf,UAAM,aAAa;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,QAAI,WAAY,QAAO;AACvB,UAAM,cAAc,QAAQ,MAAM,aAAa;AAC/C,QAAI,YAAa,QAAO,YAAY,CAAC;AAAA,EACvC;AAGA,MAAI,QAAQ,WAAW,GAAG,KAAK,QAAQ,SAAS,GAAG,EAAG,QAAO;AAC7D,MAAI,iCAAiC,KAAK,OAAO,EAAG,QAAO;AAC3D,MAAI,YAAY,UAAU,YAAY,QAAS,QAAO;AACtD,MAAI,YAAY,OAAQ,QAAO;AAE/B,SAAO;AACT;AAKO,SAAS,kBACd,UACA,QACG;AACH,QAAM,UAAU,oBAAoB,QAAQ;AAE5C,MAAI,CAAC,SAAS;AACZ,UAAM,IAAI;AAAA,MACR;AAAA,IAGF;AAAA,EACF;AAEA,MAAI;AACJ,MAAI;AACF,aAAS,KAAK,MAAM,OAAO;AAAA,EAC7B,SAAS,YAAY;AACnB,UAAM,QAAQ,oBAAoB,OAAO;AACzC,QAAI;AACF,eAAS,KAAK,MAAM,KAAK;AAAA,IAC3B,QAAQ;AACN,YAAM,IAAI;AAAA,QACR,yBAAyB,sBAAsB,QAAQ,WAAW,UAAU,eAAe,eAC5E,QAAQ,UAAU,GAAG,GAAG,CAAC;AAAA,MAC1C;AAAA,IACF;AAAA,EACF;AAEA,MAAI,QAAQ;AACV,UAAM,SAAS,OAAO,UAAU,MAAM;AACtC,QAAI,CAAC,OAAO,SAAS;AACnB,YAAM,SAAS,OAAO,MAAM,OACzB,IAAI,CAAC,MAAM,GAAG,EAAE,KAAK,KAAK,GAAG,CAAC,KAAK,EAAE,OAAO,EAAE,EAC9C,KAAK,IAAI;AACZ,YAAM,IAAI,MAAM,6BAA6B,MAAM,EAAE;AAAA,IACvD;AACA,WAA
O,OAAO;AAAA,EAChB;AAEA,SAAO;AACT;AAEA,SAAS,oBAAoB,MAAsB;AACjD,MAAI,QAAQ;AACZ,UAAQ,MAAM,QAAQ,gBAAgB,IAAI;AAC1C,UAAQ,MAAM,QAAQ,yBAAyB,UAAU;AACzD,UAAQ,MAAM,QAAQ,MAAM,GAAG;AAC/B,UAAQ,MAAM,QAAQ,oBAAoB,GAAG;AAC7C,SAAO;AACT;AAKO,SAAS,cACd,UACA,QAC+D;AAC/D,MAAI;AACF,UAAM,OAAO,kBAAkB,UAAU,MAAM;AAC/C,WAAO,EAAE,SAAS,MAAM,KAAK;AAAA,EAC/B,SAAS,OAAO;AACd,WAAO;AAAA,MACL,SAAS;AAAA,MACT,OAAO,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AAAA,IACjE;AAAA,EACF;AACF;AAKO,SAAS,YAAY,KAAsB;AAChD,MAAI;AACF,SAAK,MAAM,GAAG;AACd,WAAO;AAAA,EACT,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAKO,SAAS,cAAc,MAAsB;AAClD,MAAI,SAAS,KAAK,KAAK;AAGvB,MAAI,WAAW;AACf,MAAI,UAAU;AACd,aAAW,QAAQ,QAAQ;AACzB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,MAAM;AACjB,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AAAA,IACd;AAAA,EACF;AACA,MAAI,UAAU;AACZ,cAAU;AAAA,EACZ;AAGA,WAAS,OAAO,QAAQ,SAAS,EAAE;AACnC,WAAS,OAAO,QAAQ,SAAS,QAAQ;AAGzC,QAAM,UAAU,qBAAqB,MAAM;AAC3C,YAAU;AAEV,SAAO;AACT;AAEA,SAAS,qBAAqB,MAAsB;AAClD,QAAM,QAAkB,CAAC;AACzB,MAAI,WAAW;AACf,MAAI,UAAU;AAEd,aAAW,QAAQ,MAAM;AACvB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AAEA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AAEA,QAAI,SAAU;AAEd,QAAI,SAAS,KAAK;AAChB,YAAM,KAAK,GAAG;AAAA,IAChB,WAAW,SAAS,KAAK;AACvB,YAAM,KAAK,GAAG;AAAA,IAChB,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC,UAAI,MAAM,SAAS,KAAK,MAAM,MAAM,SAAS,CAAC,MAAM,MAAM;AACxD,cAAM,IAAI;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AAEA,SAAO,MAAM,QAAQ,EAAE,KAAK,EAAE;AAChC;","names":[]}
@@ -0,0 +1,136 @@
1
+ import { a as RateLimiterOptions, b as TokenUsage } from './rate-limiter-9XAWfHwe.js';
2
+ import { ChatOpenAI } from '@langchain/openai';
3
+ import { ChatAnthropic, ChatAnthropicCallOptions } from '@langchain/anthropic';
4
+ import { z } from 'zod';
5
+
6
/** Loose alias for LangChain message objects (left as `any` by the build). */
type MessageLike = any;
/**
 * ChatAnthropic subclass whose `invoke` marks system-message content as
 * cacheable for Anthropic prompt caching (see src/client.ts in sources).
 */
declare class CachingChatAnthropic extends ChatAnthropic {
    invoke(input: MessageLike[] | string, options?: Partial<ChatAnthropicCallOptions>): Promise<MessageLike>;
}
/** Union of the concrete chat-model wrappers an LLMClient can hold. */
type ChatModel = ChatOpenAI | CachingChatAnthropic;
/** Supported LLM backends. */
type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi';
/** Connection settings for a single provider. */
interface ProviderConfig {
    apiKey: string;
    baseUrl?: string;
    /** Model used when no explicit model is requested. */
    defaultModel: string;
}
/** Construction options for {@link LLMClient}. */
interface LLMClientOptions {
    provider?: LLMProvider;
    model?: string;
    temperature?: number;
    streaming?: boolean;
    rateLimiter?: RateLimiterOptions;
    useGlobalRateLimiter?: boolean;
    trackTokens?: boolean;
}
/** Options for one structured call; `schema` validates the parsed JSON. */
interface LLMCallOptions<T = unknown> {
    systemPrompt: string;
    userPrompt: string;
    schema?: z.ZodSchema<T>;
    maxRetries?: number;
    retryWithContext?: boolean;
    maxTokens?: number;
    skipSchemaValidation?: boolean;
    temperature?: number;
}
/** Text block that may opt into Anthropic ephemeral prompt caching. */
interface CacheableBlock {
    type: 'text';
    text: string;
    cache_control?: {
        type: 'ephemeral';
    };
}
/** Cache-aware variant of {@link LLMCallOptions} used by `callWithCache`. */
interface CacheAwareLLMCallOptions<T = unknown> extends LLMCallOptions<T> {
    systemBlocks?: CacheableBlock[];
    userBlocks?: CacheableBlock[];
    /* presumably: skip JSON parsing and return raw text — confirm against src */
    rawText?: boolean;
}
/** Token accounting for one call. */
interface LLMUsage {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
}
/** Why generation stopped; null when the provider reported nothing usable. */
type LLMFinishReason = 'stop' | 'length' | 'content_filter' | 'tool_calls' | null;
/** Parsed result plus raw text and per-call metadata. */
interface LLMResponse<T> {
    data: T;
    raw: string;
    finishReason: LLMFinishReason;
    usage: LLMUsage | null;
}
/** Well-known DeepSeek model identifiers. */
declare const DEEPSEEK_MODELS: {
    readonly CHAT: "deepseek-chat";
    readonly CODER: "deepseek-coder";
    readonly REASONER: "deepseek-reasoner";
};
/** Well-known OpenAI model identifiers. */
declare const OPENAI_MODELS: {
    readonly GPT4O: "gpt-4o";
    readonly GPT4O_MINI: "gpt-4o-mini";
    readonly GPT4_TURBO: "gpt-4-turbo";
    readonly GPT35_TURBO: "gpt-3.5-turbo";
    readonly GPT_5_1: "gpt-5.1";
};
/** Well-known Anthropic model identifiers. */
declare const ANTHROPIC_MODELS: {
    readonly CLAUDE_SONNET_4_5: "claude-sonnet-4-5-20250929";
    readonly CLAUDE_SONNET_4: "claude-sonnet-4-20250514";
    readonly CLAUDE_OPUS_4_5: "claude-opus-4-5-20250929";
    readonly CLAUDE_3_5_HAIKU: "claude-3-5-haiku-20241022";
};
/** Well-known Kimi model identifiers. */
declare const KIMI_MODELS: {
    readonly K2_5: "kimi-k2.5";
};
/**
 * Multi-provider LLM client with rate limiting, retry logic, token tracking,
 * structured (Zod-validated) output, and Anthropic prompt caching.
 */
declare class LLMClient {
    private model;
    private rateLimiter;
    private tokenTracker;
    private modelName;
    private provider;
    private providerConfig;
    private temperature;
    private streaming;
    constructor(options?: LLMClientOptions);
    private usesMaxCompletionTokens;
    private createModel;
    private getModelWithOptions;
    getProvider(): LLMProvider;
    getModelName(): string;
    getModel(): ChatModel;
    /** Snapshot of the rate limiter's queue and backoff state. */
    getRateLimiterStatus(): {
        queueLength: number;
        activeRequests: number;
        minuteTokens: number;
        secondTokens: number;
        backoffMs: number;
    };
    /** Cumulative token usage, or null when tracking is disabled. */
    getTokenUsage(): TokenUsage | null;
    /** Structured call; returns only the parsed (schema-validated) data. */
    call<T>(options: LLMCallOptions<T>): Promise<T>;
    /** Like {@link call} but also returns raw text, finish reason, and usage. */
    callWithMetadata<T>(options: LLMCallOptions<T>): Promise<LLMResponse<T>>;
    private extractFinishReason;
    /** Unstructured call; resolves to the raw model text. */
    callRaw(options: {
        systemPrompt: string;
        userPrompt: string;
        maxTokens?: number;
    }): Promise<string>;
    /** Like {@link callRaw} but with finish reason and usage metadata. */
    callRawWithMetadata(options: {
        systemPrompt: string;
        userPrompt: string;
        maxTokens?: number;
    }): Promise<Omit<LLMResponse<string>, 'data'> & {
        raw: string;
    }>;
    private isRateLimitError;
    /** Cache-aware call using Anthropic prompt-caching blocks. */
    callWithCache<T>(options: CacheAwareLLMCallOptions<T>): Promise<LLMResponse<T>>;
    /** Build a text block, optionally tagged with ephemeral cache_control. */
    static cacheableBlock(text: string, cache?: boolean): CacheableBlock;
}
/** Lazily-created shared client, cached per provider. */
declare function getSharedLLMClient(options?: LLMClientOptions): LLMClient;
/** Drop the shared client for `provider`, or all shared clients when omitted. */
declare function resetSharedLLMClient(provider?: LLMProvider): void;
/** First provider whose API key is set in the environment; throws when none. */
declare function getAvailableProvider(): LLMProvider;
/** Whether the given provider's API key is present in the environment. */
declare function isProviderAvailable(provider: LLMProvider): boolean;
declare function createRequirementsClient(options?: Partial<LLMClientOptions>): LLMClient;
declare function createCreativeClient(options?: Partial<LLMClientOptions>): LLMClient;
declare function createFixClient(options?: Partial<LLMClientOptions>): LLMClient;
declare function createDeepSeekClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
declare function createOpenAIClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
declare function createAnthropicClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
declare function createKimiClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
135
+
136
+ export { ANTHROPIC_MODELS, type CacheAwareLLMCallOptions, type CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, type LLMCallOptions, LLMClient, type LLMClientOptions, type LLMFinishReason, type LLMProvider, type LLMResponse, type LLMUsage, OPENAI_MODELS, type ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient };
package/dist/client.js ADDED
@@ -0,0 +1,39 @@
1
+ import {
2
+ ANTHROPIC_MODELS,
3
+ DEEPSEEK_MODELS,
4
+ KIMI_MODELS,
5
+ LLMClient,
6
+ OPENAI_MODELS,
7
+ createAnthropicClient,
8
+ createCreativeClient,
9
+ createDeepSeekClient,
10
+ createFixClient,
11
+ createKimiClient,
12
+ createOpenAIClient,
13
+ createRequirementsClient,
14
+ getAvailableProvider,
15
+ getSharedLLMClient,
16
+ isProviderAvailable,
17
+ resetSharedLLMClient
18
+ } from "./chunk-PV3G5PJS.js";
19
+ import "./chunk-WM7QVK2Z.js";
20
+ import "./chunk-MJS33AAS.js";
21
+ export {
22
+ ANTHROPIC_MODELS,
23
+ DEEPSEEK_MODELS,
24
+ KIMI_MODELS,
25
+ LLMClient,
26
+ OPENAI_MODELS,
27
+ createAnthropicClient,
28
+ createCreativeClient,
29
+ createDeepSeekClient,
30
+ createFixClient,
31
+ createKimiClient,
32
+ createOpenAIClient,
33
+ createRequirementsClient,
34
+ getAvailableProvider,
35
+ getSharedLLMClient,
36
+ isProviderAvailable,
37
+ resetSharedLLMClient
38
+ };
39
+ //# sourceMappingURL=client.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
@@ -0,0 +1,67 @@
1
+ import { LLMFinishReason, LLMClient } from './client.js';
2
+ export { ANTHROPIC_MODELS, CacheAwareLLMCallOptions, CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, LLMCallOptions, LLMClientOptions, LLMProvider, LLMResponse, LLMUsage, OPENAI_MODELS, ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient } from './client.js';
3
+ export { R as RateLimiter, a as RateLimiterOptions, T as TokenTracker, b as TokenUsage, g as getGlobalRateLimiter, c as getGlobalTokenTracker, r as resetGlobalRateLimiter, d as resetGlobalTokenTracker } from './rate-limiter-9XAWfHwe.js';
4
+ export { autoCloseJson, extractJsonFromText, isValidJson, parseJsonResponse, safeParseJson } from './json-parser.js';
5
+ import { z } from 'zod';
6
+ export { JsonSchema, STRUCTURED_OUTPUT_MODELS, StructuredGenerationOptions, StructuredGenerationResult, StructuredOutputClient, StructuredOutputOptions, getStructuredOutputClient, isStructuredOutputAvailable, resetStructuredOutputClient } from './structured-output.js';
7
+ import '@langchain/openai';
8
+ import '@langchain/anthropic';
9
+
10
+ /**
11
+ * Truncation Detector
12
+ *
13
+ * Utilities for detecting when LLM output has been truncated and
14
+ * extracting usable content from partial responses.
15
+ *
16
+ * @packageDocumentation
17
+ */
18
+
19
/** How truncation was detected; 'none' when the response looks complete. */
type TruncationReason = 'finish_reason' | 'json_incomplete' | 'bracket_mismatch' | 'none';
/** Outcome of a truncation check, including any salvageable partial data. */
interface TruncationResult {
    isTruncated: boolean;
    reason: TruncationReason;
    partialContent?: string;
    /** Last fully-parseable JSON element recovered from the partial text, if any. */
    lastCompleteElement?: unknown;
    missingCloseBrackets?: number;
    missingCloseBraces?: number;
}
/** Inspect a response and its finish reason for signs of truncation. */
declare function detectTruncation(response: string, finishReason: LLMFinishReason): TruncationResult;
/** Last complete JSON element recoverable from partial JSON, or null. */
declare function findLastCompleteElement(json: string): unknown | null;
/** Heuristic check for content that appears cut off mid-generation. */
declare function isLikelyTruncated(content: string): boolean;

/**
 * LLM Continuation Utility
 *
 * Handles truncated LLM responses with automatic continuation.
 * - Detects truncation via finish_reason and JSON structure
 * - Automatically continues with full context
 * - Merges partial and continuation responses
 * - Salvages partial data if max continuations reached
 *
 * @packageDocumentation
 */

/** Configuration for {@link callWithContinuation}. */
interface ContinuationOptions<T> {
    client: LLMClient;
    systemPrompt: string;
    userPrompt: string;
    schema?: z.ZodSchema<T>;
    maxTokens?: number;
    maxContinuations?: number;
    maxRetries?: number;
    /** Builds the follow-up prompt from the partial response gathered so far. */
    buildContinuationPrompt: (partialResponse: string, attempt: number) => string;
    continuationSystemPrompt?: string;
}
/** Final merged result plus provenance about continuations and salvage. */
interface ContinuationResult<T> {
    data: T;
    raw: string;
    continuationCount: number;
    warnings: string[];
    /** True when `data` came from salvaging a partial response. */
    wasSalvaged: boolean;
}
/** Join a partial response with its continuation text. */
declare function mergeResponses(previous: string, continuation: string): string;
/** Attempt to recover usable data from a truncated raw response; null on failure. */
declare function salvagePartialResponse<T>(rawResponse: string): T | null;
/** Call the LLM and automatically continue when the output was truncated. */
declare function callWithContinuation<T>(options: ContinuationOptions<T>): Promise<ContinuationResult<T>>;
/** Build a generic continuation prompt from context and the partial response. */
declare function buildGenericContinuationPrompt(context: string, partialResponse: string, attempt: number, maxAttempts?: number): string;
66
+
67
+ export { type ContinuationOptions, type ContinuationResult, LLMClient, LLMFinishReason, type TruncationReason, type TruncationResult, buildGenericContinuationPrompt, callWithContinuation, detectTruncation, findLastCompleteElement, isLikelyTruncated, mergeResponses, salvagePartialResponse };