@almadar/llm 2.1.0 → 2.3.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/{chunk-3MY2UA2W.js → chunk-JMDIVXMG.js} +13 -3
- package/dist/chunk-JMDIVXMG.js.map +1 -0
- package/dist/chunk-QOOSH67G.js +133 -0
- package/dist/chunk-QOOSH67G.js.map +1 -0
- package/dist/client.d.ts +4 -2
- package/dist/client.js +3 -1
- package/dist/index.d.ts +3 -2
- package/dist/index.js +13 -1
- package/dist/index.js.map +1 -1
- package/dist/providers/index.d.ts +127 -0
- package/dist/providers/index.js +13 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/{rate-limiter-9XAWfHwe.d.ts → rate-limiter-DDH7JH5p.d.ts} +1 -1
- package/dist/structured-output.d.ts +1 -1
- package/package.json +5 -1
- package/src/client.ts +14 -1
- package/src/index.ts +18 -0
- package/src/providers/index.ts +24 -0
- package/src/providers/masar.ts +272 -0
- package/dist/chunk-3MY2UA2W.js.map +0 -1
package/dist/{chunk-3MY2UA2W.js → chunk-JMDIVXMG.js}
RENAMED
@@ -124,7 +124,9 @@ var OPENROUTER_MODELS = {
   LLAMA_4_MAVERICK: "meta-llama/llama-4-maverick",
   LLAMA_4_SCOUT: "meta-llama/llama-4-scout",
   // Kimi models - strong reasoning
-  KIMI_K2: "moonshotai/kimi-k2"
+  KIMI_K2: "moonshotai/kimi-k2",
+  // Zhipu GLM models - via OpenRouter
+  GLM_4_7: "z-ai/glm-4.7"
 };
 var DEFAULT_TEMPERATURE = 0.3;
 var LLMClient = class {
@@ -654,6 +656,13 @@ function createOpenRouterClient(options) {
     ...options
   });
 }
+function createZhipuClient(options) {
+  return new LLMClient({
+    provider: "openrouter",
+    model: OPENROUTER_MODELS.GLM_4_7,
+    ...options
+  });
+}
 
 export {
   DEEPSEEK_MODELS,
@@ -673,6 +682,7 @@ export {
   createOpenAIClient,
   createAnthropicClient,
   createKimiClient,
-  createOpenRouterClient
+  createOpenRouterClient,
+  createZhipuClient
 };
-//# sourceMappingURL=chunk-3MY2UA2W.js.map
+//# sourceMappingURL=chunk-JMDIVXMG.js.map
package/dist/chunk-JMDIVXMG.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/client.ts"],"sourcesContent":["/**\n * Shared LLM Client\n *\n * Multi-provider LLM client with:\n * - OpenAI, DeepSeek, Anthropic, and Kimi support\n * - Anthropic prompt caching (CachingChatAnthropic)\n * - Rate limiting and retry logic\n * - Token tracking\n * - Structured output parsing with Zod\n *\n * @packageDocumentation\n */\n\nimport { ChatOpenAI } from '@langchain/openai';\nimport { ChatAnthropic } from '@langchain/anthropic';\nimport type { BaseMessageLike } from '@langchain/core/messages';\nimport Anthropic from '@anthropic-ai/sdk';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\nimport { parseJsonResponse } from './json-parser.js';\n\n// ============================================================================\n// Anthropic Cache Control Helper\n// ============================================================================\n\nfunction addCacheControlToSystemMessages(\n messages: Array<{ role: string; content: string }>,\n): BaseMessageLike[] {\n return messages.map((msg) => {\n if (msg.role !== 'system') {\n return msg as BaseMessageLike;\n }\n\n return {\n role: msg.role,\n content: [\n {\n type: 'text' as const,\n text: msg.content,\n cache_control: { type: 'ephemeral' },\n },\n ],\n } as BaseMessageLike;\n });\n}\n\ntype ChatModel = ChatOpenAI | ChatAnthropic;\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi' | 'openrouter';\n\nexport interface ProviderConfig {\n apiKey: string;\n baseUrl?: string;\n defaultModel: string;\n}\n\nexport interface LLMClientOptions {\n provider?: LLMProvider;\n model?: string;\n temperature?: number;\n streaming?: boolean;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface LLMCallOptions<T = unknown> {\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxRetries?: number;\n retryWithContext?: boolean;\n maxTokens?: number;\n skipSchemaValidation?: boolean;\n temperature?: number;\n}\n\nexport interface CacheableBlock {\n type: 'text';\n text: string;\n cache_control?: { type: 'ephemeral' };\n}\n\nexport interface CacheAwareLLMCallOptions<T = unknown>\n extends LLMCallOptions<T> {\n systemBlocks?: CacheableBlock[];\n userBlocks?: CacheableBlock[];\n rawText?: boolean;\n}\n\nexport interface LLMUsage {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n}\n\nexport type LLMFinishReason =\n | 'stop'\n | 'length'\n | 'content_filter'\n | 'tool_calls'\n | null;\n\nexport interface LLMResponse<T> {\n data: T;\n raw: string;\n finishReason: LLMFinishReason;\n usage: LLMUsage | null;\n}\n\nexport interface LLMStreamOptions {\n systemPrompt: string;\n messages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;\n maxTokens?: number;\n temperature?: number;\n}\n\nexport interface LLMStreamChunk {\n content: string;\n done: boolean;\n}\n\n// ============================================================================\n// Provider Configuration\n// ============================================================================\n\nconst PROVIDER_CONFIGS: Record<LLMProvider, () => ProviderConfig> = {\n openai: () => {\n const apiKey = 
process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return { apiKey, baseUrl: undefined, defaultModel: 'gpt-4o' };\n },\n deepseek: () => {\n const apiKey = process.env.DEEPSEEK_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'DEEPSEEK_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.deepseek.com/v1',\n defaultModel: 'deepseek-chat',\n };\n },\n anthropic: () => {\n const apiKey = process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'ANTHROPIC_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: undefined,\n defaultModel: 'claude-sonnet-4-5-20250929',\n };\n },\n kimi: () => {\n const apiKey = process.env.KIMI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'KIMI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.moonshot.ai/v1',\n defaultModel: 'kimi-k2.5',\n };\n },\n openrouter: () => {\n const apiKey = process.env.OPEN_ROUTER_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPEN_ROUTER_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://openrouter.ai/api/v1',\n defaultModel: 'qwen/qwen-2.5-72b-instruct', // Default to Qwen 2.5\n };\n },\n};\n\nexport const DEEPSEEK_MODELS = {\n CHAT: 'deepseek-chat',\n CODER: 'deepseek-coder',\n REASONER: 'deepseek-reasoner',\n} as const;\n\nexport const OPENAI_MODELS = {\n GPT4O: 'gpt-4o',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4_TURBO: 'gpt-4-turbo',\n GPT35_TURBO: 'gpt-3.5-turbo',\n GPT_5_1: 'gpt-5.1',\n} as const;\n\nexport const ANTHROPIC_MODELS = {\n CLAUDE_SONNET_4_5: 'claude-sonnet-4-5-20250929',\n CLAUDE_SONNET_4: 'claude-sonnet-4-20250514',\n CLAUDE_OPUS_4_5: 'claude-opus-4-5-20250929',\n CLAUDE_3_5_HAIKU: 'claude-3-5-haiku-20241022',\n} as const;\n\nexport const KIMI_MODELS = {\n K2_5: 'kimi-k2.5',\n} as const;\n\nexport const OPENROUTER_MODELS = {\n // Qwen models - JSON/structured data specialists\n QWEN_2_5_72B: 'qwen/qwen-2.5-72b-instruct',\n QWEN_2_5_CODER_32B: 'qwen/qwen-2.5-coder-32b-instruct',\n QWEN_3_235B: 'qwen/qwen3-235b-a22b',\n\n // Llama models - agentic workhorses\n LLAMA_3_3_70B: 'meta-llama/llama-3.3-70b-instruct',\n LLAMA_3_1_405B: 'meta-llama/llama-3.1-405b-instruct',\n LLAMA_4_MAVERICK: 'meta-llama/llama-4-maverick',\n LLAMA_4_SCOUT: 'meta-llama/llama-4-scout',\n\n // Kimi models - strong reasoning\n KIMI_K2: 'moonshotai/kimi-k2',\n\n // Zhipu GLM models - via OpenRouter\n GLM_4_7: 'z-ai/glm-4.7',\n} as const;\n\nconst DEFAULT_TEMPERATURE = 0.3;\n\n// ============================================================================\n// LLM Client\n// ============================================================================\n\nexport class LLMClient {\n private model: ChatModel;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private modelName: string;\n private provider: LLMProvider;\n private providerConfig: ProviderConfig;\n private temperature: number;\n private streaming: boolean;\n\n constructor(options: LLMClientOptions = {}) {\n this.provider = options.provider || 'openai';\n // Kimi: 0.6 when thinking disabled (our default), 1.0 when thinking enabled\n 
this.temperature = options.temperature ?? \n (this.provider === 'kimi' ? 0.6 : DEFAULT_TEMPERATURE);\n this.streaming = options.streaming ?? false;\n\n this.providerConfig = PROVIDER_CONFIGS[this.provider]();\n this.modelName = options.model || this.providerConfig.defaultModel;\n\n const keyPreview = this.providerConfig.apiKey.slice(-4);\n console.log(\n `[LLMClient] Provider: ${this.provider}, Model: ${this.modelName}, Key: ****${keyPreview}`,\n );\n if (this.providerConfig.baseUrl) {\n console.log(\n `[LLMClient] Using custom base URL: ${this.providerConfig.baseUrl}`,\n );\n }\n\n this.model = this.createModel();\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.modelName)\n : null;\n }\n\n private usesMaxCompletionTokens(): boolean {\n const model = this.modelName.toLowerCase();\n return (\n model.startsWith('o1') ||\n model.startsWith('gpt-5') ||\n model.includes('o1-') ||\n model.includes('o3')\n );\n }\n\n private createModel(options?: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n const maxTokens = options?.maxTokens;\n const temperature = options?.temperature ?? this.temperature;\n\n if (this.provider === 'anthropic') {\n return new ChatAnthropic({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature,\n streaming: this.streaming,\n maxTokens: maxTokens || 8192,\n callbacks: [\n {\n handleLLMEnd: (output) => {\n const generation = output.generations?.[0]?.[0];\n const usage = (\n generation as unknown as {\n message?: {\n usage_metadata?: {\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n input_tokens?: number;\n output_tokens?: number;\n };\n };\n }\n )?.message?.usage_metadata;\n\n if (usage) {\n const cacheCreated = usage.cache_creation_input_tokens ?? 0;\n const cacheRead = usage.cache_read_input_tokens ?? 0;\n const inputTokens = usage.input_tokens ?? 0;\n const outputTokens = usage.output_tokens ?? 0;\n\n if (cacheCreated > 0) {\n console.log(\n `[LLMClient:Anthropic] Cache WRITE: ${cacheCreated} tokens cached`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + inputTokens)) * 100,\n );\n console.log(\n `[LLMClient:Anthropic] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreated === 0 && cacheRead === 0 && inputTokens > 0) {\n if (inputTokens < 500) {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens (likely cached)`,\n );\n } else {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens`,\n );\n }\n }\n }\n },\n },\n ],\n });\n }\n\n const useCompletionTokens = this.usesMaxCompletionTokens();\n\n const tokenConfig = maxTokens\n ? useCompletionTokens\n ? { modelKwargs: { max_completion_tokens: maxTokens } }\n : { maxTokens }\n : {};\n\n const timeout = this.provider === 'deepseek' ? 600000 : undefined;\n\n // Kimi-k2.5: disable thinking to avoid reasoning_content issues with tool calls\n // When thinking is disabled, temperature must be 0.6 (not 1.0)\n const isKimi = this.provider === 'kimi';\n const effectiveTemp = isKimi ? 
0.6 : temperature;\n\n // Build modelKwargs incrementally to avoid spread conflicts\n const modelKwargs: Record<string, unknown> = {};\n if (useCompletionTokens && maxTokens) {\n modelKwargs.max_completion_tokens = maxTokens;\n }\n if (isKimi) {\n modelKwargs.thinking = { type: 'disabled' };\n }\n // OpenRouter (Qwen): explicit tool_choice so the model doesn't ignore tool definitions\n if (this.provider === 'openrouter') {\n modelKwargs.tool_choice = 'auto';\n }\n\n return new ChatOpenAI({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature: useCompletionTokens ? undefined : effectiveTemp,\n streaming: this.streaming,\n timeout,\n ...(Object.keys(modelKwargs).length > 0 ? { modelKwargs } : {}),\n ...(useCompletionTokens ? {} : maxTokens ? { maxTokens } : {}),\n configuration: {\n apiKey: this.providerConfig.apiKey,\n ...(this.providerConfig.baseUrl\n ? { baseURL: this.providerConfig.baseUrl }\n : {}),\n },\n });\n }\n\n private getModelWithOptions(options: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n return this.createModel(options);\n }\n\n getProvider(): LLMProvider {\n return this.provider;\n }\n\n getModelName(): string {\n return this.modelName;\n }\n\n getModel(): ChatModel {\n return this.model;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? null;\n }\n\n async call<T>(options: LLMCallOptions<T>): Promise<T> {\n const response = await this.callWithMetadata(options);\n return response.data;\n }\n\n async callWithMetadata<T>(options: LLMCallOptions<T>): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n schema,\n maxRetries = 2,\n retryWithContext = true,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n } = options;\n\n let currentPrompt = userPrompt;\n let lastError: Error | null = null;\n\n console.log(\n `[LLMClient:call] Starting call to ${this.provider}/${this.modelName}`,\n );\n console.log(`[LLMClient:call] Prompt length: ${userPrompt.length} chars`);\n if (maxTokens) {\n console.log(`[LLMClient:call] Max tokens: ${maxTokens}`);\n }\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n const attemptStartTime = Date.now();\n\n const result = await this.rateLimiter.execute(async () => {\n console.log(`[LLMClient:call] Invoking model...`);\n const invokeStartTime = Date.now();\n\n const modelToUse =\n maxTokens || temperature !== undefined\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const messages = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: currentPrompt },\n ];\n const response = await modelToUse.invoke(\n this.provider === 'anthropic'\n ? 
addCacheControlToSystemMessages(messages)\n : messages,\n );\n\n console.log(\n `[LLMClient:call] Model responded in ${Date.now() - invokeStartTime}ms`,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) +\n (usageMeta.output_tokens || 0),\n };\n console.log(\n `[LLMClient:call] Tokens used: ${usage.promptTokens} in, ${usage.completionTokens} out`,\n );\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n if (finishReason === 'length') {\n console.warn(\n `[LLMClient:call] Response truncated (finish_reason=length)`,\n );\n }\n\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n console.log(\n `[LLMClient:call] Response length: ${content.length} chars, finish_reason: ${finishReason}`,\n );\n\n return { content, finishReason, usage };\n });\n\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1} completed in ${Date.now() - attemptStartTime}ms, parsing response...`,\n );\n\n const parsed = skipSchemaValidation\n ? (parseJsonResponse(result.content, undefined) as T)\n : parseJsonResponse(result.content, schema);\n console.log(\n `[LLMClient:call] Response parsed successfully${skipSchemaValidation ? ' (schema validation skipped)' : ''}`,\n );\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? error : new Error(String(error));\n console.error(\n `[LLMClient:call] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n console.error(`[LLMClient:call] Rate limit error, not retrying`);\n throw lastError;\n }\n\n if (attempt < maxRetries && retryWithContext) {\n console.log(`[LLMClient:call] Will retry with error context`);\n currentPrompt =\n `${userPrompt}\\n\\n` +\n `[Previous attempt failed with: ${lastError.message}]\\n` +\n `Please output valid JSON that matches the expected schema.`;\n }\n }\n }\n\n console.error(`[LLMClient:call] All attempts exhausted, throwing error`);\n throw lastError;\n }\n\n private extractFinishReason(\n response: Awaited<ReturnType<ChatOpenAI['invoke']>>,\n ): LLMFinishReason {\n const metadata = response.response_metadata as\n | Record<string, unknown>\n | undefined;\n if (metadata?.finish_reason) {\n const reason = metadata.finish_reason as string;\n if (\n reason === 'stop' ||\n reason === 'length' ||\n reason === 'content_filter' ||\n reason === 'tool_calls'\n ) {\n return reason;\n }\n }\n return null;\n }\n\n async callRaw(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<string> {\n const response = await this.callRawWithMetadata(options);\n return response.raw;\n }\n\n async callRawWithMetadata(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<Omit<LLMResponse<string>, 'data'> & { raw: string }> {\n const { systemPrompt, userPrompt, maxTokens } = options;\n\n return this.rateLimiter.execute(async () => {\n const modelToUse = maxTokens\n ? 
this.getModelWithOptions({ maxTokens })\n : this.model;\n\n const messages = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: userPrompt },\n ];\n const response = await modelToUse.invoke(\n this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) + (usageMeta.output_tokens || 0),\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n return { raw: content, finishReason, usage };\n });\n }\n\n /**\n * Stream a raw text response as an async iterator of content chunks.\n * Uses the underlying LangChain model's .stream() method.\n *\n * @param options - System prompt plus full message history\n * @yields LLMStreamChunk with content deltas and a done flag\n */\n async *streamRaw(options: LLMStreamOptions): AsyncGenerator<LLMStreamChunk> {\n const { messages, maxTokens, temperature } = options;\n\n const modelToUse = (maxTokens || temperature !== undefined)\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const langchainMessages = this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages;\n\n const stream = await modelToUse.stream(langchainMessages);\n\n for await (const chunk of stream) {\n const content = typeof chunk.content === 'string'\n ? chunk.content\n : Array.isArray(chunk.content)\n ? 
chunk.content\n .filter((c): c is { type: 'text'; text: string } => typeof c === 'object' && c !== null && 'text' in c)\n .map((c) => c.text)\n .join('')\n : '';\n\n if (content) {\n yield { content, done: false };\n }\n }\n\n yield { content: '', done: true };\n }\n\n private isRateLimitError(error: Error): boolean {\n const message = error.message.toLowerCase();\n return (\n message.includes('rate limit') ||\n message.includes('429') ||\n message.includes('quota exceeded')\n );\n }\n\n // ==========================================================================\n // Anthropic Cache Control Support\n // ==========================================================================\n\n async callWithCache<T>(\n options: CacheAwareLLMCallOptions<T>,\n ): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n systemBlocks,\n userBlocks,\n schema,\n maxRetries = 2,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n rawText = false,\n } = options;\n\n if (this.provider !== 'anthropic') {\n console.log(\n `[LLMClient:callWithCache] Provider ${this.provider} doesn't support caching, using regular call`,\n );\n return this.callWithMetadata(options);\n }\n\n const cacheableCount =\n (systemBlocks || []).filter((b) => b.cache_control).length +\n (userBlocks || []).filter((b) => b.cache_control).length;\n console.log(\n `[LLMClient:callWithCache] ${cacheableCount} cacheable block(s)`,\n );\n\n let lastError: Error | null = null;\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:callWithCache] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n\n const result = await this.rateLimiter.execute(async () => {\n const anthropic = new Anthropic();\n\n const systemContent =\n systemBlocks && systemBlocks.length > 0\n ? systemBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : systemPrompt\n ? [{ type: 'text' as const, text: systemPrompt }]\n : [];\n\n const userContent =\n userBlocks && userBlocks.length > 0\n ? userBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : userPrompt\n ? [{ type: 'text' as const, text: userPrompt }]\n : [];\n\n const response = await anthropic.messages.create({\n model: this.modelName,\n max_tokens: maxTokens || 8192,\n temperature: temperature ?? 0,\n system: systemContent,\n messages: [{ role: 'user', content: userContent }],\n });\n\n const textContent = response.content.find((c) => c.type === 'text');\n const content =\n textContent && 'text' in textContent ? 
textContent.text : '';\n\n const apiUsage = response.usage as {\n input_tokens: number;\n output_tokens: number;\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n };\n\n const cacheRead = apiUsage.cache_read_input_tokens || 0;\n const cacheCreation = apiUsage.cache_creation_input_tokens || 0;\n\n if (cacheCreation > 0) {\n console.log(\n `[LLMClient:callWithCache] Cache WRITE: ${cacheCreation} tokens`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + apiUsage.input_tokens)) * 100,\n );\n console.log(\n `[LLMClient:callWithCache] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreation === 0 && cacheRead === 0) {\n console.log(\n `[LLMClient:callWithCache] No caching: ${apiUsage.input_tokens} input tokens`,\n );\n }\n\n const usage: LLMUsage = {\n promptTokens: apiUsage.input_tokens,\n completionTokens: apiUsage.output_tokens,\n totalTokens: apiUsage.input_tokens + apiUsage.output_tokens,\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n\n const finishReason =\n response.stop_reason === 'end_turn'\n ? 'stop'\n : response.stop_reason;\n\n return {\n content,\n finishReason: finishReason as LLMFinishReason,\n usage,\n };\n });\n\n let parsed: T;\n if (rawText) {\n parsed = result.content as unknown as T;\n } else if (skipSchemaValidation) {\n parsed = parseJsonResponse(result.content, undefined) as T;\n } else {\n parsed = parseJsonResponse(result.content, schema);\n }\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? error : new Error(String(error));\n console.error(\n `[LLMClient:callWithCache] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n throw lastError;\n }\n }\n }\n\n throw lastError;\n }\n\n static cacheableBlock(text: string, cache = true): CacheableBlock {\n return cache\n ? { type: 'text', text, cache_control: { type: 'ephemeral' } }\n : { type: 'text', text };\n }\n}\n\n// ============================================================================\n// Singleton Instances\n// ============================================================================\n\nconst sharedClients: Partial<Record<LLMProvider, LLMClient>> = {};\n\nexport function getSharedLLMClient(options?: LLMClientOptions): LLMClient {\n const provider = options?.provider || 'openai';\n if (!sharedClients[provider]) {\n sharedClients[provider] = new LLMClient(options);\n }\n return sharedClients[provider]!;\n}\n\nexport function resetSharedLLMClient(provider?: LLMProvider): void {\n if (provider) {\n delete sharedClients[provider];\n } else {\n for (const key of Object.keys(sharedClients) as LLMProvider[]) {\n delete sharedClients[key];\n }\n }\n}\n\n// ============================================================================\n// Provider Detection\n// ============================================================================\n\nexport function getAvailableProvider(): LLMProvider {\n if (process.env.ANTHROPIC_API_KEY) return 'anthropic';\n if (process.env.DEEPSEEK_API_KEY) return 'deepseek';\n if (process.env.KIMI_API_KEY) return 'kimi';\n if (process.env.OPENAI_API_KEY) return 'openai';\n throw new Error(\n 'No LLM API key found. 
Please set ANTHROPIC_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY, or KIMI_API_KEY.',\n );\n}\n\nexport function isProviderAvailable(provider: LLMProvider): boolean {\n switch (provider) {\n case 'openai':\n return !!process.env.OPENAI_API_KEY;\n case 'deepseek':\n return !!process.env.DEEPSEEK_API_KEY;\n case 'anthropic':\n return !!process.env.ANTHROPIC_API_KEY;\n case 'kimi':\n return !!process.env.KIMI_API_KEY;\n default:\n return false;\n }\n}\n\n// ============================================================================\n// Convenience Functions\n// ============================================================================\n\nexport function createRequirementsClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.CHAT : OPENAI_MODELS.GPT_5_1;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.3,\n ...options,\n });\n}\n\nexport function createCreativeClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.REASONER : OPENAI_MODELS.GPT4O;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.7,\n ...options,\n });\n}\n\nexport function createFixClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek'\n ? DEEPSEEK_MODELS.CHAT\n : OPENAI_MODELS.GPT4O_MINI;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.2,\n ...options,\n });\n}\n\nexport function createDeepSeekClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'deepseek',\n model: DEEPSEEK_MODELS.CHAT,\n ...options,\n });\n}\n\nexport function createOpenAIClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openai',\n model: OPENAI_MODELS.GPT4O,\n ...options,\n });\n}\n\nexport function createAnthropicClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'anthropic',\n model: ANTHROPIC_MODELS.CLAUDE_SONNET_4_5,\n ...options,\n });\n}\n\nexport function createKimiClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'kimi',\n model: KIMI_MODELS.K2_5,\n ...options,\n });\n}\n\nexport function createOpenRouterClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openrouter',\n model: OPENROUTER_MODELS.QWEN_2_5_72B,\n ...options,\n });\n}\n\nexport function createZhipuClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openrouter',\n model: OPENROUTER_MODELS.GLM_4_7,\n ...options,\n 
});\n}\n"],"mappings":";;;;;;;;;;AAaA,SAAS,kBAAkB;AAC3B,SAAS,qBAAqB;AAE9B,OAAO,eAAe;AActB,SAAS,gCACP,UACmB;AACnB,SAAO,SAAS,IAAI,CAAC,QAAQ;AAC3B,QAAI,IAAI,SAAS,UAAU;AACzB,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,MACL,MAAM,IAAI;AAAA,MACV,SAAS;AAAA,QACP;AAAA,UACE,MAAM;AAAA,UACN,MAAM,IAAI;AAAA,UACV,eAAe,EAAE,MAAM,YAAY;AAAA,QACrC;AAAA,MACF;AAAA,IACF;AAAA,EACF,CAAC;AACH;AAsFA,IAAM,mBAA8D;AAAA,EAClE,QAAQ,MAAM;AACZ,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO,EAAE,QAAQ,SAAS,QAAW,cAAc,SAAS;AAAA,EAC9D;AAAA,EACA,UAAU,MAAM;AACd,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,WAAW,MAAM;AACf,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,MAAM,MAAM;AACV,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,YAAY,MAAM;AAChB,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA;AAAA,IAChB;AAAA,EACF;AACF;AAEO,IAAM,kBAAkB;AAAA,EAC7B,MAAM;AAAA,EACN,OAAO;AAAA,EACP,UAAU;AACZ;AAEO,IAAM,gBAAgB;AAAA,EAC3B,OAAO;AAAA,EACP,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,SAAS;AACX;AAEO,IAAM,mBAAmB;AAAA,EAC9B,mBAAmB;AAAA,EACnB,iBAAiB;AAAA,EACjB,iBAAiB;AAAA,EACjB,kBAAkB;AACpB;AAEO,IAAM,cAAc;AAAA,EACzB,MAAM;AACR;AAEO,IAAM,oBAAoB;AAAA;AAAA,EAE/B,cAAc;AAAA,EACd,oBAAoB;AAAA,EACpB,aAAa;AAAA;AAAA,EAGb,eAAe;AAAA,EACf,gBAAgB;AAAA,EAChB,kBAAkB;AAAA,EAClB,eAAe;AAAA;AAAA,EAGf,SAAS;AAAA;AAAA,EAGT,SAAS;AACX;AAEA,IAAM,sBAAsB;AAMrB,IAAM,YAAN,MAAgB;AAAA,EAUrB,YAAY,UAA4B,CAAC,GAAG;AAC1C,SAAK,WAAW,QAAQ,YAAY;AAEpC,SAAK,cAAc,QAAQ,gBACxB,KAAK,aAAa,SAAS,MAAM;AACpC,SAAK,YAAY,QAAQ,aAAa;AAEtC,SAAK,iBAAiB,iBAAiB,KAAK,QAAQ,EAAE;AACtD,SAAK,YAAY,QAAQ,SAAS,KAAK,eAAe;AAEtD,UAAM,aAAa,KAAK,eAAe,OAAO,MAAM,EAAE;AACtD,YAAQ;AAAA,MACN,yBAAyB,KAAK,QAAQ,YAAY,KAAK,SAAS,cAAc,UAAU;AAAA,IAC1F;AACA,QAAI,KAAK,eAAe,SAAS;AAC/B,cAAQ;AAAA,QACN,sCAAsC,KAAK,eAAe,OAAO;AAAA,MACnE;AAAA,IACF;AAEA,SAAK,QAAQ,KAAK,YAAY;AAE9B,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,SAAS,IACpC;AAAA,EACR;AAAA,EAEQ,0BAAmC;AACzC,UAAM,QAAQ,KAAK,UAAU,YAAY;AACzC,WACE,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,OAAO,KACxB,MAAM,SAAS,KAAK,KACpB,MAAM,SAAS,IAAI;AAAA,EAEvB;AAAA,EAEQ,YAAY,SAGN;AACZ,UAAM,YAAY,SAAS;AAC3B,UAAM,cAAc,SAAS,eAAe,KAAK;AAEjD,QAAI,KAAK,aAAa,aAAa;AACjC,aAAO,IAAI,cAAc;AAAA,QACvB,QAAQ,KAAK,eAAe;AAAA,QAC5B,OAAO,KAAK;AAAA,QACZ;AAAA,QACA,WAAW,KAAK;AAAA,QAChB,WAAW,aAAa;AAAA,QACxB,WAAW;AAAA,UACT;AAAA,YACE,cAAc,CAAC,WAAW;AACxB,oBAAM,aAAa,OAAO,cAAc,CAAC,IAAI,CAAC;AAC9C,oBAAM,QACJ,YAUC,SAAS;AAEZ,kBAAI,OAAO;AACT,sBAAM,eAAe,MAAM,+BAA+B;AAC1D,sBAAM,YAAY,MAAM,2BAA2B;AACnD,sBAAM,cAAc,MAAM,gBAAgB;AAC1C,sBAAM,eAAe,MAAM,iBAAiB;AAE5C,oBAAI,eAAe,GAAG;AACpB,0BAAQ;AAAA,oBACN,sCAAsC,YAAY;AAAA,kBACpD;AAAA,gBACF;AACA,oBAAI,YAAY,GAAG;AACjB,wBAAM,iBAAiB,KAAK;AAAA,oBACzB,aAAa,YAAY,eAAgB;AAAA,kBAC5C;AACA,0BAAQ;AAAA,oBACN,oCAAoC,SAAS,aAAa,cAAc;AAAA,kBAC1E;AAAA,gBACF;AACA,oBAAI,iBAAiB,KAAK,cAAc,KAAK,cAAc,GAAG;AAC5D,sBAAI,cAAc,KAAK;AACrB,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF,OAAO;AACL,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,KAAK,wBAAwB;AAEzD,UAAM,cAAc,YA
ChB,sBACE,EAAE,aAAa,EAAE,uBAAuB,UAAU,EAAE,IACpD,EAAE,UAAU,IACd,CAAC;AAEL,UAAM,UAAU,KAAK,aAAa,aAAa,MAAS;AAIxD,UAAM,SAAS,KAAK,aAAa;AACjC,UAAM,gBAAgB,SAAS,MAAM;AAGrC,UAAM,cAAuC,CAAC;AAC9C,QAAI,uBAAuB,WAAW;AACpC,kBAAY,wBAAwB;AAAA,IACtC;AACA,QAAI,QAAQ;AACV,kBAAY,WAAW,EAAE,MAAM,WAAW;AAAA,IAC5C;AAEA,QAAI,KAAK,aAAa,cAAc;AAClC,kBAAY,cAAc;AAAA,IAC5B;AAEA,WAAO,IAAI,WAAW;AAAA,MACpB,QAAQ,KAAK,eAAe;AAAA,MAC5B,OAAO,KAAK;AAAA,MACZ,aAAa,sBAAsB,SAAY;AAAA,MAC/C,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,GAAI,OAAO,KAAK,WAAW,EAAE,SAAS,IAAI,EAAE,YAAY,IAAI,CAAC;AAAA,MAC7D,GAAI,sBAAsB,CAAC,IAAI,YAAY,EAAE,UAAU,IAAI,CAAC;AAAA,MAC5D,eAAe;AAAA,QACb,QAAQ,KAAK,eAAe;AAAA,QAC5B,GAAI,KAAK,eAAe,UACpB,EAAE,SAAS,KAAK,eAAe,QAAQ,IACvC,CAAC;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,oBAAoB,SAGd;AACZ,WAAO,KAAK,YAAY,OAAO;AAAA,EACjC;AAAA,EAEA,cAA2B;AACzB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,eAAuB;AACrB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,WAAsB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AAAA,EAEA,MAAM,KAAQ,SAAwC;AACpD,UAAM,WAAW,MAAM,KAAK,iBAAiB,OAAO;AACpD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,iBAAoB,SAAqD;AAC7E,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,mBAAmB;AAAA,MACnB;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,IACF,IAAI;AAEJ,QAAI,gBAAgB;AACpB,QAAI,YAA0B;AAE9B,YAAQ;AAAA,MACN,qCAAqC,KAAK,QAAQ,IAAI,KAAK,SAAS;AAAA,IACtE;AACA,YAAQ,IAAI,mCAAmC,WAAW,MAAM,QAAQ;AACxE,QAAI,WAAW;AACb,cAAQ,IAAI,gCAAgC,SAAS,EAAE;AAAA,IACzD;AAEA,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QAC3D;AACA,cAAM,mBAAmB,KAAK,IAAI;AAElC,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,kBAAQ,IAAI,oCAAoC;AAChD,gBAAM,kBAAkB,KAAK,IAAI;AAEjC,gBAAM,aACJ,aAAa,gBAAgB,SACzB,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAEX,gBAAM,WAAW;AAAA,YACf,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,YACxC,EAAE,MAAM,QAAQ,SAAS,cAAc;AAAA,UACzC;AACA,gBAAM,WAAW,MAAM,WAAW;AAAA,YAChC,KAAK,aAAa,cACd,gCAAgC,QAAQ,IACxC;AAAA,UACN;AAEA,kBAAQ;AAAA,YACN,uCAAuC,KAAK,IAAI,IAAI,eAAe;AAAA,UACrE;AAEA,cAAI,QAAyB;AAC7B,cAAI,SAAS,gBAAgB;AAC3B,kBAAM,YAAY,SAAS;AAI3B,oBAAQ;AAAA,cACN,cAAc,UAAU,gBAAgB;AAAA,cACxC,kBAAkB,UAAU,iBAAiB;AAAA,cAC7C,cACG,UAAU,gBAAgB,MAC1B,UAAU,iBAAiB;AAAA,YAChC;AACA,oBAAQ;AAAA,cACN,iCAAiC,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,YACnF;AAEA,gBAAI,KAAK,cAAc;AACrB,mBAAK,aAAa;AAAA,gBAChB,MAAM;AAAA,gBACN,MAAM;AAAA,cACR;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,cAAI,iBAAiB,UAAU;AAC7B,oBAAQ;AAAA,cACN;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,kBAAQ;AAAA,YACN,qCAAqC,QAAQ,MAAM,0BAA0B,YAAY;AAAA,UAC3F;AAEA,iBAAO,EAAE,SAAS,cAAc,MAAM;AAAA,QACxC,CAAC;AAED,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,iBAAiB,KAAK,IAAI,IAAI,gBAAgB;AAAA,QACvF;AAEA,cAAM,SAAS,uBACV,kBAAkB,OAAO,SAAS,MAAS,IAC5C,kBAAkB,OAAO,SAAS,MAAM;AAC5C,gBAAQ;AAAA,UACN,gDAAgD,uBAAuB,iCAAiC,EAAE;AAAA,QAC5G;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC;AAAA,UACvC,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,kBAAQ,MAAM,iDAAiD;AAC/D,gBAAM;AAAA,QACR;AAEA,YAAI,UAAU,cAAc,kBAAkB;AAC5C,kBAAQ,IAAI,gDAAgD;AAC5D,0BACE,GAAG,UAAU;AAAA;AAAA,iCACqB,UAAU,OAAO;AAAA;AAAA,QAEvD;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,MAAM,yDAAyD;AACvE,UAAM;AAAA,EACR;AAAA,EAEQ,oBACN,UACiB;AACjB,UAAM,WAAW,SAAS;AAG1B,QAAI,UAAU,eAAe;AAC3B,YAAM,SAAS,SAAS;AACxB,UACE,WAAW,UACX,WAAW,YACX,WAAW,oBACX,WAAW,cACX;AACA,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAAQ,SAI
M;AAClB,UAAM,WAAW,MAAM,KAAK,oBAAoB,OAAO;AACvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,oBAAoB,SAIuC;AAC/D,UAAM,EAAE,cAAc,YAAY,UAAU,IAAI;AAEhD,WAAO,KAAK,YAAY,QAAQ,YAAY;AAC1C,YAAM,aAAa,YACf,KAAK,oBAAoB,EAAE,UAAU,CAAC,IACtC,KAAK;AAET,YAAM,WAAW;AAAA,QACf,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,WAAW;AAAA,MACtC;AACA,YAAM,WAAW,MAAM,WAAW;AAAA,QAChC,KAAK,aAAa,cACd,gCAAgC,QAAQ,IACxC;AAAA,MACN;AAEA,UAAI,QAAyB;AAC7B,UAAI,SAAS,gBAAgB;AAC3B,cAAM,YAAY,SAAS;AAI3B,gBAAQ;AAAA,UACN,cAAc,UAAU,gBAAgB;AAAA,UACxC,kBAAkB,UAAU,iBAAiB;AAAA,UAC7C,cACG,UAAU,gBAAgB,MAAM,UAAU,iBAAiB;AAAA,QAChE;AAEA,YAAI,KAAK,cAAc;AACrB,eAAK,aAAa;AAAA,YAChB,MAAM;AAAA,YACN,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,YAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,YAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,aAAO,EAAE,KAAK,SAAS,cAAc,MAAM;AAAA,IAC7C,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,UAAU,SAA2D;AAC1E,UAAM,EAAE,UAAU,WAAW,YAAY,IAAI;AAE7C,UAAM,aAAc,aAAa,gBAAgB,SAC7C,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAET,UAAM,oBAAoB,KAAK,aAAa,cACxC,gCAAgC,QAAQ,IACxC;AAEJ,UAAM,SAAS,MAAM,WAAW,OAAO,iBAAiB;AAExD,qBAAiB,SAAS,QAAQ;AAChC,YAAM,UAAU,OAAO,MAAM,YAAY,WACrC,MAAM,UACN,MAAM,QAAQ,MAAM,OAAO,IACzB,MAAM,QACH,OAAO,CAAC,MAA2C,OAAO,MAAM,YAAY,MAAM,QAAQ,UAAU,CAAC,EACrG,IAAI,CAAC,MAAM,EAAE,IAAI,EACjB,KAAK,EAAE,IACV;AAEN,UAAI,SAAS;AACX,cAAM,EAAE,SAAS,MAAM,MAAM;AAAA,MAC/B;AAAA,IACF;AAEA,UAAM,EAAE,SAAS,IAAI,MAAM,KAAK;AAAA,EAClC;AAAA,EAEQ,iBAAiB,OAAuB;AAC9C,UAAM,UAAU,MAAM,QAAQ,YAAY;AAC1C,WACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,KAAK,KACtB,QAAQ,SAAS,gBAAgB;AAAA,EAErC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,cACJ,SACyB;AACzB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,IACZ,IAAI;AAEJ,QAAI,KAAK,aAAa,aAAa;AACjC,cAAQ;AAAA,QACN,sCAAsC,KAAK,QAAQ;AAAA,MACrD;AACA,aAAO,KAAK,iBAAiB,OAAO;AAAA,IACtC;AAEA,UAAM,kBACH,gBAAgB,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE,UACnD,cAAc,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE;AACpD,YAAQ;AAAA,MACN,6BAA6B,cAAc;AAAA,IAC7C;AAEA,QAAI,YAA0B;AAE9B,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QACpE;AAEA,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,gBAAM,YAAY,IAAI,UAAU;AAEhC,gBAAM,gBACJ,gBAAgB,aAAa,SAAS,IAClC,aAAa,IAAI,CAAC,OAAO;AAAA,YACvB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,eACE,CAAC,EAAE,MAAM,QAAiB,MAAM,aAAa,CAAC,IAC9C,CAAC;AAET,gBAAM,cACJ,cAAc,WAAW,SAAS,IAC9B,WAAW,IAAI,CAAC,OAAO;AAAA,YACrB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,aACE,CAAC,EAAE,MAAM,QAAiB,MAAM,WAAW,CAAC,IAC5C,CAAC;AAET,gBAAM,WAAW,MAAM,UAAU,SAAS,OAAO;AAAA,YAC/C,OAAO,KAAK;AAAA,YACZ,YAAY,aAAa;AAAA,YACzB,aAAa,eAAe;AAAA,YAC5B,QAAQ;AAAA,YACR,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,YAAY,CAAC;AAAA,UACnD,CAAC;AAED,gBAAM,cAAc,SAAS,QAAQ,KAAK,CAAC,MAAM,EAAE,SAAS,MAAM;AAClE,gBAAM,UACJ,eAAe,UAAU,cAAc,YAAY,OAAO;AAE5D,gBAAM,WAAW,SAAS;AAO1B,gBAAM,YAAY,SAAS,2BAA2B;AACtD,gBAAM,gBAAgB,SAAS,+BAA+B;AAE9D,cAAI,gBAAgB,GAAG;AACrB,oBAAQ;AAAA,cACN,0CAA0C,aAAa;AAAA,YACzD;AAAA,UACF;AACA,cAAI,YAAY,GAAG;AACjB,kBAAM,iBAAiB,KAAK;AAAA,cACzB,aAAa,YAAY,SAAS,gBAAiB;AAAA,YACtD;AACA,oBAAQ;AAAA,cACN,wCAAwC,SAAS,aAAa,cAAc;AAAA,YAC9E;AAAA,UACF;AACA,cAAI,kBAAkB,KAAK,cAAc,GAAG;AAC1C,oBAAQ;AAAA,cACN,yCAAyC,SAAS,YAAY;AAAA,YAChE;AAAA,UACF;AAEA,gBAAM,QAAkB;AAAA,YACtB,cAAc,SAAS;AAAA,YACvB,kBAAkB,SAAS;AAAA,YAC3B,aAAa,SAAS,eAAe,SAAS;AAAA,UAChD;AAEA,cAAI,KAAK,cAAc;AACrB,iBAAK,aAAa;AAAA,cAChB,MAAM;AAAA,cACN,MAAM;AAAA,YACR;AAAA,UACF;AAEA,gBAAM,eACJ,SAAS,gBAAgB
,aACrB,SACA,SAAS;AAEf,iBAAO;AAAA,YACL;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,CAAC;AAED,YAAI;AACJ,YAAI,SAAS;AACX,mBAAS,OAAO;AAAA,QAClB,WAAW,sBAAsB;AAC/B,mBAAS,kBAAkB,OAAO,SAAS,MAAS;AAAA,QACtD,OAAO;AACL,mBAAS,kBAAkB,OAAO,SAAS,MAAM;AAAA,QACnD;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC;AAAA,UAChD,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,EACR;AAAA,EAEA,OAAO,eAAe,MAAc,QAAQ,MAAsB;AAChE,WAAO,QACH,EAAE,MAAM,QAAQ,MAAM,eAAe,EAAE,MAAM,YAAY,EAAE,IAC3D,EAAE,MAAM,QAAQ,KAAK;AAAA,EAC3B;AACF;AAMA,IAAM,gBAAyD,CAAC;AAEzD,SAAS,mBAAmB,SAAuC;AACxE,QAAM,WAAW,SAAS,YAAY;AACtC,MAAI,CAAC,cAAc,QAAQ,GAAG;AAC5B,kBAAc,QAAQ,IAAI,IAAI,UAAU,OAAO;AAAA,EACjD;AACA,SAAO,cAAc,QAAQ;AAC/B;AAEO,SAAS,qBAAqB,UAA8B;AACjE,MAAI,UAAU;AACZ,WAAO,cAAc,QAAQ;AAAA,EAC/B,OAAO;AACL,eAAW,OAAO,OAAO,KAAK,aAAa,GAAoB;AAC7D,aAAO,cAAc,GAAG;AAAA,IAC1B;AAAA,EACF;AACF;AAMO,SAAS,uBAAoC;AAClD,MAAI,QAAQ,IAAI,kBAAmB,QAAO;AAC1C,MAAI,QAAQ,IAAI,iBAAkB,QAAO;AACzC,MAAI,QAAQ,IAAI,aAAc,QAAO;AACrC,MAAI,QAAQ,IAAI,eAAgB,QAAO;AACvC,QAAM,IAAI;AAAA,IACR;AAAA,EACF;AACF;AAEO,SAAS,oBAAoB,UAAgC;AAClE,UAAQ,UAAU;AAAA,IAChB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB;AACE,aAAO;AAAA,EACX;AACF;AAMO,SAAS,yBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,OAAO,cAAc;AACjE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,WAAW,cAAc;AACrE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,gBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aACT,gBAAgB,OAChB,cAAc;AACpB,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,gBAAgB;AAAA,IACvB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,mBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,cAAc;AAAA,IACrB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,sBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,iBAAiB;AAAA,IACxB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,iBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,YAAY;AAAA,IACnB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,uBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,kBAAkB;AAAA,IACzB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,kBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,kBAAkB;AAAA,IACzB,GAAG;AAAA,EACL,CAAC;AACH;","names":[]}
package/dist/chunk-QOOSH67G.js
ADDED
@@ -0,0 +1,133 @@
+// src/providers/masar.ts
+var MasarError = class extends Error {
+  constructor(message, statusCode, responseBody) {
+    super(message);
+    this.statusCode = statusCode;
+    this.responseBody = responseBody;
+    this.name = "MasarError";
+  }
+};
+var DEFAULT_BASE_URL = "https://masar-345008351456.europe-west4.run.app";
+var DEFAULT_TIMEOUT_MS = 3e4;
+var MasarProvider = class {
+  constructor(options) {
+    this.baseUrl = (options?.baseUrl ?? process.env.MASAR_URL ?? DEFAULT_BASE_URL).replace(/\/+$/, "");
+    this.timeoutMs = options?.timeoutMs ?? DEFAULT_TIMEOUT_MS;
+  }
+  // --------------------------------------------------------------------------
+  // Public API
+  // --------------------------------------------------------------------------
+  /**
+   * Generate text from a prompt.
+   *
+   * POST /generate
+   */
+  async generate(prompt, options) {
+    return this.post("/generate", {
+      prompt,
+      ...options
+    });
+  }
+  /**
+   * Generate a .orb schema via GFlowNet sampling.
+   *
+   * POST /generate/gflownet
+   */
+  async generateGFlowNet(goal) {
+    return this.post("/generate/gflownet", goal);
+  }
+  /**
+   * Predict validation errors in a .orb schema before compilation.
+   *
+   * POST /predict-errors
+   */
+  async predictErrors(schema) {
+    return this.post("/predict-errors", { schema });
+  }
+  /**
+   * Rank candidate edits for fixing errors in a .orb schema.
+   *
+   * POST /rank-edits
+   */
+  async rankEdits(schema, errors) {
+    return this.post("/rank-edits", { schema, errors });
+  }
+  /**
+   * Check server health.
+   *
+   * GET /health
+   */
+  async health() {
+    return this.get("/health");
+  }
+  // --------------------------------------------------------------------------
+  // Internal helpers
+  // --------------------------------------------------------------------------
+  async post(path, body) {
+    return this.request(path, {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify(body)
+    });
+  }
+  async get(path) {
+    return this.request(path, { method: "GET" });
+  }
+  async request(path, init) {
+    const url = `${this.baseUrl}${path}`;
+    const controller = new AbortController();
+    const timer = setTimeout(() => controller.abort(), this.timeoutMs);
+    try {
+      const response = await fetch(url, {
+        ...init,
+        signal: controller.signal
+      });
+      if (!response.ok) {
+        const text = await response.text().catch(() => "");
+        throw new MasarError(
+          `Masar ${init.method} ${path} failed with status ${response.status}`,
+          response.status,
+          text
+        );
+      }
+      return await response.json();
+    } catch (error) {
+      if (error instanceof MasarError) {
+        throw error;
+      }
+      if (error instanceof DOMException && error.name === "AbortError") {
+        throw new MasarError(
+          `Masar ${init.method} ${path} timed out after ${this.timeoutMs}ms`,
+          0,
+          ""
+        );
+      }
+      const message = error instanceof Error ? error.message : String(error);
+      throw new MasarError(
+        `Masar ${init.method} ${path} failed: ${message}`,
+        0,
+        ""
+      );
+    } finally {
+      clearTimeout(timer);
+    }
+  }
+};
+var sharedInstance = null;
+function getMasarProvider(options) {
+  if (!sharedInstance) {
+    sharedInstance = new MasarProvider(options);
+  }
+  return sharedInstance;
+}
+function resetMasarProvider() {
+  sharedInstance = null;
+}
+
+export {
+  MasarError,
+  MasarProvider,
+  getMasarProvider,
+  resetMasarProvider
+};
+//# sourceMappingURL=chunk-QOOSH67G.js.map
package/dist/chunk-QOOSH67G.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/providers/masar.ts"],"sourcesContent":["/**\n * Masar Provider\n *\n * Thin HTTP client for the Masar neural pipeline server.\n * Exposes generate, GFlowNet generation, error prediction,\n * edit ranking, and health-check endpoints.\n *\n * Reads `MASAR_URL` from environment (default: http://localhost:8080).\n *\n * @packageDocumentation\n */\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport interface MasarGenerateOptions {\n /** Model override (server decides default if omitted). */\n model?: string;\n /** Sampling temperature. */\n temperature?: number;\n /** Maximum tokens to generate. */\n maxTokens?: number;\n}\n\nexport interface MasarGenerateResult {\n text: string;\n usage: {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n };\n}\n\nexport interface GoalSpec {\n /** Natural-language description of the desired application. */\n description: string;\n /** Target entities (e.g. [\"User\", \"Product\", \"Order\"]). */\n entities?: string[];\n /** Domain hint (e.g. \"e-commerce\", \"healthcare\"). */\n domain?: string;\n /** Additional constraints passed to the GFlowNet sampler. */\n constraints?: Record<string, unknown>;\n}\n\nexport interface GFlowNetResult {\n /** Generated .orb schema text. */\n schema: string;\n /** Log-probability of the sampled trajectory. */\n logProb: number;\n /** Number of sampling steps taken. */\n steps: number;\n}\n\nexport interface ErrorPrediction {\n /** Line number (1-based) where the error is predicted. */\n line: number;\n /** Predicted error category. */\n category: string;\n /** Human-readable description. */\n message: string;\n /** Confidence score in [0, 1]. */\n confidence: number;\n}\n\nexport interface PredictErrorsResult {\n errors: ErrorPrediction[];\n}\n\nexport interface RankedEdit {\n /** The proposed replacement text. */\n edit: string;\n /** Score assigned by the ranker (higher is better). */\n score: number;\n /** Which error this edit addresses. */\n targetError: string;\n}\n\nexport interface RankEditsResult {\n edits: RankedEdit[];\n}\n\nexport interface MasarHealthResult {\n status: string;\n version?: string;\n uptime?: number;\n}\n\nexport interface MasarProviderOptions {\n /** Base URL of the Masar server. Overrides MASAR_URL env var. */\n baseUrl?: string;\n /** Request timeout in milliseconds (default: 30 000). */\n timeoutMs?: number;\n}\n\n// ============================================================================\n// Error\n// ============================================================================\n\nexport class MasarError extends Error {\n constructor(\n message: string,\n public readonly statusCode: number,\n public readonly responseBody: string,\n ) {\n super(message);\n this.name = 'MasarError';\n }\n}\n\n// ============================================================================\n// Provider\n// ============================================================================\n\nconst DEFAULT_BASE_URL = 'https://masar-345008351456.europe-west4.run.app';\nconst DEFAULT_TIMEOUT_MS = 30_000;\n\nexport class MasarProvider {\n private readonly baseUrl: string;\n private readonly timeoutMs: number;\n\n constructor(options?: MasarProviderOptions) {\n this.baseUrl = (\n options?.baseUrl ??\n process.env.MASAR_URL ??\n DEFAULT_BASE_URL\n ).replace(/\\/+$/, '');\n this.timeoutMs = options?.timeoutMs ?? 
DEFAULT_TIMEOUT_MS;\n }\n\n // --------------------------------------------------------------------------\n // Public API\n // --------------------------------------------------------------------------\n\n /**\n * Generate text from a prompt.\n *\n * POST /generate\n */\n async generate(\n prompt: string,\n options?: MasarGenerateOptions,\n ): Promise<MasarGenerateResult> {\n return this.post<MasarGenerateResult>('/generate', {\n prompt,\n ...options,\n });\n }\n\n /**\n * Generate a .orb schema via GFlowNet sampling.\n *\n * POST /generate/gflownet\n */\n async generateGFlowNet(goal: GoalSpec): Promise<GFlowNetResult> {\n return this.post<GFlowNetResult>('/generate/gflownet', goal);\n }\n\n /**\n * Predict validation errors in a .orb schema before compilation.\n *\n * POST /predict-errors\n */\n async predictErrors(schema: string): Promise<PredictErrorsResult> {\n return this.post<PredictErrorsResult>('/predict-errors', { schema });\n }\n\n /**\n * Rank candidate edits for fixing errors in a .orb schema.\n *\n * POST /rank-edits\n */\n async rankEdits(\n schema: string,\n errors: string[],\n ): Promise<RankEditsResult> {\n return this.post<RankEditsResult>('/rank-edits', { schema, errors });\n }\n\n /**\n * Check server health.\n *\n * GET /health\n */\n async health(): Promise<MasarHealthResult> {\n return this.get<MasarHealthResult>('/health');\n }\n\n // --------------------------------------------------------------------------\n // Internal helpers\n // --------------------------------------------------------------------------\n\n private async post<T>(path: string, body: unknown): Promise<T> {\n return this.request<T>(path, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n });\n }\n\n private async get<T>(path: string): Promise<T> {\n return this.request<T>(path, { method: 'GET' });\n }\n\n private async request<T>(\n path: string,\n init: RequestInit,\n ): Promise<T> {\n const url = `${this.baseUrl}${path}`;\n const controller = new AbortController();\n const timer = setTimeout(() => controller.abort(), this.timeoutMs);\n\n try {\n const response = await fetch(url, {\n ...init,\n signal: controller.signal,\n });\n\n if (!response.ok) {\n const text = await response.text().catch(() => '');\n throw new MasarError(\n `Masar ${init.method} ${path} failed with status ${response.status}`,\n response.status,\n text,\n );\n }\n\n return (await response.json()) as T;\n } catch (error) {\n if (error instanceof MasarError) {\n throw error;\n }\n\n if (error instanceof DOMException && error.name === 'AbortError') {\n throw new MasarError(\n `Masar ${init.method} ${path} timed out after ${this.timeoutMs}ms`,\n 0,\n '',\n );\n }\n\n const message =\n error instanceof Error ? 
error.message : String(error);\n throw new MasarError(\n `Masar ${init.method} ${path} failed: ${message}`,\n 0,\n '',\n );\n } finally {\n clearTimeout(timer);\n }\n }\n}\n\n// ============================================================================\n// Singleton\n// ============================================================================\n\nlet sharedInstance: MasarProvider | null = null;\n\nexport function getMasarProvider(\n options?: MasarProviderOptions,\n): MasarProvider {\n if (!sharedInstance) {\n sharedInstance = new MasarProvider(options);\n }\n return sharedInstance;\n}\n\nexport function resetMasarProvider(): void {\n sharedInstance = null;\n}\n"],"mappings":";AAmGO,IAAM,aAAN,cAAyB,MAAM;AAAA,EACpC,YACE,SACgB,YACA,cAChB;AACA,UAAM,OAAO;AAHG;AACA;AAGhB,SAAK,OAAO;AAAA,EACd;AACF;AAMA,IAAM,mBAAmB;AACzB,IAAM,qBAAqB;AAEpB,IAAM,gBAAN,MAAoB;AAAA,EAIzB,YAAY,SAAgC;AAC1C,SAAK,WACH,SAAS,WACT,QAAQ,IAAI,aACZ,kBACA,QAAQ,QAAQ,EAAE;AACpB,SAAK,YAAY,SAAS,aAAa;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,SACJ,QACA,SAC8B;AAC9B,WAAO,KAAK,KAA0B,aAAa;AAAA,MACjD;AAAA,MACA,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,iBAAiB,MAAyC;AAC9D,WAAO,KAAK,KAAqB,sBAAsB,IAAI;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,cAAc,QAA8C;AAChE,WAAO,KAAK,KAA0B,mBAAmB,EAAE,OAAO,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,UACJ,QACA,QAC0B;AAC1B,WAAO,KAAK,KAAsB,eAAe,EAAE,QAAQ,OAAO,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,SAAqC;AACzC,WAAO,KAAK,IAAuB,SAAS;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,KAAQ,MAAc,MAA2B;AAC7D,WAAO,KAAK,QAAW,MAAM;AAAA,MAC3B,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,IAC3B,CAAC;AAAA,EACH;AAAA,EAEA,MAAc,IAAO,MAA0B;AAC7C,WAAO,KAAK,QAAW,MAAM,EAAE,QAAQ,MAAM,CAAC;AAAA,EAChD;AAAA,EAEA,MAAc,QACZ,MACA,MACY;AACZ,UAAM,MAAM,GAAG,KAAK,OAAO,GAAG,IAAI;AAClC,UAAM,aAAa,IAAI,gBAAgB;AACvC,UAAM,QAAQ,WAAW,MAAM,WAAW,MAAM,GAAG,KAAK,SAAS;AAEjE,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,KAAK;AAAA,QAChC,GAAG;AAAA,QACH,QAAQ,WAAW;AAAA,MACrB,CAAC;AAED,UAAI,CAAC,SAAS,IAAI;AAChB,cAAM,OAAO,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AACjD,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,MAAM,IAAI,IAAI,uBAAuB,SAAS,MAAM;AAAA,UAClE,SAAS;AAAA,UACT;AAAA,QACF;AAAA,MACF;AAEA,aAAQ,MAAM,SAAS,KAAK;AAAA,IAC9B,SAAS,OAAO;AACd,UAAI,iBAAiB,YAAY;AAC/B,cAAM;AAAA,MACR;AAEA,UAAI,iBAAiB,gBAAgB,MAAM,SAAS,cAAc;AAChE,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,MAAM,IAAI,IAAI,oBAAoB,KAAK,SAAS;AAAA,UAC9D;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,UACJ,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACvD,YAAM,IAAI;AAAA,QACR,SAAS,KAAK,MAAM,IAAI,IAAI,YAAY,OAAO;AAAA,QAC/C;AAAA,QACA;AAAA,MACF;AAAA,IACF,UAAE;AACA,mBAAa,KAAK;AAAA,IACpB;AAAA,EACF;AACF;AAMA,IAAI,iBAAuC;AAEpC,SAAS,iBACd,SACe;AACf,MAAI,CAAC,gBAAgB;AACnB,qBAAiB,IAAI,cAAc,OAAO;AAAA,EAC5C;AACA,SAAO;AACT;AAEO,SAAS,qBAA2B;AACzC,mBAAiB;AACnB;","names":[]}
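The MasarProvider bundled above is a dependency-free fetch wrapper: every call funnels through request(), which applies an AbortController-based timeout and normalizes failures into MasarError (carrying the HTTP status, or 0 for timeouts and network faults). A usage sketch of the shared singleton, assuming the new dist/providers entry is exposed under a ./providers subpath (the exact exports-map entry isn't shown in this diff) and a reachable Masar server; the goal payload is illustrative:

import { getMasarProvider, MasarError } from '@almadar/llm/providers';

const masar = getMasarProvider({ timeoutMs: 10000 });

try {
  // GET /health, then POST /generate/gflownet with a GoalSpec body.
  const health = await masar.health();
  console.log(`Masar status: ${health.status}`);

  const result = await masar.generateGFlowNet({
    description: 'Inventory tracker for a small warehouse', // illustrative goal
    entities: ['Item', 'Location'],
  });
  console.log(result.schema, result.logProb, result.steps);
} catch (error) {
  if (error instanceof MasarError) {
    // statusCode is the HTTP status, or 0 for timeout/network errors.
    console.error(error.statusCode, error.message, error.responseBody);
  } else {
    throw error;
  }
}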
package/dist/client.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { R as RateLimiterOptions, T as TokenUsage } from './rate-limiter-DDH7JH5p.js';
 import { ChatOpenAI } from '@langchain/openai';
 import { ChatAnthropic } from '@langchain/anthropic';
 import { z } from 'zod';
@@ -96,6 +96,7 @@ declare const OPENROUTER_MODELS: {
     readonly LLAMA_4_MAVERICK: "meta-llama/llama-4-maverick";
     readonly LLAMA_4_SCOUT: "meta-llama/llama-4-scout";
     readonly KIMI_K2: "moonshotai/kimi-k2";
+    readonly GLM_4_7: "z-ai/glm-4.7";
 };
 declare class LLMClient {
     private model;
@@ -160,5 +161,6 @@ declare function createOpenAIClient(options?: Partial<Omit<LLMClientOptions, 'pr
 declare function createAnthropicClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
 declare function createKimiClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
 declare function createOpenRouterClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+declare function createZhipuClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
 
-export { ANTHROPIC_MODELS, type CacheAwareLLMCallOptions, type CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, type LLMCallOptions, LLMClient, type LLMClientOptions, type LLMFinishReason, type LLMProvider, type LLMResponse, type LLMStreamChunk, type LLMStreamOptions, type LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, type ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient };
+export { ANTHROPIC_MODELS, type CacheAwareLLMCallOptions, type CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, type LLMCallOptions, LLMClient, type LLMClientOptions, type LLMFinishReason, type LLMProvider, type LLMResponse, type LLMStreamChunk, type LLMStreamOptions, type LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, type ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, createZhipuClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient };
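The new createZhipuClient declaration has the same shape as the other factories, so it is a drop-in alternative. A minimal usage sketch (assumes an OpenRouter API key is configured in the environment; temperature is one of the LLMClientOptions fields):

import { createZhipuClient } from '@almadar/llm';

// Factory preconfigures provider 'openrouter' and model 'z-ai/glm-4.7';
// any LLMClientOptions field except 'provider' may be passed.
const glm = createZhipuClient({ temperature: 0.2 });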
package/dist/client.js
CHANGED
@@ -13,11 +13,12 @@ import {
   createOpenAIClient,
   createOpenRouterClient,
   createRequirementsClient,
+  createZhipuClient,
   getAvailableProvider,
   getSharedLLMClient,
   isProviderAvailable,
   resetSharedLLMClient
-} from "./chunk-3MY2UA2W.js";
+} from "./chunk-JMDIVXMG.js";
 import "./chunk-WM7QVK2Z.js";
 import "./chunk-MJS33AAS.js";
 export {
@@ -35,6 +36,7 @@ export {
   createOpenAIClient,
   createOpenRouterClient,
   createRequirementsClient,
+  createZhipuClient,
   getAvailableProvider,
   getSharedLLMClient,
   isProviderAvailable,
package/dist/index.d.ts
CHANGED
@@ -1,10 +1,11 @@
 import { LLMFinishReason, LLMClient } from './client.js';
-export { ANTHROPIC_MODELS, CacheAwareLLMCallOptions, CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, LLMCallOptions, LLMClientOptions, LLMProvider, LLMResponse, LLMStreamChunk, LLMStreamOptions, LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient } from './client.js';
-export {
+export { ANTHROPIC_MODELS, CacheAwareLLMCallOptions, CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, LLMCallOptions, LLMClientOptions, LLMProvider, LLMResponse, LLMStreamChunk, LLMStreamOptions, LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, createZhipuClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient } from './client.js';
+export { a as RateLimiter, R as RateLimiterOptions, b as TokenTracker, T as TokenUsage, g as getGlobalRateLimiter, c as getGlobalTokenTracker, r as resetGlobalRateLimiter, d as resetGlobalTokenTracker } from './rate-limiter-DDH7JH5p.js';
 export { autoCloseJson, extractJsonFromText, isValidJson, parseJsonResponse, safeParseJson } from './json-parser.js';
 import { z } from 'zod';
 export { JsonSchema, STRUCTURED_OUTPUT_MODELS, StructuredGenerationOptions, StructuredGenerationResult, StructuredOutputClient, StructuredOutputOptions, getStructuredOutputClient, isStructuredOutputAvailable, resetStructuredOutputClient } from './structured-output.js';
 import { ServiceContract } from '@almadar/core';
+export { ErrorPrediction, GFlowNetResult, GoalSpec, MasarError, MasarGenerateOptions, MasarGenerateResult, MasarHealthResult, MasarProvider, MasarProviderOptions, PredictErrorsResult, RankEditsResult, RankedEdit, getMasarProvider, resetMasarProvider } from './providers/index.js';
 import '@langchain/openai';
 import '@langchain/anthropic';
 
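Since the root typings now re-export the Masar declarations, consumers can type against them without the providers subpath. A short sketch (the literal values are illustrative):

import type { GoalSpec } from '@almadar/llm';

// Typed against the re-exported Masar declarations
const goal: GoalSpec = {
  description: 'Inventory tracking service',
  entities: ['Item', 'Warehouse'],
  domain: 'logistics',
};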
package/dist/index.js
CHANGED
@@ -13,11 +13,12 @@ import {
   createOpenAIClient,
   createOpenRouterClient,
   createRequirementsClient,
+  createZhipuClient,
   getAvailableProvider,
   getSharedLLMClient,
   isProviderAvailable,
   resetSharedLLMClient
-} from "./chunk-3MY2UA2W.js";
+} from "./chunk-JMDIVXMG.js";
 import {
   autoCloseJson,
   extractJsonFromText,
@@ -40,6 +41,12 @@ import {
   resetGlobalRateLimiter,
   resetGlobalTokenTracker
 } from "./chunk-MJS33AAS.js";
+import {
+  MasarError,
+  MasarProvider,
+  getMasarProvider,
+  resetMasarProvider
+} from "./chunk-QOOSH67G.js";
 
 // src/truncation-detector.ts
 function detectTruncation(response, finishReason) {
@@ -440,6 +447,8 @@ export {
   DEEPSEEK_MODELS,
   KIMI_MODELS,
   LLMClient,
+  MasarError,
+  MasarProvider,
   OPENAI_MODELS,
   OPENROUTER_MODELS,
   RateLimiter,
@@ -457,12 +466,14 @@ export {
   createOpenAIClient,
   createOpenRouterClient,
   createRequirementsClient,
+  createZhipuClient,
   detectTruncation,
   extractJsonFromText,
   findLastCompleteElement,
   getAvailableProvider,
   getGlobalRateLimiter,
   getGlobalTokenTracker,
+  getMasarProvider,
   getSharedLLMClient,
   getStructuredOutputClient,
   isLikelyTruncated,
@@ -473,6 +484,7 @@ export {
   parseJsonResponse,
   resetGlobalRateLimiter,
   resetGlobalTokenTracker,
+  resetMasarProvider,
   resetSharedLLMClient,
   resetStructuredOutputClient,
   safeParseJson,
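With these additions the root bundle re-exports the Masar runtime values alongside the LLM client helpers, so existing root imports gain access to the provider. A hedged sketch (the prompt string is illustrative; the statusCode-of-0 convention for timeouts follows from the MasarError construction in the bundled source earlier in this diff):

import { getMasarProvider, MasarError } from '@almadar/llm';

async function summarize(): Promise<string> {
  try {
    // POST /generate on the Masar server configured via MASAR_URL
    const result = await getMasarProvider().generate('Summarize the changelog.');
    return result.text;
  } catch (err) {
    if (err instanceof MasarError) {
      // statusCode is the HTTP status, or 0 for timeouts and network failures
      console.error(err.statusCode, err.responseBody);
    }
    throw err;
  }
}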
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/truncation-detector.ts","../src/continuation.ts"],"sourcesContent":["/**\n * Truncation Detector\n *\n * Utilities for detecting when LLM output has been truncated and\n * extracting usable content from partial responses.\n *\n * @packageDocumentation\n */\n\nimport type { LLMFinishReason } from './client.js';\nimport { autoCloseJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type TruncationReason =\n | 'finish_reason'\n | 'json_incomplete'\n | 'bracket_mismatch'\n | 'none';\n\nexport interface TruncationResult {\n isTruncated: boolean;\n reason: TruncationReason;\n partialContent?: string;\n lastCompleteElement?: unknown;\n missingCloseBrackets?: number;\n missingCloseBraces?: number;\n}\n\n// ============================================================================\n// Main Detection Function\n// ============================================================================\n\nexport function detectTruncation(\n response: string,\n finishReason: LLMFinishReason,\n): TruncationResult {\n if (finishReason === 'length') {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'finish_reason',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n JSON.parse(response);\n return { isTruncated: false, reason: 'none' };\n } catch {\n // JSON is invalid, check if due to truncation\n }\n\n if (finishReason === 'stop' || finishReason === null) {\n const trimmed = response.trim();\n\n const isMidContent =\n trimmed.endsWith(',') ||\n trimmed.endsWith(':') ||\n trimmed.endsWith('\": ') ||\n /:\\s*$/.test(trimmed) ||\n /,\\s*$/.test(trimmed);\n\n if (isMidContent) {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'json_incomplete',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n const closed = autoCloseJson(trimmed);\n JSON.parse(closed);\n return { isTruncated: false, reason: 'none' };\n } catch {\n return { isTruncated: false, reason: 'none' };\n }\n }\n\n const bracketInfo = countBrackets(response);\n if (\n bracketInfo.missingCloseBrackets > 0 ||\n bracketInfo.missingCloseBraces > 0\n ) {\n return {\n isTruncated: true,\n reason: 'bracket_mismatch',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n return { isTruncated: false, reason: 'none' };\n}\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nfunction countBrackets(json: string): {\n openBrackets: number;\n closeBrackets: number;\n openBraces: number;\n closeBraces: number;\n missingCloseBrackets: number;\n missingCloseBraces: number;\n} {\n let inString = false;\n let escaped = false;\n let openBrackets = 0;\n let closeBrackets = 0;\n let openBraces = 0;\n let closeBraces = 0;\n\n for (const char of json) {\n if (escaped) {\n escaped = false;\n 
continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n switch (char) {\n case '[':\n openBrackets++;\n break;\n case ']':\n closeBrackets++;\n break;\n case '{':\n openBraces++;\n break;\n case '}':\n closeBraces++;\n break;\n }\n }\n\n return {\n openBrackets,\n closeBrackets,\n openBraces,\n closeBraces,\n missingCloseBrackets: Math.max(0, openBrackets - closeBrackets),\n missingCloseBraces: Math.max(0, openBraces - closeBraces),\n };\n}\n\nexport function findLastCompleteElement(json: string): unknown | null {\n const autoClosed = autoCloseJson(json);\n try {\n return JSON.parse(autoClosed);\n } catch {\n // Auto-close didn't work\n }\n\n const trimmed = json.trim();\n\n if (trimmed.startsWith('[')) {\n const lastCompleteIndex = findLastCompleteArrayElement(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + ']';\n try {\n return JSON.parse(subset);\n } catch {\n // Continue\n }\n }\n }\n\n if (trimmed.startsWith('{')) {\n const closed = autoCloseJson(trimmed);\n try {\n return JSON.parse(closed);\n } catch {\n const lastCompleteIndex = findLastCompleteObjectProperty(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + '}';\n try {\n return JSON.parse(subset);\n } catch {\n // Give up\n }\n }\n }\n }\n\n return null;\n}\n\nfunction findLastCompleteArrayElement(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCompleteElementEnd = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n if (depth === 1) {\n lastCompleteElementEnd = i + 1;\n }\n } else if (char === ',' && depth === 1) {\n lastCompleteElementEnd = i;\n }\n }\n\n return lastCompleteElementEnd > 0 ? lastCompleteElementEnd : -1;\n}\n\nfunction findLastCompleteObjectProperty(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCommaIndex = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n } else if (char === ',' && depth === 1) {\n lastCommaIndex = i;\n }\n }\n\n return lastCommaIndex > 0 ? 
lastCommaIndex : -1;\n}\n\nexport function isLikelyTruncated(content: string): boolean {\n const trimmed = content.trim();\n if (!trimmed) return false;\n\n const brackets = countBrackets(trimmed);\n if (\n brackets.missingCloseBrackets > 0 ||\n brackets.missingCloseBraces > 0\n ) {\n return true;\n }\n\n const abruptEndings = [\n /,\\s*$/,\n /:\\s*$/,\n /\"\\s*:\\s*$/,\n /\\[\\s*$/,\n /{\\s*$/,\n ];\n\n for (const pattern of abruptEndings) {\n if (pattern.test(trimmed)) return true;\n }\n\n return false;\n}\n","/**\n * LLM Continuation Utility\n *\n * Handles truncated LLM responses with automatic continuation.\n * - Detects truncation via finish_reason and JSON structure\n * - Automatically continues with full context\n * - Merges partial and continuation responses\n * - Salvages partial data if max continuations reached\n *\n * @packageDocumentation\n */\n\nimport { z } from 'zod';\nimport { LLMClient, type LLMFinishReason } from './client.js';\nimport { detectTruncation } from './truncation-detector.js';\nimport { extractJsonFromText, autoCloseJson, isValidJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport interface ContinuationOptions<T> {\n client: LLMClient;\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxTokens?: number;\n maxContinuations?: number;\n maxRetries?: number;\n buildContinuationPrompt: (\n partialResponse: string,\n attempt: number,\n ) => string;\n continuationSystemPrompt?: string;\n}\n\nexport interface ContinuationResult<T> {\n data: T;\n raw: string;\n continuationCount: number;\n warnings: string[];\n wasSalvaged: boolean;\n}\n\n// ============================================================================\n// Constants\n// ============================================================================\n\nconst DEFAULT_MAX_TOKENS = 8192;\nconst DEFAULT_MAX_CONTINUATIONS = 3;\n\n/**\n * Default continuation system prompt.\n * Used when no custom continuationSystemPrompt is provided.\n */\nconst DEFAULT_CONTINUATION_SYSTEM_PROMPT = `You are a JSON continuation assistant. Your ONLY job is to continue generating JSON from where the previous response was truncated.\n\nRules:\n1. Continue from EXACTLY where the previous output stopped\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly with all closing brackets\n4. Do NOT wrap in markdown code blocks\n5. 
Output ONLY the continuation JSON, nothing else`;\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nexport function mergeResponses(\n previous: string,\n continuation: string,\n): string {\n const trimmedPrev = previous.trimEnd();\n const trimmedCont = continuation.trimStart();\n\n let cleanedCont = trimmedCont\n .replace(/^```json?\\s*/i, '')\n .replace(/```\\s*$/i, '')\n .trim();\n\n if (cleanedCont.startsWith('{')) {\n try {\n const contParsed = JSON.parse(autoCloseJson(cleanedCont));\n const keys = Object.keys(contParsed);\n if (keys.length === 1 && Array.isArray(contParsed[keys[0]])) {\n cleanedCont = contParsed[keys[0]]\n .map((item: unknown) => JSON.stringify(item))\n .join(',\\n');\n }\n } catch {\n // Continue with original cleaning\n }\n }\n\n if (cleanedCont.startsWith('}') || cleanedCont.startsWith(']')) {\n return trimmedPrev + cleanedCont;\n }\n\n const prevEndsWithValue = /[\\}\\]\\\"\\d]$/.test(trimmedPrev);\n const contStartsWithValue = /^[\\{\\[\\\"]/.test(cleanedCont);\n\n if (prevEndsWithValue && contStartsWithValue) {\n return trimmedPrev + ',\\n' + cleanedCont;\n }\n\n return trimmedPrev + cleanedCont;\n}\n\nexport function salvagePartialResponse<T>(rawResponse: string): T | null {\n console.warn('[Continuation] Attempting to salvage partial response');\n\n try {\n const cleanedResponse = extractJsonFromText(rawResponse) || rawResponse;\n const closed = autoCloseJson(cleanedResponse);\n const parsed = JSON.parse(closed) as T;\n console.log('[Continuation] Successfully salvaged partial response');\n return parsed;\n } catch (error) {\n console.error('[Continuation] Could not salvage response:', error);\n }\n\n return null;\n}\n\n// ============================================================================\n// Main Function\n// ============================================================================\n\nexport async function callWithContinuation<T>(\n options: ContinuationOptions<T>,\n): Promise<ContinuationResult<T>> {\n const {\n client,\n systemPrompt,\n userPrompt,\n schema,\n maxTokens = DEFAULT_MAX_TOKENS,\n maxContinuations = DEFAULT_MAX_CONTINUATIONS,\n buildContinuationPrompt,\n continuationSystemPrompt = DEFAULT_CONTINUATION_SYSTEM_PROMPT,\n } = options;\n\n let rawResponse = '';\n let continuationCount = 0;\n const warnings: string[] = [];\n let wasSalvaged = false;\n\n console.log('[Continuation] Starting LLM call with continuation support');\n console.log(\n `[Continuation] Max tokens: ${maxTokens}, Max continuations: ${maxContinuations}`,\n );\n\n try {\n const response = await client.callRawWithMetadata({\n systemPrompt,\n userPrompt,\n maxTokens,\n });\n\n rawResponse = extractJsonFromText(response.raw) || response.raw;\n\n console.log(\n `[Continuation] Initial response: ${rawResponse.length} chars, finish_reason: ${response.finishReason}`,\n );\n\n let truncation = detectTruncation(rawResponse, response.finishReason);\n\n while (truncation.isTruncated && continuationCount < maxContinuations) {\n continuationCount++;\n const warningMsg = `Response truncated (${truncation.reason}), continuing (attempt ${continuationCount}/${maxContinuations})`;\n console.log(`[Continuation] ${warningMsg}`);\n warnings.push(warningMsg);\n\n const contPrompt = buildContinuationPrompt(\n rawResponse,\n continuationCount,\n );\n\n const contResponse = await client.callRawWithMetadata({\n systemPrompt: continuationSystemPrompt,\n 
userPrompt: contPrompt,\n maxTokens,\n });\n\n console.log(\n `[Continuation] Continuation response: ${contResponse.raw.length} chars, finish_reason: ${contResponse.finishReason}`,\n );\n\n const cleanedContResponse =\n extractJsonFromText(contResponse.raw) || contResponse.raw;\n rawResponse = mergeResponses(rawResponse, cleanedContResponse);\n\n truncation = detectTruncation(rawResponse, contResponse.finishReason);\n }\n\n if (\n continuationCount >= maxContinuations &&\n truncation.isTruncated\n ) {\n console.warn(\n `[Continuation] Reached max continuations (${maxContinuations}), attempting to salvage...`,\n );\n warnings.push(\n `Reached max continuations - some content may be incomplete`,\n );\n wasSalvaged = true;\n }\n\n const cleanedResponse =\n extractJsonFromText(rawResponse) || rawResponse;\n let data: T;\n\n try {\n if (isValidJson(cleanedResponse)) {\n data = JSON.parse(cleanedResponse) as T;\n } else {\n const closed = autoCloseJson(cleanedResponse);\n data = JSON.parse(closed) as T;\n if (!wasSalvaged) {\n warnings.push('Response required auto-closing of JSON brackets');\n }\n }\n } catch (parseError) {\n const salvaged = salvagePartialResponse<T>(cleanedResponse);\n if (salvaged) {\n data = salvaged;\n wasSalvaged = true;\n warnings.push('Response was salvaged from partial data');\n } else {\n throw new Error(\n `Failed to parse response after ${continuationCount} continuations: ${parseError}`,\n );\n }\n }\n\n if (schema) {\n try {\n data = schema.parse(data);\n } catch (validationError) {\n console.warn(\n '[Continuation] Schema validation failed:',\n validationError,\n );\n warnings.push(`Schema validation issue: ${validationError}`);\n }\n }\n\n console.log(\n `[Continuation] Complete. Continuations: ${continuationCount}, Warnings: ${warnings.length}`,\n );\n\n return {\n data,\n raw: rawResponse,\n continuationCount,\n warnings,\n wasSalvaged,\n };\n } catch (error) {\n console.error('[Continuation] Error during LLM call:', error);\n throw error;\n }\n}\n\nexport function buildGenericContinuationPrompt(\n context: string,\n partialResponse: string,\n attempt: number,\n maxAttempts: number = DEFAULT_MAX_CONTINUATIONS,\n): string {\n return `## CONTINUATION REQUEST (Attempt ${attempt}/${maxAttempts})\n\nYour previous response was truncated. Continue generating from where you left off.\n\n### ORIGINAL CONTEXT\n${context}\n\n### WHAT YOU GENERATED SO FAR\n\\`\\`\\`json\n${partialResponse}\n\\`\\`\\`\n\n### INSTRUCTIONS\n1. Continue from EXACTLY where the response was cut off\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly\n4. 
Do NOT wrap your response in markdown code blocks\n\nContinue generating now:`;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCO,SAAS,iBACd,UACA,cACkB;AAClB,MAAI,iBAAiB,UAAU;AAC7B,UAAMA,eAAc,cAAc,QAAQ;AAC1C,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsBA,aAAY;AAAA,MAClC,oBAAoBA,aAAY;AAAA,IAClC;AAAA,EACF;AAEA,MAAI;AACF,SAAK,MAAM,QAAQ;AACnB,WAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,EAC9C,QAAQ;AAAA,EAER;AAEA,MAAI,iBAAiB,UAAU,iBAAiB,MAAM;AACpD,UAAM,UAAU,SAAS,KAAK;AAE9B,UAAM,eACJ,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,KAAK,KACtB,QAAQ,KAAK,OAAO,KACpB,QAAQ,KAAK,OAAO;AAEtB,QAAI,cAAc;AAChB,YAAMA,eAAc,cAAc,QAAQ;AAC1C,aAAO;AAAA,QACL,aAAa;AAAA,QACb,QAAQ;AAAA,QACR,gBAAgB;AAAA,QAChB,qBAAqB,wBAAwB,QAAQ;AAAA,QACrD,sBAAsBA,aAAY;AAAA,QAClC,oBAAoBA,aAAY;AAAA,MAClC;AAAA,IACF;AAEA,QAAI;AACF,YAAM,SAAS,cAAc,OAAO;AACpC,WAAK,MAAM,MAAM;AACjB,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C,QAAQ;AACN,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C;AAAA,EACF;AAEA,QAAM,cAAc,cAAc,QAAQ;AAC1C,MACE,YAAY,uBAAuB,KACnC,YAAY,qBAAqB,GACjC;AACA,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsB,YAAY;AAAA,MAClC,oBAAoB,YAAY;AAAA,IAClC;AAAA,EACF;AAEA,SAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAC9C;AAMA,SAAS,cAAc,MAOrB;AACA,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,eAAe;AACnB,MAAI,gBAAgB;AACpB,MAAI,aAAa;AACjB,MAAI,cAAc;AAElB,aAAW,QAAQ,MAAM;AACvB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,IACJ;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,sBAAsB,KAAK,IAAI,GAAG,eAAe,aAAa;AAAA,IAC9D,oBAAoB,KAAK,IAAI,GAAG,aAAa,WAAW;AAAA,EAC1D;AACF;AAEO,SAAS,wBAAwB,MAA8B;AACpE,QAAM,aAAa,cAAc,IAAI;AACrC,MAAI;AACF,WAAO,KAAK,MAAM,UAAU;AAAA,EAC9B,QAAQ;AAAA,EAER;AAEA,QAAM,UAAU,KAAK,KAAK;AAE1B,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,oBAAoB,6BAA6B,OAAO;AAC9D,QAAI,oBAAoB,GAAG;AACzB,YAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,UAAI;AACF,eAAO,KAAK,MAAM,MAAM;AAAA,MAC1B,QAAQ;AAAA,MAER;AAAA,IACF;AAAA,EACF;AAEA,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,SAAS,cAAc,OAAO;AACpC,QAAI;AACF,aAAO,KAAK,MAAM,MAAM;AAAA,IAC1B,QAAQ;AACN,YAAM,oBAAoB,+BAA+B,OAAO;AAChE,UAAI,oBAAoB,GAAG;AACzB,cAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,YAAI;AACF,iBAAO,KAAK,MAAM,MAAM;AAAA,QAC1B,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAEA,SAAS,6BAA6B,MAAsB;AAC1D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,yBAAyB;AAE7B,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AACA,UAAI,UAAU,GAAG;AACf,iCAAyB,IAAI;AAAA,MAC/B;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,+BAAyB;AAAA,IAC3B;AAAA,EACF;AAEA,SAAO,yBAAyB,IAAI,yBAAyB;AAC/D;AAEA,SAAS,+BAA+B,MAAsB;AAC5D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,iBAAiB;AAErB,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,uBAAiB;AAAA,IACnB;AAAA,EACF;AAEA,SAAO,iBAAiB,IAAI,iBAAiB;AAC/C;A
AEO,SAAS,kBAAkB,SAA0B;AAC1D,QAAM,UAAU,QAAQ,KAAK;AAC7B,MAAI,CAAC,QAAS,QAAO;AAErB,QAAM,WAAW,cAAc,OAAO;AACtC,MACE,SAAS,uBAAuB,KAChC,SAAS,qBAAqB,GAC9B;AACA,WAAO;AAAA,EACT;AAEA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,aAAW,WAAW,eAAe;AACnC,QAAI,QAAQ,KAAK,OAAO,EAAG,QAAO;AAAA,EACpC;AAEA,SAAO;AACT;;;ACnQA,IAAM,qBAAqB;AAC3B,IAAM,4BAA4B;AAMlC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAapC,SAAS,eACd,UACA,cACQ;AACR,QAAM,cAAc,SAAS,QAAQ;AACrC,QAAM,cAAc,aAAa,UAAU;AAE3C,MAAI,cAAc,YACf,QAAQ,iBAAiB,EAAE,EAC3B,QAAQ,YAAY,EAAE,EACtB,KAAK;AAER,MAAI,YAAY,WAAW,GAAG,GAAG;AAC/B,QAAI;AACF,YAAM,aAAa,KAAK,MAAM,cAAc,WAAW,CAAC;AACxD,YAAM,OAAO,OAAO,KAAK,UAAU;AACnC,UAAI,KAAK,WAAW,KAAK,MAAM,QAAQ,WAAW,KAAK,CAAC,CAAC,CAAC,GAAG;AAC3D,sBAAc,WAAW,KAAK,CAAC,CAAC,EAC7B,IAAI,CAAC,SAAkB,KAAK,UAAU,IAAI,CAAC,EAC3C,KAAK,KAAK;AAAA,MACf;AAAA,IACF,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,MAAI,YAAY,WAAW,GAAG,KAAK,YAAY,WAAW,GAAG,GAAG;AAC9D,WAAO,cAAc;AAAA,EACvB;AAEA,QAAM,oBAAoB,cAAc,KAAK,WAAW;AACxD,QAAM,sBAAsB,YAAY,KAAK,WAAW;AAExD,MAAI,qBAAqB,qBAAqB;AAC5C,WAAO,cAAc,QAAQ;AAAA,EAC/B;AAEA,SAAO,cAAc;AACvB;AAEO,SAAS,uBAA0B,aAA+B;AACvE,UAAQ,KAAK,uDAAuD;AAEpE,MAAI;AACF,UAAM,kBAAkB,oBAAoB,WAAW,KAAK;AAC5D,UAAM,SAAS,cAAc,eAAe;AAC5C,UAAM,SAAS,KAAK,MAAM,MAAM;AAChC,YAAQ,IAAI,uDAAuD;AACnE,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,8CAA8C,KAAK;AAAA,EACnE;AAEA,SAAO;AACT;AAMA,eAAsB,qBACpB,SACgC;AAChC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,YAAY;AAAA,IACZ,mBAAmB;AAAA,IACnB;AAAA,IACA,2BAA2B;AAAA,EAC7B,IAAI;AAEJ,MAAI,cAAc;AAClB,MAAI,oBAAoB;AACxB,QAAM,WAAqB,CAAC;AAC5B,MAAI,cAAc;AAElB,UAAQ,IAAI,4DAA4D;AACxE,UAAQ;AAAA,IACN,8BAA8B,SAAS,wBAAwB,gBAAgB;AAAA,EACjF;AAEA,MAAI;AACF,UAAM,WAAW,MAAM,OAAO,oBAAoB;AAAA,MAChD;AAAA,MACA;AAAA,MACA;AAAA,IACF,CAAC;AAED,kBAAc,oBAAoB,SAAS,GAAG,KAAK,SAAS;AAE5D,YAAQ;AAAA,MACN,oCAAoC,YAAY,MAAM,0BAA0B,SAAS,YAAY;AAAA,IACvG;AAEA,QAAI,aAAa,iBAAiB,aAAa,SAAS,YAAY;AAEpE,WAAO,WAAW,eAAe,oBAAoB,kBAAkB;AACrE;AACA,YAAM,aAAa,uBAAuB,WAAW,MAAM,0BAA0B,iBAAiB,IAAI,gBAAgB;AAC1H,cAAQ,IAAI,kBAAkB,UAAU,EAAE;AAC1C,eAAS,KAAK,UAAU;AAExB,YAAM,aAAa;AAAA,QACjB;AAAA,QACA;AAAA,MACF;AAEA,YAAM,eAAe,MAAM,OAAO,oBAAoB;AAAA,QACpD,cAAc;AAAA,QACd,YAAY;AAAA,QACZ;AAAA,MACF,CAAC;AAED,cAAQ;AAAA,QACN,yCAAyC,aAAa,IAAI,MAAM,0BAA0B,aAAa,YAAY;AAAA,MACrH;AAEA,YAAM,sBACJ,oBAAoB,aAAa,GAAG,KAAK,aAAa;AACxD,oBAAc,eAAe,aAAa,mBAAmB;AAE7D,mBAAa,iBAAiB,aAAa,aAAa,YAAY;AAAA,IACtE;AAEA,QACE,qBAAqB,oBACrB,WAAW,aACX;AACA,cAAQ;AAAA,QACN,6CAA6C,gBAAgB;AAAA,MAC/D;AACA,eAAS;AAAA,QACP;AAAA,MACF;AACA,oBAAc;AAAA,IAChB;AAEA,UAAM,kBACJ,oBAAoB,WAAW,KAAK;AACtC,QAAI;AAEJ,QAAI;AACF,UAAI,YAAY,eAAe,GAAG;AAChC,eAAO,KAAK,MAAM,eAAe;AAAA,MACnC,OAAO;AACL,cAAM,SAAS,cAAc,eAAe;AAC5C,eAAO,KAAK,MAAM,MAAM;AACxB,YAAI,CAAC,aAAa;AAChB,mBAAS,KAAK,iDAAiD;AAAA,QACjE;AAAA,MACF;AAAA,IACF,SAAS,YAAY;AACnB,YAAM,WAAW,uBAA0B,eAAe;AAC1D,UAAI,UAAU;AACZ,eAAO;AACP,sBAAc;AACd,iBAAS,KAAK,yCAAyC;AAAA,MACzD,OAAO;AACL,cAAM,IAAI;AAAA,UACR,kCAAkC,iBAAiB,mBAAmB,UAAU;AAAA,QAClF;AAAA,MACF;AAAA,IACF;AAEA,QAAI,QAAQ;AACV,UAAI;AACF,eAAO,OAAO,MAAM,IAAI;AAAA,MAC1B,SAAS,iBAAiB;AACxB,gBAAQ;AAAA,UACN;AAAA,UACA;AAAA,QACF;AACA,iBAAS,KAAK,4BAA4B,eAAe,EAAE;AAAA,MAC7D;AAAA,IACF;AAEA,YAAQ;AAAA,MACN,2CAA2C,iBAAiB,eAAe,SAAS,MAAM;AAAA,IAC5F;AAEA,WAAO;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,yCAAyC,KAAK;AAC5D,UAAM;AAAA,EACR;AACF;AAEO,SAAS,+BACd,SACA,iBACA,SACA,cAAsB,2BACd;AACR,SAAO,oCAAoC,OAAO,IAAI,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA,EAKjE,OAAO;AAAA;AAAA;AAAA;AAAA,EAIP,eAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAUjB;","names":["bracketInfo"]}
+
{"version":3,"sources":["../src/truncation-detector.ts","../src/continuation.ts"],"sourcesContent":["/**\n * Truncation Detector\n *\n * Utilities for detecting when LLM output has been truncated and\n * extracting usable content from partial responses.\n *\n * @packageDocumentation\n */\n\nimport type { LLMFinishReason } from './client.js';\nimport { autoCloseJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type TruncationReason =\n | 'finish_reason'\n | 'json_incomplete'\n | 'bracket_mismatch'\n | 'none';\n\nexport interface TruncationResult {\n isTruncated: boolean;\n reason: TruncationReason;\n partialContent?: string;\n lastCompleteElement?: unknown;\n missingCloseBrackets?: number;\n missingCloseBraces?: number;\n}\n\n// ============================================================================\n// Main Detection Function\n// ============================================================================\n\nexport function detectTruncation(\n response: string,\n finishReason: LLMFinishReason,\n): TruncationResult {\n if (finishReason === 'length') {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'finish_reason',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n JSON.parse(response);\n return { isTruncated: false, reason: 'none' };\n } catch {\n // JSON is invalid, check if due to truncation\n }\n\n if (finishReason === 'stop' || finishReason === null) {\n const trimmed = response.trim();\n\n const isMidContent =\n trimmed.endsWith(',') ||\n trimmed.endsWith(':') ||\n trimmed.endsWith('\": ') ||\n /:\\s*$/.test(trimmed) ||\n /,\\s*$/.test(trimmed);\n\n if (isMidContent) {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'json_incomplete',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n const closed = autoCloseJson(trimmed);\n JSON.parse(closed);\n return { isTruncated: false, reason: 'none' };\n } catch {\n return { isTruncated: false, reason: 'none' };\n }\n }\n\n const bracketInfo = countBrackets(response);\n if (\n bracketInfo.missingCloseBrackets > 0 ||\n bracketInfo.missingCloseBraces > 0\n ) {\n return {\n isTruncated: true,\n reason: 'bracket_mismatch',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n return { isTruncated: false, reason: 'none' };\n}\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nfunction countBrackets(json: string): {\n openBrackets: number;\n closeBrackets: number;\n openBraces: number;\n closeBraces: number;\n missingCloseBrackets: number;\n missingCloseBraces: number;\n} {\n let inString = false;\n let escaped = false;\n let openBrackets = 0;\n let closeBrackets = 0;\n let openBraces = 0;\n let closeBraces = 0;\n\n for (const char of json) {\n if (escaped) {\n escaped = false;\n 
continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n switch (char) {\n case '[':\n openBrackets++;\n break;\n case ']':\n closeBrackets++;\n break;\n case '{':\n openBraces++;\n break;\n case '}':\n closeBraces++;\n break;\n }\n }\n\n return {\n openBrackets,\n closeBrackets,\n openBraces,\n closeBraces,\n missingCloseBrackets: Math.max(0, openBrackets - closeBrackets),\n missingCloseBraces: Math.max(0, openBraces - closeBraces),\n };\n}\n\nexport function findLastCompleteElement(json: string): unknown | null {\n const autoClosed = autoCloseJson(json);\n try {\n return JSON.parse(autoClosed);\n } catch {\n // Auto-close didn't work\n }\n\n const trimmed = json.trim();\n\n if (trimmed.startsWith('[')) {\n const lastCompleteIndex = findLastCompleteArrayElement(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + ']';\n try {\n return JSON.parse(subset);\n } catch {\n // Continue\n }\n }\n }\n\n if (trimmed.startsWith('{')) {\n const closed = autoCloseJson(trimmed);\n try {\n return JSON.parse(closed);\n } catch {\n const lastCompleteIndex = findLastCompleteObjectProperty(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + '}';\n try {\n return JSON.parse(subset);\n } catch {\n // Give up\n }\n }\n }\n }\n\n return null;\n}\n\nfunction findLastCompleteArrayElement(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCompleteElementEnd = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n if (depth === 1) {\n lastCompleteElementEnd = i + 1;\n }\n } else if (char === ',' && depth === 1) {\n lastCompleteElementEnd = i;\n }\n }\n\n return lastCompleteElementEnd > 0 ? lastCompleteElementEnd : -1;\n}\n\nfunction findLastCompleteObjectProperty(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCommaIndex = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n } else if (char === ',' && depth === 1) {\n lastCommaIndex = i;\n }\n }\n\n return lastCommaIndex > 0 ? 
lastCommaIndex : -1;\n}\n\nexport function isLikelyTruncated(content: string): boolean {\n const trimmed = content.trim();\n if (!trimmed) return false;\n\n const brackets = countBrackets(trimmed);\n if (\n brackets.missingCloseBrackets > 0 ||\n brackets.missingCloseBraces > 0\n ) {\n return true;\n }\n\n const abruptEndings = [\n /,\\s*$/,\n /:\\s*$/,\n /\"\\s*:\\s*$/,\n /\\[\\s*$/,\n /{\\s*$/,\n ];\n\n for (const pattern of abruptEndings) {\n if (pattern.test(trimmed)) return true;\n }\n\n return false;\n}\n","/**\n * LLM Continuation Utility\n *\n * Handles truncated LLM responses with automatic continuation.\n * - Detects truncation via finish_reason and JSON structure\n * - Automatically continues with full context\n * - Merges partial and continuation responses\n * - Salvages partial data if max continuations reached\n *\n * @packageDocumentation\n */\n\nimport { z } from 'zod';\nimport { LLMClient, type LLMFinishReason } from './client.js';\nimport { detectTruncation } from './truncation-detector.js';\nimport { extractJsonFromText, autoCloseJson, isValidJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport interface ContinuationOptions<T> {\n client: LLMClient;\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxTokens?: number;\n maxContinuations?: number;\n maxRetries?: number;\n buildContinuationPrompt: (\n partialResponse: string,\n attempt: number,\n ) => string;\n continuationSystemPrompt?: string;\n}\n\nexport interface ContinuationResult<T> {\n data: T;\n raw: string;\n continuationCount: number;\n warnings: string[];\n wasSalvaged: boolean;\n}\n\n// ============================================================================\n// Constants\n// ============================================================================\n\nconst DEFAULT_MAX_TOKENS = 8192;\nconst DEFAULT_MAX_CONTINUATIONS = 3;\n\n/**\n * Default continuation system prompt.\n * Used when no custom continuationSystemPrompt is provided.\n */\nconst DEFAULT_CONTINUATION_SYSTEM_PROMPT = `You are a JSON continuation assistant. Your ONLY job is to continue generating JSON from where the previous response was truncated.\n\nRules:\n1. Continue from EXACTLY where the previous output stopped\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly with all closing brackets\n4. Do NOT wrap in markdown code blocks\n5. 
Output ONLY the continuation JSON, nothing else`;\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nexport function mergeResponses(\n previous: string,\n continuation: string,\n): string {\n const trimmedPrev = previous.trimEnd();\n const trimmedCont = continuation.trimStart();\n\n let cleanedCont = trimmedCont\n .replace(/^```json?\\s*/i, '')\n .replace(/```\\s*$/i, '')\n .trim();\n\n if (cleanedCont.startsWith('{')) {\n try {\n const contParsed = JSON.parse(autoCloseJson(cleanedCont));\n const keys = Object.keys(contParsed);\n if (keys.length === 1 && Array.isArray(contParsed[keys[0]])) {\n cleanedCont = contParsed[keys[0]]\n .map((item: unknown) => JSON.stringify(item))\n .join(',\\n');\n }\n } catch {\n // Continue with original cleaning\n }\n }\n\n if (cleanedCont.startsWith('}') || cleanedCont.startsWith(']')) {\n return trimmedPrev + cleanedCont;\n }\n\n const prevEndsWithValue = /[\\}\\]\\\"\\d]$/.test(trimmedPrev);\n const contStartsWithValue = /^[\\{\\[\\\"]/.test(cleanedCont);\n\n if (prevEndsWithValue && contStartsWithValue) {\n return trimmedPrev + ',\\n' + cleanedCont;\n }\n\n return trimmedPrev + cleanedCont;\n}\n\nexport function salvagePartialResponse<T>(rawResponse: string): T | null {\n console.warn('[Continuation] Attempting to salvage partial response');\n\n try {\n const cleanedResponse = extractJsonFromText(rawResponse) || rawResponse;\n const closed = autoCloseJson(cleanedResponse);\n const parsed = JSON.parse(closed) as T;\n console.log('[Continuation] Successfully salvaged partial response');\n return parsed;\n } catch (error) {\n console.error('[Continuation] Could not salvage response:', error);\n }\n\n return null;\n}\n\n// ============================================================================\n// Main Function\n// ============================================================================\n\nexport async function callWithContinuation<T>(\n options: ContinuationOptions<T>,\n): Promise<ContinuationResult<T>> {\n const {\n client,\n systemPrompt,\n userPrompt,\n schema,\n maxTokens = DEFAULT_MAX_TOKENS,\n maxContinuations = DEFAULT_MAX_CONTINUATIONS,\n buildContinuationPrompt,\n continuationSystemPrompt = DEFAULT_CONTINUATION_SYSTEM_PROMPT,\n } = options;\n\n let rawResponse = '';\n let continuationCount = 0;\n const warnings: string[] = [];\n let wasSalvaged = false;\n\n console.log('[Continuation] Starting LLM call with continuation support');\n console.log(\n `[Continuation] Max tokens: ${maxTokens}, Max continuations: ${maxContinuations}`,\n );\n\n try {\n const response = await client.callRawWithMetadata({\n systemPrompt,\n userPrompt,\n maxTokens,\n });\n\n rawResponse = extractJsonFromText(response.raw) || response.raw;\n\n console.log(\n `[Continuation] Initial response: ${rawResponse.length} chars, finish_reason: ${response.finishReason}`,\n );\n\n let truncation = detectTruncation(rawResponse, response.finishReason);\n\n while (truncation.isTruncated && continuationCount < maxContinuations) {\n continuationCount++;\n const warningMsg = `Response truncated (${truncation.reason}), continuing (attempt ${continuationCount}/${maxContinuations})`;\n console.log(`[Continuation] ${warningMsg}`);\n warnings.push(warningMsg);\n\n const contPrompt = buildContinuationPrompt(\n rawResponse,\n continuationCount,\n );\n\n const contResponse = await client.callRawWithMetadata({\n systemPrompt: continuationSystemPrompt,\n 
userPrompt: contPrompt,\n maxTokens,\n });\n\n console.log(\n `[Continuation] Continuation response: ${contResponse.raw.length} chars, finish_reason: ${contResponse.finishReason}`,\n );\n\n const cleanedContResponse =\n extractJsonFromText(contResponse.raw) || contResponse.raw;\n rawResponse = mergeResponses(rawResponse, cleanedContResponse);\n\n truncation = detectTruncation(rawResponse, contResponse.finishReason);\n }\n\n if (\n continuationCount >= maxContinuations &&\n truncation.isTruncated\n ) {\n console.warn(\n `[Continuation] Reached max continuations (${maxContinuations}), attempting to salvage...`,\n );\n warnings.push(\n `Reached max continuations - some content may be incomplete`,\n );\n wasSalvaged = true;\n }\n\n const cleanedResponse =\n extractJsonFromText(rawResponse) || rawResponse;\n let data: T;\n\n try {\n if (isValidJson(cleanedResponse)) {\n data = JSON.parse(cleanedResponse) as T;\n } else {\n const closed = autoCloseJson(cleanedResponse);\n data = JSON.parse(closed) as T;\n if (!wasSalvaged) {\n warnings.push('Response required auto-closing of JSON brackets');\n }\n }\n } catch (parseError) {\n const salvaged = salvagePartialResponse<T>(cleanedResponse);\n if (salvaged) {\n data = salvaged;\n wasSalvaged = true;\n warnings.push('Response was salvaged from partial data');\n } else {\n throw new Error(\n `Failed to parse response after ${continuationCount} continuations: ${parseError}`,\n );\n }\n }\n\n if (schema) {\n try {\n data = schema.parse(data);\n } catch (validationError) {\n console.warn(\n '[Continuation] Schema validation failed:',\n validationError,\n );\n warnings.push(`Schema validation issue: ${validationError}`);\n }\n }\n\n console.log(\n `[Continuation] Complete. Continuations: ${continuationCount}, Warnings: ${warnings.length}`,\n );\n\n return {\n data,\n raw: rawResponse,\n continuationCount,\n warnings,\n wasSalvaged,\n };\n } catch (error) {\n console.error('[Continuation] Error during LLM call:', error);\n throw error;\n }\n}\n\nexport function buildGenericContinuationPrompt(\n context: string,\n partialResponse: string,\n attempt: number,\n maxAttempts: number = DEFAULT_MAX_CONTINUATIONS,\n): string {\n return `## CONTINUATION REQUEST (Attempt ${attempt}/${maxAttempts})\n\nYour previous response was truncated. Continue generating from where you left off.\n\n### ORIGINAL CONTEXT\n${context}\n\n### WHAT YOU GENERATED SO FAR\n\\`\\`\\`json\n${partialResponse}\n\\`\\`\\`\n\n### INSTRUCTIONS\n1. Continue from EXACTLY where the response was cut off\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly\n4. 
Do NOT wrap your response in markdown code blocks\n\nContinue generating now:`;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCO,SAAS,iBACd,UACA,cACkB;AAClB,MAAI,iBAAiB,UAAU;AAC7B,UAAMA,eAAc,cAAc,QAAQ;AAC1C,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsBA,aAAY;AAAA,MAClC,oBAAoBA,aAAY;AAAA,IAClC;AAAA,EACF;AAEA,MAAI;AACF,SAAK,MAAM,QAAQ;AACnB,WAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,EAC9C,QAAQ;AAAA,EAER;AAEA,MAAI,iBAAiB,UAAU,iBAAiB,MAAM;AACpD,UAAM,UAAU,SAAS,KAAK;AAE9B,UAAM,eACJ,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,KAAK,KACtB,QAAQ,KAAK,OAAO,KACpB,QAAQ,KAAK,OAAO;AAEtB,QAAI,cAAc;AAChB,YAAMA,eAAc,cAAc,QAAQ;AAC1C,aAAO;AAAA,QACL,aAAa;AAAA,QACb,QAAQ;AAAA,QACR,gBAAgB;AAAA,QAChB,qBAAqB,wBAAwB,QAAQ;AAAA,QACrD,sBAAsBA,aAAY;AAAA,QAClC,oBAAoBA,aAAY;AAAA,MAClC;AAAA,IACF;AAEA,QAAI;AACF,YAAM,SAAS,cAAc,OAAO;AACpC,WAAK,MAAM,MAAM;AACjB,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C,QAAQ;AACN,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C;AAAA,EACF;AAEA,QAAM,cAAc,cAAc,QAAQ;AAC1C,MACE,YAAY,uBAAuB,KACnC,YAAY,qBAAqB,GACjC;AACA,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsB,YAAY;AAAA,MAClC,oBAAoB,YAAY;AAAA,IAClC;AAAA,EACF;AAEA,SAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAC9C;AAMA,SAAS,cAAc,MAOrB;AACA,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,eAAe;AACnB,MAAI,gBAAgB;AACpB,MAAI,aAAa;AACjB,MAAI,cAAc;AAElB,aAAW,QAAQ,MAAM;AACvB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,IACJ;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,sBAAsB,KAAK,IAAI,GAAG,eAAe,aAAa;AAAA,IAC9D,oBAAoB,KAAK,IAAI,GAAG,aAAa,WAAW;AAAA,EAC1D;AACF;AAEO,SAAS,wBAAwB,MAA8B;AACpE,QAAM,aAAa,cAAc,IAAI;AACrC,MAAI;AACF,WAAO,KAAK,MAAM,UAAU;AAAA,EAC9B,QAAQ;AAAA,EAER;AAEA,QAAM,UAAU,KAAK,KAAK;AAE1B,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,oBAAoB,6BAA6B,OAAO;AAC9D,QAAI,oBAAoB,GAAG;AACzB,YAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,UAAI;AACF,eAAO,KAAK,MAAM,MAAM;AAAA,MAC1B,QAAQ;AAAA,MAER;AAAA,IACF;AAAA,EACF;AAEA,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,SAAS,cAAc,OAAO;AACpC,QAAI;AACF,aAAO,KAAK,MAAM,MAAM;AAAA,IAC1B,QAAQ;AACN,YAAM,oBAAoB,+BAA+B,OAAO;AAChE,UAAI,oBAAoB,GAAG;AACzB,cAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,YAAI;AACF,iBAAO,KAAK,MAAM,MAAM;AAAA,QAC1B,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAEA,SAAS,6BAA6B,MAAsB;AAC1D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,yBAAyB;AAE7B,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AACA,UAAI,UAAU,GAAG;AACf,iCAAyB,IAAI;AAAA,MAC/B;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,+BAAyB;AAAA,IAC3B;AAAA,EACF;AAEA,SAAO,yBAAyB,IAAI,yBAAyB;AAC/D;AAEA,SAAS,+BAA+B,MAAsB;AAC5D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,iBAAiB;AAErB,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,uBAAiB;AAAA,IACnB;AAAA,EACF;AAEA,SAAO,iBAAiB,IAAI,iBAAiB;
AAC/C;AAEO,SAAS,kBAAkB,SAA0B;AAC1D,QAAM,UAAU,QAAQ,KAAK;AAC7B,MAAI,CAAC,QAAS,QAAO;AAErB,QAAM,WAAW,cAAc,OAAO;AACtC,MACE,SAAS,uBAAuB,KAChC,SAAS,qBAAqB,GAC9B;AACA,WAAO;AAAA,EACT;AAEA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,aAAW,WAAW,eAAe;AACnC,QAAI,QAAQ,KAAK,OAAO,EAAG,QAAO;AAAA,EACpC;AAEA,SAAO;AACT;;;ACnQA,IAAM,qBAAqB;AAC3B,IAAM,4BAA4B;AAMlC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAapC,SAAS,eACd,UACA,cACQ;AACR,QAAM,cAAc,SAAS,QAAQ;AACrC,QAAM,cAAc,aAAa,UAAU;AAE3C,MAAI,cAAc,YACf,QAAQ,iBAAiB,EAAE,EAC3B,QAAQ,YAAY,EAAE,EACtB,KAAK;AAER,MAAI,YAAY,WAAW,GAAG,GAAG;AAC/B,QAAI;AACF,YAAM,aAAa,KAAK,MAAM,cAAc,WAAW,CAAC;AACxD,YAAM,OAAO,OAAO,KAAK,UAAU;AACnC,UAAI,KAAK,WAAW,KAAK,MAAM,QAAQ,WAAW,KAAK,CAAC,CAAC,CAAC,GAAG;AAC3D,sBAAc,WAAW,KAAK,CAAC,CAAC,EAC7B,IAAI,CAAC,SAAkB,KAAK,UAAU,IAAI,CAAC,EAC3C,KAAK,KAAK;AAAA,MACf;AAAA,IACF,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,MAAI,YAAY,WAAW,GAAG,KAAK,YAAY,WAAW,GAAG,GAAG;AAC9D,WAAO,cAAc;AAAA,EACvB;AAEA,QAAM,oBAAoB,cAAc,KAAK,WAAW;AACxD,QAAM,sBAAsB,YAAY,KAAK,WAAW;AAExD,MAAI,qBAAqB,qBAAqB;AAC5C,WAAO,cAAc,QAAQ;AAAA,EAC/B;AAEA,SAAO,cAAc;AACvB;AAEO,SAAS,uBAA0B,aAA+B;AACvE,UAAQ,KAAK,uDAAuD;AAEpE,MAAI;AACF,UAAM,kBAAkB,oBAAoB,WAAW,KAAK;AAC5D,UAAM,SAAS,cAAc,eAAe;AAC5C,UAAM,SAAS,KAAK,MAAM,MAAM;AAChC,YAAQ,IAAI,uDAAuD;AACnE,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,8CAA8C,KAAK;AAAA,EACnE;AAEA,SAAO;AACT;AAMA,eAAsB,qBACpB,SACgC;AAChC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,YAAY;AAAA,IACZ,mBAAmB;AAAA,IACnB;AAAA,IACA,2BAA2B;AAAA,EAC7B,IAAI;AAEJ,MAAI,cAAc;AAClB,MAAI,oBAAoB;AACxB,QAAM,WAAqB,CAAC;AAC5B,MAAI,cAAc;AAElB,UAAQ,IAAI,4DAA4D;AACxE,UAAQ;AAAA,IACN,8BAA8B,SAAS,wBAAwB,gBAAgB;AAAA,EACjF;AAEA,MAAI;AACF,UAAM,WAAW,MAAM,OAAO,oBAAoB;AAAA,MAChD;AAAA,MACA;AAAA,MACA;AAAA,IACF,CAAC;AAED,kBAAc,oBAAoB,SAAS,GAAG,KAAK,SAAS;AAE5D,YAAQ;AAAA,MACN,oCAAoC,YAAY,MAAM,0BAA0B,SAAS,YAAY;AAAA,IACvG;AAEA,QAAI,aAAa,iBAAiB,aAAa,SAAS,YAAY;AAEpE,WAAO,WAAW,eAAe,oBAAoB,kBAAkB;AACrE;AACA,YAAM,aAAa,uBAAuB,WAAW,MAAM,0BAA0B,iBAAiB,IAAI,gBAAgB;AAC1H,cAAQ,IAAI,kBAAkB,UAAU,EAAE;AAC1C,eAAS,KAAK,UAAU;AAExB,YAAM,aAAa;AAAA,QACjB;AAAA,QACA;AAAA,MACF;AAEA,YAAM,eAAe,MAAM,OAAO,oBAAoB;AAAA,QACpD,cAAc;AAAA,QACd,YAAY;AAAA,QACZ;AAAA,MACF,CAAC;AAED,cAAQ;AAAA,QACN,yCAAyC,aAAa,IAAI,MAAM,0BAA0B,aAAa,YAAY;AAAA,MACrH;AAEA,YAAM,sBACJ,oBAAoB,aAAa,GAAG,KAAK,aAAa;AACxD,oBAAc,eAAe,aAAa,mBAAmB;AAE7D,mBAAa,iBAAiB,aAAa,aAAa,YAAY;AAAA,IACtE;AAEA,QACE,qBAAqB,oBACrB,WAAW,aACX;AACA,cAAQ;AAAA,QACN,6CAA6C,gBAAgB;AAAA,MAC/D;AACA,eAAS;AAAA,QACP;AAAA,MACF;AACA,oBAAc;AAAA,IAChB;AAEA,UAAM,kBACJ,oBAAoB,WAAW,KAAK;AACtC,QAAI;AAEJ,QAAI;AACF,UAAI,YAAY,eAAe,GAAG;AAChC,eAAO,KAAK,MAAM,eAAe;AAAA,MACnC,OAAO;AACL,cAAM,SAAS,cAAc,eAAe;AAC5C,eAAO,KAAK,MAAM,MAAM;AACxB,YAAI,CAAC,aAAa;AAChB,mBAAS,KAAK,iDAAiD;AAAA,QACjE;AAAA,MACF;AAAA,IACF,SAAS,YAAY;AACnB,YAAM,WAAW,uBAA0B,eAAe;AAC1D,UAAI,UAAU;AACZ,eAAO;AACP,sBAAc;AACd,iBAAS,KAAK,yCAAyC;AAAA,MACzD,OAAO;AACL,cAAM,IAAI;AAAA,UACR,kCAAkC,iBAAiB,mBAAmB,UAAU;AAAA,QAClF;AAAA,MACF;AAAA,IACF;AAEA,QAAI,QAAQ;AACV,UAAI;AACF,eAAO,OAAO,MAAM,IAAI;AAAA,MAC1B,SAAS,iBAAiB;AACxB,gBAAQ;AAAA,UACN;AAAA,UACA;AAAA,QACF;AACA,iBAAS,KAAK,4BAA4B,eAAe,EAAE;AAAA,MAC7D;AAAA,IACF;AAEA,YAAQ;AAAA,MACN,2CAA2C,iBAAiB,eAAe,SAAS,MAAM;AAAA,IAC5F;AAEA,WAAO;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,yCAAyC,KAAK;AAC5D,UAAM;AAAA,EACR;AACF;AAEO,SAAS,+BACd,SACA,iBACA,SACA,cAAsB,2BACd;AACR,SAAO,oCAAoC,OAAO,IAAI,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA,EAKjE,OAAO;AAAA;AAAA;AAAA;AAAA,EAIP,eAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAUjB;","names":["bracketInfo"]}
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Masar Provider
|
|
3
|
+
*
|
|
4
|
+
* Thin HTTP client for the Masar neural pipeline server.
|
|
5
|
+
* Exposes generate, GFlowNet generation, error prediction,
|
|
6
|
+
* edit ranking, and health-check endpoints.
|
|
7
|
+
*
|
|
8
|
+
* Reads `MASAR_URL` from environment (default: http://localhost:8080).
|
|
9
|
+
*
|
|
10
|
+
* @packageDocumentation
|
|
11
|
+
*/
|
|
12
|
+
interface MasarGenerateOptions {
|
|
13
|
+
/** Model override (server decides default if omitted). */
|
|
14
|
+
model?: string;
|
|
15
|
+
/** Sampling temperature. */
|
|
16
|
+
temperature?: number;
|
|
17
|
+
/** Maximum tokens to generate. */
|
|
18
|
+
maxTokens?: number;
|
|
19
|
+
}
|
|
20
|
+
interface MasarGenerateResult {
|
|
21
|
+
text: string;
|
|
22
|
+
usage: {
|
|
23
|
+
promptTokens: number;
|
|
24
|
+
completionTokens: number;
|
|
25
|
+
totalTokens: number;
|
|
26
|
+
};
|
|
27
|
+
}
|
|
28
|
+
interface GoalSpec {
|
|
29
|
+
/** Natural-language description of the desired application. */
|
|
30
|
+
description: string;
|
|
+    /** Target entities (e.g. ["User", "Product", "Order"]). */
+    entities?: string[];
+    /** Domain hint (e.g. "e-commerce", "healthcare"). */
+    domain?: string;
+    /** Additional constraints passed to the GFlowNet sampler. */
+    constraints?: Record<string, unknown>;
+}
+interface GFlowNetResult {
+    /** Generated .orb schema text. */
+    schema: string;
+    /** Log-probability of the sampled trajectory. */
+    logProb: number;
+    /** Number of sampling steps taken. */
+    steps: number;
+}
+interface ErrorPrediction {
+    /** Line number (1-based) where the error is predicted. */
+    line: number;
+    /** Predicted error category. */
+    category: string;
+    /** Human-readable description. */
+    message: string;
+    /** Confidence score in [0, 1]. */
+    confidence: number;
+}
+interface PredictErrorsResult {
+    errors: ErrorPrediction[];
+}
+interface RankedEdit {
+    /** The proposed replacement text. */
+    edit: string;
+    /** Score assigned by the ranker (higher is better). */
+    score: number;
+    /** Which error this edit addresses. */
+    targetError: string;
+}
+interface RankEditsResult {
+    edits: RankedEdit[];
+}
+interface MasarHealthResult {
+    status: string;
+    version?: string;
+    uptime?: number;
+}
+interface MasarProviderOptions {
+    /** Base URL of the Masar server. Overrides MASAR_URL env var. */
+    baseUrl?: string;
+    /** Request timeout in milliseconds (default: 30 000). */
+    timeoutMs?: number;
+}
+declare class MasarError extends Error {
+    readonly statusCode: number;
+    readonly responseBody: string;
+    constructor(message: string, statusCode: number, responseBody: string);
+}
+declare class MasarProvider {
+    private readonly baseUrl;
+    private readonly timeoutMs;
+    constructor(options?: MasarProviderOptions);
+    /**
+     * Generate text from a prompt.
+     *
+     * POST /generate
+     */
+    generate(prompt: string, options?: MasarGenerateOptions): Promise<MasarGenerateResult>;
+    /**
+     * Generate a .orb schema via GFlowNet sampling.
+     *
+     * POST /generate/gflownet
+     */
+    generateGFlowNet(goal: GoalSpec): Promise<GFlowNetResult>;
+    /**
+     * Predict validation errors in a .orb schema before compilation.
+     *
+     * POST /predict-errors
+     */
+    predictErrors(schema: string): Promise<PredictErrorsResult>;
+    /**
+     * Rank candidate edits for fixing errors in a .orb schema.
+     *
+     * POST /rank-edits
+     */
+    rankEdits(schema: string, errors: string[]): Promise<RankEditsResult>;
+    /**
+     * Check server health.
+     *
+     * GET /health
+     */
+    health(): Promise<MasarHealthResult>;
+    private post;
+    private get;
+    private request;
+}
+declare function getMasarProvider(options?: MasarProviderOptions): MasarProvider;
+declare function resetMasarProvider(): void;
+
+export { type ErrorPrediction, type GFlowNetResult, type GoalSpec, MasarError, type MasarGenerateOptions, type MasarGenerateResult, type MasarHealthResult, MasarProvider, type MasarProviderOptions, type PredictErrorsResult, type RankEditsResult, type RankedEdit, getMasarProvider, resetMasarProvider };
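Taken together, the new declarations give the error-prediction loop a fully typed surface. A minimal sketch of chaining `predictErrors` into `rankEdits` — the `.orb` schema text, the confidence threshold, and the format of the error strings passed to `rankEdits` are all illustrative guesses, and the import assumes the `./providers` subpath export added further down this diff:

```ts
import { getMasarProvider } from '@almadar/llm/providers';

const masar = getMasarProvider();

// Hypothetical .orb schema, used purely for illustration.
const schema = 'entity User { name: string }';

// Predict likely validation errors before compiling.
const { errors } = await masar.predictErrors(schema);

// Ask the server to rank candidate fixes for the confident predictions.
const confident = errors.filter((e) => e.confidence >= 0.8);
if (confident.length > 0) {
  const { edits } = await masar.rankEdits(
    schema,
    confident.map((e) => `${e.line}: ${e.message}`),
  );
  console.log('best edit:', edits[0]?.edit, '(score', edits[0]?.score, ')');
}
```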
@@ -0,0 +1 @@
+{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
package/dist/{rate-limiter-9XAWfHwe.d.ts → rate-limiter-DDH7JH5p.d.ts}
RENAMED
@@ -95,4 +95,4 @@ declare class RateLimiter {
 declare function getGlobalRateLimiter(options?: RateLimiterOptions): RateLimiter;
 declare function resetGlobalRateLimiter(): void;
 
-export {
+export { type RateLimiterOptions as R, type TokenUsage as T, RateLimiter as a, TokenTracker as b, getGlobalTokenTracker as c, resetGlobalTokenTracker as d, getGlobalRateLimiter as g, resetGlobalRateLimiter as r };
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@almadar/llm",
-  "version": "2.1.0",
+  "version": "2.3.0",
   "description": "Multi-provider LLM client with rate limiting, token tracking, structured outputs, and continuation handling",
   "type": "module",
   "main": "./dist/index.js",
@@ -21,6 +21,10 @@
     "./structured-output": {
       "types": "./dist/structured-output.d.ts",
       "import": "./dist/structured-output.js"
+    },
+    "./providers": {
+      "types": "./dist/providers/index.d.ts",
+      "import": "./dist/providers/index.js"
     }
   },
   "files": [
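The new `./providers` entry means both the runtime import and its declarations resolve without going through the package root. A sketch of a consumer, assuming a standard ESM + TypeScript setup (the base URL is illustrative):

```ts
// Resolved through the "import" / "types" conditions of "./providers".
import { MasarProvider } from '@almadar/llm/providers';
import type { MasarProviderOptions } from '@almadar/llm/providers';

const options: MasarProviderOptions = {
  baseUrl: 'http://localhost:8080', // illustrative; overrides MASAR_URL
  timeoutMs: 10_000,
};
const provider = new MasarProvider(options);
```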
package/src/client.ts
CHANGED
@@ -232,7 +232,7 @@ export const OPENROUTER_MODELS = {
   QWEN_2_5_72B: 'qwen/qwen-2.5-72b-instruct',
   QWEN_2_5_CODER_32B: 'qwen/qwen-2.5-coder-32b-instruct',
   QWEN_3_235B: 'qwen/qwen3-235b-a22b',
-  
+
   // Llama models - agentic workhorses
   LLAMA_3_3_70B: 'meta-llama/llama-3.3-70b-instruct',
   LLAMA_3_1_405B: 'meta-llama/llama-3.1-405b-instruct',
@@ -241,6 +241,9 @@ export const OPENROUTER_MODELS = {
 
   // Kimi models - strong reasoning
   KIMI_K2: 'moonshotai/kimi-k2',
+
+  // Zhipu GLM models - via OpenRouter
+  GLM_4_7: 'z-ai/glm-4.7',
 } as const;
 
 const DEFAULT_TEMPERATURE = 0.3;
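The new constant slots straight into the existing OpenRouter factory. A minimal sketch, assuming `OPEN_ROUTER_API_KEY` is set (the OpenRouter provider config requires it) and that `OPENROUTER_MODELS` is re-exported from the package root like the sibling model maps:

```ts
import { createOpenRouterClient, OPENROUTER_MODELS } from '@almadar/llm';

// Override the Qwen default with Zhipu's GLM on the same OpenRouter path.
const glm = createOpenRouterClient({ model: OPENROUTER_MODELS.GLM_4_7 });
console.log(glm.getModelName()); // 'z-ai/glm-4.7'
```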
@@ -1041,3 +1044,13 @@ export function createOpenRouterClient(
     ...options,
   });
 }
+
+export function createZhipuClient(
+  options?: Partial<Omit<LLMClientOptions, 'provider'>>,
+): LLMClient {
+  return new LLMClient({
+    provider: 'openrouter',
+    model: OPENROUTER_MODELS.GLM_4_7,
+    ...options,
+  });
+}
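`createZhipuClient` is a preset over the OpenRouter provider rather than a new provider, so it authenticates with the same `OPEN_ROUTER_API_KEY`. A usage sketch — the prompts and the expected JSON shape are illustrative:

```ts
import { createZhipuClient } from '@almadar/llm';

const client = createZhipuClient({ temperature: 0.2 });

// call() parses and returns the model's JSON reply.
const result = await client.call<{ summary: string }>({
  systemPrompt: 'Reply with JSON of the form {"summary": string}.',
  userPrompt: 'Summarize: GLM-4.7 is now reachable through OpenRouter.',
});
console.log(result.summary);
```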
package/src/index.ts
CHANGED
@@ -19,6 +19,7 @@ export {
   createAnthropicClient,
   createKimiClient,
   createOpenRouterClient,
+  createZhipuClient,
   getAvailableProvider,
   isProviderAvailable,
   DEEPSEEK_MODELS,
@@ -94,3 +95,20 @@ export {
   type LLMServiceActions,
   type LLMServiceContract,
 } from './contracts.js';
+
+export {
+  MasarProvider,
+  MasarError,
+  getMasarProvider,
+  resetMasarProvider,
+  type MasarProviderOptions,
+  type MasarGenerateOptions,
+  type MasarGenerateResult,
+  type GoalSpec,
+  type GFlowNetResult,
+  type ErrorPrediction,
+  type PredictErrorsResult,
+  type RankedEdit,
+  type RankEditsResult,
+  type MasarHealthResult,
+} from './providers/index.js';
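With the Masar surface re-exported from the root entry, existing consumers of `@almadar/llm` can reach GFlowNet generation without adopting the subpath. A sketch with an illustrative `GoalSpec`:

```ts
import { getMasarProvider, type GoalSpec } from '@almadar/llm';

const goal: GoalSpec = {
  description: 'Order tracking for a small shop', // illustrative
  entities: ['Customer', 'Order'],
  domain: 'e-commerce',
};

const { schema, logProb, steps } = await getMasarProvider().generateGFlowNet(goal);
console.log(`sampled in ${steps} steps (logProb ${logProb.toFixed(2)})`);
console.log(schema);
```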
package/src/providers/index.ts
ADDED
@@ -0,0 +1,24 @@
+/**
+ * LLM Providers
+ *
+ * Standalone HTTP clients for self-hosted model servers.
+ *
+ * @packageDocumentation
+ */
+
+export {
+  MasarProvider,
+  MasarError,
+  getMasarProvider,
+  resetMasarProvider,
+  type MasarProviderOptions,
+  type MasarGenerateOptions,
+  type MasarGenerateResult,
+  type GoalSpec,
+  type GFlowNetResult,
+  type ErrorPrediction,
+  type PredictErrorsResult,
+  type RankedEdit,
+  type RankEditsResult,
+  type MasarHealthResult,
+} from './masar.js';
package/src/providers/masar.ts
ADDED
@@ -0,0 +1,272 @@
+/**
+ * Masar Provider
+ *
+ * Thin HTTP client for the Masar neural pipeline server.
+ * Exposes generate, GFlowNet generation, error prediction,
+ * edit ranking, and health-check endpoints.
+ *
+ * Reads `MASAR_URL` from the environment, falling back to DEFAULT_BASE_URL.
+ *
+ * @packageDocumentation
+ */
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export interface MasarGenerateOptions {
+  /** Model override (server decides default if omitted). */
+  model?: string;
+  /** Sampling temperature. */
+  temperature?: number;
+  /** Maximum tokens to generate. */
+  maxTokens?: number;
+}
+
+export interface MasarGenerateResult {
+  text: string;
+  usage: {
+    promptTokens: number;
+    completionTokens: number;
+    totalTokens: number;
+  };
+}
+
+export interface GoalSpec {
+  /** Natural-language description of the desired application. */
+  description: string;
+  /** Target entities (e.g. ["User", "Product", "Order"]). */
+  entities?: string[];
+  /** Domain hint (e.g. "e-commerce", "healthcare"). */
+  domain?: string;
+  /** Additional constraints passed to the GFlowNet sampler. */
+  constraints?: Record<string, unknown>;
+}
+
+export interface GFlowNetResult {
+  /** Generated .orb schema text. */
+  schema: string;
+  /** Log-probability of the sampled trajectory. */
+  logProb: number;
+  /** Number of sampling steps taken. */
+  steps: number;
+}
+
+export interface ErrorPrediction {
+  /** Line number (1-based) where the error is predicted. */
+  line: number;
+  /** Predicted error category. */
+  category: string;
+  /** Human-readable description. */
+  message: string;
+  /** Confidence score in [0, 1]. */
+  confidence: number;
+}
+
+export interface PredictErrorsResult {
+  errors: ErrorPrediction[];
+}
+
+export interface RankedEdit {
+  /** The proposed replacement text. */
+  edit: string;
+  /** Score assigned by the ranker (higher is better). */
+  score: number;
+  /** Which error this edit addresses. */
+  targetError: string;
+}
+
+export interface RankEditsResult {
+  edits: RankedEdit[];
+}
+
+export interface MasarHealthResult {
+  status: string;
+  version?: string;
+  uptime?: number;
+}
+
+export interface MasarProviderOptions {
+  /** Base URL of the Masar server. Overrides MASAR_URL env var. */
+  baseUrl?: string;
+  /** Request timeout in milliseconds (default: 30 000). */
+  timeoutMs?: number;
+}
+
+// ============================================================================
+// Error
+// ============================================================================
+
+export class MasarError extends Error {
+  constructor(
+    message: string,
+    public readonly statusCode: number,
+    public readonly responseBody: string,
+  ) {
+    super(message);
+    this.name = 'MasarError';
+  }
+}
+
+// ============================================================================
+// Provider
+// ============================================================================
+
+const DEFAULT_BASE_URL = 'https://masar-345008351456.europe-west4.run.app';
+const DEFAULT_TIMEOUT_MS = 30_000;
+
+export class MasarProvider {
+  private readonly baseUrl: string;
+  private readonly timeoutMs: number;
+
+  constructor(options?: MasarProviderOptions) {
+    this.baseUrl = (
+      options?.baseUrl ??
+      process.env.MASAR_URL ??
+      DEFAULT_BASE_URL
+    ).replace(/\/+$/, '');
+    this.timeoutMs = options?.timeoutMs ?? DEFAULT_TIMEOUT_MS;
+  }
+
+  // --------------------------------------------------------------------------
+  // Public API
+  // --------------------------------------------------------------------------
+
+  /**
+   * Generate text from a prompt.
+   *
+   * POST /generate
+   */
+  async generate(
+    prompt: string,
+    options?: MasarGenerateOptions,
+  ): Promise<MasarGenerateResult> {
+    return this.post<MasarGenerateResult>('/generate', {
+      prompt,
+      ...options,
+    });
+  }
+
+  /**
+   * Generate a .orb schema via GFlowNet sampling.
+   *
+   * POST /generate/gflownet
+   */
+  async generateGFlowNet(goal: GoalSpec): Promise<GFlowNetResult> {
+    return this.post<GFlowNetResult>('/generate/gflownet', goal);
+  }
+
+  /**
+   * Predict validation errors in a .orb schema before compilation.
+   *
+   * POST /predict-errors
+   */
+  async predictErrors(schema: string): Promise<PredictErrorsResult> {
+    return this.post<PredictErrorsResult>('/predict-errors', { schema });
+  }
+
+  /**
+   * Rank candidate edits for fixing errors in a .orb schema.
+   *
+   * POST /rank-edits
+   */
+  async rankEdits(
+    schema: string,
+    errors: string[],
+  ): Promise<RankEditsResult> {
+    return this.post<RankEditsResult>('/rank-edits', { schema, errors });
+  }
+
+  /**
+   * Check server health.
+   *
+   * GET /health
+   */
+  async health(): Promise<MasarHealthResult> {
+    return this.get<MasarHealthResult>('/health');
+  }
+
+  // --------------------------------------------------------------------------
+  // Internal helpers
+  // --------------------------------------------------------------------------
+
+  private async post<T>(path: string, body: unknown): Promise<T> {
+    return this.request<T>(path, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify(body),
+    });
+  }
+
+  private async get<T>(path: string): Promise<T> {
+    return this.request<T>(path, { method: 'GET' });
+  }
+
+  private async request<T>(
+    path: string,
+    init: RequestInit,
+  ): Promise<T> {
+    const url = `${this.baseUrl}${path}`;
+    const controller = new AbortController();
+    const timer = setTimeout(() => controller.abort(), this.timeoutMs);
+
+    try {
+      const response = await fetch(url, {
+        ...init,
+        signal: controller.signal,
+      });
+
+      if (!response.ok) {
+        const text = await response.text().catch(() => '');
+        throw new MasarError(
+          `Masar ${init.method} ${path} failed with status ${response.status}`,
+          response.status,
+          text,
+        );
+      }
+
+      return (await response.json()) as T;
+    } catch (error) {
+      if (error instanceof MasarError) {
+        throw error;
+      }
+
+      if (error instanceof DOMException && error.name === 'AbortError') {
+        throw new MasarError(
+          `Masar ${init.method} ${path} timed out after ${this.timeoutMs}ms`,
+          0,
+          '',
+        );
+      }
+
+      const message =
+        error instanceof Error ? error.message : String(error);
+      throw new MasarError(
+        `Masar ${init.method} ${path} failed: ${message}`,
+        0,
+        '',
+      );
+    } finally {
+      clearTimeout(timer);
+    }
+  }
+}
+
+// ============================================================================
+// Singleton
+// ============================================================================
+
+let sharedInstance: MasarProvider | null = null;
+
+export function getMasarProvider(
+  options?: MasarProviderOptions,
+): MasarProvider {
+  if (!sharedInstance) {
+    sharedInstance = new MasarProvider(options);
+  }
+  return sharedInstance;
+}
+
+export function resetMasarProvider(): void {
+  sharedInstance = null;
+}
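Every failure path in `request` is funneled into `MasarError`: HTTP failures keep the status and response body, while timeouts and network errors use status `0` with an empty body. Callers can therefore branch on a single type — a sketch (the prompt is illustrative):

```ts
import { MasarProvider, MasarError } from '@almadar/llm/providers';

const masar = new MasarProvider({ timeoutMs: 5_000 });

try {
  await masar.generate('hello'); // illustrative prompt
} catch (err) {
  if (err instanceof MasarError) {
    if (err.statusCode === 0) {
      console.error('timeout or network failure:', err.message);
    } else {
      console.error(`HTTP ${err.statusCode}:`, err.responseBody);
    }
  } else {
    throw err;
  }
}
```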
package/dist/chunk-3MY2UA2W.js.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"sources":["../src/client.ts"],"sourcesContent":["…"],"mappings":"…","names":[]}