@diabolicallabs/llm-client 0.2.0 → 0.3.0

package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/providers/anthropic.ts","../src/types.ts","../src/retry.ts","../src/providers/deepseek.ts","../src/providers/gemini.ts","../src/providers/openai.ts","../src/providers/perplexity.ts","../src/client.ts"],"sourcesContent":["/**\n * Anthropic Claude provider for @diabolicallabs/llm-client.\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * Anthropic: input_tokens / output_tokens / cache_creation_input_tokens / cache_read_input_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens / cacheCreationTokens / cacheReadTokens\n *\n * Error mapping:\n * APIStatusError.status → LlmError.statusCode + retryable flag\n * APIConnectionError → retryable: true\n */\n\nimport Anthropic from '@anthropic-ai/sdk';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'anthropic';\n\n/** Normalize Anthropic's usage object to LlmUsage. */\nfunction normalizeUsage(usage: Anthropic.Usage | undefined): LlmUsage {\n const inputTokens = usage?.input_tokens ?? 0;\n const outputTokens = usage?.output_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: inputTokens + outputTokens,\n // cache_creation_input_tokens and cache_read_input_tokens are present on\n // extended usage objects from prompt caching — cast to access them safely.\n cacheCreationTokens: (usage as Anthropic.Usage & { cache_creation_input_tokens?: number })\n ?.cache_creation_input_tokens,\n cacheReadTokens: (usage as Anthropic.Usage & { cache_read_input_tokens?: number })\n ?.cache_read_input_tokens,\n };\n}\n\n/** Convert LlmMessages to Anthropic's message format. Extracts system prompt. */\nfunction buildAnthropicMessages(messages: LlmMessage[]): {\n system: string | undefined;\n messages: Anthropic.MessageParam[];\n} {\n const systemMessages = messages.filter((m) => m.role === 'system');\n const conversationMessages = messages.filter((m) => m.role !== 'system');\n\n const system =\n systemMessages.length > 0 ? systemMessages.map((m) => m.content).join('\\n') : undefined;\n\n const anthropicMessages: Anthropic.MessageParam[] = conversationMessages.map((m) => ({\n role: m.role as 'user' | 'assistant',\n content: m.content,\n }));\n\n return { system, messages: anthropicMessages };\n}\n\n/**\n * Normalize any Anthropic SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n */\nexport function normalizeAnthropicError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // Anthropic SDK v0.94+: uses Anthropic.APIError as the base class with a `.status` field.\n // APIConnectionError is a subclass of APIError with status: undefined — check it first\n // so network failures are always retryable regardless of the missing status code.\n if (\n typeof Anthropic.APIConnectionError === 'function' &&\n err instanceof Anthropic.APIConnectionError\n ) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401),\n // InternalServerError (500), etc. 
Retryability is determined by HTTP status code.\n if (typeof Anthropic.APIError === 'function' && err instanceof Anthropic.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Anthropic provider implementation. */\nexport function createAnthropicProvider(config: LlmClientConfig): LlmClient {\n const client = new Anthropic({\n apiKey: config.apiKey,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // We manage retries ourselves via withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);\n\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n const params: Anthropic.MessageCreateParamsNonStreaming = {\n model,\n messages: anthropicMessages,\n max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024,\n };\n\n if (system !== undefined) params.system = system;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) {\n params.temperature = temperature;\n }\n\n const response = await client.messages.create(params);\n\n const content = response.content\n .filter((block): block is Anthropic.TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeAnthropicError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);\n\n const params: Anthropic.MessageStreamParams = {\n model,\n messages: anthropicMessages,\n max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024,\n };\n\n if (system !== undefined) params.system = system;\n const streamTemperature = options?.temperature ?? 
config.temperature;\n if (streamTemperature !== undefined) {\n params.temperature = streamTemperature;\n }\n\n let sdkStream: Awaited<ReturnType<typeof client.messages.stream>>;\n\n try {\n sdkStream = client.messages.stream(params);\n } catch (err) {\n throw normalizeAnthropicError(err);\n }\n\n // Accumulate usage — Anthropic sends it in the message_delta event at stream end\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const event of sdkStream) {\n if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {\n yield { token: event.delta.text };\n } else if (event.type === 'message_delta' && 'usage' in event) {\n // Merge input tokens from message_start with output tokens from message_delta\n const accum = await sdkStream.finalMessage();\n finalUsage = normalizeUsage(accum.usage);\n }\n }\n } catch (err) {\n // Propagate as a normalized LlmError regardless of whether streaming had started.\n // Partial stream errors cannot be recovered from — the consumer must handle them.\n throw normalizeAnthropicError(err);\n }\n\n // Yield usage on the final empty chunk\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // Anthropic JSON mode: append a system instruction to return only JSON.\n // We inject this into the messages so the provider returns parseable output.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const start = Date.now();\n\n const response = await complete(augmentedMessages, options);\n\n let parsed: unknown;\n try {\n // Strip markdown code fences if the model included them despite the instruction\n const cleaned = response.content\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Anthropic structured output: response is not valid JSON. Raw: ${response.content.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Anthropic structured output: response failed schema validation. 
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: response.usage,\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Core type definitions for @diabolicallabs/llm-client.\n * These are the stable public API surface — implementation is in Week 2.\n * Types here match the spec in briefs/brief-platform.md §4.1 exactly.\n *\n * Week 5 additions:\n * LlmResponse.citations — populated by the Perplexity provider; undefined for all others.\n * LlmCallOptions — per-call options type extracted for reuse; adds providerOptions escape hatch.\n */\n\n// The canonical message format shared across all providers\nexport interface LlmMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\n// Config passed to createClient\nexport interface LlmClientConfig {\n // Full 5-provider union — gemini, deepseek, perplexity are type-only stubs in Week 2\n provider: 'anthropic' | 'openai' | 'gemini' | 'deepseek' | 'perplexity';\n model: string; // e.g. 'claude-sonnet-4-6', 'gpt-4o', 'gemini-2.5-flash'\n apiKey: string;\n maxRetries?: number; // default: 3\n baseDelayMs?: number; // default: 1000 — exponential backoff base\n maxTokens?: number; // provider default if omitted\n temperature?: number; // provider default if omitted\n timeoutMs?: number; // default: 30000\n}\n\n// Normalized token usage — same shape regardless of provider\nexport interface LlmUsage {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n cacheCreationTokens?: number; // Anthropic prompt cache write tokens\n cacheReadTokens?: number; // Anthropic prompt cache read tokens\n}\n\n// Non-streaming response\nexport interface LlmResponse {\n content: string;\n model: string; // model ID actually used (may differ from requested)\n usage: LlmUsage;\n latencyMs: number;\n /**\n * Web citations returned by the Perplexity provider.\n * Populated only when the Perplexity API returns source references.\n * Always undefined for Anthropic, OpenAI, Gemini, and DeepSeek.\n * Deduplicated by URL within a single response.\n */\n citations?: Array<{\n url: string;\n title?: string;\n }>;\n}\n\n/**\n * Per-call options shared across complete(), stream(), and structured().\n * Extends the standard model/maxTokens/temperature overrides with:\n * providerOptions — generic escape hatch for provider-specific parameters.\n * The Perplexity provider reads search_domain_filter and\n * search_recency_filter from this field; other providers ignore it.\n * Unknown fields are passed through unchanged.\n */\nexport interface LlmCallOptions\n extends Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature'>> {\n providerOptions?: Record<string, unknown>;\n}\n\n// Streaming chunk\nexport interface LlmStreamChunk {\n token: string;\n usage?: LlmUsage; // present only on the final chunk\n}\n\n// Normalized error — wraps provider-specific errors\nexport class LlmError extends Error {\n override readonly name = 'LlmError';\n readonly provider: string;\n readonly statusCode: number | undefined;\n readonly retryable: boolean;\n // `cause` is declared on Error in lib.es2022.error.d.ts as `cause?: unknown`\n // We override it here to make it always present (not optional) after construction.\n override readonly cause: unknown;\n\n constructor(opts: {\n message: string;\n provider: string;\n statusCode?: number;\n retryable: boolean;\n cause?: unknown;\n }) {\n super(opts.message, { cause: opts.cause });\n 
this.provider = opts.provider;\n this.statusCode = opts.statusCode;\n this.retryable = opts.retryable;\n this.cause = opts.cause;\n }\n}\n\n// Structured output — Zod schema inference\nexport type LlmStructuredResponse<T> = {\n data: T;\n usage: LlmUsage;\n latencyMs: number;\n};\n\n// The LlmClient interface — what consumers program against\nexport interface LlmClient {\n readonly config: Readonly<LlmClientConfig>;\n\n // Non-streaming completion\n complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse>;\n\n // Streaming completion — async generator of chunks\n stream(messages: LlmMessage[], options?: LlmCallOptions): AsyncGenerator<LlmStreamChunk>;\n\n // Structured output — parses and validates the response against a Zod schema\n // Forces JSON mode on providers that support it; falls back to parse-and-validate\n structured<T>(\n messages: LlmMessage[],\n // Using a narrower interface than the full ZodType to avoid a hard zod dependency at types level\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>>;\n}\n","/**\n * Exponential backoff with full jitter — shared across all providers.\n *\n * Formula: delay = random(0, baseDelayMs * 2^attempt)\n *\n * Retryable HTTP statuses: 429 (rate limit), 502/503/504 (server errors).\n * Retryable network codes: ECONNRESET, ETIMEDOUT.\n * Non-retryable: 400 (bad request), 401/403 (auth), 404.\n */\n\nimport { LlmError } from './types.js';\n\n// HTTP status codes that should trigger a retry\nconst RETRYABLE_HTTP_STATUSES = new Set([429, 502, 503, 504]);\n\n// Network error codes that should trigger a retry\nconst RETRYABLE_ERROR_CODES = new Set(['ECONNRESET', 'ETIMEDOUT', 'ECONNABORTED']);\n\n// HTTP status codes that should never retry (fail immediately)\nconst NON_RETRYABLE_HTTP_STATUSES = new Set([400, 401, 403, 404]);\n\n/** Determine if an HTTP status code is retryable. */\nexport function isRetryableStatus(statusCode: number): boolean {\n if (RETRYABLE_HTTP_STATUSES.has(statusCode)) return true;\n if (NON_RETRYABLE_HTTP_STATUSES.has(statusCode)) return false;\n // Treat any 5xx not explicitly handled as retryable\n return statusCode >= 500;\n}\n\n/** Determine if a network error code is retryable. */\nexport function isRetryableErrorCode(code: string): boolean {\n return RETRYABLE_ERROR_CODES.has(code);\n}\n\n/** Compute the delay in ms for attempt N (0-indexed). Full jitter. */\nexport function computeBackoffMs(attempt: number, baseDelayMs: number): number {\n const ceiling = baseDelayMs * 2 ** attempt;\n return Math.random() * ceiling;\n}\n\nexport interface RetryOptions {\n maxRetries: number;\n baseDelayMs: number;\n provider: string;\n}\n\n/**\n * Execute `fn` with retry logic. 
Wraps the result in structured error normalization.\n * `fn` receives the current attempt number (0-indexed).\n *\n * Throws LlmError after all retries are exhausted.\n */\nexport async function withRetry<T>(\n fn: (attempt: number) => Promise<T>,\n opts: RetryOptions\n): Promise<T> {\n let lastError: LlmError | undefined;\n\n for (let attempt = 0; attempt <= opts.maxRetries; attempt++) {\n try {\n return await fn(attempt);\n } catch (err) {\n const llmErr = normalizeThrownError(err, opts.provider);\n\n if (!llmErr.retryable || attempt === opts.maxRetries) {\n throw llmErr;\n }\n\n lastError = llmErr;\n const delayMs = computeBackoffMs(attempt, opts.baseDelayMs);\n await sleep(delayMs);\n }\n }\n\n // This path is unreachable — the loop always throws or returns.\n // TypeScript needs this for exhaustiveness.\n throw (\n lastError ??\n new LlmError({\n message: 'Unexpected retry exhaustion',\n provider: opts.provider,\n retryable: false,\n })\n );\n}\n\n/** Normalize any thrown value into an LlmError. */\nexport function normalizeThrownError(err: unknown, provider: string): LlmError {\n if (err instanceof LlmError) return err;\n\n if (err instanceof Error) {\n const errWithCode = err as Error & { status?: number; statusCode?: number; code?: string };\n\n const statusCode = errWithCode.status ?? errWithCode.statusCode;\n\n // Check for retryable network error codes\n if (errWithCode.code !== undefined && isRetryableErrorCode(errWithCode.code)) {\n if (statusCode !== undefined) {\n return new LlmError({\n message: err.message,\n provider,\n statusCode,\n retryable: true,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider, retryable: true, cause: err });\n }\n\n // Check for retryable HTTP status codes\n if (statusCode !== undefined) {\n return new LlmError({\n message: err.message,\n provider,\n statusCode,\n retryable: isRetryableStatus(statusCode),\n cause: err,\n });\n }\n\n return new LlmError({\n message: err.message,\n provider,\n retryable: false,\n cause: err,\n });\n }\n\n return new LlmError({\n message: String(err),\n provider,\n retryable: false,\n cause: err,\n });\n}\n\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n","/**\n * DeepSeek provider for @diabolicallabs/llm-client.\n *\n * DeepSeek's chat completions API is fully OpenAI-compatible, so this provider\n * uses the OpenAI SDK pointed at DeepSeek's base URL.\n *\n * API base URL: https://api.deepseek.com\n * Docs: https://platform.deepseek.com/api-docs/\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * DeepSeek returns standard OpenAI-format usage: prompt_tokens / completion_tokens / total_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIConnectionError → retryable: true\n * APIError with status 429 / 5xx → retryable: true\n * Other APIErrors → non-retryable\n *\n * Note: DeepSeek does not support the json_object response_format on all models.\n * structured() injects a system prompt and parses the raw response. 
If the model\n * includes markdown fences, they are stripped before parsing.\n */\n\nimport OpenAI from 'openai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'deepseek';\nconst DEEPSEEK_BASE_URL = 'https://api.deepseek.com';\n\n/** Normalize OpenAI-format usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI-format chat message params (compatible with DeepSeek). */\nfunction buildMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Normalize any DeepSeek / OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * Uses the same OpenAI SDK error hierarchy (APIConnectionError before APIError)\n * since the client is an OpenAI instance pointed at DeepSeek's API.\n */\nexport function normalizeDeepSeekError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // APIConnectionError is a subclass of APIError with status: undefined —\n // check it first so network failures are always retryable.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401), etc.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the DeepSeek provider implementation. */\nexport function createDeepSeekProvider(config: LlmClientConfig): LlmClient {\n // OpenAI SDK pointed at DeepSeek's OpenAI-compatible endpoint\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: DEEPSEEK_BASE_URL,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // Retries managed by withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: chatMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? 
config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const response = await client.chat.completions.create(params);\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {\n model,\n messages: chatMessages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk when stream_options.include_usage is true\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // Inject JSON-only system instruction. DeepSeek does not guarantee json_object\n // response_format support across all models, so we rely on prompt-level enforcement.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(augmentedMessages);\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: chatMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeDeepSeekError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.choices[0]?.message.content ?? 
'';\n\n let parsed: unknown;\n try {\n // Strip markdown fences if the model included them despite the instruction\n const cleaned = rawContent\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `DeepSeek structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `DeepSeek structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Google Gemini provider for @diabolicallabs/llm-client.\n *\n * Uses the @google/genai SDK (v1.x — not the deprecated @google/generative-ai).\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * Gemini: usageMetadata.promptTokenCount / candidatesTokenCount / totalTokenCount\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * ApiError (public SDK class, status: number always defined):\n * retryable for 429 / 5xx\n * non-retryable for 4xx (except 429)\n * Other errors → normalizeThrownError (handles ECONNRESET / ETIMEDOUT as retryable)\n *\n * API notes:\n * - System instructions are passed via config.systemInstruction (not mixed into contents)\n * - Role mapping: 'user' → 'user', 'assistant' → 'model'\n * - Streaming via ai.models.generateContentStream() returns AsyncGenerator<GenerateContentResponse>\n * - Text is accessed via response.text getter on GenerateContentResponse\n * - Structured output: responseMimeType: 'application/json' in GenerateContentConfig\n *\n * SDK error class note:\n * The @google/genai public API exports only ApiError (lowercase 'a'), which has status: number.\n * Internal APIError / APIConnectionError classes (uppercase) are NOT exported from the package\n * root and must not be imported from internal dist paths.\n * Network errors (ECONNRESET, ETIMEDOUT) arrive as plain Error objects caught by normalizeThrownError.\n */\n\nimport {\n ApiError,\n type Content,\n type GenerateContentConfig,\n type GenerateContentResponse,\n type GenerateContentResponseUsageMetadata,\n GoogleGenAI,\n} from '@google/genai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'gemini';\n\n/** Normalize Gemini's usageMetadata to LlmUsage. */\nfunction normalizeUsage(meta: GenerateContentResponseUsageMetadata | undefined): LlmUsage {\n const inputTokens = meta?.promptTokenCount ?? 0;\n const outputTokens = meta?.candidatesTokenCount ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: meta?.totalTokenCount ?? 
inputTokens + outputTokens,\n };\n}\n\n/**\n * Convert LlmMessages to Gemini's Content array format.\n * Extracts system message — Gemini treats system instructions separately from contents.\n * Role mapping: 'user' → 'user', 'assistant' → 'model' (Gemini API requires 'model').\n */\nfunction buildGeminiContents(messages: LlmMessage[]): {\n system: string | undefined;\n contents: Content[];\n} {\n const systemMessages = messages.filter((m) => m.role === 'system');\n const conversationMessages = messages.filter((m) => m.role !== 'system');\n\n const system =\n systemMessages.length > 0 ? systemMessages.map((m) => m.content).join('\\n') : undefined;\n\n const contents: Content[] = conversationMessages.map((m) => ({\n role: m.role === 'assistant' ? 'model' : 'user',\n parts: [{ text: m.content }],\n }));\n\n return { system, contents };\n}\n\n/**\n * Normalize any Gemini SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * ApiError (public SDK class) always has status: number, so there is no undefined-status branch.\n * Network errors (no HTTP status) arrive as plain Error objects; normalizeThrownError\n * handles retryable error codes (ECONNRESET, ETIMEDOUT, etc.).\n */\nexport function normalizeGeminiError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // ApiError is the only publicly-exported SDK error class.\n // status is always number (not undefined) per the ApiError type definition.\n if (err instanceof ApiError) {\n const retryable = err.status === 429 || err.status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: err.status,\n retryable,\n cause: err,\n });\n }\n\n // Network errors (ECONNRESET, ETIMEDOUT, etc.) arrive as plain Error objects.\n // normalizeThrownError classifies retryable codes and handles the unknown-error case.\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Gemini provider implementation. */\nexport function createGeminiProvider(config: LlmClientConfig): LlmClient {\n const ai = new GoogleGenAI({\n apiKey: config.apiKey,\n httpOptions: {\n timeout: config.timeoutMs ?? 30_000,\n },\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(messages);\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n // Build config object — always passed (empty object is valid GenerateContentConfig)\n const geminiConfig: GenerateContentConfig = {};\n\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n const response = await ai.models.generateContent({\n model,\n contents,\n config: geminiConfig,\n });\n\n return {\n content: response.text ?? '',\n model,\n usage: normalizeUsage(response.usageMetadata),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? 
config.model;\n const { system, contents } = buildGeminiContents(messages);\n\n // Build config — always passed (empty object is valid GenerateContentConfig)\n const geminiConfig: GenerateContentConfig = {};\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n let sdkStream: AsyncGenerator<GenerateContentResponse>;\n\n try {\n sdkStream = await ai.models.generateContentStream({\n model,\n contents,\n config: geminiConfig,\n });\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n const text = chunk.text;\n if (text !== undefined && text.length > 0) {\n yield { token: text };\n }\n // Capture usage from each chunk — the final chunk has the complete totals\n if (chunk.usageMetadata !== undefined) {\n finalUsage = normalizeUsage(chunk.usageMetadata);\n }\n }\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n const augmentedMessages: LlmMessage[] = [\n {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n },\n ...messages,\n ];\n\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(augmentedMessages);\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n try {\n const geminiConfig: GenerateContentConfig = {\n // Instruct Gemini to return JSON directly\n responseMimeType: 'application/json',\n };\n\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n return await ai.models.generateContent({\n model,\n contents,\n config: geminiConfig,\n });\n } catch (err) {\n throw normalizeGeminiError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.text ?? '';\n\n let parsed: unknown;\n try {\n // Strip markdown code fences if the model included them despite the instruction\n const cleaned = rawContent\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Gemini structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Gemini structured output: response failed schema validation. 
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usageMetadata),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * OpenAI provider for @diabolicallabs/llm-client.\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * OpenAI: prompt_tokens / completion_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIStatusError.status → LlmError.statusCode + retryable flag\n * APIConnectionError → retryable: true\n *\n * Structured output uses OpenAI's response_format: { type: 'json_object' }.\n * For strict schema enforcement, the schema is described in the system prompt.\n */\n\nimport OpenAI from 'openai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'openai';\n\n/** Normalize OpenAI's usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI's chat message format. */\nfunction buildOpenAIMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Normalize any OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n */\nexport function normalizeOpenAIError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // OpenAI SDK v6+: uses OpenAI.APIError as the base class with a `.status` field.\n // APIConnectionError is a subclass of APIError with status: undefined — check it first\n // so network failures are always retryable regardless of the missing status code.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401),\n // InternalServerError (500), etc. Retryability is determined by HTTP status code.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the OpenAI provider implementation. */\nexport function createOpenAIProvider(config: LlmClientConfig): LlmClient {\n const client = new OpenAI({\n apiKey: config.apiKey,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // We manage retries ourselves via withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 
1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(messages);\n const start = Date.now();\n\n return withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: openAIMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const response = await client.chat.completions.create(params);\n\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(messages);\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {\n model,\n messages: openAIMessages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n // Token chunks arrive in choices[0].delta.content\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk (stream_options.include_usage must be true)\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n\n // Yield usage on the final sentinel chunk\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // OpenAI JSON mode: response_format: { type: 'json_object' }\n // The system prompt must instruct the model to output JSON — OpenAI requires this.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? 
config.model;\n const openAIMessages = buildOpenAIMessages(augmentedMessages);\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: openAIMessages,\n stream: false,\n response_format: { type: 'json_object' },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(params);\n } catch (err) {\n throw normalizeOpenAIError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.choices[0]?.message.content ?? '';\n\n let parsed: unknown;\n try {\n parsed = JSON.parse(rawContent);\n } catch (err) {\n throw new LlmError({\n message: `OpenAI structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `OpenAI structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Perplexity provider for @diabolicallabs/llm-client.\n *\n * Perplexity's chat completions API is OpenAI-compatible, so this provider\n * uses the OpenAI SDK pointed at Perplexity's base URL — same pattern as DeepSeek.\n *\n * API base URL: https://api.perplexity.ai\n * Docs: https://docs.perplexity.ai\n *\n * Implements: complete(), stream(), structured()\n *\n * Key Perplexity behaviors:\n * - Responses include a `citations` field: string[] of source URLs.\n * We map each URL to { url: string } and deduplicate by URL before returning.\n * - Citations are only available on non-streaming responses. The streaming API\n * does not include citations in individual chunks; consumers needing citations\n * must use complete(), not stream().\n * - Default model: 'sonar' — the lightweight search model (sonar-reasoning was\n * deprecated Dec 2025; sonar-reasoning-pro is its replacement).\n *\n * Model notes (confirmed against live API 2026-05-08):\n * - sonar — lightweight search, web-grounded\n * - sonar-pro — advanced search, more citations\n * - sonar-reasoning-pro — chain-of-thought reasoning (sonar-reasoning deprecated)\n * - sonar-deep-research — exhaustive research; supports async jobs. Perplexity's\n * docs note this model \"supports asynchronous jobs\" which\n * may mean a different response shape. 
We treat it as a\n * standard synchronous model; if the API returns an\n * incompatible shape, complete() will throw a clear LlmError\n * directing users to sonar-reasoning-pro or the async API.\n *\n * providerOptions (Wave 2 escape hatch):\n * The Perplexity API supports search-specific parameters not present on other providers.\n * Pass them via options.providerOptions:\n * search_recency_filter: 'month' | 'week' | 'day' | 'hour'\n * search_domain_filter: string[] — allowlist of domains to source from\n * Unknown fields are passed through unchanged to support future Perplexity API additions.\n *\n * structured() strategy:\n * Perplexity's response_format handling has limitations (especially on reasoning models\n * where reasoning tokens appear before JSON output). We use system-prompt JSON instruction\n * (same as DeepSeek) and strip both <think>...</think> reasoning blocks (sonar-reasoning-pro)\n * and markdown fences before JSON.parse().\n *\n * Token normalization:\n * Perplexity returns standard OpenAI-format usage: prompt_tokens / completion_tokens / total_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIConnectionError → retryable: true\n * APIError with status 429 / 5xx → retryable: true\n * Other APIErrors → non-retryable\n */\n\nimport OpenAI from 'openai';\nimport { normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'perplexity';\nconst PERPLEXITY_BASE_URL = 'https://api.perplexity.ai';\n\n/**\n * Perplexity-specific fields that may appear on the OpenAI-compatible response object.\n * The SDK types don't include these; we cast and extract them safely.\n */\ninterface PerplexityResponseExtensions {\n citations?: string[];\n}\n\n/** Normalize OpenAI-format usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI-format chat message params. */\nfunction buildMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Extract and deduplicate citations from a Perplexity response.\n *\n * Perplexity returns citations as string[] of URLs on the response object\n * (not in the OpenAI SDK types — accessed via cast). Deduplication is by URL.\n * Returns undefined if no citations are present or the array is empty.\n */\nfunction extractCitations(\n response: OpenAI.Chat.ChatCompletion & PerplexityResponseExtensions\n): LlmResponse['citations'] {\n const rawCitations = response.citations;\n if (rawCitations === undefined || rawCitations.length === 0) return undefined;\n\n const seen = new Set<string>();\n const deduped: Array<{ url: string; title?: string }> = [];\n\n for (const url of rawCitations) {\n if (!seen.has(url)) {\n seen.add(url);\n deduped.push({ url });\n }\n }\n\n return deduped.length > 0 ? 
deduped : undefined;\n}\n\n/**\n * Extract known Perplexity search filter fields from providerOptions.\n * Unknown fields are passed through to the API params unchanged.\n *\n * Known fields at time of implementation (2026-05-08):\n * search_recency_filter: 'month' | 'week' | 'day' | 'hour'\n * search_domain_filter: string[]\n */\nfunction extractProviderOptions(\n providerOptions: Record<string, unknown> | undefined\n): Record<string, unknown> {\n if (providerOptions === undefined) return {};\n // Pass all fields through — Perplexity may add new filters; unknown fields\n // are forwarded unchanged so consumers don't need a toolkit update to use them.\n return { ...providerOptions };\n}\n\n/**\n * Normalize any Perplexity / OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * Uses the same OpenAI SDK error hierarchy since the client is an OpenAI\n * instance pointed at Perplexity's API.\n */\nexport function normalizePerplexityError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // APIConnectionError is a subclass of APIError with status: undefined —\n // check it first so network failures are always retryable.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401), etc.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Perplexity provider implementation. */\nexport function createPerplexityProvider(config: LlmClientConfig): LlmClient {\n // OpenAI SDK pointed at Perplexity's OpenAI-compatible endpoint\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: PERPLEXITY_BASE_URL,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // Retries managed by withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const start = Date.now();\n const extraParams = extractProviderOptions(options?.providerOptions);\n\n return withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming & Record<string, unknown> =\n {\n model,\n messages: chatMessages,\n stream: false,\n ...extraParams,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? 
config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const rawResponse = await client.chat.completions.create(\n params as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming\n );\n\n // Cast to access Perplexity-specific extensions not present in OpenAI SDK types\n const response = rawResponse as OpenAI.Chat.ChatCompletion & PerplexityResponseExtensions;\n\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n const result: LlmResponse = {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n\n const citations = extractCitations(response);\n if (citations !== undefined) result.citations = citations;\n\n return result;\n } catch (err) {\n throw normalizePerplexityError(err);\n }\n }, retryOpts);\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const extraParams = extractProviderOptions(options?.providerOptions);\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & Record<string, unknown> = {\n model,\n messages: chatMessages,\n stream: true,\n stream_options: { include_usage: true },\n ...extraParams,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(\n params as OpenAI.Chat.ChatCompletionCreateParamsStreaming\n );\n } catch (err) {\n throw normalizePerplexityError(err);\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of sdkStream) {\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk when stream_options.include_usage is true\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizePerplexityError(err);\n }\n\n // Note: citations are NOT available in streaming mode. Perplexity's streaming\n // API does not include citations in the chunk stream. Use complete() if citations\n // are required for your use case.\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // Perplexity's response_format has limitations with reasoning models (reasoning tokens\n // appear before JSON output). Use system-prompt JSON instruction + fence stripping,\n // same as DeepSeek.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? 
config.model;\n const chatMessages = buildMessages(augmentedMessages);\n const start = Date.now();\n const extraParams = extractProviderOptions(options?.providerOptions);\n\n const rawResponse = await withRetry(async () => {\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming & Record<string, unknown> =\n {\n model,\n messages: chatMessages,\n stream: false,\n ...extraParams,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(\n params as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming\n );\n } catch (err) {\n throw normalizePerplexityError(err);\n }\n }, retryOpts);\n\n const rawContent = rawResponse.choices[0]?.message.content ?? '';\n\n let parsed: unknown;\n try {\n // sonar-reasoning-pro emits reasoning tokens inside <think>...</think> before the JSON.\n // Strip them first, then strip any markdown fences.\n const cleaned = rawContent\n .replace(/<think>[\\s\\S]*?<\\/think>/i, '') // strip reasoning block (sonar-reasoning-pro)\n .replace(/^```(?:json)?\\s*/i, '') // strip opening fence\n .replace(/\\s*```$/, '') // strip closing fence\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Perplexity structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Perplexity structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Factory functions for LlmClient.\n *\n * createClient — dispatches to the correct provider implementation.\n * createClientFromEnv — convenience wrapper that reads API keys from env vars.\n *\n * Provider dispatch:\n * 'anthropic' → fully implemented (Week 2)\n * 'openai' → fully implemented (Week 2)\n * 'gemini' → fully implemented (Week 3)\n * 'deepseek' → fully implemented (Week 3)\n * 'perplexity' → fully implemented (Week 5) — search-grounded, citations, providerOptions\n */\n\nimport { createAnthropicProvider } from './providers/anthropic.js';\nimport { createDeepSeekProvider } from './providers/deepseek.js';\nimport { createGeminiProvider } from './providers/gemini.js';\nimport { createOpenAIProvider } from './providers/openai.js';\nimport { createPerplexityProvider } from './providers/perplexity.js';\nimport type { LlmClient, LlmClientConfig } from './types.js';\nimport { LlmError } from './types.js';\n\n/**\n * Create an LlmClient for the given provider and config.\n * Dispatches to the provider-specific implementation.\n * All five providers are fully implemented.\n */\nexport function createClient(config: LlmClientConfig): LlmClient {\n switch (config.provider) {\n case 'anthropic':\n return createAnthropicProvider(config);\n\n case 'openai':\n return createOpenAIProvider(config);\n\n case 'gemini':\n return createGeminiProvider(config);\n\n case 'deepseek':\n return createDeepSeekProvider(config);\n\n case 'perplexity':\n return createPerplexityProvider(config);\n\n default: 
{\n // TypeScript exhaustiveness check — if a new provider is added to the union\n // without a case here, this will be a compile-time error.\n const _exhaustive: never = config.provider;\n throw new LlmError({\n message: `[dlabs-toolkit] Unknown provider: ${String(_exhaustive)}`,\n provider: String(_exhaustive),\n retryable: false,\n });\n }\n }\n}\n\n/**\n * Convenience: create an LlmClient from environment variables.\n *\n * Reads API keys from the environment based on provider:\n * anthropic → ANTHROPIC_API_KEY\n * openai → OPENAI_API_KEY\n * gemini → GOOGLE_AI_API_KEY\n * deepseek → DEEPSEEK_API_KEY\n * perplexity → PERPLEXITY_API_KEY — recommended default model: 'sonar'\n *\n * Throws LlmError if the required env var is not set.\n */\nexport function createClientFromEnv(\n provider: LlmClientConfig['provider'],\n model: string,\n overrides?: Partial<Omit<LlmClientConfig, 'provider' | 'model' | 'apiKey'>>\n): LlmClient {\n const apiKey = resolveApiKey(provider);\n return createClient({ provider, model, apiKey, ...overrides });\n}\n\n/** Read the API key for a given provider from environment variables. */\nfunction resolveApiKey(provider: LlmClientConfig['provider']): string {\n const envVarMap: Record<LlmClientConfig['provider'], string> = {\n anthropic: 'ANTHROPIC_API_KEY',\n openai: 'OPENAI_API_KEY',\n gemini: 'GOOGLE_AI_API_KEY',\n deepseek: 'DEEPSEEK_API_KEY',\n perplexity: 'PERPLEXITY_API_KEY',\n };\n\n const envVar = envVarMap[provider];\n const apiKey = process.env[envVar];\n\n if (apiKey === undefined || apiKey.trim() === '') {\n throw new LlmError({\n message: `[dlabs-toolkit] ${envVar} is not set. Set this environment variable to use the ${provider} provider.`,\n provider,\n retryable: false,\n });\n }\n\n return apiKey;\n}\n"],"mappings":";AAcA,OAAO,eAAe;;;AC8Df,IAAM,WAAN,cAAuB,MAAM;AAAA,EAChB,OAAO;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA,EAGS;AAAA,EAElB,YAAY,MAMT;AACD,UAAM,KAAK,SAAS,EAAE,OAAO,KAAK,MAAM,CAAC;AACzC,SAAK,WAAW,KAAK;AACrB,SAAK,aAAa,KAAK;AACvB,SAAK,YAAY,KAAK;AACtB,SAAK,QAAQ,KAAK;AAAA,EACpB;AACF;;;ACrFA,IAAM,0BAA0B,oBAAI,IAAI,CAAC,KAAK,KAAK,KAAK,GAAG,CAAC;AAG5D,IAAM,wBAAwB,oBAAI,IAAI,CAAC,cAAc,aAAa,cAAc,CAAC;AAGjF,IAAM,8BAA8B,oBAAI,IAAI,CAAC,KAAK,KAAK,KAAK,GAAG,CAAC;AAGzD,SAAS,kBAAkB,YAA6B;AAC7D,MAAI,wBAAwB,IAAI,UAAU,EAAG,QAAO;AACpD,MAAI,4BAA4B,IAAI,UAAU,EAAG,QAAO;AAExD,SAAO,cAAc;AACvB;AAGO,SAAS,qBAAqB,MAAuB;AAC1D,SAAO,sBAAsB,IAAI,IAAI;AACvC;AAGO,SAAS,iBAAiB,SAAiB,aAA6B;AAC7E,QAAM,UAAU,cAAc,KAAK;AACnC,SAAO,KAAK,OAAO,IAAI;AACzB;AAcA,eAAsB,UACpB,IACA,MACY;AACZ,MAAI;AAEJ,WAAS,UAAU,GAAG,WAAW,KAAK,YAAY,WAAW;AAC3D,QAAI;AACF,aAAO,MAAM,GAAG,OAAO;AAAA,IACzB,SAAS,KAAK;AACZ,YAAM,SAAS,qBAAqB,KAAK,KAAK,QAAQ;AAEtD,UAAI,CAAC,OAAO,aAAa,YAAY,KAAK,YAAY;AACpD,cAAM;AAAA,MACR;AAEA,kBAAY;AACZ,YAAM,UAAU,iBAAiB,SAAS,KAAK,WAAW;AAC1D,YAAM,MAAM,OAAO;AAAA,IACrB;AAAA,EACF;AAIA,QACE,aACA,IAAI,SAAS;AAAA,IACX,SAAS;AAAA,IACT,UAAU,KAAK;AAAA,IACf,WAAW;AAAA,EACb,CAAC;AAEL;AAGO,SAAS,qBAAqB,KAAc,UAA4B;AAC7E,MAAI,eAAe,SAAU,QAAO;AAEpC,MAAI,eAAe,OAAO;AACxB,UAAM,cAAc;AAEpB,UAAM,aAAa,YAAY,UAAU,YAAY;AAGrD,QAAI,YAAY,SAAS,UAAa,qBAAqB,YAAY,IAAI,GAAG;AAC5E,UAAI,eAAe,QAAW;AAC5B,eAAO,IAAI,SAAS;AAAA,UAClB,SAAS,IAAI;AAAA,UACb;AAAA,UACA;AAAA,UACA,WAAW;AAAA,UACX,OAAO;AAAA,QACT,CAAC;AAAA,MACH;AACA,aAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAU,WAAW,MAAM,OAAO,IAAI,CAAC;AAAA,IACrF;AAGA,QAAI,eAAe,QAAW;AAC5B,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb;AAAA,QACA;AAAA,QACA,WAAW,kBAAkB,UAAU;AAAA,QACvC,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb;AAAA,MACA,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC
;AAAA,EACH;AAEA,SAAO,IAAI,SAAS;AAAA,IAClB,SAAS,OAAO,GAAG;AAAA,IACnB;AAAA,IACA,WAAW;AAAA,IACX,OAAO;AAAA,EACT,CAAC;AACH;AAEA,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;;;AF9GA,IAAM,WAAW;AAGjB,SAAS,eAAe,OAA8C;AACpE,QAAM,cAAc,OAAO,gBAAgB;AAC3C,QAAM,eAAe,OAAO,iBAAiB;AAC7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,cAAc;AAAA;AAAA;AAAA,IAG3B,qBAAsB,OAClB;AAAA,IACJ,iBAAkB,OACd;AAAA,EACN;AACF;AAGA,SAAS,uBAAuB,UAG9B;AACA,QAAM,iBAAiB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AACjE,QAAM,uBAAuB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AAEvE,QAAM,SACJ,eAAe,SAAS,IAAI,eAAe,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,IAAI;AAEhF,QAAM,oBAA8C,qBAAqB,IAAI,CAAC,OAAO;AAAA,IACnF,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AAEF,SAAO,EAAE,QAAQ,UAAU,kBAAkB;AAC/C;AAMO,SAAS,wBAAwB,KAAwB;AAC9D,MAAI,eAAe,SAAU,QAAO;AAKpC,MACE,OAAO,UAAU,uBAAuB,cACxC,eAAe,UAAU,oBACzB;AACA,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAU;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,MAAI,OAAO,UAAU,aAAa,cAAc,eAAe,UAAU,UAAU;AACjF,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAU;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAU,UAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAK,QAAQ;AAC3C;AAGO,SAAS,wBAAwB,QAAoC;AAC1E,QAAM,SAAS,IAAI,UAAU;AAAA,IAC3B,QAAQ,OAAO;AAAA,IACf,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAU;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,UAAU,kBAAkB,IAAI,uBAAuB,QAAQ;AAE/E,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SAAoD;AAAA,UACxD;AAAA,UACA,UAAU;AAAA,UACV,YAAY,SAAS,aAAa,OAAO,aAAa;AAAA,QACxD;AAEA,YAAI,WAAW,OAAW,QAAO,SAAS;AAC1C,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,QAAW;AAC7B,iBAAO,cAAc;AAAA,QACvB;AAEA,cAAM,WAAW,MAAM,OAAO,SAAS,OAAO,MAAM;AAEpD,cAAM,UAAU,SAAS,QACtB,OAAO,CAAC,UAAwC,MAAM,SAAS,MAAM,EACrE,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,EAAE;AAEV,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAO,eAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,wBAAwB,GAAG;AAAA,MACnC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,UAAU,kBAAkB,IAAI,uBAAuB,QAAQ;AAE/E,UAAM,SAAwC;AAAA,MAC5C;AAAA,MACA,UAAU;AAAA,MACV,YAAY,SAAS,aAAa,OAAO,aAAa;AAAA,IACxD;AAEA,QAAI,WAAW,OAAW,QAAO,SAAS;AAC1C,UAAM,oBAAoB,SAAS,eAAe,OAAO;AACzD,QAAI,sBAAsB,QAAW;AACnC,aAAO,cAAc;AAAA,IACvB;AAEA,QAAI;AAEJ,QAAI;AACF,kBAAY,OAAO,SAAS,OAAO,MAAM;AAAA,IAC3C,SAAS,KAAK;AACZ,YAAM,wBAAwB,GAAG;AAAA,IACnC;AAGA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,YAAI,MAAM,SAAS,yBAAyB,MAAM,MAAM,SAAS,cAAc;AAC7E,gBAAM,EAAE,OAAO,MAAM,MAAM,KAAK;AAAA,QAClC,WAAW,MAAM,SAAS,mBAAmB,WAAW,OAAO;AAE7D,gBAAM,QAAQ,MAAM,UAAU,aAAa;AAC3C,uBAAa,eAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AAGZ,YAAM,wBAAwB,GAAG;AAAA,IACnC;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,WAAW,MAAM,SAAS,mBAAmB,OAAO;AAE1D,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,SAAS,QACtB,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,iEAAiE,SAAS,QAAQ,MAAM,GAAG,GAAG,CAAC;AAAA,QACxG,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT
,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,mEAAmE,OAAO,GAAG,CAAC;AAAA,QACvF,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAO,SAAS;AAAA,MAChB,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AGxPA,OAAO,YAAY;AAcnB,IAAMA,YAAW;AACjB,IAAM,oBAAoB;AAG1B,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAAS,cAAc,UAAkE;AACvF,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AASO,SAAS,uBAAuB,KAAwB;AAC7D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,OAAO,OAAO,uBAAuB,cAAc,eAAe,OAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUD;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAGA,MAAI,OAAO,OAAO,aAAa,cAAc,eAAe,OAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUA;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,uBAAuB,QAAoC;AAEzE,QAAM,SAAS,IAAI,OAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS;AAAA,IACT,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUA;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,QAAQ;AAC3C,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAC5D,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOC,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,uBAAuB,GAAG;AAAA,MAClC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,QAAQ;AAE3C,UAAM,SAA0D;AAAA,MAC9D;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,IACzD,SAAS,KAAK;AACZ,YAAM,uBAAuB,GAAG;AAAA,IAClC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaA,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,uBAAuB,GAAG;AAAA,IAClC;AAEA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,iBAAiB;AACpD,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,MACpD,SAAS,KAAK;AACZ,cAAM,uBAAuB,GAAG;AAAA,MACl
C;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,WACb,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QACjG,UAAUD;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,kEAAkE,OAAO,GAAG,CAAC;AAAA,QACtF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC7PA;AAAA,EACE;AAAA,EAKA;AAAA,OACK;AAcP,IAAMC,YAAW;AAGjB,SAASC,gBAAe,MAAkE;AACxF,QAAM,cAAc,MAAM,oBAAoB;AAC9C,QAAM,eAAe,MAAM,wBAAwB;AACnD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,MAAM,mBAAmB,cAAc;AAAA,EACtD;AACF;AAOA,SAAS,oBAAoB,UAG3B;AACA,QAAM,iBAAiB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AACjE,QAAM,uBAAuB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AAEvE,QAAM,SACJ,eAAe,SAAS,IAAI,eAAe,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,IAAI;AAEhF,QAAM,WAAsB,qBAAqB,IAAI,CAAC,OAAO;AAAA,IAC3D,MAAM,EAAE,SAAS,cAAc,UAAU;AAAA,IACzC,OAAO,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC;AAAA,EAC7B,EAAE;AAEF,SAAO,EAAE,QAAQ,SAAS;AAC5B;AAUO,SAAS,qBAAqB,KAAwB;AAC3D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,eAAe,UAAU;AAC3B,UAAM,YAAY,IAAI,WAAW,OAAO,IAAI,UAAU;AACtD,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUD;AAAA,MACV,YAAY,IAAI;AAAA,MAChB;AAAA,MACA,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,qBAAqB,QAAoC;AACvE,QAAM,KAAK,IAAI,YAAY;AAAA,IACzB,QAAQ,OAAO;AAAA,IACf,aAAa;AAAA,MACX,SAAS,OAAO,aAAa;AAAA,IAC/B;AAAA,EACF,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUA;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,QAAQ;AACzD,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AAEF,cAAM,eAAsC,CAAC;AAE7C,YAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,cAAM,WAAW,MAAM,GAAG,OAAO,gBAAgB;AAAA,UAC/C;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,QACV,CAAC;AAED,eAAO;AAAA,UACL,SAAS,SAAS,QAAQ;AAAA,UAC1B;AAAA,UACA,OAAOC,gBAAe,SAAS,aAAa;AAAA,UAC5C,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,QAAQ;AAGzD,UAAM,eAAsC,CAAC;AAC7C,QAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,GAAG,OAAO,sBAAsB;AAAA,QAChD;AAAA,QACA;AAAA,QACA,QAAQ;AAAA,MACV,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,cAAM,OAAO,MAAM;AACnB,YAAI,SAAS,UAAa,KAAK,SAAS,GAAG;AACzC,gBAAM,EAAE,OAAO,KAAK;AAAA,QACtB;AAEA,YAAI,MAAM,kBAAkB,QAAW;AACrC,uBAAaA,gBAAe,MAAM,aAAa;AAAA,QACjD;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAEA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AACnC,UAAM,oBAAkC;AAAA,MACtC;AAAA,QACE,MAAM;AAAA,QACN,SACE;AAAA,MACJ;AAAA,MACA,GAAG;AAAA,IACL;AAEA,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,iBAAiB;AAClE,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,eAAsC;AAAA;AAAA,UAE1C,kBAAkB;AAAA,QACpB;AAEA,YAAI,WAA
W,OAAW,cAAa,oBAAoB;AAC3D,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,eAAO,MAAM,GAAG,OAAO,gBAAgB;AAAA,UACrC;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,QACV,CAAC;AAAA,MACH,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ;AAEvC,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,WACb,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,8DAA8D,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QAC/F,UAAUD;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,OAAO,GAAG,CAAC;AAAA,QACpF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,aAAa;AAAA,MAC/C,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC3RA,OAAOC,aAAY;AAcnB,IAAMC,YAAW;AAGjB,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAAS,oBAAoB,UAAkE;AAC7F,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AAMO,SAAS,qBAAqB,KAAwB;AAC3D,MAAI,eAAe,SAAU,QAAO;AAKpC,MAAI,OAAOC,QAAO,uBAAuB,cAAc,eAAeA,QAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUF;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,MAAI,OAAOE,QAAO,aAAa,cAAc,eAAeA,QAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUF;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,qBAAqB,QAAoC;AACvE,QAAM,SAAS,IAAIE,QAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUF;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,QAAQ;AACnD,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAE5D,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOC,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,QAAQ;AAEnD,UAAM,SAA0D;AAAA,MAC9D;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,IACzD,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AAEnC,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaA,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,qBAAqB,GAAG;AAAA,IAChC;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAA
E,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,iBAAiB;AAC5D,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,iBAAiB,EAAE,MAAM,cAAc;AAAA,QACzC;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,MACpD,SAAS,KAAK;AACZ,cAAM,qBAAqB,GAAG;AAAA,MAChC;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AACF,eAAS,KAAK,MAAM,UAAU;AAAA,IAChC,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,8DAA8D,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QAC/F,UAAUD;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,OAAO,GAAG,CAAC;AAAA,QACpF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;ACzNA,OAAOE,aAAY;AAcnB,IAAMC,YAAW;AACjB,IAAM,sBAAsB;AAW5B,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAASC,eAAc,UAAkE;AACvF,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AASA,SAAS,iBACP,UAC0B;AAC1B,QAAM,eAAe,SAAS;AAC9B,MAAI,iBAAiB,UAAa,aAAa,WAAW,EAAG,QAAO;AAEpE,QAAM,OAAO,oBAAI,IAAY;AAC7B,QAAM,UAAkD,CAAC;AAEzD,aAAW,OAAO,cAAc;AAC9B,QAAI,CAAC,KAAK,IAAI,GAAG,GAAG;AAClB,WAAK,IAAI,GAAG;AACZ,cAAQ,KAAK,EAAE,IAAI,CAAC;AAAA,IACtB;AAAA,EACF;AAEA,SAAO,QAAQ,SAAS,IAAI,UAAU;AACxC;AAUA,SAAS,uBACP,iBACyB;AACzB,MAAI,oBAAoB,OAAW,QAAO,CAAC;AAG3C,SAAO,EAAE,GAAG,gBAAgB;AAC9B;AASO,SAAS,yBAAyB,KAAwB;AAC/D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,OAAOC,QAAO,uBAAuB,cAAc,eAAeA,QAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUH;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAGA,MAAI,OAAOG,QAAO,aAAa,cAAc,eAAeA,QAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUH;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,yBAAyB,QAAoC;AAE3E,QAAM,SAAS,IAAIG,QAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS;AAAA,IACT,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUH;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAeE,eAAc,QAAQ;AAC3C,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,cAAc,uBAAuB,SAAS,eAAe;AAEnE,WAAO,UAAU,YAAY;AAC3B,UAAI;AACF,cAAM,SACJ;AAAA,UACE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,GAAG;AAAA,QACL;AAEF,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,cAAc,MAAM,OAAO,KAAK,YAAY;AAAA,UAChD;AAAA,QACF;AAGA,cAAM,WAAW;AAEjB,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,cAAM,SAAsB;AAAA,UAC1B;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOD,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAEA,cAAM,YAAY,iBAAiB,QAAQ;AAC3C,YAAI,cAAc,OAAW,QAAO,YAAY;AAEhD,eAAO;
AAAA,MACT,SAAS,KAAK;AACZ,cAAM,yBAAyB,GAAG;AAAA,MACpC;AAAA,IACF,GAAG,SAAS;AAAA,EACd;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAeC,eAAc,QAAQ;AAC3C,UAAM,cAAc,uBAAuB,SAAS,eAAe;AAEnE,UAAM,SAAoF;AAAA,MACxF;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,MACtC,GAAG;AAAA,IACL;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY;AAAA,QACxC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,yBAAyB,GAAG;AAAA,IACpC;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,WAAW;AACnC,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaD,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,yBAAyB,GAAG;AAAA,IACpC;AAKA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAInC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAeC,eAAc,iBAAiB;AACpD,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,cAAc,uBAAuB,SAAS,eAAe;AAEnE,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,UAAI;AACF,cAAM,SACJ;AAAA,UACE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,GAAG;AAAA,QACL;AAEF,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY;AAAA,UACnC;AAAA,QACF;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,yBAAyB,GAAG;AAAA,MACpC;AAAA,IACF,GAAG,SAAS;AAEZ,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AAGF,YAAM,UAAU,WACb,QAAQ,6BAA6B,EAAE,EACvC,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,kEAAkE,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QACnG,UAAUF;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,oEAAoE,OAAO,GAAG,CAAC;AAAA,QACxF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC9WO,SAAS,aAAa,QAAoC;AAC/D,UAAQ,OAAO,UAAU;AAAA,IACvB,KAAK;AACH,aAAO,wBAAwB,MAAM;AAAA,IAEvC,KAAK;AACH,aAAO,qBAAqB,MAAM;AAAA,IAEpC,KAAK;AACH,aAAO,qBAAqB,MAAM;AAAA,IAEpC,KAAK;AACH,aAAO,uBAAuB,MAAM;AAAA,IAEtC,KAAK;AACH,aAAO,yBAAyB,MAAM;AAAA,IAExC,SAAS;AAGP,YAAM,cAAqB,OAAO;AAClC,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,qCAAqC,OAAO,WAAW,CAAC;AAAA,QACjE,UAAU,OAAO,WAAW;AAAA,QAC5B,WAAW;AAAA,MACb,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAcO,SAAS,oBACd,UACA,OACA,WACW;AACX,QAAM,SAAS,cAAc,QAAQ;AACrC,SAAO,aAAa,EAAE,UAAU,OAAO,QAAQ,GAAG,UAAU,CAAC;AAC/D;AAGA,SAAS,cAAc,UAA+C;AACpE,QAAM,YAAyD;AAAA,IAC7D,WAAW;AAAA,IACX,QAAQ;AAAA,IACR,QAAQ;AAAA,IACR,UAAU;AAAA,IACV,YAAY;AAAA,EACd;AAEA,QAAM,SAAS,UAAU,QAAQ;AACjC,QAAM,SAAS,QAAQ,IAAI,MAAM;AAEjC,MAAI,WAAW,UAAa,OAAO,KAAK,MAAM,IAAI;AAChD,UAAM,IAAI,SAAS;AAAA,MACjB,SAAS,mBAAmB,MAAM,yDAAyD,QAAQ;AAAA,MACnG;AAAA,MACA,WAAW;AAAA,IACb,CAAC;AAAA,EACH;AAEA,SAAO;AACT;","names":["PROVIDER","normalizeUsage","PROVIDER","normalizeUsage","OpenAI","PROVIDER","normalizeUsage","OpenAI","OpenAI","PROVIDER","normalizeUsage","buildMessages","OpenAI"]}
1
+ {"version":3,"sources":["../src/providers/anthropic.ts","../src/types.ts","../src/abort.ts","../src/retry.ts","../src/providers/deepseek.ts","../src/providers/gemini.ts","../src/providers/openai.ts","../src/providers/perplexity.ts","../src/client.ts"],"sourcesContent":["/**\n * Anthropic Claude provider for @diabolicallabs/llm-client.\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * Anthropic: input_tokens / output_tokens / cache_creation_input_tokens / cache_read_input_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens / cacheCreationTokens / cacheReadTokens\n *\n * Error mapping:\n * APIStatusError.status → LlmError.statusCode + retryable flag\n * APIConnectionError → retryable: true\n */\n\nimport Anthropic from '@anthropic-ai/sdk';\nimport { classifyAbort, createAttemptController, withStallTimeout } from '../abort.js';\nimport { mergeRetryOptsWithSignal, normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'anthropic';\n\n/** Normalize Anthropic's usage object to LlmUsage. */\nfunction normalizeUsage(usage: Anthropic.Usage | undefined): LlmUsage {\n const inputTokens = usage?.input_tokens ?? 0;\n const outputTokens = usage?.output_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: inputTokens + outputTokens,\n // cache_creation_input_tokens and cache_read_input_tokens are present on\n // extended usage objects from prompt caching — cast to access them safely.\n cacheCreationTokens: (usage as Anthropic.Usage & { cache_creation_input_tokens?: number })\n ?.cache_creation_input_tokens,\n cacheReadTokens: (usage as Anthropic.Usage & { cache_read_input_tokens?: number })\n ?.cache_read_input_tokens,\n };\n}\n\n/** Convert LlmMessages to Anthropic's message format. Extracts system prompt. */\nfunction buildAnthropicMessages(messages: LlmMessage[]): {\n system: string | undefined;\n messages: Anthropic.MessageParam[];\n} {\n const systemMessages = messages.filter((m) => m.role === 'system');\n const conversationMessages = messages.filter((m) => m.role !== 'system');\n\n const system =\n systemMessages.length > 0 ? systemMessages.map((m) => m.content).join('\\n') : undefined;\n\n const anthropicMessages: Anthropic.MessageParam[] = conversationMessages.map((m) => ({\n role: m.role as 'user' | 'assistant',\n content: m.content,\n }));\n\n return { system, messages: anthropicMessages };\n}\n\n/**\n * Normalize any Anthropic SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n */\nexport function normalizeAnthropicError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // Anthropic SDK v0.94+: uses Anthropic.APIError as the base class with a `.status` field.\n // APIConnectionError is a subclass of APIError with status: undefined — check it first\n // so network failures are always retryable regardless of the missing status code.\n if (\n typeof Anthropic.APIConnectionError === 'function' &&\n err instanceof Anthropic.APIConnectionError\n ) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401),\n // InternalServerError (500), etc. 
Retryability is determined by HTTP status code.\n if (typeof Anthropic.APIError === 'function' && err instanceof Anthropic.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Anthropic provider implementation. */\nexport function createAnthropicProvider(config: LlmClientConfig): LlmClient {\n // SDK client uses config-level timeout as the backstop. Per-call overrides are\n // enforced by createAttemptController which aborts the SDK call via signal.\n const client = new Anthropic({\n apiKey: config.apiKey,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // We manage retries ourselves via withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);\n // Per-call timeout overrides config default; falls back to config then hard default.\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n\n const start = Date.now();\n\n return withRetry(async () => {\n // Fresh controller per attempt so each retry gets a full deadline.\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: Anthropic.MessageCreateParamsNonStreaming = {\n model,\n messages: anthropicMessages,\n max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024,\n };\n\n if (system !== undefined) params.system = system;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) {\n params.temperature = temperature;\n }\n\n // Pass ctl.signal as the RequestOptions second argument — Anthropic SDK v0.94+.\n const response = await client.messages.create(params, { signal: ctl.signal });\n\n const content = response.content\n .filter((block): block is Anthropic.TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n // classifyAbort checks whether this is an AbortError; if so, applies the\n // correct kind (timeout/cancelled/stall) based on ctl.abortReason(). If\n // not an AbortError, returns the original err so normalizeAnthropicError\n // can classify it as an HTTP/network/unknown error.\n throw normalizeAnthropicError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const { system, messages: anthropicMessages } = buildAnthropicMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const stallMs = options?.streamStallTimeoutMs ?? config.streamStallTimeoutMs ?? 
30_000;\n\n const params: Anthropic.MessageStreamParams = {\n model,\n messages: anthropicMessages,\n max_tokens: options?.maxTokens ?? config.maxTokens ?? 1024,\n };\n\n if (system !== undefined) params.system = system;\n const streamTemperature = options?.temperature ?? config.temperature;\n if (streamTemperature !== undefined) {\n params.temperature = streamTemperature;\n }\n\n // Stream is a single attempt — no retry of partial streams.\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n\n let sdkStream: Awaited<ReturnType<typeof client.messages.stream>>;\n\n try {\n sdkStream = client.messages.stream(params, { signal: ctl.signal });\n } catch (err) {\n ctl.dispose();\n throw normalizeAnthropicError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n }\n\n // Accumulate usage — Anthropic sends it in the message_delta event at stream end\n let finalUsage: LlmUsage | undefined;\n\n try {\n // Wrap the SDK stream with stall detection. Explicitly cast the iterable so\n // TypeScript can infer the generic parameter T = Anthropic.MessageStreamEvent.\n const stallWrapped = withStallTimeout<Anthropic.MessageStreamEvent>(\n sdkStream as AsyncIterable<Anthropic.MessageStreamEvent>,\n stallMs,\n ctl,\n PROVIDER\n );\n for await (const event of stallWrapped) {\n if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {\n yield { token: event.delta.text };\n } else if (event.type === 'message_delta' && 'usage' in event) {\n // Merge input tokens from message_start with output tokens from message_delta\n const accum = await sdkStream.finalMessage();\n finalUsage = normalizeUsage(accum.usage);\n }\n }\n } catch (err) {\n // Propagate as a normalized LlmError regardless of whether streaming had started.\n // Partial stream errors cannot be recovered from — the consumer must handle them.\n throw normalizeAnthropicError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n\n // Yield usage on the final empty chunk\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // Anthropic JSON mode: append a system instruction to return only JSON.\n // We inject this into the messages so the provider returns parseable output.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const start = Date.now();\n\n const response = await complete(augmentedMessages, options);\n\n let parsed: unknown;\n try {\n // Strip markdown code fences if the model included them despite the instruction\n const cleaned = response.content\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Anthropic structured output: response is not valid JSON. Raw: ${response.content.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Anthropic structured output: response failed schema validation. 
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: response.usage,\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Core type definitions for @diabolicallabs/llm-client.\n * These are the stable public API surface — implementation is in Week 2.\n * Types here match the spec in briefs/brief-platform.md §4.1 exactly.\n *\n * Week 5 additions:\n * LlmResponse.citations — populated by the Perplexity provider; undefined for all others.\n * LlmCallOptions — per-call options type extracted for reuse; adds providerOptions escape hatch.\n *\n * Week 6 additions (v0.3.0 — abort/timeout/stall):\n * LlmCallOptions.timeoutMs — per-call timeout override (ms); overrides config.timeoutMs.\n * LlmCallOptions.signal — caller-supplied AbortSignal; aborts in-flight call.\n * LlmCallOptions.streamStallTimeoutMs — per-stream stall detection (ms); default 30000.\n * LlmClientConfig.streamStallTimeoutMs — config-level stall default.\n * LlmError.kind — discriminator for error classification.\n */\n\n// The canonical message format shared across all providers\nexport interface LlmMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\n// Config passed to createClient\nexport interface LlmClientConfig {\n // Full 5-provider union — gemini, deepseek, perplexity are type-only stubs in Week 2\n provider: 'anthropic' | 'openai' | 'gemini' | 'deepseek' | 'perplexity';\n model: string; // e.g. 'claude-sonnet-4-6', 'gpt-4o', 'gemini-2.5-flash'\n apiKey: string;\n maxRetries?: number; // default: 3\n baseDelayMs?: number; // default: 1000 — exponential backoff base\n maxTokens?: number; // provider default if omitted\n temperature?: number; // provider default if omitted\n timeoutMs?: number; // default: 30000\n /**\n * Default stall timeout for stream() calls (ms). Fires when no chunk is received\n * for this duration. 
Independent of timeoutMs — tolerant of reasoning-model think-pauses.\n * Default: 30000.\n */\n streamStallTimeoutMs?: number;\n}\n\n// Normalized token usage — same shape regardless of provider\nexport interface LlmUsage {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n cacheCreationTokens?: number; // Anthropic prompt cache write tokens\n cacheReadTokens?: number; // Anthropic prompt cache read tokens\n}\n\n// Non-streaming response\nexport interface LlmResponse {\n content: string;\n model: string; // model ID actually used (may differ from requested)\n usage: LlmUsage;\n latencyMs: number;\n /**\n * Web citations returned by the Perplexity provider.\n * Populated only when the Perplexity API returns source references.\n * Always undefined for Anthropic, OpenAI, Gemini, and DeepSeek.\n * Deduplicated by URL within a single response.\n */\n citations?: Array<{\n url: string;\n title?: string;\n }>;\n}\n\n/**\n * Per-call options shared across complete(), stream(), and structured().\n * Extends the standard model/maxTokens/temperature overrides with:\n * timeoutMs — per-call timeout override; overrides config.timeoutMs for this call only.\n * signal — caller-supplied AbortSignal; aborts the in-flight call immediately.\n * A pre-aborted signal throws without making an SDK call (no retry).\n * A mid-call abort throws kind:'cancelled', retryable:false (no retry).\n * streamStallTimeoutMs — per-call stall detection for stream(); overrides config default.\n * providerOptions — generic escape hatch for provider-specific parameters.\n * The Perplexity provider reads search_domain_filter and\n * search_recency_filter from this field; other providers ignore it.\n * Unknown fields are passed through unchanged.\n */\nexport interface LlmCallOptions\n extends Partial<Pick<LlmClientConfig, 'model' | 'maxTokens' | 'temperature' | 'timeoutMs'>> {\n /** Caller-supplied AbortSignal. Cancels the in-flight call. Never retried. */\n signal?: AbortSignal;\n /**\n * Per-call stall timeout for stream() in ms. Overrides config.streamStallTimeoutMs.\n * Fires when no chunk arrives within this window. Default: config.streamStallTimeoutMs ?? 30000.\n */\n streamStallTimeoutMs?: number;\n providerOptions?: Record<string, unknown>;\n}\n\n// Streaming chunk\nexport interface LlmStreamChunk {\n token: string;\n usage?: LlmUsage; // present only on the final chunk\n}\n\n/**\n * Discriminator for LlmError — lets callers branch on error class without\n * parsing message strings.\n *\n * cancelled — AbortSignal fired (caller-initiated). Never retried.\n * timeout — Per-call timeoutMs deadline exceeded. Retried by withRetry.\n * stream_stall — No chunk received within streamStallTimeoutMs. Not retried\n * (partial stream output is unsafe to re-issue).\n * http — Non-retryable HTTP error (4xx excluding 429).\n * network — Retryable network-layer error (ECONNRESET, ETIMEDOUT, etc.).\n * unknown — Unclassified error.\n */\nexport type LlmErrorKind = 'cancelled' | 'timeout' | 'stream_stall' | 'http' | 'network' | 'unknown';\n\n// Normalized error — wraps provider-specific errors\nexport class LlmError extends Error {\n override readonly name = 'LlmError';\n readonly provider: string;\n readonly statusCode: number | undefined;\n readonly retryable: boolean;\n /**\n * Optional error kind discriminator. Present on errors produced by the abort/timeout/stall\n * machinery (v0.3.0+). 
May be undefined on errors from providers that pre-date the kind field\n * or on errors that fall through to the generic normalization path.\n * Typed as LlmErrorKind | undefined to satisfy exactOptionalPropertyTypes.\n */\n readonly kind: LlmErrorKind | undefined;\n // `cause` is declared on Error in lib.es2022.error.d.ts as `cause?: unknown`\n // We override it here to make it always present (not optional) after construction.\n override readonly cause: unknown;\n\n constructor(opts: {\n message: string;\n provider: string;\n statusCode?: number;\n retryable: boolean;\n kind?: LlmErrorKind;\n cause?: unknown;\n }) {\n super(opts.message, { cause: opts.cause });\n this.provider = opts.provider;\n this.statusCode = opts.statusCode;\n this.retryable = opts.retryable;\n this.kind = opts.kind;\n this.cause = opts.cause;\n }\n}\n\n// Structured output — Zod schema inference\nexport type LlmStructuredResponse<T> = {\n data: T;\n usage: LlmUsage;\n latencyMs: number;\n};\n\n// The LlmClient interface — what consumers program against\nexport interface LlmClient {\n readonly config: Readonly<LlmClientConfig>;\n\n // Non-streaming completion\n complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse>;\n\n // Streaming completion — async generator of chunks\n stream(messages: LlmMessage[], options?: LlmCallOptions): AsyncGenerator<LlmStreamChunk>;\n\n // Structured output — parses and validates the response against a Zod schema\n // Forces JSON mode on providers that support it; falls back to parse-and-validate\n structured<T>(\n messages: LlmMessage[],\n // Using a narrower interface than the full ZodType to avoid a hard zod dependency at types level\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>>;\n}\n","/**\n * Abort machinery for @diabolicallabs/llm-client.\n *\n * Provides per-attempt cancellation combining:\n * - An internal AbortController per attempt (fresh deadline each retry)\n * - Optional caller-supplied AbortSignal forwarded into the internal controller\n * - Explicit reason tracking ('timeout' | 'caller' | 'stall') so errors can be\n * classified before they reach normalizeThrownError\n *\n * Four primary exports:\n * createAttemptController — combine caller signal + timeout into one AbortSignal\n * cancellableSleep — sleep that resolves early on signal\n * withStallTimeout — async-iterator wrapper that fires on chunk silence\n * classifyAbort — map an AbortError to LlmError with the right kind\n *\n * Design: hand-rolled signal composition (no AbortSignal.any) to avoid requiring\n * Node ≥ 20.3.0, where AbortSignal.any first appeared.\n */\n\nimport { LlmError } from './types.js';\n\n// ─── AttemptController ───────────────────────────────────────────────────────────────\n\nexport interface AttemptController {\n /** Combined signal to pass to SDK calls. Aborts when timeout fires or caller cancels. */\n signal: AbortSignal;\n /** Returns the abort reason once the signal fires, or undefined if still live. */\n abortReason: () => 'timeout' | 'caller' | 'stall' | undefined;\n /** Abort from the stream-stall wrapper. */\n abort: (reason: 'stall') => void;\n /** Cancel the timeout timer and remove the caller-signal listener. Call in finally. 
*/\n dispose: () => void;\n}\n\n/**\n * Construct a per-attempt AbortController that fires after timeoutMs, or immediately\n * if the caller's signal is already aborted, or whenever the caller's signal fires.\n *\n * Call dispose() in the finally block of every attempt to avoid timer leaks.\n */\nexport function createAttemptController(\n callerSignal: AbortSignal | undefined,\n timeoutMs: number\n): AttemptController {\n const internal = new AbortController();\n let reason: 'timeout' | 'caller' | 'stall' | undefined;\n\n // Timeout fires after timeoutMs. unref() so the timer does not prevent Node exit.\n const timer = setTimeout(() => {\n reason ??= 'timeout';\n internal.abort(new Error('llm-client: timeout'));\n }, timeoutMs);\n // Node.js Timer has unref(); browser setTimeout does not — guard safely.\n (timer as unknown as { unref?: () => void }).unref?.();\n\n // Forward the caller signal into our internal controller.\n const onCallerAbort = (): void => {\n reason ??= 'caller';\n internal.abort(callerSignal!.reason);\n };\n\n if (callerSignal !== undefined) {\n if (callerSignal.aborted) {\n // Already aborted — fire synchronously so the check-before-SDK-call path works.\n onCallerAbort();\n } else {\n callerSignal.addEventListener('abort', onCallerAbort, { once: true });\n }\n }\n\n return {\n signal: internal.signal,\n abortReason: () => reason,\n abort: (r) => {\n reason ??= r;\n internal.abort();\n },\n dispose: () => {\n clearTimeout(timer);\n callerSignal?.removeEventListener('abort', onCallerAbort);\n },\n };\n}\n\n// ─── cancellableSleep ────────────────────────────────────────────────────────\n\n/**\n * Sleep for ms milliseconds. Resolves early (never rejects) if signal fires.\n * Timer is always cleared on resolution to avoid leaks.\n *\n * Used by withRetry backoff so a cancelled request does not burn delay time.\n */\nexport function cancellableSleep(ms: number, signal?: AbortSignal): Promise<void> {\n return new Promise<void>((resolve) => {\n if (signal?.aborted) {\n resolve();\n return;\n }\n\n const timer = setTimeout(() => {\n signal?.removeEventListener('abort', onAbort);\n resolve();\n }, ms);\n (timer as unknown as { unref?: () => void }).unref?.();\n\n const onAbort = (): void => {\n clearTimeout(timer);\n resolve();\n };\n\n signal?.addEventListener('abort', onAbort, { once: true });\n });\n}\n\n// ─── withStallTimeout ────────────────────────────────────────────────────────\n\n/**\n * Wrap an async iterable with a per-chunk stall timer. If no chunk arrives within\n * stallMs, the controller is aborted (which propagates to the SDK and frees its\n * socket) and an LlmError with kind:'stream_stall' is thrown.\n *\n * The stall timer is reset on each successful yield so reasoning-model think-pauses\n * do not trip the detector as long as chunks keep arriving before the deadline.\n *\n * Stream stall is NOT retried — partial output is unsafe to re-issue. 
The caller\n * receives kind:'stream_stall' and must decide how to recover.\n *\n * @param source The SDK async iterator to wrap.\n * @param stallMs Milliseconds of silence before declaring a stall.\n * @param ctl The AttemptController for this stream attempt.\n * @param provider Provider name for error classification.\n */\nexport async function* withStallTimeout<T>(\n source: AsyncIterable<T>,\n stallMs: number,\n ctl: AttemptController,\n provider: string\n): AsyncGenerator<T> {\n const it = source[Symbol.asyncIterator]();\n\n while (true) {\n // Race the next chunk against a stall timer.\n // Important: we must use vi.advanceTimersByTimeAsync in tests, not the sync variant,\n // because Promise.race() involves microtask scheduling — the async variant flushes both\n // timers and pending microtasks so the race resolves correctly in fake-timer environments.\n let stallTimer!: ReturnType<typeof setTimeout>;\n\n const stallPromise = new Promise<never>((_, reject) => {\n stallTimer = setTimeout(() => {\n ctl.abort('stall');\n reject(\n new LlmError({\n provider,\n kind: 'stream_stall',\n retryable: true,\n message: `llm-client: no chunk for ${stallMs}ms`,\n })\n );\n }, stallMs);\n });\n\n try {\n const next = await Promise.race([it.next(), stallPromise]);\n clearTimeout(stallTimer);\n if (next.done) return;\n yield next.value;\n } catch (err) {\n clearTimeout(stallTimer);\n throw err;\n }\n }\n}\n\n// ─── classifyAbort ───────────────────────────────────────────────────────────\n\n/**\n * Map a thrown error to an LlmError with the right kind discriminator.\n *\n * If the error is an AbortError (name === 'AbortError' or DOMException with\n * AbortError name OR any error thrown when the controller's signal is already\n * aborted), we look up the abort reason from the AttemptController:\n * 'timeout' → kind:'timeout', retryable:true\n * 'caller' → kind:'cancelled', retryable:false\n * 'stall' → kind:'stream_stall', retryable:true\n * undefined → kind:'cancelled', retryable:false (unknown abort)\n *\n * Note: provider SDKs may throw their own error types (e.g. Anthropic's\n * APIUserAbortError) when a signal fires. We use the controller's abortReason\n * as the authoritative source rather than relying solely on error.name.\n *\n * Non-abort errors where the signal has NOT fired fall through unchanged so\n * existing normalization paths handle them.\n */\nexport function classifyAbort(\n err: unknown,\n abortReason: ReturnType<AttemptController['abortReason']>,\n provider: string\n): unknown {\n // Primary check: our controller fired — use the reason regardless of error type.\n // This handles provider-specific abort errors (e.g. Anthropic APIUserAbortError).\n const controllerFired = abortReason !== undefined;\n if (!controllerFired && !isAbortError(err)) return err;\n\n switch (abortReason) {\n case 'timeout':\n return new LlmError({\n message: 'llm-client: request timed out',\n provider,\n kind: 'timeout',\n retryable: true,\n cause: err,\n });\n case 'stall':\n return new LlmError({\n message: 'llm-client: stream stalled',\n provider,\n kind: 'stream_stall',\n retryable: true,\n cause: err,\n });\n case 'caller':\n default:\n return new LlmError({\n message: 'llm-client: cancelled by caller',\n provider,\n kind: 'cancelled',\n retryable: false,\n cause: err,\n });\n }\n}\n\n/** Returns true if the thrown value is a DOM/Node AbortError. 
*/\nfunction isAbortError(err: unknown): boolean {\n if (err instanceof Error && err.name === 'AbortError') return true;\n if (typeof DOMException !== 'undefined' && err instanceof DOMException && err.name === 'AbortError')\n return true;\n return false;\n}\n","/**\n * Exponential backoff with full jitter — shared across all providers.\n *\n * Formula: delay = random(0, baseDelayMs * 2^attempt)\n *\n * Retryable HTTP statuses: 429 (rate limit), 502/503/504 (server errors).\n * Retryable network codes: ECONNRESET, ETIMEDOUT.\n * Non-retryable: 400 (bad request), 401/403 (auth), 404.\n *\n * v0.3.0 additions:\n * RetryOptions.signal — passed through to abort the loop on caller cancellation.\n * cancellableSleep — sleep that resolves early when the signal fires.\n * normalizeThrownError — gains explicit AbortError branch → kind:'cancelled', retryable:false.\n * withRetry — checks signal before each attempt and during backoff.\n */\n\nimport { cancellableSleep } from './abort.js';\nimport { LlmError } from './types.js';\n\n// HTTP status codes that should trigger a retry\nconst RETRYABLE_HTTP_STATUSES = new Set([429, 502, 503, 504]);\n\n// Network error codes that should trigger a retry\nconst RETRYABLE_ERROR_CODES = new Set(['ECONNRESET', 'ETIMEDOUT', 'ECONNABORTED']);\n\n// HTTP status codes that should never retry (fail immediately)\nconst NON_RETRYABLE_HTTP_STATUSES = new Set([400, 401, 403, 404]);\n\n/** Determine if an HTTP status code is retryable. */\nexport function isRetryableStatus(statusCode: number): boolean {\n if (RETRYABLE_HTTP_STATUSES.has(statusCode)) return true;\n if (NON_RETRYABLE_HTTP_STATUSES.has(statusCode)) return false;\n // Treat any 5xx not explicitly handled as retryable\n return statusCode >= 500;\n}\n\n/** Determine if a network error code is retryable. */\nexport function isRetryableErrorCode(code: string): boolean {\n return RETRYABLE_ERROR_CODES.has(code);\n}\n\n/** Compute the delay in ms for attempt N (0-indexed). Full jitter. */\nexport function computeBackoffMs(attempt: number, baseDelayMs: number): number {\n const ceiling = baseDelayMs * 2 ** attempt;\n return Math.random() * ceiling;\n}\n\nexport interface RetryOptions {\n maxRetries: number;\n baseDelayMs: number;\n provider: string;\n /**\n * Optional caller-supplied signal. Checked before each attempt and during backoff sleep.\n * If aborted, withRetry throws immediately with kind:'cancelled', retryable:false.\n * This is an internal detail — not part of the public LlmCallOptions API.\n */\n signal?: AbortSignal;\n}\n\n/**\n * Merge base RetryOptions with an optional caller signal.\n * Uses conditional spread to satisfy exactOptionalPropertyTypes — avoids spreading\n * `{ signal: AbortSignal | undefined }` into the strictly-typed interface.\n */\nexport function mergeRetryOptsWithSignal(\n base: Omit<RetryOptions, 'signal'>,\n signal: AbortSignal | undefined\n): RetryOptions {\n return signal !== undefined ? { ...base, signal } : { ...base };\n}\n\n/**\n * Execute `fn` with retry logic. 
Wraps the result in structured error normalization.\n * `fn` receives the current attempt number (0-indexed).\n *\n * If opts.signal is provided:\n * - Checked before each attempt: throws kind:'cancelled', retryable:false immediately.\n * - Passed to cancellableSleep during backoff so an abort cuts the wait short.\n * - kind:'cancelled' errors thrown by fn are never retried regardless of signal state.\n *\n * Throws LlmError after all retries are exhausted.\n */\nexport async function withRetry<T>(\n fn: (attempt: number) => Promise<T>,\n opts: RetryOptions\n): Promise<T> {\n let lastError: LlmError | undefined;\n\n for (let attempt = 0; attempt <= opts.maxRetries; attempt++) {\n // Pre-attempt abort check — fail immediately without calling fn.\n if (opts.signal?.aborted === true) {\n throw new LlmError({\n message: 'llm-client: cancelled by caller',\n provider: opts.provider,\n kind: 'cancelled',\n retryable: false,\n cause: opts.signal.reason,\n });\n }\n\n try {\n return await fn(attempt);\n } catch (err) {\n const llmErr = normalizeThrownError(err, opts.provider);\n\n // Cancelled errors are never retried — propagate immediately.\n if (llmErr.kind === 'cancelled') throw llmErr;\n\n if (!llmErr.retryable || attempt === opts.maxRetries) {\n throw llmErr;\n }\n\n lastError = llmErr;\n const delayMs = computeBackoffMs(attempt, opts.baseDelayMs);\n // cancellableSleep resolves early if signal fires during backoff.\n await cancellableSleep(delayMs, opts.signal);\n }\n }\n\n // This path is unreachable — the loop always throws or returns.\n // TypeScript needs this for exhaustiveness.\n throw (\n lastError ??\n new LlmError({\n message: 'Unexpected retry exhaustion',\n provider: opts.provider,\n retryable: false,\n })\n );\n}\n\n/** Normalize any thrown value into an LlmError. */\nexport function normalizeThrownError(err: unknown, provider: string): LlmError {\n if (err instanceof LlmError) return err;\n\n if (err instanceof Error) {\n // AbortError branch — must be checked before generic Error handling.\n // Covers both DOMException('AbortError') from browsers/jsdom and\n // plain Error({ name: 'AbortError' }) thrown by some SDK fetch layers.\n if (\n err.name === 'AbortError' ||\n (typeof DOMException !== 'undefined' && err instanceof DOMException && err.name === 'AbortError')\n ) {\n return new LlmError({\n message: err.message || 'llm-client: cancelled by caller',\n provider,\n kind: 'cancelled',\n retryable: false,\n cause: err,\n });\n }\n\n const errWithCode = err as Error & { status?: number; statusCode?: number; code?: string };\n const statusCode = errWithCode.status ?? errWithCode.statusCode;\n\n // Check for retryable network error codes\n if (errWithCode.code !== undefined && isRetryableErrorCode(errWithCode.code)) {\n if (statusCode !== undefined) {\n return new LlmError({\n message: err.message,\n provider,\n statusCode,\n kind: 'network',\n retryable: true,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider, kind: 'network', retryable: true, cause: err });\n }\n\n // Check for retryable HTTP status codes\n if (statusCode !== undefined) {\n const retryable = isRetryableStatus(statusCode);\n return new LlmError({\n message: err.message,\n provider,\n statusCode,\n kind: 'http',
\n retryable,\n cause: err,\n });\n }\n\n return new LlmError({\n message: err.message,\n provider,\n kind: 'unknown',\n retryable: false,\n cause: err,\n });\n }\n\n return new LlmError({\n message: String(err),\n provider,\n kind: 'unknown',\n retryable: false,\n cause: err,\n });\n}\n\n","/**\n * DeepSeek provider for @diabolicallabs/llm-client.\n *\n * DeepSeek's chat completions API is fully OpenAI-compatible, so this provider\n * uses the OpenAI SDK pointed at DeepSeek's base URL.\n *\n * API base URL: https://api.deepseek.com\n * Docs: https://platform.deepseek.com/api-docs/\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * DeepSeek returns standard OpenAI-format usage: prompt_tokens / completion_tokens / total_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIConnectionError → retryable: true\n * APIError with status 429 / 5xx → retryable: true\n * Other APIErrors → non-retryable\n *\n * Note: DeepSeek does not support the json_object response_format on all models.\n * structured() injects a system prompt and parses the raw response. If the model\n * includes markdown fences, they are stripped before parsing.\n */\n\nimport OpenAI from 'openai';\nimport { classifyAbort, createAttemptController, withStallTimeout } from '../abort.js';\nimport { mergeRetryOptsWithSignal, normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'deepseek';\nconst DEEPSEEK_BASE_URL = 'https://api.deepseek.com';\n\n/** Normalize OpenAI-format usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI-format chat message params (compatible with DeepSeek). 
*/\nfunction buildMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Normalize any DeepSeek / OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * Uses the same OpenAI SDK error hierarchy (APIConnectionError before APIError)\n * since the client is an OpenAI instance pointed at DeepSeek's API.\n */\nexport function normalizeDeepSeekError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // APIConnectionError is a subclass of APIError with status: undefined —\n // check it first so network failures are always retryable.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401), etc.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the DeepSeek provider implementation. */\nexport function createDeepSeekProvider(config: LlmClientConfig): LlmClient {\n // OpenAI SDK pointed at DeepSeek's OpenAI-compatible endpoint\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: DEEPSEEK_BASE_URL,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // Retries managed by withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const start = Date.now();\n\n return withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: chatMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const response = await client.chat.completions.create(params, { signal: ctl.signal });\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeDeepSeekError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? 
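Since normalizeDeepSeekError is exported specifically for unit testing, a test can exercise it without any network. A sketch, assuming a Vitest-style runner; the framework and the relative import path are my assumptions.

```ts
import { describe, expect, it } from 'vitest';
import { normalizeDeepSeekError } from '../src/providers/deepseek.js'; // assumed path

describe('normalizeDeepSeekError', () => {
  it('marks a plain error carrying status 503 as retryable', () => {
    // Not an OpenAI.APIError, so this falls through to normalizeThrownError,
    // which reads the `status` field and classifies 5xx as retryable.
    const err = normalizeDeepSeekError(Object.assign(new Error('bad gateway'), { status: 503 }));
    expect(err.retryable).toBe(true);
    expect(err.statusCode).toBe(503);
  });

  it('passes an existing LlmError through unchanged', () => {
    const original = normalizeDeepSeekError(new Error('boom'));
    expect(normalizeDeepSeekError(original)).toBe(original);
  });
});
```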
config.model;\n const chatMessages = buildMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const stallMs = options?.streamStallTimeoutMs ?? config.streamStallTimeoutMs ?? 30_000;\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {\n model,\n messages: chatMessages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(params, { signal: ctl.signal });\n } catch (err) {\n ctl.dispose();\n throw normalizeDeepSeekError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of withStallTimeout(sdkStream, stallMs, ctl, PROVIDER)) {\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk when stream_options.include_usage is true\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizeDeepSeekError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // Inject JSON-only system instruction. DeepSeek does not guarantee json_object\n // response_format support across all models, so we rely on prompt-level enforcement.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(augmentedMessages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: chatMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(params, { signal: ctl.signal });\n } catch (err) {\n throw normalizeDeepSeekError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n\n const rawContent = rawResponse.choices[0]?.message.content ?? 
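Consumers of stream() iterate the generator and watch for the trailing usage sentinel: an empty token carrying the final usage totals. A sketch of that loop; the factory import path and model id are placeholders of mine.

```ts
import { createClient } from '../client.js'; // assumed path
import type { LlmUsage } from '../types.js';

const client = createClient({
  provider: 'deepseek',
  model: 'deepseek-chat', // placeholder model id
  apiKey: process.env.DEEPSEEK_API_KEY ?? '',
});

let finalUsage: LlmUsage | undefined;
for await (const chunk of client.stream([{ role: 'user', content: 'Count to five.' }])) {
  if (chunk.token.length > 0) process.stdout.write(chunk.token);
  if (chunk.usage !== undefined) finalUsage = chunk.usage; // sentinel chunk: token === ''
}
console.log('\ntotal tokens:', finalUsage?.totalTokens);
```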
'';\n\n let parsed: unknown;\n try {\n // Strip markdown fences if the model included them despite the instruction\n const cleaned = rawContent\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `DeepSeek structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `DeepSeek structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Google Gemini provider for @diabolicallabs/llm-client.\n *\n * Uses the @google/genai SDK (v1.x — not the deprecated @google/generative-ai).\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * Gemini: usageMetadata.promptTokenCount / candidatesTokenCount / totalTokenCount\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * ApiError (public SDK class, status: number always defined):\n * retryable for 429 / 5xx\n * non-retryable for 4xx (except 429)\n * Other errors → normalizeThrownError (handles ECONNRESET / ETIMEDOUT as retryable)\n *\n * API notes:\n * - System instructions are passed via config.systemInstruction (not mixed into contents)\n * - Role mapping: 'user' → 'user', 'assistant' → 'model'\n * - Streaming via ai.models.generateContentStream() returns AsyncGenerator<GenerateContentResponse>\n * - Text is accessed via response.text getter on GenerateContentResponse\n * - Structured output: responseMimeType: 'application/json' in GenerateContentConfig\n *\n * SDK error class note:\n * The @google/genai public API exports only ApiError (lowercase 'a'), which has status: number.\n * Internal APIError / APIConnectionError classes (uppercase) are NOT exported from the package\n * root and must not be imported from internal dist paths.\n * Network errors (ECONNRESET, ETIMEDOUT) arrive as plain Error objects caught by normalizeThrownError.\n *\n * ⚠ Cancellation caveat (v0.3.0 — owner-accepted, 2026-05-10):\n * @google/genai SDK does not accept a per-call AbortSignal. Cancellation is implemented\n * via Promise.race: when the internal controller aborts, a rejection promise wins the race\n * and we stop awaiting the SDK call. However, the SDK's underlying HTTP request is NOT\n * cancelled — it continues in the background until it completes or the SDK-level timeout fires.\n *\n * Mitigation: GoogleGenAI is constructed with httpOptions.timeout = effectiveTimeoutMs * 2\n * as a backstop. 
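To make the structured() contract concrete: any schema object exposing parse(data: unknown) satisfies the signature, which is exactly Zod's surface. A hypothetical call; Zod, the model id, and the factory import path are my assumptions.

```ts
import { z } from 'zod';
import { createClient } from '../client.js'; // assumed path

const Recipe = z.object({ name: z.string(), minutes: z.number() });

const client = createClient({
  provider: 'deepseek',
  model: 'deepseek-chat', // placeholder model id
  apiKey: process.env.DEEPSEEK_API_KEY ?? '',
});

// The provider injects the JSON-only system prompt, strips any markdown fences
// from the reply, JSON.parses it, then runs Recipe.parse() on the result.
const { data, usage, latencyMs } = await client.structured(
  [{ role: 'user', content: 'A quick pasta recipe as {"name": ..., "minutes": ...}' }],
  Recipe
);
console.log(data.name, usage.totalTokens, latencyMs); // data is typed { name: string; minutes: number }
```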
This bounds the leaked request to at most 2× the per-call timeout.\n *\n * Tracking issue: migrate to native signal support when @google/genai adds it.\n */\n\nimport {\n ApiError,\n type Content,\n type GenerateContentConfig,\n type GenerateContentResponse,\n type GenerateContentResponseUsageMetadata,\n GoogleGenAI,\n} from '@google/genai';\nimport { classifyAbort, createAttemptController, withStallTimeout } from '../abort.js';\nimport { mergeRetryOptsWithSignal, normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'gemini';\n\n/** Normalize Gemini's usageMetadata to LlmUsage. */\nfunction normalizeUsage(meta: GenerateContentResponseUsageMetadata | undefined): LlmUsage {\n const inputTokens = meta?.promptTokenCount ?? 0;\n const outputTokens = meta?.candidatesTokenCount ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: meta?.totalTokenCount ?? inputTokens + outputTokens,\n };\n}\n\n/**\n * Convert LlmMessages to Gemini's Content array format.\n * Extracts system message — Gemini treats system instructions separately from contents.\n * Role mapping: 'user' → 'user', 'assistant' → 'model' (Gemini API requires 'model').\n */\nfunction buildGeminiContents(messages: LlmMessage[]): {\n system: string | undefined;\n contents: Content[];\n} {\n const systemMessages = messages.filter((m) => m.role === 'system');\n const conversationMessages = messages.filter((m) => m.role !== 'system');\n\n const system =\n systemMessages.length > 0 ? systemMessages.map((m) => m.content).join('\\n') : undefined;\n\n const contents: Content[] = conversationMessages.map((m) => ({\n role: m.role === 'assistant' ? 'model' : 'user',\n parts: [{ text: m.content }],\n }));\n\n return { system, contents };\n}\n\n/**\n * Normalize any Gemini SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * ApiError (public SDK class) always has status: number, so there is no undefined-status branch.\n * Network errors (no HTTP status) arrive as plain Error objects; normalizeThrownError\n * handles retryable error codes (ECONNRESET, ETIMEDOUT, etc.).\n */\nexport function normalizeGeminiError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // ApiError is the only publicly-exported SDK error class.\n // status is always number (not undefined) per the ApiError type definition.\n if (err instanceof ApiError) {\n const retryable = err.status === 429 || err.status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: err.status,\n retryable,\n cause: err,\n });\n }\n\n // Network errors (ECONNRESET, ETIMEDOUT, etc.) arrive as plain Error objects.\n // normalizeThrownError classifies retryable codes and handles the unknown-error case.\n return normalizeThrownError(err, PROVIDER);\n}\n\n/**\n * Build an \"abort-rejection\" promise that rejects with an AbortError-shaped error\n * when the controller's signal fires. Used in Promise.race to simulate cancellation\n * for SDK calls that don't accept a signal directly.\n *\n * NOTE: This does NOT cancel the SDK's underlying HTTP request. 
See the module-level\n * caveat comment for the documented socket-leak behavior and the 2× backstop mitigation.\n */\nfunction makeAbortRacePromise(signal: AbortSignal): Promise<never> {\n return new Promise<never>((_, reject) => {\n const onAbort = (): void => {\n const e = new Error('AbortError');\n e.name = 'AbortError';\n reject(e);\n };\n if (signal.aborted) {\n onAbort();\n } else {\n signal.addEventListener('abort', onAbort, { once: true });\n }\n });\n}\n\n/** Create the Gemini provider implementation. */\nexport function createGeminiProvider(config: LlmClientConfig): LlmClient {\n const configTimeoutMs = config.timeoutMs ?? 30_000;\n\n // GoogleGenAI instance — httpOptions.timeout is the per-call SDK-level backstop.\n // For per-call overrides, we multiply by 2 as the backstop (see caveat above).\n // Since @google/genai does not support per-call construction, we use the config-level\n // timeout * 2 as a static backstop. Per-call overrides shorter than config timeout\n // will be enforced by the Promise.race; longer overrides are bounded by this backstop.\n const ai = new GoogleGenAI({\n apiKey: config.apiKey,\n httpOptions: {\n timeout: configTimeoutMs * 2,\n },\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? configTimeoutMs;\n const start = Date.now();\n\n return withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n // Build config object — always passed (empty object is valid GenerateContentConfig)\n const geminiConfig: GenerateContentConfig = {};\n\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n // Promise.race: whichever settles first wins. If ctl.signal aborts (timeout or\n // caller cancel), the abortRace rejects; the SDK call continues in the background\n // until the httpOptions.timeout backstop fires. See module-level caveat.\n const response = await Promise.race([\n ai.models.generateContent({ model, contents, config: geminiConfig }),\n makeAbortRacePromise(ctl.signal),\n ]);\n\n return {\n content: response.text ?? '',\n model,\n usage: normalizeUsage(response.usageMetadata),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeGeminiError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? configTimeoutMs;\n const stallMs = options?.streamStallTimeoutMs ?? config.streamStallTimeoutMs ?? 
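The Promise.race pattern above is worth seeing in isolation, since it is the root of the documented socket-leak caveat. A standalone sketch of the same idea; the helper name is mine, nothing here is exported by the package.

```ts
// Race a signal-less SDK promise against an abort rejection. If `signal`
// fires first, the caller stops waiting, but `work` keeps running in the
// background until it settles or an SDK-level timeout fires (the caveat above).
function raceWithAbort<T>(work: Promise<T>, signal: AbortSignal): Promise<T> {
  const abortRejection = new Promise<never>((_, reject) => {
    const onAbort = (): void => {
      const e = new Error('AbortError');
      e.name = 'AbortError'; // shaped so downstream normalization maps it to kind: 'cancelled'
      reject(e);
    };
    if (signal.aborted) onAbort();
    else signal.addEventListener('abort', onAbort, { once: true });
  });
  return Promise.race([work, abortRejection]);
}
```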
30_000;\n\n // Build config — always passed (empty object is valid GenerateContentConfig)\n const geminiConfig: GenerateContentConfig = {};\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n let sdkStream: AsyncGenerator<GenerateContentResponse>;\n\n try {\n // Gemini's generateContentStream() doesn't accept a signal, so we race the\n // initialization promise against an abort promise. See module-level caveat.\n sdkStream = await Promise.race([\n ai.models.generateContentStream({ model, contents, config: geminiConfig }),\n makeAbortRacePromise(ctl.signal),\n ]);\n } catch (err) {\n ctl.dispose();\n throw normalizeGeminiError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of withStallTimeout(sdkStream, stallMs, ctl, PROVIDER)) {\n const text = chunk.text;\n if (text !== undefined && text.length > 0) {\n yield { token: text };\n }\n // Capture usage from each chunk — the final chunk has the complete totals\n if (chunk.usageMetadata !== undefined) {\n finalUsage = normalizeUsage(chunk.usageMetadata);\n }\n }\n } catch (err) {\n throw normalizeGeminiError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n const augmentedMessages: LlmMessage[] = [\n {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n },\n ...messages,\n ];\n\n const model = options?.model ?? config.model;\n const { system, contents } = buildGeminiContents(augmentedMessages);\n const effectiveTimeoutMs = options?.timeoutMs ?? configTimeoutMs;\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const geminiConfig: GenerateContentConfig = {\n // Instruct Gemini to return JSON directly\n responseMimeType: 'application/json',\n };\n\n if (system !== undefined) geminiConfig.systemInstruction = system;\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) geminiConfig.maxOutputTokens = maxTokens;\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) geminiConfig.temperature = temperature;\n\n return await Promise.race([\n ai.models.generateContent({ model, contents, config: geminiConfig }),\n makeAbortRacePromise(ctl.signal),\n ]);\n } catch (err) {\n throw normalizeGeminiError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n\n const rawContent = rawResponse.text ?? 
'';\n\n let parsed: unknown;\n try {\n // Strip markdown code fences if the model included them despite the instruction\n const cleaned = rawContent\n .replace(/^```(?:json)?\\s*/i, '')\n .replace(/\\s*```$/, '')\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Gemini structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Gemini structured output: response failed schema validation. ${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usageMetadata),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * OpenAI provider for @diabolicallabs/llm-client.\n *\n * Implements: complete(), stream(), structured()\n *\n * Token normalization:\n * OpenAI: prompt_tokens / completion_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIStatusError.status → LlmError.statusCode + retryable flag\n * APIConnectionError → retryable: true\n *\n * Structured output uses OpenAI's response_format: { type: 'json_object' }.\n * For strict schema enforcement, the schema is described in the system prompt.\n */\n\nimport OpenAI from 'openai';\nimport { classifyAbort, createAttemptController, withStallTimeout } from '../abort.js';\nimport { mergeRetryOptsWithSignal, normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'openai';\n\n/** Normalize OpenAI's usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI's chat message format. */\nfunction buildOpenAIMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Normalize any OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n */\nexport function normalizeOpenAIError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // OpenAI SDK v6+: uses OpenAI.APIError as the base class with a `.status` field.\n // APIConnectionError is a subclass of APIError with status: undefined — check it first\n // so network failures are always retryable regardless of the missing status code.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401),\n // InternalServerError (500), etc. 
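The fence-stripping pipeline shared by the structured() implementations is easy to verify in isolation. A sketch of the same regex sequence; the helper name is mine, nothing here is exported.

```ts
// Mirrors the cleanup used before JSON.parse in structured():
// drop an opening ``` or ```json fence, drop a trailing fence, trim.
function stripFences(raw: string): string {
  return raw
    .replace(/^```(?:json)?\s*/i, '')
    .replace(/\s*```$/, '')
    .trim();
}

// stripFences('```json\n{"a":1}\n```') === '{"a":1}'
// stripFences('{"a":1}')               === '{"a":1}'
console.log(JSON.parse(stripFences('```json\n{"a":1}\n```'))); // { a: 1 }
```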
Retryability is determined by HTTP status code.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the OpenAI provider implementation. */\nexport function createOpenAIProvider(config: LlmClientConfig): LlmClient {\n const client = new OpenAI({\n apiKey: config.apiKey,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // We manage retries ourselves via withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const start = Date.now();\n\n return withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: openAIMessages,\n stream: false,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const response = await client.chat.completions.create(params, { signal: ctl.signal });\n\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n return {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n } catch (err) {\n throw normalizeOpenAIError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const stallMs = options?.streamStallTimeoutMs ?? config.streamStallTimeoutMs ?? 30_000;\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {\n model,\n messages: openAIMessages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? 
config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(params, { signal: ctl.signal });\n } catch (err) {\n ctl.dispose();\n throw normalizeOpenAIError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of withStallTimeout(sdkStream, stallMs, ctl, PROVIDER)) {\n // Token chunks arrive in choices[0].delta.content\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk (stream_options.include_usage must be true)\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizeOpenAIError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n\n // Yield usage on the final sentinel chunk\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // OpenAI JSON mode: response_format: { type: 'json_object' }\n // The system prompt must instruct the model to output JSON — OpenAI requires this.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? config.model;\n const openAIMessages = buildOpenAIMessages(augmentedMessages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const start = Date.now();\n\n const rawResponse = await withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: openAIMessages,\n stream: false,\n response_format: { type: 'json_object' },\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(params, { signal: ctl.signal });\n } catch (err) {\n throw normalizeOpenAIError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n\n const rawContent = rawResponse.choices[0]?.message.content ?? '';\n\n let parsed: unknown;\n try {\n parsed = JSON.parse(rawContent);\n } catch (err) {\n throw new LlmError({\n message: `OpenAI structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `OpenAI structured output: response failed schema validation. 
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Perplexity provider for @diabolicallabs/llm-client.\n *\n * Perplexity's chat completions API is OpenAI-compatible, so this provider\n * uses the OpenAI SDK pointed at Perplexity's base URL — same pattern as DeepSeek.\n *\n * API base URL: https://api.perplexity.ai\n * Docs: https://docs.perplexity.ai\n *\n * Implements: complete(), stream(), structured()\n *\n * Key Perplexity behaviors:\n * - Responses include a `citations` field: string[] of source URLs.\n * We map each URL to { url: string } and deduplicate by URL before returning.\n * - Citations are only available on non-streaming responses. The streaming API\n * does not include citations in individual chunks; consumers needing citations\n * must use complete(), not stream().\n * - Default model: 'sonar' — the lightweight search model (sonar-reasoning was\n * deprecated Dec 2025; sonar-reasoning-pro is its replacement).\n *\n * Model notes (confirmed against live API 2026-05-08):\n * - sonar — lightweight search, web-grounded\n * - sonar-pro — advanced search, more citations\n * - sonar-reasoning-pro — chain-of-thought reasoning (sonar-reasoning deprecated)\n * - sonar-deep-research — exhaustive research; supports async jobs. Perplexity's\n * docs note this model \"supports asynchronous jobs\" which\n * may mean a different response shape. We treat it as a\n * standard synchronous model; if the API returns an\n * incompatible shape, complete() will throw a clear LlmError\n * directing users to sonar-reasoning-pro or the async API.\n *\n * providerOptions (Wave 2 escape hatch):\n * The Perplexity API supports search-specific parameters not present on other providers.\n * Pass them via options.providerOptions:\n * search_recency_filter: 'month' | 'week' | 'day' | 'hour'\n * search_domain_filter: string[] — allowlist of domains to source from\n * Unknown fields are passed through unchanged to support future Perplexity API additions.\n *\n * structured() strategy:\n * Perplexity's response_format handling has limitations (especially on reasoning models\n * where reasoning tokens appear before JSON output). 
We use system-prompt JSON instruction\n * (same as DeepSeek) and strip both <think>...</think> reasoning blocks (sonar-reasoning-pro)\n * and markdown fences before JSON.parse().\n *\n * Token normalization:\n * Perplexity returns standard OpenAI-format usage: prompt_tokens / completion_tokens / total_tokens\n * → LlmUsage: inputTokens / outputTokens / totalTokens\n *\n * Error mapping:\n * APIConnectionError → retryable: true\n * APIError with status 429 / 5xx → retryable: true\n * Other APIErrors → non-retryable\n */\n\nimport OpenAI from 'openai';\nimport { classifyAbort, createAttemptController, withStallTimeout } from '../abort.js';\nimport { mergeRetryOptsWithSignal, normalizeThrownError, withRetry } from '../retry.js';\nimport type {\n LlmCallOptions,\n LlmClient,\n LlmClientConfig,\n LlmMessage,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '../types.js';\nimport { LlmError } from '../types.js';\n\nconst PROVIDER = 'perplexity';\nconst PERPLEXITY_BASE_URL = 'https://api.perplexity.ai';\n\n/**\n * Perplexity-specific fields that may appear on the OpenAI-compatible response object.\n * The SDK types don't include these; we cast and extract them safely.\n */\ninterface PerplexityResponseExtensions {\n citations?: string[];\n}\n\n/** Normalize OpenAI-format usage object to LlmUsage. */\nfunction normalizeUsage(usage: OpenAI.CompletionUsage | undefined | null): LlmUsage {\n const inputTokens = usage?.prompt_tokens ?? 0;\n const outputTokens = usage?.completion_tokens ?? 0;\n return {\n inputTokens,\n outputTokens,\n totalTokens: usage?.total_tokens ?? inputTokens + outputTokens,\n };\n}\n\n/** Convert LlmMessages to OpenAI-format chat message params. */\nfunction buildMessages(messages: LlmMessage[]): OpenAI.Chat.ChatCompletionMessageParam[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n }));\n}\n\n/**\n * Extract and deduplicate citations from a Perplexity response.\n *\n * Perplexity returns citations as string[] of URLs on the response object\n * (not in the OpenAI SDK types — accessed via cast). Deduplication is by URL.\n * Returns undefined if no citations are present or the array is empty.\n */\nfunction extractCitations(\n response: OpenAI.Chat.ChatCompletion & PerplexityResponseExtensions\n): LlmResponse['citations'] {\n const rawCitations = response.citations;\n if (rawCitations === undefined || rawCitations.length === 0) return undefined;\n\n const seen = new Set<string>();\n const deduped: Array<{ url: string; title?: string }> = [];\n\n for (const url of rawCitations) {\n if (!seen.has(url)) {\n seen.add(url);\n deduped.push({ url });\n }\n }\n\n return deduped.length > 0 ? 
deduped : undefined;\n}\n\n/**\n * Extract known Perplexity search filter fields from providerOptions.\n * Unknown fields are passed through to the API params unchanged.\n *\n * Known fields at time of implementation (2026-05-08):\n * search_recency_filter: 'month' | 'week' | 'day' | 'hour'\n * search_domain_filter: string[]\n */\nfunction extractProviderOptions(\n providerOptions: Record<string, unknown> | undefined\n): Record<string, unknown> {\n if (providerOptions === undefined) return {};\n // Pass all fields through — Perplexity may add new filters; unknown fields\n // are forwarded unchanged so consumers don't need a toolkit update to use them.\n return { ...providerOptions };\n}\n\n/**\n * Normalize any Perplexity / OpenAI SDK error into LlmError.\n * Exported for direct unit testing of the normalization logic.\n *\n * Uses the same OpenAI SDK error hierarchy since the client is an OpenAI\n * instance pointed at Perplexity's API.\n */\nexport function normalizePerplexityError(err: unknown): LlmError {\n if (err instanceof LlmError) return err;\n\n // APIConnectionError is a subclass of APIError with status: undefined —\n // check it first so network failures are always retryable.\n if (typeof OpenAI.APIConnectionError === 'function' && err instanceof OpenAI.APIConnectionError) {\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n retryable: true,\n cause: err,\n });\n }\n\n // Catch all other APIError subclasses: RateLimitError (429), AuthenticationError (401), etc.\n if (typeof OpenAI.APIError === 'function' && err instanceof OpenAI.APIError) {\n const status: number | undefined = err.status;\n if (status !== undefined) {\n const retryable = [429, 502, 503, 504].includes(status) || status >= 500;\n return new LlmError({\n message: err.message,\n provider: PROVIDER,\n statusCode: status,\n retryable,\n cause: err,\n });\n }\n return new LlmError({ message: err.message, provider: PROVIDER, retryable: false, cause: err });\n }\n\n return normalizeThrownError(err, PROVIDER);\n}\n\n/** Create the Perplexity provider implementation. */\nexport function createPerplexityProvider(config: LlmClientConfig): LlmClient {\n // OpenAI SDK pointed at Perplexity's OpenAI-compatible endpoint\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: PERPLEXITY_BASE_URL,\n timeout: config.timeoutMs ?? 30_000,\n maxRetries: 0, // Retries managed by withRetry\n });\n\n const retryOpts = {\n maxRetries: config.maxRetries ?? 3,\n baseDelayMs: config.baseDelayMs ?? 1_000,\n provider: PROVIDER,\n };\n\n async function complete(messages: LlmMessage[], options?: LlmCallOptions): Promise<LlmResponse> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const start = Date.now();\n const extraParams = extractProviderOptions(options?.providerOptions);\n\n return withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming & Record<string, unknown> =\n {\n model,\n messages: chatMessages,\n stream: false,\n ...extraParams,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? 
config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const rawResponse = await client.chat.completions.create(\n params as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming,\n { signal: ctl.signal }\n );\n\n // Cast to access Perplexity-specific extensions not present in OpenAI SDK types\n const response = rawResponse as OpenAI.Chat.ChatCompletion & PerplexityResponseExtensions;\n\n const content = response.choices.map((c) => c.message.content ?? '').join('');\n\n const result: LlmResponse = {\n content,\n model: response.model,\n usage: normalizeUsage(response.usage),\n latencyMs: Date.now() - start,\n };\n\n const citations = extractCitations(response);\n if (citations !== undefined) result.citations = citations;\n\n return result;\n } catch (err) {\n throw normalizePerplexityError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n }\n\n async function* stream(\n messages: LlmMessage[],\n options?: LlmCallOptions\n ): AsyncGenerator<LlmStreamChunk> {\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(messages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const stallMs = options?.streamStallTimeoutMs ?? config.streamStallTimeoutMs ?? 30_000;\n const extraParams = extractProviderOptions(options?.providerOptions);\n\n const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & Record<string, unknown> = {\n model,\n messages: chatMessages,\n stream: true,\n stream_options: { include_usage: true },\n ...extraParams,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n let sdkStream: Awaited<ReturnType<typeof client.chat.completions.create>>;\n\n try {\n sdkStream = await client.chat.completions.create(\n params as OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n { signal: ctl.signal }\n );\n } catch (err) {\n ctl.dispose();\n throw normalizePerplexityError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n }\n\n let finalUsage: LlmUsage | undefined;\n\n try {\n for await (const chunk of withStallTimeout(sdkStream, stallMs, ctl, PROVIDER)) {\n const delta = chunk.choices[0]?.delta.content;\n if (delta !== undefined && delta !== null && delta.length > 0) {\n yield { token: delta };\n }\n\n // Usage arrives in the final chunk when stream_options.include_usage is true\n if (chunk.usage !== undefined && chunk.usage !== null) {\n finalUsage = normalizeUsage(chunk.usage);\n }\n }\n } catch (err) {\n throw normalizePerplexityError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n\n // Note: citations are NOT available in streaming mode. Perplexity's streaming\n // API does not include citations in the chunk stream. Use complete() if citations\n // are required for your use case.\n if (finalUsage !== undefined) {\n yield { token: '', usage: finalUsage };\n }\n }\n\n async function structured<T>(\n messages: LlmMessage[],\n schema: { parse: (data: unknown) => T },\n options?: LlmCallOptions\n ): Promise<LlmStructuredResponse<T>> {\n // Perplexity's response_format has limitations with reasoning models (reasoning tokens\n // appear before JSON output). 
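A hypothetical call showing the two Perplexity-specific surfaces documented above: search filters riding along via providerOptions, and deduplicated citations on the non-streaming response. The factory import path and the prompt are placeholders of mine.

```ts
import { createClient } from '../client.js'; // assumed path

const pplx = createClient({
  provider: 'perplexity',
  model: 'sonar',
  apiKey: process.env.PERPLEXITY_API_KEY ?? '',
});

const response = await pplx.complete(
  [{ role: 'user', content: 'What changed in TypeScript this month?' }],
  {
    providerOptions: {
      search_recency_filter: 'month',
      search_domain_filter: ['typescriptlang.org', 'github.com'],
    },
  }
);

// citations is undefined or a URL-deduplicated array of { url } objects.
// Remember: stream() never yields citations; use complete() when you need them.
console.log(response.citations?.map((c) => c.url));
```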
Use system-prompt JSON instruction + fence stripping,\n // same as DeepSeek.\n const jsonSystemInstruction: LlmMessage = {\n role: 'system',\n content:\n 'You must respond with valid JSON only. No explanations, no markdown code fences, no extra text. Your entire response must be valid JSON that can be parsed with JSON.parse().',\n };\n\n const augmentedMessages = [jsonSystemInstruction, ...messages];\n const model = options?.model ?? config.model;\n const chatMessages = buildMessages(augmentedMessages);\n const effectiveTimeoutMs = options?.timeoutMs ?? config.timeoutMs ?? 30_000;\n const start = Date.now();\n const extraParams = extractProviderOptions(options?.providerOptions);\n\n const rawResponse = await withRetry(async () => {\n const ctl = createAttemptController(options?.signal, effectiveTimeoutMs);\n try {\n const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming & Record<string, unknown> =\n {\n model,\n messages: chatMessages,\n stream: false,\n ...extraParams,\n };\n\n const maxTokens = options?.maxTokens ?? config.maxTokens;\n if (maxTokens !== undefined) params.max_tokens = maxTokens;\n\n const temperature = options?.temperature ?? config.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n return await client.chat.completions.create(\n params as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming,\n { signal: ctl.signal }\n );\n } catch (err) {\n throw normalizePerplexityError(classifyAbort(err, ctl.abortReason(), PROVIDER));\n } finally {\n ctl.dispose();\n }\n }, mergeRetryOptsWithSignal(retryOpts, options?.signal));\n\n const rawContent = rawResponse.choices[0]?.message.content ?? '';\n\n let parsed: unknown;\n try {\n // sonar-reasoning-pro emits reasoning tokens inside <think>...</think> before the JSON.\n // Strip them first, then strip any markdown fences.\n const cleaned = rawContent\n .replace(/<think>[\\s\\S]*?<\\/think>/i, '') // strip reasoning block (sonar-reasoning-pro)\n .replace(/^```(?:json)?\\s*/i, '') // strip opening fence\n .replace(/\\s*```$/, '') // strip closing fence\n .trim();\n parsed = JSON.parse(cleaned);\n } catch (err) {\n throw new LlmError({\n message: `Perplexity structured output: response is not valid JSON. Raw: ${rawContent.slice(0, 200)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n let data: T;\n try {\n data = schema.parse(parsed);\n } catch (err) {\n throw new LlmError({\n message: `Perplexity structured output: response failed schema validation. 
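The <think> stripping step deserves a worked example, since reasoning models emit their chain of thought ahead of the JSON. This sketch follows the same regex sequence; the extra trim after the reasoning strip is a small hardening of my own, in case whitespace separates the block from the fence, and is not what the provider ships.

```ts
// Remove the reasoning block first, then fences, then parse. The first .trim()
// is my addition; the shipped pipeline trims only once, at the end.
function parseReasoningJson(raw: string): unknown {
  const cleaned = raw
    .replace(/<think>[\s\S]*?<\/think>/i, '')
    .trim()
    .replace(/^```(?:json)?\s*/i, '')
    .replace(/\s*```$/, '')
    .trim();
  return JSON.parse(cleaned);
}

const raw = '<think>user wants JSON...</think>\n```json\n{"ok":true}\n```';
console.log(parseReasoningJson(raw)); // { ok: true }
```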
${String(err)}`,\n provider: PROVIDER,\n retryable: false,\n cause: err,\n });\n }\n\n return {\n data,\n usage: normalizeUsage(rawResponse.usage),\n latencyMs: Date.now() - start,\n };\n }\n\n return {\n config,\n complete,\n stream,\n structured,\n };\n}\n","/**\n * Factory functions for LlmClient.\n *\n * createClient — dispatches to the correct provider implementation.\n * createClientFromEnv — convenience wrapper that reads API keys from env vars.\n *\n * Provider dispatch:\n * 'anthropic' → fully implemented (Week 2)\n * 'openai' → fully implemented (Week 2)\n * 'gemini' → fully implemented (Week 3)\n * 'deepseek' → fully implemented (Week 3)\n * 'perplexity' → fully implemented (Week 5) — search-grounded, citations, providerOptions\n */\n\nimport { createAnthropicProvider } from './providers/anthropic.js';\nimport { createDeepSeekProvider } from './providers/deepseek.js';\nimport { createGeminiProvider } from './providers/gemini.js';\nimport { createOpenAIProvider } from './providers/openai.js';\nimport { createPerplexityProvider } from './providers/perplexity.js';\nimport type { LlmClient, LlmClientConfig } from './types.js';\nimport { LlmError } from './types.js';\n\n/**\n * Create an LlmClient for the given provider and config.\n * Dispatches to the provider-specific implementation.\n * All five providers are fully implemented.\n */\nexport function createClient(config: LlmClientConfig): LlmClient {\n switch (config.provider) {\n case 'anthropic':\n return createAnthropicProvider(config);\n\n case 'openai':\n return createOpenAIProvider(config);\n\n case 'gemini':\n return createGeminiProvider(config);\n\n case 'deepseek':\n return createDeepSeekProvider(config);\n\n case 'perplexity':\n return createPerplexityProvider(config);\n\n default: {\n // TypeScript exhaustiveness check — if a new provider is added to the union\n // without a case here, this will be a compile-time error.\n const _exhaustive: never = config.provider;\n throw new LlmError({\n message: `[dlabs-toolkit] Unknown provider: ${String(_exhaustive)}`,\n provider: String(_exhaustive),\n retryable: false,\n });\n }\n }\n}\n\n/**\n * Convenience: create an LlmClient from environment variables.\n *\n * Reads API keys from the environment based on provider:\n * anthropic → ANTHROPIC_API_KEY\n * openai → OPENAI_API_KEY\n * gemini → GOOGLE_AI_API_KEY\n * deepseek → DEEPSEEK_API_KEY\n * perplexity → PERPLEXITY_API_KEY — recommended default model: 'sonar'\n *\n * Throws LlmError if the required env var is not set.\n */\nexport function createClientFromEnv(\n provider: LlmClientConfig['provider'],\n model: string,\n overrides?: Partial<Omit<LlmClientConfig, 'provider' | 'model' | 'apiKey'>>\n): LlmClient {\n const apiKey = resolveApiKey(provider);\n return createClient({ provider, model, apiKey, ...overrides });\n}\n\n/** Read the API key for a given provider from environment variables. */\nfunction resolveApiKey(provider: LlmClientConfig['provider']): string {\n const envVarMap: Record<LlmClientConfig['provider'], string> = {\n anthropic: 'ANTHROPIC_API_KEY',\n openai: 'OPENAI_API_KEY',\n gemini: 'GOOGLE_AI_API_KEY',\n deepseek: 'DEEPSEEK_API_KEY',\n perplexity: 'PERPLEXITY_API_KEY',\n };\n\n const envVar = envVarMap[provider];\n const apiKey = process.env[envVar];\n\n if (apiKey === undefined || apiKey.trim() === '') {\n throw new LlmError({\n message: `[dlabs-toolkit] ${envVar} is not set. 
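Finally, a quick-start sketch for the env-based factory above. GOOGLE_AI_API_KEY must be set or createClientFromEnv throws the LlmError shown; the import path and model id are placeholders of mine.

```ts
import { createClientFromEnv } from '../client.js'; // assumed path

// Reads GOOGLE_AI_API_KEY from the environment per the mapping above.
const gemini = createClientFromEnv('gemini', 'gemini-2.0-flash', { maxRetries: 2 });

const reply = await gemini.complete([{ role: 'user', content: 'Say hi.' }]);
console.log(reply.content, reply.usage.totalTokens, `${reply.latencyMs}ms`);
```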
Set this environment variable to use the ${provider} provider.`,\n provider,\n retryable: false,\n });\n }\n\n return apiKey;\n}\n"],"mappings":";AAcA,OAAO,eAAe;;;ACqGf,IAAM,WAAN,cAAuB,MAAM;AAAA,EAChB,OAAO;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA;AAAA;AAAA;AAAA,EAGS;AAAA,EAElB,YAAY,MAOT;AACD,UAAM,KAAK,SAAS,EAAE,OAAO,KAAK,MAAM,CAAC;AACzC,SAAK,WAAW,KAAK;AACrB,SAAK,aAAa,KAAK;AACvB,SAAK,YAAY,KAAK;AACtB,SAAK,OAAO,KAAK;AACjB,SAAK,QAAQ,KAAK;AAAA,EACpB;AACF;;;AC1GO,SAAS,wBACd,cACA,WACmB;AACnB,QAAM,WAAW,IAAI,gBAAgB;AACrC,MAAI;AAGJ,QAAM,QAAQ,WAAW,MAAM;AAC7B,eAAW;AACX,aAAS,MAAM,IAAI,MAAM,qBAAqB,CAAC;AAAA,EACjD,GAAG,SAAS;AAEZ,EAAC,MAA4C,QAAQ;AAGrD,QAAM,gBAAgB,MAAY;AAChC,eAAW;AACX,aAAS,MAAM,aAAc,MAAM;AAAA,EACrC;AAEA,MAAI,iBAAiB,QAAW;AAC9B,QAAI,aAAa,SAAS;AAExB,oBAAc;AAAA,IAChB,OAAO;AACL,mBAAa,iBAAiB,SAAS,eAAe,EAAE,MAAM,KAAK,CAAC;AAAA,IACtE;AAAA,EACF;AAEA,SAAO;AAAA,IACL,QAAQ,SAAS;AAAA,IACjB,aAAa,MAAM;AAAA,IACnB,OAAO,CAAC,MAAM;AACZ,iBAAW;AACX,eAAS,MAAM;AAAA,IACjB;AAAA,IACA,SAAS,MAAM;AACb,mBAAa,KAAK;AAClB,oBAAc,oBAAoB,SAAS,aAAa;AAAA,IAC1D;AAAA,EACF;AACF;AAUO,SAAS,iBAAiB,IAAY,QAAqC;AAChF,SAAO,IAAI,QAAc,CAAC,YAAY;AACpC,QAAI,QAAQ,SAAS;AACnB,cAAQ;AACR;AAAA,IACF;AAEA,UAAM,QAAQ,WAAW,MAAM;AAC7B,cAAQ,oBAAoB,SAAS,OAAO;AAC5C,cAAQ;AAAA,IACV,GAAG,EAAE;AACL,IAAC,MAA4C,QAAQ;AAErD,UAAM,UAAU,MAAY;AAC1B,mBAAa,KAAK;AAClB,cAAQ;AAAA,IACV;AAEA,YAAQ,iBAAiB,SAAS,SAAS,EAAE,MAAM,KAAK,CAAC;AAAA,EAC3D,CAAC;AACH;AAoBA,gBAAuB,iBACrB,QACA,SACA,KACA,UACmB;AACnB,QAAM,KAAK,OAAO,OAAO,aAAa,EAAE;AAExC,SAAO,MAAM;AAKX,QAAI;AAEJ,UAAM,eAAe,IAAI,QAAe,CAAC,GAAG,WAAW;AACrD,mBAAa,WAAW,MAAM;AAC5B,YAAI,MAAM,OAAO;AACjB;AAAA,UACE,IAAI,SAAS;AAAA,YACX;AAAA,YACA,MAAM;AAAA,YACN,WAAW;AAAA,YACX,SAAS,4BAA4B,OAAO;AAAA,UAC9C,CAAC;AAAA,QACH;AAAA,MACF,GAAG,OAAO;AAAA,IACZ,CAAC;AAED,QAAI;AACF,YAAM,OAAO,MAAM,QAAQ,KAAK,CAAC,GAAG,KAAK,GAAG,YAAY,CAAC;AACzD,mBAAa,UAAU;AACvB,UAAI,KAAK,KAAM;AACf,YAAM,KAAK;AAAA,IACb,SAAS,KAAK;AACZ,mBAAa,UAAU;AACvB,YAAM;AAAA,IACR;AAAA,EACF;AACF;AAsBO,SAAS,cACd,KACA,aACA,UACS;AAGT,QAAM,kBAAkB,gBAAgB;AACxC,MAAI,CAAC,mBAAmB,CAAC,aAAa,GAAG,EAAG,QAAO;AAEnD,UAAQ,aAAa;AAAA,IACnB,KAAK;AACH,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS;AAAA,QACT;AAAA,QACA,MAAM;AAAA,QACN,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH,KAAK;AACH,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS;AAAA,QACT;AAAA,QACA,MAAM;AAAA,QACN,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH,KAAK;AAAA,IACL;AACE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS;AAAA,QACT;AAAA,QACA,MAAM;AAAA,QACN,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,EACL;AACF;AAGA,SAAS,aAAa,KAAuB;AAC3C,MAAI,eAAe,SAAS,IAAI,SAAS,aAAc,QAAO;AAC9D,MAAI,OAAO,iBAAiB,eAAe,eAAe,gBAAgB,IAAI,SAAS;AACrF,WAAO;AACT,SAAO;AACT;;;AC1NA,IAAM,0BAA0B,oBAAI,IAAI,CAAC,KAAK,KAAK,KAAK,GAAG,CAAC;AAG5D,IAAM,wBAAwB,oBAAI,IAAI,CAAC,cAAc,aAAa,cAAc,CAAC;AAGjF,IAAM,8BAA8B,oBAAI,IAAI,CAAC,KAAK,KAAK,KAAK,GAAG,CAAC;AAGzD,SAAS,kBAAkB,YAA6B;AAC7D,MAAI,wBAAwB,IAAI,UAAU,EAAG,QAAO;AACpD,MAAI,4BAA4B,IAAI,UAAU,EAAG,QAAO;AAExD,SAAO,cAAc;AACvB;AAGO,SAAS,qBAAqB,MAAuB;AAC1D,SAAO,sBAAsB,IAAI,IAAI;AACvC;AAGO,SAAS,iBAAiB,SAAiB,aAA6B;AAC7E,QAAM,UAAU,cAAc,KAAK;AACnC,SAAO,KAAK,OAAO,IAAI;AACzB;AAmBO,SAAS,yBACd,MACA,QACc;AACd,SAAO,WAAW,SAAY,EAAE,GAAG,MAAM,OAAO,IAAI,EAAE,GAAG,KAAK;AAChE;AAaA,eAAsB,UACpB,IACA,MACY;AACZ,MAAI;AAEJ,WAAS,UAAU,GAAG,WAAW,KAAK,YAAY,WAAW;AAE3D,QAAI,KAAK,QAAQ,YAAY,MAAM;AACjC,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS;AAAA,QACT,UAAU,KAAK;AAAA,QACf,MAAM;AAAA,QACN,WAAW;AAAA,QACX,OAAO,KAAK,OAAO;AAAA,MACrB,CAAC;AAAA,IACH;AAEA,QAAI;AACF,aAAO,MAAM,GAAG,OAAO;AAAA,IACzB,SAAS,KAAK;AACZ,YAAM,SAAS,qBAAqB,KAAK,KAAK,QAAQ;AAGtD,UAAI,OAAO,SAAS,YAAa,OAAM;AAEvC,UAAI,CAAC,OAAO,aAAa,YAAY,KAAK,YAAY;AACpD,cAA
M;AAAA,MACR;AAEA,kBAAY;AACZ,YAAM,UAAU,iBAAiB,SAAS,KAAK,WAAW;AAE1D,YAAM,iBAAiB,SAAS,KAAK,MAAM;AAAA,IAC7C;AAAA,EACF;AAIA,QACE,aACA,IAAI,SAAS;AAAA,IACX,SAAS;AAAA,IACT,UAAU,KAAK;AAAA,IACf,WAAW;AAAA,EACb,CAAC;AAEL;AAGO,SAAS,qBAAqB,KAAc,UAA4B;AAC7E,MAAI,eAAe,SAAU,QAAO;AAEpC,MAAI,eAAe,OAAO;AAIxB,QACE,IAAI,SAAS,gBACZ,OAAO,iBAAiB,eAAe,eAAe,gBAAgB,IAAI,SAAS,cACpF;AACA,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI,WAAW;AAAA,QACxB;AAAA,QACA,MAAM;AAAA,QACN,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,UAAM,cAAc;AACpB,UAAM,aAAa,YAAY,UAAU,YAAY;AAGrD,QAAI,YAAY,SAAS,UAAa,qBAAqB,YAAY,IAAI,GAAG;AAC5E,UAAI,eAAe,QAAW;AAC5B,eAAO,IAAI,SAAS;AAAA,UAClB,SAAS,IAAI;AAAA,UACb;AAAA,UACA;AAAA,UACA,MAAM;AAAA,UACN,WAAW;AAAA,UACX,OAAO;AAAA,QACT,CAAC;AAAA,MACH;AACA,aAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAU,MAAM,WAAW,WAAW,MAAM,OAAO,IAAI,CAAC;AAAA,IACtG;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,YAAY,kBAAkB,UAAU;AAC9C,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb;AAAA,QACA;AAAA,QACA,MAAM,YAAY,SAAS;AAAA,QAC3B;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb;AAAA,MACA,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAEA,SAAO,IAAI,SAAS;AAAA,IAClB,SAAS,OAAO,GAAG;AAAA,IACnB;AAAA,IACA,MAAM;AAAA,IACN,WAAW;AAAA,IACX,OAAO;AAAA,EACT,CAAC;AACH;;;AH1KA,IAAM,WAAW;AAGjB,SAAS,eAAe,OAA8C;AACpE,QAAM,cAAc,OAAO,gBAAgB;AAC3C,QAAM,eAAe,OAAO,iBAAiB;AAC7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,cAAc;AAAA;AAAA;AAAA,IAG3B,qBAAsB,OAClB;AAAA,IACJ,iBAAkB,OACd;AAAA,EACN;AACF;AAGA,SAAS,uBAAuB,UAG9B;AACA,QAAM,iBAAiB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AACjE,QAAM,uBAAuB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AAEvE,QAAM,SACJ,eAAe,SAAS,IAAI,eAAe,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,IAAI;AAEhF,QAAM,oBAA8C,qBAAqB,IAAI,CAAC,OAAO;AAAA,IACnF,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AAEF,SAAO,EAAE,QAAQ,UAAU,kBAAkB;AAC/C;AAMO,SAAS,wBAAwB,KAAwB;AAC9D,MAAI,eAAe,SAAU,QAAO;AAKpC,MACE,OAAO,UAAU,uBAAuB,cACxC,eAAe,UAAU,oBACzB;AACA,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAU;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,MAAI,OAAO,UAAU,aAAa,cAAc,eAAe,UAAU,UAAU;AACjF,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAU;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAU,UAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAK,QAAQ;AAC3C;AAGO,SAAS,wBAAwB,QAAoC;AAG1E,QAAM,SAAS,IAAI,UAAU;AAAA,IAC3B,QAAQ,OAAO;AAAA,IACf,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAU;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,UAAU,kBAAkB,IAAI,uBAAuB,QAAQ;AAE/E,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AAErE,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAE3B,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SAAoD;AAAA,UACxD;AAAA,UACA,UAAU;AAAA,UACV,YAAY,SAAS,aAAa,OAAO,aAAa;AAAA,QACxD;AAEA,YAAI,WAAW,OAAW,QAAO,SAAS;AAC1C,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,QAAW;AAC7B,iBAAO,cAAc;AAAA,QACvB;AAGA,cAAM,WAAW,MAAM,OAAO,SAAS,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAE5E,cAAM,UAAU,SAAS,QACtB,OAAO,CAAC,UAAwC,MAAM,SAAS,MAAM,EACrE,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,EAAE;AAEV,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAO,eAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AAKZ,cAAM,wBAAwB,cAAc,KAAK,IAAI,YAAY,GAAG,QAAQ,CAAC;AAAA,MAC/E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAAA,EACzD;AAEA,kBAAgB,OACd,UAC
A,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,UAAU,kBAAkB,IAAI,uBAAuB,QAAQ;AAC/E,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,UAAU,SAAS,wBAAwB,OAAO,wBAAwB;AAEhF,UAAM,SAAwC;AAAA,MAC5C;AAAA,MACA,UAAU;AAAA,MACV,YAAY,SAAS,aAAa,OAAO,aAAa;AAAA,IACxD;AAEA,QAAI,WAAW,OAAW,QAAO,SAAS;AAC1C,UAAM,oBAAoB,SAAS,eAAe,OAAO;AACzD,QAAI,sBAAsB,QAAW;AACnC,aAAO,cAAc;AAAA,IACvB;AAGA,UAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AAEvE,QAAI;AAEJ,QAAI;AACF,kBAAY,OAAO,SAAS,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAAA,IACnE,SAAS,KAAK;AACZ,UAAI,QAAQ;AACZ,YAAM,wBAAwB,cAAc,KAAK,IAAI,YAAY,GAAG,QAAQ,CAAC;AAAA,IAC/E;AAGA,QAAI;AAEJ,QAAI;AAGF,YAAM,eAAe;AAAA,QACnB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AACA,uBAAiB,SAAS,cAAc;AACtC,YAAI,MAAM,SAAS,yBAAyB,MAAM,MAAM,SAAS,cAAc;AAC7E,gBAAM,EAAE,OAAO,MAAM,MAAM,KAAK;AAAA,QAClC,WAAW,MAAM,SAAS,mBAAmB,WAAW,OAAO;AAE7D,gBAAM,QAAQ,MAAM,UAAU,aAAa;AAC3C,uBAAa,eAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AAGZ,YAAM,wBAAwB,cAAc,KAAK,IAAI,YAAY,GAAG,QAAQ,CAAC;AAAA,IAC/E,UAAE;AACA,UAAI,QAAQ;AAAA,IACd;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,WAAW,MAAM,SAAS,mBAAmB,OAAO;AAE1D,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,SAAS,QACtB,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,iEAAiE,SAAS,QAAQ,MAAM,GAAG,GAAG,CAAC;AAAA,QACxG,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,mEAAmE,OAAO,GAAG,CAAC;AAAA,QACvF,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAO,SAAS;AAAA,MAChB,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AItRA,OAAO,YAAY;AAenB,IAAMA,YAAW;AACjB,IAAM,oBAAoB;AAG1B,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAAS,cAAc,UAAkE;AACvF,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AASO,SAAS,uBAAuB,KAAwB;AAC7D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,OAAO,OAAO,uBAAuB,cAAc,eAAe,OAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUD;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAGA,MAAI,OAAO,OAAO,aAAa,cAAc,eAAe,OAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUA;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,uBAAuB,QAAoC;AAEzE,QAAM,SAAS,IAAI,OAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS;AAAA,IACT,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUA;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,QAAQ;AAC3C,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ
,EAAE,QAAQ,IAAI,OAAO,CAAC;AACpF,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOC,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,uBAAuB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,MAC9E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAAA,EACzD;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,QAAQ;AAC3C,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,UAAU,SAAS,wBAAwB,OAAO,wBAAwB;AAEhF,UAAM,SAA0D;AAAA,MAC9D;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,UAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAAA,IACjF,SAAS,KAAK;AACZ,UAAI,QAAQ;AACZ,YAAM,uBAAuB,cAAc,KAAK,IAAI,YAAY,GAAGA,SAAQ,CAAC;AAAA,IAC9E;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,iBAAiB,WAAW,SAAS,KAAKA,SAAQ,GAAG;AAC7E,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaC,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,uBAAuB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,IAC9E,UAAE;AACA,UAAI,QAAQ;AAAA,IACd;AAEA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAe,cAAc,iBAAiB;AACpD,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAAA,MAC5E,SAAS,KAAK;AACZ,cAAM,uBAAuB,cAAc,KAAK,IAAI,YAAY,GAAGA,SAAQ,CAAC;AAAA,MAC9E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAEvD,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,WACb,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QACjG,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,kEAAkE,OAAO,GAAG,CAAC;AAAA,QACtF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;ACjQA;AAAA,EACE;AAAA,EAKA;AAAA,OACK;AAeP,IAAMC,YAAW;AAGjB,SAASC,gBAAe,MAAkE;AACxF,QAAM,cAAc,MAAM,oBAAoB;AAC9C,QAAM,eAAe,MAAM,wBAAwB;AACnD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,MAAM,mBAAmB,cAAc;AAAA,EACtD;AACF;AAOA,SAAS,oBAAoB,UAG3B;AACA,QAAM,iBAAiB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AACjE,QAAM,uBAAuB,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ;AAEvE,QAAM,SACJ,eAAe,SAAS,IAAI,eAAe,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,IAAI;AAEhF,QAAM,WAAsB,qBAAqB,IAAI,CAAC,OAAO;AAAA,IAC3D,MAAM,EAAE,SAAS,cAAc,UAAU;AAAA,IACzC,OAAO,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC;AAAA,EAC7B,EAAE;AAEF,SAAO,EAAE,QAAQ,SAAS;AAC5B;AAUO,SAAS,qBAAqB,KAAwB;AAC3D,MAAI,eAAe,SAAU,QAAO;AA
IpC,MAAI,eAAe,UAAU;AAC3B,UAAM,YAAY,IAAI,WAAW,OAAO,IAAI,UAAU;AACtD,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUD;AAAA,MACV,YAAY,IAAI;AAAA,MAChB;AAAA,MACA,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAUA,SAAS,qBAAqB,QAAqC;AACjE,SAAO,IAAI,QAAe,CAAC,GAAG,WAAW;AACvC,UAAM,UAAU,MAAY;AAC1B,YAAM,IAAI,IAAI,MAAM,YAAY;AAChC,QAAE,OAAO;AACT,aAAO,CAAC;AAAA,IACV;AACA,QAAI,OAAO,SAAS;AAClB,cAAQ;AAAA,IACV,OAAO;AACL,aAAO,iBAAiB,SAAS,SAAS,EAAE,MAAM,KAAK,CAAC;AAAA,IAC1D;AAAA,EACF,CAAC;AACH;AAGO,SAAS,qBAAqB,QAAoC;AACvE,QAAM,kBAAkB,OAAO,aAAa;AAO5C,QAAM,KAAK,IAAI,YAAY;AAAA,IACzB,QAAQ,OAAO;AAAA,IACf,aAAa;AAAA,MACX,SAAS,kBAAkB;AAAA,IAC7B;AAAA,EACF,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUA;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,QAAQ;AACzD,UAAM,qBAAqB,SAAS,aAAa;AACjD,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AAEF,cAAM,eAAsC,CAAC;AAE7C,YAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,cAAa,cAAc;AAK1D,cAAM,WAAW,MAAM,QAAQ,KAAK;AAAA,UAClC,GAAG,OAAO,gBAAgB,EAAE,OAAO,UAAU,QAAQ,aAAa,CAAC;AAAA,UACnE,qBAAqB,IAAI,MAAM;AAAA,QACjC,CAAC;AAED,eAAO;AAAA,UACL,SAAS,SAAS,QAAQ;AAAA,UAC1B;AAAA,UACA,OAAOC,gBAAe,SAAS,aAAa;AAAA,UAC5C,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,MAC5E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAAA,EACzD;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,QAAQ;AACzD,UAAM,qBAAqB,SAAS,aAAa;AACjD,UAAM,UAAU,SAAS,wBAAwB,OAAO,wBAAwB;AAGhF,UAAM,eAAsC,CAAC;AAC7C,QAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,UAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,QAAI;AAEJ,QAAI;AAGF,kBAAY,MAAM,QAAQ,KAAK;AAAA,QAC7B,GAAG,OAAO,sBAAsB,EAAE,OAAO,UAAU,QAAQ,aAAa,CAAC;AAAA,QACzE,qBAAqB,IAAI,MAAM;AAAA,MACjC,CAAC;AAAA,IACH,SAAS,KAAK;AACZ,UAAI,QAAQ;AACZ,YAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGA,SAAQ,CAAC;AAAA,IAC5E;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,iBAAiB,WAAW,SAAS,KAAKA,SAAQ,GAAG;AAC7E,cAAM,OAAO,MAAM;AACnB,YAAI,SAAS,UAAa,KAAK,SAAS,GAAG;AACzC,gBAAM,EAAE,OAAO,KAAK;AAAA,QACtB;AAEA,YAAI,MAAM,kBAAkB,QAAW;AACrC,uBAAaC,gBAAe,MAAM,aAAa;AAAA,QACjD;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,IAC5E,UAAE;AACA,UAAI,QAAQ;AAAA,IACd;AAEA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AACnC,UAAM,oBAAkC;AAAA,MACtC;AAAA,QACE,MAAM;AAAA,QACN,SACE;AAAA,MACJ;AAAA,MACA,GAAG;AAAA,IACL;AAEA,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,EAAE,QAAQ,SAAS,IAAI,oBAAoB,iBAAiB;AAClE,UAAM,qBAAqB,SAAS,aAAa;AACjD,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,eAAsC;AAAA;AAAA,UAE1C,kBAAkB;AAAA,QACpB;AAEA,YAAI,WAAW,OAAW,cAAa,oBAAoB;AAC3D,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,cAAa,kBAAkB;AAC5D,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,cAAa,cAAc;AAE1D,eAAO,MAAM,QAAQ,KAAK;AAAA,UACxB,GAAG,OAAO,gBAAgB,EAAE,OAAO,UAAU,QAAQ,aAAa,CAAC;AAAA,UACnE,qBAAqB,IAAI,MAAM;AAAA,QACjC,CAAC;AAAA,MACH,SAAS,KAAK;AACZ,cAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGA,SAAQ,CAAC;AAAA,MAC5E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAEvD,UAAM,aAAa,YAAY,QAAQ;AAEvC,QAAI;AACJ,QAAI;AAEF,YAAM,UAAU,WACb
,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,8DAA8D,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QAC/F,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,OAAO,GAAG,CAAC;AAAA,QACpF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,aAAa;AAAA,MAC/C,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;ACrVA,OAAOC,aAAY;AAenB,IAAMC,YAAW;AAGjB,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAAS,oBAAoB,UAAkE;AAC7F,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AAMO,SAAS,qBAAqB,KAAwB;AAC3D,MAAI,eAAe,SAAU,QAAO;AAKpC,MAAI,OAAOC,QAAO,uBAAuB,cAAc,eAAeA,QAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUF;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAIA,MAAI,OAAOE,QAAO,aAAa,cAAc,eAAeA,QAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUF;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,qBAAqB,QAAoC;AACvE,QAAM,SAAS,IAAIE,QAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUF;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,QAAQ;AACnD,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,QAAQ,KAAK,IAAI;AAEvB,WAAO,UAAU,YAAY;AAC3B,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,QACV;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAEpF,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOC,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,MAC5E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAAA,EACzD;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,QAAQ;AACnD,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,UAAU,SAAS,wBAAwB,OAAO,wBAAwB;AAEhF,UAAM,SAA0D;AAAA,MAC9D;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,UAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAAA,IACjF,SAAS,KAAK;AACZ,UAAI,QAAQ;AACZ,YAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGA,SAAQ,CAAC;AAAA,IAC5E;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,iBAAiB,WAAW,SAAS,KAAKA,SAAQ,GAAG;AAE7E,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaC,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,qBAAqB,cAAc,KAAK,IAAI,
YAAY,GAAGD,SAAQ,CAAC;AAAA,IAC5E,UAAE;AACA,UAAI,QAAQ;AAAA,IACd;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAGnC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,iBAAiB,oBAAoB,iBAAiB;AAC5D,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,QAAQ,KAAK,IAAI;AAEvB,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SAA6D;AAAA,UACjE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,iBAAiB,EAAE,MAAM,cAAc;AAAA,QACzC;AAEA,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAAE,QAAQ,IAAI,OAAO,CAAC;AAAA,MAC5E,SAAS,KAAK;AACZ,cAAM,qBAAqB,cAAc,KAAK,IAAI,YAAY,GAAGA,SAAQ,CAAC;AAAA,MAC5E,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAEvD,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AACF,eAAS,KAAK,MAAM,UAAU;AAAA,IAChC,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,8DAA8D,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QAC/F,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,gEAAgE,OAAO,GAAG,CAAC;AAAA,QACpF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;ACxOA,OAAOE,aAAY;AAenB,IAAMC,YAAW;AACjB,IAAM,sBAAsB;AAW5B,SAASC,gBAAe,OAA4D;AAClF,QAAM,cAAc,OAAO,iBAAiB;AAC5C,QAAM,eAAe,OAAO,qBAAqB;AACjD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,aAAa,OAAO,gBAAgB,cAAc;AAAA,EACpD;AACF;AAGA,SAASC,eAAc,UAAkE;AACvF,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,EACb,EAAE;AACJ;AASA,SAAS,iBACP,UAC0B;AAC1B,QAAM,eAAe,SAAS;AAC9B,MAAI,iBAAiB,UAAa,aAAa,WAAW,EAAG,QAAO;AAEpE,QAAM,OAAO,oBAAI,IAAY;AAC7B,QAAM,UAAkD,CAAC;AAEzD,aAAW,OAAO,cAAc;AAC9B,QAAI,CAAC,KAAK,IAAI,GAAG,GAAG;AAClB,WAAK,IAAI,GAAG;AACZ,cAAQ,KAAK,EAAE,IAAI,CAAC;AAAA,IACtB;AAAA,EACF;AAEA,SAAO,QAAQ,SAAS,IAAI,UAAU;AACxC;AAUA,SAAS,uBACP,iBACyB;AACzB,MAAI,oBAAoB,OAAW,QAAO,CAAC;AAG3C,SAAO,EAAE,GAAG,gBAAgB;AAC9B;AASO,SAAS,yBAAyB,KAAwB;AAC/D,MAAI,eAAe,SAAU,QAAO;AAIpC,MAAI,OAAOC,QAAO,uBAAuB,cAAc,eAAeA,QAAO,oBAAoB;AAC/F,WAAO,IAAI,SAAS;AAAA,MAClB,SAAS,IAAI;AAAA,MACb,UAAUH;AAAA,MACV,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AAGA,MAAI,OAAOG,QAAO,aAAa,cAAc,eAAeA,QAAO,UAAU;AAC3E,UAAM,SAA6B,IAAI;AACvC,QAAI,WAAW,QAAW;AACxB,YAAM,YAAY,CAAC,KAAK,KAAK,KAAK,GAAG,EAAE,SAAS,MAAM,KAAK,UAAU;AACrE,aAAO,IAAI,SAAS;AAAA,QAClB,SAAS,IAAI;AAAA,QACb,UAAUH;AAAA,QACV,YAAY;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AACA,WAAO,IAAI,SAAS,EAAE,SAAS,IAAI,SAAS,UAAUA,WAAU,WAAW,OAAO,OAAO,IAAI,CAAC;AAAA,EAChG;AAEA,SAAO,qBAAqB,KAAKA,SAAQ;AAC3C;AAGO,SAAS,yBAAyB,QAAoC;AAE3E,QAAM,SAAS,IAAIG,QAAO;AAAA,IACxB,QAAQ,OAAO;AAAA,IACf,SAAS;AAAA,IACT,SAAS,OAAO,aAAa;AAAA,IAC7B,YAAY;AAAA;AAAA,EACd,CAAC;AAED,QAAM,YAAY;AAAA,IAChB,YAAY,OAAO,cAAc;AAAA,IACjC,aAAa,OAAO,eAAe;AAAA,IACnC,UAAUH;AAAA,EACZ;AAEA,iBAAe,SAAS,UAAwB,SAAgD;AAC9F,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAeE,eAAc,QAAQ;AAC3C,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,cAAc,uBAAuB,SAAS,eAAe;AAEnE,WAAO,UAAU,YAAY;AAC3B,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SACJ;AAAA,UACE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,GAAG;AAAA,QACL;AAEF,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,g
BAAgB,OAAW,QAAO,cAAc;AAEpD,cAAM,cAAc,MAAM,OAAO,KAAK,YAAY;AAAA,UAChD;AAAA,UACA,EAAE,QAAQ,IAAI,OAAO;AAAA,QACvB;AAGA,cAAM,WAAW;AAEjB,cAAM,UAAU,SAAS,QAAQ,IAAI,CAAC,MAAM,EAAE,QAAQ,WAAW,EAAE,EAAE,KAAK,EAAE;AAE5E,cAAM,SAAsB;AAAA,UAC1B;AAAA,UACA,OAAO,SAAS;AAAA,UAChB,OAAOD,gBAAe,SAAS,KAAK;AAAA,UACpC,WAAW,KAAK,IAAI,IAAI;AAAA,QAC1B;AAEA,cAAM,YAAY,iBAAiB,QAAQ;AAC3C,YAAI,cAAc,OAAW,QAAO,YAAY;AAEhD,eAAO;AAAA,MACT,SAAS,KAAK;AACZ,cAAM,yBAAyB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,MAChF,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAAA,EACzD;AAEA,kBAAgB,OACd,UACA,SACgC;AAChC,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAeE,eAAc,QAAQ;AAC3C,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,UAAU,SAAS,wBAAwB,OAAO,wBAAwB;AAChF,UAAM,cAAc,uBAAuB,SAAS,eAAe;AAEnE,UAAM,SAAoF;AAAA,MACxF;AAAA,MACA,UAAU;AAAA,MACV,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,MACtC,GAAG;AAAA,IACL;AAEA,UAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,QAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,UAAM,cAAc,SAAS,eAAe,OAAO;AACnD,QAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,UAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,QAAI;AAEJ,QAAI;AACF,kBAAY,MAAM,OAAO,KAAK,YAAY;AAAA,QACxC;AAAA,QACA,EAAE,QAAQ,IAAI,OAAO;AAAA,MACvB;AAAA,IACF,SAAS,KAAK;AACZ,UAAI,QAAQ;AACZ,YAAM,yBAAyB,cAAc,KAAK,IAAI,YAAY,GAAGF,SAAQ,CAAC;AAAA,IAChF;AAEA,QAAI;AAEJ,QAAI;AACF,uBAAiB,SAAS,iBAAiB,WAAW,SAAS,KAAKA,SAAQ,GAAG;AAC7E,cAAM,QAAQ,MAAM,QAAQ,CAAC,GAAG,MAAM;AACtC,YAAI,UAAU,UAAa,UAAU,QAAQ,MAAM,SAAS,GAAG;AAC7D,gBAAM,EAAE,OAAO,MAAM;AAAA,QACvB;AAGA,YAAI,MAAM,UAAU,UAAa,MAAM,UAAU,MAAM;AACrD,uBAAaC,gBAAe,MAAM,KAAK;AAAA,QACzC;AAAA,MACF;AAAA,IACF,SAAS,KAAK;AACZ,YAAM,yBAAyB,cAAc,KAAK,IAAI,YAAY,GAAGD,SAAQ,CAAC;AAAA,IAChF,UAAE;AACA,UAAI,QAAQ;AAAA,IACd;AAKA,QAAI,eAAe,QAAW;AAC5B,YAAM,EAAE,OAAO,IAAI,OAAO,WAAW;AAAA,IACvC;AAAA,EACF;AAEA,iBAAe,WACb,UACA,QACA,SACmC;AAInC,UAAM,wBAAoC;AAAA,MACxC,MAAM;AAAA,MACN,SACE;AAAA,IACJ;AAEA,UAAM,oBAAoB,CAAC,uBAAuB,GAAG,QAAQ;AAC7D,UAAM,QAAQ,SAAS,SAAS,OAAO;AACvC,UAAM,eAAeE,eAAc,iBAAiB;AACpD,UAAM,qBAAqB,SAAS,aAAa,OAAO,aAAa;AACrE,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,cAAc,uBAAuB,SAAS,eAAe;AAEnE,UAAM,cAAc,MAAM,UAAU,YAAY;AAC9C,YAAM,MAAM,wBAAwB,SAAS,QAAQ,kBAAkB;AACvE,UAAI;AACF,cAAM,SACJ;AAAA,UACE;AAAA,UACA,UAAU;AAAA,UACV,QAAQ;AAAA,UACR,GAAG;AAAA,QACL;AAEF,cAAM,YAAY,SAAS,aAAa,OAAO;AAC/C,YAAI,cAAc,OAAW,QAAO,aAAa;AAEjD,cAAM,cAAc,SAAS,eAAe,OAAO;AACnD,YAAI,gBAAgB,OAAW,QAAO,cAAc;AAEpD,eAAO,MAAM,OAAO,KAAK,YAAY;AAAA,UACnC;AAAA,UACA,EAAE,QAAQ,IAAI,OAAO;AAAA,QACvB;AAAA,MACF,SAAS,KAAK;AACZ,cAAM,yBAAyB,cAAc,KAAK,IAAI,YAAY,GAAGF,SAAQ,CAAC;AAAA,MAChF,UAAE;AACA,YAAI,QAAQ;AAAA,MACd;AAAA,IACF,GAAG,yBAAyB,WAAW,SAAS,MAAM,CAAC;AAEvD,UAAM,aAAa,YAAY,QAAQ,CAAC,GAAG,QAAQ,WAAW;AAE9D,QAAI;AACJ,QAAI;AAGF,YAAM,UAAU,WACb,QAAQ,6BAA6B,EAAE,EACvC,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,WAAW,EAAE,EACrB,KAAK;AACR,eAAS,KAAK,MAAM,OAAO;AAAA,IAC7B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,kEAAkE,WAAW,MAAM,GAAG,GAAG,CAAC;AAAA,QACnG,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,OAAO,MAAM,MAAM;AAAA,IAC5B,SAAS,KAAK;AACZ,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,oEAAoE,OAAO,GAAG,CAAC;AAAA,QACxF,UAAUA;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAOC,gBAAe,YAAY,KAAK;AAAA,MACvC,WAAW,KAAK,IAAI,IAAI;AAAA,IAC1B;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AChYO,SAAS,aAAa,QAAoC;AAC/D,UAAQ,OAAO,UAAU;AAAA,IACvB,KAAK;AACH,aAAO,wBAAwB,MAAM;AAAA,IAEvC,KAAK;AACH,aAAO,qBAAqB,MAAM;AAAA,IAEpC,KAAK;AACH,aAAO,qBAAqB,MAAM;AAAA,IAEpC,KAAK;AACH,aAAO,uBAAuB,MAAM;AAAA,IAEtC,KAAK;AACH,aAAO,yBAAyB,MAAM;AAAA,IAExC,SAAS;AAGP,YAAM,cAAqB,OAAO;AAClC,YAAM,IAAI,SAAS;AAAA,QACjB,SAAS,qCAAqC,
OAAO,WAAW,CAAC;AAAA,QACjE,UAAU,OAAO,WAAW;AAAA,QAC5B,WAAW;AAAA,MACb,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAcO,SAAS,oBACd,UACA,OACA,WACW;AACX,QAAM,SAAS,cAAc,QAAQ;AACrC,SAAO,aAAa,EAAE,UAAU,OAAO,QAAQ,GAAG,UAAU,CAAC;AAC/D;AAGA,SAAS,cAAc,UAA+C;AACpE,QAAM,YAAyD;AAAA,IAC7D,WAAW;AAAA,IACX,QAAQ;AAAA,IACR,QAAQ;AAAA,IACR,UAAU;AAAA,IACV,YAAY;AAAA,EACd;AAEA,QAAM,SAAS,UAAU,QAAQ;AACjC,QAAM,SAAS,QAAQ,IAAI,MAAM;AAEjC,MAAI,WAAW,UAAa,OAAO,KAAK,MAAM,IAAI;AAChD,UAAM,IAAI,SAAS;AAAA,MACjB,SAAS,mBAAmB,MAAM,yDAAyD,QAAQ;AAAA,MACnG;AAAA,MACA,WAAW;AAAA,IACb,CAAC;AAAA,EACH;AAEA,SAAO;AACT;","names":["PROVIDER","normalizeUsage","PROVIDER","normalizeUsage","OpenAI","PROVIDER","normalizeUsage","OpenAI","OpenAI","PROVIDER","normalizeUsage","buildMessages","OpenAI"]}
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@diabolicallabs/llm-client",
-  "version": "0.2.0",
+  "version": "0.3.0",
   "description": "Unified LLM API for Anthropic, OpenAI, Google, and DeepSeek. Streaming, retry/backoff, structured output, token normalization. © Diabolical Labs",
   "author": "Diana Ismail <diana@deeismail.com> (https://deeismail.com)",
   "publisher": "Diabolical Labs",