@mariozechner/pi-ai 0.50.7 → 0.50.9

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
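The single hunk below is the regenerated source map for openai-completions.d.ts, whose sourcesContent embeds the full src/providers/openai-completions.ts. The only source-level change visible between 0.50.7 and 0.50.9 is a new "qwen" thinking-format branch in buildParams; one mappings segment shifts accordingly (AA+JF → AAkKF) to account for the added lines. A reconstructed source-level diff follows the hunk.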
@@ -1 +1 @@
- {"version":3,"file":"openai-completions.d.ts","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAMX,0BAA0B,EAE1B,MAAM,sCAAsC,CAAC;AAG9C,OAAO,KAAK,EAEX,OAAO,EAEP,KAAK,EACL,uBAAuB,EACvB,mBAAmB,EAEnB,cAAc,EACd,aAAa,EAMb,MAAM,aAAa,CAAC;AA4CrB,MAAM,WAAW,wBAAyB,SAAQ,aAAa;IAC9D,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,UAAU,GAAG;QAAE,IAAI,EAAE,UAAU,CAAC;QAAC,QAAQ,EAAE;YAAE,IAAI,EAAE,MAAM,CAAA;SAAE,CAAA;KAAE,CAAC;IAC7F,eAAe,CAAC,EAAE,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;CAClE;AAED,eAAO,MAAM,uBAAuB,EAAE,cAAc,CAAC,oBAAoB,EAAE,wBAAwB,CAqPlG,CAAC;AAEF,eAAO,MAAM,6BAA6B,EAAE,cAAc,CAAC,oBAAoB,EAAE,mBAAmB,CAmBnG,CAAC;AA+JF,wBAAgB,eAAe,CAC9B,KAAK,EAAE,KAAK,CAAC,oBAAoB,CAAC,EAClC,OAAO,EAAE,OAAO,EAChB,MAAM,EAAE,QAAQ,CAAC,uBAAuB,CAAC,GACvC,0BAA0B,EAAE,CAsO9B","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tChatCompletionAssistantMessageParam,\n\tChatCompletionChunk,\n\tChatCompletionContentPart,\n\tChatCompletionContentPartImage,\n\tChatCompletionContentPartText,\n\tChatCompletionMessageParam,\n\tChatCompletionToolMessageParam,\n} from \"openai/resources/chat/completions.js\";\nimport { getEnvApiKey } from \"../env-api-keys.js\";\nimport { calculateCost, supportsXhigh } from \"../models.js\";\nimport type {\n\tAssistantMessage,\n\tContext,\n\tMessage,\n\tModel,\n\tOpenAICompletionsCompat,\n\tSimpleStreamOptions,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n\tToolResultMessage,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { buildBaseOptions, clampReasoning } from \"./simple-options.js\";\nimport { transformMessages } from \"./transform-messages.js\";\n\n/**\n * Normalize tool call ID for Mistral.\n * Mistral requires tool IDs to be exactly 9 alphanumeric characters (a-z, A-Z, 0-9).\n */\nfunction normalizeMistralToolId(id: string): string {\n\t// Remove non-alphanumeric characters\n\tlet normalized = id.replace(/[^a-zA-Z0-9]/g, \"\");\n\t// Mistral requires exactly 9 characters\n\tif (normalized.length < 9) {\n\t\t// Pad with deterministic characters based on original ID to ensure matching\n\t\tconst padding = \"ABCDEFGHI\";\n\t\tnormalized = normalized + padding.slice(0, 9 - normalized.length);\n\t} else if (normalized.length > 9) {\n\t\tnormalized = normalized.slice(0, 9);\n\t}\n\treturn normalized;\n}\n\n/**\n * Check if conversation messages contain tool calls or tool results.\n * This is needed because Anthropic (via proxy) requires the tools param\n * to be present when messages include tool_calls or tool role messages.\n */\nfunction hasToolHistory(messages: Message[]): boolean {\n\tfor (const msg of messages) {\n\t\tif (msg.role === \"toolResult\") {\n\t\t\treturn true;\n\t\t}\n\t\tif (msg.role === \"assistant\") {\n\t\t\tif (msg.content.some((block) => block.type === \"toolCall\")) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\nexport interface OpenAICompletionsOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"required\" | { type: \"function\"; function: { name: string } };\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\nexport const streamOpenAICompletions: StreamFunction<\"openai-completions\", OpenAICompletionsOptions> = (\n\tmodel: 
Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: OpenAICompletionsOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider) || \"\";\n\t\t\tconst client = createClient(model, context, apiKey, options?.headers);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\toptions?.onPayload?.(params);\n\t\t\tconst openaiStream = await client.chat.completions.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tlet currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tconst finishCurrentBlock = (block?: typeof currentBlock) => {\n\t\t\t\tif (block) {\n\t\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\t\tblock.arguments = JSON.parse(block.partialArgs || \"{}\");\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = 
mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null &&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai compatible endpoints)\n\t\t\t\t\t// Use the first non-empty reasoning field to avoid duplication\n\t\t\t\t\t// (e.g., chutes.ai returns both reasoning_content and reasoning with same content)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\", \"reasoning_text\"];\n\t\t\t\t\tlet foundReasoningField: string | null = null;\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!foundReasoningField) {\n\t\t\t\t\t\t\t\tfoundReasoningField = field;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (foundReasoningField) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\tthinkingSignature: foundReasoningField,\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\tconst delta = (choice.delta as any)[foundReasoningField];\n\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif 
(currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif (toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tconst reasoningDetails = (choice.delta as any).reasoning_details;\n\t\t\t\t\tif (reasoningDetails && Array.isArray(reasoningDetails)) {\n\t\t\t\t\t\tfor (const detail of reasoningDetails) {\n\t\t\t\t\t\t\tif (detail.type === \"reasoning.encrypted\" && detail.id && detail.data) {\n\t\t\t\t\t\t\t\tconst matchingToolCall = output.content.find(\n\t\t\t\t\t\t\t\t\t(b) => b.type === \"toolCall\" && b.id === detail.id,\n\t\t\t\t\t\t\t\t) as ToolCall | undefined;\n\t\t\t\t\t\t\t\tif (matchingToolCall) {\n\t\t\t\t\t\t\t\t\tmatchingToolCall.thoughtSignature = JSON.stringify(detail);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unknown error occurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\t// Some providers via OpenRouter give additional information in this field.\n\t\t\tconst rawMetadata = (error as any)?.error?.metadata?.raw;\n\t\t\tif (rawMetadata) output.errorMessage += `\\n${rawMetadata}`;\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nexport const streamSimpleOpenAICompletions: StreamFunction<\"openai-completions\", SimpleStreamOptions> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream => {\n\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst base = buildBaseOptions(model, options, apiKey);\n\tconst reasoningEffort = supportsXhigh(model) ? 
options?.reasoning : clampReasoning(options?.reasoning);\n\tconst toolChoice = (options as OpenAICompletionsOptions | undefined)?.toolChoice;\n\n\treturn streamOpenAICompletions(model, context, {\n\t\t...base,\n\t\treasoningEffort,\n\t\ttoolChoice,\n\t} satisfies OpenAICompletionsOptions);\n};\n\nfunction createClient(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tapiKey?: string,\n\toptionsHeaders?: Record<string, string>,\n) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\n\tconst headers = { ...model.headers };\n\tif (model.provider === \"github-copilot\") {\n\t\t// Copilot expects X-Initiator to indicate whether the request is user-initiated\n\t\t// or agent-initiated (e.g. follow-up after assistant/tool messages). If there is\n\t\t// no prior message, default to user-initiated.\n\t\tconst messages = context.messages || [];\n\t\tconst lastMessage = messages[messages.length - 1];\n\t\tconst isAgentCall = lastMessage ? lastMessage.role !== \"user\" : false;\n\t\theaders[\"X-Initiator\"] = isAgentCall ? \"agent\" : \"user\";\n\t\theaders[\"Openai-Intent\"] = \"conversation-edits\";\n\n\t\t// Copilot requires this header when sending images\n\t\tconst hasImages = messages.some((msg) => {\n\t\t\tif (msg.role === \"user\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\tif (msg.role === \"toolResult\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\treturn false;\n\t\t});\n\t\tif (hasImages) {\n\t\t\theaders[\"Copilot-Vision-Request\"] = \"true\";\n\t\t}\n\t}\n\n\t// Merge options headers last so they can override defaults\n\tif (optionsHeaders) {\n\t\tObject.assign(headers, optionsHeaders);\n\t}\n\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst compat = getCompat(model);\n\tconst messages = convertMessages(model, context, compat);\n\tmaybeAddOpenRouterAnthropicCacheControl(model, messages);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t};\n\n\tif (compat.supportsUsageInStreaming !== false) {\n\t\t(params as any).stream_options = { include_usage: true };\n\t}\n\n\tif (compat.supportsStore) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\tif (compat.maxTokensField === \"max_tokens\") {\n\t\t\t(params as any).max_tokens = options.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t} else if (hasToolHistory(context.messages)) {\n\t\t// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results\n\t\tparams.tools = [];\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\tif (compat.thinkingFormat === \"zai\" && model.reasoning) {\n\t\t// Z.ai uses binary thinking: { type: \"enabled\" | \"disabled\" }\n\t\t// Must explicitly 
disable since z.ai defaults to thinking enabled\n\t\t(params as any).thinking = { type: options?.reasoningEffort ? \"enabled\" : \"disabled\" };\n\t} else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {\n\t\t// OpenAI-style reasoning_effort\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\t// OpenRouter provider routing preferences\n\tif (model.baseUrl.includes(\"openrouter.ai\") && model.compat?.openRouterRouting) {\n\t\t(params as any).provider = model.compat.openRouterRouting;\n\t}\n\n\t// Vercel AI Gateway provider routing preferences\n\tif (model.baseUrl.includes(\"ai-gateway.vercel.sh\") && model.compat?.vercelGatewayRouting) {\n\t\tconst routing = model.compat.vercelGatewayRouting;\n\t\tif (routing.only || routing.order) {\n\t\t\tconst gatewayOptions: Record<string, string[]> = {};\n\t\t\tif (routing.only) gatewayOptions.only = routing.only;\n\t\t\tif (routing.order) gatewayOptions.order = routing.order;\n\t\t\t(params as any).providerOptions = { gateway: gatewayOptions };\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction maybeAddOpenRouterAnthropicCacheControl(\n\tmodel: Model<\"openai-completions\">,\n\tmessages: ChatCompletionMessageParam[],\n): void {\n\tif (model.provider !== \"openrouter\" || !model.id.startsWith(\"anthropic/\")) return;\n\n\t// Anthropic-style caching requires cache_control on a text part. Add a breakpoint\n\t// on the last user/assistant message (walking backwards until we find text content).\n\tfor (let i = messages.length - 1; i >= 0; i--) {\n\t\tconst msg = messages[i];\n\t\tif (msg.role !== \"user\" && msg.role !== \"assistant\") continue;\n\n\t\tconst content = msg.content;\n\t\tif (typeof content === \"string\") {\n\t\t\tmsg.content = [\n\t\t\t\tObject.assign({ type: \"text\" as const, text: content }, { cache_control: { type: \"ephemeral\" } }),\n\t\t\t];\n\t\t\treturn;\n\t\t}\n\n\t\tif (!Array.isArray(content)) continue;\n\n\t\t// Find last text part and add cache_control\n\t\tfor (let j = content.length - 1; j >= 0; j--) {\n\t\t\tconst part = content[j];\n\t\t\tif (part?.type === \"text\") {\n\t\t\t\tObject.assign(part, { cache_control: { type: \"ephemeral\" } });\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport function convertMessages(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tcompat: Required<OpenAICompletionsCompat>,\n): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst normalizeToolCallId = (id: string): string => {\n\t\tif (compat.requiresMistralToolIds) return normalizeMistralToolId(id);\n\n\t\t// Handle pipe-separated IDs from OpenAI Responses API\n\t\t// Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =)\n\t\t// These come from providers like github-copilot, openai-codex, opencode\n\t\t// Extract just the call_id part and normalize it\n\t\tif (id.includes(\"|\")) {\n\t\t\tconst [callId] = id.split(\"|\");\n\t\t\t// Sanitize to allowed chars and truncate to 40 chars (OpenAI limit)\n\t\t\treturn callId.replace(/[^a-zA-Z0-9_-]/g, \"_\").slice(0, 40);\n\t\t}\n\n\t\tif (model.provider === \"openai\") return id.length > 40 ? 
id.slice(0, 40) : id;\n\t\t// Copilot Claude models route to Claude backend which requires Anthropic ID format\n\t\tif (model.provider === \"github-copilot\" && model.id.toLowerCase().includes(\"claude\")) {\n\t\t\treturn id.replace(/[^a-zA-Z0-9_-]/g, \"_\").slice(0, 64);\n\t\t}\n\t\treturn id;\n\t};\n\n\tconst transformedMessages = transformMessages(context.messages, model, (id) => normalizeToolCallId(id));\n\n\tif (context.systemPrompt) {\n\t\tconst useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;\n\t\tconst role = useDeveloperRole ? \"developer\" : \"system\";\n\t\tparams.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });\n\t}\n\n\tlet lastRole: string | null = null;\n\n\tfor (let i = 0; i < transformedMessages.length; i++) {\n\t\tconst msg = transformedMessages[i];\n\t\t// Some providers (e.g. Mistral/Devstral) don't allow user messages directly after tool results\n\t\t// Insert a synthetic assistant message to bridge the gap\n\t\tif (compat.requiresAssistantAfterToolResult && lastRole === \"toolResult\" && msg.role === \"user\") {\n\t\t\tparams.push({\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: \"I have processed the tool results.\",\n\t\t\t});\n\t\t}\n\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: sanitizeSurrogates(msg.content),\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? content.filter((c) => c.type !== \"image_url\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\t// Some providers (e.g. Mistral) don't accept null content, use empty string instead\n\t\t\tconst assistantMsg: ChatCompletionAssistantMessageParam = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: compat.requiresAssistantAfterToolResult ? 
\"\" : null,\n\t\t\t};\n\n\t\t\tconst textBlocks = msg.content.filter((b) => b.type === \"text\") as TextContent[];\n\t\t\t// Filter out empty text blocks to avoid API validation errors\n\t\t\tconst nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0);\n\t\t\tif (nonEmptyTextBlocks.length > 0) {\n\t\t\t\t// GitHub Copilot requires assistant content as a string, not an array.\n\t\t\t\t// Sending as array causes Claude models to re-answer all previous prompts.\n\t\t\t\tif (model.provider === \"github-copilot\") {\n\t\t\t\t\tassistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join(\"\");\n\t\t\t\t} else {\n\t\t\t\t\tassistantMsg.content = nonEmptyTextBlocks.map((b) => {\n\t\t\t\t\t\treturn { type: \"text\", text: sanitizeSurrogates(b.text) };\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Handle thinking blocks\n\t\t\tconst thinkingBlocks = msg.content.filter((b) => b.type === \"thinking\") as ThinkingContent[];\n\t\t\t// Filter out empty thinking blocks to avoid API validation errors\n\t\t\tconst nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0);\n\t\t\tif (nonEmptyThinkingBlocks.length > 0) {\n\t\t\t\tif (compat.requiresThinkingAsText) {\n\t\t\t\t\t// Convert thinking blocks to plain text (no tags to avoid model mimicking them)\n\t\t\t\t\tconst thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join(\"\\n\\n\");\n\t\t\t\t\tconst textContent = assistantMsg.content as Array<{ type: \"text\"; text: string }> | null;\n\t\t\t\t\tif (textContent) {\n\t\t\t\t\t\ttextContent.unshift({ type: \"text\", text: thinkingText });\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassistantMsg.content = [{ type: \"text\", text: thinkingText }];\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss)\n\t\t\t\t\tconst signature = nonEmptyThinkingBlocks[0].thinkingSignature;\n\t\t\t\t\tif (signature && signature.length > 0) {\n\t\t\t\t\t\t(assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join(\"\\n\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst toolCalls = msg.content.filter((b) => b.type === \"toolCall\") as ToolCall[];\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tassistantMsg.tool_calls = toolCalls.map((tc) => ({\n\t\t\t\t\tid: tc.id,\n\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\tfunction: {\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targuments: JSON.stringify(tc.arguments),\n\t\t\t\t\t},\n\t\t\t\t}));\n\t\t\t\tconst reasoningDetails = toolCalls\n\t\t\t\t\t.filter((tc) => tc.thoughtSignature)\n\t\t\t\t\t.map((tc) => {\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\treturn JSON.parse(tc.thoughtSignature!);\n\t\t\t\t\t\t} catch {\n\t\t\t\t\t\t\treturn null;\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t\t.filter(Boolean);\n\t\t\t\tif (reasoningDetails.length > 0) {\n\t\t\t\t\t(assistantMsg as any).reasoning_details = reasoningDetails;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Skip assistant messages that have no content and no tool calls.\n\t\t\t// Mistral explicitly requires \"either content or tool_calls, but not none\".\n\t\t\t// Other providers also don't accept empty assistant messages.\n\t\t\t// This handles aborted assistant responses that got no content.\n\t\t\tconst content = assistantMsg.content;\n\t\t\tconst hasContent =\n\t\t\t\tcontent !== null &&\n\t\t\t\tcontent !== undefined &&\n\t\t\t\t(typeof content === \"string\" ? 
content.length > 0 : content.length > 0);\n\t\t\tif (!hasContent && !assistantMsg.tool_calls) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tparams.push(assistantMsg);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\tconst imageBlocks: Array<{ type: \"image_url\"; image_url: { url: string } }> = [];\n\t\t\tlet j = i;\n\n\t\t\tfor (; j < transformedMessages.length && transformedMessages[j].role === \"toolResult\"; j++) {\n\t\t\t\tconst toolMsg = transformedMessages[j] as ToolResultMessage;\n\n\t\t\t\t// Extract text and image content\n\t\t\t\tconst textResult = toolMsg.content\n\t\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t\t.join(\"\\n\");\n\t\t\t\tconst hasImages = toolMsg.content.some((c) => c.type === \"image\");\n\n\t\t\t\t// Always send tool result with text (or placeholder if only images)\n\t\t\t\tconst hasText = textResult.length > 0;\n\t\t\t\t// Some providers (e.g. Mistral) require the 'name' field in tool results\n\t\t\t\tconst toolResultMsg: ChatCompletionToolMessageParam = {\n\t\t\t\t\trole: \"tool\",\n\t\t\t\t\tcontent: sanitizeSurrogates(hasText ? textResult : \"(see attached image)\"),\n\t\t\t\t\ttool_call_id: toolMsg.toolCallId,\n\t\t\t\t};\n\t\t\t\tif (compat.requiresToolResultName && toolMsg.toolName) {\n\t\t\t\t\t(toolResultMsg as any).name = toolMsg.toolName;\n\t\t\t\t}\n\t\t\t\tparams.push(toolResultMsg);\n\n\t\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\t\tfor (const block of toolMsg.content) {\n\t\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\t\timageBlocks.push({\n\t\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\t\turl: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ti = j - 1;\n\n\t\t\tif (imageBlocks.length > 0) {\n\t\t\t\tif (compat.requiresAssistantAfterToolResult) {\n\t\t\t\t\tparams.push({\n\t\t\t\t\t\trole: \"assistant\",\n\t\t\t\t\t\tcontent: \"I have processed the tool results.\",\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t...imageBlocks,\n\t\t\t\t\t],\n\t\t\t\t});\n\t\t\t\tlastRole = \"user\";\n\t\t\t} else {\n\t\t\t\tlastRole = \"toolResult\";\n\t\t\t}\n\t\t\tcontinue;\n\t\t}\n\n\t\tlastRole = msg.role;\n\t}\n\n\treturn params;\n}\n\nfunction convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\t\tstrict: false, // Disable strict mode to allow optional parameters without null unions\n\t\t},\n\t}));\n}\n\nfunction mapStopReason(reason: ChatCompletionChunk.Choice[\"finish_reason\"]): StopReason {\n\tif (reason === null) return \"stop\";\n\tswitch (reason) {\n\t\tcase \"stop\":\n\t\t\treturn \"stop\";\n\t\tcase \"length\":\n\t\t\treturn \"length\";\n\t\tcase \"function_call\":\n\t\tcase \"tool_calls\":\n\t\t\treturn \"toolUse\";\n\t\tcase \"content_filter\":\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\n/**\n * Detect compatibility settings from provider and baseUrl for known providers.\n * 
Provider takes precedence over URL-based detection since it's explicitly configured.\n * Returns a fully resolved OpenAICompletionsCompat object with all fields set.\n */\nfunction detectCompat(model: Model<\"openai-completions\">): Required<OpenAICompletionsCompat> {\n\tconst provider = model.provider;\n\tconst baseUrl = model.baseUrl;\n\n\tconst isZai = provider === \"zai\" || baseUrl.includes(\"api.z.ai\");\n\n\tconst isNonStandard =\n\t\tprovider === \"cerebras\" ||\n\t\tbaseUrl.includes(\"cerebras.ai\") ||\n\t\tprovider === \"xai\" ||\n\t\tbaseUrl.includes(\"api.x.ai\") ||\n\t\tprovider === \"mistral\" ||\n\t\tbaseUrl.includes(\"mistral.ai\") ||\n\t\tbaseUrl.includes(\"chutes.ai\") ||\n\t\tbaseUrl.includes(\"deepseek.com\") ||\n\t\tisZai ||\n\t\tprovider === \"opencode\" ||\n\t\tbaseUrl.includes(\"opencode.ai\");\n\n\tconst useMaxTokens = provider === \"mistral\" || baseUrl.includes(\"mistral.ai\") || baseUrl.includes(\"chutes.ai\");\n\n\tconst isGrok = provider === \"xai\" || baseUrl.includes(\"api.x.ai\");\n\n\tconst isMistral = provider === \"mistral\" || baseUrl.includes(\"mistral.ai\");\n\n\treturn {\n\t\tsupportsStore: !isNonStandard,\n\t\tsupportsDeveloperRole: !isNonStandard,\n\t\tsupportsReasoningEffort: !isGrok && !isZai,\n\t\tsupportsUsageInStreaming: true,\n\t\tmaxTokensField: useMaxTokens ? \"max_tokens\" : \"max_completion_tokens\",\n\t\trequiresToolResultName: isMistral,\n\t\trequiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024\n\t\trequiresThinkingAsText: isMistral,\n\t\trequiresMistralToolIds: isMistral,\n\t\tthinkingFormat: isZai ? \"zai\" : \"openai\",\n\t\topenRouterRouting: {},\n\t\tvercelGatewayRouting: {},\n\t};\n}\n\n/**\n * Get resolved compatibility settings for a model.\n * Uses explicit model.compat if provided, otherwise auto-detects from provider/URL.\n */\nfunction getCompat(model: Model<\"openai-completions\">): Required<OpenAICompletionsCompat> {\n\tconst detected = detectCompat(model);\n\tif (!model.compat) return detected;\n\n\treturn {\n\t\tsupportsStore: model.compat.supportsStore ?? detected.supportsStore,\n\t\tsupportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,\n\t\tsupportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,\n\t\tsupportsUsageInStreaming: model.compat.supportsUsageInStreaming ?? detected.supportsUsageInStreaming,\n\t\tmaxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,\n\t\trequiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName,\n\t\trequiresAssistantAfterToolResult:\n\t\t\tmodel.compat.requiresAssistantAfterToolResult ?? detected.requiresAssistantAfterToolResult,\n\t\trequiresThinkingAsText: model.compat.requiresThinkingAsText ?? detected.requiresThinkingAsText,\n\t\trequiresMistralToolIds: model.compat.requiresMistralToolIds ?? detected.requiresMistralToolIds,\n\t\tthinkingFormat: model.compat.thinkingFormat ?? detected.thinkingFormat,\n\t\topenRouterRouting: model.compat.openRouterRouting ?? {},\n\t\tvercelGatewayRouting: model.compat.vercelGatewayRouting ?? detected.vercelGatewayRouting,\n\t};\n}\n"]}
+ {"version":3,"file":"openai-completions.d.ts","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAMX,0BAA0B,EAE1B,MAAM,sCAAsC,CAAC;AAG9C,OAAO,KAAK,EAEX,OAAO,EAEP,KAAK,EACL,uBAAuB,EACvB,mBAAmB,EAEnB,cAAc,EACd,aAAa,EAMb,MAAM,aAAa,CAAC;AA4CrB,MAAM,WAAW,wBAAyB,SAAQ,aAAa;IAC9D,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,UAAU,GAAG;QAAE,IAAI,EAAE,UAAU,CAAC;QAAC,QAAQ,EAAE;YAAE,IAAI,EAAE,MAAM,CAAA;SAAE,CAAA;KAAE,CAAC;IAC7F,eAAe,CAAC,EAAE,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;CAClE;AAED,eAAO,MAAM,uBAAuB,EAAE,cAAc,CAAC,oBAAoB,EAAE,wBAAwB,CAqPlG,CAAC;AAEF,eAAO,MAAM,6BAA6B,EAAE,cAAc,CAAC,oBAAoB,EAAE,mBAAmB,CAmBnG,CAAC;AAkKF,wBAAgB,eAAe,CAC9B,KAAK,EAAE,KAAK,CAAC,oBAAoB,CAAC,EAClC,OAAO,EAAE,OAAO,EAChB,MAAM,EAAE,QAAQ,CAAC,uBAAuB,CAAC,GACvC,0BAA0B,EAAE,CAsO9B","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tChatCompletionAssistantMessageParam,\n\tChatCompletionChunk,\n\tChatCompletionContentPart,\n\tChatCompletionContentPartImage,\n\tChatCompletionContentPartText,\n\tChatCompletionMessageParam,\n\tChatCompletionToolMessageParam,\n} from \"openai/resources/chat/completions.js\";\nimport { getEnvApiKey } from \"../env-api-keys.js\";\nimport { calculateCost, supportsXhigh } from \"../models.js\";\nimport type {\n\tAssistantMessage,\n\tContext,\n\tMessage,\n\tModel,\n\tOpenAICompletionsCompat,\n\tSimpleStreamOptions,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n\tToolResultMessage,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { buildBaseOptions, clampReasoning } from \"./simple-options.js\";\nimport { transformMessages } from \"./transform-messages.js\";\n\n/**\n * Normalize tool call ID for Mistral.\n * Mistral requires tool IDs to be exactly 9 alphanumeric characters (a-z, A-Z, 0-9).\n */\nfunction normalizeMistralToolId(id: string): string {\n\t// Remove non-alphanumeric characters\n\tlet normalized = id.replace(/[^a-zA-Z0-9]/g, \"\");\n\t// Mistral requires exactly 9 characters\n\tif (normalized.length < 9) {\n\t\t// Pad with deterministic characters based on original ID to ensure matching\n\t\tconst padding = \"ABCDEFGHI\";\n\t\tnormalized = normalized + padding.slice(0, 9 - normalized.length);\n\t} else if (normalized.length > 9) {\n\t\tnormalized = normalized.slice(0, 9);\n\t}\n\treturn normalized;\n}\n\n/**\n * Check if conversation messages contain tool calls or tool results.\n * This is needed because Anthropic (via proxy) requires the tools param\n * to be present when messages include tool_calls or tool role messages.\n */\nfunction hasToolHistory(messages: Message[]): boolean {\n\tfor (const msg of messages) {\n\t\tif (msg.role === \"toolResult\") {\n\t\t\treturn true;\n\t\t}\n\t\tif (msg.role === \"assistant\") {\n\t\t\tif (msg.content.some((block) => block.type === \"toolCall\")) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\nexport interface OpenAICompletionsOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"required\" | { type: \"function\"; function: { name: string } };\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\nexport const streamOpenAICompletions: StreamFunction<\"openai-completions\", OpenAICompletionsOptions> = (\n\tmodel: 
Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: OpenAICompletionsOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider) || \"\";\n\t\t\tconst client = createClient(model, context, apiKey, options?.headers);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\toptions?.onPayload?.(params);\n\t\t\tconst openaiStream = await client.chat.completions.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tlet currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tconst finishCurrentBlock = (block?: typeof currentBlock) => {\n\t\t\t\tif (block) {\n\t\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\t\tblock.arguments = JSON.parse(block.partialArgs || \"{}\");\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = 
mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null &&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai compatible endpoints)\n\t\t\t\t\t// Use the first non-empty reasoning field to avoid duplication\n\t\t\t\t\t// (e.g., chutes.ai returns both reasoning_content and reasoning with same content)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\", \"reasoning_text\"];\n\t\t\t\t\tlet foundReasoningField: string | null = null;\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!foundReasoningField) {\n\t\t\t\t\t\t\t\tfoundReasoningField = field;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (foundReasoningField) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\tthinkingSignature: foundReasoningField,\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\tconst delta = (choice.delta as any)[foundReasoningField];\n\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif 
(currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif (toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tconst reasoningDetails = (choice.delta as any).reasoning_details;\n\t\t\t\t\tif (reasoningDetails && Array.isArray(reasoningDetails)) {\n\t\t\t\t\t\tfor (const detail of reasoningDetails) {\n\t\t\t\t\t\t\tif (detail.type === \"reasoning.encrypted\" && detail.id && detail.data) {\n\t\t\t\t\t\t\t\tconst matchingToolCall = output.content.find(\n\t\t\t\t\t\t\t\t\t(b) => b.type === \"toolCall\" && b.id === detail.id,\n\t\t\t\t\t\t\t\t) as ToolCall | undefined;\n\t\t\t\t\t\t\t\tif (matchingToolCall) {\n\t\t\t\t\t\t\t\t\tmatchingToolCall.thoughtSignature = JSON.stringify(detail);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unknown error occurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\t// Some providers via OpenRouter give additional information in this field.\n\t\t\tconst rawMetadata = (error as any)?.error?.metadata?.raw;\n\t\t\tif (rawMetadata) output.errorMessage += `\\n${rawMetadata}`;\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nexport const streamSimpleOpenAICompletions: StreamFunction<\"openai-completions\", SimpleStreamOptions> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream => {\n\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst base = buildBaseOptions(model, options, apiKey);\n\tconst reasoningEffort = supportsXhigh(model) ? 
options?.reasoning : clampReasoning(options?.reasoning);\n\tconst toolChoice = (options as OpenAICompletionsOptions | undefined)?.toolChoice;\n\n\treturn streamOpenAICompletions(model, context, {\n\t\t...base,\n\t\treasoningEffort,\n\t\ttoolChoice,\n\t} satisfies OpenAICompletionsOptions);\n};\n\nfunction createClient(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tapiKey?: string,\n\toptionsHeaders?: Record<string, string>,\n) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\n\tconst headers = { ...model.headers };\n\tif (model.provider === \"github-copilot\") {\n\t\t// Copilot expects X-Initiator to indicate whether the request is user-initiated\n\t\t// or agent-initiated (e.g. follow-up after assistant/tool messages). If there is\n\t\t// no prior message, default to user-initiated.\n\t\tconst messages = context.messages || [];\n\t\tconst lastMessage = messages[messages.length - 1];\n\t\tconst isAgentCall = lastMessage ? lastMessage.role !== \"user\" : false;\n\t\theaders[\"X-Initiator\"] = isAgentCall ? \"agent\" : \"user\";\n\t\theaders[\"Openai-Intent\"] = \"conversation-edits\";\n\n\t\t// Copilot requires this header when sending images\n\t\tconst hasImages = messages.some((msg) => {\n\t\t\tif (msg.role === \"user\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\tif (msg.role === \"toolResult\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\treturn false;\n\t\t});\n\t\tif (hasImages) {\n\t\t\theaders[\"Copilot-Vision-Request\"] = \"true\";\n\t\t}\n\t}\n\n\t// Merge options headers last so they can override defaults\n\tif (optionsHeaders) {\n\t\tObject.assign(headers, optionsHeaders);\n\t}\n\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst compat = getCompat(model);\n\tconst messages = convertMessages(model, context, compat);\n\tmaybeAddOpenRouterAnthropicCacheControl(model, messages);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t};\n\n\tif (compat.supportsUsageInStreaming !== false) {\n\t\t(params as any).stream_options = { include_usage: true };\n\t}\n\n\tif (compat.supportsStore) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\tif (compat.maxTokensField === \"max_tokens\") {\n\t\t\t(params as any).max_tokens = options.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t} else if (hasToolHistory(context.messages)) {\n\t\t// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results\n\t\tparams.tools = [];\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\tif (compat.thinkingFormat === \"zai\" && model.reasoning) {\n\t\t// Z.ai uses binary thinking: { type: \"enabled\" | \"disabled\" }\n\t\t// Must explicitly 
disable since z.ai defaults to thinking enabled\n\t\t(params as any).thinking = { type: options?.reasoningEffort ? \"enabled\" : \"disabled\" };\n\t} else if (compat.thinkingFormat === \"qwen\" && model.reasoning) {\n\t\t// Qwen uses enable_thinking: boolean\n\t\t(params as any).enable_thinking = !!options?.reasoningEffort;\n\t} else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {\n\t\t// OpenAI-style reasoning_effort\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\t// OpenRouter provider routing preferences\n\tif (model.baseUrl.includes(\"openrouter.ai\") && model.compat?.openRouterRouting) {\n\t\t(params as any).provider = model.compat.openRouterRouting;\n\t}\n\n\t// Vercel AI Gateway provider routing preferences\n\tif (model.baseUrl.includes(\"ai-gateway.vercel.sh\") && model.compat?.vercelGatewayRouting) {\n\t\tconst routing = model.compat.vercelGatewayRouting;\n\t\tif (routing.only || routing.order) {\n\t\t\tconst gatewayOptions: Record<string, string[]> = {};\n\t\t\tif (routing.only) gatewayOptions.only = routing.only;\n\t\t\tif (routing.order) gatewayOptions.order = routing.order;\n\t\t\t(params as any).providerOptions = { gateway: gatewayOptions };\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction maybeAddOpenRouterAnthropicCacheControl(\n\tmodel: Model<\"openai-completions\">,\n\tmessages: ChatCompletionMessageParam[],\n): void {\n\tif (model.provider !== \"openrouter\" || !model.id.startsWith(\"anthropic/\")) return;\n\n\t// Anthropic-style caching requires cache_control on a text part. Add a breakpoint\n\t// on the last user/assistant message (walking backwards until we find text content).\n\tfor (let i = messages.length - 1; i >= 0; i--) {\n\t\tconst msg = messages[i];\n\t\tif (msg.role !== \"user\" && msg.role !== \"assistant\") continue;\n\n\t\tconst content = msg.content;\n\t\tif (typeof content === \"string\") {\n\t\t\tmsg.content = [\n\t\t\t\tObject.assign({ type: \"text\" as const, text: content }, { cache_control: { type: \"ephemeral\" } }),\n\t\t\t];\n\t\t\treturn;\n\t\t}\n\n\t\tif (!Array.isArray(content)) continue;\n\n\t\t// Find last text part and add cache_control\n\t\tfor (let j = content.length - 1; j >= 0; j--) {\n\t\t\tconst part = content[j];\n\t\t\tif (part?.type === \"text\") {\n\t\t\t\tObject.assign(part, { cache_control: { type: \"ephemeral\" } });\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport function convertMessages(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tcompat: Required<OpenAICompletionsCompat>,\n): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst normalizeToolCallId = (id: string): string => {\n\t\tif (compat.requiresMistralToolIds) return normalizeMistralToolId(id);\n\n\t\t// Handle pipe-separated IDs from OpenAI Responses API\n\t\t// Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =)\n\t\t// These come from providers like github-copilot, openai-codex, opencode\n\t\t// Extract just the call_id part and normalize it\n\t\tif (id.includes(\"|\")) {\n\t\t\tconst [callId] = id.split(\"|\");\n\t\t\t// Sanitize to allowed chars and truncate to 40 chars (OpenAI limit)\n\t\t\treturn callId.replace(/[^a-zA-Z0-9_-]/g, \"_\").slice(0, 40);\n\t\t}\n\n\t\tif (model.provider === \"openai\") return id.length > 40 ? 
… [remainder of the old openai-completions.d.ts.map line omitted: base64-VLQ mappings plus an embedded sourcesContent copy of src/providers/openai-completions.ts]
@@ -362,6 +362,10 @@ function buildParams(model, context, options) {
         // Must explicitly disable since z.ai defaults to thinking enabled
         params.thinking = { type: options?.reasoningEffort ? "enabled" : "disabled" };
     }
+    else if (compat.thinkingFormat === "qwen" && model.reasoning) {
+        // Qwen uses enable_thinking: boolean
+        params.enable_thinking = !!options?.reasoningEffort;
+    }
     else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {
         // OpenAI-style reasoning_effort
         params.reasoning_effort = options.reasoningEffort;
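
The functional change in this hunk is the new qwen branch: alongside the existing z.ai object form ({ type: "enabled" | "disabled" }) and the OpenAI-style reasoning_effort passthrough, Qwen-compatible endpoints now get a plain enable_thinking boolean derived from whether any reasoning effort was requested. Below is a minimal TypeScript sketch of this dispatch; ThinkingCompat and applyThinking are simplified stand-in names for illustration, not the package's actual exports.

// Sketch of the thinking-format dispatch shown in the hunk above.
// ThinkingCompat/applyThinking are hypothetical, simplified shapes.
type ThinkingFormat = "openai" | "zai" | "qwen";

interface ThinkingCompat {
	thinkingFormat: ThinkingFormat;
	supportsReasoningEffort: boolean;
}

function applyThinking(
	params: Record<string, unknown>,
	compat: ThinkingCompat,
	modelReasoning: boolean,
	reasoningEffort?: string,
): void {
	if (compat.thinkingFormat === "zai" && modelReasoning) {
		// z.ai: object with an explicit enabled/disabled type
		// (must be sent because the server defaults to thinking enabled)
		params.thinking = { type: reasoningEffort ? "enabled" : "disabled" };
	} else if (compat.thinkingFormat === "qwen" && modelReasoning) {
		// Qwen: plain boolean; any requested effort level turns thinking on
		params.enable_thinking = !!reasoningEffort;
	} else if (reasoningEffort && modelReasoning && compat.supportsReasoningEffort) {
		// OpenAI-style: forward the effort level verbatim
		params.reasoning_effort = reasoningEffort;
	}
}

// Example: a Qwen reasoning model with any effort level gets enable_thinking: true;
// with no effort requested the flag is explicitly false.
const params: Record<string, unknown> = {};
applyThinking(params, { thinkingFormat: "qwen", supportsReasoningEffort: false }, true, "low");
// params is now { enable_thinking: true }
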
@@ -1 +1 @@
- [old openai-completions.js.map line omitted: base64-VLQ mappings plus an embedded sourcesContent copy of src/providers/openai-completions.ts, duplicating the source already embedded in the .d.ts.map above]
1
+ {"version":3,"file":"openai-completions.js","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAU5B,OAAO,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAClD,OAAO,EAAE,aAAa,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AAiB5D,OAAO,EAAE,2BAA2B,EAAE,MAAM,0BAA0B,CAAC;AACvE,OAAO,EAAE,kBAAkB,EAAE,MAAM,wBAAwB,CAAC;AAC5D,OAAO,EAAE,kBAAkB,EAAE,MAAM,8BAA8B,CAAC;AAClE,OAAO,EAAE,gBAAgB,EAAE,cAAc,EAAE,MAAM,qBAAqB,CAAC;AACvE,OAAO,EAAE,iBAAiB,EAAE,MAAM,yBAAyB,CAAC;AAE5D;;;GAGG;AACH,SAAS,sBAAsB,CAAC,EAAU,EAAU;IACnD,qCAAqC;IACrC,IAAI,UAAU,GAAG,EAAE,CAAC,OAAO,CAAC,eAAe,EAAE,EAAE,CAAC,CAAC;IACjD,wCAAwC;IACxC,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAC3B,4EAA4E;QAC5E,MAAM,OAAO,GAAG,WAAW,CAAC;QAC5B,UAAU,GAAG,UAAU,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC;IACnE,CAAC;SAAM,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAClC,UAAU,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;IACrC,CAAC;IACD,OAAO,UAAU,CAAC;AAAA,CAClB;AAED;;;;GAIG;AACH,SAAS,cAAc,CAAC,QAAmB,EAAW;IACrD,KAAK,MAAM,GAAG,IAAI,QAAQ,EAAE,CAAC;QAC5B,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YAC/B,OAAO,IAAI,CAAC;QACb,CAAC;QACD,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YAC9B,IAAI,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,KAAK,UAAU,CAAC,EAAE,CAAC;gBAC5D,OAAO,IAAI,CAAC;YACb,CAAC;QACF,CAAC;IACF,CAAC;IACD,OAAO,KAAK,CAAC;AAAA,CACb;AAOD,MAAM,CAAC,MAAM,uBAAuB,GAAmE,CACtG,KAAkC,EAClC,OAAgB,EAChB,OAAkC,EACJ,EAAE,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,2BAA2B,EAAE,CAAC;IAEjD,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,MAAM,GAAqB;YAChC,IAAI,EAAE,WAAW;YACjB,OAAO,EAAE,EAAE;YACX,GAAG,EAAE,KAAK,CAAC,GAAG;YACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;YACxB,KAAK,EAAE,KAAK,CAAC,EAAE;YACf,KAAK,EAAE;gBACN,KAAK,EAAE,CAAC;gBACR,MAAM,EAAE,CAAC;gBACT,SAAS,EAAE,CAAC;gBACZ,UAAU,EAAE,CAAC;gBACb,WAAW,EAAE,CAAC;gBACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;aACpE;YACD,UAAU,EAAE,MAAM;YAClB,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;SACrB,CAAC;QAEF,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,OAAO,EAAE,MAAM,IAAI,YAAY,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC;YACrE,MAAM,MAAM,GAAG,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YACtE,MAAM,MAAM,GAAG,WAAW,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YACpD,OAAO,EAAE,SAAS,EAAE,CAAC,MAAM,CAAC,CAAC;YAC7B,MAAM,YAAY,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAC/F,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAEhD,IAAI,YAAY,GAAiF,IAAI,CAAC;YACtG,MAAM,MAAM,GAAG,MAAM,CAAC,OAAO,CAAC;YAC9B,MAAM,UAAU,GAAG,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC;YAC3C,MAAM,kBAAkB,GAAG,CAAC,KAA2B,EAAE,EAAE,CAAC;gBAC3D,IAAI,KAAK,EAAE,CAAC;oBACX,IAAI,KAAK,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC3B,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,UAAU;4BAChB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,IAAI;4BACnB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,QAAQ;4BACvB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,KAAK,CAAC,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,WAAW,IAAI,IAAI,CAAC,CAAC;wBACxD,OAAO,KAAK,CAAC,WAAW,CAAC;wBACzB,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,QAAQ,EAAE,KAAK;4BACf,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;YAAA,CACD,CAAC;YAEF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,YAAY,EAAE,CAAC;gBACxC,IAAI,KAAK,CAAC,KAAK,EAAE,CAAC;oBACjB,MAAM,YAAY,GA
AG,KAAK,CAAC,KAAK,CAAC,qBAAqB,EAAE,aAAa,IAAI,CAAC,CAAC;oBAC3E,MAAM,eAAe,GAAG,KAAK,CAAC,KAAK,CAAC,yBAAyB,EAAE,gBAAgB,IAAI,CAAC,CAAC;oBACrF,MAAM,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,aAAa,IAAI,CAAC,CAAC,GAAG,YAAY,CAAC;oBAC9D,MAAM,YAAY,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,iBAAiB,IAAI,CAAC,CAAC,GAAG,eAAe,CAAC;oBAC5E,MAAM,CAAC,KAAK,GAAG;wBACd,sFAAsF;wBACtF,KAAK;wBACL,MAAM,EAAE,YAAY;wBACpB,SAAS,EAAE,YAAY;wBACvB,UAAU,EAAE,CAAC;wBACb,wEAAwE;wBACxE,qEAAqE;wBACrE,WAAW,EAAE,KAAK,GAAG,YAAY,GAAG,YAAY;wBAChD,IAAI,EAAE;4BACL,KAAK,EAAE,CAAC;4BACR,MAAM,EAAE,CAAC;4BACT,SAAS,EAAE,CAAC;4BACZ,UAAU,EAAE,CAAC;4BACb,KAAK,EAAE,CAAC;yBACR;qBACD,CAAC;oBACF,aAAa,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC;gBACpC,CAAC;gBAED,MAAM,MAAM,GAAG,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;gBAChC,IAAI,CAAC,MAAM;oBAAE,SAAS;gBAEtB,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;oBAC1B,MAAM,CAAC,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;gBACzD,CAAC;gBAED,IAAI,MAAM,CAAC,KAAK,EAAE,CAAC;oBAClB,IACC,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,IAAI;wBAC7B,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,SAAS;wBAClC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAC9B,CAAC;wBACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BACnD,kBAAkB,CAAC,YAAY,CAAC,CAAC;4BACjC,YAAY,GAAG,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,EAAE,EAAE,CAAC;4BAC1C,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;4BAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;wBAClF,CAAC;wBAED,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BAClC,YAAY,CAAC,IAAI,IAAI,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC;4BAC1C,MAAM,CAAC,IAAI,CAAC;gCACX,IAAI,EAAE,YAAY;gCAClB,YAAY,EAAE,UAAU,EAAE;gCAC1B,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,OAAO;gCAC3B,OAAO,EAAE,MAAM;6BACf,CAAC,CAAC;wBACJ,CAAC;oBACF,CAAC;oBAED,oEAAoE;oBACpE,mDAAmD;oBACnD,+DAA+D;oBAC/D,mFAAmF;oBACnF,MAAM,eAAe,GAAG,CAAC,mBAAmB,EAAE,WAAW,EAAE,gBAAgB,CAAC,CAAC;oBAC7E,IAAI,mBAAmB,GAAkB,IAAI,CAAC;oBAC9C,KAAK,MAAM,KAAK,IAAI,eAAe,EAAE,CAAC;wBACrC,IACE,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,IAAI;4BACpC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,SAAS;4BACzC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC,MAAM,GAAG,CAAC,EACtC,CAAC;4BACF,IAAI,CAAC,mBAAmB,EAAE,CAAC;gCAC1B,mBAAmB,GAAG,KAAK,CAAC;gCAC5B,MAAM;4BACP,CAAC;wBACF,CAAC;oBACF,CAAC;oBAED,IAAI,mBAAmB,EAAE,CAAC;wBACzB,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;4BACvD,kBAAkB,CAAC,YAAY,CAAC,CAAC;4BACjC,YAAY,GAAG;gCACd,IAAI,EAAE,UAAU;gCAChB,QAAQ,EAAE,EAAE;gCACZ,iBAAiB,EAAE,mBAAmB;6BACtC,CAAC;4BACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;4BAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;wBACtF,CAAC;wBAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;4BACtC,MAAM,KAAK,GAAI,MAAM,CAAC,KAAa,CAAC,mBAAmB,CAAC,CAAC;4BACzD,YAAY,CAAC,QAAQ,IAAI,KAAK,CAAC;4BAC/B,MAAM,CAAC,IAAI,CAAC;gCACX,IAAI,EAAE,gBAAgB;gCACtB,YAAY,EAAE,UAAU,EAAE;gCAC1B,KAAK;gCACL,OAAO,EAAE,MAAM;6BACf,CAAC,CAAC;wBACJ,CAAC;oBACF,CAAC;oBAED,IAAI,MAAM,EAAE,KAAK,EAAE,UAAU,EAAE,CAAC;wBAC/B,KAAK,MAAM,QAAQ,IAAI,MAAM,CAAC,KAAK,CAAC,UAAU,EAAE,CAAC;4BAChD,IACC,CAAC,YAAY;gCACb,YAAY,CAAC,IAAI,KAAK,UAAU;gCAChC,CAAC,QAAQ,CAAC,EAAE,IAAI,YAAY,CAAC,EAAE,KAAK,QAAQ,CAAC,EAAE,CAAC,EAC/C,CAAC;gCACF,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,EAAE,EAAE,QAAQ,CAAC,EAAE,IAAI,EAAE;oCACrB,IAAI,EAAE,QAAQ,CAAC,QAAQ,EAAE,IAAI,IAAI,EAAE;oCACnC,SAAS,EAAE,EAAE;oCACb,WAAW,EAAE,EAAE;iCACf,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,IAAI,QAAQ,CAAC,EAAE;oCAAE,
YAAY,CAAC,EAAE,GAAG,QAAQ,CAAC,EAAE,CAAC;gCAC/C,IAAI,QAAQ,CAAC,QAAQ,EAAE,IAAI;oCAAE,YAAY,CAAC,IAAI,GAAG,QAAQ,CAAC,QAAQ,CAAC,IAAI,CAAC;gCACxE,IAAI,KAAK,GAAG,EAAE,CAAC;gCACf,IAAI,QAAQ,CAAC,QAAQ,EAAE,SAAS,EAAE,CAAC;oCAClC,KAAK,GAAG,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACpC,YAAY,CAAC,WAAW,IAAI,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACxD,YAAY,CAAC,SAAS,GAAG,kBAAkB,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;gCACvE,CAAC;gCACD,MAAM,CAAC,IAAI,CAAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;oBAED,MAAM,gBAAgB,GAAI,MAAM,CAAC,KAAa,CAAC,iBAAiB,CAAC;oBACjE,IAAI,gBAAgB,IAAI,KAAK,CAAC,OAAO,CAAC,gBAAgB,CAAC,EAAE,CAAC;wBACzD,KAAK,MAAM,MAAM,IAAI,gBAAgB,EAAE,CAAC;4BACvC,IAAI,MAAM,CAAC,IAAI,KAAK,qBAAqB,IAAI,MAAM,CAAC,EAAE,IAAI,MAAM,CAAC,IAAI,EAAE,CAAC;gCACvE,MAAM,gBAAgB,GAAG,MAAM,CAAC,OAAO,CAAC,IAAI,CAC3C,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,IAAI,CAAC,CAAC,EAAE,KAAK,MAAM,CAAC,EAAE,CAC1B,CAAC;gCAC1B,IAAI,gBAAgB,EAAE,CAAC;oCACtB,gBAAgB,CAAC,gBAAgB,GAAG,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC;gCAC5D,CAAC;4BACF,CAAC;wBACF,CAAC;oBACF,CAAC;gBACF,CAAC;YACF,CAAC;YAED,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAEjC,IAAI,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC;gBAC9B,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;YACxC,CAAC;YAED,IAAI,MAAM,CAAC,UAAU,KAAK,SAAS,IAAI,MAAM,CAAC,UAAU,KAAK,OAAO,EAAE,CAAC;gBACtE,MAAM,IAAI,KAAK,CAAC,2BAA2B,CAAC,CAAC;YAC9C,CAAC;YAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAC1E,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YAChB,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,OAAO;gBAAE,OAAQ,KAAa,CAAC,KAAK,CAAC;YAChE,MAAM,CAAC,UAAU,GAAG,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC;YACnE,MAAM,CAAC,YAAY,GAAG,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC;YACrF,2EAA2E;YAC3E,MAAM,WAAW,GAAI,KAAa,EAAE,KAAK,EAAE,QAAQ,EAAE,GAAG,CAAC;YACzD,IAAI,WAAW;gBAAE,MAAM,CAAC,YAAY,IAAI,KAAK,WAAW,EAAE,CAAC;YAC3D,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,KAAK,EAAE,MAAM,EAAE,CAAC,CAAC;YACzE,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;IAAA,CACD,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd,CAAC;AAEF,MAAM,CAAC,MAAM,6BAA6B,GAA8D,CACvG,KAAkC,EAClC,OAAgB,EAChB,OAA6B,EACC,EAAE,CAAC;IACjC,MAAM,MAAM,GAAG,OAAO,EAAE,MAAM,IAAI,YAAY,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;IAC/D,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,4BAA4B,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC;IAC/D,CAAC;IAED,MAAM,IAAI,GAAG,gBAAgB,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IACtD,MAAM,eAAe,GAAG,aAAa,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,OAAO,EAAE,SAAS,CAAC,CAAC,CAAC,cAAc,CAAC,OAAO,EAAE,SAAS,CAAC,CAAC;IACvG,MAAM,UAAU,GAAI,OAAgD,EAAE,UAAU,CAAC;IAEjF,OAAO,uBAAuB,CAAC,KAAK,EAAE,OAAO,EAAE;QAC9C,GAAG,IAAI;QACP,eAAe;QACf,UAAU;KACyB,CAAC,CAAC;AAAA,CACtC,CAAC;AAEF,SAAS,YAAY,CACpB,KAAkC,EAClC,OAAgB,EAChB,MAAe,EACf,cAAuC,EACtC;IACD,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC;YACjC,MAAM,IAAI,KAAK,CACd,gGAAgG,CAChG,CAAC;QACH,CAAC;QACD,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC;IACrC,CAAC;IAED,MAAM,OAAO,GAAG,EAAE,GAAG,KAAK,CAAC,OAAO,EAAE,CAAC;IACrC,IAAI,KAAK,CAAC,QAAQ,KAAK,gBAAgB,EAAE,CAAC;QACzC,gFAAgF;QAChF,iFAAiF;QACjF,+CAA+C;QAC/C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,EAAE,CAAC;QACxC,MAAM,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;QAClD,MAAM,WAAW,GAAG,WAAW,CAAC,CAAC,CAAC,WAAW,CAAC,IAAI,KAAK,MAAM,CAAC,CAAC,CAAC,KAAK,CAAC;QACtE,OAAO,CAAC,aAAa,CAAC,GAAG,WAAW,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC;QACxD,OAAO,CAAC,eAAe,CAAC,GAAG,oBAAoB,CAAC;QAEhD,mDAAmD;QACnD,MAAM
,SAAS,GAAG,QAAQ,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC;YACxC,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,IAAI,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC;gBACvD,OAAO,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC;YACpD,CAAC;YACD,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,IAAI,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC;gBAC7D,OAAO,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC;YACpD,CAAC;YACD,OAAO,KAAK,CAAC;QAAA,CACb,CAAC,CAAC;QACH,IAAI,SAAS,EAAE,CAAC;YACf,OAAO,CAAC,wBAAwB,CAAC,GAAG,MAAM,CAAC;QAC5C,CAAC;IACF,CAAC;IAED,2DAA2D;IAC3D,IAAI,cAAc,EAAE,CAAC;QACpB,MAAM,CAAC,MAAM,CAAC,OAAO,EAAE,cAAc,CAAC,CAAC;IACxC,CAAC;IAED,OAAO,IAAI,MAAM,CAAC;QACjB,MAAM;QACN,OAAO,EAAE,KAAK,CAAC,OAAO;QACtB,uBAAuB,EAAE,IAAI;QAC7B,cAAc,EAAE,OAAO;KACvB,CAAC,CAAC;AAAA,CACH;AAED,SAAS,WAAW,CAAC,KAAkC,EAAE,OAAgB,EAAE,OAAkC,EAAE;IAC9G,MAAM,MAAM,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC;IAChC,MAAM,QAAQ,GAAG,eAAe,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IACzD,uCAAuC,CAAC,KAAK,EAAE,QAAQ,CAAC,CAAC;IAEzD,MAAM,MAAM,GAAgE;QAC3E,KAAK,EAAE,KAAK,CAAC,EAAE;QACf,QAAQ;QACR,MAAM,EAAE,IAAI;KACZ,CAAC;IAEF,IAAI,MAAM,CAAC,wBAAwB,KAAK,KAAK,EAAE,CAAC;QAC9C,MAAc,CAAC,cAAc,GAAG,EAAE,aAAa,EAAE,IAAI,EAAE,CAAC;IAC1D,CAAC;IAED,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;QAC1B,MAAM,CAAC,KAAK,GAAG,KAAK,CAAC;IACtB,CAAC;IAED,IAAI,OAAO,EAAE,SAAS,EAAE,CAAC;QACxB,IAAI,MAAM,CAAC,cAAc,KAAK,YAAY,EAAE,CAAC;YAC3C,MAAc,CAAC,UAAU,GAAG,OAAO,CAAC,SAAS,CAAC;QAChD,CAAC;aAAM,CAAC;YACP,MAAM,CAAC,qBAAqB,GAAG,OAAO,CAAC,SAAS,CAAC;QAClD,CAAC;IACF,CAAC;IAED,IAAI,OAAO,EAAE,WAAW,KAAK,SAAS,EAAE,CAAC;QACxC,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,CAAC;IAC1C,CAAC;IAED,IAAI,OAAO,CAAC,KAAK,EAAE,CAAC;QACnB,MAAM,CAAC,KAAK,GAAG,YAAY,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;IAC5C,CAAC;SAAM,IAAI,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC7C,mGAAmG;QACnG,MAAM,CAAC,KAAK,GAAG,EAAE,CAAC;IACnB,CAAC;IAED,IAAI,OAAO,EAAE,UAAU,EAAE,CAAC;QACzB,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,UAAU,CAAC;IACzC,CAAC;IAED,IAAI,MAAM,CAAC,cAAc,KAAK,KAAK,IAAI,KAAK,CAAC,SAAS,EAAE,CAAC;QACxD,8DAA8D;QAC9D,kEAAkE;QACjE,MAAc,CAAC,QAAQ,GAAG,EAAE,IAAI,EAAE,OAAO,EAAE,eAAe,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,UAAU,EAAE,CAAC;IACxF,CAAC;SAAM,IAAI,MAAM,CAAC,cAAc,KAAK,MAAM,IAAI,KAAK,CAAC,SAAS,EAAE,CAAC;QAChE,qCAAqC;QACpC,MAAc,CAAC,eAAe,GAAG,CAAC,CAAC,OAAO,EAAE,eAAe,CAAC;IAC9D,CAAC;SAAM,IAAI,OAAO,EAAE,eAAe,IAAI,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,uBAAuB,EAAE,CAAC;QAC1F,gCAAgC;QAChC,MAAM,CAAC,gBAAgB,GAAG,OAAO,CAAC,eAAe,CAAC;IACnD,CAAC;IAED,0CAA0C;IAC1C,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAC,IAAI,KAAK,CAAC,MAAM,EAAE,iBAAiB,EAAE,CAAC;QAC/E,MAAc,CAAC,QAAQ,GAAG,KAAK,CAAC,MAAM,CAAC,iBAAiB,CAAC;IAC3D,CAAC;IAED,iDAAiD;IACjD,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,sBAAsB,CAAC,IAAI,KAAK,CAAC,MAAM,EAAE,oBAAoB,EAAE,CAAC;QAC1F,MAAM,OAAO,GAAG,KAAK,CAAC,MAAM,CAAC,oBAAoB,CAAC;QAClD,IAAI,OAAO,CAAC,IAAI,IAAI,OAAO,CAAC,KAAK,EAAE,CAAC;YACnC,MAAM,cAAc,GAA6B,EAAE,CAAC;YACpD,IAAI,OAAO,CAAC,IAAI;gBAAE,cAAc,CAAC,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;YACrD,IAAI,OAAO,CAAC,KAAK;gBAAE,cAAc,CAAC,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC;YACvD,MAAc,CAAC,eAAe,GAAG,EAAE,OAAO,EAAE,cAAc,EAAE,CAAC;QAC/D,CAAC;IACF,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,uCAAuC,CAC/C,KAAkC,EAClC,QAAsC,EAC/B;IACP,IAAI,KAAK,CAAC,QAAQ,KAAK,YAAY,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,UAAU,CAAC,YAAY,CAAC;QAAE,OAAO;IAElF,kFAAkF;IAClF,qFAAqF;IACrF,KAAK,IAAI,CAAC,GAAG,QAAQ,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;QAC/C,MAAM,GAAG,GAAG,QAAQ,CAAC,CAAC,CAAC,CAAC;QACxB,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW;YAAE,SAAS;QAE9D,MAAM,OAAO,GAAG,GAAG,CAAC,OAAO,CAAC;QA
C5B,IAAI,OAAO,OAAO,KAAK,QAAQ,EAAE,CAAC;YACjC,GAAG,CAAC,OAAO,GAAG;gBACb,MAAM,CAAC,MAAM,CAAC,EAAE,IAAI,EAAE,MAAe,EAAE,IAAI,EAAE,OAAO,EAAE,EAAE,EAAE,aAAa,EAAE,EAAE,IAAI,EAAE,WAAW,EAAE,EAAE,CAAC;aACjG,CAAC;YACF,OAAO;QACR,CAAC;QAED,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC;YAAE,SAAS;QAEtC,4CAA4C;QAC5C,KAAK,IAAI,CAAC,GAAG,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YAC9C,MAAM,IAAI,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC;YACxB,IAAI,IAAI,EAAE,IAAI,KAAK,MAAM,EAAE,CAAC;gBAC3B,MAAM,CAAC,MAAM,CAAC,IAAI,EAAE,EAAE,aAAa,EAAE,EAAE,IAAI,EAAE,WAAW,EAAE,EAAE,CAAC,CAAC;gBAC9D,OAAO;YACR,CAAC;QACF,CAAC;IACF,CAAC;AAAA,CACD;AAED,MAAM,UAAU,eAAe,CAC9B,KAAkC,EAClC,OAAgB,EAChB,MAAyC,EACV;IAC/B,MAAM,MAAM,GAAiC,EAAE,CAAC;IAEhD,MAAM,mBAAmB,GAAG,CAAC,EAAU,EAAU,EAAE,CAAC;QACnD,IAAI,MAAM,CAAC,sBAAsB;YAAE,OAAO,sBAAsB,CAAC,EAAE,CAAC,CAAC;QAErE,sDAAsD;QACtD,mFAAmF;QACnF,wEAAwE;QACxE,iDAAiD;QACjD,IAAI,EAAE,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;YACtB,MAAM,CAAC,MAAM,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;YAC/B,oEAAoE;YACpE,OAAO,MAAM,CAAC,OAAO,CAAC,iBAAiB,EAAE,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;QAC5D,CAAC;QAED,IAAI,KAAK,CAAC,QAAQ,KAAK,QAAQ;YAAE,OAAO,EAAE,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,CAAC;QAC9E,mFAAmF;QACnF,IAAI,KAAK,CAAC,QAAQ,KAAK,gBAAgB,IAAI,KAAK,CAAC,EAAE,CAAC,WAAW,EAAE,CAAC,QAAQ,CAAC,QAAQ,CAAC,EAAE,CAAC;YACtF,OAAO,EAAE,CAAC,OAAO,CAAC,iBAAiB,EAAE,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;QACxD,CAAC;QACD,OAAO,EAAE,CAAC;IAAA,CACV,CAAC;IAEF,MAAM,mBAAmB,GAAG,iBAAiB,CAAC,OAAO,CAAC,QAAQ,EAAE,KAAK,EAAE,CAAC,EAAE,EAAE,EAAE,CAAC,mBAAmB,CAAC,EAAE,CAAC,CAAC,CAAC;IAExG,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC1B,MAAM,gBAAgB,GAAG,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,qBAAqB,CAAC;QACzE,MAAM,IAAI,GAAG,gBAAgB,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC;QACvD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC,CAAC;IAChF,CAAC;IAED,IAAI,QAAQ,GAAkB,IAAI,CAAC;IAEnC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,mBAAmB,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QACrD,MAAM,GAAG,GAAG,mBAAmB,CAAC,CAAC,CAAC,CAAC;QACnC,+FAA+F;QAC/F,yDAAyD;QACzD,IAAI,MAAM,CAAC,gCAAgC,IAAI,QAAQ,KAAK,YAAY,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;YACjG,MAAM,CAAC,IAAI,CAAC;gBACX,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,oCAAoC;aAC7C,CAAC,CAAC;QACJ,CAAC;QAED,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;YACzB,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;gBACrC,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,kBAAkB,CAAC,GAAG,CAAC,OAAO,CAAC;iBACxC,CAAC,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACP,MAAM,OAAO,GAAgC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,IAAI,EAA6B,EAAE,CAAC;oBACjG,IAAI,IAAI,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC1B,OAAO;4BACN,IAAI,EAAE,MAAM;4BACZ,IAAI,EAAE,kBAAkB,CAAC,IAAI,CAAC,IAAI,CAAC;yBACK,CAAC;oBAC3C,CAAC;yBAAM,CAAC;wBACP,OAAO;4BACN,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAQ,IAAI,CAAC,QAAQ,WAAW,IAAI,CAAC,IAAI,EAAE;6BAChD;yBACwC,CAAC;oBAC5C,CAAC;gBAAA,CACD,CAAC,CAAC;gBACH,MAAM,eAAe,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC;oBACrD,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,CAAC;oBAC/C,CAAC,CAAC,OAAO,CAAC;gBACX,IAAI,eAAe,CAAC,MAAM,KAAK,CAAC;oBAAE,SAAS;gBAC3C,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,eAAe;iBACxB,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACrC,oFAAoF;YACpF,MAAM,YAAY,GAAwC;gBACzD,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,MAAM,CAAC,gCAAgC,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI;aAC5D,CAAC;YAEF,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CA
AC,IAAI,KAAK,MAAM,CAAkB,CAAC;YACjF,8DAA8D;YAC9D,MAAM,kBAAkB,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;YACxF,IAAI,kBAAkB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACnC,uEAAuE;gBACvE,2EAA2E;gBAC3E,IAAI,KAAK,CAAC,QAAQ,KAAK,gBAAgB,EAAE,CAAC;oBACzC,YAAY,CAAC,OAAO,GAAG,kBAAkB,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,kBAAkB,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;gBAC3F,CAAC;qBAAM,CAAC;oBACP,YAAY,CAAC,OAAO,GAAG,kBAAkB,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;wBACpD,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,kBAAkB,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;oBAAA,CAC1D,CAAC,CAAC;gBACJ,CAAC;YACF,CAAC;YAED,yBAAyB;YACzB,MAAM,cAAc,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAsB,CAAC;YAC7F,kEAAkE;YAClE,MAAM,sBAAsB,GAAG,cAAc,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,IAAI,CAAC,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;YACxG,IAAI,sBAAsB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACvC,IAAI,MAAM,CAAC,sBAAsB,EAAE,CAAC;oBACnC,gFAAgF;oBAChF,MAAM,YAAY,GAAG,sBAAsB,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;oBAChF,MAAM,WAAW,GAAG,YAAY,CAAC,OAAuD,CAAC;oBACzF,IAAI,WAAW,EAAE,CAAC;wBACjB,WAAW,CAAC,OAAO,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;oBAC3D,CAAC;yBAAM,CAAC;wBACP,YAAY,CAAC,OAAO,GAAG,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;oBAC/D,CAAC;gBACF,CAAC;qBAAM,CAAC;oBACP,gGAAgG;oBAChG,MAAM,SAAS,GAAG,sBAAsB,CAAC,CAAC,CAAC,CAAC,iBAAiB,CAAC;oBAC9D,IAAI,SAAS,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;wBACtC,YAAoB,CAAC,SAAS,CAAC,GAAG,sBAAsB,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;oBAC7F,CAAC;gBACF,CAAC;YACF,CAAC;YAED,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAe,CAAC;YACjF,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC1B,YAAY,CAAC,UAAU,GAAG,SAAS,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC;oBAChD,EAAE,EAAE,EAAE,CAAC,EAAE;oBACT,IAAI,EAAE,UAAmB;oBACzB,QAAQ,EAAE;wBACT,IAAI,EAAE,EAAE,CAAC,IAAI;wBACb,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,SAAS,CAAC;qBACvC;iBACD,CAAC,CAAC,CAAC;gBACJ,MAAM,gBAAgB,GAAG,SAAS;qBAChC,MAAM,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,gBAAgB,CAAC;qBACnC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC;oBACZ,IAAI,CAAC;wBACJ,OAAO,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,gBAAiB,CAAC,CAAC;oBACzC,CAAC;oBAAC,MAAM,CAAC;wBACR,OAAO,IAAI,CAAC;oBACb,CAAC;gBAAA,CACD,CAAC;qBACD,MAAM,CAAC,OAAO,CAAC,CAAC;gBAClB,IAAI,gBAAgB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAChC,YAAoB,CAAC,iBAAiB,GAAG,gBAAgB,CAAC;gBAC5D,CAAC;YACF,CAAC;YACD,kEAAkE;YAClE,4EAA4E;YAC5E,8DAA8D;YAC9D,gEAAgE;YAChE,MAAM,OAAO,GAAG,YAAY,CAAC,OAAO,CAAC;YACrC,MAAM,UAAU,GACf,OAAO,KAAK,IAAI;gBAChB,OAAO,KAAK,SAAS;gBACrB,CAAC,OAAO,OAAO,KAAK,QAAQ,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;YACzE,IAAI,CAAC,UAAU,IAAI,CAAC,YAAY,CAAC,UAAU,EAAE,CAAC;gBAC7C,SAAS;YACV,CAAC;YACD,MAAM,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAC3B,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YACtC,MAAM,WAAW,GAA6D,EAAE,CAAC;YACjF,IAAI,CAAC,GAAG,CAAC,CAAC;YAEV,OAAO,CAAC,GAAG,mBAAmB,CAAC,MAAM,IAAI,mBAAmB,CAAC,CAAC,CAAC,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC,EAAE,EAAE,CAAC;gBAC5F,MAAM,OAAO,GAAG,mBAAmB,CAAC,CAAC,CAAsB,CAAC;gBAE5D,iCAAiC;gBACjC,MAAM,UAAU,GAAG,OAAO,CAAC,OAAO;qBAChC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC;qBAChC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAE,CAAS,CAAC,IAAI,CAAC;qBAC3B,IAAI,CAAC,IAAI,CAAC,CAAC;gBACb,MAAM,SAAS,GAAG,OAAO,CAAC,OAAO,CAAC,IAAI
,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC;gBAElE,oEAAoE;gBACpE,MAAM,OAAO,GAAG,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC;gBACtC,yEAAyE;gBACzE,MAAM,aAAa,GAAmC;oBACrD,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,sBAAsB,CAAC;oBAC1E,YAAY,EAAE,OAAO,CAAC,UAAU;iBAChC,CAAC;gBACF,IAAI,MAAM,CAAC,sBAAsB,IAAI,OAAO,CAAC,QAAQ,EAAE,CAAC;oBACtD,aAAqB,CAAC,IAAI,GAAG,OAAO,CAAC,QAAQ,CAAC;gBAChD,CAAC;gBACD,MAAM,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;gBAE3B,IAAI,SAAS,IAAI,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE,CAAC;oBAChD,KAAK,MAAM,KAAK,IAAI,OAAO,CAAC,OAAO,EAAE,CAAC;wBACrC,IAAI,KAAK,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;4BAC5B,WAAW,CAAC,IAAI,CAAC;gCAChB,IAAI,EAAE,WAAW;gCACjB,SAAS,EAAE;oCACV,GAAG,EAAE,QAAS,KAAa,CAAC,QAAQ,WAAY,KAAa,CAAC,IAAI,EAAE;iCACpE;6BACD,CAAC,CAAC;wBACJ,CAAC;oBACF,CAAC;gBACF,CAAC;YACF,CAAC;YAED,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;YAEV,IAAI,WAAW,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC5B,IAAI,MAAM,CAAC,gCAAgC,EAAE,CAAC;oBAC7C,MAAM,CAAC,IAAI,CAAC;wBACX,IAAI,EAAE,WAAW;wBACjB,OAAO,EAAE,oCAAoC;qBAC7C,CAAC,CAAC;gBACJ,CAAC;gBAED,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE;wBACR;4BACC,IAAI,EAAE,MAAM;4BACZ,IAAI,EAAE,qCAAqC;yBAC3C;wBACD,GAAG,WAAW;qBACd;iBACD,CAAC,CAAC;gBACH,QAAQ,GAAG,MAAM,CAAC;YACnB,CAAC;iBAAM,CAAC;gBACP,QAAQ,GAAG,YAAY,CAAC;YACzB,CAAC;YACD,SAAS;QACV,CAAC;QAED,QAAQ,GAAG,GAAG,CAAC,IAAI,CAAC;IACrB,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,YAAY,CAAC,KAAa,EAAgD;IAClF,OAAO,KAAK,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC;QAC3B,IAAI,EAAE,UAAU;QAChB,QAAQ,EAAE;YACT,IAAI,EAAE,IAAI,CAAC,IAAI;YACf,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,UAAU,EAAE,IAAI,CAAC,UAAiB,EAAE,wCAAwC;YAC5E,MAAM,EAAE,KAAK,EAAE,uEAAuE;SACtF;KACD,CAAC,CAAC,CAAC;AAAA,CACJ;AAED,SAAS,aAAa,CAAC,MAAmD,EAAc;IACvF,IAAI,MAAM,KAAK,IAAI;QAAE,OAAO,MAAM,CAAC;IACnC,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,MAAM;YACV,OAAO,MAAM,CAAC;QACf,KAAK,QAAQ;YACZ,OAAO,QAAQ,CAAC;QACjB,KAAK,eAAe,CAAC;QACrB,KAAK,YAAY;YAChB,OAAO,SAAS,CAAC;QAClB,KAAK,gBAAgB;YACpB,OAAO,OAAO,CAAC;QAChB,SAAS,CAAC;YACT,MAAM,WAAW,GAAU,MAAM,CAAC;YAClC,MAAM,IAAI,KAAK,CAAC,0BAA0B,WAAW,EAAE,CAAC,CAAC;QAC1D,CAAC;IACF,CAAC;AAAA,CACD;AAED;;;;GAIG;AACH,SAAS,YAAY,CAAC,KAAkC,EAAqC;IAC5F,MAAM,QAAQ,GAAG,KAAK,CAAC,QAAQ,CAAC;IAChC,MAAM,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;IAE9B,MAAM,KAAK,GAAG,QAAQ,KAAK,KAAK,IAAI,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAEjE,MAAM,aAAa,GAClB,QAAQ,KAAK,UAAU;QACvB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAC;QAC/B,QAAQ,KAAK,KAAK;QAClB,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC;QAC5B,QAAQ,KAAK,SAAS;QACtB,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC;QAC9B,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC;QAC7B,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAC;QAChC,KAAK;QACL,QAAQ,KAAK,UAAU;QACvB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAC,CAAC;IAEjC,MAAM,YAAY,GAAG,QAAQ,KAAK,SAAS,IAAI,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,IAAI,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAE/G,MAAM,MAAM,GAAG,QAAQ,KAAK,KAAK,IAAI,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAElE,MAAM,SAAS,GAAG,QAAQ,KAAK,SAAS,IAAI,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC;IAE3E,OAAO;QACN,aAAa,EAAE,CAAC,aAAa;QAC7B,qBAAqB,EAAE,CAAC,aAAa;QACrC,uBAAuB,EAAE,CAAC,MAAM,IAAI,CAAC,KAAK;QAC1C,wBAAwB,EAAE,IAAI;QAC9B,cAAc,EAAE,YAAY,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,uBAAuB;QACrE,sBAAsB,EAAE,SAAS;QACjC,gCAAgC,EAAE,KAAK,EAAE,iDAAiD;QAC1F,sBAAsB,EAAE,SAAS;QACjC,sBAAsB,EAAE,SAAS;QACjC,cAAc,EAAE,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ;QACxC,iBAAiB,EAAE,EAAE;QACrB,oBAAoB,EAAE,EAAE;KACxB,CAAC;AAAA,CACF;AAED;;;GAGG;AACH,SAAS,SAAS,CAAC,KAAkC,EAAqC;IACzF,MAAM,QAAQ,GAAG,YAAY,CAAC,KAAK,CAAC,CAAC;IACrC,IAAI,CAAC,KAAK,CAAC,MAAM;QAAE,OAAO,QAAQ,CAAC;IAEnC,OAAO;QACN,aAAa,EAAE,KAAK,CAAC,MAAM,CAAC,aAAa,IAAI,QAAQ,CAAC,aAAa;QACnE,qBAAqB,EAAE,KAAK
,CAAC,MAAM,CAAC,qBAAqB,IAAI,QAAQ,CAAC,qBAAqB;QAC3F,uBAAuB,EAAE,KAAK,CAAC,MAAM,CAAC,uBAAuB,IAAI,QAAQ,CAAC,uBAAuB;QACjG,wBAAwB,EAAE,KAAK,CAAC,MAAM,CAAC,wBAAwB,IAAI,QAAQ,CAAC,wBAAwB;QACpG,cAAc,EAAE,KAAK,CAAC,MAAM,CAAC,cAAc,IAAI,QAAQ,CAAC,cAAc;QACtE,sBAAsB,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,IAAI,QAAQ,CAAC,sBAAsB;QAC9F,gCAAgC,EAC/B,KAAK,CAAC,MAAM,CAAC,gCAAgC,IAAI,QAAQ,CAAC,gCAAgC;QAC3F,sBAAsB,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,IAAI,QAAQ,CAAC,sBAAsB;QAC9F,sBAAsB,EAAE,KAAK,CAAC,MAAM,CAAC,sBAAsB,IAAI,QAAQ,CAAC,sBAAsB;QAC9F,cAAc,EAAE,KAAK,CAAC,MAAM,CAAC,cAAc,IAAI,QAAQ,CAAC,cAAc;QACtE,iBAAiB,EAAE,KAAK,CAAC,MAAM,CAAC,iBAAiB,IAAI,EAAE;QACvD,oBAAoB,EAAE,KAAK,CAAC,MAAM,CAAC,oBAAoB,IAAI,QAAQ,CAAC,oBAAoB;KACxF,CAAC;AAAA,CACF","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tChatCompletionAssistantMessageParam,\n\tChatCompletionChunk,\n\tChatCompletionContentPart,\n\tChatCompletionContentPartImage,\n\tChatCompletionContentPartText,\n\tChatCompletionMessageParam,\n\tChatCompletionToolMessageParam,\n} from \"openai/resources/chat/completions.js\";\nimport { getEnvApiKey } from \"../env-api-keys.js\";\nimport { calculateCost, supportsXhigh } from \"../models.js\";\nimport type {\n\tAssistantMessage,\n\tContext,\n\tMessage,\n\tModel,\n\tOpenAICompletionsCompat,\n\tSimpleStreamOptions,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n\tToolResultMessage,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { buildBaseOptions, clampReasoning } from \"./simple-options.js\";\nimport { transformMessages } from \"./transform-messages.js\";\n\n/**\n * Normalize tool call ID for Mistral.\n * Mistral requires tool IDs to be exactly 9 alphanumeric characters (a-z, A-Z, 0-9).\n */\nfunction normalizeMistralToolId(id: string): string {\n\t// Remove non-alphanumeric characters\n\tlet normalized = id.replace(/[^a-zA-Z0-9]/g, \"\");\n\t// Mistral requires exactly 9 characters\n\tif (normalized.length < 9) {\n\t\t// Pad with deterministic characters based on original ID to ensure matching\n\t\tconst padding = \"ABCDEFGHI\";\n\t\tnormalized = normalized + padding.slice(0, 9 - normalized.length);\n\t} else if (normalized.length > 9) {\n\t\tnormalized = normalized.slice(0, 9);\n\t}\n\treturn normalized;\n}\n\n/**\n * Check if conversation messages contain tool calls or tool results.\n * This is needed because Anthropic (via proxy) requires the tools param\n * to be present when messages include tool_calls or tool role messages.\n */\nfunction hasToolHistory(messages: Message[]): boolean {\n\tfor (const msg of messages) {\n\t\tif (msg.role === \"toolResult\") {\n\t\t\treturn true;\n\t\t}\n\t\tif (msg.role === \"assistant\") {\n\t\t\tif (msg.content.some((block) => block.type === \"toolCall\")) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\nexport interface OpenAICompletionsOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"required\" | { type: \"function\"; function: { name: string } };\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\nexport const streamOpenAICompletions: StreamFunction<\"openai-completions\", OpenAICompletionsOptions> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: OpenAICompletionsOptions,\n): AssistantMessageEventStream => {\n\tconst stream = 
new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider) || \"\";\n\t\t\tconst client = createClient(model, context, apiKey, options?.headers);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\toptions?.onPayload?.(params);\n\t\t\tconst openaiStream = await client.chat.completions.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tlet currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tconst finishCurrentBlock = (block?: typeof currentBlock) => {\n\t\t\t\tif (block) {\n\t\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\t\tblock.arguments = JSON.parse(block.partialArgs || \"{}\");\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null 
&&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai compatible endpoints)\n\t\t\t\t\t// Use the first non-empty reasoning field to avoid duplication\n\t\t\t\t\t// (e.g., chutes.ai returns both reasoning_content and reasoning with same content)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\", \"reasoning_text\"];\n\t\t\t\t\tlet foundReasoningField: string | null = null;\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!foundReasoningField) {\n\t\t\t\t\t\t\t\tfoundReasoningField = field;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (foundReasoningField) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\tthinkingSignature: foundReasoningField,\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\tconst delta = (choice.delta as any)[foundReasoningField];\n\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif 
(toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tconst reasoningDetails = (choice.delta as any).reasoning_details;\n\t\t\t\t\tif (reasoningDetails && Array.isArray(reasoningDetails)) {\n\t\t\t\t\t\tfor (const detail of reasoningDetails) {\n\t\t\t\t\t\t\tif (detail.type === \"reasoning.encrypted\" && detail.id && detail.data) {\n\t\t\t\t\t\t\t\tconst matchingToolCall = output.content.find(\n\t\t\t\t\t\t\t\t\t(b) => b.type === \"toolCall\" && b.id === detail.id,\n\t\t\t\t\t\t\t\t) as ToolCall | undefined;\n\t\t\t\t\t\t\t\tif (matchingToolCall) {\n\t\t\t\t\t\t\t\t\tmatchingToolCall.thoughtSignature = JSON.stringify(detail);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unknown error occurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\t// Some providers via OpenRouter give additional information in this field.\n\t\t\tconst rawMetadata = (error as any)?.error?.metadata?.raw;\n\t\t\tif (rawMetadata) output.errorMessage += `\\n${rawMetadata}`;\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nexport const streamSimpleOpenAICompletions: StreamFunction<\"openai-completions\", SimpleStreamOptions> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream => {\n\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst base = buildBaseOptions(model, options, apiKey);\n\tconst reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);\n\tconst toolChoice = (options as OpenAICompletionsOptions | undefined)?.toolChoice;\n\n\treturn streamOpenAICompletions(model, context, {\n\t\t...base,\n\t\treasoningEffort,\n\t\ttoolChoice,\n\t} satisfies OpenAICompletionsOptions);\n};\n\nfunction createClient(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tapiKey?: string,\n\toptionsHeaders?: Record<string, string>,\n) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. 
Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\n\tconst headers = { ...model.headers };\n\tif (model.provider === \"github-copilot\") {\n\t\t// Copilot expects X-Initiator to indicate whether the request is user-initiated\n\t\t// or agent-initiated (e.g. follow-up after assistant/tool messages). If there is\n\t\t// no prior message, default to user-initiated.\n\t\tconst messages = context.messages || [];\n\t\tconst lastMessage = messages[messages.length - 1];\n\t\tconst isAgentCall = lastMessage ? lastMessage.role !== \"user\" : false;\n\t\theaders[\"X-Initiator\"] = isAgentCall ? \"agent\" : \"user\";\n\t\theaders[\"Openai-Intent\"] = \"conversation-edits\";\n\n\t\t// Copilot requires this header when sending images\n\t\tconst hasImages = messages.some((msg) => {\n\t\t\tif (msg.role === \"user\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\tif (msg.role === \"toolResult\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\treturn false;\n\t\t});\n\t\tif (hasImages) {\n\t\t\theaders[\"Copilot-Vision-Request\"] = \"true\";\n\t\t}\n\t}\n\n\t// Merge options headers last so they can override defaults\n\tif (optionsHeaders) {\n\t\tObject.assign(headers, optionsHeaders);\n\t}\n\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst compat = getCompat(model);\n\tconst messages = convertMessages(model, context, compat);\n\tmaybeAddOpenRouterAnthropicCacheControl(model, messages);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t};\n\n\tif (compat.supportsUsageInStreaming !== false) {\n\t\t(params as any).stream_options = { include_usage: true };\n\t}\n\n\tif (compat.supportsStore) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\tif (compat.maxTokensField === \"max_tokens\") {\n\t\t\t(params as any).max_tokens = options.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t} else if (hasToolHistory(context.messages)) {\n\t\t// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results\n\t\tparams.tools = [];\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\tif (compat.thinkingFormat === \"zai\" && model.reasoning) {\n\t\t// Z.ai uses binary thinking: { type: \"enabled\" | \"disabled\" }\n\t\t// Must explicitly disable since z.ai defaults to thinking enabled\n\t\t(params as any).thinking = { type: options?.reasoningEffort ? 
\"enabled\" : \"disabled\" };\n\t} else if (compat.thinkingFormat === \"qwen\" && model.reasoning) {\n\t\t// Qwen uses enable_thinking: boolean\n\t\t(params as any).enable_thinking = !!options?.reasoningEffort;\n\t} else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {\n\t\t// OpenAI-style reasoning_effort\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\t// OpenRouter provider routing preferences\n\tif (model.baseUrl.includes(\"openrouter.ai\") && model.compat?.openRouterRouting) {\n\t\t(params as any).provider = model.compat.openRouterRouting;\n\t}\n\n\t// Vercel AI Gateway provider routing preferences\n\tif (model.baseUrl.includes(\"ai-gateway.vercel.sh\") && model.compat?.vercelGatewayRouting) {\n\t\tconst routing = model.compat.vercelGatewayRouting;\n\t\tif (routing.only || routing.order) {\n\t\t\tconst gatewayOptions: Record<string, string[]> = {};\n\t\t\tif (routing.only) gatewayOptions.only = routing.only;\n\t\t\tif (routing.order) gatewayOptions.order = routing.order;\n\t\t\t(params as any).providerOptions = { gateway: gatewayOptions };\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction maybeAddOpenRouterAnthropicCacheControl(\n\tmodel: Model<\"openai-completions\">,\n\tmessages: ChatCompletionMessageParam[],\n): void {\n\tif (model.provider !== \"openrouter\" || !model.id.startsWith(\"anthropic/\")) return;\n\n\t// Anthropic-style caching requires cache_control on a text part. Add a breakpoint\n\t// on the last user/assistant message (walking backwards until we find text content).\n\tfor (let i = messages.length - 1; i >= 0; i--) {\n\t\tconst msg = messages[i];\n\t\tif (msg.role !== \"user\" && msg.role !== \"assistant\") continue;\n\n\t\tconst content = msg.content;\n\t\tif (typeof content === \"string\") {\n\t\t\tmsg.content = [\n\t\t\t\tObject.assign({ type: \"text\" as const, text: content }, { cache_control: { type: \"ephemeral\" } }),\n\t\t\t];\n\t\t\treturn;\n\t\t}\n\n\t\tif (!Array.isArray(content)) continue;\n\n\t\t// Find last text part and add cache_control\n\t\tfor (let j = content.length - 1; j >= 0; j--) {\n\t\t\tconst part = content[j];\n\t\t\tif (part?.type === \"text\") {\n\t\t\t\tObject.assign(part, { cache_control: { type: \"ephemeral\" } });\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport function convertMessages(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tcompat: Required<OpenAICompletionsCompat>,\n): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst normalizeToolCallId = (id: string): string => {\n\t\tif (compat.requiresMistralToolIds) return normalizeMistralToolId(id);\n\n\t\t// Handle pipe-separated IDs from OpenAI Responses API\n\t\t// Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =)\n\t\t// These come from providers like github-copilot, openai-codex, opencode\n\t\t// Extract just the call_id part and normalize it\n\t\tif (id.includes(\"|\")) {\n\t\t\tconst [callId] = id.split(\"|\");\n\t\t\t// Sanitize to allowed chars and truncate to 40 chars (OpenAI limit)\n\t\t\treturn callId.replace(/[^a-zA-Z0-9_-]/g, \"_\").slice(0, 40);\n\t\t}\n\n\t\tif (model.provider === \"openai\") return id.length > 40 ? 
id.slice(0, 40) : id;\n\t\t// Copilot Claude models route to Claude backend which requires Anthropic ID format\n\t\tif (model.provider === \"github-copilot\" && model.id.toLowerCase().includes(\"claude\")) {\n\t\t\treturn id.replace(/[^a-zA-Z0-9_-]/g, \"_\").slice(0, 64);\n\t\t}\n\t\treturn id;\n\t};\n\n\tconst transformedMessages = transformMessages(context.messages, model, (id) => normalizeToolCallId(id));\n\n\tif (context.systemPrompt) {\n\t\tconst useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;\n\t\tconst role = useDeveloperRole ? \"developer\" : \"system\";\n\t\tparams.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });\n\t}\n\n\tlet lastRole: string | null = null;\n\n\tfor (let i = 0; i < transformedMessages.length; i++) {\n\t\tconst msg = transformedMessages[i];\n\t\t// Some providers (e.g. Mistral/Devstral) don't allow user messages directly after tool results\n\t\t// Insert a synthetic assistant message to bridge the gap\n\t\tif (compat.requiresAssistantAfterToolResult && lastRole === \"toolResult\" && msg.role === \"user\") {\n\t\t\tparams.push({\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: \"I have processed the tool results.\",\n\t\t\t});\n\t\t}\n\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: sanitizeSurrogates(msg.content),\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? content.filter((c) => c.type !== \"image_url\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\t// Some providers (e.g. Mistral) don't accept null content, use empty string instead\n\t\t\tconst assistantMsg: ChatCompletionAssistantMessageParam = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: compat.requiresAssistantAfterToolResult ? 
\"\" : null,\n\t\t\t};\n\n\t\t\tconst textBlocks = msg.content.filter((b) => b.type === \"text\") as TextContent[];\n\t\t\t// Filter out empty text blocks to avoid API validation errors\n\t\t\tconst nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0);\n\t\t\tif (nonEmptyTextBlocks.length > 0) {\n\t\t\t\t// GitHub Copilot requires assistant content as a string, not an array.\n\t\t\t\t// Sending as array causes Claude models to re-answer all previous prompts.\n\t\t\t\tif (model.provider === \"github-copilot\") {\n\t\t\t\t\tassistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join(\"\");\n\t\t\t\t} else {\n\t\t\t\t\tassistantMsg.content = nonEmptyTextBlocks.map((b) => {\n\t\t\t\t\t\treturn { type: \"text\", text: sanitizeSurrogates(b.text) };\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Handle thinking blocks\n\t\t\tconst thinkingBlocks = msg.content.filter((b) => b.type === \"thinking\") as ThinkingContent[];\n\t\t\t// Filter out empty thinking blocks to avoid API validation errors\n\t\t\tconst nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0);\n\t\t\tif (nonEmptyThinkingBlocks.length > 0) {\n\t\t\t\tif (compat.requiresThinkingAsText) {\n\t\t\t\t\t// Convert thinking blocks to plain text (no tags to avoid model mimicking them)\n\t\t\t\t\tconst thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join(\"\\n\\n\");\n\t\t\t\t\tconst textContent = assistantMsg.content as Array<{ type: \"text\"; text: string }> | null;\n\t\t\t\t\tif (textContent) {\n\t\t\t\t\t\ttextContent.unshift({ type: \"text\", text: thinkingText });\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassistantMsg.content = [{ type: \"text\", text: thinkingText }];\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss)\n\t\t\t\t\tconst signature = nonEmptyThinkingBlocks[0].thinkingSignature;\n\t\t\t\t\tif (signature && signature.length > 0) {\n\t\t\t\t\t\t(assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join(\"\\n\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst toolCalls = msg.content.filter((b) => b.type === \"toolCall\") as ToolCall[];\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tassistantMsg.tool_calls = toolCalls.map((tc) => ({\n\t\t\t\t\tid: tc.id,\n\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\tfunction: {\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targuments: JSON.stringify(tc.arguments),\n\t\t\t\t\t},\n\t\t\t\t}));\n\t\t\t\tconst reasoningDetails = toolCalls\n\t\t\t\t\t.filter((tc) => tc.thoughtSignature)\n\t\t\t\t\t.map((tc) => {\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\treturn JSON.parse(tc.thoughtSignature!);\n\t\t\t\t\t\t} catch {\n\t\t\t\t\t\t\treturn null;\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t\t.filter(Boolean);\n\t\t\t\tif (reasoningDetails.length > 0) {\n\t\t\t\t\t(assistantMsg as any).reasoning_details = reasoningDetails;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Skip assistant messages that have no content and no tool calls.\n\t\t\t// Mistral explicitly requires \"either content or tool_calls, but not none\".\n\t\t\t// Other providers also don't accept empty assistant messages.\n\t\t\t// This handles aborted assistant responses that got no content.\n\t\t\tconst content = assistantMsg.content;\n\t\t\tconst hasContent =\n\t\t\t\tcontent !== null &&\n\t\t\t\tcontent !== undefined &&\n\t\t\t\t(typeof content === \"string\" ? 
content.length > 0 : content.length > 0);\n\t\t\tif (!hasContent && !assistantMsg.tool_calls) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tparams.push(assistantMsg);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\tconst imageBlocks: Array<{ type: \"image_url\"; image_url: { url: string } }> = [];\n\t\t\tlet j = i;\n\n\t\t\tfor (; j < transformedMessages.length && transformedMessages[j].role === \"toolResult\"; j++) {\n\t\t\t\tconst toolMsg = transformedMessages[j] as ToolResultMessage;\n\n\t\t\t\t// Extract text and image content\n\t\t\t\tconst textResult = toolMsg.content\n\t\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t\t.join(\"\\n\");\n\t\t\t\tconst hasImages = toolMsg.content.some((c) => c.type === \"image\");\n\n\t\t\t\t// Always send tool result with text (or placeholder if only images)\n\t\t\t\tconst hasText = textResult.length > 0;\n\t\t\t\t// Some providers (e.g. Mistral) require the 'name' field in tool results\n\t\t\t\tconst toolResultMsg: ChatCompletionToolMessageParam = {\n\t\t\t\t\trole: \"tool\",\n\t\t\t\t\tcontent: sanitizeSurrogates(hasText ? textResult : \"(see attached image)\"),\n\t\t\t\t\ttool_call_id: toolMsg.toolCallId,\n\t\t\t\t};\n\t\t\t\tif (compat.requiresToolResultName && toolMsg.toolName) {\n\t\t\t\t\t(toolResultMsg as any).name = toolMsg.toolName;\n\t\t\t\t}\n\t\t\t\tparams.push(toolResultMsg);\n\n\t\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\t\tfor (const block of toolMsg.content) {\n\t\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\t\timageBlocks.push({\n\t\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\t\turl: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ti = j - 1;\n\n\t\t\tif (imageBlocks.length > 0) {\n\t\t\t\tif (compat.requiresAssistantAfterToolResult) {\n\t\t\t\t\tparams.push({\n\t\t\t\t\t\trole: \"assistant\",\n\t\t\t\t\t\tcontent: \"I have processed the tool results.\",\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t...imageBlocks,\n\t\t\t\t\t],\n\t\t\t\t});\n\t\t\t\tlastRole = \"user\";\n\t\t\t} else {\n\t\t\t\tlastRole = \"toolResult\";\n\t\t\t}\n\t\t\tcontinue;\n\t\t}\n\n\t\tlastRole = msg.role;\n\t}\n\n\treturn params;\n}\n\nfunction convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\t\tstrict: false, // Disable strict mode to allow optional parameters without null unions\n\t\t},\n\t}));\n}\n\nfunction mapStopReason(reason: ChatCompletionChunk.Choice[\"finish_reason\"]): StopReason {\n\tif (reason === null) return \"stop\";\n\tswitch (reason) {\n\t\tcase \"stop\":\n\t\t\treturn \"stop\";\n\t\tcase \"length\":\n\t\t\treturn \"length\";\n\t\tcase \"function_call\":\n\t\tcase \"tool_calls\":\n\t\t\treturn \"toolUse\";\n\t\tcase \"content_filter\":\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\n/**\n * Detect compatibility settings from provider and baseUrl for known providers.\n * 
Provider takes precedence over URL-based detection since it's explicitly configured.\n * Returns a fully resolved OpenAICompletionsCompat object with all fields set.\n */\nfunction detectCompat(model: Model<\"openai-completions\">): Required<OpenAICompletionsCompat> {\n\tconst provider = model.provider;\n\tconst baseUrl = model.baseUrl;\n\n\tconst isZai = provider === \"zai\" || baseUrl.includes(\"api.z.ai\");\n\n\tconst isNonStandard =\n\t\tprovider === \"cerebras\" ||\n\t\tbaseUrl.includes(\"cerebras.ai\") ||\n\t\tprovider === \"xai\" ||\n\t\tbaseUrl.includes(\"api.x.ai\") ||\n\t\tprovider === \"mistral\" ||\n\t\tbaseUrl.includes(\"mistral.ai\") ||\n\t\tbaseUrl.includes(\"chutes.ai\") ||\n\t\tbaseUrl.includes(\"deepseek.com\") ||\n\t\tisZai ||\n\t\tprovider === \"opencode\" ||\n\t\tbaseUrl.includes(\"opencode.ai\");\n\n\tconst useMaxTokens = provider === \"mistral\" || baseUrl.includes(\"mistral.ai\") || baseUrl.includes(\"chutes.ai\");\n\n\tconst isGrok = provider === \"xai\" || baseUrl.includes(\"api.x.ai\");\n\n\tconst isMistral = provider === \"mistral\" || baseUrl.includes(\"mistral.ai\");\n\n\treturn {\n\t\tsupportsStore: !isNonStandard,\n\t\tsupportsDeveloperRole: !isNonStandard,\n\t\tsupportsReasoningEffort: !isGrok && !isZai,\n\t\tsupportsUsageInStreaming: true,\n\t\tmaxTokensField: useMaxTokens ? \"max_tokens\" : \"max_completion_tokens\",\n\t\trequiresToolResultName: isMistral,\n\t\trequiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024\n\t\trequiresThinkingAsText: isMistral,\n\t\trequiresMistralToolIds: isMistral,\n\t\tthinkingFormat: isZai ? \"zai\" : \"openai\",\n\t\topenRouterRouting: {},\n\t\tvercelGatewayRouting: {},\n\t};\n}\n\n/**\n * Get resolved compatibility settings for a model.\n * Uses explicit model.compat if provided, otherwise auto-detects from provider/URL.\n */\nfunction getCompat(model: Model<\"openai-completions\">): Required<OpenAICompletionsCompat> {\n\tconst detected = detectCompat(model);\n\tif (!model.compat) return detected;\n\n\treturn {\n\t\tsupportsStore: model.compat.supportsStore ?? detected.supportsStore,\n\t\tsupportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,\n\t\tsupportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,\n\t\tsupportsUsageInStreaming: model.compat.supportsUsageInStreaming ?? detected.supportsUsageInStreaming,\n\t\tmaxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,\n\t\trequiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName,\n\t\trequiresAssistantAfterToolResult:\n\t\t\tmodel.compat.requiresAssistantAfterToolResult ?? detected.requiresAssistantAfterToolResult,\n\t\trequiresThinkingAsText: model.compat.requiresThinkingAsText ?? detected.requiresThinkingAsText,\n\t\trequiresMistralToolIds: model.compat.requiresMistralToolIds ?? detected.requiresMistralToolIds,\n\t\tthinkingFormat: model.compat.thinkingFormat ?? detected.thinkingFormat,\n\t\topenRouterRouting: model.compat.openRouterRouting ?? {},\n\t\tvercelGatewayRouting: model.compat.vercelGatewayRouting ?? detected.vercelGatewayRouting,\n\t};\n}\n"]}
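The getCompat helper embedded in the sourcemap above merges an explicit model.compat over detectCompat's auto-detected defaults one field at a time with the nullish-coalescing operator (??), so a caller overrides only the fields it actually sets. A minimal standalone sketch of that merge pattern follows; CompatLike and mergeCompat are illustrative stand-ins, not the package's API:

// Illustrative sketch of the per-field override pattern used by getCompat.
// CompatLike is a hypothetical two-field shape, not the package's real
// OpenAICompletionsCompat; the values below are made up for the demo.
interface CompatLike {
	maxTokensField: "max_tokens" | "max_completion_tokens";
	requiresToolResultName: boolean;
}

function mergeCompat(detected: CompatLike, explicit?: Partial<CompatLike>): CompatLike {
	return {
		// ?? falls back to the detected value only when the explicit field is
		// null/undefined, so an explicit false still wins over a detected true.
		maxTokensField: explicit?.maxTokensField ?? detected.maxTokensField,
		requiresToolResultName: explicit?.requiresToolResultName ?? detected.requiresToolResultName,
	};
}

const detected: CompatLike = { maxTokensField: "max_completion_tokens", requiresToolResultName: true };
console.log(mergeCompat(detected, { requiresToolResultName: false }));
// => { maxTokensField: "max_completion_tokens", requiresToolResultName: false }

The choice of ?? over || matters here: an explicit false or empty string survives the merge, which is exactly what boolean compat flags need.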
@@ -1 +1 @@
- {"version":3,"file":"openai-responses.d.ts","sourceRoot":"","sources":["../../src/providers/openai-responses.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,yCAAyC,CAAC;AAG7F,OAAO,KAAK,EAKX,mBAAmB,EACnB,cAAc,EACd,aAAa,EAEb,MAAM,aAAa,CAAC;AAwBrB,MAAM,WAAW,sBAAuB,SAAQ,aAAa;IAC5D,eAAe,CAAC,EAAE,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;IAClE,gBAAgB,CAAC,EAAE,MAAM,GAAG,UAAU,GAAG,SAAS,GAAG,IAAI,CAAC;IAC1D,WAAW,CAAC,EAAE,6BAA6B,CAAC,cAAc,CAAC,CAAC;CAC5D;AAED;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,cAAc,CAAC,kBAAkB,EAAE,sBAAsB,CAgE5F,CAAC;AAEF,eAAO,MAAM,2BAA2B,EAAE,cAAc,CAAC,kBAAkB,EAAE,mBAAmB,CAiB/F,CAAC","sourcesContent":["import OpenAI from \"openai\";\nimport type { ResponseCreateParamsStreaming } from \"openai/resources/responses/responses.js\";\nimport { getEnvApiKey } from \"../env-api-keys.js\";\nimport { supportsXhigh } from \"../models.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tContext,\n\tModel,\n\tSimpleStreamOptions,\n\tStreamFunction,\n\tStreamOptions,\n\tUsage,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { convertResponsesMessages, convertResponsesTools, processResponsesStream } from \"./openai-responses-shared.js\";\nimport { buildBaseOptions, clampReasoning } from \"./simple-options.js\";\n\nconst OPENAI_TOOL_CALL_PROVIDERS = new Set([\"openai\", \"openai-codex\", \"opencode\"]);\n\n/**\n * Get prompt cache retention based on PI_CACHE_RETENTION env var.\n * Only applies to direct OpenAI API calls (api.openai.com).\n * Returns '24h' for long retention, undefined for default (in-memory).\n */\nfunction getPromptCacheRetention(baseUrl: string): \"24h\" | undefined {\n\tif (\n\t\ttypeof process !== \"undefined\" &&\n\t\tprocess.env.PI_CACHE_RETENTION === \"long\" &&\n\t\tbaseUrl.includes(\"api.openai.com\")\n\t) {\n\t\treturn \"24h\";\n\t}\n\treturn undefined;\n}\n\n// OpenAI Responses-specific options\nexport interface OpenAIResponsesOptions extends StreamOptions {\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\treasoningSummary?: \"auto\" | \"detailed\" | \"concise\" | null;\n\tserviceTier?: ResponseCreateParamsStreaming[\"service_tier\"];\n}\n\n/**\n * Generate function for OpenAI Responses API\n */\nexport const streamOpenAIResponses: StreamFunction<\"openai-responses\", OpenAIResponsesOptions> = (\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\toptions?: OpenAIResponsesOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t// Start async processing\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api as Api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\t// Create OpenAI client\n\t\t\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider) || \"\";\n\t\t\tconst client = createClient(model, context, apiKey, options?.headers);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\toptions?.onPayload?.(params);\n\t\t\tconst openaiStream = await client.responses.create(\n\t\t\t\tparams,\n\t\t\t\toptions?.signal ? 
{ signal: options.signal } : undefined,\n\t\t\t);\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tawait processResponsesStream(openaiStream, output, stream, model, {\n\t\t\t\tserviceTier: options?.serviceTier,\n\t\t\t\tapplyServiceTierPricing,\n\t\t\t});\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unknown error occurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as { index?: number }).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nexport const streamSimpleOpenAIResponses: StreamFunction<\"openai-responses\", SimpleStreamOptions> = (\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream => {\n\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst base = buildBaseOptions(model, options, apiKey);\n\tconst reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);\n\n\treturn streamOpenAIResponses(model, context, {\n\t\t...base,\n\t\treasoningEffort,\n\t} satisfies OpenAIResponsesOptions);\n};\n\nfunction createClient(\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\tapiKey?: string,\n\toptionsHeaders?: Record<string, string>,\n) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\n\tconst headers = { ...model.headers };\n\tif (model.provider === \"github-copilot\") {\n\t\t// Copilot expects X-Initiator to indicate whether the request is user-initiated\n\t\t// or agent-initiated (e.g. follow-up after assistant/tool messages). If there is\n\t\t// no prior message, default to user-initiated.\n\t\tconst messages = context.messages || [];\n\t\tconst lastMessage = messages[messages.length - 1];\n\t\tconst isAgentCall = lastMessage ? lastMessage.role !== \"user\" : false;\n\t\theaders[\"X-Initiator\"] = isAgentCall ? 
\"agent\" : \"user\";\n\t\theaders[\"Openai-Intent\"] = \"conversation-edits\";\n\n\t\t// Copilot requires this header when sending images\n\t\tconst hasImages = messages.some((msg) => {\n\t\t\tif (msg.role === \"user\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\tif (msg.role === \"toolResult\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\treturn false;\n\t\t});\n\t\tif (hasImages) {\n\t\t\theaders[\"Copilot-Vision-Request\"] = \"true\";\n\t\t}\n\t}\n\n\t// Merge options headers last so they can override defaults\n\tif (optionsHeaders) {\n\t\tObject.assign(headers, optionsHeaders);\n\t}\n\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-responses\">, context: Context, options?: OpenAIResponsesOptions) {\n\tconst messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);\n\n\tconst params: ResponseCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tinput: messages,\n\t\tstream: true,\n\t\tprompt_cache_key: options?.sessionId,\n\t\tprompt_cache_retention: getPromptCacheRetention(model.baseUrl),\n\t};\n\n\tif (options?.maxTokens) {\n\t\tparams.max_output_tokens = options?.maxTokens;\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options?.temperature;\n\t}\n\n\tif (options?.serviceTier !== undefined) {\n\t\tparams.service_tier = options.serviceTier;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertResponsesTools(context.tools);\n\t}\n\n\tif (model.reasoning) {\n\t\tif (options?.reasoningEffort || options?.reasoningSummary) {\n\t\t\tparams.reasoning = {\n\t\t\t\teffort: options?.reasoningEffort || \"medium\",\n\t\t\t\tsummary: options?.reasoningSummary || \"auto\",\n\t\t\t};\n\t\t\tparams.include = [\"reasoning.encrypted_content\"];\n\t\t} else {\n\t\t\tif (model.name.startsWith(\"gpt-5\")) {\n\t\t\t\t// Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"developer\",\n\t\t\t\t\tcontent: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\t\t\ttext: \"# Juice: 0 !important\",\n\t\t\t\t\t\t},\n\t\t\t\t\t],\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction getServiceTierCostMultiplier(serviceTier: ResponseCreateParamsStreaming[\"service_tier\"] | undefined): number {\n\tswitch (serviceTier) {\n\t\tcase \"flex\":\n\t\t\treturn 0.5;\n\t\tcase \"priority\":\n\t\t\treturn 2;\n\t\tdefault:\n\t\t\treturn 1;\n\t}\n}\n\nfunction applyServiceTierPricing(usage: Usage, serviceTier: ResponseCreateParamsStreaming[\"service_tier\"] | undefined) {\n\tconst multiplier = getServiceTierCostMultiplier(serviceTier);\n\tif (multiplier === 1) return;\n\n\tusage.cost.input *= multiplier;\n\tusage.cost.output *= multiplier;\n\tusage.cost.cacheRead *= multiplier;\n\tusage.cost.cacheWrite *= multiplier;\n\tusage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;\n}\n"]}
+ {"version":3,"file":"openai-responses.d.ts","sourceRoot":"","sources":["../../src/providers/openai-responses.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,yCAAyC,CAAC;AAG7F,OAAO,KAAK,EAMX,mBAAmB,EACnB,cAAc,EACd,aAAa,EAEb,MAAM,aAAa,CAAC;AAoCrB,MAAM,WAAW,sBAAuB,SAAQ,aAAa;IAC5D,eAAe,CAAC,EAAE,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;IAClE,gBAAgB,CAAC,EAAE,MAAM,GAAG,UAAU,GAAG,SAAS,GAAG,IAAI,CAAC;IAC1D,WAAW,CAAC,EAAE,6BAA6B,CAAC,cAAc,CAAC,CAAC;CAC5D;AAED;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,cAAc,CAAC,kBAAkB,EAAE,sBAAsB,CAgE5F,CAAC;AAEF,eAAO,MAAM,2BAA2B,EAAE,cAAc,CAAC,kBAAkB,EAAE,mBAAmB,CAiB/F,CAAC","sourcesContent":["import OpenAI from \"openai\";\nimport type { ResponseCreateParamsStreaming } from \"openai/resources/responses/responses.js\";\nimport { getEnvApiKey } from \"../env-api-keys.js\";\nimport { supportsXhigh } from \"../models.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tCacheRetention,\n\tContext,\n\tModel,\n\tSimpleStreamOptions,\n\tStreamFunction,\n\tStreamOptions,\n\tUsage,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { convertResponsesMessages, convertResponsesTools, processResponsesStream } from \"./openai-responses-shared.js\";\nimport { buildBaseOptions, clampReasoning } from \"./simple-options.js\";\n\nconst OPENAI_TOOL_CALL_PROVIDERS = new Set([\"openai\", \"openai-codex\", \"opencode\"]);\n\n/**\n * Resolve cache retention preference.\n * Defaults to \"short\" and uses PI_CACHE_RETENTION for backward compatibility.\n */\nfunction resolveCacheRetention(cacheRetention?: CacheRetention): CacheRetention {\n\tif (cacheRetention) {\n\t\treturn cacheRetention;\n\t}\n\tif (typeof process !== \"undefined\" && process.env.PI_CACHE_RETENTION === \"long\") {\n\t\treturn \"long\";\n\t}\n\treturn \"short\";\n}\n\n/**\n * Get prompt cache retention based on cacheRetention and base URL.\n * Only applies to direct OpenAI API calls (api.openai.com).\n */\nfunction getPromptCacheRetention(baseUrl: string, cacheRetention: CacheRetention): \"24h\" | undefined {\n\tif (cacheRetention !== \"long\") {\n\t\treturn undefined;\n\t}\n\tif (baseUrl.includes(\"api.openai.com\")) {\n\t\treturn \"24h\";\n\t}\n\treturn undefined;\n}\n\n// OpenAI Responses-specific options\nexport interface OpenAIResponsesOptions extends StreamOptions {\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\treasoningSummary?: \"auto\" | \"detailed\" | \"concise\" | null;\n\tserviceTier?: ResponseCreateParamsStreaming[\"service_tier\"];\n}\n\n/**\n * Generate function for OpenAI Responses API\n */\nexport const streamOpenAIResponses: StreamFunction<\"openai-responses\", OpenAIResponsesOptions> = (\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\toptions?: OpenAIResponsesOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t// Start async processing\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api as Api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\t// Create OpenAI client\n\t\t\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider) || 
\"\";\n\t\t\tconst client = createClient(model, context, apiKey, options?.headers);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\toptions?.onPayload?.(params);\n\t\t\tconst openaiStream = await client.responses.create(\n\t\t\t\tparams,\n\t\t\t\toptions?.signal ? { signal: options.signal } : undefined,\n\t\t\t);\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tawait processResponsesStream(openaiStream, output, stream, model, {\n\t\t\t\tserviceTier: options?.serviceTier,\n\t\t\t\tapplyServiceTierPricing,\n\t\t\t});\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unknown error occurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as { index?: number }).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nexport const streamSimpleOpenAIResponses: StreamFunction<\"openai-responses\", SimpleStreamOptions> = (\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream => {\n\tconst apiKey = options?.apiKey || getEnvApiKey(model.provider);\n\tif (!apiKey) {\n\t\tthrow new Error(`No API key for provider: ${model.provider}`);\n\t}\n\n\tconst base = buildBaseOptions(model, options, apiKey);\n\tconst reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);\n\n\treturn streamOpenAIResponses(model, context, {\n\t\t...base,\n\t\treasoningEffort,\n\t} satisfies OpenAIResponsesOptions);\n};\n\nfunction createClient(\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\tapiKey?: string,\n\toptionsHeaders?: Record<string, string>,\n) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\n\tconst headers = { ...model.headers };\n\tif (model.provider === \"github-copilot\") {\n\t\t// Copilot expects X-Initiator to indicate whether the request is user-initiated\n\t\t// or agent-initiated (e.g. follow-up after assistant/tool messages). If there is\n\t\t// no prior message, default to user-initiated.\n\t\tconst messages = context.messages || [];\n\t\tconst lastMessage = messages[messages.length - 1];\n\t\tconst isAgentCall = lastMessage ? lastMessage.role !== \"user\" : false;\n\t\theaders[\"X-Initiator\"] = isAgentCall ? 
\"agent\" : \"user\";\n\t\theaders[\"Openai-Intent\"] = \"conversation-edits\";\n\n\t\t// Copilot requires this header when sending images\n\t\tconst hasImages = messages.some((msg) => {\n\t\t\tif (msg.role === \"user\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\tif (msg.role === \"toolResult\" && Array.isArray(msg.content)) {\n\t\t\t\treturn msg.content.some((c) => c.type === \"image\");\n\t\t\t}\n\t\t\treturn false;\n\t\t});\n\t\tif (hasImages) {\n\t\t\theaders[\"Copilot-Vision-Request\"] = \"true\";\n\t\t}\n\t}\n\n\t// Merge options headers last so they can override defaults\n\tif (optionsHeaders) {\n\t\tObject.assign(headers, optionsHeaders);\n\t}\n\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-responses\">, context: Context, options?: OpenAIResponsesOptions) {\n\tconst messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);\n\n\tconst cacheRetention = resolveCacheRetention(options?.cacheRetention);\n\tconst params: ResponseCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tinput: messages,\n\t\tstream: true,\n\t\tprompt_cache_key: cacheRetention === \"none\" ? undefined : options?.sessionId,\n\t\tprompt_cache_retention: getPromptCacheRetention(model.baseUrl, cacheRetention),\n\t};\n\n\tif (options?.maxTokens) {\n\t\tparams.max_output_tokens = options?.maxTokens;\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options?.temperature;\n\t}\n\n\tif (options?.serviceTier !== undefined) {\n\t\tparams.service_tier = options.serviceTier;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertResponsesTools(context.tools);\n\t}\n\n\tif (model.reasoning) {\n\t\tif (options?.reasoningEffort || options?.reasoningSummary) {\n\t\t\tparams.reasoning = {\n\t\t\t\teffort: options?.reasoningEffort || \"medium\",\n\t\t\t\tsummary: options?.reasoningSummary || \"auto\",\n\t\t\t};\n\t\t\tparams.include = [\"reasoning.encrypted_content\"];\n\t\t} else {\n\t\t\tif (model.name.startsWith(\"gpt-5\")) {\n\t\t\t\t// Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"developer\",\n\t\t\t\t\tcontent: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\t\t\ttext: \"# Juice: 0 !important\",\n\t\t\t\t\t\t},\n\t\t\t\t\t],\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction getServiceTierCostMultiplier(serviceTier: ResponseCreateParamsStreaming[\"service_tier\"] | undefined): number {\n\tswitch (serviceTier) {\n\t\tcase \"flex\":\n\t\t\treturn 0.5;\n\t\tcase \"priority\":\n\t\t\treturn 2;\n\t\tdefault:\n\t\t\treturn 1;\n\t}\n}\n\nfunction applyServiceTierPricing(usage: Usage, serviceTier: ResponseCreateParamsStreaming[\"service_tier\"] | undefined) {\n\tconst multiplier = getServiceTierCostMultiplier(serviceTier);\n\tif (multiplier === 1) return;\n\n\tusage.cost.input *= multiplier;\n\tusage.cost.output *= multiplier;\n\tusage.cost.cacheRead *= multiplier;\n\tusage.cost.cacheWrite *= multiplier;\n\tusage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;\n}\n"]}
@@ -6,14 +6,27 @@ import { convertResponsesMessages, convertResponsesTools, processResponsesStream
  import { buildBaseOptions, clampReasoning } from "./simple-options.js";
  const OPENAI_TOOL_CALL_PROVIDERS = new Set(["openai", "openai-codex", "opencode"]);
  /**
-  * Get prompt cache retention based on PI_CACHE_RETENTION env var.
+  * Resolve cache retention preference.
+  * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility.
+  */
+ function resolveCacheRetention(cacheRetention) {
+     if (cacheRetention) {
+         return cacheRetention;
+     }
+     if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") {
+         return "long";
+     }
+     return "short";
+ }
+ /**
+  * Get prompt cache retention based on cacheRetention and base URL.
   * Only applies to direct OpenAI API calls (api.openai.com).
-  * Returns '24h' for long retention, undefined for default (in-memory).
   */
- function getPromptCacheRetention(baseUrl) {
-     if (typeof process !== "undefined" &&
-         process.env.PI_CACHE_RETENTION === "long" &&
-         baseUrl.includes("api.openai.com")) {
+ function getPromptCacheRetention(baseUrl, cacheRetention) {
+     if (cacheRetention !== "long") {
+         return undefined;
+     }
+     if (baseUrl.includes("api.openai.com")) {
          return "24h";
      }
      return undefined;
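The two helpers in the hunk above are small enough to exercise directly. A behavior sketch in TypeScript; it assumes CacheRetention is the union "none" | "short" | "long", which matches the branches the new code takes, while the real union lives in ../types.js and is not shown in this diff:

// Behavior sketch mirroring the compiled helpers from the hunk above.
// Assumption: CacheRetention = "none" | "short" | "long" (defined in ../types.js).
type CacheRetention = "none" | "short" | "long";

function resolveCacheRetention(cacheRetention?: CacheRetention): CacheRetention {
	if (cacheRetention) return cacheRetention; // explicit option always wins
	if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") {
		return "long"; // legacy env-var escape hatch
	}
	return "short"; // new default
}

function getPromptCacheRetention(baseUrl: string, cacheRetention: CacheRetention): "24h" | undefined {
	if (cacheRetention !== "long") return undefined;
	// extended retention is only meaningful against the direct OpenAI API
	return baseUrl.includes("api.openai.com") ? "24h" : undefined;
}

console.log(getPromptCacheRetention("https://api.openai.com/v1", resolveCacheRetention("long"))); // "24h"
console.log(getPromptCacheRetention("https://api.openai.com/v1", resolveCacheRetention())); // undefined unless PI_CACHE_RETENTION=long

The env-var path preserves backward compatibility: with no explicit option, PI_CACHE_RETENTION=long still resolves to "long" and therefore to "24h" retention against api.openai.com.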
@@ -130,12 +143,13 @@ function createClient(model, context, apiKey, optionsHeaders) {
  }
  function buildParams(model, context, options) {
      const messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);
+     const cacheRetention = resolveCacheRetention(options?.cacheRetention);
      const params = {
          model: model.id,
          input: messages,
          stream: true,
-         prompt_cache_key: options?.sessionId,
-         prompt_cache_retention: getPromptCacheRetention(model.baseUrl),
+         prompt_cache_key: cacheRetention === "none" ? undefined : options?.sessionId,
+         prompt_cache_retention: getPromptCacheRetention(model.baseUrl, cacheRetention),
      };
      if (options?.maxTokens) {
          params.max_output_tokens = options?.maxTokens;
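Taken together, the changed buildParams lines gate both cache fields on the resolved retention mode. A self-contained sketch of the resulting request fields, where cacheParams, the session id, and the base URL are illustrative and not package API:

// Net effect of the buildParams change, per retention mode.
type CacheRetention = "none" | "short" | "long";

function cacheParams(mode: CacheRetention, sessionId: string, baseUrl: string) {
	return {
		// "none" opts out of cache keying entirely
		prompt_cache_key: mode === "none" ? undefined : sessionId,
		// "24h" retention only for "long" against the direct OpenAI API
		prompt_cache_retention:
			mode === "long" && baseUrl.includes("api.openai.com") ? ("24h" as const) : undefined,
	};
}

console.log(cacheParams("none", "sess-1", "https://api.openai.com/v1"));  // both fields undefined
console.log(cacheParams("short", "sess-1", "https://api.openai.com/v1")); // key only, default retention
console.log(cacheParams("long", "sess-1", "https://api.openai.com/v1"));  // key plus "24h" retention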