@mariozechner/pi-ai 0.13.2 → 0.14.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +76 -5
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -0
- package/dist/index.js.map +1 -1
- package/dist/models.generated.d.ts +30 -30
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +101 -101
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +0 -8
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/google.d.ts.map +1 -1
- package/dist/providers/google.js +0 -8
- package/dist/providers/google.js.map +1 -1
- package/dist/providers/openai-completions.d.ts +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +65 -28
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai-responses.d.ts +1 -1
- package/dist/providers/openai-responses.d.ts.map +1 -1
- package/dist/providers/openai-responses.js +4 -9
- package/dist/providers/openai-responses.js.map +1 -1
- package/dist/stream.d.ts.map +1 -1
- package/dist/stream.js +4 -2
- package/dist/stream.js.map +1 -1
- package/dist/types.d.ts +17 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/validation.d.ts +8 -0
- package/dist/utils/validation.d.ts.map +1 -1
- package/dist/utils/validation.js +14 -0
- package/dist/utils/validation.js.map +1 -1
- package/package.json +4 -4
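
The headline change: the per-provider baseUrl sniffing in openai-completions.js is consolidated into a resolved "compat" object that models can override (a new OpenAICompat type in types.d.ts), tool-argument validation is pulled out of this provider (the import and call are removed below), and reasoningEffort gains an "xhigh" level. The sketch below is inferred from how getCompat() and detectCompatFromUrl() use the object later in this diff; the actual declaration shipped in package/dist/types.d.ts may differ in naming or optionality.

// Sketch (inferred, not copied from types.d.ts): the compat knobs resolved per model.
interface OpenAICompat {
    supportsStore?: boolean;            // endpoint accepts the "store" param
    supportsDeveloperRole?: boolean;    // endpoint accepts role: "developer"
    supportsReasoningEffort?: boolean;  // endpoint accepts reasoning_effort
    maxTokensField?: "max_tokens" | "max_completion_tokens";
}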
package/dist/providers/openai-completions.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"openai-completions.d.ts","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],…}
+{"version":3,"file":"openai-completions.d.ts","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],…} (regenerated; the embedded source now imports OpenAICompat and adds "xhigh" to reasoningEffort)
package/dist/providers/openai-completions.js
@@ -3,8 +3,25 @@ import { calculateCost } from "../models.js";
 import { AssistantMessageEventStream } from "../utils/event-stream.js";
 import { parseStreamingJson } from "../utils/json-parse.js";
 import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
-import { validateToolArguments } from "../utils/validation.js";
 import { transformMessages } from "./transorm-messages.js";
+/**
+ * Check if conversation messages contain tool calls or tool results.
+ * This is needed because Anthropic (via proxy) requires the tools param
+ * to be present when messages include tool_calls or tool role messages.
+ */
+function hasToolHistory(messages) {
+    for (const msg of messages) {
+        if (msg.role === "toolResult") {
+            return true;
+        }
+        if (msg.role === "assistant") {
+            if (msg.content.some((block) => block.type === "toolCall")) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
 export const streamOpenAICompletions = (model, context, options) => {
     const stream = new AssistantMessageEventStream();
     (async () => {
@@ -53,13 +70,6 @@ export const streamOpenAICompletions = (model, context, options) => {
                 }
                 else if (block.type === "toolCall") {
                     block.arguments = JSON.parse(block.partialArgs || "{}");
-                    // Validate tool arguments if tool definition is available
-                    if (context.tools) {
-                        const tool = context.tools.find((t) => t.name === block.name);
-                        if (tool) {
-                            block.arguments = validateToolArguments(tool, block);
-                        }
-                    }
                     delete block.partialArgs;
                     stream.push({
                         type: "toolcall_end",
@@ -224,54 +234,48 @@ function createClient(model, apiKey) {
     });
 }
 function buildParams(model, context, options) {
-    const messages = convertMessages(model, context);
+    const compat = getCompat(model);
+    const messages = convertMessages(model, context, compat);
     const params = {
         model: model.id,
         messages,
         stream: true,
         stream_options: { include_usage: true },
     };
-    // Cerebras/xAI/Mistral dont like the "store" field
-    if (!model.baseUrl.includes("cerebras.ai") &&
-        !model.baseUrl.includes("api.x.ai") &&
-        !model.baseUrl.includes("mistral.ai") &&
-        !model.baseUrl.includes("chutes.ai")) {
+    if (compat.supportsStore) {
         params.store = false;
     }
     if (options?.maxTokens) {
-        // Mistral/Chutes uses max_tokens instead of max_completion_tokens
-        if (model.baseUrl.includes("mistral.ai") || model.baseUrl.includes("chutes.ai")) {
-            params.max_tokens = options?.maxTokens;
+        if (compat.maxTokensField === "max_tokens") {
+            params.max_tokens = options.maxTokens;
         }
         else {
-            params.max_completion_tokens = options?.maxTokens;
+            params.max_completion_tokens = options.maxTokens;
         }
     }
     if (options?.temperature !== undefined) {
-        params.temperature = options?.temperature;
+        params.temperature = options.temperature;
     }
     if (context.tools) {
         params.tools = convertTools(context.tools);
     }
+    else if (hasToolHistory(context.messages)) {
+        // Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results
+        params.tools = [];
+    }
     if (options?.toolChoice) {
         params.tool_choice = options.toolChoice;
     }
-    // Grok models don't like reasoning_effort
-    if (options?.reasoningEffort && model.reasoning && !model.id.toLowerCase().includes("grok")) {
+    if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {
         params.reasoning_effort = options.reasoningEffort;
     }
     return params;
 }
-function convertMessages(model, context) {
+function convertMessages(model, context, compat) {
     const params = [];
     const transformedMessages = transformMessages(context.messages, model);
     if (context.systemPrompt) {
-        // Cerebras/xAi/Mistral/Chutes don't like the "developer" role
-        const useDeveloperRole = model.reasoning &&
-            !model.baseUrl.includes("cerebras.ai") &&
-            !model.baseUrl.includes("api.x.ai") &&
-            !model.baseUrl.includes("mistral.ai") &&
-            !model.baseUrl.includes("chutes.ai");
+        const useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;
         const role = useDeveloperRole ? "developer" : "system";
         params.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });
     }
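
A minimal sketch of the new tools fallback above, using message shapes as this file consumes them (user string content, assistant toolCall blocks, toolResult with toolCallId); the names and values are hypothetical:

// Hypothetical conversation: the current request defines no tools,
// but the history already contains a tool call and its result.
const context = {
    messages: [
        { role: "user", content: "List ./src" },
        {
            role: "assistant",
            content: [{ type: "toolCall", id: "call_1", name: "ls", arguments: { path: "./src" } }],
        },
        { role: "toolResult", toolCallId: "call_1", content: [{ type: "text", text: "index.ts" }] },
        { role: "user", content: "Summarize that." },
    ],
};
// hasToolHistory(context.messages) === true, so buildParams() now sends
// tools: [] rather than omitting the field, which strict proxies reject.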
@@ -418,4 +422,37 @@ function mapStopReason(reason) {
         }
     }
 }
+/**
+ * Detect compatibility settings from baseUrl for known providers.
+ * Returns a fully resolved OpenAICompat object with all fields set.
+ */
+function detectCompatFromUrl(baseUrl) {
+    const isNonStandard = baseUrl.includes("cerebras.ai") ||
+        baseUrl.includes("api.x.ai") ||
+        baseUrl.includes("mistral.ai") ||
+        baseUrl.includes("chutes.ai");
+    const useMaxTokens = baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai");
+    const isGrok = baseUrl.includes("api.x.ai");
+    return {
+        supportsStore: !isNonStandard,
+        supportsDeveloperRole: !isNonStandard,
+        supportsReasoningEffort: !isGrok,
+        maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
+    };
+}
+/**
+ * Get resolved compatibility settings for a model.
+ * Uses explicit model.compat if provided, otherwise auto-detects from URL.
+ */
+function getCompat(model) {
+    const detected = detectCompatFromUrl(model.baseUrl);
+    if (!model.compat)
+        return detected;
+    return {
+        supportsStore: model.compat.supportsStore ?? detected.supportsStore,
+        supportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,
+        supportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,
+        maxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,
+    };
+}
 //# sourceMappingURL=openai-completions.js.map
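
Usage sketch for the override path in getCompat() above: a hypothetical model entry pins two compat fields while the unset ones still fall back to URL detection. Only compat-relevant fields are shown.

// Hypothetical Model<"openai-completions"> entry (other fields elided).
const model = {
    id: "my-proxy-model",
    baseUrl: "https://my-litellm-proxy.example.com/v1",
    reasoning: true,
    compat: {
        supportsDeveloperRole: false,  // force role: "system" for the system prompt
        maxTokensField: "max_tokens",  // proxy rejects max_completion_tokens
    },
    // ...
};
// getCompat(model) keeps the explicit fields and resolves supportsStore and
// supportsReasoningEffort from detectCompatFromUrl(model.baseUrl).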
package/dist/providers/openai-completions.js.map
@@ -1 +1 @@
-{"version":3,"file":"openai-completions.js","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],…} (regenerated source map; remainder of the diff is truncated)
block);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null &&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai compatible endpoints)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\"];\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\t\tthinkingSignature: field,\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"thinking\") 
{\n\t\t\t\t\t\t\t\tconst delta = (choice.delta as any)[field];\n\t\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif (toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unkown error ocurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nfunction createClient(model: Model<\"openai-completions\">, apiKey?: string) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. 
Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: model.headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst messages = convertMessages(model, context);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t\tstream_options: { include_usage: true },\n\t};\n\n\t// Cerebras/xAI/Mistral dont like the \"store\" field\n\tif (\n\t\t!model.baseUrl.includes(\"cerebras.ai\") &&\n\t\t!model.baseUrl.includes(\"api.x.ai\") &&\n\t\t!model.baseUrl.includes(\"mistral.ai\") &&\n\t\t!model.baseUrl.includes(\"chutes.ai\")\n\t) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\t// Mistral/Chutes uses max_tokens instead of max_completion_tokens\n\t\tif (model.baseUrl.includes(\"mistral.ai\") || model.baseUrl.includes(\"chutes.ai\")) {\n\t\t\t(params as any).max_tokens = options?.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options?.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options?.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\t// Grok models don't like reasoning_effort\n\tif (options?.reasoningEffort && model.reasoning && !model.id.toLowerCase().includes(\"grok\")) {\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\treturn params;\n}\n\nfunction convertMessages(model: Model<\"openai-completions\">, context: Context): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst transformedMessages = transformMessages(context.messages, model);\n\n\tif (context.systemPrompt) {\n\t\t// Cerebras/xAi/Mistral/Chutes don't like the \"developer\" role\n\t\tconst useDeveloperRole =\n\t\t\tmodel.reasoning &&\n\t\t\t!model.baseUrl.includes(\"cerebras.ai\") &&\n\t\t\t!model.baseUrl.includes(\"api.x.ai\") &&\n\t\t\t!model.baseUrl.includes(\"mistral.ai\") &&\n\t\t\t!model.baseUrl.includes(\"chutes.ai\");\n\t\tconst role = useDeveloperRole ? \"developer\" : \"system\";\n\t\tparams.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });\n\t}\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: sanitizeSurrogates(msg.content),\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? 
content.filter((c) => c.type !== \"image_url\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\tconst assistantMsg: ChatCompletionAssistantMessageParam = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: null,\n\t\t\t};\n\n\t\t\tconst textBlocks = msg.content.filter((b) => b.type === \"text\") as TextContent[];\n\t\t\tif (textBlocks.length > 0) {\n\t\t\t\tassistantMsg.content = textBlocks.map((b) => {\n\t\t\t\t\treturn { type: \"text\", text: sanitizeSurrogates(b.text) };\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Handle thinking blocks for llama.cpp server + gpt-oss\n\t\t\tconst thinkingBlocks = msg.content.filter((b) => b.type === \"thinking\") as ThinkingContent[];\n\t\t\tif (thinkingBlocks.length > 0) {\n\t\t\t\t// Use the signature from the first thinking block if available\n\t\t\t\tconst signature = thinkingBlocks[0].thinkingSignature;\n\t\t\t\tif (signature && signature.length > 0) {\n\t\t\t\t\t(assistantMsg as any)[signature] = thinkingBlocks.map((b) => b.thinking).join(\"\\n\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst toolCalls = msg.content.filter((b) => b.type === \"toolCall\") as ToolCall[];\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tassistantMsg.tool_calls = toolCalls.map((tc) => ({\n\t\t\t\t\tid: tc.id,\n\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\tfunction: {\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targuments: JSON.stringify(tc.arguments),\n\t\t\t\t\t},\n\t\t\t\t}));\n\t\t\t}\n\t\t\tif (assistantMsg.content === null && !assistantMsg.tool_calls) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tparams.push(assistantMsg);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textResult = msg.content\n\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t.join(\"\\n\");\n\t\t\tconst hasImages = msg.content.some((c) => c.type === \"image\");\n\n\t\t\t// Always send tool result with text (or placeholder if only images)\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\tparams.push({\n\t\t\t\trole: \"tool\",\n\t\t\t\tcontent: sanitizeSurrogates(hasText ? 
textResult : \"(see attached image)\"),\n\t\t\t\ttool_call_id: msg.toolCallId,\n\t\t\t});\n\n\t\t\t// If there are images and model supports them, send a follow-up user message with images\n\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\tconst contentBlocks: Array<\n\t\t\t\t\t{ type: \"text\"; text: string } | { type: \"image_url\"; image_url: { url: string } }\n\t\t\t\t> = [];\n\n\t\t\t\t// Add text prefix\n\t\t\t\tcontentBlocks.push({\n\t\t\t\t\ttype: \"text\",\n\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t});\n\n\t\t\t\t// Add images\n\t\t\t\tfor (const block of msg.content) {\n\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\tcontentBlocks.push({\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: contentBlocks,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\t},\n\t}));\n}\n\nfunction mapStopReason(reason: ChatCompletionChunk.Choice[\"finish_reason\"]): StopReason {\n\tif (reason === null) return \"stop\";\n\tswitch (reason) {\n\t\tcase \"stop\":\n\t\t\treturn \"stop\";\n\t\tcase \"length\":\n\t\t\treturn \"length\";\n\t\tcase \"function_call\":\n\t\tcase \"tool_calls\":\n\t\t\treturn \"toolUse\";\n\t\tcase \"content_filter\":\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n"]}
+
{"version":3,"file":"openai-completions.js","sourceRoot":"","sources":["../../src/providers/openai-completions.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAS5B,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AAe7C,OAAO,EAAE,2BAA2B,EAAE,MAAM,0BAA0B,CAAC;AACvE,OAAO,EAAE,kBAAkB,EAAE,MAAM,wBAAwB,CAAC;AAC5D,OAAO,EAAE,kBAAkB,EAAE,MAAM,8BAA8B,CAAC;AAClE,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAE3D;;;;GAIG;AACH,SAAS,cAAc,CAAC,QAAmB,EAAW;IACrD,KAAK,MAAM,GAAG,IAAI,QAAQ,EAAE,CAAC;QAC5B,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YAC/B,OAAO,IAAI,CAAC;QACb,CAAC;QACD,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YAC9B,IAAI,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,KAAK,UAAU,CAAC,EAAE,CAAC;gBAC5D,OAAO,IAAI,CAAC;YACb,CAAC;QACF,CAAC;IACF,CAAC;IACD,OAAO,KAAK,CAAC;AAAA,CACb;AAOD,MAAM,CAAC,MAAM,uBAAuB,GAAyC,CAC5E,KAAkC,EAClC,OAAgB,EAChB,OAAkC,EACJ,EAAE,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,2BAA2B,EAAE,CAAC;IAEjD,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,MAAM,GAAqB;YAChC,IAAI,EAAE,WAAW;YACjB,OAAO,EAAE,EAAE;YACX,GAAG,EAAE,KAAK,CAAC,GAAG;YACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;YACxB,KAAK,EAAE,KAAK,CAAC,EAAE;YACf,KAAK,EAAE;gBACN,KAAK,EAAE,CAAC;gBACR,MAAM,EAAE,CAAC;gBACT,SAAS,EAAE,CAAC;gBACZ,UAAU,EAAE,CAAC;gBACb,WAAW,EAAE,CAAC;gBACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;aACpE;YACD,UAAU,EAAE,MAAM;YAClB,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;SACrB,CAAC;QAEF,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;YACpD,MAAM,MAAM,GAAG,WAAW,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YACpD,MAAM,YAAY,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,MAAM,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAC/F,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YAEhD,IAAI,YAAY,GAAiF,IAAI,CAAC;YACtG,MAAM,MAAM,GAAG,MAAM,CAAC,OAAO,CAAC;YAC9B,MAAM,UAAU,GAAG,GAAG,EAAE,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC;YAC3C,MAAM,kBAAkB,GAAG,CAAC,KAA2B,EAAE,EAAE,CAAC;gBAC3D,IAAI,KAAK,EAAE,CAAC;oBACX,IAAI,KAAK,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC3B,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,UAAU;4BAChB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,IAAI;4BACnB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,OAAO,EAAE,KAAK,CAAC,QAAQ;4BACvB,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;yBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;wBACtC,KAAK,CAAC,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,WAAW,IAAI,IAAI,CAAC,CAAC;wBACxD,OAAO,KAAK,CAAC,WAAW,CAAC;wBACzB,MAAM,CAAC,IAAI,CAAC;4BACX,IAAI,EAAE,cAAc;4BACpB,YAAY,EAAE,UAAU,EAAE;4BAC1B,QAAQ,EAAE,KAAK;4BACf,OAAO,EAAE,MAAM;yBACf,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;YAAA,CACD,CAAC;YAEF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,YAAY,EAAE,CAAC;gBACxC,IAAI,KAAK,CAAC,KAAK,EAAE,CAAC;oBACjB,MAAM,YAAY,GAAG,KAAK,CAAC,KAAK,CAAC,qBAAqB,EAAE,aAAa,IAAI,CAAC,CAAC;oBAC3E,MAAM,eAAe,GAAG,KAAK,CAAC,KAAK,CAAC,yBAAyB,EAAE,gBAAgB,IAAI,CAAC,CAAC;oBACrF,MAAM,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,aAAa,IAAI,CAAC,CAAC,GAAG,YAAY,CAAC;oBAC9D,MAAM,YAAY,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,iBAAiB,IAAI,CAAC,CAAC,GAAG,eAAe,CAAC;oBAC5E,MAAM,CAAC,KAAK,GAAG;wBACd,sFAAsF;wBACtF,KAAK;wBACL,MAAM,EAAE,YAAY;wBACpB,SAAS,EAAE,YAAY;wBACvB,UAAU,EAAE,CAAC;wBACb,wEAAwE;wBACxE,qEAAqE;wBACrE,WAAW,EAAE,KAAK,GAAG,YAAY,GAAG,YAAY;wBAChD,IAAI,EAAE;4BACL,KAAK,EAAE,CAAC;4BACR,MAAM,EAAE,CAAC;4BACT,SAAS,EAAE,CAAC;4BACZ,UAAU,EAAE,CAAC;4BACb,KAAK,EAAE,CAAC;yBACR;qBACD,CAAC;oBACF,aAAa,CAAC,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,CAAC;gBACpC,CAAC;gBAED,MAAM,MAAM,GAAG,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,CAA
C;gBAChC,IAAI,CAAC,MAAM;oBAAE,SAAS;gBAEtB,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;oBAC1B,MAAM,CAAC,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC;gBACzD,CAAC;gBAED,IAAI,MAAM,CAAC,KAAK,EAAE,CAAC;oBAClB,IACC,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,IAAI;wBAC7B,MAAM,CAAC,KAAK,CAAC,OAAO,KAAK,SAAS;wBAClC,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAC9B,CAAC;wBACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BACnD,kBAAkB,CAAC,YAAY,CAAC,CAAC;4BACjC,YAAY,GAAG,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,EAAE,EAAE,CAAC;4BAC1C,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;4BAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;wBAClF,CAAC;wBAED,IAAI,YAAY,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;4BAClC,YAAY,CAAC,IAAI,IAAI,MAAM,CAAC,KAAK,CAAC,OAAO,CAAC;4BAC1C,MAAM,CAAC,IAAI,CAAC;gCACX,IAAI,EAAE,YAAY;gCAClB,YAAY,EAAE,UAAU,EAAE;gCAC1B,KAAK,EAAE,MAAM,CAAC,KAAK,CAAC,OAAO;gCAC3B,OAAO,EAAE,MAAM;6BACf,CAAC,CAAC;wBACJ,CAAC;oBACF,CAAC;oBAED,oEAAoE;oBACpE,mDAAmD;oBACnD,MAAM,eAAe,GAAG,CAAC,mBAAmB,EAAE,WAAW,CAAC,CAAC;oBAC3D,KAAK,MAAM,KAAK,IAAI,eAAe,EAAE,CAAC;wBACrC,IACE,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,IAAI;4BACpC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,KAAK,SAAS;4BACzC,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC,MAAM,GAAG,CAAC,EACtC,CAAC;4BACF,IAAI,CAAC,YAAY,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACvD,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,QAAQ,EAAE,EAAE;oCACZ,iBAAiB,EAAE,KAAK;iCACxB,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,MAAM,KAAK,GAAI,MAAM,CAAC,KAAa,CAAC,KAAK,CAAC,CAAC;gCAC3C,YAAY,CAAC,QAAQ,IAAI,KAAK,CAAC;gCAC/B,MAAM,CAAC,IAAI,CAAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;oBAED,IAAI,MAAM,EAAE,KAAK,EAAE,UAAU,EAAE,CAAC;wBAC/B,KAAK,MAAM,QAAQ,IAAI,MAAM,CAAC,KAAK,CAAC,UAAU,EAAE,CAAC;4BAChD,IACC,CAAC,YAAY;gCACb,YAAY,CAAC,IAAI,KAAK,UAAU;gCAChC,CAAC,QAAQ,CAAC,EAAE,IAAI,YAAY,CAAC,EAAE,KAAK,QAAQ,CAAC,EAAE,CAAC,EAC/C,CAAC;gCACF,kBAAkB,CAAC,YAAY,CAAC,CAAC;gCACjC,YAAY,GAAG;oCACd,IAAI,EAAE,UAAU;oCAChB,EAAE,EAAE,QAAQ,CAAC,EAAE,IAAI,EAAE;oCACrB,IAAI,EAAE,QAAQ,CAAC,QAAQ,EAAE,IAAI,IAAI,EAAE;oCACnC,SAAS,EAAE,EAAE;oCACb,WAAW,EAAE,EAAE;iCACf,CAAC;gCACF,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gCAClC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,gBAAgB,EAAE,YAAY,EAAE,UAAU,EAAE,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;4BACtF,CAAC;4BAED,IAAI,YAAY,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;gCACtC,IAAI,QAAQ,CAAC,EAAE;oCAAE,YAAY,CAAC,EAAE,GAAG,QAAQ,CAAC,EAAE,CAAC;gCAC/C,IAAI,QAAQ,CAAC,QAAQ,EAAE,IAAI;oCAAE,YAAY,CAAC,IAAI,GAAG,QAAQ,CAAC,QAAQ,CAAC,IAAI,CAAC;gCACxE,IAAI,KAAK,GAAG,EAAE,CAAC;gCACf,IAAI,QAAQ,CAAC,QAAQ,EAAE,SAAS,EAAE,CAAC;oCAClC,KAAK,GAAG,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACpC,YAAY,CAAC,WAAW,IAAI,QAAQ,CAAC,QAAQ,CAAC,SAAS,CAAC;oCACxD,YAAY,CAAC,SAAS,GAAG,kBAAkB,CAAC,YAAY,CAAC,WAAW,CAAC,CAAC;gCACvE,CAAC;gCACD,MAAM,CAAC,IAAI,CAAC;oCACX,IAAI,EAAE,gBAAgB;oCACtB,YAAY,EAAE,UAAU,EAAE;oCAC1B,KAAK;oCACL,OAAO,EAAE,MAAM;iCACf,CAAC,CAAC;4BACJ,CAAC;wBACF,CAAC;oBACF,CAAC;gBACF,CAAC;YACF,CAAC;YAED,kBAAkB,CAAC,YAAY,CAAC,CAAC;YAEjC,IAAI,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC;gBAC9B,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;YACxC,CAAC;YAED,IAAI,MAAM,CAAC,UAAU,KAAK,SAAS,IAAI,MAAM,CAAC,UAAU,KAAK,OAAO,EAAE,CAAC;gBACtE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;YAC5C,CAAC;YAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;
YAC1E,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YAChB,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,OAAO;gBAAE,OAAQ,KAAa,CAAC,KAAK,CAAC;YAChE,MAAM,CAAC,UAAU,GAAG,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC;YACnE,MAAM,CAAC,YAAY,GAAG,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC;YACrF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,UAAU,EAAE,KAAK,EAAE,MAAM,EAAE,CAAC,CAAC;YACzE,MAAM,CAAC,GAAG,EAAE,CAAC;QACd,CAAC;IAAA,CACD,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd,CAAC;AAEF,SAAS,YAAY,CAAC,KAAkC,EAAE,MAAe,EAAE;IAC1E,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC;YACjC,MAAM,IAAI,KAAK,CACd,gGAAgG,CAChG,CAAC;QACH,CAAC;QACD,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC;IACrC,CAAC;IACD,OAAO,IAAI,MAAM,CAAC;QACjB,MAAM;QACN,OAAO,EAAE,KAAK,CAAC,OAAO;QACtB,uBAAuB,EAAE,IAAI;QAC7B,cAAc,EAAE,KAAK,CAAC,OAAO;KAC7B,CAAC,CAAC;AAAA,CACH;AAED,SAAS,WAAW,CAAC,KAAkC,EAAE,OAAgB,EAAE,OAAkC,EAAE;IAC9G,MAAM,MAAM,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC;IAChC,MAAM,QAAQ,GAAG,eAAe,CAAC,KAAK,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC;IAEzD,MAAM,MAAM,GAAgE;QAC3E,KAAK,EAAE,KAAK,CAAC,EAAE;QACf,QAAQ;QACR,MAAM,EAAE,IAAI;QACZ,cAAc,EAAE,EAAE,aAAa,EAAE,IAAI,EAAE;KACvC,CAAC;IAEF,IAAI,MAAM,CAAC,aAAa,EAAE,CAAC;QAC1B,MAAM,CAAC,KAAK,GAAG,KAAK,CAAC;IACtB,CAAC;IAED,IAAI,OAAO,EAAE,SAAS,EAAE,CAAC;QACxB,IAAI,MAAM,CAAC,cAAc,KAAK,YAAY,EAAE,CAAC;YAC3C,MAAc,CAAC,UAAU,GAAG,OAAO,CAAC,SAAS,CAAC;QAChD,CAAC;aAAM,CAAC;YACP,MAAM,CAAC,qBAAqB,GAAG,OAAO,CAAC,SAAS,CAAC;QAClD,CAAC;IACF,CAAC;IAED,IAAI,OAAO,EAAE,WAAW,KAAK,SAAS,EAAE,CAAC;QACxC,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,CAAC;IAC1C,CAAC;IAED,IAAI,OAAO,CAAC,KAAK,EAAE,CAAC;QACnB,MAAM,CAAC,KAAK,GAAG,YAAY,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;IAC5C,CAAC;SAAM,IAAI,cAAc,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC7C,mGAAmG;QACnG,MAAM,CAAC,KAAK,GAAG,EAAE,CAAC;IACnB,CAAC;IAED,IAAI,OAAO,EAAE,UAAU,EAAE,CAAC;QACzB,MAAM,CAAC,WAAW,GAAG,OAAO,CAAC,UAAU,CAAC;IACzC,CAAC;IAED,IAAI,OAAO,EAAE,eAAe,IAAI,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,uBAAuB,EAAE,CAAC;QACnF,MAAM,CAAC,gBAAgB,GAAG,OAAO,CAAC,eAAe,CAAC;IACnD,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,eAAe,CACvB,KAAkC,EAClC,OAAgB,EAChB,MAA8B,EACC;IAC/B,MAAM,MAAM,GAAiC,EAAE,CAAC;IAEhD,MAAM,mBAAmB,GAAG,iBAAiB,CAAC,OAAO,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;IAEvE,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC1B,MAAM,gBAAgB,GAAG,KAAK,CAAC,SAAS,IAAI,MAAM,CAAC,qBAAqB,CAAC;QACzE,MAAM,IAAI,GAAG,gBAAgB,CAAC,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC,QAAQ,CAAC;QACvD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC,CAAC;IAChF,CAAC;IAED,KAAK,MAAM,GAAG,IAAI,mBAAmB,EAAE,CAAC;QACvC,IAAI,GAAG,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;YACzB,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;gBACrC,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,kBAAkB,CAAC,GAAG,CAAC,OAAO,CAAC;iBACxC,CAAC,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACP,MAAM,OAAO,GAAgC,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,IAAI,EAA6B,EAAE,CAAC;oBACjG,IAAI,IAAI,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC1B,OAAO;4BACN,IAAI,EAAE,MAAM;4BACZ,IAAI,EAAE,kBAAkB,CAAC,IAAI,CAAC,IAAI,CAAC;yBACK,CAAC;oBAC3C,CAAC;yBAAM,CAAC;wBACP,OAAO;4BACN,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAQ,IAAI,CAAC,QAAQ,WAAW,IAAI,CAAC,IAAI,EAAE;6BAChD;yBACwC,CAAC;oBAC5C,CAAC;gBAAA,CACD,CAAC,CAAC;gBACH,MAAM,eAAe,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC;oBACrD,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,CAAC;oBAC/C,CAAC,CAAC,OAAO,CAAC;gBACX,IAAI,eAAe,CAAC,MAAM,KAAK,CAAC;oBAAE,SAAS;gBAC3C,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,eAAe;iBACxB,CA
AC,CAAC;YACJ,CAAC;QACF,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACrC,MAAM,YAAY,GAAwC;gBACzD,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,IAAI;aACb,CAAC;YAEF,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAkB,CAAC;YACjF,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC3B,YAAY,CAAC,OAAO,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;oBAC5C,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,kBAAkB,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC;gBAAA,CAC1D,CAAC,CAAC;YACJ,CAAC;YAED,wDAAwD;YACxD,MAAM,cAAc,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAsB,CAAC;YAC7F,IAAI,cAAc,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC/B,+DAA+D;gBAC/D,MAAM,SAAS,GAAG,cAAc,CAAC,CAAC,CAAC,CAAC,iBAAiB,CAAC;gBACtD,IAAI,SAAS,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBACtC,YAAoB,CAAC,SAAS,CAAC,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;gBACrF,CAAC;YACF,CAAC;YAED,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAe,CAAC;YACjF,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC1B,YAAY,CAAC,UAAU,GAAG,SAAS,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC;oBAChD,EAAE,EAAE,EAAE,CAAC,EAAE;oBACT,IAAI,EAAE,UAAmB;oBACzB,QAAQ,EAAE;wBACT,IAAI,EAAE,EAAE,CAAC,IAAI;wBACb,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,SAAS,CAAC;qBACvC;iBACD,CAAC,CAAC,CAAC;YACL,CAAC;YACD,IAAI,YAAY,CAAC,OAAO,KAAK,IAAI,IAAI,CAAC,YAAY,CAAC,UAAU,EAAE,CAAC;gBAC/D,SAAS;YACV,CAAC;YACD,MAAM,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAC3B,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YACtC,iCAAiC;YACjC,MAAM,UAAU,GAAG,GAAG,CAAC,OAAO;iBAC5B,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC;iBAChC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAE,CAAS,CAAC,IAAI,CAAC;iBAC3B,IAAI,CAAC,IAAI,CAAC,CAAC;YACb,MAAM,SAAS,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC,CAAC;YAE9D,oEAAoE;YACpE,MAAM,OAAO,GAAG,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC;YACtC,MAAM,CAAC,IAAI,CAAC;gBACX,IAAI,EAAE,MAAM;gBACZ,OAAO,EAAE,kBAAkB,CAAC,OAAO,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,sBAAsB,CAAC;gBAC1E,YAAY,EAAE,GAAG,CAAC,UAAU;aAC5B,CAAC,CAAC;YAEH,yFAAyF;YACzF,IAAI,SAAS,IAAI,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE,CAAC;gBAChD,MAAM,aAAa,GAEf,EAAE,CAAC;gBAEP,kBAAkB;gBAClB,aAAa,CAAC,IAAI,CAAC;oBAClB,IAAI,EAAE,MAAM;oBACZ,IAAI,EAAE,qCAAqC;iBAC3C,CAAC,CAAC;gBAEH,aAAa;gBACb,KAAK,MAAM,KAAK,IAAI,GAAG,CAAC,OAAO,EAAE,CAAC;oBACjC,IAAI,KAAK,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;wBAC5B,aAAa,CAAC,IAAI,CAAC;4BAClB,IAAI,EAAE,WAAW;4BACjB,SAAS,EAAE;gCACV,GAAG,EAAE,QAAS,KAAa,CAAC,QAAQ,WAAY,KAAa,CAAC,IAAI,EAAE;6BACpE;yBACD,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;gBAED,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,MAAM;oBACZ,OAAO,EAAE,aAAa;iBACtB,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,YAAY,CAAC,KAAa,EAAgD;IAClF,OAAO,KAAK,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC;QAC3B,IAAI,EAAE,UAAU;QAChB,QAAQ,EAAE;YACT,IAAI,EAAE,IAAI,CAAC,IAAI;YACf,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,UAAU,EAAE,IAAI,CAAC,UAAiB,EAAE,wCAAwC;SAC5E;KACD,CAAC,CAAC,CAAC;AAAA,CACJ;AAED,SAAS,aAAa,CAAC,MAAmD,EAAc;IACvF,IAAI,MAAM,KAAK,IAAI;QAAE,OAAO,MAAM,CAAC;IACnC,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,MAAM;YACV,OAAO,MAAM,CAAC;QACf,KAAK,QAAQ;YACZ,OAAO,QAAQ,CAAC;QACjB,KAAK,eAAe,CAAC;QACrB,KAAK,YAAY;YAChB,OAAO,SAAS,CAAC;QAClB,KAAK,gBAAgB;YACpB,OAAO,OAAO,CAAC;QAChB,SAAS,CAAC;YACT,MAAM,WAAW,GAAU,MAAM,CAAC;YAClC,MAAM,IAAI,KAAK,CAAC,0BAA0B,WAAW,EAAE,CAAC,CAAC;QAC1D,CAAC;IACF,CAAC;AAAA,CACD;AAED;;;GAGG;AACH,SAAS,mBAAmB,CAAC,OAAe,EAA0B;IACrE,MAAM,aAAa,GAClB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAC;QAC/B,OAAO,CAAC
,QAAQ,CAAC,UAAU,CAAC;QAC5B,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC;QAC9B,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAE/B,MAAM,YAAY,GAAG,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,IAAI,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;IAErF,MAAM,MAAM,GAAG,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC;IAE5C,OAAO;QACN,aAAa,EAAE,CAAC,aAAa;QAC7B,qBAAqB,EAAE,CAAC,aAAa;QACrC,uBAAuB,EAAE,CAAC,MAAM;QAChC,cAAc,EAAE,YAAY,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,uBAAuB;KACrE,CAAC;AAAA,CACF;AAED;;;GAGG;AACH,SAAS,SAAS,CAAC,KAAkC,EAA0B;IAC9E,MAAM,QAAQ,GAAG,mBAAmB,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;IACpD,IAAI,CAAC,KAAK,CAAC,MAAM;QAAE,OAAO,QAAQ,CAAC;IAEnC,OAAO;QACN,aAAa,EAAE,KAAK,CAAC,MAAM,CAAC,aAAa,IAAI,QAAQ,CAAC,aAAa;QACnE,qBAAqB,EAAE,KAAK,CAAC,MAAM,CAAC,qBAAqB,IAAI,QAAQ,CAAC,qBAAqB;QAC3F,uBAAuB,EAAE,KAAK,CAAC,MAAM,CAAC,uBAAuB,IAAI,QAAQ,CAAC,uBAAuB;QACjG,cAAc,EAAE,KAAK,CAAC,MAAM,CAAC,cAAc,IAAI,QAAQ,CAAC,cAAc;KACtE,CAAC;AAAA,CACF","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tChatCompletionAssistantMessageParam,\n\tChatCompletionChunk,\n\tChatCompletionContentPart,\n\tChatCompletionContentPartImage,\n\tChatCompletionContentPartText,\n\tChatCompletionMessageParam,\n} from \"openai/resources/chat/completions.js\";\nimport { calculateCost } from \"../models.js\";\nimport type {\n\tAssistantMessage,\n\tContext,\n\tMessage,\n\tModel,\n\tOpenAICompat,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { transformMessages } from \"./transorm-messages.js\";\n\n/**\n * Check if conversation messages contain tool calls or tool results.\n * This is needed because Anthropic (via proxy) requires the tools param\n * to be present when messages include tool_calls or tool role messages.\n */\nfunction hasToolHistory(messages: Message[]): boolean {\n\tfor (const msg of messages) {\n\t\tif (msg.role === \"toolResult\") {\n\t\t\treturn true;\n\t\t}\n\t\tif (msg.role === \"assistant\") {\n\t\t\tif (msg.content.some((block) => block.type === \"toolCall\")) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\nexport interface OpenAICompletionsOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"required\" | { type: \"function\"; function: { name: string } };\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\nexport const streamOpenAICompletions: StreamFunction<\"openai-completions\"> = (\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\toptions?: OpenAICompletionsOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: model.api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst client = createClient(model, options?.apiKey);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\tconst openaiStream = await client.chat.completions.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", 
partial: output });\n\n\t\t\tlet currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tconst finishCurrentBlock = (block?: typeof currentBlock) => {\n\t\t\t\tif (block) {\n\t\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: block.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\t\tblock.arguments = JSON.parse(block.partialArgs || \"{}\");\n\t\t\t\t\t\tdelete block.partialArgs;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\ttoolCall: block,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tfor await (const chunk of openaiStream) {\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tconst cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;\n\t\t\t\t\tconst reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;\n\t\t\t\t\tconst input = (chunk.usage.prompt_tokens || 0) - cachedTokens;\n\t\t\t\t\tconst outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t// OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\toutput: outputTokens,\n\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t// Compute totalTokens ourselves since we add reasoning_tokens to output\n\t\t\t\t\t\t// and some providers (e.g., Groq) don't include them in total_tokens\n\t\t\t\t\t\ttotalTokens: input + outputTokens + cachedTokens,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\n\t\t\t\tconst choice = chunk.choices[0];\n\t\t\t\tif (!choice) continue;\n\n\t\t\t\tif (choice.finish_reason) {\n\t\t\t\t\toutput.stopReason = mapStopReason(choice.finish_reason);\n\t\t\t\t}\n\n\t\t\t\tif (choice.delta) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tchoice.delta.content !== null &&\n\t\t\t\t\t\tchoice.delta.content !== undefined &&\n\t\t\t\t\t\tchoice.delta.content.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"text\") {\n\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += choice.delta.content;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: choice.delta.content,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Some endpoints return reasoning in reasoning_content (llama.cpp),\n\t\t\t\t\t// or reasoning (other openai 
compatible endpoints)\n\t\t\t\t\tconst reasoningFields = [\"reasoning_content\", \"reasoning\"];\n\t\t\t\t\tfor (const field of reasoningFields) {\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== null &&\n\t\t\t\t\t\t\t(choice.delta as any)[field] !== undefined &&\n\t\t\t\t\t\t\t(choice.delta as any)[field].length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tif (!currentBlock || currentBlock.type !== \"thinking\") {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"thinking\",\n\t\t\t\t\t\t\t\t\tthinking: \"\",\n\t\t\t\t\t\t\t\t\tthinkingSignature: field,\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\t\tconst delta = (choice.delta as any)[field];\n\t\t\t\t\t\t\t\tcurrentBlock.thinking += delta;\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (choice?.delta?.tool_calls) {\n\t\t\t\t\t\tfor (const toolCall of choice.delta.tool_calls) {\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\tcurrentBlock.type !== \"toolCall\" ||\n\t\t\t\t\t\t\t\t(toolCall.id && currentBlock.id !== toolCall.id)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tfinishCurrentBlock(currentBlock);\n\t\t\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\t\tid: toolCall.id || \"\",\n\t\t\t\t\t\t\t\t\tname: toolCall.function?.name || \"\",\n\t\t\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\t\t\tpartialArgs: \"\",\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (currentBlock.type === \"toolCall\") {\n\t\t\t\t\t\t\t\tif (toolCall.id) currentBlock.id = toolCall.id;\n\t\t\t\t\t\t\t\tif (toolCall.function?.name) currentBlock.name = toolCall.function.name;\n\t\t\t\t\t\t\t\tlet delta = \"\";\n\t\t\t\t\t\t\t\tif (toolCall.function?.arguments) {\n\t\t\t\t\t\t\t\t\tdelta = toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.partialArgs += toolCall.function.arguments;\n\t\t\t\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinishCurrentBlock(currentBlock);\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unkown error ocurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? 
error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nfunction createClient(model: Model<\"openai-completions\">, apiKey?: string) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: model.headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-completions\">, context: Context, options?: OpenAICompletionsOptions) {\n\tconst compat = getCompat(model);\n\tconst messages = convertMessages(model, context, compat);\n\n\tconst params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tmessages,\n\t\tstream: true,\n\t\tstream_options: { include_usage: true },\n\t};\n\n\tif (compat.supportsStore) {\n\t\tparams.store = false;\n\t}\n\n\tif (options?.maxTokens) {\n\t\tif (compat.maxTokensField === \"max_tokens\") {\n\t\t\t(params as any).max_tokens = options.maxTokens;\n\t\t} else {\n\t\t\tparams.max_completion_tokens = options.maxTokens;\n\t\t}\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t} else if (hasToolHistory(context.messages)) {\n\t\t// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results\n\t\tparams.tools = [];\n\t}\n\n\tif (options?.toolChoice) {\n\t\tparams.tool_choice = options.toolChoice;\n\t}\n\n\tif (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {\n\t\tparams.reasoning_effort = options.reasoningEffort;\n\t}\n\n\treturn params;\n}\n\nfunction convertMessages(\n\tmodel: Model<\"openai-completions\">,\n\tcontext: Context,\n\tcompat: Required<OpenAICompat>,\n): ChatCompletionMessageParam[] {\n\tconst params: ChatCompletionMessageParam[] = [];\n\n\tconst transformedMessages = transformMessages(context.messages, model);\n\n\tif (context.systemPrompt) {\n\t\tconst useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;\n\t\tconst role = useDeveloperRole ? \"developer\" : \"system\";\n\t\tparams.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });\n\t}\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: sanitizeSurrogates(msg.content),\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t} satisfies ChatCompletionContentPartImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? 
content.filter((c) => c.type !== \"image_url\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\tconst assistantMsg: ChatCompletionAssistantMessageParam = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: null,\n\t\t\t};\n\n\t\t\tconst textBlocks = msg.content.filter((b) => b.type === \"text\") as TextContent[];\n\t\t\tif (textBlocks.length > 0) {\n\t\t\t\tassistantMsg.content = textBlocks.map((b) => {\n\t\t\t\t\treturn { type: \"text\", text: sanitizeSurrogates(b.text) };\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Handle thinking blocks for llama.cpp server + gpt-oss\n\t\t\tconst thinkingBlocks = msg.content.filter((b) => b.type === \"thinking\") as ThinkingContent[];\n\t\t\tif (thinkingBlocks.length > 0) {\n\t\t\t\t// Use the signature from the first thinking block if available\n\t\t\t\tconst signature = thinkingBlocks[0].thinkingSignature;\n\t\t\t\tif (signature && signature.length > 0) {\n\t\t\t\t\t(assistantMsg as any)[signature] = thinkingBlocks.map((b) => b.thinking).join(\"\\n\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst toolCalls = msg.content.filter((b) => b.type === \"toolCall\") as ToolCall[];\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tassistantMsg.tool_calls = toolCalls.map((tc) => ({\n\t\t\t\t\tid: tc.id,\n\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\tfunction: {\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targuments: JSON.stringify(tc.arguments),\n\t\t\t\t\t},\n\t\t\t\t}));\n\t\t\t}\n\t\t\tif (assistantMsg.content === null && !assistantMsg.tool_calls) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tparams.push(assistantMsg);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textResult = msg.content\n\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t.join(\"\\n\");\n\t\t\tconst hasImages = msg.content.some((c) => c.type === \"image\");\n\n\t\t\t// Always send tool result with text (or placeholder if only images)\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\tparams.push({\n\t\t\t\trole: \"tool\",\n\t\t\t\tcontent: sanitizeSurrogates(hasText ? 
textResult : \"(see attached image)\"),\n\t\t\t\ttool_call_id: msg.toolCallId,\n\t\t\t});\n\n\t\t\t// If there are images and model supports them, send a follow-up user message with images\n\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\tconst contentBlocks: Array<\n\t\t\t\t\t{ type: \"text\"; text: string } | { type: \"image_url\"; image_url: { url: string } }\n\t\t\t\t> = [];\n\n\t\t\t\t// Add text prefix\n\t\t\t\tcontentBlocks.push({\n\t\t\t\t\ttype: \"text\",\n\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t});\n\n\t\t\t\t// Add images\n\t\t\t\tfor (const block of msg.content) {\n\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\tcontentBlocks.push({\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\t\turl: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tparams.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: contentBlocks,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\t},\n\t}));\n}\n\nfunction mapStopReason(reason: ChatCompletionChunk.Choice[\"finish_reason\"]): StopReason {\n\tif (reason === null) return \"stop\";\n\tswitch (reason) {\n\t\tcase \"stop\":\n\t\t\treturn \"stop\";\n\t\tcase \"length\":\n\t\t\treturn \"length\";\n\t\tcase \"function_call\":\n\t\tcase \"tool_calls\":\n\t\t\treturn \"toolUse\";\n\t\tcase \"content_filter\":\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\n/**\n * Detect compatibility settings from baseUrl for known providers.\n * Returns a fully resolved OpenAICompat object with all fields set.\n */\nfunction detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {\n\tconst isNonStandard =\n\t\tbaseUrl.includes(\"cerebras.ai\") ||\n\t\tbaseUrl.includes(\"api.x.ai\") ||\n\t\tbaseUrl.includes(\"mistral.ai\") ||\n\t\tbaseUrl.includes(\"chutes.ai\");\n\n\tconst useMaxTokens = baseUrl.includes(\"mistral.ai\") || baseUrl.includes(\"chutes.ai\");\n\n\tconst isGrok = baseUrl.includes(\"api.x.ai\");\n\n\treturn {\n\t\tsupportsStore: !isNonStandard,\n\t\tsupportsDeveloperRole: !isNonStandard,\n\t\tsupportsReasoningEffort: !isGrok,\n\t\tmaxTokensField: useMaxTokens ? \"max_tokens\" : \"max_completion_tokens\",\n\t};\n}\n\n/**\n * Get resolved compatibility settings for a model.\n * Uses explicit model.compat if provided, otherwise auto-detects from URL.\n */\nfunction getCompat(model: Model<\"openai-completions\">): Required<OpenAICompat> {\n\tconst detected = detectCompatFromUrl(model.baseUrl);\n\tif (!model.compat) return detected;\n\n\treturn {\n\t\tsupportsStore: model.compat.supportsStore ?? detected.supportsStore,\n\t\tsupportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,\n\t\tsupportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,\n\t\tmaxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,\n\t};\n}\n"]}
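Beyond the sourcemap churn, the rewritten openai-completions provider (visible in the sourcesContent above) replaces the previous scattered baseUrl checks — the Cerebras/xAI/Mistral/Chutes special cases for store, the developer role, max_tokens, and reasoning_effort — with a single resolved compat object: detectCompatFromUrl() derives defaults from the endpoint URL, and an optional model.compat field overrides it setting by setting. It also now sends an empty tools array when the conversation history already contains tool calls or tool results (hasToolHistory, needed for Anthropic behind LiteLLM-style proxies), and no longer calls validateToolArguments when closing a tool-call block. The sketch below condenses the two compat helpers into a standalone runnable form; the plain baseUrl/compat parameters are illustrative simplifications of the package's Model<"openai-completions"> type.

// compat-sketch.ts -- condensed from the 0.14.x provider source embedded above.
// Parameter shapes are simplified for illustration.

interface OpenAICompat {
	supportsStore?: boolean;
	supportsDeveloperRole?: boolean;
	supportsReasoningEffort?: boolean;
	maxTokensField?: "max_tokens" | "max_completion_tokens";
}

// Derive defaults from the endpoint URL. Cerebras/xAI/Mistral/Chutes reject
// the "store" field and the "developer" role, Mistral/Chutes expect
// max_tokens rather than max_completion_tokens, and Grok (api.x.ai)
// rejects reasoning_effort.
function detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {
	const isNonStandard =
		baseUrl.includes("cerebras.ai") ||
		baseUrl.includes("api.x.ai") ||
		baseUrl.includes("mistral.ai") ||
		baseUrl.includes("chutes.ai");
	const useMaxTokens = baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai");
	const isGrok = baseUrl.includes("api.x.ai");
	return {
		supportsStore: !isNonStandard,
		supportsDeveloperRole: !isNonStandard,
		supportsReasoningEffort: !isGrok,
		maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
	};
}

// Explicit per-model overrides win over URL detection, field by field.
function getCompat(baseUrl: string, compat?: OpenAICompat): Required<OpenAICompat> {
	const detected = detectCompatFromUrl(baseUrl);
	if (!compat) return detected;
	return {
		supportsStore: compat.supportsStore ?? detected.supportsStore,
		supportsDeveloperRole: compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,
		supportsReasoningEffort: compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,
		maxTokensField: compat.maxTokensField ?? detected.maxTokensField,
	};
}

// Example: a Mistral endpoint falls back to max_tokens unless overridden.
console.log(getCompat("https://api.mistral.ai/v1").maxTokensField); // "max_tokens"
console.log(getCompat("https://api.mistral.ai/v1", { maxTokensField: "max_completion_tokens" }).maxTokensField); // "max_completion_tokens"
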
@@ -1,6 +1,6 @@
 import type { StreamFunction, StreamOptions } from "../types.js";
 export interface OpenAIResponsesOptions extends StreamOptions {
-	reasoningEffort?: "minimal" | "low" | "medium" | "high";
+	reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
 	reasoningSummary?: "auto" | "detailed" | "concise" | null;
 }
 /**
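The only surface change to the Responses options is the widened reasoningEffort union; the completions provider's OpenAICompletionsOptions (in the sourcesContent above) gains the same "xhigh" member. A minimal usage sketch, assuming the provider types are re-exported from the package root (dist/index.d.ts gains one line in this release, but the exact re-export is not shown in this diff):

import type { OpenAIResponsesOptions } from "@mariozechner/pi-ai";

// "xhigh" type-checks as of 0.14.x; whether a given model honors it is
// up to the upstream API.
const options: OpenAIResponsesOptions = {
	reasoningEffort: "xhigh",
	reasoningSummary: "auto",
};
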
@@ -1 +1 @@
-
{"version":3,"file":"openai-responses.d.ts","sourceRoot":"","sources":["../../src/providers/openai-responses.ts"],"names":[],"mappings":"AAaA,OAAO,KAAK,EAMX,cAAc,EACd,aAAa,EAKb,MAAM,aAAa,CAAC;AAQrB,MAAM,WAAW,sBAAuB,SAAQ,aAAa;IAC5D,eAAe,CAAC,EAAE,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,CAAC;IACxD,gBAAgB,CAAC,EAAE,MAAM,GAAG,UAAU,GAAG,SAAS,GAAG,IAAI,CAAC;CAC1D;AAED;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,cAAc,CAAC,kBAAkB,CAqQpE,CAAC","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tTool as OpenAITool,\n\tResponseCreateParamsStreaming,\n\tResponseFunctionToolCall,\n\tResponseInput,\n\tResponseInputContent,\n\tResponseInputImage,\n\tResponseInputText,\n\tResponseOutputMessage,\n\tResponseReasoningItem,\n} from \"openai/resources/responses/responses.js\";\nimport { calculateCost } from \"../models.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tContext,\n\tModel,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { validateToolArguments } from \"../utils/validation.js\";\nimport { transformMessages } from \"./transorm-messages.js\";\n\n// OpenAI Responses-specific options\nexport interface OpenAIResponsesOptions extends StreamOptions {\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\";\n\treasoningSummary?: \"auto\" | \"detailed\" | \"concise\" | null;\n}\n\n/**\n * Generate function for OpenAI Responses API\n */\nexport const streamOpenAIResponses: StreamFunction<\"openai-responses\"> = (\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\toptions?: OpenAIResponsesOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t// Start async processing\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: \"openai-responses\" as Api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\t// Create OpenAI client\n\t\t\tconst client = createClient(model, options?.apiKey);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\tconst openaiStream = await client.responses.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tlet currentItem: ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall | null = null;\n\t\t\tlet currentBlock: ThinkingContent | TextContent | (ToolCall & { partialJson: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\n\t\t\tfor await (const event of openaiStream) {\n\t\t\t\t// Handle output item start\n\t\t\t\tif (event.type === \"response.output_item.added\") {\n\t\t\t\t\tconst item = event.item;\n\t\t\t\t\tif (item.type === \"reasoning\") {\n\t\t\t\t\t\tcurrentItem = item;\n\t\t\t\t\t\tcurrentBlock = { type: \"thinking\", thinking: \"\" };\n\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output 
});\n\t\t\t\t\t} else if (item.type === \"message\") {\n\t\t\t\t\t\tcurrentItem = item;\n\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t} else if (item.type === \"function_call\") {\n\t\t\t\t\t\tcurrentItem = item;\n\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\tid: item.call_id + \"|\" + item.id,\n\t\t\t\t\t\t\tname: item.name,\n\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\tpartialJson: item.arguments || \"\",\n\t\t\t\t\t\t};\n\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle reasoning summary deltas\n\t\t\t\telse if (event.type === \"response.reasoning_summary_part.added\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"reasoning\") {\n\t\t\t\t\t\tcurrentItem.summary = currentItem.summary || [];\n\t\t\t\t\t\tcurrentItem.summary.push(event.part);\n\t\t\t\t\t}\n\t\t\t\t} else if (event.type === \"response.reasoning_summary_text.delta\") {\n\t\t\t\t\tif (\n\t\t\t\t\t\tcurrentItem &&\n\t\t\t\t\t\tcurrentItem.type === \"reasoning\" &&\n\t\t\t\t\t\tcurrentBlock &&\n\t\t\t\t\t\tcurrentBlock.type === \"thinking\"\n\t\t\t\t\t) {\n\t\t\t\t\t\tcurrentItem.summary = currentItem.summary || [];\n\t\t\t\t\t\tconst lastPart = currentItem.summary[currentItem.summary.length - 1];\n\t\t\t\t\t\tif (lastPart) {\n\t\t\t\t\t\t\tcurrentBlock.thinking += event.delta;\n\t\t\t\t\t\t\tlastPart.text += event.delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Add a new line between summary parts (hack...)\n\t\t\t\telse if (event.type === \"response.reasoning_summary_part.done\") {\n\t\t\t\t\tif (\n\t\t\t\t\t\tcurrentItem &&\n\t\t\t\t\t\tcurrentItem.type === \"reasoning\" &&\n\t\t\t\t\t\tcurrentBlock &&\n\t\t\t\t\t\tcurrentBlock.type === \"thinking\"\n\t\t\t\t\t) {\n\t\t\t\t\t\tcurrentItem.summary = currentItem.summary || [];\n\t\t\t\t\t\tconst lastPart = currentItem.summary[currentItem.summary.length - 1];\n\t\t\t\t\t\tif (lastPart) {\n\t\t\t\t\t\t\tcurrentBlock.thinking += \"\\n\\n\";\n\t\t\t\t\t\t\tlastPart.text += \"\\n\\n\";\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: \"\\n\\n\",\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle text output deltas\n\t\t\t\telse if (event.type === \"response.content_part.added\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"message\") {\n\t\t\t\t\t\tcurrentItem.content = currentItem.content || [];\n\t\t\t\t\t\tcurrentItem.content.push(event.part);\n\t\t\t\t\t}\n\t\t\t\t} else if (event.type === \"response.output_text.delta\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"message\" && currentBlock && currentBlock.type === \"text\") {\n\t\t\t\t\t\tconst lastPart = currentItem.content[currentItem.content.length - 1];\n\t\t\t\t\t\tif (lastPart && lastPart.type === \"output_text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += event.delta;\n\t\t\t\t\t\t\tlastPart.text += event.delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: 
\"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if (event.type === \"response.refusal.delta\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"message\" && currentBlock && currentBlock.type === \"text\") {\n\t\t\t\t\t\tconst lastPart = currentItem.content[currentItem.content.length - 1];\n\t\t\t\t\t\tif (lastPart && lastPart.type === \"refusal\") {\n\t\t\t\t\t\t\tcurrentBlock.text += event.delta;\n\t\t\t\t\t\t\tlastPart.refusal += event.delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle function call argument deltas\n\t\t\t\telse if (event.type === \"response.function_call_arguments.delta\") {\n\t\t\t\t\tif (\n\t\t\t\t\t\tcurrentItem &&\n\t\t\t\t\t\tcurrentItem.type === \"function_call\" &&\n\t\t\t\t\t\tcurrentBlock &&\n\t\t\t\t\t\tcurrentBlock.type === \"toolCall\"\n\t\t\t\t\t) {\n\t\t\t\t\t\tcurrentBlock.partialJson += event.delta;\n\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialJson);\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle output item completion\n\t\t\t\telse if (event.type === \"response.output_item.done\") {\n\t\t\t\t\tconst item = event.item;\n\n\t\t\t\t\tif (item.type === \"reasoning\" && currentBlock && currentBlock.type === \"thinking\") {\n\t\t\t\t\t\tcurrentBlock.thinking = item.summary?.map((s) => s.text).join(\"\\n\\n\") || \"\";\n\t\t\t\t\t\tcurrentBlock.thinkingSignature = JSON.stringify(item);\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: currentBlock.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t\tcurrentBlock = null;\n\t\t\t\t\t} else if (item.type === \"message\" && currentBlock && currentBlock.type === \"text\") {\n\t\t\t\t\t\tcurrentBlock.text = item.content.map((c) => (c.type === \"output_text\" ? 
c.text : c.refusal)).join(\"\");\n\t\t\t\t\t\tcurrentBlock.textSignature = item.id;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: currentBlock.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t\tcurrentBlock = null;\n\t\t\t\t\t} else if (item.type === \"function_call\") {\n\t\t\t\t\t\tconst toolCall: ToolCall = {\n\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\tid: item.call_id + \"|\" + item.id,\n\t\t\t\t\t\t\tname: item.name,\n\t\t\t\t\t\t\targuments: JSON.parse(item.arguments),\n\t\t\t\t\t\t};\n\n\t\t\t\t\t\t// Validate tool arguments if tool definition is available\n\t\t\t\t\t\tif (context.tools) {\n\t\t\t\t\t\t\tconst tool = context.tools.find((t) => t.name === toolCall.name);\n\t\t\t\t\t\t\tif (tool) {\n\t\t\t\t\t\t\t\ttoolCall.arguments = validateToolArguments(tool, toolCall);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tstream.push({ type: \"toolcall_end\", contentIndex: blockIndex(), toolCall, partial: output });\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle completion\n\t\t\t\telse if (event.type === \"response.completed\") {\n\t\t\t\t\tconst response = event.response;\n\t\t\t\t\tif (response?.usage) {\n\t\t\t\t\t\tconst cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;\n\t\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t\t// OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input\n\t\t\t\t\t\t\tinput: (response.usage.input_tokens || 0) - cachedTokens,\n\t\t\t\t\t\t\toutput: response.usage.output_tokens || 0,\n\t\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotalTokens: response.usage.total_tokens || 0,\n\t\t\t\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t\t// Map status to stop reason\n\t\t\t\t\toutput.stopReason = mapStopReason(response?.status);\n\t\t\t\t\tif (output.content.some((b) => b.type === \"toolCall\") && output.stopReason === \"stop\") {\n\t\t\t\t\t\toutput.stopReason = \"toolUse\";\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle errors\n\t\t\t\telse if (event.type === \"error\") {\n\t\t\t\t\tthrow new Error(`Error Code ${event.code}: ${event.message}` || \"Unknown error\");\n\t\t\t\t} else if (event.type === \"response.failed\") {\n\t\t\t\t\tthrow new Error(\"Unknown error\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unkown error ocurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nfunction createClient(model: Model<\"openai-responses\">, apiKey?: string) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. 
Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: model.headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-responses\">, context: Context, options?: OpenAIResponsesOptions) {\n\tconst messages = convertMessages(model, context);\n\n\tconst params: ResponseCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tinput: messages,\n\t\tstream: true,\n\t};\n\n\tif (options?.maxTokens) {\n\t\tparams.max_output_tokens = options?.maxTokens;\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options?.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t}\n\n\tif (model.reasoning) {\n\t\tif (options?.reasoningEffort || options?.reasoningSummary) {\n\t\t\tparams.reasoning = {\n\t\t\t\teffort: options?.reasoningEffort || \"medium\",\n\t\t\t\tsummary: options?.reasoningSummary || \"auto\",\n\t\t\t};\n\t\t\tparams.include = [\"reasoning.encrypted_content\"];\n\t\t} else {\n\t\t\tif (model.name.startsWith(\"gpt-5\")) {\n\t\t\t\t// Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"developer\",\n\t\t\t\t\tcontent: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\t\t\ttext: \"# Juice: 0 !important\",\n\t\t\t\t\t\t},\n\t\t\t\t\t],\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction convertMessages(model: Model<\"openai-responses\">, context: Context): ResponseInput {\n\tconst messages: ResponseInput = [];\n\n\tconst transformedMessages = transformMessages(context.messages, model);\n\n\tif (context.systemPrompt) {\n\t\tconst role = model.reasoning ? \"developer\" : \"system\";\n\t\tmessages.push({\n\t\t\trole,\n\t\t\tcontent: sanitizeSurrogates(context.systemPrompt),\n\t\t});\n\t}\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: [{ type: \"input_text\", text: sanitizeSurrogates(msg.content) }],\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ResponseInputContent[] = msg.content.map((item): ResponseInputContent => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ResponseInputText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"input_image\",\n\t\t\t\t\t\t\tdetail: \"auto\",\n\t\t\t\t\t\t\timage_url: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t} satisfies ResponseInputImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? content.filter((c) => c.type !== \"input_image\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\tconst output: ResponseInput = [];\n\n\t\t\tfor (const block of msg.content) {\n\t\t\t\t// Do not submit thinking blocks if the completion had an error (i.e. 
abort)\n\t\t\t\tif (block.type === \"thinking\" && msg.stopReason !== \"error\") {\n\t\t\t\t\tif (block.thinkingSignature) {\n\t\t\t\t\t\tconst reasoningItem = JSON.parse(block.thinkingSignature);\n\t\t\t\t\t\toutput.push(reasoningItem);\n\t\t\t\t\t}\n\t\t\t\t} else if (block.type === \"text\") {\n\t\t\t\t\tconst textBlock = block as TextContent;\n\t\t\t\t\toutput.push({\n\t\t\t\t\t\ttype: \"message\",\n\t\t\t\t\t\trole: \"assistant\",\n\t\t\t\t\t\tcontent: [{ type: \"output_text\", text: sanitizeSurrogates(textBlock.text), annotations: [] }],\n\t\t\t\t\t\tstatus: \"completed\",\n\t\t\t\t\t\tid: textBlock.textSignature || \"msg_\" + Math.random().toString(36).substring(2, 15),\n\t\t\t\t\t} satisfies ResponseOutputMessage);\n\t\t\t\t\t// Do not submit toolcall blocks if the completion had an error (i.e. abort)\n\t\t\t\t} else if (block.type === \"toolCall\" && msg.stopReason !== \"error\") {\n\t\t\t\t\tconst toolCall = block as ToolCall;\n\t\t\t\t\toutput.push({\n\t\t\t\t\t\ttype: \"function_call\",\n\t\t\t\t\t\tid: toolCall.id.split(\"|\")[1],\n\t\t\t\t\t\tcall_id: toolCall.id.split(\"|\")[0],\n\t\t\t\t\t\tname: toolCall.name,\n\t\t\t\t\t\targuments: JSON.stringify(toolCall.arguments),\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (output.length === 0) continue;\n\t\t\tmessages.push(...output);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textResult = msg.content\n\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t.join(\"\\n\");\n\t\t\tconst hasImages = msg.content.some((c) => c.type === \"image\");\n\n\t\t\t// Always send function_call_output with text (or placeholder if only images)\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\tmessages.push({\n\t\t\t\ttype: \"function_call_output\",\n\t\t\t\tcall_id: msg.toolCallId.split(\"|\")[0],\n\t\t\t\toutput: sanitizeSurrogates(hasText ? 
textResult : \"(see attached image)\"),\n\t\t\t});\n\n\t\t\t// If there are images and model supports them, send a follow-up user message with images\n\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\tconst contentParts: ResponseInputContent[] = [];\n\n\t\t\t\t// Add text prefix\n\t\t\t\tcontentParts.push({\n\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t} satisfies ResponseInputText);\n\n\t\t\t\t// Add images\n\t\t\t\tfor (const block of msg.content) {\n\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\tcontentParts.push({\n\t\t\t\t\t\t\ttype: \"input_image\",\n\t\t\t\t\t\t\tdetail: \"auto\",\n\t\t\t\t\t\t\timage_url: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t} satisfies ResponseInputImage);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: contentParts,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn messages;\n}\n\nfunction convertTools(tools: Tool[]): OpenAITool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tname: tool.name,\n\t\tdescription: tool.description,\n\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\tstrict: null,\n\t}));\n}\n\nfunction mapStopReason(status: OpenAI.Responses.ResponseStatus | undefined): StopReason {\n\tif (!status) return \"stop\";\n\tswitch (status) {\n\t\tcase \"completed\":\n\t\t\treturn \"stop\";\n\t\tcase \"incomplete\":\n\t\t\treturn \"length\";\n\t\tcase \"failed\":\n\t\tcase \"cancelled\":\n\t\t\treturn \"error\";\n\t\t// These two are wonky ...\n\t\tcase \"in_progress\":\n\t\tcase \"queued\":\n\t\t\treturn \"stop\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = status;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n"]}
+
{"version":3,"file":"openai-responses.d.ts","sourceRoot":"","sources":["../../src/providers/openai-responses.ts"],"names":[],"mappings":"AAaA,OAAO,KAAK,EAMX,cAAc,EACd,aAAa,EAKb,MAAM,aAAa,CAAC;AAQrB,MAAM,WAAW,sBAAuB,SAAQ,aAAa;IAC5D,eAAe,CAAC,EAAE,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;IAClE,gBAAgB,CAAC,EAAE,MAAM,GAAG,UAAU,GAAG,SAAS,GAAG,IAAI,CAAC;CAC1D;AAED;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,cAAc,CAAC,kBAAkB,CAgQpE,CAAC","sourcesContent":["import OpenAI from \"openai\";\nimport type {\n\tTool as OpenAITool,\n\tResponseCreateParamsStreaming,\n\tResponseFunctionToolCall,\n\tResponseInput,\n\tResponseInputContent,\n\tResponseInputImage,\n\tResponseInputText,\n\tResponseOutputMessage,\n\tResponseReasoningItem,\n} from \"openai/resources/responses/responses.js\";\nimport { calculateCost } from \"../models.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tContext,\n\tModel,\n\tStopReason,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingContent,\n\tTool,\n\tToolCall,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { parseStreamingJson } from \"../utils/json-parse.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\n\nimport { transformMessages } from \"./transorm-messages.js\";\n\n// OpenAI Responses-specific options\nexport interface OpenAIResponsesOptions extends StreamOptions {\n\treasoningEffort?: \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\treasoningSummary?: \"auto\" | \"detailed\" | \"concise\" | null;\n}\n\n/**\n * Generate function for OpenAI Responses API\n */\nexport const streamOpenAIResponses: StreamFunction<\"openai-responses\"> = (\n\tmodel: Model<\"openai-responses\">,\n\tcontext: Context,\n\toptions?: OpenAIResponsesOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t// Start async processing\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: \"openai-responses\" as Api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\t// Create OpenAI client\n\t\t\tconst client = createClient(model, options?.apiKey);\n\t\t\tconst params = buildParams(model, context, options);\n\t\t\tconst openaiStream = await client.responses.create(params, { signal: options?.signal });\n\t\t\tstream.push({ type: \"start\", partial: output });\n\n\t\t\tlet currentItem: ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall | null = null;\n\t\t\tlet currentBlock: ThinkingContent | TextContent | (ToolCall & { partialJson: string }) | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\n\t\t\tfor await (const event of openaiStream) {\n\t\t\t\t// Handle output item start\n\t\t\t\tif (event.type === \"response.output_item.added\") {\n\t\t\t\t\tconst item = event.item;\n\t\t\t\t\tif (item.type === \"reasoning\") {\n\t\t\t\t\t\tcurrentItem = item;\n\t\t\t\t\t\tcurrentBlock = { type: \"thinking\", thinking: \"\" };\n\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t} else if (item.type === 
\"message\") {\n\t\t\t\t\t\tcurrentItem = item;\n\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t} else if (item.type === \"function_call\") {\n\t\t\t\t\t\tcurrentItem = item;\n\t\t\t\t\t\tcurrentBlock = {\n\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\tid: item.call_id + \"|\" + item.id,\n\t\t\t\t\t\t\tname: item.name,\n\t\t\t\t\t\t\targuments: {},\n\t\t\t\t\t\t\tpartialJson: item.arguments || \"\",\n\t\t\t\t\t\t};\n\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle reasoning summary deltas\n\t\t\t\telse if (event.type === \"response.reasoning_summary_part.added\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"reasoning\") {\n\t\t\t\t\t\tcurrentItem.summary = currentItem.summary || [];\n\t\t\t\t\t\tcurrentItem.summary.push(event.part);\n\t\t\t\t\t}\n\t\t\t\t} else if (event.type === \"response.reasoning_summary_text.delta\") {\n\t\t\t\t\tif (\n\t\t\t\t\t\tcurrentItem &&\n\t\t\t\t\t\tcurrentItem.type === \"reasoning\" &&\n\t\t\t\t\t\tcurrentBlock &&\n\t\t\t\t\t\tcurrentBlock.type === \"thinking\"\n\t\t\t\t\t) {\n\t\t\t\t\t\tcurrentItem.summary = currentItem.summary || [];\n\t\t\t\t\t\tconst lastPart = currentItem.summary[currentItem.summary.length - 1];\n\t\t\t\t\t\tif (lastPart) {\n\t\t\t\t\t\t\tcurrentBlock.thinking += event.delta;\n\t\t\t\t\t\t\tlastPart.text += event.delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Add a new line between summary parts (hack...)\n\t\t\t\telse if (event.type === \"response.reasoning_summary_part.done\") {\n\t\t\t\t\tif (\n\t\t\t\t\t\tcurrentItem &&\n\t\t\t\t\t\tcurrentItem.type === \"reasoning\" &&\n\t\t\t\t\t\tcurrentBlock &&\n\t\t\t\t\t\tcurrentBlock.type === \"thinking\"\n\t\t\t\t\t) {\n\t\t\t\t\t\tcurrentItem.summary = currentItem.summary || [];\n\t\t\t\t\t\tconst lastPart = currentItem.summary[currentItem.summary.length - 1];\n\t\t\t\t\t\tif (lastPart) {\n\t\t\t\t\t\t\tcurrentBlock.thinking += \"\\n\\n\";\n\t\t\t\t\t\t\tlastPart.text += \"\\n\\n\";\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: \"\\n\\n\",\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle text output deltas\n\t\t\t\telse if (event.type === \"response.content_part.added\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"message\") {\n\t\t\t\t\t\tcurrentItem.content = currentItem.content || [];\n\t\t\t\t\t\t// Filter out ReasoningText, only accept output_text and refusal\n\t\t\t\t\t\tif (event.part.type === \"output_text\" || event.part.type === \"refusal\") {\n\t\t\t\t\t\t\tcurrentItem.content.push(event.part);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if (event.type === \"response.output_text.delta\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"message\" && currentBlock && currentBlock.type === \"text\") {\n\t\t\t\t\t\tconst lastPart = currentItem.content[currentItem.content.length - 1];\n\t\t\t\t\t\tif (lastPart && lastPart.type === \"output_text\") {\n\t\t\t\t\t\t\tcurrentBlock.text += 
event.delta;\n\t\t\t\t\t\t\tlastPart.text += event.delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if (event.type === \"response.refusal.delta\") {\n\t\t\t\t\tif (currentItem && currentItem.type === \"message\" && currentBlock && currentBlock.type === \"text\") {\n\t\t\t\t\t\tconst lastPart = currentItem.content[currentItem.content.length - 1];\n\t\t\t\t\t\tif (lastPart && lastPart.type === \"refusal\") {\n\t\t\t\t\t\t\tcurrentBlock.text += event.delta;\n\t\t\t\t\t\t\tlastPart.refusal += event.delta;\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle function call argument deltas\n\t\t\t\telse if (event.type === \"response.function_call_arguments.delta\") {\n\t\t\t\t\tif (\n\t\t\t\t\t\tcurrentItem &&\n\t\t\t\t\t\tcurrentItem.type === \"function_call\" &&\n\t\t\t\t\t\tcurrentBlock &&\n\t\t\t\t\t\tcurrentBlock.type === \"toolCall\"\n\t\t\t\t\t) {\n\t\t\t\t\t\tcurrentBlock.partialJson += event.delta;\n\t\t\t\t\t\tcurrentBlock.arguments = parseStreamingJson(currentBlock.partialJson);\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tdelta: event.delta,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle output item completion\n\t\t\t\telse if (event.type === \"response.output_item.done\") {\n\t\t\t\t\tconst item = event.item;\n\n\t\t\t\t\tif (item.type === \"reasoning\" && currentBlock && currentBlock.type === \"thinking\") {\n\t\t\t\t\t\tcurrentBlock.thinking = item.summary?.map((s) => s.text).join(\"\\n\\n\") || \"\";\n\t\t\t\t\t\tcurrentBlock.thinkingSignature = JSON.stringify(item);\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: currentBlock.thinking,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t\tcurrentBlock = null;\n\t\t\t\t\t} else if (item.type === \"message\" && currentBlock && currentBlock.type === \"text\") {\n\t\t\t\t\t\tcurrentBlock.text = item.content.map((c) => (c.type === \"output_text\" ? 
c.text : c.refusal)).join(\"\");\n\t\t\t\t\t\tcurrentBlock.textSignature = item.id;\n\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\tcontent: currentBlock.text,\n\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t});\n\t\t\t\t\t\tcurrentBlock = null;\n\t\t\t\t\t} else if (item.type === \"function_call\") {\n\t\t\t\t\t\tconst toolCall: ToolCall = {\n\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\tid: item.call_id + \"|\" + item.id,\n\t\t\t\t\t\t\tname: item.name,\n\t\t\t\t\t\t\targuments: JSON.parse(item.arguments),\n\t\t\t\t\t\t};\n\n\t\t\t\t\t\tstream.push({ type: \"toolcall_end\", contentIndex: blockIndex(), toolCall, partial: output });\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle completion\n\t\t\t\telse if (event.type === \"response.completed\") {\n\t\t\t\t\tconst response = event.response;\n\t\t\t\t\tif (response?.usage) {\n\t\t\t\t\t\tconst cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;\n\t\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\t\t// OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input\n\t\t\t\t\t\t\tinput: (response.usage.input_tokens || 0) - cachedTokens,\n\t\t\t\t\t\t\toutput: response.usage.output_tokens || 0,\n\t\t\t\t\t\t\tcacheRead: cachedTokens,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotalTokens: response.usage.total_tokens || 0,\n\t\t\t\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t\t// Map status to stop reason\n\t\t\t\t\toutput.stopReason = mapStopReason(response?.status);\n\t\t\t\t\tif (output.content.some((b) => b.type === \"toolCall\") && output.stopReason === \"stop\") {\n\t\t\t\t\t\toutput.stopReason = \"toolUse\";\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Handle errors\n\t\t\t\telse if (event.type === \"error\") {\n\t\t\t\t\tthrow new Error(`Error Code ${event.code}: ${event.message}` || \"Unknown error\");\n\t\t\t\t} else if (event.type === \"response.failed\") {\n\t\t\t\t\tthrow new Error(\"Unknown error\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unkown error ocurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\tfor (const block of output.content) delete (block as any).index;\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nfunction createClient(model: Model<\"openai-responses\">, apiKey?: string) {\n\tif (!apiKey) {\n\t\tif (!process.env.OPENAI_API_KEY) {\n\t\t\tthrow new Error(\n\t\t\t\t\"OpenAI API key is required. 
Set OPENAI_API_KEY environment variable or pass it as an argument.\",\n\t\t\t);\n\t\t}\n\t\tapiKey = process.env.OPENAI_API_KEY;\n\t}\n\treturn new OpenAI({\n\t\tapiKey,\n\t\tbaseURL: model.baseUrl,\n\t\tdangerouslyAllowBrowser: true,\n\t\tdefaultHeaders: model.headers,\n\t});\n}\n\nfunction buildParams(model: Model<\"openai-responses\">, context: Context, options?: OpenAIResponsesOptions) {\n\tconst messages = convertMessages(model, context);\n\n\tconst params: ResponseCreateParamsStreaming = {\n\t\tmodel: model.id,\n\t\tinput: messages,\n\t\tstream: true,\n\t};\n\n\tif (options?.maxTokens) {\n\t\tparams.max_output_tokens = options?.maxTokens;\n\t}\n\n\tif (options?.temperature !== undefined) {\n\t\tparams.temperature = options?.temperature;\n\t}\n\n\tif (context.tools) {\n\t\tparams.tools = convertTools(context.tools);\n\t}\n\n\tif (model.reasoning) {\n\t\tif (options?.reasoningEffort || options?.reasoningSummary) {\n\t\t\tparams.reasoning = {\n\t\t\t\teffort: options?.reasoningEffort || \"medium\",\n\t\t\t\tsummary: options?.reasoningSummary || \"auto\",\n\t\t\t};\n\t\t\tparams.include = [\"reasoning.encrypted_content\"];\n\t\t} else {\n\t\t\tif (model.name.startsWith(\"gpt-5\")) {\n\t\t\t\t// Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"developer\",\n\t\t\t\t\tcontent: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\t\t\ttext: \"# Juice: 0 !important\",\n\t\t\t\t\t\t},\n\t\t\t\t\t],\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn params;\n}\n\nfunction convertMessages(model: Model<\"openai-responses\">, context: Context): ResponseInput {\n\tconst messages: ResponseInput = [];\n\n\tconst transformedMessages = transformMessages(context.messages, model);\n\n\tif (context.systemPrompt) {\n\t\tconst role = model.reasoning ? \"developer\" : \"system\";\n\t\tmessages.push({\n\t\t\trole,\n\t\t\tcontent: sanitizeSurrogates(context.systemPrompt),\n\t\t});\n\t}\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: [{ type: \"input_text\", text: sanitizeSurrogates(msg.content) }],\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst content: ResponseInputContent[] = msg.content.map((item): ResponseInputContent => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(item.text),\n\t\t\t\t\t\t} satisfies ResponseInputText;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttype: \"input_image\",\n\t\t\t\t\t\t\tdetail: \"auto\",\n\t\t\t\t\t\t\timage_url: `data:${item.mimeType};base64,${item.data}`,\n\t\t\t\t\t\t} satisfies ResponseInputImage;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tconst filteredContent = !model.input.includes(\"image\")\n\t\t\t\t\t? content.filter((c) => c.type !== \"input_image\")\n\t\t\t\t\t: content;\n\t\t\t\tif (filteredContent.length === 0) continue;\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: filteredContent,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\tconst output: ResponseInput = [];\n\n\t\t\tfor (const block of msg.content) {\n\t\t\t\t// Do not submit thinking blocks if the completion had an error (i.e. 
abort)\n\t\t\t\tif (block.type === \"thinking\" && msg.stopReason !== \"error\") {\n\t\t\t\t\tif (block.thinkingSignature) {\n\t\t\t\t\t\tconst reasoningItem = JSON.parse(block.thinkingSignature);\n\t\t\t\t\t\toutput.push(reasoningItem);\n\t\t\t\t\t}\n\t\t\t\t} else if (block.type === \"text\") {\n\t\t\t\t\tconst textBlock = block as TextContent;\n\t\t\t\t\toutput.push({\n\t\t\t\t\t\ttype: \"message\",\n\t\t\t\t\t\trole: \"assistant\",\n\t\t\t\t\t\tcontent: [{ type: \"output_text\", text: sanitizeSurrogates(textBlock.text), annotations: [] }],\n\t\t\t\t\t\tstatus: \"completed\",\n\t\t\t\t\t\tid: textBlock.textSignature || \"msg_\" + Math.random().toString(36).substring(2, 15),\n\t\t\t\t\t} satisfies ResponseOutputMessage);\n\t\t\t\t\t// Do not submit toolcall blocks if the completion had an error (i.e. abort)\n\t\t\t\t} else if (block.type === \"toolCall\" && msg.stopReason !== \"error\") {\n\t\t\t\t\tconst toolCall = block as ToolCall;\n\t\t\t\t\toutput.push({\n\t\t\t\t\t\ttype: \"function_call\",\n\t\t\t\t\t\tid: toolCall.id.split(\"|\")[1],\n\t\t\t\t\t\tcall_id: toolCall.id.split(\"|\")[0],\n\t\t\t\t\t\tname: toolCall.name,\n\t\t\t\t\t\targuments: JSON.stringify(toolCall.arguments),\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (output.length === 0) continue;\n\t\t\tmessages.push(...output);\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textResult = msg.content\n\t\t\t\t.filter((c) => c.type === \"text\")\n\t\t\t\t.map((c) => (c as any).text)\n\t\t\t\t.join(\"\\n\");\n\t\t\tconst hasImages = msg.content.some((c) => c.type === \"image\");\n\n\t\t\t// Always send function_call_output with text (or placeholder if only images)\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\tmessages.push({\n\t\t\t\ttype: \"function_call_output\",\n\t\t\t\tcall_id: msg.toolCallId.split(\"|\")[0],\n\t\t\t\toutput: sanitizeSurrogates(hasText ? 
textResult : \"(see attached image)\"),\n\t\t\t});\n\n\t\t\t// If there are images and model supports them, send a follow-up user message with images\n\t\t\tif (hasImages && model.input.includes(\"image\")) {\n\t\t\t\tconst contentParts: ResponseInputContent[] = [];\n\n\t\t\t\t// Add text prefix\n\t\t\t\tcontentParts.push({\n\t\t\t\t\ttype: \"input_text\",\n\t\t\t\t\ttext: \"Attached image(s) from tool result:\",\n\t\t\t\t} satisfies ResponseInputText);\n\n\t\t\t\t// Add images\n\t\t\t\tfor (const block of msg.content) {\n\t\t\t\t\tif (block.type === \"image\") {\n\t\t\t\t\t\tcontentParts.push({\n\t\t\t\t\t\t\ttype: \"input_image\",\n\t\t\t\t\t\t\tdetail: \"auto\",\n\t\t\t\t\t\t\timage_url: `data:${(block as any).mimeType};base64,${(block as any).data}`,\n\t\t\t\t\t\t} satisfies ResponseInputImage);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmessages.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent: contentParts,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn messages;\n}\n\nfunction convertTools(tools: Tool[]): OpenAITool[] {\n\treturn tools.map((tool) => ({\n\t\ttype: \"function\",\n\t\tname: tool.name,\n\t\tdescription: tool.description,\n\t\tparameters: tool.parameters as any, // TypeBox already generates JSON Schema\n\t\tstrict: null,\n\t}));\n}\n\nfunction mapStopReason(status: OpenAI.Responses.ResponseStatus | undefined): StopReason {\n\tif (!status) return \"stop\";\n\tswitch (status) {\n\t\tcase \"completed\":\n\t\t\treturn \"stop\";\n\t\tcase \"incomplete\":\n\t\t\treturn \"length\";\n\t\tcase \"failed\":\n\t\tcase \"cancelled\":\n\t\t\treturn \"error\";\n\t\t// These two are wonky ...\n\t\tcase \"in_progress\":\n\t\tcase \"queued\":\n\t\t\treturn \"stop\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never = status;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n"]}
@@ -3,7 +3,6 @@ import { calculateCost } from "../models.js";
 import { AssistantMessageEventStream } from "../utils/event-stream.js";
 import { parseStreamingJson } from "../utils/json-parse.js";
 import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
-import { validateToolArguments } from "../utils/validation.js";
 import { transformMessages } from "./transorm-messages.js";
 /**
  * Generate function for OpenAI Responses API
@@ -118,7 +117,10 @@ export const streamOpenAIResponses = (model, context, options) => {
             else if (event.type === "response.content_part.added") {
                 if (currentItem && currentItem.type === "message") {
                     currentItem.content = currentItem.content || [];
-                    currentItem.content.push(event.part);
+                    // Filter out ReasoningText, only accept output_text and refusal
+                    if (event.part.type === "output_text" || event.part.type === "refusal") {
+                        currentItem.content.push(event.part);
+                    }
                 }
             }
             else if (event.type === "response.output_text.delta") {
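The guard added here means only `output_text` and `refusal` parts are appended to the in-progress message; any other part kind (such as reasoning text) is silently dropped. The same narrowing in isolation, with simplified stand-ins for the SDK part types (the real ones live in `openai/resources/responses/responses.js`):

```typescript
// Simplified part shapes; illustrative only.
type Part =
	| { type: "output_text"; text: string }
	| { type: "refusal"; refusal: string }
	| { type: "reasoning_text"; text: string };

// Type guard mirroring the new filter in response.content_part.added.
function isMessagePart(p: Part): p is Extract<Part, { type: "output_text" | "refusal" }> {
	return p.type === "output_text" || p.type === "refusal";
}

const incoming: Part[] = [
	{ type: "reasoning_text", text: "chain of thought..." },
	{ type: "output_text", text: "Final answer." },
];
// Only the output_text part survives, as in the patched handler.
const kept = incoming.filter(isMessagePart);
```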
@@ -199,13 +201,6 @@ export const streamOpenAIResponses = (model, context, options) => {
                         name: item.name,
                         arguments: JSON.parse(item.arguments),
                     };
-                    // Validate tool arguments if tool definition is available
-                    if (context.tools) {
-                        const tool = context.tools.find((t) => t.name === toolCall.name);
-                        if (tool) {
-                            toolCall.arguments = validateToolArguments(tool, toolCall);
-                        }
-                    }
                     stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
                 }
             }
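Together with the first hunk, which drops the `validateToolArguments` import, this removes per-call argument validation from the Responses provider entirely. Since `dist/utils/validation.js` grows by 14 lines and `dist/stream.js` also changes in this release, validation has plausibly been centralized rather than deleted. A sketch of that pattern under that assumption (`finalizeToolCall` is a hypothetical name; the `validateToolArguments` call itself is copied from the removed lines):

```typescript
import { validateToolArguments } from "./utils/validation.js";
import type { Tool, ToolCall } from "./types.js";

// Hypothetical shared hook: run once per completed tool call instead of
// inline in every provider, which is what the removed block used to do.
function finalizeToolCall(tools: Tool[] | undefined, toolCall: ToolCall): ToolCall {
	const tool = tools?.find((t) => t.name === toolCall.name);
	if (tool) {
		// Identical to the provider-local call removed above.
		toolCall.arguments = validateToolArguments(tool, toolCall);
	}
	return toolCall;
}
```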