@mariozechner/pi-ai 0.49.3 → 0.50.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -22
- package/dist/api-registry.d.ts +20 -0
- package/dist/api-registry.d.ts.map +1 -0
- package/dist/api-registry.js +44 -0
- package/dist/api-registry.js.map +1 -0
- package/dist/cli.d.ts.map +1 -1
- package/dist/cli.js +22 -67
- package/dist/cli.js.map +1 -1
- package/dist/env-api-keys.d.ts +9 -0
- package/dist/env-api-keys.d.ts.map +1 -0
- package/dist/env-api-keys.js +91 -0
- package/dist/env-api-keys.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -1
- package/dist/models.generated.d.ts +583 -105
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +605 -127
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/amazon-bedrock.d.ts +3 -2
- package/dist/providers/amazon-bedrock.d.ts.map +1 -1
- package/dist/providers/amazon-bedrock.js +52 -5
- package/dist/providers/amazon-bedrock.js.map +1 -1
- package/dist/providers/anthropic.d.ts +3 -2
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +20 -2
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/azure-openai-responses.d.ts +15 -0
- package/dist/providers/azure-openai-responses.d.ts.map +1 -0
- package/dist/providers/azure-openai-responses.js +184 -0
- package/dist/providers/azure-openai-responses.js.map +1 -0
- package/dist/providers/google-gemini-cli.d.ts +3 -2
- package/dist/providers/google-gemini-cli.d.ts.map +1 -1
- package/dist/providers/google-gemini-cli.js +68 -1
- package/dist/providers/google-gemini-cli.js.map +1 -1
- package/dist/providers/google-vertex.d.ts +3 -2
- package/dist/providers/google-vertex.d.ts.map +1 -1
- package/dist/providers/google-vertex.js +81 -1
- package/dist/providers/google-vertex.js.map +1 -1
- package/dist/providers/google.d.ts +3 -2
- package/dist/providers/google.d.ts.map +1 -1
- package/dist/providers/google.js +84 -3
- package/dist/providers/google.js.map +1 -1
- package/dist/providers/openai-codex-responses.d.ts +3 -2
- package/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/dist/providers/openai-codex-responses.js +57 -307
- package/dist/providers/openai-codex-responses.js.map +1 -1
- package/dist/providers/openai-completions.d.ts +5 -2
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +78 -41
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai-responses-shared.d.ts +17 -0
- package/dist/providers/openai-responses-shared.d.ts.map +1 -0
- package/dist/providers/openai-responses-shared.js +424 -0
- package/dist/providers/openai-responses-shared.js.map +1 -0
- package/dist/providers/openai-responses.d.ts +3 -2
- package/dist/providers/openai-responses.d.ts.map +1 -1
- package/dist/providers/openai-responses.js +25 -415
- package/dist/providers/openai-responses.js.map +1 -1
- package/dist/providers/register-builtins.d.ts +3 -0
- package/dist/providers/register-builtins.d.ts.map +1 -0
- package/dist/providers/register-builtins.js +63 -0
- package/dist/providers/register-builtins.js.map +1 -0
- package/dist/providers/simple-options.d.ts +8 -0
- package/dist/providers/simple-options.d.ts.map +1 -0
- package/dist/providers/simple-options.js +32 -0
- package/dist/providers/simple-options.js.map +1 -0
- package/dist/stream.d.ts +5 -10
- package/dist/stream.d.ts.map +1 -1
- package/dist/stream.js +17 -420
- package/dist/stream.js.map +1 -1
- package/dist/types.d.ts +18 -22
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +0 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/event-stream.d.ts +2 -0
- package/dist/utils/event-stream.d.ts.map +1 -1
- package/dist/utils/event-stream.js +4 -0
- package/dist/utils/event-stream.js.map +1 -1
- package/dist/utils/oauth/anthropic.d.ts +2 -1
- package/dist/utils/oauth/anthropic.d.ts.map +1 -1
- package/dist/utils/oauth/anthropic.js +13 -0
- package/dist/utils/oauth/anthropic.js.map +1 -1
- package/dist/utils/oauth/github-copilot.d.ts +2 -1
- package/dist/utils/oauth/github-copilot.d.ts.map +1 -1
- package/dist/utils/oauth/github-copilot.js +25 -0
- package/dist/utils/oauth/github-copilot.js.map +1 -1
- package/dist/utils/oauth/google-antigravity.d.ts +2 -1
- package/dist/utils/oauth/google-antigravity.d.ts.map +1 -1
- package/dist/utils/oauth/google-antigravity.js +19 -0
- package/dist/utils/oauth/google-antigravity.js.map +1 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts +2 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts.map +1 -1
- package/dist/utils/oauth/google-gemini-cli.js +19 -0
- package/dist/utils/oauth/google-gemini-cli.js.map +1 -1
- package/dist/utils/oauth/index.d.ts +26 -16
- package/dist/utils/oauth/index.d.ts.map +1 -1
- package/dist/utils/oauth/index.js +65 -84
- package/dist/utils/oauth/index.js.map +1 -1
- package/dist/utils/oauth/openai-codex.d.ts +2 -1
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
- package/dist/utils/oauth/openai-codex.js +20 -1
- package/dist/utils/oauth/openai-codex.js.map +1 -1
- package/dist/utils/oauth/types.d.ts +28 -6
- package/dist/utils/oauth/types.d.ts.map +1 -1
- package/dist/utils/oauth/types.js.map +1 -1
- package/package.json +3 -1
package/dist/providers/openai-responses.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"openai-responses.d.ts","sourceRoot":"","sources":["../../src/providers/openai-responses.ts"], ...}
+{"version":3,"file":"openai-responses.d.ts","sourceRoot":"","sources":["../../src/providers/openai-responses.ts"], ...}
(Regenerated single-line sourcemaps; mappings and embedded sourcesContent omitted here. The underlying source changes appear in the dist diff below.)

package/dist/providers/openai-responses.js
@@ -1,23 +1,10 @@
 import OpenAI from "openai";
-import { calculateCost } from "../models.js";
-import { getEnvApiKey } from "../stream.js";
+import { getEnvApiKey } from "../env-api-keys.js";
+import { supportsXhigh } from "../models.js";
 import { AssistantMessageEventStream } from "../utils/event-stream.js";
-import { parseStreamingJson } from "../utils/json-parse.js";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
-import { transformMessages } from "./transform-messages.js";
-/** Fast deterministic hash to shorten long strings */
-function shortHash(str) {
-    let h1 = 0xdeadbeef;
-    let h2 = 0x41c6ce57;
-    for (let i = 0; i < str.length; i++) {
-        const ch = str.charCodeAt(i);
-        h1 = Math.imul(h1 ^ ch, 2654435761);
-        h2 = Math.imul(h2 ^ ch, 1597334677);
-    }
-    h1 = Math.imul(h1 ^ (h1 >>> 16), 2246822507) ^ Math.imul(h2 ^ (h2 >>> 13), 3266489909);
-    h2 = Math.imul(h2 ^ (h2 >>> 16), 2246822507) ^ Math.imul(h1 ^ (h1 >>> 13), 3266489909);
-    return (h2 >>> 0).toString(36) + (h1 >>> 0).toString(36);
-}
+import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js";
+import { buildBaseOptions, clampReasoning } from "./simple-options.js";
+const OPENAI_TOOL_CALL_PROVIDERS = new Set(["openai", "openai-codex", "opencode"]);
 /**
  * Generate function for OpenAI Responses API
  */
@@ -28,7 +15,7 @@ export const streamOpenAIResponses = (model, context, options) => {
         const output = {
             role: "assistant",
             content: [],
-            api: "openai-responses",
+            api: model.api,
             provider: model.provider,
             model: model.id,
             usage: {
@@ -50,212 +37,15 @@ export const streamOpenAIResponses = (model, context, options) => {
             options?.onPayload?.(params);
             const openaiStream = await client.responses.create(params, options?.signal ? { signal: options.signal } : undefined);
             stream.push({ type: "start", partial: output });
-            let currentItem = null;
-            let currentBlock = null;
-            const blocks = output.content;
-            const blockIndex = () => blocks.length - 1;
-            for await (const event of openaiStream) {
-                // Handle output item start
-                if (event.type === "response.output_item.added") {
-                    const item = event.item;
-                    if (item.type === "reasoning") {
-                        currentItem = item;
-                        currentBlock = { type: "thinking", thinking: "" };
-                        output.content.push(currentBlock);
-                        stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
-                    }
-                    else if (item.type === "message") {
-                        currentItem = item;
-                        currentBlock = { type: "text", text: "" };
-                        output.content.push(currentBlock);
-                        stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
-                    }
-                    else if (item.type === "function_call") {
-                        currentItem = item;
-                        currentBlock = {
-                            type: "toolCall",
-                            id: `${item.call_id}|${item.id}`,
-                            name: item.name,
-                            arguments: {},
-                            partialJson: item.arguments || "",
-                        };
-                        output.content.push(currentBlock);
-                        stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
-                    }
-                }
-                // Handle reasoning summary deltas
-                else if (event.type === "response.reasoning_summary_part.added") {
-                    if (currentItem && currentItem.type === "reasoning") {
-                        currentItem.summary = currentItem.summary || [];
-                        currentItem.summary.push(event.part);
-                    }
-                }
-                else if (event.type === "response.reasoning_summary_text.delta") {
-                    if (currentItem &&
-                        currentItem.type === "reasoning" &&
-                        currentBlock &&
-                        currentBlock.type === "thinking") {
-                        currentItem.summary = currentItem.summary || [];
-                        const lastPart = currentItem.summary[currentItem.summary.length - 1];
-                        if (lastPart) {
-                            currentBlock.thinking += event.delta;
-                            lastPart.text += event.delta;
-                            stream.push({
-                                type: "thinking_delta",
-                                contentIndex: blockIndex(),
-                                delta: event.delta,
-                                partial: output,
-                            });
-                        }
-                    }
-                }
-                // Add a new line between summary parts (hack...)
-                else if (event.type === "response.reasoning_summary_part.done") {
-                    if (currentItem &&
-                        currentItem.type === "reasoning" &&
-                        currentBlock &&
-                        currentBlock.type === "thinking") {
-                        currentItem.summary = currentItem.summary || [];
-                        const lastPart = currentItem.summary[currentItem.summary.length - 1];
-                        if (lastPart) {
-                            currentBlock.thinking += "\n\n";
-                            lastPart.text += "\n\n";
-                            stream.push({
-                                type: "thinking_delta",
-                                contentIndex: blockIndex(),
-                                delta: "\n\n",
-                                partial: output,
-                            });
-                        }
-                    }
-                }
-                // Handle text output deltas
-                else if (event.type === "response.content_part.added") {
-                    if (currentItem && currentItem.type === "message") {
-                        currentItem.content = currentItem.content || [];
-                        // Filter out ReasoningText, only accept output_text and refusal
-                        if (event.part.type === "output_text" || event.part.type === "refusal") {
-                            currentItem.content.push(event.part);
-                        }
-                    }
-                }
-                else if (event.type === "response.output_text.delta") {
-                    if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
-                        const lastPart = currentItem.content[currentItem.content.length - 1];
-                        if (lastPart && lastPart.type === "output_text") {
-                            currentBlock.text += event.delta;
-                            lastPart.text += event.delta;
-                            stream.push({
-                                type: "text_delta",
-                                contentIndex: blockIndex(),
-                                delta: event.delta,
-                                partial: output,
-                            });
-                        }
-                    }
-                }
-                else if (event.type === "response.refusal.delta") {
-                    if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
-                        const lastPart = currentItem.content[currentItem.content.length - 1];
-                        if (lastPart && lastPart.type === "refusal") {
-                            currentBlock.text += event.delta;
-                            lastPart.refusal += event.delta;
-                            stream.push({
-                                type: "text_delta",
-                                contentIndex: blockIndex(),
-                                delta: event.delta,
-                                partial: output,
-                            });
-                        }
-                    }
-                }
-                // Handle function call argument deltas
-                else if (event.type === "response.function_call_arguments.delta") {
-                    if (currentItem &&
-                        currentItem.type === "function_call" &&
-                        currentBlock &&
-                        currentBlock.type === "toolCall") {
-                        currentBlock.partialJson += event.delta;
-                        currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
-                        stream.push({
-                            type: "toolcall_delta",
-                            contentIndex: blockIndex(),
-                            delta: event.delta,
-                            partial: output,
-                        });
-                    }
-                }
-                // Handle output item completion
-                else if (event.type === "response.output_item.done") {
-                    const item = event.item;
-                    if (item.type === "reasoning" && currentBlock && currentBlock.type === "thinking") {
-                        currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
-                        currentBlock.thinkingSignature = JSON.stringify(item);
-                        stream.push({
-                            type: "thinking_end",
-                            contentIndex: blockIndex(),
-                            content: currentBlock.thinking,
-                            partial: output,
-                        });
-                        currentBlock = null;
-                    }
-                    else if (item.type === "message" && currentBlock && currentBlock.type === "text") {
-                        currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
-                        currentBlock.textSignature = item.id;
-                        stream.push({
-                            type: "text_end",
-                            contentIndex: blockIndex(),
-                            content: currentBlock.text,
-                            partial: output,
-                        });
-                        currentBlock = null;
-                    }
-                    else if (item.type === "function_call") {
-                        const toolCall = {
-                            type: "toolCall",
-                            id: `${item.call_id}|${item.id}`,
-                            name: item.name,
-                            arguments: JSON.parse(item.arguments),
-                        };
-                        stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
-                    }
-                }
-                // Handle completion
-                else if (event.type === "response.completed") {
-                    const response = event.response;
-                    if (response?.usage) {
-                        const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;
-                        output.usage = {
-                            // OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input
-                            input: (response.usage.input_tokens || 0) - cachedTokens,
-                            output: response.usage.output_tokens || 0,
-                            cacheRead: cachedTokens,
-                            cacheWrite: 0,
-                            totalTokens: response.usage.total_tokens || 0,
-                            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
-                        };
-                    }
-                    calculateCost(model, output.usage);
-                    applyServiceTierPricing(output.usage, response?.service_tier ?? options?.serviceTier);
-                    // Map status to stop reason
-                    output.stopReason = mapStopReason(response?.status);
-                    if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
-                        output.stopReason = "toolUse";
-                    }
-                }
-                // Handle errors
-                else if (event.type === "error") {
-                    throw new Error(`Error Code ${event.code}: ${event.message}` || "Unknown error");
-                }
-                else if (event.type === "response.failed") {
-                    throw new Error("Unknown error");
-                }
-            }
+            await processResponsesStream(openaiStream, output, stream, model, {
+                serviceTier: options?.serviceTier,
+                applyServiceTierPricing,
+            });
             if (options?.signal?.aborted) {
                 throw new Error("Request was aborted");
             }
             if (output.stopReason === "aborted" || output.stopReason === "error") {
-                throw new Error("An unkown error ocurred");
+                throw new Error("An unknown error occurred");
             }
             stream.push({ type: "done", reason: output.stopReason, message: output });
             stream.end();
@@ -271,6 +61,18 @@ export const streamOpenAIResponses = (model, context, options) => {
     })();
     return stream;
 };
+export const streamSimpleOpenAIResponses = (model, context, options) => {
+    const apiKey = options?.apiKey || getEnvApiKey(model.provider);
+    if (!apiKey) {
+        throw new Error(`No API key for provider: ${model.provider}`);
+    }
+    const base = buildBaseOptions(model, options, apiKey);
+    const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
+    return streamOpenAIResponses(model, context, {
+        ...base,
+        reasoningEffort,
+    });
+};
 function createClient(model, context, apiKey, optionsHeaders) {
     if (!apiKey) {
         if (!process.env.OPENAI_API_KEY) {
@@ -314,7 +116,7 @@ function createClient(model, context, apiKey, optionsHeaders) {
     });
 }
 function buildParams(model, context, options) {
-    const messages = convertMessages(model, context);
+    const messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);
     const params = {
         model: model.id,
         input: messages,
@@ -331,7 +133,7 @@ function buildParams(model, context, options) {
         params.service_tier = options.serviceTier;
     }
     if (context.tools) {
-        params.tools = convertTools(context.tools);
+        params.tools = convertResponsesTools(context.tools);
     }
     if (model.reasoning) {
         if (options?.reasoningEffort || options?.reasoningSummary) {
@@ -358,177 +160,6 @@ function buildParams(model, context, options) {
     }
     return params;
 }
-function convertMessages(model, context) {
-    const messages = [];
-    const normalizeToolCallId = (id) => {
-        const allowedProviders = new Set(["openai", "openai-codex", "opencode"]);
-        if (!allowedProviders.has(model.provider))
-            return id;
-        if (!id.includes("|"))
-            return id;
-        const [callId, itemId] = id.split("|");
-        const sanitizedCallId = callId.replace(/[^a-zA-Z0-9_-]/g, "_");
-        let sanitizedItemId = itemId.replace(/[^a-zA-Z0-9_-]/g, "_");
-        // OpenAI Responses API requires item id to start with "fc"
-        if (!sanitizedItemId.startsWith("fc")) {
-            sanitizedItemId = `fc_${sanitizedItemId}`;
-        }
-        const normalizedCallId = sanitizedCallId.length > 64 ? sanitizedCallId.slice(0, 64) : sanitizedCallId;
-        const normalizedItemId = sanitizedItemId.length > 64 ? sanitizedItemId.slice(0, 64) : sanitizedItemId;
-        return `${normalizedCallId}|${normalizedItemId}`;
-    };
-    const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
-    if (context.systemPrompt) {
-        const role = model.reasoning ? "developer" : "system";
-        messages.push({
-            role,
-            content: sanitizeSurrogates(context.systemPrompt),
-        });
-    }
-    let msgIndex = 0;
-    for (const msg of transformedMessages) {
-        if (msg.role === "user") {
-            if (typeof msg.content === "string") {
-                messages.push({
-                    role: "user",
-                    content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }],
-                });
-            }
-            else {
-                const content = msg.content.map((item) => {
-                    if (item.type === "text") {
-                        return {
-                            type: "input_text",
-                            text: sanitizeSurrogates(item.text),
-                        };
-                    }
-                    else {
-                        return {
-                            type: "input_image",
-                            detail: "auto",
-                            image_url: `data:${item.mimeType};base64,${item.data}`,
-                        };
-                    }
-                });
-                const filteredContent = !model.input.includes("image")
-                    ? content.filter((c) => c.type !== "input_image")
-                    : content;
-                if (filteredContent.length === 0)
-                    continue;
-                messages.push({
-                    role: "user",
-                    content: filteredContent,
-                });
-            }
-        }
-        else if (msg.role === "assistant") {
-            const output = [];
-            const assistantMsg = msg;
-            // Check if this message is from a different model (same provider, different model ID).
-            // For such messages, tool call IDs with fc_ prefix need to be stripped to avoid
-            // OpenAI's reasoning/function_call pairing validation errors.
-            const isDifferentModel = assistantMsg.model !== model.id &&
-                assistantMsg.provider === model.provider &&
-                assistantMsg.api === model.api;
-            for (const block of msg.content) {
-                if (block.type === "thinking") {
-                    if (block.thinkingSignature) {
-                        const reasoningItem = JSON.parse(block.thinkingSignature);
-                        output.push(reasoningItem);
-                    }
-                }
-                else if (block.type === "text") {
-                    const textBlock = block;
-                    // OpenAI requires id to be max 64 characters
-                    let msgId = textBlock.textSignature;
-                    if (!msgId) {
-                        msgId = `msg_${msgIndex}`;
-                    }
-                    else if (msgId.length > 64) {
-                        msgId = `msg_${shortHash(msgId)}`;
-                    }
-                    output.push({
-                        type: "message",
-                        role: "assistant",
-                        content: [{ type: "output_text", text: sanitizeSurrogates(textBlock.text), annotations: [] }],
-                        status: "completed",
-                        id: msgId,
-                    });
-                }
-                else if (block.type === "toolCall") {
-                    const toolCall = block;
-                    const callId = toolCall.id.split("|")[0];
-                    let itemId = toolCall.id.split("|")[1];
-                    // For different-model messages, set id to undefined to avoid pairing validation.
-                    // OpenAI tracks which fc_xxx IDs were paired with rs_xxx reasoning items.
-                    // By omitting the id, we avoid triggering that validation (like cross-provider does).
-                    if (isDifferentModel && itemId?.startsWith("fc_")) {
-                        itemId = undefined;
-                    }
-                    output.push({
-                        type: "function_call",
-                        id: itemId,
-                        call_id: callId,
-                        name: toolCall.name,
-                        arguments: JSON.stringify(toolCall.arguments),
-                    });
-                }
-            }
-            if (output.length === 0)
-                continue;
-            messages.push(...output);
-        }
-        else if (msg.role === "toolResult") {
-            // Extract text and image content
-            const textResult = msg.content
-                .filter((c) => c.type === "text")
-                .map((c) => c.text)
-                .join("\n");
-            const hasImages = msg.content.some((c) => c.type === "image");
-            // Always send function_call_output with text (or placeholder if only images)
-            const hasText = textResult.length > 0;
-            messages.push({
-                type: "function_call_output",
-                call_id: msg.toolCallId.split("|")[0],
-                output: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
-            });
-            // If there are images and model supports them, send a follow-up user message with images
-            if (hasImages && model.input.includes("image")) {
-                const contentParts = [];
-                // Add text prefix
-                contentParts.push({
-                    type: "input_text",
-                    text: "Attached image(s) from tool result:",
-                });
-                // Add images
-                for (const block of msg.content) {
-                    if (block.type === "image") {
-                        contentParts.push({
-                            type: "input_image",
-                            detail: "auto",
-                            image_url: `data:${block.mimeType};base64,${block.data}`,
-                        });
-                    }
-                }
-                messages.push({
-                    role: "user",
-                    content: contentParts,
-                });
-            }
-        }
-        msgIndex++;
-    }
-    return messages;
-}
-function convertTools(tools) {
-    return tools.map((tool) => ({
-        type: "function",
-        name: tool.name,
-        description: tool.description,
-        parameters: tool.parameters, // TypeBox already generates JSON Schema
-        strict: false,
-    }));
-}
 function getServiceTierCostMultiplier(serviceTier) {
     switch (serviceTier) {
         case "flex":
@@ -549,25 +180,4 @@ function applyServiceTierPricing(usage, serviceTier) {
     usage.cost.cacheWrite *= multiplier;
     usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;
 }
-function mapStopReason(status) {
-    if (!status)
-        return "stop";
-    switch (status) {
-        case "completed":
-            return "stop";
-        case "incomplete":
-            return "length";
-        case "failed":
-        case "cancelled":
-            return "error";
-        // These two are wonky ...
-        case "in_progress":
-        case "queued":
-            return "stop";
-        default: {
-            const _exhaustive = status;
-            throw new Error(`Unhandled stop reason: ${_exhaustive}`);
-        }
-    }
-}
 //# sourceMappingURL=openai-responses.js.map
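
For orientation, a hypothetical sketch of calling the new streamSimpleOpenAIResponses entry point introduced in this release. It relies only on shapes visible in this diff; whether these symbols are re-exported from the package root, and how a Model<"openai-responses"> instance is obtained (e.g. via the new api-registry/models.generated modules), is not shown above and is assumed here.

    // Hypothetical usage sketch based solely on the signatures in this diff.
    // `model` is assumed to be a Model<"openai-responses"> obtained elsewhere.
    import { streamSimpleOpenAIResponses } from "@mariozechner/pi-ai";
    import type { Context, Model } from "@mariozechner/pi-ai";

    declare const model: Model<"openai-responses">;

    const context: Context = {
        systemPrompt: "You are a helpful assistant.",
        messages: [{ role: "user", content: "Hello" }],
    };

    // Throws unless options.apiKey or an env API key for model.provider is set.
    // Per the diff, the reasoning level is passed through as-is when
    // supportsXhigh(model) is true, otherwise clamped via clampReasoning().
    const stream = streamSimpleOpenAIResponses(model, context, { reasoning: "high" });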