veryfront 0.1.321 → 0.1.323

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/esm/deno.js +1 -1
  2. package/esm/extensions/ext-jwt/src/index.d.ts +39 -0
  3. package/esm/extensions/ext-jwt/src/index.d.ts.map +1 -0
  4. package/esm/extensions/ext-jwt/src/index.js +103 -0
  5. package/esm/extensions/ext-openai/src/openai-provider.d.ts +29 -0
  6. package/esm/extensions/ext-openai/src/openai-provider.d.ts.map +1 -0
  7. package/esm/extensions/ext-openai/src/openai-provider.js +1095 -0
  8. package/esm/src/embedding/veryfront-cloud/provider.d.ts.map +1 -1
  9. package/esm/src/embedding/veryfront-cloud/provider.js +6 -1
  10. package/esm/src/provider/shared/index.d.ts +16 -0
  11. package/esm/src/provider/shared/index.d.ts.map +1 -0
  12. package/esm/src/provider/shared/index.js +18 -0
  13. package/esm/src/provider/veryfront-cloud/openai.d.ts +10 -0
  14. package/esm/src/provider/veryfront-cloud/openai.d.ts.map +1 -0
  15. package/esm/src/provider/veryfront-cloud/openai.js +18 -0
  16. package/esm/src/provider/veryfront-cloud/provider.d.ts.map +1 -1
  17. package/esm/src/provider/veryfront-cloud/provider.js +6 -1
  18. package/esm/src/proxy/main.js +3 -0
  19. package/esm/src/tool/host-tools.d.ts +15 -0
  20. package/esm/src/tool/host-tools.d.ts.map +1 -0
  21. package/esm/src/tool/host-tools.js +60 -0
  22. package/esm/src/tool/index.d.ts +2 -0
  23. package/esm/src/tool/index.d.ts.map +1 -1
  24. package/esm/src/tool/index.js +1 -0
  25. package/esm/src/utils/version-constant.d.ts +1 -1
  26. package/esm/src/utils/version-constant.js +1 -1
  27. package/package.json +2 -1
  28. package/src/deno.js +1 -1
  29. package/src/extensions/ext-jwt/src/index.ts +173 -0
  30. package/src/extensions/ext-openai/src/openai-provider.ts +1481 -0
  31. package/src/src/embedding/veryfront-cloud/provider.ts +6 -3
  32. package/src/src/provider/shared/index.ts +62 -0
  33. package/src/src/provider/veryfront-cloud/openai.ts +34 -0
  34. package/src/src/provider/veryfront-cloud/provider.ts +6 -3
  35. package/src/src/proxy/main.ts +4 -0
  36. package/src/src/tool/host-tools.ts +92 -0
  37. package/src/src/tool/index.ts +2 -0
  38. package/src/src/utils/version-constant.ts +1 -1
package/esm/extensions/ext-openai/src/openai-provider.js
@@ -0,0 +1,1095 @@
+ /**
+ * OpenAI provider — implements the {@link AIProvider} contract for OpenAI,
+ * OpenAI-compatible endpoints (Azure OpenAI, Moonshot AI), and OpenAI's
+ * Responses API.
+ *
+ * Ported from `src/provider/runtime-loader.ts` as part of PR 11.
+ *
+ * @module extensions/ext-openai/openai-provider
+ */
+ import { buildProviderError, createOpenAIRequestInit, createWarningCollector, getOpenAIChatCompletionsUrl, getOpenAIEmbeddingUrl, getOpenAIResponsesUrl, isNumberArray, mergeUsage, parseRetryAfterMs, ProviderError, ProviderOverloadedError, ProviderQuotaError, ProviderRateLimitError, ProviderRequestError, readProviderOptions, readRecord, readTextParts, requestJson, requestStream, stringifyJsonValue, TOOL_INPUT_PENDING_THRESHOLD_MS, toOpenAICompatibleMessages, toOpenAICompatibleTools, withToolInputStatusTransitions, } from "../../../src/provider/shared/index.js";
+ // Re-export error classes so extension tests can import from this module.
+ export { buildProviderError, isNumberArray, mergeUsage, parseRetryAfterMs, ProviderError, ProviderOverloadedError, ProviderQuotaError, ProviderRateLimitError, ProviderRequestError, TOOL_INPUT_PENDING_THRESHOLD_MS, withToolInputStatusTransitions, };
+ // ---------------------------------------------------------------------------
+ // Embedding helpers
+ // ---------------------------------------------------------------------------
+ function extractOpenAIEmbeddings(payload) {
+ const record = readRecord(payload);
+ const data = record?.data;
+ if (!Array.isArray(data)) {
+ throw new Error("Invalid OpenAI embedding response: data array missing");
+ }
+ const embeddings = [];
+ for (const item of data) {
+ const itemRecord = readRecord(item);
+ const embedding = itemRecord?.embedding;
+ if (!isNumberArray(embedding)) {
+ throw new Error("Invalid OpenAI embedding response: embedding vector missing");
+ }
+ embeddings.push(embedding);
+ }
+ return embeddings;
+ }
+ function extractOpenAIUsageTokens(payload) {
+ const record = readRecord(payload);
+ const usage = readRecord(record?.usage);
+ const totalTokens = usage?.total_tokens;
+ return typeof totalTokens === "number" ? totalTokens : undefined;
+ }
+ // ---------------------------------------------------------------------------
+ // Chat helpers
+ // ---------------------------------------------------------------------------
+ function normalizeOpenAIFinishReason(raw) {
+ if (typeof raw !== "string") {
+ return null;
+ }
+ if (raw === "tool_calls") {
+ return { unified: "tool-calls", raw };
+ }
+ if (raw === "content_filter") {
+ return { unified: "content-filter", raw };
+ }
+ return raw;
+ }
+ function extractOpenAIUsage(payload) {
+ const record = readRecord(payload);
+ const usage = readRecord(record?.usage);
+ if (!usage) {
+ return undefined;
+ }
+ const inputTokens = usage.prompt_tokens;
+ const outputTokens = usage.completion_tokens;
+ const totalTokens = usage.total_tokens;
+ const promptTokensDetails = readRecord(usage.prompt_tokens_details);
+ const cachedTokens = promptTokensDetails?.cached_tokens;
+ return {
+ inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
+ outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
+ totalTokens: typeof totalTokens === "number" ? totalTokens : undefined,
+ ...(typeof cachedTokens === "number" ? { cacheReadInputTokens: cachedTokens } : {}),
+ };
+ }
+ function extractOpenAIContentText(content) {
+ if (typeof content === "string") {
+ return content;
+ }
+ if (!Array.isArray(content)) {
+ return "";
+ }
+ let text = "";
+ for (const part of content) {
+ const record = readRecord(part);
+ const type = record?.type;
+ if (type === "text" && typeof record?.text === "string") {
+ text += record.text;
+ }
+ }
+ return text;
+ }
+ function extractOpenAIToolCalls(message) {
+ const toolCalls = message.tool_calls;
+ if (!Array.isArray(toolCalls)) {
+ return [];
+ }
+ const normalized = [];
+ for (const entry of toolCalls) {
+ const record = readRecord(entry);
+ const id = typeof record?.id === "string" ? record.id : undefined;
+ const fn = readRecord(record?.function);
+ const name = typeof fn?.name === "string" ? fn.name : undefined;
+ const argumentsText = typeof fn?.arguments === "string" ? fn.arguments : undefined;
+ if (!id || !name || argumentsText === undefined) {
+ continue;
+ }
+ normalized.push({
+ toolCallId: id,
+ toolName: name,
+ input: argumentsText,
+ });
+ }
+ return normalized;
+ }
+ /**
+ * OpenAI reasoning models (o1 / o3 / o4 family) use the completion path but
+ * have different constraints than chat models: sampling params are rejected,
+ * and they accept a `reasoning_effort` field. We detect them by model id
+ * prefix so callers don't have to configure it per runtime.
+ */
+ function isOpenAIReasoningModel(modelId) {
+ return /^o[134](-|$)/.test(modelId);
+ }
+ /**
+ * Detect native OpenAI models (gpt-*, o-series, chatgpt-*) vs third-party
+ * OpenAI-compatible providers (Kimi, etc.). Native OpenAI models require
+ * `max_completion_tokens` (the old `max_tokens` is rejected by newer models
+ * like gpt-5.2), while third-party providers still expect `max_tokens`.
+ */
+ function isNativeOpenAIModel(modelId) {
+ return /^(gpt-|o[134](-|$)|chatgpt-)/.test(modelId);
+ }
+ /**
+ * Kimi K2.5 fixes sampling parameters (temperature, top_p, presence_penalty,
+ * frequency_penalty) to predetermined values and rejects any other values.
+ * See https://platform.moonshot.cn/docs/guide/kimi-k2-5-quickstart
+ */
+ function isFixedSamplingModel(modelId) {
+ return /^kimi-k2\.5/.test(modelId);
+ }
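
A quick sketch of how the three detectors above classify model ids (the ids below are illustrative; any id with a matching prefix behaves the same):

isOpenAIReasoningModel("o3-mini");        // true:  /^o[134](-|$)/ matches "o3-"
isOpenAIReasoningModel("o1");             // true:  the (-|$) alternative accepts a bare id
isOpenAIReasoningModel("gpt-4o");         // false: "o" is not at the start
isNativeOpenAIModel("chatgpt-4o-latest"); // true:  gets max_completion_tokens
isNativeOpenAIModel("kimi-k2.5-turbo");   // false: third-party, keeps max_tokens
isFixedSamplingModel("kimi-k2.5");        // true:  sampling params will be dropped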
+ /**
+ * Map the unified reasoning effort to OpenAI's `reasoning_effort` enum.
+ * OpenAI doesn't accept "max" — we collapse it to "high".
+ */
+ function resolveOpenAIReasoningEffort(option) {
+ if (!option || option.enabled !== true) {
+ return undefined;
+ }
+ switch (option.effort) {
+ case "low":
+ return "low";
+ case "high":
+ case "max":
+ return "high";
+ case "medium":
+ default:
+ return "medium";
+ }
+ }
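
The mapping spelled out (each result follows directly from the switch above):

resolveOpenAIReasoningEffort({ enabled: true, effort: "max" });   // "high" ("max" collapses)
resolveOpenAIReasoningEffort({ enabled: true, effort: "low" });   // "low"
resolveOpenAIReasoningEffort({ enabled: true });                  // "medium" (default arm)
resolveOpenAIReasoningEffort({ enabled: false, effort: "high" }); // undefined (disabled)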
+ function unwrapToolInputSchema(inputSchema) {
+ if (typeof inputSchema !== "object" || inputSchema === null || Array.isArray(inputSchema)) {
+ return inputSchema;
+ }
+ const candidate = Reflect.get(inputSchema, "jsonSchema");
+ return candidate ?? inputSchema;
+ }
+ function toSnakeCaseRecord(record) {
+ return Object.fromEntries(Object.entries(record).map(([key, value]) => [
+ key.replace(/[A-Z]/g, (match) => `_${match.toLowerCase()}`),
+ value,
+ ]));
+ }
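
toSnakeCaseRecord rewrites keys only, leaving values untouched:

toSnakeCaseRecord({ searchContextSize: "high", userLocation: null });
// → { search_context_size: "high", user_location: null }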
+ function buildOpenAIChatRequest(modelId, providerName, options, stream, warnings) {
+ const isReasoningModel = isOpenAIReasoningModel(modelId);
+ const reasoningEffort = resolveOpenAIReasoningEffort(options.reasoning);
+ const reasoningEnabled = isReasoningModel || reasoningEffort !== undefined;
+ const fixedSampling = isFixedSamplingModel(modelId);
+ const dropSamplingParams = reasoningEnabled || fixedSampling;
+ // OpenAI Chat Completions has no top_k surface.
+ if (options.topK !== undefined) {
+ warnings.push({
+ type: "unsupported-setting",
+ provider: "openai",
+ setting: "topK",
+ details: "OpenAI Chat Completions does not expose top_k; the value was dropped.",
+ });
+ }
+ // Reasoning models (o1 / o3 / o4) and models with fixed sampling params
+ // reject sampling params outright. Emit warnings.
+ if (dropSamplingParams) {
+ const dropped = [
+ ["temperature", "temperature"],
+ ["topP", "top_p"],
+ ["presencePenalty", "presence_penalty"],
+ ["frequencyPenalty", "frequency_penalty"],
+ ];
+ for (const [key, openaiName] of dropped) {
+ if (options[key] !== undefined) {
+ warnings.push({
+ type: "unsupported-setting",
+ provider: "openai",
+ setting: key,
+ details: fixedSampling
+ ? `Dropped because this model uses fixed sampling parameters.`
+ : `Dropped because OpenAI reasoning models reject ${openaiName}. Reasoning was active for this request.`,
+ });
+ }
+ }
+ }
+ const body = {
+ model: modelId,
+ messages: toOpenAICompatibleMessages(options.prompt),
+ ...(stream ? { stream: true, stream_options: { include_usage: true } } : {}),
+ ...(options.maxOutputTokens !== undefined
+ ? isNativeOpenAIModel(modelId)
+ ? { max_completion_tokens: options.maxOutputTokens }
+ : { max_tokens: options.maxOutputTokens }
+ : {}),
+ ...(!dropSamplingParams && options.temperature !== undefined
+ ? { temperature: options.temperature }
+ : {}),
+ ...(!dropSamplingParams && options.topP !== undefined ? { top_p: options.topP } : {}),
+ ...(options.stopSequences && options.stopSequences.length > 0
+ ? { stop: options.stopSequences }
+ : {}),
+ ...(toOpenAICompatibleTools(options.tools)
+ ? { tools: toOpenAICompatibleTools(options.tools) }
+ : {}),
+ ...(options.toolChoice !== undefined ? { tool_choice: options.toolChoice } : {}),
+ ...(options.seed !== undefined ? { seed: options.seed } : {}),
+ ...(!dropSamplingParams && options.presencePenalty !== undefined
+ ? { presence_penalty: options.presencePenalty }
+ : {}),
+ ...(!dropSamplingParams && options.frequencyPenalty !== undefined
+ ? { frequency_penalty: options.frequencyPenalty }
+ : {}),
+ ...(reasoningEffort !== undefined ? { reasoning_effort: reasoningEffort } : {}),
+ ...(typeof options.userId === "string" && options.userId.length > 0
+ ? { user: options.userId }
+ : {}),
+ ...(options.serviceTier !== undefined ? { service_tier: options.serviceTier } : {}),
+ ...(options.parallelToolCalls !== undefined
+ ? { parallel_tool_calls: options.parallelToolCalls }
+ : {}),
+ ...(options.responseFormat && options.responseFormat.type !== "text"
+ ? {
+ response_format: options.responseFormat.type === "json" ? { type: "json_object" } : {
+ type: "json_schema",
+ json_schema: {
+ name: options.responseFormat.name,
+ ...(typeof options.responseFormat.description === "string"
+ ? { description: options.responseFormat.description }
+ : {}),
+ schema: unwrapToolInputSchema(options.responseFormat.schema),
+ ...(options.responseFormat.strict !== undefined
+ ? { strict: options.responseFormat.strict }
+ : {}),
+ },
+ },
+ }
+ : {}),
+ };
+ const providerOpts = readProviderOptions(options.providerOptions, "openai", providerName);
+ // Normalize max_tokens → max_completion_tokens for native OpenAI models.
+ if (isNativeOpenAIModel(modelId) && "max_tokens" in providerOpts) {
+ if (!("max_completion_tokens" in providerOpts)) {
+ providerOpts.max_completion_tokens = providerOpts.max_tokens;
+ }
+ delete providerOpts.max_tokens;
+ }
+ Object.assign(body, providerOpts);
+ return body;
+ }
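
A rough sketch of what buildOpenAIChatRequest assembles for a reasoning model; the options shape here is assumed from how the fields are read above, since it is not spelled out in this diff:

const warnings = createWarningCollector();
const body = buildOpenAIChatRequest("o3-mini", "openai", {
  prompt: [/* unified messages; converted by toOpenAICompatibleMessages */],
  maxOutputTokens: 256,
  temperature: 0.7, // dropped with a warning: o3-mini is a reasoning model
  reasoning: { enabled: true, effort: "max" },
}, false, warnings);
// body.max_completion_tokens === 256 (native model, so not max_tokens)
// body.reasoning_effort === "high"   ("max" collapsed by resolveOpenAIReasoningEffort)
// "temperature" is absent from body; warnings.drain() reports the dropped setting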
+ // ---------------------------------------------------------------------------
+ // Chat streaming
+ // ---------------------------------------------------------------------------
+ function parseSseChunk(chunk) {
+ const blocks = chunk.split(/\r?\n\r?\n/);
+ const remainder = blocks.pop() ?? "";
+ const events = blocks.flatMap((block) => {
+ const dataLines = block.split(/\r?\n/)
+ .filter((line) => line.startsWith("data:"))
+ .map((line) => line.slice(5).trimStart());
+ if (!dataLines.length) {
+ return [];
+ }
+ const payload = dataLines.join("\n").trim();
+ if (payload === "[DONE]") {
+ return ["[DONE]"];
+ }
+ try {
+ return [JSON.parse(payload)];
+ }
+ catch {
+ return [];
+ }
+ });
+ return { events, remainder };
+ }
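
parseSseChunk is incremental: complete `data:` blocks become parsed events, and the trailing partial block is returned as the remainder to carry into the next read. For instance:

const { events, remainder } = parseSseChunk(
  'data: {"id":"a"}\n\ndata: [DONE]\n\ndata: {"tr'
);
// events    → [{ id: "a" }, "[DONE]"]
// remainder → 'data: {"tr' (incomplete block, reparsed once the next chunk arrives)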
+ function extractFirstChoice(payload) {
+ const record = readRecord(payload);
+ const choices = record?.choices;
+ if (!Array.isArray(choices) || choices.length === 0) {
+ return undefined;
+ }
+ const first = readRecord(choices[0]);
+ if (!first) {
+ return undefined;
+ }
+ return first;
+ }
+ function buildOpenAIGenerateResult(payload) {
+ const choice = extractFirstChoice(payload);
+ const message = readRecord(choice?.message);
+ const text = extractOpenAIContentText(message?.content);
+ const toolCalls = message ? extractOpenAIToolCalls(message) : [];
+ return {
+ content: [
+ ...(text.length > 0 ? [{ type: "text", text }] : []),
+ ...toolCalls.map((toolCall) => ({
+ type: "tool-call",
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ input: toolCall.input,
+ })),
+ ],
+ finishReason: normalizeOpenAIFinishReason(choice?.finish_reason),
+ usage: extractOpenAIUsage(payload),
+ };
+ }
+ async function* streamOpenAICompatibleParts(stream) {
+ const decoder = new TextDecoder();
+ let buffer = "";
+ const toolCalls = new Map();
+ let reasoningId = null;
+ let reasoningIndex = 0;
+ let finishReason = null;
+ let usage;
+ for await (const chunk of stream) {
+ buffer += decoder.decode(chunk, { stream: true });
+ const parsed = parseSseChunk(buffer);
+ buffer = parsed.remainder;
+ for (const event of parsed.events) {
+ if (event === "[DONE]") {
+ continue;
+ }
+ const record = readRecord(event);
+ usage = extractOpenAIUsage(record) ?? usage;
+ const choice = extractFirstChoice(record);
+ if (!choice) {
+ continue;
+ }
+ const delta = readRecord(choice.delta);
+ if (typeof delta?.reasoning_content === "string" && delta.reasoning_content.length > 0) {
+ if (!reasoningId) {
+ reasoningId = `reasoning-${reasoningIndex++}`;
+ yield {
+ type: "reasoning-start",
+ id: reasoningId,
+ };
+ }
+ yield {
+ type: "reasoning-delta",
+ id: reasoningId,
+ delta: delta.reasoning_content,
+ };
+ }
+ const textDelta = extractOpenAIContentText(delta?.content);
+ if (textDelta.length > 0) {
+ if (reasoningId) {
+ yield {
+ type: "reasoning-end",
+ id: reasoningId,
+ };
+ reasoningId = null;
+ }
+ yield { type: "text-delta", delta: textDelta };
+ }
+ const rawToolCalls = Array.isArray(delta?.tool_calls) ? delta.tool_calls : [];
+ for (const rawToolCall of rawToolCalls) {
+ if (reasoningId) {
+ yield {
+ type: "reasoning-end",
+ id: reasoningId,
+ };
+ reasoningId = null;
+ }
+ const toolCallRecord = readRecord(rawToolCall);
+ const index = typeof toolCallRecord?.index === "number" ? toolCallRecord.index : 0;
+ const current = toolCalls.get(index) ?? {
+ id: typeof toolCallRecord?.id === "string" ? toolCallRecord.id : `tool-${index}`,
+ name: "",
+ arguments: "",
+ started: false,
+ };
+ if (typeof toolCallRecord?.id === "string") {
+ current.id = toolCallRecord.id;
+ }
+ const fn = readRecord(toolCallRecord?.function);
+ if (typeof fn?.name === "string") {
+ current.name = fn.name;
+ }
+ if (!current.started && current.name.length > 0) {
+ current.started = true;
+ yield {
+ type: "tool-input-start",
+ id: current.id,
+ toolName: current.name,
+ };
+ }
+ if (typeof fn?.arguments === "string" && fn.arguments.length > 0) {
+ current.arguments += fn.arguments;
+ yield {
+ type: "tool-input-delta",
+ id: current.id,
+ delta: fn.arguments,
+ };
+ }
+ toolCalls.set(index, current);
+ }
+ const normalizedFinishReason = normalizeOpenAIFinishReason(choice.finish_reason);
+ if (normalizedFinishReason) {
+ finishReason = normalizedFinishReason;
+ }
+ }
+ }
+ if (buffer.trim().length > 0) {
+ const parsed = parseSseChunk(`${buffer}\n\n`);
+ for (const event of parsed.events) {
+ if (event === "[DONE]") {
+ continue;
+ }
+ const record = readRecord(event);
+ usage = extractOpenAIUsage(record) ?? usage;
+ }
+ }
+ if (reasoningId) {
+ yield {
+ type: "reasoning-end",
+ id: reasoningId,
+ };
+ }
+ if (finishReason &&
+ typeof finishReason === "object" &&
+ finishReason.unified === "tool-calls") {
+ for (const toolCall of toolCalls.values()) {
+ yield {
+ type: "tool-call",
+ toolCallId: toolCall.id,
+ toolName: toolCall.name,
+ input: toolCall.arguments,
+ };
+ }
+ }
+ yield {
+ type: "finish",
+ finishReason,
+ ...(usage ? { usage } : {}),
+ };
+ }
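
A consumption sketch for the part stream above; `sseBody` stands in for the async iterable of byte chunks that requestStream resolves to:

let text = "";
for await (const part of streamOpenAICompatibleParts(sseBody)) {
  if (part.type === "text-delta") text += part.delta;
  if (part.type === "tool-call") console.log("tool:", part.toolName, part.input);
  if (part.type === "finish") console.log(part.finishReason, part.usage);
}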
+ /**
+ * Convert the unified RuntimePromptMessage[] to the Responses API `input`
+ * array shape. Differences from Chat Completions:
+ * - System prompts go on the top-level `instructions` field, not inline.
+ * - Content parts use `input_text` / `output_text` discriminants instead
+ * of the Chat Completions plain-text shorthand.
+ * - Assistant tool calls become standalone `function_call` items in the
+ * input array, not nested `tool_calls` on a message.
+ * - Tool results become standalone `function_call_output` items.
+ * - Reasoning content parts roundtrip as `reasoning` items so callers can
+ * replay multi-turn conversations with chain-of-thought intact.
+ */
+ function toOpenAIResponsesInput(prompt) {
+ const instructionsParts = [];
+ const input = [];
+ for (const message of prompt) {
+ switch (message.role) {
+ case "system":
+ if (message.content.length > 0) {
+ instructionsParts.push(message.content);
+ }
+ break;
+ case "user":
+ input.push({
+ role: "user",
+ content: [{ type: "input_text", text: readTextParts(message.content) }],
+ });
+ break;
+ case "assistant": {
+ const messageContent = [];
+ for (const part of message.content) {
+ if (part.type === "text") {
+ messageContent.push({ type: "output_text", text: part.text });
+ continue;
+ }
+ if (part.type === "reasoning") {
+ // Reasoning items are top-level entries in the input array,
+ // not nested inside the assistant message — flush whatever
+ // text we've accumulated first, then push the reasoning item.
+ if (messageContent.length > 0) {
+ input.push({ role: "assistant", content: [...messageContent] });
+ messageContent.length = 0;
+ }
+ const summary = [];
+ if (typeof part.text === "string" && part.text.length > 0) {
+ summary.push({ type: "summary_text", text: part.text });
+ }
+ input.push({
+ type: "reasoning",
+ ...(typeof part.signature === "string" ? { encrypted_content: part.signature } : {}),
+ summary,
+ });
+ continue;
+ }
+ // tool-call: flush message content, then push as standalone
+ // function_call item per Responses API shape.
+ if (messageContent.length > 0) {
+ input.push({ role: "assistant", content: [...messageContent] });
+ messageContent.length = 0;
+ }
+ input.push({
+ type: "function_call",
+ call_id: part.toolCallId,
+ name: part.toolName,
+ arguments: stringifyJsonValue(part.input),
+ });
+ }
+ if (messageContent.length > 0) {
+ input.push({ role: "assistant", content: messageContent });
+ }
+ break;
+ }
+ case "tool":
+ for (const part of message.content) {
+ input.push({
+ type: "function_call_output",
+ call_id: part.toolCallId,
+ output: stringifyJsonValue(part.output.value),
+ });
+ }
+ break;
+ }
+ }
+ return {
+ ...(instructionsParts.length > 0 ? { instructions: instructionsParts.join("\n\n") } : {}),
+ input,
+ };
+ }
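
A small conversion sketch; the unified message shapes are inferred from the switch above, and readTextParts (imported from provider/shared) is assumed to flatten user content to plain text:

toOpenAIResponsesInput([
  { role: "system", content: "Be terse." },
  { role: "user", content: [{ type: "text", text: "hi" }] },
]);
// → {
//     instructions: "Be terse.",
//     input: [{ role: "user", content: [{ type: "input_text", text: "hi" }] }],
//   }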
+ /**
+ * Tools on the Responses API differ from Chat Completions: instead of
+ * `{ type: "function", function: { name, parameters } }` the function
+ * shape lifts the name/parameters/strict to the top of the entry. Native
+ * tools (web_search, file_search, computer_use, code_interpreter) live
+ * alongside function tools in the same array.
+ */
+ function toOpenAIResponsesTools(tools) {
+ if (!tools)
+ return undefined;
+ const normalized = [];
+ for (const tool of tools) {
+ if (tool.type === "function") {
+ normalized.push({
+ type: "function",
+ name: tool.name,
+ ...(typeof tool.description === "string" ? { description: tool.description } : {}),
+ parameters: unwrapToolInputSchema(tool.inputSchema),
+ });
+ continue;
+ }
+ if (!tool.id.startsWith("openai."))
+ continue;
+ const providerType = tool.id.slice("openai.".length);
+ if (providerType.length === 0)
+ continue;
+ normalized.push({
+ type: providerType,
+ ...toSnakeCaseRecord(tool.args),
+ });
+ }
+ return normalized.length > 0 ? normalized : undefined;
+ }
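
Sketch of the two tool paths (entry shapes assumed from the checks above): function tools are lifted flat, while `openai.`-prefixed native tools pass their args through toSnakeCaseRecord:

toOpenAIResponsesTools([
  { type: "function", name: "lookup", inputSchema: { type: "object" } },
  { id: "openai.web_search", args: { searchContextSize: "low" } },
]);
// → [
//     { type: "function", name: "lookup", parameters: { type: "object" } },
//     { type: "web_search", search_context_size: "low" },
//   ]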
+ function buildOpenAIResponsesRequest(modelId, providerName, options, stream, warnings) {
+ const isReasoningModel = isOpenAIReasoningModel(modelId);
+ const reasoningEffort = resolveOpenAIReasoningEffort(options.reasoning);
+ const reasoningEnabled = isReasoningModel || reasoningEffort !== undefined;
+ // Same param-sanitization rules as Chat Completions: reasoning models
+ // reject sampling params. Drop with a warning.
+ if (options.topK !== undefined) {
+ warnings.push({
+ type: "unsupported-setting",
+ provider: "openai",
+ setting: "topK",
+ details: "OpenAI Responses API does not expose top_k; the value was dropped.",
+ });
+ }
+ if (reasoningEnabled) {
+ const dropped = [
+ ["temperature", "temperature"],
+ ["topP", "top_p"],
+ ["presencePenalty", "presence_penalty"],
+ ["frequencyPenalty", "frequency_penalty"],
+ ];
+ for (const [key, openaiName] of dropped) {
+ if (options[key] !== undefined) {
+ warnings.push({
+ type: "unsupported-setting",
+ provider: "openai",
+ setting: key,
+ details: `Dropped because OpenAI reasoning models reject ${openaiName}. Reasoning was active for this request.`,
+ });
+ }
+ }
+ }
+ const { instructions, input } = toOpenAIResponsesInput(options.prompt);
+ const responsesTools = toOpenAIResponsesTools(options.tools);
+ const body = {
+ model: modelId,
+ input,
+ ...(instructions !== undefined ? { instructions } : {}),
+ ...(stream ? { stream: true } : {}),
+ ...(options.maxOutputTokens !== undefined
+ ? { max_output_tokens: options.maxOutputTokens }
+ : {}),
+ ...(!reasoningEnabled && options.temperature !== undefined
+ ? { temperature: options.temperature }
+ : {}),
+ ...(!reasoningEnabled && options.topP !== undefined ? { top_p: options.topP } : {}),
+ ...(responsesTools ? { tools: responsesTools } : {}),
+ ...(options.toolChoice !== undefined ? { tool_choice: options.toolChoice } : {}),
+ // The Responses API surfaces reasoning effort + summary verbosity
+ // in a structured `reasoning` object instead of a flat field.
+ ...(reasoningEffort !== undefined
+ ? { reasoning: { effort: reasoningEffort, summary: "auto" } }
+ : {}),
+ ...(typeof options.userId === "string" && options.userId.length > 0
+ ? { user: options.userId }
+ : {}),
+ ...(options.serviceTier !== undefined ? { service_tier: options.serviceTier } : {}),
+ ...(options.parallelToolCalls !== undefined
+ ? { parallel_tool_calls: options.parallelToolCalls }
+ : {}),
+ // Responses API uses `text.format` instead of Chat Completions'
+ // `response_format`. The shape is similar but nested under `text`.
+ ...(options.responseFormat && options.responseFormat.type !== "text"
+ ? {
+ text: {
+ format: options.responseFormat.type === "json" ? { type: "json_object" } : {
+ type: "json_schema",
+ name: options.responseFormat.name,
+ ...(typeof options.responseFormat.description === "string"
+ ? { description: options.responseFormat.description }
+ : {}),
+ schema: unwrapToolInputSchema(options.responseFormat.schema),
+ ...(options.responseFormat.strict !== undefined
+ ? { strict: options.responseFormat.strict }
+ : {}),
+ },
+ },
+ }
+ : {}),
+ };
+ Object.assign(body, readProviderOptions(options.providerOptions, "openai", providerName));
+ return body;
+ }
+ /**
+ * The Responses API uses `input_tokens` / `output_tokens` field names
+ * instead of Chat Completions' `prompt_tokens` / `completion_tokens`.
+ */
+ function extractOpenAIResponsesUsage(payload) {
+ const record = readRecord(payload);
+ // Streaming usage lives on response.completed inside `response.usage`;
+ // non-streaming has it at the top level.
+ const responseRecord = readRecord(record?.response);
+ const usage = readRecord(responseRecord?.usage) ?? readRecord(record?.usage);
+ if (!usage)
+ return undefined;
+ const inputTokens = typeof usage.input_tokens === "number" ? usage.input_tokens : undefined;
+ const outputTokens = typeof usage.output_tokens === "number" ? usage.output_tokens : undefined;
+ const totalTokens = typeof usage.total_tokens === "number"
+ ? usage.total_tokens
+ : (inputTokens !== undefined || outputTokens !== undefined
+ ? (inputTokens ?? 0) + (outputTokens ?? 0)
+ : undefined);
+ const inputDetails = readRecord(usage.input_tokens_details);
+ const cachedTokens = inputDetails?.cached_tokens;
+ return {
+ inputTokens,
+ outputTokens,
+ totalTokens,
+ ...(typeof cachedTokens === "number" ? { cacheReadInputTokens: cachedTokens } : {}),
+ };
+ }
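
Both payload nestings resolve to the same unified record, and a missing total is derived:

extractOpenAIResponsesUsage({ usage: { input_tokens: 10, output_tokens: 5 } });
// → { inputTokens: 10, outputTokens: 5, totalTokens: 15 } (total derived)
extractOpenAIResponsesUsage({ response: { usage: { input_tokens: 10, output_tokens: 5 } } });
// → same result, via the streaming `response.usage` nesting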
+ function normalizeOpenAIResponsesFinishReason(raw) {
+ if (typeof raw !== "string")
+ return null;
+ switch (raw) {
+ case "completed":
+ return { unified: "stop", raw };
+ case "incomplete":
+ return { unified: "length", raw };
+ case "failed":
+ return { unified: "error", raw };
+ case "in_progress":
+ return null;
+ default:
+ return raw;
+ }
+ }
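
The status mapping spelled out:

normalizeOpenAIResponsesFinishReason("completed");   // { unified: "stop", raw: "completed" }
normalizeOpenAIResponsesFinishReason("in_progress"); // null (non-terminal, ignored)
normalizeOpenAIResponsesFinishReason("cancelled");   // "cancelled" (unknown statuses pass through raw)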
+ function buildOpenAIResponsesGenerateResult(payload) {
+ const record = readRecord(payload);
+ const output = Array.isArray(record?.output) ? record.output : [];
+ const content = [];
+ for (const item of output) {
+ const itemRecord = readRecord(item);
+ const itemType = typeof itemRecord?.type === "string" ? itemRecord.type : undefined;
+ if (itemType === "message" && Array.isArray(itemRecord?.content)) {
+ // A message item bundles one or more output_text parts.
+ let text = "";
+ for (const part of itemRecord.content) {
+ const p = readRecord(part);
+ if (typeof p?.type === "string" && p.type === "output_text" && typeof p.text === "string") {
+ text += p.text;
+ }
+ }
+ if (text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ continue;
+ }
+ if (itemType === "function_call") {
+ content.push({
+ type: "tool-call",
+ toolCallId: typeof itemRecord?.call_id === "string"
+ ? itemRecord.call_id
+ : (typeof itemRecord?.id === "string" ? itemRecord.id : ""),
+ toolName: typeof itemRecord?.name === "string" ? itemRecord.name : "",
+ input: typeof itemRecord?.arguments === "string"
+ ? itemRecord.arguments
+ : stringifyJsonValue(itemRecord?.arguments ?? {}),
+ });
+ continue;
+ }
+ if (itemType === "reasoning") {
+ const summary = Array.isArray(itemRecord?.summary) ? itemRecord.summary : [];
+ const summaries = [];
+ for (const s of summary) {
+ const sr = readRecord(s);
+ if (typeof sr?.text === "string" && sr.text.length > 0) {
+ summaries.push({
+ ...(typeof sr?.id === "string" ? { id: sr.id } : {}),
+ text: sr.text,
+ });
+ }
+ }
+ content.push({
+ type: "reasoning",
+ ...(summaries.length > 0 ? { summaries } : {}),
+ ...(typeof itemRecord?.encrypted_content === "string"
+ ? { signature: itemRecord.encrypted_content }
+ : {}),
+ });
+ continue;
+ }
+ }
+ return {
+ content,
+ finishReason: normalizeOpenAIResponsesFinishReason(record?.status),
+ usage: extractOpenAIResponsesUsage(payload),
+ };
+ }
+ /**
+ * Parse the Responses API streaming event grammar into the same UI part
+ * shapes the existing OpenAI / Anthropic / Google streams emit.
+ */
+ async function* streamOpenAIResponsesParts(stream) {
+ const decoder = new TextDecoder();
+ let buffer = "";
+ const reasoningBlocks = new Map();
+ const functionCalls = new Map();
+ const startedToolCalls = new Set();
+ let finishReason = null;
+ let usage;
+ let reasoningCounter = 0;
+ for await (const chunk of stream) {
+ buffer += decoder.decode(chunk, { stream: true });
+ const parsed = parseSseChunk(buffer);
+ buffer = parsed.remainder;
+ for (const event of parsed.events) {
+ if (event === "[DONE]")
+ continue;
+ const record = readRecord(event);
+ const type = typeof record?.type === "string" ? record.type : undefined;
+ if (!type)
+ continue;
+ // response.output_item.added: a new output item begins.
+ if (type === "response.output_item.added") {
+ const item = readRecord(record?.item);
+ const itemType = typeof item?.type === "string" ? item.type : undefined;
+ const itemId = typeof item?.id === "string" ? item.id : undefined;
+ if (itemType === "function_call" && itemId) {
+ const callId = typeof item?.call_id === "string" ? item.call_id : itemId;
+ const name = typeof item?.name === "string" ? item.name : "";
+ functionCalls.set(itemId, {
+ id: itemId,
+ toolCallId: callId,
+ name,
+ arguments: "",
+ });
+ }
+ if (itemType === "reasoning" && itemId) {
+ reasoningBlocks.set(itemId, {
+ id: `reasoning-${reasoningCounter++}`,
+ emittedStart: false,
+ });
+ }
+ continue;
+ }
+ // response.output_text.delta: text chunk for a message item.
+ if (type === "response.output_text.delta" && typeof record?.delta === "string") {
+ if (record.delta.length > 0) {
+ yield { type: "text-delta", delta: record.delta };
+ }
+ continue;
+ }
+ // response.reasoning_summary_text.delta: reasoning summary text chunk.
+ if (type === "response.reasoning_summary_text.delta" && typeof record?.delta === "string") {
+ const itemId = typeof record?.item_id === "string" ? record.item_id : undefined;
+ const state = itemId ? reasoningBlocks.get(itemId) : undefined;
+ if (state && record.delta.length > 0) {
+ if (!state.emittedStart) {
+ yield { type: "reasoning-start", id: state.id };
+ state.emittedStart = true;
+ }
+ yield { type: "reasoning-delta", id: state.id, delta: record.delta };
+ }
+ continue;
+ }
+ // response.function_call_arguments.delta: tool call argument chunk.
+ if (type === "response.function_call_arguments.delta" && typeof record?.delta === "string") {
+ const itemId = typeof record?.item_id === "string" ? record.item_id : undefined;
+ const state = itemId ? functionCalls.get(itemId) : undefined;
+ if (state && record.delta.length > 0) {
+ if (!startedToolCalls.has(state.id)) {
+ yield {
+ type: "tool-input-start",
+ id: state.toolCallId,
+ toolName: state.name,
+ };
+ startedToolCalls.add(state.id);
+ }
+ state.arguments += record.delta;
+ yield {
+ type: "tool-input-delta",
+ id: state.toolCallId,
+ delta: record.delta,
+ };
+ }
+ continue;
+ }
+ // response.output_item.done: an item has finished emitting deltas.
+ if (type === "response.output_item.done") {
+ const item = readRecord(record?.item);
+ const itemType = typeof item?.type === "string" ? item.type : undefined;
+ const itemId = typeof item?.id === "string" ? item.id : undefined;
+ if (itemType === "reasoning" && itemId) {
+ const state = reasoningBlocks.get(itemId);
+ if (state?.emittedStart) {
+ yield { type: "reasoning-end", id: state.id };
+ }
+ reasoningBlocks.delete(itemId);
+ }
+ if (itemType === "function_call" && itemId) {
+ const state = functionCalls.get(itemId);
+ if (state) {
+ yield {
+ type: "tool-call",
+ toolCallId: state.toolCallId,
+ toolName: state.name,
+ input: state.arguments,
+ };
+ }
+ functionCalls.delete(itemId);
+ }
+ continue;
+ }
+ // response.completed: terminal event with the final response object.
+ if (type === "response.completed") {
+ usage = extractOpenAIResponsesUsage(record) ?? usage;
+ const responseRecord = readRecord(record?.response);
+ finishReason = normalizeOpenAIResponsesFinishReason(responseRecord?.status);
+ continue;
+ }
+ if (type === "response.failed" || type === "response.incomplete") {
+ const responseRecord = readRecord(record?.response);
+ finishReason = normalizeOpenAIResponsesFinishReason(responseRecord?.status) ??
+ (type === "response.failed"
+ ? { unified: "error", raw: "failed" }
+ : { unified: "length", raw: "incomplete" });
+ usage = extractOpenAIResponsesUsage(record) ?? usage;
+ continue;
+ }
+ }
+ }
+ // Close any reasoning streams still open at end-of-stream (defensive).
+ for (const state of reasoningBlocks.values()) {
+ if (state.emittedStart) {
+ yield { type: "reasoning-end", id: state.id };
+ }
+ }
+ yield {
+ type: "finish",
+ finishReason,
+ ...(usage ? { usage } : {}),
+ };
+ }
+ // ---------------------------------------------------------------------------
+ // Public factory functions
+ // ---------------------------------------------------------------------------
+ export function createOpenAIModelRuntime(config, modelId) {
+ const fetchImpl = config.fetch ?? globalThis.fetch;
+ return {
+ provider: config.name ?? "openai",
+ modelId,
+ specificationVersion: "v3",
+ supportedUrls: {},
+ doGenerate(optionsForRuntime) {
+ const options = optionsForRuntime;
+ const url = getOpenAIChatCompletionsUrl(config.baseURL);
+ const warnings = createWarningCollector();
+ const body = buildOpenAIChatRequest(modelId, config.name ?? "openai", options, false, warnings);
+ return requestJson({
+ url,
+ fetchImpl,
+ providerLabel: config.name ?? "openai",
+ providerKind: "openai",
+ init: createOpenAIRequestInit({
+ apiKey: config.apiKey,
+ extraHeaders: options.headers,
+ body: JSON.stringify(body),
+ signal: options.abortSignal,
+ }),
+ }).then((payload) => {
+ const drained = warnings.drain();
+ return {
+ ...buildOpenAIGenerateResult(payload),
+ ...(drained.length > 0 ? { warnings: drained } : {}),
+ };
+ });
+ },
+ doStream(optionsForRuntime) {
+ const options = optionsForRuntime;
+ const url = getOpenAIChatCompletionsUrl(config.baseURL);
+ const warnings = createWarningCollector();
+ const body = buildOpenAIChatRequest(modelId, config.name ?? "openai", options, true, warnings);
+ return requestStream({
+ url,
+ fetchImpl,
+ providerLabel: config.name ?? "openai",
+ providerKind: "openai",
+ init: createOpenAIRequestInit({
+ apiKey: config.apiKey,
+ extraHeaders: options.headers,
+ body: JSON.stringify(body),
+ signal: options.abortSignal,
+ }),
+ }).then((responseStream) => {
+ const drained = warnings.drain();
+ return {
+ stream: ReadableStream.from(withToolInputStatusTransitions(streamOpenAICompatibleParts(responseStream))),
+ ...(drained.length > 0 ? { warnings: drained } : {}),
+ };
+ });
+ },
+ };
+ }
+ export function createOpenAIResponsesRuntime(config, modelId) {
+ const fetchImpl = config.fetch ?? globalThis.fetch;
+ return {
+ provider: config.name ?? "openai",
+ modelId,
+ specificationVersion: "v3",
+ supportedUrls: {},
+ doGenerate(optionsForRuntime) {
+ const options = optionsForRuntime;
+ const url = getOpenAIResponsesUrl(config.baseURL);
+ const warnings = createWarningCollector();
+ const body = buildOpenAIResponsesRequest(modelId, config.name ?? "openai", options, false, warnings);
+ return requestJson({
+ url,
+ fetchImpl,
+ providerLabel: config.name ?? "openai",
+ providerKind: "openai",
+ init: createOpenAIRequestInit({
+ apiKey: config.apiKey,
+ extraHeaders: options.headers,
+ body: JSON.stringify(body),
+ signal: options.abortSignal,
+ }),
+ }).then((payload) => {
+ const drained = warnings.drain();
+ return {
+ ...buildOpenAIResponsesGenerateResult(payload),
+ ...(drained.length > 0 ? { warnings: drained } : {}),
+ };
+ });
+ },
+ doStream(optionsForRuntime) {
+ const options = optionsForRuntime;
+ const url = getOpenAIResponsesUrl(config.baseURL);
+ const warnings = createWarningCollector();
+ const body = buildOpenAIResponsesRequest(modelId, config.name ?? "openai", options, true, warnings);
+ return requestStream({
+ url,
+ fetchImpl,
+ providerLabel: config.name ?? "openai",
+ providerKind: "openai",
+ init: createOpenAIRequestInit({
+ apiKey: config.apiKey,
+ extraHeaders: options.headers,
+ body: JSON.stringify(body),
+ signal: options.abortSignal,
+ }),
+ }).then((responseStream) => {
+ const drained = warnings.drain();
+ return {
+ stream: ReadableStream.from(withToolInputStatusTransitions(streamOpenAIResponsesParts(responseStream))),
+ ...(drained.length > 0 ? { warnings: drained } : {}),
+ };
+ });
+ },
+ };
+ }
+ export function createOpenAIEmbeddingRuntime(config, modelId) {
+ const fetchImpl = config.fetch ?? globalThis.fetch;
+ return {
+ provider: config.name ?? "openai",
+ modelId,
+ supportsParallelCalls: true,
+ doEmbed({ values, abortSignal }) {
+ if (values.length === 0) {
+ return Promise.resolve({
+ embeddings: [],
+ warnings: [],
+ rawResponse: { data: [] },
+ });
+ }
+ const url = getOpenAIEmbeddingUrl(config.baseURL);
+ return requestJson({
+ url,
+ fetchImpl,
+ providerLabel: config.name ?? "openai",
+ providerKind: "openai",
+ init: createOpenAIRequestInit({
+ apiKey: config.apiKey,
+ body: JSON.stringify({
+ model: modelId,
+ input: values,
+ }),
+ signal: abortSignal,
+ }),
+ }).then((payload) => ({
+ embeddings: extractOpenAIEmbeddings(payload),
+ usage: {
+ tokens: extractOpenAIUsageTokens(payload),
+ },
+ rawResponse: payload,
+ warnings: [],
+ }));
+ },
+ };
+ }
+ export class OpenAIProvider {
+ id = "openai";
+ createModel(modelId, config) {
+ return createOpenAIModelRuntime({
+ apiKey: config.credential,
+ baseURL: config.baseURL,
+ name: config.name ?? "openai",
+ fetch: config.fetch,
+ }, modelId);
+ }
+ createEmbedding(modelId, config) {
+ return createOpenAIEmbeddingRuntime({
+ apiKey: config.credential,
+ baseURL: config.baseURL,
+ name: config.name ?? "openai",
+ fetch: config.fetch,
+ }, modelId);
+ }
+ createResponses(modelId, config) {
+ return createOpenAIResponsesRuntime({
+ apiKey: config.credential,
+ baseURL: config.baseURL,
+ name: config.name ?? "openai",
+ fetch: config.fetch,
+ }, modelId);
+ }
+ }
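
Finally, a hypothetical wiring sketch for the exported class. The config field names follow createModel above; URL handling lives in getOpenAIChatCompletionsUrl (imported from provider/shared, not shown in this diff), and the prompt shape is assumed:

const provider = new OpenAIProvider();
const runtime = provider.createModel("gpt-4o-mini", {
  credential: "sk-...", // your OpenAI API key
  baseURL: "https://api.openai.com/v1",
});
const result = await runtime.doGenerate({
  prompt: [{ role: "user", content: [{ type: "text", text: "ping" }] }],
});
console.log(result.content, result.finishReason, result.usage);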