@ai-sdk/openai 2.0.45 → 2.0.47
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +25 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +25 -5
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -1
- package/dist/internal/index.d.ts +1 -1
- package/dist/internal/index.js +24 -4
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +24 -4
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED

@@ -445,13 +445,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
       structuredOutputs: z3.boolean().optional(),
       /**
        * Service tier for the request.
-       * - 'auto': Default service tier
+       * - 'auto': Default service tier. The request will be processed with the service tier configured in the
+       *   Project settings. Unless otherwise configured, the Project will use 'default'.
        * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
        * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+       * - 'default': The request will be processed with the standard pricing and performance for the selected model.
        *
        * @default 'auto'
        */
-      serviceTier: z3.enum(["auto", "flex", "priority"]).optional(),
+      serviceTier: z3.enum(["auto", "flex", "priority", "default"]).optional(),
       /**
        * Whether to use strict JSON schema validation.
        *
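For context, a minimal sketch of how the widened `serviceTier` enum could be used through AI SDK provider options on the chat path. The model id and prompt are illustrative placeholders, not taken from this diff:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: explicitly request the newly accepted 'default' service tier.
// 'gpt-4o' and the prompt are placeholders; any chat model is passed the same way.
const { text } = await generateText({
  model: openai.chat('gpt-4o'),
  prompt: 'Summarize the latest changelog entry.',
  providerOptions: {
    openai: {
      serviceTier: 'default', // previously only 'auto' | 'flex' | 'priority' passed validation
    },
  },
});
```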
@@ -2415,11 +2417,29 @@ async function convertToOpenAIResponsesInput({
         case "error-text":
           contentValue = output.value;
           break;
-        case "content":
         case "json":
         case "error-json":
           contentValue = JSON.stringify(output.value);
           break;
+        case "content":
+          contentValue = output.value.map((item) => {
+            switch (item.type) {
+              case "text": {
+                return { type: "input_text", text: item.text };
+              }
+              case "media": {
+                return item.mediaType.startsWith("image/") ? {
+                  type: "input_image",
+                  image_url: `data:${item.mediaType};base64,${item.data}`
+                } : {
+                  type: "input_file",
+                  filename: "data",
+                  file_data: `data:${item.mediaType};base64,${item.data}`
+                };
+              }
+            }
+          });
+          break;
       }
       input.push({
         type: "function_call_output",
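With the new `case "content"` branch, multi-part tool results are forwarded to the Responses API as structured input parts rather than falling through to JSON stringification. A small sketch of the mapping this branch performs, using a made-up tool result (the text and base64 payload are illustrative, not from the package):

```ts
// Hypothetical multi-part tool result, shaped like the `output` value the
// converter receives in the branch above.
const output = {
  type: 'content',
  value: [
    { type: 'text', text: 'Rendered the chart.' },
    { type: 'media', mediaType: 'image/png', data: 'iVBORw0KGgo=' },
  ],
} as const;

// Per the added branch, the function_call_output content would become:
// [
//   { type: 'input_text', text: 'Rendered the chart.' },
//   { type: 'input_image', image_url: 'data:image/png;base64,iVBORw0KGgo=' },
// ]
// Any non-image media type would instead map to
// { type: 'input_file', filename: 'data', file_data: 'data:<mediaType>;base64,<data>' }.
```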
@@ -2986,7 +3006,7 @@ var openaiResponsesProviderOptionsSchema = lazyValidator12(
       reasoningEffort: z15.string().nullish(),
       reasoningSummary: z15.string().nullish(),
       safetyIdentifier: z15.string().nullish(),
-      serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
+      serviceTier: z15.enum(["auto", "flex", "priority", "default"]).nullish(),
       store: z15.boolean().nullish(),
       strictJsonSchema: z15.boolean().nullish(),
       textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
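The same widened enum applies to the Responses API provider options schema. A minimal sketch, assuming the `openai.responses` model factory and a placeholder model id:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: the 'default' tier passed through the Responses API options schema.
const result = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello from the Responses API.',
  providerOptions: {
    openai: { serviceTier: 'default' },
  },
});
```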