@ai-sdk/openai 3.0.0-beta.74 → 3.0.0-beta.75
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +0 -1
- package/dist/index.d.ts +0 -1
- package/dist/index.js +8 -24
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +8 -24
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +0 -1
- package/dist/internal/index.d.ts +0 -1
- package/dist/internal/index.js +7 -23
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +7 -23
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
@@ -444,12 +444,6 @@ var openaiChatLanguageModelOptions = lazySchema2(
        * Parameters for prediction mode.
        */
       prediction: z3.record(z3.string(), z3.any()).optional(),
-      /**
-       * Whether to use structured outputs.
-       *
-       * @default true
-       */
-      structuredOutputs: z3.boolean().optional(),
       /**
        * Service tier for the request.
        * - 'auto': Default service tier. The request will be processed with the service tier configured in the
@@ -464,7 +458,7 @@ var openaiChatLanguageModelOptions = lazySchema2(
       /**
        * Whether to use strict JSON schema validation.
        *
-       * @default
+       * @default true
        */
       strictJsonSchema: z3.boolean().optional(),
       /**
@@ -505,7 +499,6 @@ import {
 function prepareChatTools({
   tools,
   toolChoice,
-  structuredOutputs,
   strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -523,7 +516,7 @@ function prepareChatTools({
           name: tool.name,
           description: tool.description,
           parameters: tool.inputSchema,
-          strict:
+          strict: strictJsonSchema
         }
       });
       break;
@@ -592,24 +585,16 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c
+    var _a, _b, _c;
     const warnings = [];
     const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
       providerOptions,
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
-    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
-      warnings.push({
-        type: "unsupported",
-        feature: "responseFormat",
-        details: "JSON response format schema is only supported with structuredOutputs"
-      });
-    }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
@@ -617,7 +602,7 @@ var OpenAIChatLanguageModel = class {
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_b = openaiOptions.strictJsonSchema) != null ? _b : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -633,12 +618,12 @@ var OpenAIChatLanguageModel = class {
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ?
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? responseFormat.schema != null ? {
         type: "json_schema",
         json_schema: {
           schema: responseFormat.schema,
           strict: strictJsonSchema,
-          name: (
+          name: (_c = responseFormat.name) != null ? _c : "response",
           description: responseFormat.description
         }
       } : { type: "json_object" } : void 0,
@@ -752,7 +737,6 @@ var OpenAIChatLanguageModel = class {
     } = prepareChatTools({
       tools,
       toolChoice,
-      structuredOutputs,
       strictJsonSchema
     });
     return {
@@ -3920,7 +3904,7 @@ var OpenAIResponsesLanguageModel = class {
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b :
+    const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {