@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +23 -0
- package/dist/index.d.mts +11 -50
- package/dist/index.d.ts +11 -50
- package/dist/index.js +273 -301
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +278 -305
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -39
- package/dist/internal/index.d.ts +30 -39
- package/dist/internal/index.js +273 -294
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +276 -298
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +5 -4
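Functionally, this diff pairs a few changes: `getArgs` and `parseProviderOptions` become async and are awaited throughout, chat and completion `logprobs` support is removed, completion settings (`echo`, `logitBias`, `suffix`, `user`) move from constructor settings to per-call provider options, responses models gain a `reasoningSummary` option surfaced as `reasoning` content parts, and `gpt-image-1` is recognized with its default response format. A minimal usage sketch of the two option surfaces follows; it assumes the AI SDK v5 canary `generateText` API, and the option values shown are illustrative, not prescribed by this diff:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Completion options (echo, logitBias, suffix, user) are now parsed from
// providerOptions on each call instead of constructor settings:
const completion = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Say hello.',
  providerOptions: {
    openai: { suffix: '\n', user: 'user-123' }, // illustrative values
  },
});

// Responses models can request a reasoning summary; summary text is emitted
// as `reasoning` parts (streamed via response.reasoning_summary_text.delta):
const reasoned = await generateText({
  model: openai.responses('o3-mini'),
  prompt: 'Why is the sky blue?',
  providerOptions: {
    // the schema accepts any string; 'auto' is an assumed example value
    openai: { reasoningSummary: 'auto' },
  },
});
```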
package/dist/index.js CHANGED

@@ -211,19 +211,6 @@ function getResponseMetadata({
   };
 }
 
-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  var _a, _b;
-  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
-    token,
-    logprob,
-    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
-      token: token2,
-      logprob: logprob2
-    })) : []
-  }))) != null ? _b : void 0;
-}
-
 // src/map-openai-finish-reason.ts
 function mapOpenAIFinishReason(finishReason) {
   switch (finishReason) {
@@ -251,16 +238,6 @@ var openaiProviderOptions = import_zod.z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   */
-  logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
@@ -384,7 +361,7 @@ var OpenAIChatLanguageModel = class {
       "image/*": [/^https?:\/\/.*$/]
     };
   }
-  getArgs({
+  async getArgs({
     prompt,
     maxOutputTokens,
     temperature,
@@ -401,7 +378,7 @@
   }) {
     var _a, _b, _c;
     const warnings = [];
-    const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+    const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openaiProviderOptions
@@ -431,8 +408,6 @@
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
-      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
@@ -505,20 +480,6 @@
         message: "logitBias is not supported for reasoning models"
       });
     }
-    if (baseArgs.logprobs != null) {
-      baseArgs.logprobs = void 0;
-      warnings.push({
-        type: "other",
-        message: "logprobs is not supported for reasoning models"
-      });
-    }
-    if (baseArgs.top_logprobs != null) {
-      baseArgs.top_logprobs = void 0;
-      warnings.push({
-        type: "other",
-        message: "topLogprobs is not supported for reasoning models"
-      });
-    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -555,7 +516,7 @@
   }
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -618,12 +579,11 @@
         body: rawResponse
       },
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
@@ -651,7 +611,6 @@
      inputTokens: void 0,
      outputTokens: void 0
    };
-    let logprobs;
    let isFirstChunk = true;
    const providerMetadata = { openai: {} };
    return {
@@ -716,13 +675,6 @@
              text: delta.content
            });
          }
-          const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
          if (delta.tool_calls != null) {
            for (const toolCallDelta of delta.tool_calls) {
              const index = toolCallDelta.index;
@@ -809,7 +761,6 @@
          controller.enqueue({
            type: "finish",
            finishReason,
-            logprobs,
            usage,
            ...providerMetadata != null ? { providerMetadata } : {}
          });
@@ -854,20 +805,6 @@ var openaiChatResponseSchema = import_zod3.z.object({
        ).nullish()
      }),
      index: import_zod3.z.number(),
-      logprobs: import_zod3.z.object({
-        content: import_zod3.z.array(
-          import_zod3.z.object({
-            token: import_zod3.z.string(),
-            logprob: import_zod3.z.number(),
-            top_logprobs: import_zod3.z.array(
-              import_zod3.z.object({
-                token: import_zod3.z.string(),
-                logprob: import_zod3.z.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
      finish_reason: import_zod3.z.string().nullish()
    })
  ),
@@ -895,20 +832,6 @@ var openaiChatChunkSchema = import_zod3.z.union([
        })
      ).nullish()
    }).nullish(),
-      logprobs: import_zod3.z.object({
-        content: import_zod3.z.array(
-          import_zod3.z.object({
-            token: import_zod3.z.string(),
-            logprob: import_zod3.z.number(),
-            top_logprobs: import_zod3.z.array(
-              import_zod3.z.object({
-                token: import_zod3.z.string(),
-                logprob: import_zod3.z.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
    finish_reason: import_zod3.z.string().nullable().optional(),
    index: import_zod3.z.number()
  })
@@ -950,7 +873,7 @@ var reasoningModels = {
 
 // src/openai-completion-language-model.ts
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var …
+var import_zod5 = require("zod");
 
 // src/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
@@ -1031,28 +954,49 @@ ${user}:`]
   };
 }
 
-// src/ …
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
+// src/openai-completion-options.ts
+var import_zod4 = require("zod");
+var openaiCompletionProviderOptions = import_zod4.z.object({
+  /**
+  Echo back the prompt in addition to the completion.
+   */
+  echo: import_zod4.z.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+   */
+  logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+   */
+  suffix: import_zod4.z.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+   */
+  user: import_zod4.z.string().optional()
+});
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId, …
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   get provider() {
     return this.config.provider;
   }
@@ -1061,7 +1005,7 @@ var OpenAICompletionLanguageModel = class {
       // no supported urls for completion models
     };
   }
-  getArgs({
+  async getArgs({
    inputFormat,
    prompt,
    maxOutputTokens,
@@ -1074,9 +1018,22 @@
    responseFormat,
    tools,
    toolChoice,
-    seed
+    seed,
+    providerOptions
  }) {
    const warnings = [];
+    const openaiOptions = {
+      ...await (0, import_provider_utils4.parseProviderOptions)({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await (0, import_provider_utils4.parseProviderOptions)({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
@@ -1100,11 +1057,10 @@
      // model id:
      model: this.modelId,
      // model specific settings:
-      echo: …
-      logit_bias: …
-…
-…
-      user: this.settings.user,
+      echo: openaiOptions.echo,
+      logit_bias: openaiOptions.logitBias,
+      suffix: openaiOptions.suffix,
+      user: openaiOptions.user,
      // standardized settings:
      max_tokens: maxOutputTokens,
      temperature,
@@ -1121,7 +1077,7 @@
    };
  }
  async doGenerate(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
@@ -1148,7 +1104,6 @@
        outputTokens: response.usage.completion_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
      request: { body: args },
      response: {
        ...getResponseMetadata(response),
@@ -1159,7 +1114,7 @@
    };
  }
  async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
@@ -1185,7 +1140,6 @@
      inputTokens: void 0,
      outputTokens: void 0
    };
-    let logprobs;
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
@@ -1226,19 +1180,11 @@
            text: choice.text
          });
        }
-        const mappedLogprobs = mapOpenAICompletionLogProbs(
-          choice == null ? void 0 : choice.logprobs
-        );
-        if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-          if (logprobs === void 0) logprobs = [];
-          logprobs.push(...mappedLogprobs);
-        }
      },
      flush(controller) {
        controller.enqueue({
          type: "finish",
          finishReason,
-          logprobs,
          usage
        });
      }
@@ -1249,46 +1195,36 @@
    };
  }
 };
-var openaiCompletionResponseSchema = …
-  id: …
-  created: …
-  model: …
-  choices: …
-…
-      text: …
-      finish_reason: …
-      logprobs: import_zod4.z.object({
-        tokens: import_zod4.z.array(import_zod4.z.string()),
-        token_logprobs: import_zod4.z.array(import_zod4.z.number()),
-        top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
-      }).nullish()
+var openaiCompletionResponseSchema = import_zod5.z.object({
+  id: import_zod5.z.string().nullish(),
+  created: import_zod5.z.number().nullish(),
+  model: import_zod5.z.string().nullish(),
+  choices: import_zod5.z.array(
+    import_zod5.z.object({
+      text: import_zod5.z.string(),
+      finish_reason: import_zod5.z.string()
    })
  ),
-  usage: …
-    prompt_tokens: …
-    completion_tokens: …
+  usage: import_zod5.z.object({
+    prompt_tokens: import_zod5.z.number(),
+    completion_tokens: import_zod5.z.number()
  })
 });
-var openaiCompletionChunkSchema = …
-…
-    id: …
-    created: …
-    model: …
-    choices: …
-…
-        text: …
-        finish_reason: …
-        index: …
-        logprobs: import_zod4.z.object({
-          tokens: import_zod4.z.array(import_zod4.z.string()),
-          token_logprobs: import_zod4.z.array(import_zod4.z.number()),
-          top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
-        }).nullish()
+var openaiCompletionChunkSchema = import_zod5.z.union([
+  import_zod5.z.object({
+    id: import_zod5.z.string().nullish(),
+    created: import_zod5.z.number().nullish(),
+    model: import_zod5.z.string().nullish(),
+    choices: import_zod5.z.array(
+      import_zod5.z.object({
+        text: import_zod5.z.string(),
+        finish_reason: import_zod5.z.string().nullish(),
+        index: import_zod5.z.number()
      })
    ),
-    usage: …
-      prompt_tokens: …
-      completion_tokens: …
+    usage: import_zod5.z.object({
+      prompt_tokens: import_zod5.z.number(),
+      completion_tokens: import_zod5.z.number()
    }).nullish()
  }),
  openaiErrorDataSchema
@@ -1297,21 +1233,21 @@ var openaiCompletionChunkSchema = import_zod4.z.union([
 // src/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
-var …
+var import_zod7 = require("zod");
 
 // src/openai-embedding-options.ts
-var …
-var openaiEmbeddingProviderOptions = …
+var import_zod6 = require("zod");
+var openaiEmbeddingProviderOptions = import_zod6.z.object({
   /**
   The number of dimensions the resulting output embeddings should have.
   Only supported in text-embedding-3 and later models.
    */
-  dimensions: …
+  dimensions: import_zod6.z.number().optional(),
   /**
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
    */
-  user: …
+  user: import_zod6.z.string().optional()
 });
 
 // src/openai-embedding-model.ts
@@ -1348,7 +1284,7 @@ var OpenAIEmbeddingModel = class {
        values
      });
    }
-    const openaiOptions = (_a = (0, import_provider_utils5.parseProviderOptions)({
+    const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
@@ -1384,7 +1320,22 @@
    };
  }
 };
-var openaiTextEmbeddingResponseSchema = …
-  data: …
-  usage: …
+var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
+  data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
+  usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
 });
 
 // src/openai-image-model.ts
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
-var …
+var import_zod8 = require("zod");
 
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {
@@ -1449,7 +1387,7 @@ var OpenAIImageModel = class {
        n,
        size,
        ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
@@ -1469,13 +1407,13 @@
    };
  }
 };
-var openaiImageResponseSchema = …
-  data: …
+var openaiImageResponseSchema = import_zod8.z.object({
+  data: import_zod8.z.array(import_zod8.z.object({ b64_json: import_zod8.z.string() }))
 });
 
 // src/openai-tools.ts
-var …
-var WebSearchPreviewParameters = …
+var import_zod9 = require("zod");
+var WebSearchPreviewParameters = import_zod9.z.object({});
 function webSearchPreviewTool({
   searchContextSize,
   userLocation
@@ -1496,13 +1434,13 @@ var openaiTools = {
 
 // src/openai-transcription-model.ts
 var import_provider_utils7 = require("@ai-sdk/provider-utils");
-var …
-var openAIProviderOptionsSchema = …
-  include: …
-  language: …
-  prompt: …
-  temperature: …
-  timestampGranularities: …
+var import_zod10 = require("zod");
+var openAIProviderOptionsSchema = import_zod10.z.object({
+  include: import_zod10.z.array(import_zod10.z.string()).nullish(),
+  language: import_zod10.z.string().nullish(),
+  prompt: import_zod10.z.string().nullish(),
+  temperature: import_zod10.z.number().min(0).max(1).nullish().default(0),
+  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).nullish().default(["segment"])
 });
 var languageMap = {
   afrikaans: "af",
@@ -1572,14 +1510,14 @@ var OpenAITranscriptionModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    var _a, _b, _c, _d, _e;
    const warnings = [];
-    const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+    const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openAIProviderOptionsSchema
@@ -1611,7 +1549,7 @@
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { formData, warnings } = this.getArgs(options);
+    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
@@ -1650,22 +1588,22 @@
    };
  }
 };
-var openaiTranscriptionResponseSchema = …
-  text: …
-  language: …
-  duration: …
-  words: …
-…
-      word: …
-      start: …
-      end: …
+var openaiTranscriptionResponseSchema = import_zod10.z.object({
+  text: import_zod10.z.string(),
+  language: import_zod10.z.string().nullish(),
+  duration: import_zod10.z.number().nullish(),
+  words: import_zod10.z.array(
+    import_zod10.z.object({
+      word: import_zod10.z.string(),
+      start: import_zod10.z.number(),
+      end: import_zod10.z.number()
    })
  ).nullish()
 });
 
 // src/responses/openai-responses-language-model.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var …
+var import_zod11 = require("zod");
 
 // src/responses/convert-to-openai-responses-messages.ts
 var import_provider6 = require("@ai-sdk/provider");
@@ -1885,7 +1823,7 @@ var OpenAIResponsesLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
@@ -1929,7 +1867,7 @@
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
-    const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+    const openaiOptions = await (0, import_provider_utils8.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
@@ -1960,8 +1898,15 @@
      user: openaiOptions == null ? void 0 : openaiOptions.user,
      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
      // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { …
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
      },
      ...modelConfig.requiredAutoTruncation && {
        truncation: "auto"
@@ -2005,7 +1950,7 @@
  }
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
@@ -2019,49 +1964,55 @@
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
-…
-          id: …
-          created_at: …
-          model: …
-          output: …
-…
-…
-              type: …
-              role: …
-              content: …
-…
-                type: …
-                text: …
-                annotations: …
-…
-                    type: …
-                    start_index: …
-                    end_index: …
-                    url: …
-                    title: …
+        import_zod11.z.object({
+          id: import_zod11.z.string(),
+          created_at: import_zod11.z.number(),
+          model: import_zod11.z.string(),
+          output: import_zod11.z.array(
+            import_zod11.z.discriminatedUnion("type", [
+              import_zod11.z.object({
+                type: import_zod11.z.literal("message"),
+                role: import_zod11.z.literal("assistant"),
+                content: import_zod11.z.array(
+                  import_zod11.z.object({
+                    type: import_zod11.z.literal("output_text"),
+                    text: import_zod11.z.string(),
+                    annotations: import_zod11.z.array(
+                      import_zod11.z.object({
+                        type: import_zod11.z.literal("url_citation"),
+                        start_index: import_zod11.z.number(),
+                        end_index: import_zod11.z.number(),
+                        url: import_zod11.z.string(),
+                        title: import_zod11.z.string()
                      })
                    )
                  })
                )
              }),
-…
-              type: …
-              call_id: …
-              name: …
-              arguments: …
+              import_zod11.z.object({
+                type: import_zod11.z.literal("function_call"),
+                call_id: import_zod11.z.string(),
+                name: import_zod11.z.string(),
+                arguments: import_zod11.z.string()
              }),
-…
-              type: …
+              import_zod11.z.object({
+                type: import_zod11.z.literal("web_search_call")
              }),
-…
-              type: …
+              import_zod11.z.object({
+                type: import_zod11.z.literal("computer_call")
              }),
-…
-              type: …
+              import_zod11.z.object({
+                type: import_zod11.z.literal("reasoning"),
+                summary: import_zod11.z.array(
+                  import_zod11.z.object({
+                    type: import_zod11.z.literal("summary_text"),
+                    text: import_zod11.z.string()
+                  })
+                )
              })
            ])
          ),
-          incomplete_details: …
+          incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullable(),
          usage: usageSchema
        })
      ),
@@ -2071,6 +2022,13 @@
    const content = [];
    for (const part of response.output) {
      switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
        case "message": {
          for (const contentPart of part.content) {
            content.push({
@@ -2130,7 +2088,7 @@
    };
  }
  async doStream(options) {
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
      url: this.config.url({
        path: "/responses",
@@ -2211,6 +2169,11 @@
              type: "text",
              text: value.delta
            });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              text: value.delta
+            });
          } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
            ongoingToolCalls[value.output_index] = void 0;
            hasToolCalls = true;
@@ -2263,79 +2226,86 @@
    };
  }
 };
-var usageSchema = …
-  input_tokens: …
-  input_tokens_details: …
-  output_tokens: …
-  output_tokens_details: …
+var usageSchema = import_zod11.z.object({
+  input_tokens: import_zod11.z.number(),
+  input_tokens_details: import_zod11.z.object({ cached_tokens: import_zod11.z.number().nullish() }).nullish(),
+  output_tokens: import_zod11.z.number(),
+  output_tokens_details: import_zod11.z.object({ reasoning_tokens: import_zod11.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = …
-  type: …
-  delta: …
+var textDeltaChunkSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_text.delta"),
+  delta: import_zod11.z.string()
 });
-var responseFinishedChunkSchema = …
-  type: …
-  response: …
-    incomplete_details: …
+var responseFinishedChunkSchema = import_zod11.z.object({
+  type: import_zod11.z.enum(["response.completed", "response.incomplete"]),
+  response: import_zod11.z.object({
+    incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullish(),
    usage: usageSchema
  })
 });
-var responseCreatedChunkSchema = …
-  type: …
-  response: …
-    id: …
-    created_at: …
-    model: …
+var responseCreatedChunkSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.created"),
+  response: import_zod11.z.object({
+    id: import_zod11.z.string(),
+    created_at: import_zod11.z.number(),
+    model: import_zod11.z.string()
  })
 });
-var responseOutputItemDoneSchema = …
-  type: …
-  output_index: …
-  item: …
-…
-      type: …
+var responseOutputItemDoneSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_item.done"),
+  output_index: import_zod11.z.number(),
+  item: import_zod11.z.discriminatedUnion("type", [
+    import_zod11.z.object({
+      type: import_zod11.z.literal("message")
    }),
-…
-      type: …
-      id: …
-      call_id: …
-      name: …
-      arguments: …
-      status: …
+    import_zod11.z.object({
+      type: import_zod11.z.literal("function_call"),
+      id: import_zod11.z.string(),
+      call_id: import_zod11.z.string(),
+      name: import_zod11.z.string(),
+      arguments: import_zod11.z.string(),
+      status: import_zod11.z.literal("completed")
    })
  ])
 });
-var responseFunctionCallArgumentsDeltaSchema = …
-  type: …
-  item_id: …
-  output_index: …
-  delta: …
+var responseFunctionCallArgumentsDeltaSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.function_call_arguments.delta"),
+  item_id: import_zod11.z.string(),
+  output_index: import_zod11.z.number(),
+  delta: import_zod11.z.string()
 });
-var responseOutputItemAddedSchema = …
-  type: …
-  output_index: …
-  item: …
-…
-      type: …
+var responseOutputItemAddedSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_item.added"),
+  output_index: import_zod11.z.number(),
+  item: import_zod11.z.discriminatedUnion("type", [
+    import_zod11.z.object({
+      type: import_zod11.z.literal("message")
    }),
-…
-      type: …
-      id: …
-      call_id: …
-      name: …
-      arguments: …
+    import_zod11.z.object({
+      type: import_zod11.z.literal("function_call"),
+      id: import_zod11.z.string(),
+      call_id: import_zod11.z.string(),
+      name: import_zod11.z.string(),
+      arguments: import_zod11.z.string()
    })
  ])
 });
-var responseAnnotationAddedSchema = …
-  type: …
-  annotation: …
-    type: …
-    url: …
-    title: …
+var responseAnnotationAddedSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_text.annotation.added"),
+  annotation: import_zod11.z.object({
+    type: import_zod11.z.literal("url_citation"),
+    url: import_zod11.z.string(),
+    title: import_zod11.z.string()
  })
 });
-var …
+var responseReasoningSummaryTextDeltaSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_zod11.z.string(),
+  output_index: import_zod11.z.number(),
+  summary_index: import_zod11.z.number(),
+  delta: import_zod11.z.string()
+});
+var openaiResponsesChunkSchema = import_zod11.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2343,7 +2313,8 @@ var openaiResponsesChunkSchema = import_zod10.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
-…
+  responseReasoningSummaryTextDeltaSchema,
+  import_zod11.z.object({ type: import_zod11.z.string() }).passthrough()
  // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2367,6 +2338,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2388,23 +2362,24 @@ function getResponsesModelConfig(modelId) {
    requiredAutoTruncation: false
  };
 }
-var openaiResponsesProviderOptionsSchema = …
-  metadata: …
-  parallelToolCalls: …
-  previousResponseId: …
-  store: …
-  user: …
-  reasoningEffort: …
-  strictSchemas: …
-  instructions: …
+var openaiResponsesProviderOptionsSchema = import_zod11.z.object({
+  metadata: import_zod11.z.any().nullish(),
+  parallelToolCalls: import_zod11.z.boolean().nullish(),
+  previousResponseId: import_zod11.z.string().nullish(),
+  store: import_zod11.z.boolean().nullish(),
+  user: import_zod11.z.string().nullish(),
+  reasoningEffort: import_zod11.z.string().nullish(),
+  strictSchemas: import_zod11.z.boolean().nullish(),
+  instructions: import_zod11.z.string().nullish(),
+  reasoningSummary: import_zod11.z.string().nullish()
 });
 
 // src/openai-speech-model.ts
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
-var …
-var OpenAIProviderOptionsSchema = …
-  instructions: …
-  speed: …
+var import_zod12 = require("zod");
+var OpenAIProviderOptionsSchema = import_zod12.z.object({
+  instructions: import_zod12.z.string().nullish(),
+  speed: import_zod12.z.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
@@ -2415,7 +2390,7 @@ var OpenAISpeechModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
@@ -2424,7 +2399,7 @@
    providerOptions
  }) {
    const warnings = [];
-    const openAIOptions = (0, import_provider_utils9.parseProviderOptions)({
+    const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
@@ -2465,7 +2440,7 @@
  async doGenerate(options) {
    var _a, _b, _c;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { requestBody, warnings } = this.getArgs(options);
+    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
@@ -2521,7 +2496,7 @@ function createOpenAI(options = {}) {
    compatibility,
    fetch: options.fetch
  });
-  const createCompletionModel = (modelId …
+  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
    provider: `${providerName}.completion`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
@@ -2559,10 +2534,7 @@ function createOpenAI(options = {}) {
    );
  }
  if (modelId === "gpt-3.5-turbo-instruct") {
-    return createCompletionModel(
-      modelId,
-      settings
-    );
+    return createCompletionModel(modelId);
  }
  return createChatModel(modelId, settings);
 };