@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.12
This diff shows the changes between publicly released versions of the package as published to the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in those registries.
- package/CHANGELOG.md +23 -0
- package/dist/index.d.mts +11 -50
- package/dist/index.d.ts +11 -50
- package/dist/index.js +273 -301
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +278 -305
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -39
- package/dist/internal/index.d.ts +30 -39
- package/dist/internal/index.js +273 -294
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +276 -298
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +5 -4
package/dist/index.mjs
CHANGED
@@ -199,19 +199,6 @@ function getResponseMetadata({
   };
 }
 
-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  var _a, _b;
-  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
-    token,
-    logprob,
-    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
-      token: token2,
-      logprob: logprob2
-    })) : []
-  }))) != null ? _b : void 0;
-}
-
 // src/map-openai-finish-reason.ts
 function mapOpenAIFinishReason(finishReason) {
   switch (finishReason) {
@@ -239,16 +226,6 @@ var openaiProviderOptions = z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: z.record(z.coerce.number(), z.number()).optional(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   */
-  logprobs: z.union([z.boolean(), z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
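Net effect of the two hunks above: `logprobs` support is stripped from the chat model — the output mapper is deleted and the option is removed from `openaiProviderOptions`. A hedged sketch of what this means at the call site (model id, prompt, and values are placeholders for illustration):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'), // placeholder model id
  prompt: 'Hello!',
  providerOptions: {
    openai: {
      logitBias: { '50256': -100 }, // still part of openaiProviderOptions
      // logprobs: 2,               // removed in this release; no longer in the schema
    },
  },
});
```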
@@ -374,7 +351,7 @@ var OpenAIChatLanguageModel = class {
       "image/*": [/^https?:\/\/.*$/]
     };
   }
-  getArgs({
+  async getArgs({
     prompt,
     maxOutputTokens,
     temperature,
@@ -391,7 +368,7 @@ var OpenAIChatLanguageModel = class {
   }) {
     var _a, _b, _c;
     const warnings = [];
-    const openaiOptions = (_a = parseProviderOptions({
+    const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
       providerOptions,
       schema: openaiProviderOptions
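`getArgs` becomes `async` here because `parseProviderOptions` from `@ai-sdk/provider-utils` now returns a Promise; the same `await` threads through `doGenerate`/`doStream` in the hunks below. A minimal sketch of the pattern, assuming zod's async validation (this is a hypothetical stand-in, not the actual provider-utils implementation):

```ts
import { z } from 'zod';

// Hypothetical stand-in for the now-async parseProviderOptions helper.
async function parseProviderOptions<SCHEMA extends z.ZodTypeAny>({
  provider,
  providerOptions,
  schema,
}: {
  provider: string;
  providerOptions: Record<string, unknown> | undefined;
  schema: SCHEMA;
}): Promise<z.infer<SCHEMA> | undefined> {
  const options = providerOptions?.[provider];
  if (options == null) {
    return undefined;
  }
  // parseAsync makes the helper awaitable, which is why every getArgs
  // call site in this diff gains an `await`.
  return await schema.parseAsync(options);
}
```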
@@ -421,8 +398,6 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
-      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
@@ -495,20 +470,6 @@ var OpenAIChatLanguageModel = class {
           message: "logitBias is not supported for reasoning models"
         });
       }
-      if (baseArgs.logprobs != null) {
-        baseArgs.logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "logprobs is not supported for reasoning models"
-        });
-      }
-      if (baseArgs.top_logprobs != null) {
-        baseArgs.top_logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "topLogprobs is not supported for reasoning models"
-        });
-      }
       if (baseArgs.max_tokens != null) {
         if (baseArgs.max_completion_tokens == null) {
           baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -545,7 +506,7 @@ var OpenAIChatLanguageModel = class {
   }
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -608,12 +569,11 @@ var OpenAIChatLanguageModel = class {
         body: rawResponse
       },
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
@@ -641,7 +601,6 @@ var OpenAIChatLanguageModel = class {
       inputTokens: void 0,
       outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
     return {
@@ -706,13 +665,6 @@ var OpenAIChatLanguageModel = class {
               text: delta.content
             });
           }
-          const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
           if (delta.tool_calls != null) {
             for (const toolCallDelta of delta.tool_calls) {
               const index = toolCallDelta.index;
@@ -799,7 +751,6 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage,
             ...providerMetadata != null ? { providerMetadata } : {}
           });
@@ -844,20 +795,6 @@ var openaiChatResponseSchema = z3.object({
         ).nullish()
       }),
       index: z3.number(),
-      logprobs: z3.object({
-        content: z3.array(
-          z3.object({
-            token: z3.string(),
-            logprob: z3.number(),
-            top_logprobs: z3.array(
-              z3.object({
-                token: z3.string(),
-                logprob: z3.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
       finish_reason: z3.string().nullish()
     })
   ),
@@ -885,20 +822,6 @@ var openaiChatChunkSchema = z3.union([
           })
         ).nullish()
       }).nullish(),
-      logprobs: z3.object({
-        content: z3.array(
-          z3.object({
-            token: z3.string(),
-            logprob: z3.number(),
-            top_logprobs: z3.array(
-              z3.object({
-                token: z3.string(),
-                logprob: z3.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
       finish_reason: z3.string().nullable().optional(),
       index: z3.number()
     })
@@ -943,9 +866,10 @@ import {
   combineHeaders as combineHeaders2,
   createEventSourceResponseHandler as createEventSourceResponseHandler2,
   createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z4 } from "zod";
+import { z as z5 } from "zod";
 
 // src/convert-to-openai-completion-prompt.ts
 import {
@@ -1029,28 +953,49 @@ ${user}:`]
   };
 }
 
-// src/map-openai-completion-logprobs.ts
-function mapOpenAICompletionLogProbs(logprobs) {
-  return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
-    token,
-    logprob: logprobs.token_logprobs[index],
-    topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(([token2, logprob]) => ({
-      token: token2,
-      logprob
-    })) : []
-  }));
-}
-
+// src/openai-completion-options.ts
+import { z as z4 } from "zod";
+var openaiCompletionProviderOptions = z4.object({
+  /**
+  Echo back the prompt in addition to the completion.
+  */
+  echo: z4.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+  */
+  logitBias: z4.record(z4.string(), z4.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+  */
+  suffix: z4.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+  */
+  user: z4.string().optional()
+});
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   get provider() {
     return this.config.provider;
   }
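With `openaiCompletionProviderOptions` in place, the completion model reads per-call options from `providerOptions` (under `openai` and, via the new `providerOptionsName` getter, under the provider's own prefix) rather than from the removed constructor settings. A hedged usage sketch (model id, prompt, and values are placeholders):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Write a haiku about version diffs.',
  providerOptions: {
    openai: {
      echo: false,                  // previously a constructor setting
      logitBias: { '50256': -100 },
      user: 'user-1234',
    },
  },
});
```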
@@ -1059,7 +1004,7 @@ var OpenAICompletionLanguageModel = class {
       // no supported urls for completion models
     };
   }
-  getArgs({
+  async getArgs({
     inputFormat,
     prompt,
     maxOutputTokens,
@@ -1072,9 +1017,22 @@ var OpenAICompletionLanguageModel = class {
     responseFormat,
     tools,
     toolChoice,
-    seed
+    seed,
+    providerOptions
   }) {
     const warnings = [];
+    const openaiOptions = {
+      ...await parseProviderOptions2({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await parseProviderOptions2({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
     if (topK != null) {
       warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
@@ -1098,11 +1056,10 @@ var OpenAICompletionLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      echo: this.settings.echo,
-      logit_bias: this.settings.logitBias,
-      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      suffix: this.settings.suffix,
-      user: this.settings.user,
+      echo: openaiOptions.echo,
+      logit_bias: openaiOptions.logitBias,
+      suffix: openaiOptions.suffix,
+      user: openaiOptions.user,
       // standardized settings:
       max_tokens: maxOutputTokens,
       temperature,
@@ -1119,7 +1076,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1146,7 +1103,6 @@ var OpenAICompletionLanguageModel = class {
         outputTokens: response.usage.completion_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
       request: { body: args },
       response: {
         ...getResponseMetadata(response),
@@ -1157,7 +1113,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
@@ -1183,7 +1139,6 @@ var OpenAICompletionLanguageModel = class {
       inputTokens: void 0,
       outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
@@ -1224,19 +1179,11 @@ var OpenAICompletionLanguageModel = class {
               text: choice.text
             });
           }
-          const mappedLogprobs = mapOpenAICompletionLogProbs(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
         },
         flush(controller) {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage
           });
         }
@@ -1247,46 +1194,36 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
-var openaiCompletionResponseSchema = z4.object({
-  id: z4.string().nullish(),
-  created: z4.number().nullish(),
-  model: z4.string().nullish(),
-  choices: z4.array(
-    z4.object({
-      text: z4.string(),
-      finish_reason: z4.string(),
-      logprobs: z4.object({
-        tokens: z4.array(z4.string()),
-        token_logprobs: z4.array(z4.number()),
-        top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-      }).nullish()
+var openaiCompletionResponseSchema = z5.object({
+  id: z5.string().nullish(),
+  created: z5.number().nullish(),
+  model: z5.string().nullish(),
+  choices: z5.array(
+    z5.object({
+      text: z5.string(),
+      finish_reason: z5.string()
     })
   ),
-  usage: z4.object({
-    prompt_tokens: z4.number(),
-    completion_tokens: z4.number()
+  usage: z5.object({
+    prompt_tokens: z5.number(),
+    completion_tokens: z5.number()
   })
 });
-var openaiCompletionChunkSchema = z4.union([
-  z4.object({
-    id: z4.string().nullish(),
-    created: z4.number().nullish(),
-    model: z4.string().nullish(),
-    choices: z4.array(
-      z4.object({
-        text: z4.string(),
-        finish_reason: z4.string().nullish(),
-        index: z4.number(),
-        logprobs: z4.object({
-          tokens: z4.array(z4.string()),
-          token_logprobs: z4.array(z4.number()),
-          top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-        }).nullish()
+var openaiCompletionChunkSchema = z5.union([
+  z5.object({
+    id: z5.string().nullish(),
+    created: z5.number().nullish(),
+    model: z5.string().nullish(),
+    choices: z5.array(
+      z5.object({
+        text: z5.string(),
+        finish_reason: z5.string().nullish(),
+        index: z5.number()
       })
     ),
-    usage: z4.object({
-      prompt_tokens: z4.number(),
-      completion_tokens: z4.number()
+    usage: z5.object({
+      prompt_tokens: z5.number(),
+      completion_tokens: z5.number()
     }).nullish()
   }),
   openaiErrorDataSchema
@@ -1299,24 +1236,24 @@ import {
 import {
   combineHeaders as combineHeaders3,
   createJsonResponseHandler as createJsonResponseHandler3,
-  parseProviderOptions as parseProviderOptions2,
+  parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z6 } from "zod";
+import { z as z7 } from "zod";
 
 // src/openai-embedding-options.ts
-import { z as z5 } from "zod";
-var openaiEmbeddingProviderOptions = z5.object({
+import { z as z6 } from "zod";
+var openaiEmbeddingProviderOptions = z6.object({
   /**
   The number of dimensions the resulting output embeddings should have.
   Only supported in text-embedding-3 and later models.
   */
-  dimensions: z5.number().optional(),
+  dimensions: z6.number().optional(),
   /**
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: z5.string().optional()
+  user: z6.string().optional()
 });
 
 // src/openai-embedding-model.ts
@@ -1353,7 +1290,7 @@ var OpenAIEmbeddingModel = class {
         values
       });
     }
-    const openaiOptions = (_a = parseProviderOptions2({
+    const openaiOptions = (_a = await parseProviderOptions3({
       provider: "openai",
       providerOptions,
      schema: openaiEmbeddingProviderOptions
@@ -1389,9 +1326,9 @@ var OpenAIEmbeddingModel = class {
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = z6.object({
-  data: z6.array(z6.object({ embedding: z6.array(z6.number()) })),
-  usage: z6.object({ prompt_tokens: z6.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = z7.object({
+  data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
 });
 
 // src/openai-image-model.ts
@@ -1400,13 +1337,15 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z7 } from "zod";
+import { z as z8 } from "zod";
 
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {
@@ -1458,7 +1397,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler4(
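The image-model hunks register `gpt-image-1` (up to 10 images per call) and skip sending `response_format` for it, since that model always returns base64 JSON. A hedged calling sketch (the `experimental_generateImage` import name is an assumption based on the AI SDK surface at the time; prompt is a placeholder):

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1'), // newly listed in modelMaxImagesPerCall
  prompt: 'A watercolor lighthouse at dusk',
  n: 1,
});
// No response_format is added for gpt-image-1; dall-e-2/3 still get
// response_format: "b64_json".
```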
@@ -1478,13 +1417,13 @@ var OpenAIImageModel = class {
     };
   }
 };
-var openaiImageResponseSchema = z7.object({
-  data: z7.array(z7.object({ b64_json: z7.string() }))
+var openaiImageResponseSchema = z8.object({
+  data: z8.array(z8.object({ b64_json: z8.string() }))
 });
 
 // src/openai-tools.ts
-import { z as z8 } from "zod";
-var WebSearchPreviewParameters = z8.object({});
+import { z as z9 } from "zod";
+var WebSearchPreviewParameters = z9.object({});
 function webSearchPreviewTool({
   searchContextSize,
   userLocation
@@ -1508,16 +1447,16 @@ import {
   combineHeaders as combineHeaders5,
   convertBase64ToUint8Array,
   createJsonResponseHandler as createJsonResponseHandler5,
-  parseProviderOptions as parseProviderOptions3,
+  parseProviderOptions as parseProviderOptions4,
   postFormDataToApi
 } from "@ai-sdk/provider-utils";
-import { z as z9 } from "zod";
-var openAIProviderOptionsSchema = z9.object({
-  include: z9.array(z9.string()).nullish(),
-  language: z9.string().nullish(),
-  prompt: z9.string().nullish(),
-  temperature: z9.number().min(0).max(1).nullish().default(0),
-  timestampGranularities: z9.array(z9.enum(["word", "segment"])).nullish().default(["segment"])
+import { z as z10 } from "zod";
+var openAIProviderOptionsSchema = z10.object({
+  include: z10.array(z10.string()).nullish(),
+  language: z10.string().nullish(),
+  prompt: z10.string().nullish(),
+  temperature: z10.number().min(0).max(1).nullish().default(0),
+  timestampGranularities: z10.array(z10.enum(["word", "segment"])).nullish().default(["segment"])
 });
 var languageMap = {
   afrikaans: "af",
@@ -1587,14 +1526,14 @@ var OpenAITranscriptionModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     audio,
     mediaType,
     providerOptions
   }) {
     var _a, _b, _c, _d, _e;
     const warnings = [];
-    const openAIOptions = parseProviderOptions3({
+    const openAIOptions = await parseProviderOptions4({
       provider: "openai",
       providerOptions,
       schema: openAIProviderOptionsSchema
@@ -1626,7 +1565,7 @@ var OpenAITranscriptionModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { formData, warnings } = this.getArgs(options);
+    const { formData, warnings } = await this.getArgs(options);
     const {
       value: response,
       responseHeaders,
@@ -1665,15 +1604,15 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = z9.object({
-  text: z9.string(),
-  language: z9.string().nullish(),
-  duration: z9.number().nullish(),
-  words: z9.array(
-    z9.object({
-      word: z9.string(),
-      start: z9.number(),
-      end: z9.number()
+var openaiTranscriptionResponseSchema = z10.object({
+  text: z10.string(),
+  language: z10.string().nullish(),
+  duration: z10.number().nullish(),
+  words: z10.array(
+    z10.object({
+      word: z10.string(),
+      start: z10.number(),
+      end: z10.number()
     })
   ).nullish()
 });
@@ -1684,10 +1623,10 @@ import {
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler6,
   generateId as generateId2,
-  parseProviderOptions as parseProviderOptions4,
+  parseProviderOptions as parseProviderOptions5,
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
-import { z as z10 } from "zod";
+import { z as z11 } from "zod";
 
 // src/responses/convert-to-openai-responses-messages.ts
 import {
@@ -1911,7 +1850,7 @@ var OpenAIResponsesLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     maxOutputTokens,
     temperature,
     stopSequences,
@@ -1955,7 +1894,7 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions = parseProviderOptions4({
+    const openaiOptions = await parseProviderOptions5({
       provider: "openai",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
@@ -1986,8 +1925,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { effort: openaiOptions.reasoningEffort }
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
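Together with the `reasoningSummary` field added to `openaiResponsesProviderOptionsSchema` further down, this lets callers ask the Responses API for reasoning summaries. A hedged request sketch (model id and the `'auto'` value are placeholders based on OpenAI's documented summary settings):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('o3-mini'), // any reasoning model
  prompt: 'Briefly: why is the sky blue?',
  providerOptions: {
    openai: {
      reasoningEffort: 'medium',
      reasoningSummary: 'auto', // forwarded as reasoning.summary in the request body
    },
  },
});
```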
@@ -2031,7 +1977,7 @@ var OpenAIResponsesLanguageModel = class {
   }
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -2045,49 +1991,55 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler6(
-        z10.object({
-          id: z10.string(),
-          created_at: z10.number(),
-          model: z10.string(),
-          output: z10.array(
-            z10.discriminatedUnion("type", [
-              z10.object({
-                type: z10.literal("message"),
-                role: z10.literal("assistant"),
-                content: z10.array(
-                  z10.object({
-                    type: z10.literal("output_text"),
-                    text: z10.string(),
-                    annotations: z10.array(
-                      z10.object({
-                        type: z10.literal("url_citation"),
-                        start_index: z10.number(),
-                        end_index: z10.number(),
-                        url: z10.string(),
-                        title: z10.string()
+        z11.object({
+          id: z11.string(),
+          created_at: z11.number(),
+          model: z11.string(),
+          output: z11.array(
+            z11.discriminatedUnion("type", [
+              z11.object({
+                type: z11.literal("message"),
+                role: z11.literal("assistant"),
+                content: z11.array(
+                  z11.object({
+                    type: z11.literal("output_text"),
+                    text: z11.string(),
+                    annotations: z11.array(
+                      z11.object({
+                        type: z11.literal("url_citation"),
+                        start_index: z11.number(),
+                        end_index: z11.number(),
+                        url: z11.string(),
+                        title: z11.string()
                       })
                     )
                   })
                 )
               }),
-              z10.object({
-                type: z10.literal("function_call"),
-                call_id: z10.string(),
-                name: z10.string(),
-                arguments: z10.string()
+              z11.object({
+                type: z11.literal("function_call"),
+                call_id: z11.string(),
+                name: z11.string(),
+                arguments: z11.string()
               }),
-              z10.object({
-                type: z10.literal("web_search_call")
+              z11.object({
+                type: z11.literal("web_search_call")
               }),
-              z10.object({
-                type: z10.literal("computer_call")
+              z11.object({
+                type: z11.literal("computer_call")
               }),
-              z10.object({
-                type: z10.literal("reasoning")
+              z11.object({
+                type: z11.literal("reasoning"),
+                summary: z11.array(
+                  z11.object({
+                    type: z11.literal("summary_text"),
+                    text: z11.string()
+                  })
+                )
               })
             ])
           ),
-          incomplete_details: z10.object({ reason: z10.string() }).nullable(),
+          incomplete_details: z11.object({ reason: z11.string() }).nullable(),
           usage: usageSchema
         })
       ),
@@ -2097,6 +2049,13 @@ var OpenAIResponsesLanguageModel = class {
     const content = [];
     for (const part of response.output) {
       switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
         case "message": {
           for (const contentPart of part.content) {
             content.push({
@@ -2156,7 +2115,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi5({
       url: this.config.url({
         path: "/responses",
@@ -2237,6 +2196,11 @@ var OpenAIResponsesLanguageModel = class {
               type: "text",
               text: value.delta
             });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              text: value.delta
+            });
           } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
             ongoingToolCalls[value.output_index] = void 0;
             hasToolCalls = true;
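With `doStream` forwarding `response.reasoning_summary_text.delta` chunks as `reasoning` parts, summaries stream interleaved with text. A hedged consumption sketch — the exact stream-part shape at the `streamText` level may differ in this canary; the provider itself enqueues `{ type: 'reasoning', text }`:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { fullStream } = streamText({
  model: openai.responses('o3-mini'), // placeholder reasoning model
  prompt: 'Plan a three-step experiment.',
  providerOptions: {
    openai: { reasoningSummary: 'auto' },
  },
});

for await (const part of fullStream) {
  if (part.type === 'reasoning') {
    // reasoning-summary deltas arrive here, alongside regular text parts
    process.stdout.write(part.text);
  }
}
```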
@@ -2289,79 +2253,86 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema = z10.object({
-  input_tokens: z10.number(),
-  input_tokens_details: z10.object({ cached_tokens: z10.number().nullish() }).nullish(),
-  output_tokens: z10.number(),
-  output_tokens_details: z10.object({ reasoning_tokens: z10.number().nullish() }).nullish()
+var usageSchema = z11.object({
+  input_tokens: z11.number(),
+  input_tokens_details: z11.object({ cached_tokens: z11.number().nullish() }).nullish(),
+  output_tokens: z11.number(),
+  output_tokens_details: z11.object({ reasoning_tokens: z11.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = z10.object({
-  type: z10.literal("response.output_text.delta"),
-  delta: z10.string()
+var textDeltaChunkSchema = z11.object({
+  type: z11.literal("response.output_text.delta"),
+  delta: z11.string()
 });
-var responseFinishedChunkSchema = z10.object({
-  type: z10.enum(["response.completed", "response.incomplete"]),
-  response: z10.object({
-    incomplete_details: z10.object({ reason: z10.string() }).nullish(),
+var responseFinishedChunkSchema = z11.object({
+  type: z11.enum(["response.completed", "response.incomplete"]),
+  response: z11.object({
+    incomplete_details: z11.object({ reason: z11.string() }).nullish(),
     usage: usageSchema
   })
 });
-var responseCreatedChunkSchema = z10.object({
-  type: z10.literal("response.created"),
-  response: z10.object({
-    id: z10.string(),
-    created_at: z10.number(),
-    model: z10.string()
+var responseCreatedChunkSchema = z11.object({
+  type: z11.literal("response.created"),
+  response: z11.object({
+    id: z11.string(),
+    created_at: z11.number(),
+    model: z11.string()
   })
 });
-var responseOutputItemDoneSchema = z10.object({
-  type: z10.literal("response.output_item.done"),
-  output_index: z10.number(),
-  item: z10.discriminatedUnion("type", [
-    z10.object({
-      type: z10.literal("message")
+var responseOutputItemDoneSchema = z11.object({
+  type: z11.literal("response.output_item.done"),
+  output_index: z11.number(),
+  item: z11.discriminatedUnion("type", [
+    z11.object({
+      type: z11.literal("message")
     }),
-    z10.object({
-      type: z10.literal("function_call"),
-      id: z10.string(),
-      call_id: z10.string(),
-      name: z10.string(),
-      arguments: z10.string(),
-      status: z10.literal("completed")
+    z11.object({
+      type: z11.literal("function_call"),
+      id: z11.string(),
+      call_id: z11.string(),
+      name: z11.string(),
+      arguments: z11.string(),
+      status: z11.literal("completed")
     })
   ])
 });
-var responseFunctionCallArgumentsDeltaSchema = z10.object({
-  type: z10.literal("response.function_call_arguments.delta"),
-  item_id: z10.string(),
-  output_index: z10.number(),
-  delta: z10.string()
+var responseFunctionCallArgumentsDeltaSchema = z11.object({
+  type: z11.literal("response.function_call_arguments.delta"),
+  item_id: z11.string(),
+  output_index: z11.number(),
+  delta: z11.string()
 });
-var responseOutputItemAddedSchema = z10.object({
-  type: z10.literal("response.output_item.added"),
-  output_index: z10.number(),
-  item: z10.discriminatedUnion("type", [
-    z10.object({
-      type: z10.literal("message")
+var responseOutputItemAddedSchema = z11.object({
+  type: z11.literal("response.output_item.added"),
+  output_index: z11.number(),
+  item: z11.discriminatedUnion("type", [
+    z11.object({
+      type: z11.literal("message")
     }),
-    z10.object({
-      type: z10.literal("function_call"),
-      id: z10.string(),
-      call_id: z10.string(),
-      name: z10.string(),
-      arguments: z10.string()
+    z11.object({
+      type: z11.literal("function_call"),
+      id: z11.string(),
+      call_id: z11.string(),
+      name: z11.string(),
+      arguments: z11.string()
     })
   ])
 });
-var responseAnnotationAddedSchema = z10.object({
-  type: z10.literal("response.output_text.annotation.added"),
-  annotation: z10.object({
-    type: z10.literal("url_citation"),
-    url: z10.string(),
-    title: z10.string()
+var responseAnnotationAddedSchema = z11.object({
+  type: z11.literal("response.output_text.annotation.added"),
+  annotation: z11.object({
+    type: z11.literal("url_citation"),
+    url: z11.string(),
+    title: z11.string()
   })
 });
-var openaiResponsesChunkSchema = z10.union([
+var responseReasoningSummaryTextDeltaSchema = z11.object({
+  type: z11.literal("response.reasoning_summary_text.delta"),
+  item_id: z11.string(),
+  output_index: z11.number(),
+  summary_index: z11.number(),
+  delta: z11.string()
+});
+var openaiResponsesChunkSchema = z11.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
@@ -2369,7 +2340,8 @@ var openaiResponsesChunkSchema = z10.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
-  z10.object({ type: z10.string() }).passthrough()
+  responseReasoningSummaryTextDeltaSchema,
+  z11.object({ type: z11.string() }).passthrough()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2393,6 +2365,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2414,28 +2389,29 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
-var openaiResponsesProviderOptionsSchema = z10.object({
-  metadata: z10.any().nullish(),
-  parallelToolCalls: z10.boolean().nullish(),
-  previousResponseId: z10.string().nullish(),
-  store: z10.boolean().nullish(),
-  user: z10.string().nullish(),
-  reasoningEffort: z10.string().nullish(),
-  strictSchemas: z10.boolean().nullish(),
-  instructions: z10.string().nullish()
+var openaiResponsesProviderOptionsSchema = z11.object({
+  metadata: z11.any().nullish(),
+  parallelToolCalls: z11.boolean().nullish(),
+  previousResponseId: z11.string().nullish(),
+  store: z11.boolean().nullish(),
+  user: z11.string().nullish(),
+  reasoningEffort: z11.string().nullish(),
+  strictSchemas: z11.boolean().nullish(),
+  instructions: z11.string().nullish(),
+  reasoningSummary: z11.string().nullish()
 });
 
 // src/openai-speech-model.ts
 import {
   combineHeaders as combineHeaders7,
   createBinaryResponseHandler,
-  parseProviderOptions as parseProviderOptions5,
+  parseProviderOptions as parseProviderOptions6,
   postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
-import { z as z11 } from "zod";
-var OpenAIProviderOptionsSchema = z11.object({
-  instructions: z11.string().nullish(),
-  speed: z11.number().min(0.25).max(4).default(1).nullish()
+import { z as z12 } from "zod";
+var OpenAIProviderOptionsSchema = z12.object({
+  instructions: z12.string().nullish(),
+  speed: z12.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
@@ -2446,7 +2422,7 @@ var OpenAISpeechModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     text,
     voice = "alloy",
     outputFormat = "mp3",
@@ -2455,7 +2431,7 @@ var OpenAISpeechModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions = parseProviderOptions5({
+    const openAIOptions = await parseProviderOptions6({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
@@ -2496,7 +2472,7 @@ var OpenAISpeechModel = class {
   async doGenerate(options) {
     var _a, _b, _c;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { requestBody, warnings } = this.getArgs(options);
+    const { requestBody, warnings } = await this.getArgs(options);
     const {
       value: audio,
       responseHeaders,
@@ -2552,7 +2528,7 @@ function createOpenAI(options = {}) {
     compatibility,
     fetch: options.fetch
   });
-  const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
+  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
     provider: `${providerName}.completion`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
@@ -2590,10 +2566,7 @@ function createOpenAI(options = {}) {
     );
   }
   if (modelId === "gpt-3.5-turbo-instruct") {
-    return createCompletionModel(
-      modelId,
-      settings
-    );
+    return createCompletionModel(modelId);
   }
   return createChatModel(modelId, settings);
 };