@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +9 -0
- package/dist/index.d.mts +3 -12
- package/dist/index.d.ts +3 -12
- package/dist/index.js +45 -118
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +45 -118
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -24
- package/dist/internal/index.d.ts +3 -24
- package/dist/internal/index.js +45 -118
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +45 -118
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -199,19 +199,6 @@ function getResponseMetadata({
   };
 }
 
-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  var _a, _b;
-  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
-    token,
-    logprob,
-    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
-      token: token2,
-      logprob: logprob2
-    })) : []
-  }))) != null ? _b : void 0;
-}
-
 // src/map-openai-finish-reason.ts
 function mapOpenAIFinishReason(finishReason) {
   switch (finishReason) {
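Note: this hunk removes the chat logprobs mapper entirely. For readers of the compiled output above, the following is a minimal TypeScript sketch of what the removed helper did, reconstructed from the dist code (the type is an assumption based on the fields the code reads, not the package's actual source):

```ts
// Sketch of the removed helper, reconstructed from the compiled output above.
// The input type mirrors the OpenAI chat `logprobs` payload as the dist code reads it.
type OpenAIChatLogProbs = {
  content:
    | {
        token: string;
        logprob: number;
        top_logprobs: { token: string; logprob: number }[];
      }[]
    | null;
} | null | undefined;

function mapOpenAIChatLogProbsOutput(logprobs: OpenAIChatLogProbs) {
  return (
    logprobs?.content?.map(({ token, logprob, top_logprobs }) => ({
      token,
      logprob,
      topLogprobs: top_logprobs
        ? top_logprobs.map(({ token, logprob }) => ({ token, logprob }))
        : [],
    })) ?? undefined
  );
}
```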
@@ -239,16 +226,6 @@ var openaiProviderOptions = z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: z.record(z.coerce.number(), z.number()).optional(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   */
-  logprobs: z.union([z.boolean(), z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
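With the `logprobs` option gone from `openaiProviderOptions`, requests like the following sketch no longer validate under canary.11. This is an illustration of the removed option only; the surrounding `generateText`/`providerOptions` call surface is assumed from the AI SDK and is not part of this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Under canary.10 this requested top-5 token logprobs via the now-removed
// provider option; under canary.11 `logprobs` is no longer in the schema.
const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
  providerOptions: {
    openai: { logprobs: 5 }, // was boolean | number in the removed schema
  },
});
```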
@@ -421,8 +398,6 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
-      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
@@ -495,20 +470,6 @@ var OpenAIChatLanguageModel = class {
           message: "logitBias is not supported for reasoning models"
         });
       }
-      if (baseArgs.logprobs != null) {
-        baseArgs.logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "logprobs is not supported for reasoning models"
-        });
-      }
-      if (baseArgs.top_logprobs != null) {
-        baseArgs.top_logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "topLogprobs is not supported for reasoning models"
-        });
-      }
       if (baseArgs.max_tokens != null) {
         if (baseArgs.max_completion_tokens == null) {
           baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -608,7 +569,6 @@ var OpenAIChatLanguageModel = class {
         body: rawResponse
       },
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
@@ -641,7 +601,6 @@ var OpenAIChatLanguageModel = class {
       inputTokens: void 0,
       outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
     return {
@@ -706,13 +665,6 @@ var OpenAIChatLanguageModel = class {
               text: delta.content
             });
           }
-          const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
           if (delta.tool_calls != null) {
             for (const toolCallDelta of delta.tool_calls) {
               const index = toolCallDelta.index;
@@ -799,7 +751,6 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage,
             ...providerMetadata != null ? { providerMetadata } : {}
           });
@@ -844,20 +795,6 @@ var openaiChatResponseSchema = z3.object({
         ).nullish()
       }),
       index: z3.number(),
-      logprobs: z3.object({
-        content: z3.array(
-          z3.object({
-            token: z3.string(),
-            logprob: z3.number(),
-            top_logprobs: z3.array(
-              z3.object({
-                token: z3.string(),
-                logprob: z3.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
       finish_reason: z3.string().nullish()
     })
   ),
@@ -885,20 +822,6 @@ var openaiChatChunkSchema = z3.union([
           })
         ).nullish()
       }).nullish(),
-      logprobs: z3.object({
-        content: z3.array(
-          z3.object({
-            token: z3.string(),
-            logprob: z3.number(),
-            top_logprobs: z3.array(
-              z3.object({
-                token: z3.string(),
-                logprob: z3.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
       finish_reason: z3.string().nullable().optional(),
       index: z3.number()
     })
@@ -1029,20 +952,6 @@ ${user}:`]
   };
 }
 
-// src/map-openai-completion-logprobs.ts
-function mapOpenAICompletionLogProbs(logprobs) {
-  return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
-    token,
-    logprob: logprobs.token_logprobs[index],
-    topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
-      ([token2, logprob]) => ({
-        token: token2,
-        logprob
-      })
-    ) : []
-  }));
-}
-
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, settings, config) {
@@ -1100,7 +1009,6 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: this.settings.echo,
       logit_bias: this.settings.logitBias,
-      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
       suffix: this.settings.suffix,
       user: this.settings.user,
       // standardized settings:
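The completion-side mapper is removed as well. As an illustration of the transformation it performed (sample data is invented; the shapes follow the compiled code above):

```ts
// Input shape the removed mapOpenAICompletionLogProbs consumed
// (the legacy completions API logprobs payload):
const raw = {
  tokens: ['Hello', '!'],
  token_logprobs: [-0.1, -0.5],
  top_logprobs: [{ Hello: -0.1, Hi: -2.3 }, { '!': -0.5 }],
};

// It returned one entry per token, pairing each token with its logprob and
// expanding the top_logprobs record into { token, logprob } objects:
// [
//   { token: 'Hello', logprob: -0.1,
//     topLogprobs: [{ token: 'Hello', logprob: -0.1 }, { token: 'Hi', logprob: -2.3 }] },
//   { token: '!', logprob: -0.5,
//     topLogprobs: [{ token: '!', logprob: -0.5 }] },
// ]
```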
@@ -1146,7 +1054,6 @@ var OpenAICompletionLanguageModel = class {
         outputTokens: response.usage.completion_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
       request: { body: args },
       response: {
         ...getResponseMetadata(response),
@@ -1183,7 +1090,6 @@ var OpenAICompletionLanguageModel = class {
       inputTokens: void 0,
       outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
@@ -1224,19 +1130,11 @@ var OpenAICompletionLanguageModel = class {
               text: choice.text
             });
           }
-          const mappedLogprobs = mapOpenAICompletionLogProbs(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
         },
         flush(controller) {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage
           });
         }
@@ -1254,12 +1152,7 @@ var openaiCompletionResponseSchema = z4.object({
   choices: z4.array(
     z4.object({
       text: z4.string(),
-      finish_reason: z4.string(),
-      logprobs: z4.object({
-        tokens: z4.array(z4.string()),
-        token_logprobs: z4.array(z4.number()),
-        top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-      }).nullish()
+      finish_reason: z4.string()
     })
   ),
   usage: z4.object({
@@ -1276,12 +1169,7 @@ var openaiCompletionChunkSchema = z4.union([
     z4.object({
       text: z4.string(),
       finish_reason: z4.string().nullish(),
-      index: z4.number(),
-      logprobs: z4.object({
-        tokens: z4.array(z4.string()),
-        token_logprobs: z4.array(z4.number()),
-        top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-      }).nullish()
+      index: z4.number()
     })
   ),
   usage: z4.object({
@@ -1986,8 +1874,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: {
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
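The `reasoning` request field now carries an optional `summary` alongside `effort`, and is emitted whenever either option is set on a reasoning model. A sketch of the request fragment the new conditionals produce (values illustrative, derived from the compiled code above):

```ts
// For a reasoning model with both provider options set, the spread above
// contributes this fragment to the Responses API request body:
const requestFragment = {
  reasoning: {
    effort: 'low',   // from openaiOptions.reasoningEffort
    summary: 'auto', // from openaiOptions.reasoningSummary
  },
};
// If only one option is set, only that key is spread into `reasoning`;
// if neither is set, no `reasoning` field is sent.
```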
@@ -2083,7 +1978,13 @@ var OpenAIResponsesLanguageModel = class {
         type: z10.literal("computer_call")
       }),
       z10.object({
-        type: z10.literal("reasoning")
+        type: z10.literal("reasoning"),
+        summary: z10.array(
+          z10.object({
+            type: z10.literal("summary_text"),
+            text: z10.string()
+          })
+        )
       })
     ])
   ),
@@ -2097,6 +1998,14 @@ var OpenAIResponsesLanguageModel = class {
     const content = [];
     for (const part of response.output) {
       switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            reasoningType: "text",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
         case "message": {
           for (const contentPart of part.content) {
             content.push({
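Worth noting: `part.summary.map((summary) => summary.text).join()` calls `Array.prototype.join` with no argument, which uses the default `","` separator, so multi-part summaries are comma-concatenated:

```ts
// Default-separator behavior of .join() as used above:
['part one', 'part two'].join(); // => "part one,part two"
```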
@@ -2237,6 +2146,12 @@ var OpenAIResponsesLanguageModel = class {
               type: "text",
               text: value.delta
             });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              reasoningType: "text",
+              text: value.delta
+            });
           } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
             ongoingToolCalls[value.output_index] = void 0;
             hasToolCalls = true;
@@ -2361,6 +2276,13 @@ var responseAnnotationAddedSchema = z10.object({
     title: z10.string()
   })
 });
+var responseReasoningSummaryTextDeltaSchema = z10.object({
+  type: z10.literal("response.reasoning_summary_text.delta"),
+  item_id: z10.string(),
+  output_index: z10.number(),
+  summary_index: z10.number(),
+  delta: z10.string()
+});
 var openaiResponsesChunkSchema = z10.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
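The new schema accepts streaming events of the following shape (field values invented for illustration; field names come from the schema above):

```ts
// Sample SSE event that responseReasoningSummaryTextDeltaSchema would parse:
const chunk = {
  type: 'response.reasoning_summary_text.delta',
  item_id: 'rs_123',
  output_index: 0,
  summary_index: 0,
  delta: 'Considering the multiplication step by step...',
};
```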
@@ -2369,6 +2291,7 @@ var openaiResponsesChunkSchema = z10.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
   z10.object({ type: z10.string() }).passthrough()
   // fallback for unknown chunks
 ]);
@@ -2393,6 +2316,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2422,7 +2348,8 @@ var openaiResponsesProviderOptionsSchema = z10.object({
   user: z10.string().nullish(),
   reasoningEffort: z10.string().nullish(),
   strictSchemas: z10.boolean().nullish(),
-  instructions: z10.string().nullish()
+  instructions: z10.string().nullish(),
+  reasoningSummary: z10.string().nullish()
 });
 
 // src/openai-speech-model.ts
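Taken together, canary.11 removes logprobs support from the chat and completion models and adds reasoning-summary support to the Responses model. A sketch of how a consumer might exercise the new option; the `streamText`/`fullStream` call surface is assumed from the AI SDK and the top-level package may reshape the provider-level `reasoning` parts shown in this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Requests reasoning-summary streaming from a Responses reasoning model.
// Only the `reasoningSummary` option and the `reasoning` parts are new in
// canary.11; everything else here is assumed context.
const result = streamText({
  model: openai.responses('o3-mini'),
  prompt: 'What is 13 * 17?',
  providerOptions: {
    openai: { reasoningEffort: 'low', reasoningSummary: 'auto' },
  },
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') {
    process.stdout.write(part.text); // summary text deltas
  }
}
```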