@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.11
- package/CHANGELOG.md +9 -0
- package/dist/index.d.mts +3 -12
- package/dist/index.d.ts +3 -12
- package/dist/index.js +45 -118
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +45 -118
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -24
- package/dist/internal/index.d.ts +3 -24
- package/dist/internal/index.js +45 -118
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +45 -118
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -193,19 +193,6 @@ function getResponseMetadata({
   };
 }
 
-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  var _a, _b;
-  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
-    token,
-    logprob,
-    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
-      token: token2,
-      logprob: logprob2
-    })) : []
-  }))) != null ? _b : void 0;
-}
-
 // src/map-openai-finish-reason.ts
 function mapOpenAIFinishReason(finishReason) {
   switch (finishReason) {
@@ -233,16 +220,6 @@ var openaiProviderOptions = z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: z.record(z.coerce.number(), z.number()).optional(),
-  /**
-   * Return the log probabilities of the tokens.
-   *
-   * Setting to true will return the log probabilities of the tokens that
-   * were generated.
-   *
-   * Setting to a number will return the log probabilities of the top n
-   * tokens that were generated.
-   */
-  logprobs: z.union([z.boolean(), z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
   */
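Note: this release removes logprobs support from the chat model (the mapping helper in the first hunk and the provider option here). Because `openaiProviderOptions` is a zod object, a leftover `logprobs` key is most likely stripped silently rather than rejected. A minimal before/after sketch of the provider-options shape (TypeScript; the values are illustrative, only the key names come from this diff):

    // Provider options accepted by canary.10 vs canary.11.
    const before = {
      logitBias: { 50256: -100 },
      logprobs: 5, // boolean | number in canary.10; removed in canary.11
    };
    const after = {
      logitBias: { 50256: -100 },
      // logprobs is gone: the request body no longer sets logprobs/top_logprobs
    };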
@@ -415,8 +392,6 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
-      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
@@ -489,20 +464,6 @@ var OpenAIChatLanguageModel = class {
           message: "logitBias is not supported for reasoning models"
         });
       }
-      if (baseArgs.logprobs != null) {
-        baseArgs.logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "logprobs is not supported for reasoning models"
-        });
-      }
-      if (baseArgs.top_logprobs != null) {
-        baseArgs.top_logprobs = void 0;
-        warnings.push({
-          type: "other",
-          message: "topLogprobs is not supported for reasoning models"
-        });
-      }
       if (baseArgs.max_tokens != null) {
         if (baseArgs.max_completion_tokens == null) {
           baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -602,7 +563,6 @@ var OpenAIChatLanguageModel = class {
         body: rawResponse
       },
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
@@ -635,7 +595,6 @@ var OpenAIChatLanguageModel = class {
       inputTokens: void 0,
       outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
     return {
@@ -700,13 +659,6 @@ var OpenAIChatLanguageModel = class {
               text: delta.content
             });
           }
-          const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
           if (delta.tool_calls != null) {
             for (const toolCallDelta of delta.tool_calls) {
               const index = toolCallDelta.index;
@@ -793,7 +745,6 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage,
             ...providerMetadata != null ? { providerMetadata } : {}
           });
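Note: with the helper gone, both the non-streaming result and the streamed `finish` event lose their `logprobs` field. A sketch of the finish stream part after this change (TypeScript; field names are taken from this diff, the type alias itself is illustrative and not exported by the package):

    // Shape of the "finish" part enqueued above in canary.11.
    type FinishPart = {
      type: 'finish';
      finishReason: string;
      usage: { inputTokens: number | undefined; outputTokens: number | undefined };
      providerMetadata?: Record<string, Record<string, unknown>>;
      // logprobs was emitted here in canary.10; removed in canary.11
    };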
@@ -838,20 +789,6 @@ var openaiChatResponseSchema = z3.object({
       ).nullish()
     }),
     index: z3.number(),
-    logprobs: z3.object({
-      content: z3.array(
-        z3.object({
-          token: z3.string(),
-          logprob: z3.number(),
-          top_logprobs: z3.array(
-            z3.object({
-              token: z3.string(),
-              logprob: z3.number()
-            })
-          )
-        })
-      ).nullable()
-    }).nullish(),
     finish_reason: z3.string().nullish()
   })
 ),
@@ -879,20 +816,6 @@ var openaiChatChunkSchema = z3.union([
       })
     ).nullish()
   }).nullish(),
-    logprobs: z3.object({
-      content: z3.array(
-        z3.object({
-          token: z3.string(),
-          logprob: z3.number(),
-          top_logprobs: z3.array(
-            z3.object({
-              token: z3.string(),
-              logprob: z3.number()
-            })
-          )
-        })
-      ).nullable()
-    }).nullish(),
     finish_reason: z3.string().nullable().optional(),
     index: z3.number()
   })
@@ -1023,20 +946,6 @@ ${user}:`]
   };
 }
 
-// src/map-openai-completion-logprobs.ts
-function mapOpenAICompletionLogProbs(logprobs) {
-  return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
-    token,
-    logprob: logprobs.token_logprobs[index],
-    topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
-      ([token2, logprob]) => ({
-        token: token2,
-        logprob
-      })
-    ) : []
-  }));
-}
-
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, settings, config) {
@@ -1094,7 +1003,6 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: this.settings.echo,
       logit_bias: this.settings.logitBias,
-      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
       suffix: this.settings.suffix,
       user: this.settings.user,
       // standardized settings:
@@ -1140,7 +1048,6 @@ var OpenAICompletionLanguageModel = class {
         outputTokens: response.usage.completion_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
       request: { body: args },
       response: {
         ...getResponseMetadata(response),
@@ -1177,7 +1084,6 @@ var OpenAICompletionLanguageModel = class {
       inputTokens: void 0,
       outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
@@ -1218,19 +1124,11 @@ var OpenAICompletionLanguageModel = class {
             text: choice.text
           });
         }
-          const mappedLogprobs = mapOpenAICompletionLogProbs(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
         },
         flush(controller) {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage
           });
         }
@@ -1248,12 +1146,7 @@ var openaiCompletionResponseSchema = z4.object({
   choices: z4.array(
     z4.object({
       text: z4.string(),
-      finish_reason: z4.string(),
-      logprobs: z4.object({
-        tokens: z4.array(z4.string()),
-        token_logprobs: z4.array(z4.number()),
-        top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-      }).nullish()
+      finish_reason: z4.string()
     })
   ),
   usage: z4.object({
@@ -1270,12 +1163,7 @@ var openaiCompletionChunkSchema = z4.union([
     z4.object({
       text: z4.string(),
       finish_reason: z4.string().nullish(),
-      index: z4.number(),
-      logprobs: z4.object({
-        tokens: z4.array(z4.string()),
-        token_logprobs: z4.array(z4.number()),
-        top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-      }).nullish()
+      index: z4.number()
     })
   ),
   usage: z4.object({
@@ -2063,8 +1951,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { effort: openaiOptions.reasoningEffort }
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
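Note: the Responses API request body now carries `reasoning.summary` alongside `reasoning.effort`, and the `reasoning` block is built when either option is present; each key is added only if its option is set, via the conditional spreads. A sketch of the resulting body fragment (TypeScript; the key names come from the added code, the model id and option values are examples):

    // Request-body fragment produced for a reasoning model in canary.11.
    const body = {
      model: 'o3-mini', // example reasoning model id
      reasoning: {
        effort: 'low',   // from providerOptions.openai.reasoningEffort
        summary: 'auto', // from providerOptions.openai.reasoningSummary (new)
      },
    };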
@@ -2160,7 +2055,13 @@ var OpenAIResponsesLanguageModel = class {
           type: z10.literal("computer_call")
         }),
         z10.object({
-          type: z10.literal("reasoning")
+          type: z10.literal("reasoning"),
+          summary: z10.array(
+            z10.object({
+              type: z10.literal("summary_text"),
+              text: z10.string()
+            })
+          )
         })
       ])
     ),
@@ -2174,6 +2075,14 @@ var OpenAIResponsesLanguageModel = class {
     const content = [];
     for (const part of response.output) {
       switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            reasoningType: "text",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
         case "message": {
           for (const contentPart of part.content) {
             content.push({
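Note: non-streaming results now surface reasoning summaries as `reasoning` content parts. Worth flagging: `Array.prototype.join()` with no separator argument joins with commas, so multiple `summary_text` segments are comma-concatenated. A sketch of the part pushed above (TypeScript; shape taken from the added code):

    // Content part produced for a "reasoning" output item.
    const reasoningPart = {
      type: 'reasoning' as const,
      reasoningType: 'text' as const,
      // join() without a separator inserts "," between segments:
      text: ['first summary', 'second summary'].join(), // "first summary,second summary"
    };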
@@ -2314,6 +2223,12 @@ var OpenAIResponsesLanguageModel = class {
               type: "text",
               text: value.delta
             });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              reasoningType: "text",
+              text: value.delta
+            });
           } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
             ongoingToolCalls[value.output_index] = void 0;
             hasToolCalls = true;
@@ -2438,6 +2353,13 @@ var responseAnnotationAddedSchema = z10.object({
     title: z10.string()
   })
 });
+var responseReasoningSummaryTextDeltaSchema = z10.object({
+  type: z10.literal("response.reasoning_summary_text.delta"),
+  item_id: z10.string(),
+  output_index: z10.number(),
+  summary_index: z10.number(),
+  delta: z10.string()
+});
 var openaiResponsesChunkSchema = z10.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
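Note: the new schema admits the `response.reasoning_summary_text.delta` server-sent event, which the stream handler above maps to `reasoning` deltas. An event matching the schema (TypeScript; field names from the schema, values are examples):

    // A chunk accepted by responseReasoningSummaryTextDeltaSchema.
    const chunk = {
      type: 'response.reasoning_summary_text.delta',
      item_id: 'rs_abc123', // example item id
      output_index: 0,
      summary_index: 0,
      delta: 'partial summary text',
    };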
@@ -2446,6 +2368,7 @@ var openaiResponsesChunkSchema = z10.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
   z10.object({ type: z10.string() }).passthrough()
   // fallback for unknown chunks
 ]);
@@ -2470,6 +2393,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2499,7 +2425,8 @@ var openaiResponsesProviderOptionsSchema = z10.object({
   user: z10.string().nullish(),
   reasoningEffort: z10.string().nullish(),
   strictSchemas: z10.boolean().nullish(),
-  instructions: z10.string().nullish()
+  instructions: z10.string().nullish(),
+  reasoningSummary: z10.string().nullish()
 });
 export {
   OpenAIChatLanguageModel,