@ai-sdk/openai 2.0.0-canary.2 → 2.0.0-canary.20
- package/CHANGELOG.md +257 -0
- package/README.md +2 -2
- package/dist/index.d.mts +39 -176
- package/dist/index.d.ts +39 -176
- package/dist/index.js +1118 -802
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1144 -815
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +379 -0
- package/dist/internal/index.d.ts +379 -0
- package/{internal/dist → dist/internal}/index.js +1108 -785
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +1125 -796
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.d.mts +0 -290
- package/internal/dist/index.d.ts +0 -290
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
@@ -1,7 +1,6 @@
 // src/openai-chat-language-model.ts
 import {
-  InvalidResponseDataError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+  InvalidResponseDataError
 } from "@ai-sdk/provider";
 import {
   combineHeaders,
@@ -9,18 +8,18 @@ import {
   createJsonResponseHandler,
   generateId,
   isParsableJson,
+  parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";

 // src/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false,
   systemMessageMode = "system"
 }) {
   const messages = [];
@@ -61,55 +60,71 @@ function convertToOpenAIChatMessages({
       messages.push({
         role: "user",
         content: content.map((part, index) => {
-          var _a, _b, _c, _d;
+          var _a, _b, _c;
          switch (part.type) {
            case "text": {
              return { type: "text", text: part.text };
            }
-            case "image": {
-              return {
-                type: "image_url",
-                image_url: {
-                  url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
-                  // OpenAI specific extension: image detail
-                  detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-                }
-              };
-            }
            case "file": {
-              if (part.data instanceof URL) {
-                throw new UnsupportedFunctionalityError({
-                  functionality: "'File content parts with URL data' functionality not supported."
-                });
-              }
-              switch (part.mimeType) {
-                case "audio/wav": {
-                  return {
-                    type: "input_audio",
-                    input_audio: { data: part.data, format: "wav" }
-                  };
-                }
-                case "audio/mp3":
-                case "audio/mpeg": {
-                  return {
-                    type: "input_audio",
-                    input_audio: { data: part.data, format: "mp3" }
-                  };
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "image_url",
+                  image_url: {
+                    url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                    // OpenAI specific extension: image detail
+                    detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                  }
+                };
+              } else if (part.mediaType.startsWith("audio/")) {
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: "audio file parts with URLs"
+                  });
                }
-                case "application/pdf": {
-                  return {
-                    type: "file",
-                    file: {
-                      filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
-                      file_data: `data:application/pdf;base64,${part.data}`
-                    }
-                  };
+                switch (part.mediaType) {
+                  case "audio/wav": {
+                    return {
+                      type: "input_audio",
+                      input_audio: {
+                        data: convertToBase64(part.data),
+                        format: "wav"
+                      }
+                    };
+                  }
+                  case "audio/mp3":
+                  case "audio/mpeg": {
+                    return {
+                      type: "input_audio",
+                      input_audio: {
+                        data: convertToBase64(part.data),
+                        format: "mp3"
+                      }
+                    };
+                  }
+                  default: {
+                    throw new UnsupportedFunctionalityError({
+                      functionality: `audio content parts with media type ${part.mediaType}`
+                    });
+                  }
                }
-                default: {
+              } else if (part.mediaType === "application/pdf") {
+                if (part.data instanceof URL) {
                  throw new UnsupportedFunctionalityError({
-                    functionality: `File content part type ${part.mimeType} in user messages`
+                    functionality: "PDF file parts with URLs"
                  });
                }
+                return {
+                  type: "file",
+                  file: {
+                    filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                    file_data: `data:application/pdf;base64,${part.data}`
+                  }
+                };
+              } else {
+                throw new UnsupportedFunctionalityError({
+                  functionality: `file part media type ${part.mediaType}`
+                });
              }
            }
          }
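The hunk above folds the old `image` part and `mimeType`-keyed audio handling into a single `file` branch keyed by `mediaType`, and adds PDF support. A minimal sketch of the part shapes the new conversion accepts — the `type`/`mediaType`/`data` layout follows the branches in the diff, while the variable names and data values are placeholders:

```ts
// Hypothetical prompt fragment; only the shapes are taken from the diff.
const pdfBase64 = "JVBERi0xLjcK"; // placeholder base64 PDF bytes
const wavBytes = new Uint8Array([0x52, 0x49, 0x46, 0x46]); // placeholder audio

const userMessage = {
  role: "user" as const,
  content: [
    { type: "text" as const, text: "Describe the attachments." },
    // image/* parts become image_url chat content (URLs pass through as-is):
    { type: "file" as const, mediaType: "image/png", data: new URL("https://example.com/chart.png") },
    // audio/wav and audio/mp3 or audio/mpeg become input_audio parts (URLs throw):
    { type: "file" as const, mediaType: "audio/wav", data: wavBytes },
    // application/pdf becomes a file part with a base64 data URL (URLs throw):
    { type: "file" as const, mediaType: "application/pdf", data: pdfBase64, filename: "report.pdf" },
  ],
};
```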
@@ -139,41 +154,20 @@ function convertToOpenAIChatMessages({
          }
        }
      }
-      if (useLegacyFunctionCalling) {
-        if (toolCalls.length > 1) {
-          throw new UnsupportedFunctionalityError({
-            functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
-          });
-        }
-        messages.push({
-          role: "assistant",
-          content: text,
-          function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
-        });
-      } else {
-        messages.push({
-          role: "assistant",
-          content: text,
-          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-        });
-      }
+      messages.push({
+        role: "assistant",
+        content: text,
+        tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+      });
      break;
    }
    case "tool": {
      for (const toolResponse of content) {
-        if (useLegacyFunctionCalling) {
-          messages.push({
-            role: "function",
-            name: toolResponse.toolName,
-            content: JSON.stringify(toolResponse.result)
-          });
-        } else {
-          messages.push({
-            role: "tool",
-            tool_call_id: toolResponse.toolCallId,
-            content: JSON.stringify(toolResponse.result)
-          });
-        }
+        messages.push({
+          role: "tool",
+          tool_call_id: toolResponse.toolCallId,
+          content: JSON.stringify(toolResponse.result)
+        });
      }
      break;
    }
@@ -186,17 +180,17 @@ function convertToOpenAIChatMessages({
   return { messages, warnings };
 }

-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  …
-  }
+// src/get-response-metadata.ts
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
 }

 // src/map-openai-finish-reason.ts
@@ -216,18 +210,75 @@ function mapOpenAIFinishReason(finishReason) {
  }
 }

-// src/openai-error.ts
+// src/openai-chat-options.ts
 import { z } from "zod";
+var openaiProviderOptions = z.object({
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: z.boolean().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse.
+   */
+  user: z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+  /**
+   * Maximum number of completion tokens to generate. Useful for reasoning models.
+   */
+  maxCompletionTokens: z.number().optional(),
+  /**
+   * Whether to enable persistence in responses API.
+   */
+  store: z.boolean().optional(),
+  /**
+   * Metadata to associate with the request.
+   */
+  metadata: z.record(z.string()).optional(),
+  /**
+   * Parameters for prediction mode.
+   */
+  prediction: z.record(z.any()).optional(),
+  /**
+   * Whether to use structured outputs.
+   *
+   * @default true
+   */
+  structuredOutputs: z.boolean().optional()
+});
+
+// src/openai-error.ts
+import { z as z2 } from "zod";
 import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
-var openaiErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
+var openaiErrorDataSchema = z2.object({
+  error: z2.object({
+    message: z2.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
-    type: z.string().nullish(),
-    param: z.any().nullish(),
-    code: z.union([z.string(), z.number()]).nullish()
+    type: z2.string().nullish(),
+    param: z2.any().nullish(),
+    code: z2.union([z2.string(), z2.number()]).nullish()
  })
 });
 var openaiFailedResponseHandler = createJsonErrorResponseHandler({
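The new `openaiProviderOptions` schema moves per-call OpenAI settings off the model constructor and into `providerOptions.openai`, parsed via `parseProviderOptions` in `getArgs` below. A sketch of how a caller passes them — the `generateText` call shape is assumed from the AI SDK, while the option names come from the schema above:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai("o3-mini"),
  prompt: "Summarize the design doc.",
  providerOptions: {
    openai: {
      reasoningEffort: "high",   // "low" | "medium" | "high"
      maxCompletionTokens: 2048, // cap on completion tokens
      parallelToolCalls: false,  // disable parallel function calling
      user: "user-1234",         // end-user identifier for abuse monitoring
    },
  },
});
```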
@@ -235,19 +286,6 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorToMessage: (data) => data.error.message
 });

-// src/get-response-metadata.ts
-function getResponseMetadata({
-  id,
-  model,
-  created
-}) {
-  return {
-    id: id != null ? id : void 0,
-    modelId: model != null ? model : void 0,
-    timestamp: created != null ? new Date(created * 1e3) : void 0
-  };
-}
-
 // src/openai-prepare-tools.ts
 import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
@@ -255,7 +293,6 @@ import {
 function prepareTools({
  tools,
  toolChoice,
-  useLegacyFunctionCalling = false,
  structuredOutputs
 }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -263,48 +300,6 @@ function prepareTools({
  if (tools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
-  if (useLegacyFunctionCalling) {
-    const openaiFunctions = [];
-    for (const tool of tools) {
-      if (tool.type === "provider-defined") {
-        toolWarnings.push({ type: "unsupported-tool", tool });
-      } else {
-        openaiFunctions.push({
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters
-        });
-      }
-    }
-    if (toolChoice == null) {
-      return {
-        functions: openaiFunctions,
-        function_call: void 0,
-        toolWarnings
-      };
-    }
-    const type2 = toolChoice.type;
-    switch (type2) {
-      case "auto":
-      case "none":
-      case void 0:
-        return {
-          functions: openaiFunctions,
-          function_call: void 0,
-          toolWarnings
-        };
-      case "required":
-        throw new UnsupportedFunctionalityError2({
-          functionality: "useLegacyFunctionCalling and toolChoice: required"
-        });
-      default:
-        return {
-          functions: openaiFunctions,
-          function_call: { name: toolChoice.toolName },
-          toolWarnings
-        };
-    }
-  }
  const openaiTools = [];
  for (const tool of tools) {
    if (tool.type === "provider-defined") {
@@ -344,7 +339,7 @@ function prepareTools({
      default: {
        const _exhaustiveCheck = type;
        throw new UnsupportedFunctionalityError2({
-          functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+          functionality: `tool choice type: ${_exhaustiveCheck}`
        });
      }
    }
@@ -352,31 +347,20 @@ function prepareTools({

 // src/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
    this.modelId = modelId;
-    this.settings = settings;
    this.config = config;
  }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
-  }
-  get defaultObjectGenerationMode() {
-    if (isAudioModel(this.modelId)) {
-      return "tool";
-    }
-    return this.supportsStructuredOutputs ? "json" : "tool";
-  }
  get provider() {
    return this.config.provider;
  }
-  get supportsImageUrls() {
-    return !this.settings.downloadImages;
-  }
-  getArgs({
+  async getArgs({
    prompt,
-    maxTokens,
+    maxOutputTokens,
    temperature,
    topP,
    topK,
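With the `settings` constructor argument removed, the chat model is built from just a model id and config, and advertises URL support declaratively through `supportedUrls` instead of the old `supportsImageUrls`/`downloadImages` pair. A sketch of direct construction — the config fields are assumed from the calls visible elsewhere in this diff (`config.url`, `config.headers`, `config.fetch`), not from a documented public API:

```ts
// Hypothetical direct instantiation of the internal class.
const model = new OpenAIChatLanguageModel("gpt-4o", {
  provider: "openai.chat",
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
});
// http(s) image URLs now pass through to the API instead of being downloaded:
console.log(model.supportedUrls); // { "image/*": [/^https?:\/\/.*$/] }
```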
@@ -389,36 +373,30 @@ var OpenAIChatLanguageModel = class {
    toolChoice,
    providerOptions
  }) {
-    var _a, _b, _c
+    var _a, _b, _c;
    const warnings = [];
+    const openaiOptions = (_a = await parseProviderOptions({
+      provider: "openai",
+      providerOptions,
+      schema: openaiProviderOptions
+    })) != null ? _a : {};
+    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
    if (topK != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "topK"
      });
    }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format schema is only supported with structuredOutputs"
      });
    }
-    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
-    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "useLegacyFunctionCalling with parallelToolCalls"
-      });
-    }
-    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "structuredOutputs with useLegacyFunctionCalling"
-      });
-    }
    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
      {
        prompt,
-        useLegacyFunctionCalling,
        systemMessageMode: getSystemMessageMode(this.modelId)
      }
    );
@@ -427,36 +405,38 @@ var OpenAIChatLanguageModel = class {
      // model id:
      model: this.modelId,
      // model specific settings:
-      logit_bias: this.settings.logitBias,
-      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      user: this.settings.user,
-      parallel_tool_calls: this.settings.parallelToolCalls,
+      logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
      // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
-      response_format: …
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+        // TODO convert into provider option
+        structuredOutputs && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            strict: true,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" }
+      ) : void 0,
      stop: stopSequences,
      seed,
      // openai specific settings:
-      // TODO remove in next major version; we auto-map maxTokens now
-      max_completion_tokens: …
-      store: …
-      metadata: …
-      prediction: …
-      reasoning_effort: …
+      // TODO remove in next major version; we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
      // messages:
      messages
    };
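When the caller requests JSON output with a schema and structured outputs are enabled (the default), the request body now carries a strict `json_schema` response format; without a schema it falls back to `json_object`. The resulting wire shape, with an illustrative caller-supplied schema:

```ts
// Shape of the response_format field produced by the ternary above.
const response_format = {
  type: "json_schema" as const,
  json_schema: {
    name: "response",       // responseFormat.name ?? "response"
    description: undefined, // responseFormat.description, if provided
    strict: true,
    schema: {               // caller-supplied JSON schema (illustrative)
      type: "object",
      properties: { answer: { type: "string" } },
      required: ["answer"],
    },
  },
};
```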
@@ -520,33 +500,37 @@ var OpenAIChatLanguageModel = class {
      }
      baseArgs.max_tokens = void 0;
    }
+    } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for the search preview models and has been removed."
+        });
+      }
    }
    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
-      functions,
-      function_call,
      toolWarnings
    } = prepareTools({
      tools,
      toolChoice,
-      useLegacyFunctionCalling,
-      structuredOutputs: this.supportsStructuredOutputs
+      structuredOutputs
    });
    return {
      args: {
        ...baseArgs,
        tools: openaiTools,
-        tool_choice: openaiToolChoice,
-        functions,
-        function_call
+        tool_choice: openaiToolChoice
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
  async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
@@ -565,105 +549,61 @@ var OpenAIChatLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { messages: rawPrompt, ...rawSettings } = body;
    const choice = response.choices[0];
-    const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
-    const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
-    const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
    }
+    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+      content.push({
+        type: "tool-call",
+        toolCallType: "function",
+        toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+        toolName: toolCall.function.name,
+        args: toolCall.function.arguments
+      });
+    }
+    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
      providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
    }
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
      providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
    }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
    }
    return {
-      text: (_c = choice.message.content) != null ? _c : void 0,
-      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
-        {
-          toolCallType: "function",
-          toolCallId: generateId(),
-          toolName: choice.message.function_call.name,
-          args: choice.message.function_call.arguments
-        }
-      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      usage: {
-        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
-        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+      },
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      request: { body: JSON.stringify(body) },
-      response: getResponseMetadata(response),
      warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
      providerMetadata
    };
  }
  async doStream(options) {
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call-delta",
-                toolCallType: "function",
-                toolCallId: toolCall.toolCallId,
-                toolName: toolCall.toolName,
-                argsTextDelta: toolCall.args
-              });
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        rawCall: result.rawCall,
-        rawResponse: result.rawResponse,
-        warnings: result.warnings
-      };
-    }
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
-      // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+      stream_options: {
+        include_usage: true
+      }
    };
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
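`doGenerate` now returns a `content` array of typed parts instead of separate `text`/`toolCalls` fields, usage is normalized to `inputTokens`/`outputTokens`/`totalTokens` with optional `reasoningTokens`/`cachedInputTokens`, and the old `simulateStreaming` fallback is gone. A sketch of consuming the new result shape, assuming a valid `options` object:

```ts
const { content, usage, finishReason } = await model.doGenerate(options);
for (const part of content) {
  if (part.type === "text") console.log("text:", part.text);
  if (part.type === "tool-call") console.log("tool:", part.toolName, part.args);
}
console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens); // may be undefined
console.log(finishReason);
```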
@@ -679,22 +619,23 @@ var OpenAIChatLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { messages: rawPrompt, ...rawSettings } = args;
    const toolCalls = [];
    let finishReason = "unknown";
-    let usage = {
-      promptTokens: void 0,
-      completionTokens: void 0
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
    };
-    let logprobs;
    let isFirstChunk = true;
-    const { useLegacyFunctionCalling } = this.settings;
    const providerMetadata = { openai: {} };
    return {
      stream: response.pipeThrough(
        new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
          transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
@@ -714,60 +655,37 @@ var OpenAIChatLanguageModel = class {
              });
            }
            if (value.usage != null) {
-              const {
-                prompt_tokens,
-                completion_tokens,
-                prompt_tokens_details,
-                completion_tokens_details
-              } = value.usage;
-              usage = {
-                promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
-                completionTokens: completion_tokens != null ? completion_tokens : void 0
-              };
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-                providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-              }
-              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
              }
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
+            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs.content;
+            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
            const delta = choice.delta;
            if (delta.content != null) {
              controller.enqueue({
-                type: "text-delta",
-                textDelta: delta.content
+                type: "text",
+                text: delta.content
              });
            }
-            const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-              choice == null ? void 0 : choice.logprobs
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
-            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
-              {
-                type: "function",
-                id: generateId(),
-                function: delta.function_call,
-                index: 0
-              }
-            ] : delta.tool_calls;
-            if (mappedToolCalls != null) {
-              for (const toolCallDelta of mappedToolCalls) {
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
@@ -782,7 +700,7 @@ var OpenAIChatLanguageModel = class {
                      message: `Expected 'id' to be a string.`
                    });
                  }
-                  if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+                  if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`
@@ -793,12 +711,12 @@ var OpenAIChatLanguageModel = class {
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
-                      arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                      arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                    },
                    hasFinished: false
                  };
                  const toolCall2 = toolCalls[index];
-                  if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+                  if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                    if (toolCall2.function.arguments.length > 0) {
                      controller.enqueue({
                        type: "tool-call-delta",
@@ -812,7 +730,7 @@ var OpenAIChatLanguageModel = class {
                      controller.enqueue({
                        type: "tool-call",
                        toolCallType: "function",
-                        toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+                        toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                        toolName: toolCall2.function.name,
                        args: toolCall2.function.arguments
                      });
@@ -825,21 +743,21 @@ var OpenAIChatLanguageModel = class {
                if (toolCall.hasFinished) {
                  continue;
                }
-                if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
-                  toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+                if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                }
                controller.enqueue({
                  type: "tool-call-delta",
                  toolCallType: "function",
                  toolCallId: toolCall.id,
                  toolName: toolCall.function.name,
-                  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                  argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                });
-                if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                  controller.enqueue({
                    type: "tool-call",
                    toolCallType: "function",
-                    toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+                    toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                    toolName: toolCall.function.name,
                    args: toolCall.function.arguments
                  });
@@ -849,125 +767,111 @@ var OpenAIChatLanguageModel = class {
            }
          },
          flush(controller) {
-            var _a, _b;
            controller.enqueue({
              type: "finish",
              finishReason,
-              logprobs,
-              usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-              },
+              usage,
              ...providerMetadata != null ? { providerMetadata } : {}
            });
          }
        })
      ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
    };
  }
 };
-var openaiTokenUsageSchema = z2.object({
-  prompt_tokens: z2.number().nullish(),
-  completion_tokens: z2.number().nullish(),
-  prompt_tokens_details: z2.object({
-    cached_tokens: z2.number().nullish()
+var openaiTokenUsageSchema = z3.object({
+  prompt_tokens: z3.number().nullish(),
+  completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
+  prompt_tokens_details: z3.object({
+    cached_tokens: z3.number().nullish()
  }).nullish(),
-  completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish(),
-    accepted_prediction_tokens: z2.number().nullish(),
-    rejected_prediction_tokens: z2.number().nullish()
+  completion_tokens_details: z3.object({
+    reasoning_tokens: z3.number().nullish(),
+    accepted_prediction_tokens: z3.number().nullish(),
+    rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
 }).nullish();
-var openaiChatResponseSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      message: z2.object({
-        role: z2.literal("assistant").nullish(),
-        content: z2.string().nullish(),
-        function_call: z2.object({
-          arguments: z2.string(),
-          name: z2.string()
-        }).nullish(),
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string().nullish(),
-            type: z2.literal("function"),
-            function: z2.object({
-              name: z2.string(),
-              arguments: z2.string()
+var openaiChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant").nullish(),
+        content: z3.string().nullish(),
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string().nullish(),
+            type: z3.literal("function"),
+            function: z3.object({
+              name: z3.string(),
+              arguments: z3.string()
            })
          })
        ).nullish()
      }),
-      index: z2.number(),
-      logprobs: z2.object({
-        content: z2.array(
-          z2.object({
-            token: z2.string(),
-            logprob: z2.number(),
-            top_logprobs: z2.array(
-              z2.object({
-                token: z2.string(),
-                logprob: z2.number()
+      index: z3.number(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
              })
            )
          })
-        ).nullable()
+        ).nullish()
      }).nullish(),
-      finish_reason: z2.string().nullish()
+      finish_reason: z3.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema = z2.union([
-  z2.object({
-    id: z2.string().nullish(),
-    created: z2.number().nullish(),
-    model: z2.string().nullish(),
-    choices: z2.array(
-      z2.object({
-        delta: z2.object({
-          role: z2.enum(["assistant"]).nullish(),
-          content: z2.string().nullish(),
-          function_call: z2.object({
-            name: z2.string().optional(),
-            arguments: z2.string().optional()
-          }).nullish(),
-          tool_calls: z2.array(
-            z2.object({
-              index: z2.number(),
-              id: z2.string().nullish(),
-              type: z2.literal("function").optional(),
-              function: z2.object({
-                name: z2.string().nullish(),
-                arguments: z2.string().nullish()
+var openaiChatChunkSchema = z3.union([
+  z3.object({
+    id: z3.string().nullish(),
+    created: z3.number().nullish(),
+    model: z3.string().nullish(),
+    choices: z3.array(
+      z3.object({
+        delta: z3.object({
+          role: z3.enum(["assistant"]).nullish(),
+          content: z3.string().nullish(),
+          tool_calls: z3.array(
+            z3.object({
+              index: z3.number(),
+              id: z3.string().nullish(),
+              type: z3.literal("function").nullish(),
+              function: z3.object({
+                name: z3.string().nullish(),
+                arguments: z3.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        logprobs: z2.object({
-          content: z2.array(
-            z2.object({
-              token: z2.string(),
-              logprob: z2.number(),
-              top_logprobs: z2.array(
-                z2.object({
-                  token: z2.string(),
-                  logprob: z2.number()
+        logprobs: z3.object({
+          content: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number(),
+              top_logprobs: z3.array(
+                z3.object({
+                  token: z3.string(),
+                  logprob: z3.number()
                })
              )
            })
-          ).nullable()
+          ).nullish()
        }).nullish(),
-        finish_reason: z2.string().nullish(),
-        index: z2.number()
+        finish_reason: z3.string().nullish(),
+        index: z3.number()
      })
    ),
    usage: openaiTokenUsageSchema
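The standalone `logprobs` result field, its mapping helpers, and the legacy `function_call` chunk fields are all gone; raw OpenAI logprobs now surface under `providerMetadata.openai.logprobs`, and the usage schema gains `total_tokens`. Reading logprobs after this change — the optional-chaining access mirrors the assignments in the diff:

```ts
const { providerMetadata } = await model.doGenerate(options);
// Raw OpenAI `choice.logprobs.content`, no longer remapped by the provider:
const logprobs = providerMetadata?.openai?.logprobs;
if (Array.isArray(logprobs)) {
  for (const entry of logprobs) {
    console.log(entry.token, entry.logprob, entry.top_logprobs);
  }
}
```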
@@ -975,10 +879,7 @@ var openaiChatChunkSchema = z2.union([
  openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId.startsWith("o1") || modelId.startsWith("o3");
-}
-function isAudioModel(modelId) {
-  return modelId.startsWith("gpt-4o-audio-preview");
+  return modelId.startsWith("o");
 }
 function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -1000,11 +901,23 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
    systemMessageMode: "remove"
  },
+  o3: {
+    systemMessageMode: "developer"
+  },
+  "o3-2025-04-16": {
+    systemMessageMode: "developer"
+  },
  "o3-mini": {
    systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
    systemMessageMode: "developer"
+  },
+  "o4-mini": {
+    systemMessageMode: "developer"
+  },
+  "o4-mini-2025-04-16": {
+    systemMessageMode: "developer"
  }
 };
@@ -1013,24 +926,21 @@ import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z5 } from "zod";

 // src/convert-to-openai-completion-prompt.ts
 import {
  InvalidPromptError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
 } from "@ai-sdk/provider";
 function convertToOpenAICompletionPrompt({
  prompt,
-  inputFormat,
  user = "user",
  assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
  let text = "";
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}
@@ -1052,13 +962,8 @@ function convertToOpenAICompletionPrompt({
        case "text": {
          return part.text;
        }
-        case "image": {
-          throw new UnsupportedFunctionalityError4({
-            functionality: "images"
-          });
-        }
      }
-    }).join("");
+    }).filter(Boolean).join("");
    text += `${user}:
${userMessage}

@@ -1072,7 +977,7 @@ ${userMessage}
          return part.text;
        }
        case "tool-call": {
-          throw new UnsupportedFunctionalityError4({
+          throw new UnsupportedFunctionalityError3({
            functionality: "tool-call messages"
          });
        }
@@ -1085,7 +990,7 @@ ${assistantMessage}
      break;
    }
    case "tool": {
-      throw new UnsupportedFunctionalityError4({
+      throw new UnsupportedFunctionalityError3({
        functionality: "tool messages"
      });
    }
@@ -1104,36 +1009,68 @@ ${user}:`]
  };
 }

-// src/map-openai-completion-logprobs.ts
-function mapOpenAICompletionLogProbs(logprobs) {
-  …
-}
+// src/openai-completion-options.ts
+import { z as z4 } from "zod";
+var openaiCompletionProviderOptions = z4.object({
+  /**
+  Echo back the prompt in addition to the completion.
+   */
+  echo: z4.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+   */
+  logitBias: z4.record(z4.string(), z4.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+   */
+  suffix: z4.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+   */
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+   */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
+});

 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
    this.specificationVersion = "v2";
-    this.…
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
    this.modelId = modelId;
-    this.settings = settings;
    this.config = config;
  }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
  get provider() {
    return this.config.provider;
  }
-  getArgs({
-    inputFormat,
+  async getArgs({
    prompt,
-    maxTokens,
+    maxOutputTokens,
    temperature,
    topP,
    topK,
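Completion-model options are parsed twice and merged: once under the `"openai"` key and once under the provider's own name (`config.provider.split(".")[0]`, via the new `providerOptionsName` getter), so OpenAI-compatible providers can accept the same options under their own namespace. A sketch — the provider name `"example"` is hypothetical:

```ts
// For a compatible provider whose config.provider is "example.completion",
// either key below reaches the same openaiCompletionProviderOptions schema:
const providerOptions = {
  openai: { echo: true, logprobs: 3, suffix: ";" },
  // example: { echo: true, logprobs: 3, suffix: ";" }, // equivalent
};
```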
@@ -1143,9 +1080,22 @@ var OpenAICompletionLanguageModel = class {
    responseFormat,
    tools,
    toolChoice,
-    seed
+    seed,
+    providerOptions
  }) {
    const warnings = [];
+    const openaiOptions = {
+      ...await parseProviderOptions2({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await parseProviderOptions2({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
@@ -1162,20 +1112,20 @@ var OpenAICompletionLanguageModel = class {
        details: "JSON response format is not supported."
      });
    }
-    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
    return {
      args: {
        // model id:
        model: this.modelId,
        // model specific settings:
-        echo: this.settings.echo,
-        logit_bias: this.settings.logitBias,
-        logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-        suffix: this.settings.suffix,
-        user: this.settings.user,
+        echo: openaiOptions.echo,
+        logit_bias: openaiOptions.logitBias,
+        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+        suffix: openaiOptions.suffix,
+        user: openaiOptions.user,
        // standardized settings:
-        max_tokens: maxTokens,
+        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
@@ -1190,7 +1140,8 @@ var OpenAICompletionLanguageModel = class {
    };
  }
  async doGenerate(options) {
-    const { args, warnings } = this.getArgs(options);
+    var _a, _b, _c;
+    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
@@ -1209,30 +1160,37 @@ var OpenAICompletionLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { prompt: rawPrompt, ...rawSettings } = args;
    const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
    return {
-      text: choice.text,
+      content: [{ type: "text", text: choice.text }],
      usage: {
-        promptTokens: response.usage.prompt_tokens,
-        completionTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
-      warnings,
-      request: { body: JSON.stringify(args) }
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      providerMetadata,
+      warnings
    };
  }
  async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
-      // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+      stream_options: {
+        include_usage: true
+      }
    };
    const { responseHeaders, value: response } = await postJsonToApi2({
      url: this.config.url({
@@ -1248,17 +1206,20 @@ var OpenAICompletionLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { prompt: rawPrompt, ...rawSettings } = args;
    let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const providerMetadata = { openai: {} };
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
    };
-    let logprobs;
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
          transform(chunk, controller) {
            if (!chunk.success) {
              finishReason = "error";
@@ -1279,87 +1240,79 @@ var OpenAICompletionLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              usage = {
-                promptTokens: value.usage.prompt_tokens,
-                completionTokens: value.usage.completion_tokens
-              };
+              usage.inputTokens = value.usage.prompt_tokens;
+              usage.outputTokens = value.usage.completion_tokens;
+              usage.totalTokens = value.usage.total_tokens;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
               finishReason = mapOpenAIFinishReason(choice.finish_reason);
             }
+            if ((choice == null ? void 0 : choice.logprobs) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs;
+            }
             if ((choice == null ? void 0 : choice.text) != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: choice.text
+                type: "text",
+                text: choice.text
               });
             }
-            const mappedLogprobs = mapOpenAICompletionLogProbs(
-              choice == null ? void 0 : choice.logprobs
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
           },
           flush(controller) {
             controller.enqueue({
               type: "finish",
               finishReason,
-              logprobs,
+              providerMetadata,
               usage
             });
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body: JSON.stringify(body) }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiCompletionResponseSchema = z3.object({
-  id: z3.string().nullish(),
-  created: z3.number().nullish(),
-  model: z3.string().nullish(),
-  choices: z3.array(
-    z3.object({
-      text: z3.string(),
-      finish_reason: z3.string(),
-      logprobs: z3.object({
-        tokens: z3.array(z3.string()),
-        token_logprobs: z3.array(z3.number()),
-        top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullish()
+var usageSchema = z5.object({
+  prompt_tokens: z5.number(),
+  completion_tokens: z5.number(),
+  total_tokens: z5.number()
+});
+var openaiCompletionResponseSchema = z5.object({
+  id: z5.string().nullish(),
+  created: z5.number().nullish(),
+  model: z5.string().nullish(),
+  choices: z5.array(
+    z5.object({
+      text: z5.string(),
+      finish_reason: z5.string(),
+      logprobs: z5.object({
+        tokens: z5.array(z5.string()),
+        token_logprobs: z5.array(z5.number()),
+        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
       }).nullish()
     })
   ),
-  usage: z3.object({
-    prompt_tokens: z3.number(),
-    completion_tokens: z3.number()
-  })
+  usage: usageSchema.nullish()
 });
-var openaiCompletionChunkSchema = z3.union([
-  z3.object({
-    id: z3.string().nullish(),
-    created: z3.number().nullish(),
-    model: z3.string().nullish(),
-    choices: z3.array(
-      z3.object({
-        text: z3.string(),
-        finish_reason: z3.string().nullish(),
-        index: z3.number(),
-        logprobs: z3.object({
-          tokens: z3.array(z3.string()),
-          token_logprobs: z3.array(z3.number()),
-          top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullish()
+var openaiCompletionChunkSchema = z5.union([
+  z5.object({
+    id: z5.string().nullish(),
+    created: z5.number().nullish(),
+    model: z5.string().nullish(),
+    choices: z5.array(
+      z5.object({
+        text: z5.string(),
+        finish_reason: z5.string().nullish(),
+        index: z5.number(),
+        logprobs: z5.object({
+          tokens: z5.array(z5.string()),
+          token_logprobs: z5.array(z5.number()),
+          top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
        }).nullish()
      })
    ),
-    usage: z3.object({
-      prompt_tokens: z3.number(),
-      completion_tokens: z3.number()
-    }).nullish()
+    usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
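
The completion stream now reports token usage as inputTokens/outputTokens/totalTokens and surfaces the raw OpenAI logprobs through providerMetadata.openai on the finish part. A minimal consumer-side sketch, assuming the AI SDK v5 canary surface (`streamText` from `ai`; the exact onFinish callback shape may differ between canaries):

    import { openai } from '@ai-sdk/openai';
    import { streamText } from 'ai';

    const result = streamText({
      model: openai.completion('gpt-3.5-turbo-instruct'),
      prompt: 'Write a haiku about package diffs.',
      onFinish({ usage, providerMetadata }) {
        // new usage shape from this diff: inputTokens/outputTokens/totalTokens
        console.log(usage);
        // raw completion logprobs, when returned, ride along unmapped
        console.log(providerMetadata?.openai?.logprobs);
      },
    });

    for await (const text of result.textStream) {
      process.stdout.write(text);
    }
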
@@ -1371,32 +1324,45 @@ import {
 import {
   combineHeaders as combineHeaders3,
   createJsonResponseHandler as createJsonResponseHandler3,
+  parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z4 } from "zod";
+import { z as z7 } from "zod";
+
+// src/openai-embedding-options.ts
+import { z as z6 } from "zod";
+var openaiEmbeddingProviderOptions = z6.object({
+  /**
+  The number of dimensions the resulting output embeddings should have.
+  Only supported in text-embedding-3 and later models.
+  */
+  dimensions: z6.number().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+  */
+  user: z6.string().optional()
+});
+
+// src/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    var _a;
-    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
-  }
-  get supportsParallelCalls() {
-    var _a;
-    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
-  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
+    var _a;
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
         provider: this.provider,
@@ -1405,7 +1371,16 @@ var OpenAIEmbeddingModel = class {
         values
       });
     }
-    const { responseHeaders, value: response } = await postJsonToApi3({
+    const openaiOptions = (_a = await parseProviderOptions3({
+      provider: "openai",
+      providerOptions,
+      schema: openaiEmbeddingProviderOptions
+    })) != null ? _a : {};
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi3({
       url: this.config.url({
         path: "/embeddings",
         modelId: this.modelId
@@ -1415,8 +1390,8 @@ var OpenAIEmbeddingModel = class {
         model: this.modelId,
         input: values,
         encoding_format: "float",
-        dimensions: this.settings.dimensions,
-        user: this.settings.user
+        dimensions: openaiOptions.dimensions,
+        user: openaiOptions.user
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler3(
@@ -1428,13 +1403,13 @@ var OpenAIEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-      rawResponse: { headers: responseHeaders }
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = z4.object({
-  data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
-  usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = z7.object({
+  data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
 });
 
 // src/openai-image-model.ts
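
Embedding settings move from constructor settings to per-call provider options validated by openaiEmbeddingProviderOptions. A minimal sketch, assuming the v5 canary `embed` API accepts providerOptions:

    import { openai } from '@ai-sdk/openai';
    import { embed } from 'ai';

    const { embedding, usage } = await embed({
      model: openai.embedding('text-embedding-3-small'),
      value: 'sunny day at the beach',
      providerOptions: {
        // validated against openaiEmbeddingProviderOptions above
        openai: { dimensions: 512, user: 'user-1234' },
      },
    });
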
@@ -1443,25 +1418,26 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z5 } from "zod";
+import { z as z8 } from "zod";
 
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
-    var _a, _b;
-    return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+    var _a;
+    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
   }
   get provider() {
     return this.config.provider;
@@ -1501,7 +1477,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler4(
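
gpt-image-1 always returns base64 payloads, so the provider skips the response_format parameter for models listed in hasDefaultResponseFormat. A hedged sketch, assuming the experimental image API in the matching `ai` canary:

    import { openai } from '@ai-sdk/openai';
    import { experimental_generateImage as generateImage } from 'ai';

    const { image, providerMetadata } = await generateImage({
      model: openai.image('gpt-image-1'), // no response_format is sent for this model
      prompt: 'A watercolor lighthouse at dawn',
    });
    // revised prompts, when the API returns them, are mapped per image
    // (see the providerMetadata.openai.images mapping in the next hunk):
    console.log(providerMetadata?.openai?.images);
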
@@ -1517,30 +1493,339 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
-var openaiImageResponseSchema = z5.object({
-  data: z5.array(z5.object({ b64_json: z5.string() }))
+var openaiImageResponseSchema = z8.object({
+  data: z8.array(
+    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+  )
 });
 
-// src/responses/openai-responses-language-model.ts
+// src/openai-transcription-model.ts
 import {
   combineHeaders as combineHeaders5,
-  createEventSourceResponseHandler as createEventSourceResponseHandler3,
+  convertBase64ToUint8Array,
   createJsonResponseHandler as createJsonResponseHandler5,
-  generateId as generateId2,
-  parseProviderOptions,
+  parseProviderOptions as parseProviderOptions4,
+  postFormDataToApi
+} from "@ai-sdk/provider-utils";
+import { z as z10 } from "zod";
+
+// src/openai-transcription-options.ts
+import { z as z9 } from "zod";
+var openAITranscriptionProviderOptions = z9.object({
+  /**
+   * Additional information to include in the transcription response.
+   */
+  include: z9.array(z9.string()).optional(),
+  /**
+   * The language of the input audio in ISO-639-1 format.
+   */
+  language: z9.string().optional(),
+  /**
+   * An optional text to guide the model's style or continue a previous audio segment.
+   */
+  prompt: z9.string().optional(),
+  /**
+   * The sampling temperature, between 0 and 1.
+   * @default 0
+   */
+  temperature: z9.number().min(0).max(1).default(0).optional(),
+  /**
+   * The timestamp granularities to populate for this transcription.
+   * @default ['segment']
+   */
+  timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
+});
+
+// src/openai-transcription-model.ts
+var languageMap = {
+  afrikaans: "af",
+  arabic: "ar",
+  armenian: "hy",
+  azerbaijani: "az",
+  belarusian: "be",
+  bosnian: "bs",
+  bulgarian: "bg",
+  catalan: "ca",
+  chinese: "zh",
+  croatian: "hr",
+  czech: "cs",
+  danish: "da",
+  dutch: "nl",
+  english: "en",
+  estonian: "et",
+  finnish: "fi",
+  french: "fr",
+  galician: "gl",
+  german: "de",
+  greek: "el",
+  hebrew: "he",
+  hindi: "hi",
+  hungarian: "hu",
+  icelandic: "is",
+  indonesian: "id",
+  italian: "it",
+  japanese: "ja",
+  kannada: "kn",
+  kazakh: "kk",
+  korean: "ko",
+  latvian: "lv",
+  lithuanian: "lt",
+  macedonian: "mk",
+  malay: "ms",
+  marathi: "mr",
+  maori: "mi",
+  nepali: "ne",
+  norwegian: "no",
+  persian: "fa",
+  polish: "pl",
+  portuguese: "pt",
+  romanian: "ro",
+  russian: "ru",
+  serbian: "sr",
+  slovak: "sk",
+  slovenian: "sl",
+  spanish: "es",
+  swahili: "sw",
+  swedish: "sv",
+  tagalog: "tl",
+  tamil: "ta",
+  thai: "th",
+  turkish: "tr",
+  ukrainian: "uk",
+  urdu: "ur",
+  vietnamese: "vi",
+  welsh: "cy"
+};
+var OpenAITranscriptionModel = class {
+  constructor(modelId, config) {
+    this.modelId = modelId;
+    this.config = config;
+    this.specificationVersion = "v1";
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async getArgs({
+    audio,
+    mediaType,
+    providerOptions
+  }) {
+    const warnings = [];
+    const openAIOptions = await parseProviderOptions4({
+      provider: "openai",
+      providerOptions,
+      schema: openAITranscriptionProviderOptions
+    });
+    const formData = new FormData();
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+    formData.append("model", this.modelId);
+    formData.append("file", new File([blob], "audio", { type: mediaType }));
+    if (openAIOptions) {
+      const transcriptionModelOptions = {
+        include: openAIOptions.include,
+        language: openAIOptions.language,
+        prompt: openAIOptions.prompt,
+        temperature: openAIOptions.temperature,
+        timestamp_granularities: openAIOptions.timestampGranularities
+      };
+      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+        if (value != null) {
+          formData.append(key, String(value));
+        }
+      }
+    }
+    return {
+      formData,
+      warnings
+    };
+  }
+  async doGenerate(options) {
+    var _a, _b, _c, _d, _e, _f;
+    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    const { formData, warnings } = await this.getArgs(options);
+    const {
+      value: response,
+      responseHeaders,
+      rawValue: rawResponse
+    } = await postFormDataToApi({
+      url: this.config.url({
+        path: "/audio/transcriptions",
+        modelId: this.modelId
+      }),
+      headers: combineHeaders5(this.config.headers(), options.headers),
+      formData,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler5(
+        openaiTranscriptionResponseSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+    return {
+      text: response.text,
+      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+        text: word.word,
+        startSecond: word.start,
+        endSecond: word.end
+      }))) != null ? _e : [],
+      language,
+      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+      warnings,
+      response: {
+        timestamp: currentDate,
+        modelId: this.modelId,
+        headers: responseHeaders,
+        body: rawResponse
+      }
+    };
+  }
+};
+var openaiTranscriptionResponseSchema = z10.object({
+  text: z10.string(),
+  language: z10.string().nullish(),
+  duration: z10.number().nullish(),
+  words: z10.array(
+    z10.object({
+      word: z10.string(),
+      start: z10.number(),
+      end: z10.number()
+    })
+  ).nullish()
+});
+
+// src/openai-speech-model.ts
+import {
+  combineHeaders as combineHeaders6,
+  createBinaryResponseHandler,
+  parseProviderOptions as parseProviderOptions5,
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
-import { z as z6 } from "zod";
+import { z as z11 } from "zod";
+var OpenAIProviderOptionsSchema = z11.object({
+  instructions: z11.string().nullish(),
+  speed: z11.number().min(0.25).max(4).default(1).nullish()
+});
+var OpenAISpeechModel = class {
+  constructor(modelId, config) {
+    this.modelId = modelId;
+    this.config = config;
+    this.specificationVersion = "v1";
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async getArgs({
+    text,
+    voice = "alloy",
+    outputFormat = "mp3",
+    speed,
+    instructions,
+    providerOptions
+  }) {
+    const warnings = [];
+    const openAIOptions = await parseProviderOptions5({
+      provider: "openai",
+      providerOptions,
+      schema: OpenAIProviderOptionsSchema
+    });
+    const requestBody = {
+      model: this.modelId,
+      input: text,
+      voice,
+      response_format: "mp3",
+      speed,
+      instructions
+    };
+    if (outputFormat) {
+      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+        requestBody.response_format = outputFormat;
+      } else {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "outputFormat",
+          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+        });
+      }
+    }
+    if (openAIOptions) {
+      const speechModelOptions = {};
+      for (const key in speechModelOptions) {
+        const value = speechModelOptions[key];
+        if (value !== void 0) {
+          requestBody[key] = value;
+        }
+      }
+    }
+    return {
+      requestBody,
+      warnings
+    };
+  }
+  async doGenerate(options) {
+    var _a, _b, _c;
+    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    const { requestBody, warnings } = await this.getArgs(options);
+    const {
+      value: audio,
+      responseHeaders,
+      rawValue: rawResponse
+    } = await postJsonToApi5({
+      url: this.config.url({
+        path: "/audio/speech",
+        modelId: this.modelId
+      }),
+      headers: combineHeaders6(this.config.headers(), options.headers),
+      body: requestBody,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createBinaryResponseHandler(),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      audio,
+      warnings,
+      request: {
+        body: JSON.stringify(requestBody)
+      },
+      response: {
+        timestamp: currentDate,
+        modelId: this.modelId,
+        headers: responseHeaders,
+        body: rawResponse
+      }
+    };
+  }
+};
+
+// src/responses/openai-responses-language-model.ts
+import {
+  combineHeaders as combineHeaders7,
+  createEventSourceResponseHandler as createEventSourceResponseHandler3,
+  createJsonResponseHandler as createJsonResponseHandler6,
+  generateId as generateId2,
+  parseProviderOptions as parseProviderOptions6,
+  postJsonToApi as postJsonToApi6
+} from "@ai-sdk/provider-utils";
+import { z as z12 } from "zod";
 
 // src/responses/convert-to-openai-responses-messages.ts
 import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
 function convertToOpenAIResponsesMessages({
   prompt,
   systemMessageMode
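
The transcription and speech models above are entirely new in this range. Two hedged usage sketches, assuming the experimental import aliases from the matching `ai` canary and placeholder file names; the provider option names come straight from the schemas in the diff:

    import { openai } from '@ai-sdk/openai';
    import { experimental_transcribe as transcribe } from 'ai';
    import { readFile } from 'node:fs/promises';

    const { text, segments, language, durationInSeconds } = await transcribe({
      model: openai.transcription('whisper-1'),
      audio: await readFile('meeting.mp3'), // hypothetical input file
      providerOptions: {
        openai: { language: 'en', timestampGranularities: ['word'] },
      },
    });

For speech, unsupported output formats fall back to mp3 with an "unsupported-setting" warning, exactly as the getArgs branch above encodes:

    import { openai } from '@ai-sdk/openai';
    import { experimental_generateSpeech as generateSpeech } from 'ai';

    const { audio } = await generateSpeech({
      model: openai.speech('tts-1'),
      text: 'Hello from the AI SDK!',
      voice: 'alloy',
      outputFormat: 'wav', // anything outside mp3/opus/aac/flac/wav/pcm triggers the warning
      instructions: 'Speak cheerfully.',
    });
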
@@ -1579,38 +1864,35 @@ function convertToOpenAIResponsesMessages({
       messages.push({
         role: "user",
         content: content.map((part, index) => {
-          var _a, _b, _c, _d;
+          var _a, _b, _c;
           switch (part.type) {
             case "text": {
               return { type: "input_text", text: part.text };
             }
-            case "image": {
-              return {
-                type: "input_image",
-                image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
-                // OpenAI specific extension: image detail
-                detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-              };
-            }
             case "file": {
-              if (part.data instanceof URL) {
-                throw new UnsupportedFunctionalityError5({
-                  functionality: "File URLs in user messages"
-                });
-              }
-              switch (part.mimeType) {
-                case "application/pdf": {
-                  return {
-                    type: "input_file",
-                    filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
-                    file_data: `data:application/pdf;base64,${part.data}`
-                  };
-                }
-                default: {
-                  throw new UnsupportedFunctionalityError5({
-                    functionality: "Only PDF files are supported in user messages"
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "input_image",
+                  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+                  // OpenAI specific extension: image detail
+                  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                };
+              } else if (part.mediaType === "application/pdf") {
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError4({
+                    functionality: "PDF file parts with URLs"
                   });
                 }
+                return {
+                  type: "input_file",
+                  filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                  file_data: `data:application/pdf;base64,${part.data}`
+                };
+              } else {
+                throw new UnsupportedFunctionalityError4({
+                  functionality: `file part media type ${part.mediaType}`
+                });
               }
             }
           }
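
With the mediaType-based file parts above, images and PDFs both travel as file parts; PDF URLs throw, so PDF data must be inlined. A hedged sketch, assuming the v5 canary message-part shape (mediaType rather than the older mimeType) and a placeholder file name:

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';
    import { readFile } from 'node:fs/promises';

    const { text } = await generateText({
      model: openai.responses('gpt-4o'),
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Summarize this document.' },
            {
              type: 'file',
              mediaType: 'application/pdf',
              data: await readFile('report.pdf'),
              filename: 'report.pdf', // optional; defaults to part-<index>.pdf
            },
          ],
        },
      ],
    });
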
@@ -1680,7 +1962,7 @@ function mapOpenAIResponseFinishReason({
 
 // src/responses/openai-responses-prepare-tools.ts
 import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
 } from "@ai-sdk/provider";
 function prepareResponsesTools({
   tools,
@@ -1740,8 +2022,8 @@ function prepareResponsesTools({
       };
     default: {
       const _exhaustiveCheck = type;
-      throw new UnsupportedFunctionalityError6({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+      throw new UnsupportedFunctionalityError5({
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -1751,15 +2033,17 @@ function prepareResponsesTools({
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.defaultObjectGenerationMode = "json";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  getArgs({
-    maxTokens,
+  async getArgs({
+    maxOutputTokens,
     temperature,
     stopSequences,
     topP,
@@ -1802,7 +2086,7 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions = parseProviderOptions({
+    const openaiOptions = await parseProviderOptions6({
       provider: "openai",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
@@ -1813,7 +2097,7 @@ var OpenAIResponsesLanguageModel = class {
       input: messages,
       temperature,
       top_p: topP,
-      max_output_tokens: maxTokens,
+      max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         text: {
           format: responseFormat.schema != null ? {
@@ -1833,8 +2117,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { effort: openaiOptions.reasoningEffort }
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
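
The request's reasoning block now carries a summary setting alongside effort for reasoning models. A minimal sketch, assuming the v5 canary `generateText` API and representative option values:

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    const { text } = await generateText({
      model: openai.responses('o4-mini'),
      prompt: 'How many R letters are in "strawberry"?',
      providerOptions: {
        // both keys are validated by openaiResponsesProviderOptionsSchema
        openai: { reasoningEffort: 'low', reasoningSummary: 'auto' },
      },
    });
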
@@ -1877,133 +2168,153 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h;
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await postJsonToApi5({
+    } = await postJsonToApi6({
       url: this.config.url({
         path: "/responses",
         modelId: this.modelId
       }),
-      headers: combineHeaders5(this.config.headers(), options.headers),
+      headers: combineHeaders7(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
-      successfulResponseHandler: createJsonResponseHandler5(
-        z6.object({
-          id: z6.string(),
-          created_at: z6.number(),
-          model: z6.string(),
-          output: z6.array(
-            z6.discriminatedUnion("type", [
-              z6.object({
-                type: z6.literal("message"),
-                role: z6.literal("assistant"),
-                content: z6.array(
-                  z6.object({
-                    type: z6.literal("output_text"),
-                    text: z6.string(),
-                    annotations: z6.array(
-                      z6.object({
-                        type: z6.literal("url_citation"),
-                        start_index: z6.number(),
-                        end_index: z6.number(),
-                        url: z6.string(),
-                        title: z6.string()
+      successfulResponseHandler: createJsonResponseHandler6(
+        z12.object({
+          id: z12.string(),
+          created_at: z12.number(),
+          model: z12.string(),
+          output: z12.array(
+            z12.discriminatedUnion("type", [
+              z12.object({
+                type: z12.literal("message"),
+                role: z12.literal("assistant"),
+                content: z12.array(
+                  z12.object({
+                    type: z12.literal("output_text"),
+                    text: z12.string(),
+                    annotations: z12.array(
+                      z12.object({
+                        type: z12.literal("url_citation"),
+                        start_index: z12.number(),
+                        end_index: z12.number(),
+                        url: z12.string(),
+                        title: z12.string()
                       })
                     )
                   })
                 )
               }),
-              z6.object({
-                type: z6.literal("function_call"),
-                call_id: z6.string(),
-                name: z6.string(),
-                arguments: z6.string()
+              z12.object({
+                type: z12.literal("function_call"),
+                call_id: z12.string(),
+                name: z12.string(),
+                arguments: z12.string()
               }),
-              z6.object({
-                type: z6.literal("web_search_call")
+              z12.object({
+                type: z12.literal("web_search_call")
               }),
-              z6.object({
-                type: z6.literal("computer_call")
+              z12.object({
+                type: z12.literal("computer_call")
               }),
-              z6.object({
-                type: z6.literal("reasoning")
+              z12.object({
+                type: z12.literal("reasoning"),
+                summary: z12.array(
+                  z12.object({
+                    type: z12.literal("summary_text"),
+                    text: z12.string()
+                  })
+                )
               })
             ])
           ),
-          incomplete_details: z6.object({ reason: z6.string() }).nullable(),
-          usage: usageSchema
+          incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+          usage: usageSchema2
         })
       ),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
-    const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
-      toolCallType: "function",
-      toolCallId: output.call_id,
-      toolName: output.name,
-      args: output.arguments
-    }));
+    const content = [];
+    for (const part of response.output) {
+      switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
+        case "message": {
+          for (const contentPart of part.content) {
+            content.push({
+              type: "text",
+              text: contentPart.text
+            });
+            for (const annotation of contentPart.annotations) {
+              content.push({
+                type: "source",
+                sourceType: "url",
+                id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+                url: annotation.url,
+                title: annotation.title
+              });
+            }
+          }
+          break;
+        }
+        case "function_call": {
+          content.push({
+            type: "tool-call",
+            toolCallType: "function",
+            toolCallId: part.call_id,
+            toolName: part.name,
+            args: part.arguments
+          });
+          break;
+        }
+      }
+    }
     return {
-      text: outputTextElements.map((content) => content.text).join("\n"),
-      sources: outputTextElements.flatMap(
-        (content) => content.annotations.map((annotation) => {
-          var _a2, _b2, _c2;
-          return {
-            sourceType: "url",
-            id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
-            url: annotation.url,
-            title: annotation.title
-          };
-        })
-      ),
+      content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
-        hasToolCalls: toolCalls.length > 0
+        finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
-      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
       usage: {
-        promptTokens: response.usage.input_tokens,
-        completionTokens: response.usage.output_tokens
-      },
-      rawCall: {
-        rawPrompt: void 0,
-        rawSettings: {}
-      },
-      rawResponse: {
-        headers: responseHeaders,
-        body: rawResponse
-      },
-      request: {
-        body: JSON.stringify(body)
+        inputTokens: response.usage.input_tokens,
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
+      request: { body },
       response: {
         id: response.id,
         timestamp: new Date(response.created_at * 1e3),
-        modelId: response.model
+        modelId: response.model,
+        headers: responseHeaders,
+        body: rawResponse
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
-          reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+          responseId: response.id
         }
       },
       warnings
     };
   }
   async doStream(options) {
-    const { args: body, warnings } = this.getArgs(options);
-    const { responseHeaders, value: response } = await postJsonToApi5({
+    const { args: body, warnings } = await this.getArgs(options);
+    const { responseHeaders, value: response } = await postJsonToApi6({
       url: this.config.url({
         path: "/responses",
         modelId: this.modelId
       }),
-      headers: combineHeaders5(this.config.headers(), options.headers),
+      headers: combineHeaders7(this.config.headers(), options.headers),
       body: {
         ...body,
         stream: true
@@ -2017,16 +2328,20 @@ var OpenAIResponsesLanguageModel = class {
     });
     const self = this;
     let finishReason = "unknown";
-    let promptTokens = NaN;
-    let completionTokens = NaN;
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
+    };
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h;
             if (!chunk.success) {
@@ -2070,8 +2385,13 @@ var OpenAIResponsesLanguageModel = class {
             });
           } else if (isTextDeltaChunk(value)) {
             controller.enqueue({
-              type: "text-delta",
-              textDelta: value.delta
+              type: "text",
+              text: value.delta
+            });
+          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+            controller.enqueue({
+              type: "reasoning",
+              text: value.delta
             });
           } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
             ongoingToolCalls[value.output_index] = void 0;
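
Reasoning summary deltas are now forwarded as their own stream parts. A hedged consumer-side sketch; part type names below mirror this canary's model protocol and may be remapped (for example to delta-suffixed types) by other versions of the `ai` core:

    import { openai } from '@ai-sdk/openai';
    import { streamText } from 'ai';

    const result = streamText({
      model: openai.responses('o4-mini'),
      prompt: 'Plan a three-course menu.',
      providerOptions: { openai: { reasoningSummary: 'auto' } },
    });

    for await (const part of result.fullStream) {
      if (part.type === 'reasoning') {
        process.stdout.write(`[reasoning] ${part.text}`); // summary deltas
      } else if (part.type === 'text') {
        process.stdout.write(part.text);
      }
    }
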
@@ -2088,19 +2408,18 @@ var OpenAIResponsesLanguageModel = class {
               finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
               hasToolCalls
             });
-            promptTokens = value.response.usage.input_tokens;
-            completionTokens = value.response.usage.output_tokens;
-            cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
-            reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+            usage.inputTokens = value.response.usage.input_tokens;
+            usage.outputTokens = value.response.usage.output_tokens;
+            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+            usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+            usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
           } else if (isResponseAnnotationAddedChunk(value)) {
             controller.enqueue({
               type: "source",
-              source: {
-                sourceType: "url",
-                id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
-                url: value.annotation.url,
-                title: value.annotation.title
-              }
+              sourceType: "url",
+              id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+              url: value.annotation.url,
+              title: value.annotation.title
             });
           }
         },
@@ -2108,103 +2427,101 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            usage: { promptTokens, completionTokens },
-            providerMetadata: {
-              openai: {
-                responseId,
-                cachedPromptTokens,
-                reasoningTokens
-              }
-            }
+            usage,
+            providerMetadata: {
+              openai: {
+                responseId
             }
           }
         });
       }
     })
   ),
-  rawCall: {
-    rawPrompt: void 0,
-    rawSettings: {}
-  },
-  rawResponse: { headers: responseHeaders },
-  request: { body: JSON.stringify(body) },
-  warnings
+  request: { body },
+  response: { headers: responseHeaders }
   };
  }
 };
-var usageSchema = z6.object({
-  input_tokens: z6.number(),
-  input_tokens_details: z6.object({ cached_tokens: z6.number().nullish() }).nullish(),
-  output_tokens: z6.number(),
-  output_tokens_details: z6.object({ reasoning_tokens: z6.number().nullish() }).nullish()
+var usageSchema2 = z12.object({
+  input_tokens: z12.number(),
+  input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
+  output_tokens: z12.number(),
+  output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = z6.object({
-  type: z6.literal("response.output_text.delta"),
-  delta: z6.string()
+var textDeltaChunkSchema = z12.object({
+  type: z12.literal("response.output_text.delta"),
+  delta: z12.string()
 });
-var responseFinishedChunkSchema = z6.object({
-  type: z6.enum(["response.completed", "response.incomplete"]),
-  response: z6.object({
-    incomplete_details: z6.object({ reason: z6.string() }).nullish(),
-    usage: usageSchema
+var responseFinishedChunkSchema = z12.object({
+  type: z12.enum(["response.completed", "response.incomplete"]),
+  response: z12.object({
+    incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+    usage: usageSchema2
   })
 });
-var responseCreatedChunkSchema = z6.object({
-  type: z6.literal("response.created"),
-  response: z6.object({
-    id: z6.string(),
-    created_at: z6.number(),
-    model: z6.string()
+var responseCreatedChunkSchema = z12.object({
+  type: z12.literal("response.created"),
+  response: z12.object({
+    id: z12.string(),
+    created_at: z12.number(),
+    model: z12.string()
   })
 });
-var responseOutputItemDoneSchema = z6.object({
-  type: z6.literal("response.output_item.done"),
-  output_index: z6.number(),
-  item: z6.discriminatedUnion("type", [
-    z6.object({
-      type: z6.literal("message")
+var responseOutputItemDoneSchema = z12.object({
+  type: z12.literal("response.output_item.done"),
+  output_index: z12.number(),
+  item: z12.discriminatedUnion("type", [
+    z12.object({
+      type: z12.literal("message")
     }),
-    z6.object({
-      type: z6.literal("function_call"),
-      id: z6.string(),
-      call_id: z6.string(),
-      name: z6.string(),
-      arguments: z6.string(),
-      status: z6.literal("completed")
+    z12.object({
+      type: z12.literal("function_call"),
+      id: z12.string(),
+      call_id: z12.string(),
+      name: z12.string(),
+      arguments: z12.string(),
+      status: z12.literal("completed")
    })
  ])
});
-var responseFunctionCallArgumentsDeltaSchema = z6.object({
-  type: z6.literal("response.function_call_arguments.delta"),
-  item_id: z6.string(),
-  output_index: z6.number(),
-  delta: z6.string()
+var responseFunctionCallArgumentsDeltaSchema = z12.object({
+  type: z12.literal("response.function_call_arguments.delta"),
+  item_id: z12.string(),
+  output_index: z12.number(),
+  delta: z12.string()
 });
-var responseOutputItemAddedSchema = z6.object({
-  type: z6.literal("response.output_item.added"),
-  output_index: z6.number(),
-  item: z6.discriminatedUnion("type", [
-    z6.object({
-      type: z6.literal("message")
+var responseOutputItemAddedSchema = z12.object({
+  type: z12.literal("response.output_item.added"),
+  output_index: z12.number(),
+  item: z12.discriminatedUnion("type", [
+    z12.object({
+      type: z12.literal("message")
    }),
-    z6.object({
-      type: z6.literal("function_call"),
-      id: z6.string(),
-      call_id: z6.string(),
-      name: z6.string(),
-      arguments: z6.string()
+    z12.object({
+      type: z12.literal("function_call"),
+      id: z12.string(),
+      call_id: z12.string(),
+      name: z12.string(),
+      arguments: z12.string()
    })
  ])
});
-var responseAnnotationAddedSchema = z6.object({
-  type: z6.literal("response.output_text.annotation.added"),
-  annotation: z6.object({
-    type: z6.literal("url_citation"),
-    url: z6.string(),
-    title: z6.string()
+var responseAnnotationAddedSchema = z12.object({
+  type: z12.literal("response.output_text.annotation.added"),
+  annotation: z12.object({
+    type: z12.literal("url_citation"),
+    url: z12.string(),
+    title: z12.string()
  })
});
-var openaiResponsesChunkSchema = z6.union([
+var responseReasoningSummaryTextDeltaSchema = z12.object({
+  type: z12.literal("response.reasoning_summary_text.delta"),
+  item_id: z12.string(),
+  output_index: z12.number(),
+  summary_index: z12.number(),
+  delta: z12.string()
+});
+var openaiResponsesChunkSchema = z12.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
@@ -2212,7 +2529,8 @@ var openaiResponsesChunkSchema = z6.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
-  z6.object({ type: z6.string() }).passthrough()
+  responseReasoningSummaryTextDeltaSchema,
+  z12.object({ type: z12.string() }).passthrough()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2236,6 +2554,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2257,15 +2578,16 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
-var openaiResponsesProviderOptionsSchema = z6.object({
-  metadata: z6.any().nullish(),
-  parallelToolCalls: z6.boolean().nullish(),
-  previousResponseId: z6.string().nullish(),
-  store: z6.boolean().nullish(),
-  user: z6.string().nullish(),
-  reasoningEffort: z6.string().nullish(),
-  strictSchemas: z6.boolean().nullish(),
-  instructions: z6.string().nullish()
+var openaiResponsesProviderOptionsSchema = z12.object({
+  metadata: z12.any().nullish(),
+  parallelToolCalls: z12.boolean().nullish(),
+  previousResponseId: z12.string().nullish(),
+  store: z12.boolean().nullish(),
+  user: z12.string().nullish(),
+  reasoningEffort: z12.string().nullish(),
+  strictSchemas: z12.boolean().nullish(),
+  instructions: z12.string().nullish(),
+  reasoningSummary: z12.string().nullish()
 });
 export {
   OpenAIChatLanguageModel,
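
The provider metadata's responseId pairs with the previousResponseId option above to chain server-stored Responses API turns. A hedged sketch, assuming the v5 canary `generateText` surface:

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';

    const first = await generateText({
      model: openai.responses('gpt-4o'),
      prompt: 'Pick a secret number between 1 and 10.',
      providerOptions: { openai: { store: true } },
    });

    // responseId comes back in providerMetadata.openai (see doGenerate above)
    const responseId = first.providerMetadata?.openai?.responseId as string;

    const second = await generateText({
      model: openai.responses('gpt-4o'),
      prompt: 'What number did you pick?',
      providerOptions: { openai: { previousResponseId: responseId } },
    });
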
@@ -2273,6 +2595,13 @@ export {
   OpenAIEmbeddingModel,
   OpenAIImageModel,
   OpenAIResponsesLanguageModel,
-  modelMaxImagesPerCall
+  OpenAISpeechModel,
+  OpenAITranscriptionModel,
+  hasDefaultResponseFormat,
+  modelMaxImagesPerCall,
+  openAITranscriptionProviderOptions,
+  openaiCompletionProviderOptions,
+  openaiEmbeddingProviderOptions,
+  openaiProviderOptions
 };
 //# sourceMappingURL=index.mjs.map