@ai-sdk/openai 1.3.22 → 2.0.0-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +239 -45
- package/dist/index.d.mts +31 -183
- package/dist/index.d.ts +31 -183
- package/dist/index.js +947 -1114
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +942 -1109
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +157 -166
- package/{internal/dist → dist/internal}/index.d.ts +157 -166
- package/{internal/dist → dist/internal}/index.js +917 -1067
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +914 -1068
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
package/dist/index.js
CHANGED
@@ -26,19 +26,18 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);

 // src/openai-provider.ts
-var
+var import_provider_utils10 = require("@ai-sdk/provider-utils");

 // src/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
-var
+var import_zod3 = require("zod");

 // src/convert-to-openai-chat-messages.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false,
   systemMessageMode = "system"
 }) {
   const messages = [];
@@ -79,55 +78,71 @@ function convertToOpenAIChatMessages({
         messages.push({
           role: "user",
           content: content.map((part, index) => {
-            var _a, _b, _c
+            var _a, _b, _c;
             switch (part.type) {
               case "text": {
                 return { type: "text", text: part.text };
               }
-              case "image": {
-                return {
-                  type: "image_url",
-                  image_url: {
-                    url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
-                    // OpenAI specific extension: image detail
-                    detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-                  }
-                };
-              }
               case "file": {
-                if (part.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    type: "input_audio",
-                    input_audio: { data: part.data, format: "mp3" }
-                  };
+                if (part.mediaType.startsWith("image/")) {
+                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                  return {
+                    type: "image_url",
+                    image_url: {
+                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
+                      // OpenAI specific extension: image detail
+                      detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                    }
+                  };
+                } else if (part.mediaType.startsWith("audio/")) {
+                  if (part.data instanceof URL) {
+                    throw new import_provider.UnsupportedFunctionalityError({
+                      functionality: "audio file parts with URLs"
+                    });
                   }
-
-
-
-
-
-
-
-
+                  switch (part.mediaType) {
+                    case "audio/wav": {
+                      return {
+                        type: "input_audio",
+                        input_audio: {
+                          data: (0, import_provider_utils.convertToBase64)(part.data),
+                          format: "wav"
+                        }
+                      };
+                    }
+                    case "audio/mp3":
+                    case "audio/mpeg": {
+                      return {
+                        type: "input_audio",
+                        input_audio: {
+                          data: (0, import_provider_utils.convertToBase64)(part.data),
+                          format: "mp3"
+                        }
+                      };
+                    }
+                    default: {
+                      throw new import_provider.UnsupportedFunctionalityError({
+                        functionality: `audio content parts with media type ${part.mediaType}`
+                      });
+                    }
                   }
-
+                } else if (part.mediaType === "application/pdf") {
+                  if (part.data instanceof URL) {
                     throw new import_provider.UnsupportedFunctionalityError({
-                      functionality:
+                      functionality: "PDF file parts with URLs"
                     });
                   }
+                  return {
+                    type: "file",
+                    file: {
+                      filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                      file_data: `data:application/pdf;base64,${part.data}`
+                    }
+                  };
+                } else {
+                  throw new import_provider.UnsupportedFunctionalityError({
+                    functionality: `file part media type ${part.mediaType}`
+                  });
                 }
               }
             }
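
Note: the hunk above is the heart of the v2 prompt-format change. The dedicated `image` part type (with `part.image`/`part.mimeType`) is gone; everything except plain text now arrives as a `file` part carrying `mediaType` and `data`. A minimal TypeScript sketch of the resulting mapping, with a simplified stand-in part type (the real converter also handles the audio branches and URL restrictions shown above):

```ts
// Hypothetical, simplified part type for illustration only.
type FilePart = {
  mediaType: string; // e.g. "image/png", "audio/wav", "application/pdf"
  data: URL | string; // URL, or base64-encoded content
  filename?: string;
};

function toOpenAIContent(part: FilePart, index: number) {
  if (part.mediaType.startsWith("image/")) {
    // "image/*" falls back to JPEG, matching the hunk above.
    const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
    return {
      type: "image_url",
      image_url: {
        url:
          part.data instanceof URL
            ? part.data.toString()
            : `data:${mediaType};base64,${part.data}`,
      },
    };
  }
  if (part.mediaType === "application/pdf") {
    return {
      type: "file",
      file: {
        filename: part.filename ?? `part-${index}.pdf`,
        file_data: `data:application/pdf;base64,${part.data}`,
      },
    };
  }
  throw new Error(`unsupported media type: ${part.mediaType}`);
}
```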
@@ -157,41 +172,20 @@ function convertToOpenAIChatMessages({
           }
         }
       }
-
-
-
-
-
-        }
-        messages.push({
-          role: "assistant",
-          content: text,
-          function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
-        });
-      } else {
-        messages.push({
-          role: "assistant",
-          content: text,
-          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-        });
-      }
+      messages.push({
+        role: "assistant",
+        content: text,
+        tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+      });
       break;
     }
     case "tool": {
       for (const toolResponse of content) {
-
-
-
-
-
-          });
-        } else {
-          messages.push({
-            role: "tool",
-            tool_call_id: toolResponse.toolCallId,
-            content: JSON.stringify(toolResponse.result)
-          });
-        }
+        messages.push({
+          role: "tool",
+          tool_call_id: toolResponse.toolCallId,
+          content: JSON.stringify(toolResponse.result)
+        });
       }
       break;
     }
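
With legacy function calling removed, assistant and tool turns now have a single output shape each. Illustrative values only, not from the package:

```ts
// The only assistant/tool message shapes the converter emits in v2.
const assistantMessage = {
  role: "assistant" as const,
  content: "Checking the weather...",
  tool_calls: [
    {
      id: "call_123",
      type: "function" as const,
      function: { name: "getWeather", arguments: '{"city":"Berlin"}' },
    },
  ],
};

const toolMessage = {
  role: "tool" as const,
  tool_call_id: "call_123", // toolResponse.toolCallId
  content: JSON.stringify({ temperatureC: 21 }), // toolResponse.result
};
```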
@@ -204,17 +198,17 @@ function convertToOpenAIChatMessages({
   return { messages, warnings };
 }

-// src/
-function
-
-
-
-
-
-
-
-
-}
+// src/get-response-metadata.ts
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
 }

 // src/map-openai-finish-reason.ts
@@ -234,18 +228,75 @@ function mapOpenAIFinishReason(finishReason) {
   }
 }

-// src/openai-
+// src/openai-chat-options.ts
 var import_zod = require("zod");
+var openaiProviderOptions = import_zod.z.object({
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: import_zod.z.boolean().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse.
+   */
+  user: import_zod.z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+  /**
+   * Maximum number of completion tokens to generate. Useful for reasoning models.
+   */
+  maxCompletionTokens: import_zod.z.number().optional(),
+  /**
+   * Whether to enable persistence in responses API.
+   */
+  store: import_zod.z.boolean().optional(),
+  /**
+   * Metadata to associate with the request.
+   */
+  metadata: import_zod.z.record(import_zod.z.string()).optional(),
+  /**
+   * Parameters for prediction mode.
+   */
+  prediction: import_zod.z.record(import_zod.z.any()).optional(),
+  /**
+   * Whether to use structured outputs.
+   *
+   * @default true
+   */
+  structuredOutputs: import_zod.z.boolean().optional()
+});
+
+// src/openai-error.ts
+var import_zod2 = require("zod");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema =
-  error:
-    message:
+var openaiErrorDataSchema = import_zod2.z.object({
+  error: import_zod2.z.object({
+    message: import_zod2.z.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type:
-    param:
-    code:
+    type: import_zod2.z.string().nullish(),
+    param: import_zod2.z.any().nullish(),
+    code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
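
The `openaiProviderOptions` schema above replaces the 1.x constructor settings: options such as `logitBias`, `user`, `reasoningEffort`, and `structuredOutputs` are now validated per request rather than fixed per model instance. A hedged usage example, assuming the AI SDK v5 `generateText` call shape from the `ai` package:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai("o3-mini"),
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: {
      reasoningEffort: "low", // "low" | "medium" | "high"
      maxCompletionTokens: 1024,
      user: "user-123",
    },
  },
});
```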
@@ -253,74 +304,17 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
   errorToMessage: (data) => data.error.message
 });

-// src/get-response-metadata.ts
-function getResponseMetadata({
-  id,
-  model,
-  created
-}) {
-  return {
-    id: id != null ? id : void 0,
-    modelId: model != null ? model : void 0,
-    timestamp: created != null ? new Date(created * 1e3) : void 0
-  };
-}
-
 // src/openai-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
 function prepareTools({
-
-
+  tools,
+  toolChoice,
   structuredOutputs
 }) {
-
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0,
-  }
-  const toolChoice = mode.toolChoice;
-  if (useLegacyFunctionCalling) {
-    const openaiFunctions = [];
-    for (const tool of tools) {
-      if (tool.type === "provider-defined") {
-        toolWarnings.push({ type: "unsupported-tool", tool });
-      } else {
-        openaiFunctions.push({
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters
-        });
-      }
-    }
-    if (toolChoice == null) {
-      return {
-        functions: openaiFunctions,
-        function_call: void 0,
-        toolWarnings
-      };
-    }
-    const type2 = toolChoice.type;
-    switch (type2) {
-      case "auto":
-      case "none":
-      case void 0:
-        return {
-          functions: openaiFunctions,
-          function_call: void 0,
-          toolWarnings
-        };
-      case "required":
-        throw new import_provider2.UnsupportedFunctionalityError({
-          functionality: "useLegacyFunctionCalling and toolChoice: required"
-        });
-      default:
-        return {
-          functions: openaiFunctions,
-          function_call: { name: toolChoice.toolName },
-          toolWarnings
-        };
-    }
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
   const openaiTools2 = [];
   for (const tool of tools) {
@@ -339,18 +333,18 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiTools2,
+    return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiTools2,
+      return { tools: openaiTools2, toolChoice: type, toolWarnings };
     case "tool":
       return {
         tools: openaiTools2,
-
+        toolChoice: {
           type: "function",
           function: {
             name: toolChoice.toolName
@@ -361,7 +355,7 @@ function prepareTools({
     default: {
       const _exhaustiveCheck = type;
       throw new import_provider2.UnsupportedFunctionalityError({
-        functionality: `
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
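
The `prepareTools` hunks above show the whole of the v2 contract: `tools` and `toolChoice` come in as top-level call options and the function returns `{ tools, toolChoice, toolWarnings }` instead of the 1.x `functions`/`function_call` pair. A standalone sketch of the `toolChoice` mapping (simplified stand-in types, not the SDK's own):

```ts
type ToolChoice =
  | { type: "auto" | "none" | "required" }
  | { type: "tool"; toolName: string };

// "auto" | "none" | "required" pass through as strings; a specific tool
// becomes OpenAI's { type: "function", function: { name } } object.
function mapToolChoice(toolChoice: ToolChoice) {
  return toolChoice.type === "tool"
    ? { type: "function" as const, function: { name: toolChoice.toolName } }
    : toolChoice.type;
}
```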
@@ -369,32 +363,20 @@ function prepareTools({

 // src/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
-  }
-  get defaultObjectGenerationMode() {
-    if (isAudioModel(this.modelId)) {
-      return "tool";
-    }
-    return this.supportsStructuredOutputs ? "json" : "tool";
-  }
   get provider() {
     return this.config.provider;
   }
-
-    return !this.settings.downloadImages;
-  }
-  getArgs({
-    mode,
+  async getArgs({
     prompt,
-
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -403,39 +385,34 @@ var OpenAIChatLanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b, _c
-    const type = mode.type;
+    var _a, _b, _c;
     const warnings = [];
+    const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
+      provider: "openai",
+      providerOptions,
+      schema: openaiProviderOptions
+    })) != null ? _a : {};
+    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
         setting: "topK"
       });
     }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
       warnings.push({
         type: "unsupported-setting",
         setting: "responseFormat",
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
-    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
-      throw new import_provider3.UnsupportedFunctionalityError({
-        functionality: "useLegacyFunctionCalling with parallelToolCalls"
-      });
-    }
-    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
-      throw new import_provider3.UnsupportedFunctionalityError({
-        functionality: "structuredOutputs with useLegacyFunctionCalling"
-      });
-    }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        useLegacyFunctionCalling,
         systemMessageMode: getSystemMessageMode(this.modelId)
       }
     );
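
`getArgs` is now async because provider options are parsed and validated per call. A reduced sketch of that parsing-with-default step (hypothetical helper name; the package uses `parseProviderOptions` from `@ai-sdk/provider-utils` against the full `openaiProviderOptions` schema shown earlier):

```ts
import { z } from "zod";

// Trimmed-down schema for illustration only.
const optionsSchema = z.object({
  structuredOutputs: z.boolean().optional(),
  reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
});

function parseOpenAIOptions(providerOptions?: Record<string, unknown>) {
  // Options live under the "openai" namespace of providerOptions.
  const parsed = optionsSchema.parse(providerOptions?.["openai"] ?? {});
  // structuredOutputs now defaults to true, as in the hunk above.
  return { ...parsed, structuredOutputs: parsed.structuredOutputs ?? true };
}
```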
@@ -444,35 +421,38 @@ var OpenAIChatLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      logit_bias:
-      logprobs:
-      top_logprobs: typeof
-      user:
-      parallel_tool_calls:
+      logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
-      max_tokens:
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ?
-
-
-
-
-
-
-
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+        // TODO convert into provider option
+        structuredOutputs && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            strict: true,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" }
+      ) : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
-      // TODO remove in next major version; we auto-map
-      max_completion_tokens:
-      store:
-      metadata:
-      prediction:
-      reasoning_effort:
+      // TODO remove in next major version; we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
       // messages:
       messages
     };
@@ -546,85 +526,27 @@ var OpenAIChatLanguageModel = class {
       });
     }
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: this.supportsStructuredOutputs && mode.schema != null ? {
-              type: "json_schema",
-              json_schema: {
-                schema: mode.schema,
-                strict: true,
-                name: (_h = mode.name) != null ? _h : "response",
-                description: mode.description
-              }
-            } : { type: "json_object" }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: useLegacyFunctionCalling ? {
-            ...baseArgs,
-            function_call: {
-              name: mode.tool.name
-            },
-            functions: [
-              {
-                name: mode.tool.name,
-                description: mode.tool.description,
-                parameters: mode.tool.parameters
-              }
-            ]
-          } : {
-            ...baseArgs,
-            tool_choice: {
-              type: "function",
-              function: { name: mode.tool.name }
-            },
-            tools: [
-              {
-                type: "function",
-                function: {
-                  name: mode.tool.name,
-                  description: mode.tool.description,
-                  parameters: mode.tool.parameters,
-                  strict: this.supportsStructuredOutputs ? true : void 0
-                }
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
+    const {
+      tools: openaiTools2,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      structuredOutputs
+    });
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools2,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -643,105 +565,61 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = body;
     const choice = response.choices[0];
-    const
-    const
-
-
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
     }
+    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+      content.push({
+        type: "tool-call",
+        toolCallType: "function",
+        toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+        toolName: toolCall.function.name,
+        args: toolCall.function.arguments
+      });
+    }
+    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((
-      providerMetadata.openai.
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
-
-      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
-        {
-          toolCallType: "function",
-          toolCallId: (0, import_provider_utils3.generateId)(),
-          toolName: choice.message.function_call.name,
-          args: choice.message.function_call.arguments
-        }
-      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-
-
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+      },
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      request: { body: JSON.stringify(body) },
-      response: getResponseMetadata(response),
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
   async doStream(options) {
-
-    const result = await this.doGenerate(options);
-    const simulatedStream = new ReadableStream({
-      start(controller) {
-        controller.enqueue({ type: "response-metadata", ...result.response });
-        if (result.text) {
-          controller.enqueue({
-            type: "text-delta",
-            textDelta: result.text
-          });
-        }
-        if (result.toolCalls) {
-          for (const toolCall of result.toolCalls) {
-            controller.enqueue({
-              type: "tool-call-delta",
-              toolCallType: "function",
-              toolCallId: toolCall.toolCallId,
-              toolName: toolCall.toolName,
-              argsTextDelta: toolCall.args
-            });
-            controller.enqueue({
-              type: "tool-call",
-              ...toolCall
-            });
-          }
-        }
-        controller.enqueue({
-          type: "finish",
-          finishReason: result.finishReason,
-          usage: result.usage,
-          logprobs: result.logprobs,
-          providerMetadata: result.providerMetadata
-        });
-        controller.close();
-      }
-    });
-    return {
-      stream: simulatedStream,
-      rawCall: result.rawCall,
-      rawResponse: result.rawResponse,
-      warnings: result.warnings
-    };
-  }
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
-
-
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
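
The `doGenerate` return value changed shape in v2: the separate `text`/`toolCalls` fields merge into one ordered `content` array, usage is renamed and extended, and `rawCall`/`rawResponse` fold into `request`/`response`. Paraphrased as TypeScript types (not the SDK's published declarations):

```ts
type ContentPart =
  | { type: "text"; text: string }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string; // JSON-encoded function arguments
    };

type Usage = {
  inputTokens?: number; // was promptTokens
  outputTokens?: number; // was completionTokens
  totalTokens?: number; // new
  reasoningTokens?: number; // moved out of providerMetadata
  cachedInputTokens?: number; // was providerMetadata.openai.cachedPromptTokens
};
```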
@@ -757,22 +635,23 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
-
-
-
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
-    const { useLegacyFunctionCalling } = this.settings;
     const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -792,60 +671,37 @@ var OpenAIChatLanguageModel = class {
               });
             }
             if (value.usage != null) {
-
-
-
-
-
-
-
-                promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
-                completionTokens: completion_tokens != null ? completion_tokens : void 0
-              };
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
               }
-              if ((completion_tokens_details == null ? void 0 :
-                providerMetadata.openai.
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-              }
-              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
               }
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
               finishReason = mapOpenAIFinishReason(choice.finish_reason);
             }
+            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs.content;
+            }
             if ((choice == null ? void 0 : choice.delta) == null) {
               return;
             }
             const delta = choice.delta;
             if (delta.content != null) {
               controller.enqueue({
-                type: "text
-
+                type: "text",
+                text: delta.content
               });
             }
-
-
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
-            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
-              {
-                type: "function",
-                id: (0, import_provider_utils3.generateId)(),
-                function: delta.function_call,
-                index: 0
-              }
-            ] : delta.tool_calls;
-            if (mappedToolCalls != null) {
-              for (const toolCallDelta of mappedToolCalls) {
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
                 const index = toolCallDelta.index;
                 if (toolCalls[index] == null) {
                   if (toolCallDelta.type !== "function") {
@@ -860,7 +716,7 @@ var OpenAIChatLanguageModel = class {
                       message: `Expected 'id' to be a string.`
                     });
                   }
-                  if (((
+                  if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                     throw new import_provider3.InvalidResponseDataError({
                       data: toolCallDelta,
                       message: `Expected 'function.name' to be a string.`
@@ -871,12 +727,12 @@ var OpenAIChatLanguageModel = class {
                     type: "function",
                     function: {
                       name: toolCallDelta.function.name,
-                      arguments: (
+                      arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                     },
                     hasFinished: false
                   };
                   const toolCall2 = toolCalls[index];
-                  if (((
+                  if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                     if (toolCall2.function.arguments.length > 0) {
                       controller.enqueue({
                         type: "tool-call-delta",
@@ -890,7 +746,7 @@ var OpenAIChatLanguageModel = class {
                       controller.enqueue({
                         type: "tool-call",
                         toolCallType: "function",
-                        toolCallId: (
+                        toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
                         toolName: toolCall2.function.name,
                         args: toolCall2.function.arguments
                       });
@@ -903,21 +759,21 @@ var OpenAIChatLanguageModel = class {
                 if (toolCall.hasFinished) {
                   continue;
                 }
-                if (((
-                  toolCall.function.arguments += (
+                if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                 }
                 controller.enqueue({
                   type: "tool-call-delta",
                   toolCallType: "function",
                   toolCallId: toolCall.id,
                   toolName: toolCall.function.name,
-                  argsTextDelta: (
+                  argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                 });
-                if (((
+                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (
+                    toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
                     toolName: toolCall.function.name,
                     args: toolCall.function.arguments
                   });
@@ -927,125 +783,111 @@ var OpenAIChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a, _b;
             controller.enqueue({
               type: "finish",
               finishReason,
-
-              usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-              },
+              usage,
               ...providerMetadata != null ? { providerMetadata } : {}
             });
           }
         })
       ),
-
-
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiTokenUsageSchema =
-  prompt_tokens:
-  completion_tokens:
-
-
+var openaiTokenUsageSchema = import_zod3.z.object({
+  prompt_tokens: import_zod3.z.number().nullish(),
+  completion_tokens: import_zod3.z.number().nullish(),
+  total_tokens: import_zod3.z.number().nullish(),
+  prompt_tokens_details: import_zod3.z.object({
+    cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
-  completion_tokens_details:
-    reasoning_tokens:
-    accepted_prediction_tokens:
-    rejected_prediction_tokens:
+  completion_tokens_details: import_zod3.z.object({
+    reasoning_tokens: import_zod3.z.number().nullish(),
+    accepted_prediction_tokens: import_zod3.z.number().nullish(),
+    rejected_prediction_tokens: import_zod3.z.number().nullish()
   }).nullish()
 }).nullish();
-var openaiChatResponseSchema =
-  id:
-  created:
-  model:
-  choices:
-
-    message:
-      role:
-      content:
-
-
-
-
-
-
-
-          type: import_zod2.z.literal("function"),
-          function: import_zod2.z.object({
-            name: import_zod2.z.string(),
-            arguments: import_zod2.z.string()
+var openaiChatResponseSchema = import_zod3.z.object({
+  id: import_zod3.z.string().nullish(),
+  created: import_zod3.z.number().nullish(),
+  model: import_zod3.z.string().nullish(),
+  choices: import_zod3.z.array(
+    import_zod3.z.object({
+      message: import_zod3.z.object({
+        role: import_zod3.z.literal("assistant").nullish(),
+        content: import_zod3.z.string().nullish(),
+        tool_calls: import_zod3.z.array(
+          import_zod3.z.object({
+            id: import_zod3.z.string().nullish(),
+            type: import_zod3.z.literal("function"),
+            function: import_zod3.z.object({
+              name: import_zod3.z.string(),
+              arguments: import_zod3.z.string()
            })
          })
        ).nullish()
      }),
-    index:
-    logprobs:
-      content:
-
-        token:
-        logprob:
-        top_logprobs:
-
-          token:
-          logprob:
+      index: import_zod3.z.number(),
+      logprobs: import_zod3.z.object({
+        content: import_zod3.z.array(
+          import_zod3.z.object({
+            token: import_zod3.z.string(),
+            logprob: import_zod3.z.number(),
+            top_logprobs: import_zod3.z.array(
+              import_zod3.z.object({
+                token: import_zod3.z.string(),
+                logprob: import_zod3.z.number()
              })
            )
          })
-      ).
+        ).nullish()
      }).nullish(),
-    finish_reason:
+      finish_reason: import_zod3.z.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema =
-
-  id:
-  created:
-  model:
-  choices:
-
-    delta:
-      role:
-      content:
-
-
-
-
-
-
-
-
-          type: import_zod2.z.literal("function").nullish(),
-          function: import_zod2.z.object({
-            name: import_zod2.z.string().nullish(),
-            arguments: import_zod2.z.string().nullish()
+var openaiChatChunkSchema = import_zod3.z.union([
+  import_zod3.z.object({
+    id: import_zod3.z.string().nullish(),
+    created: import_zod3.z.number().nullish(),
+    model: import_zod3.z.string().nullish(),
+    choices: import_zod3.z.array(
+      import_zod3.z.object({
+        delta: import_zod3.z.object({
+          role: import_zod3.z.enum(["assistant"]).nullish(),
+          content: import_zod3.z.string().nullish(),
+          tool_calls: import_zod3.z.array(
+            import_zod3.z.object({
+              index: import_zod3.z.number(),
+              id: import_zod3.z.string().nullish(),
+              type: import_zod3.z.literal("function").nullish(),
+              function: import_zod3.z.object({
+                name: import_zod3.z.string().nullish(),
+                arguments: import_zod3.z.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-    logprobs:
-      content:
-
-        token:
-        logprob:
-        top_logprobs:
-
-          token:
-          logprob:
+        logprobs: import_zod3.z.object({
+          content: import_zod3.z.array(
+            import_zod3.z.object({
+              token: import_zod3.z.string(),
+              logprob: import_zod3.z.number(),
+              top_logprobs: import_zod3.z.array(
+                import_zod3.z.object({
+                  token: import_zod3.z.string(),
+                  logprob: import_zod3.z.number()
                })
              )
            })
-      ).
+          ).nullish()
        }).nullish(),
-    finish_reason:
-    index:
+        finish_reason: import_zod3.z.string().nullish(),
+        index: import_zod3.z.number()
      })
    ),
    usage: openaiTokenUsageSchema
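
Taken together, the streaming hunks above define the v2 stream protocol for this model. An informal union of the parts emitted (paraphrased from the code, not the SDK's published types):

```ts
type OpenAIChatStreamPart =
  | { type: "stream-start"; warnings: unknown[] } // new in v2: opens the stream with warnings
  | { type: "text"; text: string } // v1 used { type: "text-delta", textDelta }
  | {
      type: "tool-call-delta";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      argsTextDelta: string;
    }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string;
    }
  | { type: "error"; error: unknown }
  | {
      type: "finish";
      finishReason: string;
      // full usage object; v1 sent promptTokens/completionTokens (NaN fallback)
      usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number };
    };
```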
@@ -1055,9 +897,6 @@ var openaiChatChunkSchema = import_zod2.z.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
-function isAudioModel(modelId) {
-  return modelId.startsWith("gpt-4o-audio-preview");
-}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -1099,21 +938,16 @@ var reasoningModels = {
 };

 // src/openai-completion-language-model.ts
-var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var
+var import_zod5 = require("zod");

 // src/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
 function convertToOpenAICompletionPrompt({
   prompt,
-  inputFormat,
   user = "user",
   assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
   let text = "";
   if (prompt[0].role === "system") {
     text += `${prompt[0].content}
@@ -1135,13 +969,8 @@ function convertToOpenAICompletionPrompt({
           case "text": {
             return part.text;
           }
-          case "image": {
-            throw new import_provider4.UnsupportedFunctionalityError({
-              functionality: "images"
-            });
-          }
         }
-      }).join("");
+      }).filter(Boolean).join("");
       text += `${user}:
 ${userMessage}

@@ -1187,37 +1016,68 @@ ${user}:`]
   };
 }

-// src/
-
-
-
-
-
-
-
-
-
-
-
+// src/openai-completion-options.ts
+var import_zod4 = require("zod");
+var openaiCompletionProviderOptions = import_zod4.z.object({
+  /**
+  Echo back the prompt in addition to the completion.
+   */
+  echo: import_zod4.z.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+   */
+  logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+   */
+  suffix: import_zod4.z.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+   */
+  user: import_zod4.z.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+   */
+  logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
+});

 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
-    this.
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   get provider() {
     return this.config.provider;
   }
-  getArgs({
-    mode,
-    inputFormat,
+  async getArgs({
     prompt,
-
+    maxOutputTokens,
     temperature,
     topP,
     topK,
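
The new `providerOptionsName` getter enables a two-pass option lookup in the completion model's `getArgs` (next hunk): options under the generic "openai" key are parsed first, then options under the instance's own provider name (`config.provider` up to the first ".") override them, so OpenAI-compatible providers can reuse the schema under their own key. A hedged sketch of that merge; "fireworks" is a made-up provider name for illustration:

```ts
function mergeCompletionOptions(
  providerOptions: Record<string, Record<string, unknown>>,
  providerOptionsName: string,
) {
  return {
    ...(providerOptions["openai"] ?? {}),
    // The provider-specific namespace wins on conflicts.
    ...(providerOptions[providerOptionsName] ?? {}),
  };
}

mergeCompletionOptions(
  { openai: { echo: true }, fireworks: { user: "user-123" } },
  "fireworks",
); // -> { echo: true, user: "user-123" }
```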
@@ -1225,16 +1085,32 @@ var OpenAICompletionLanguageModel = class {
     presencePenalty,
     stopSequences: userStopSequences,
     responseFormat,
-
+    tools,
+    toolChoice,
+    seed,
+    providerOptions
   }) {
-    var _a;
-    const type = mode.type;
     const warnings = [];
+    const openaiOptions = {
+      ...await (0, import_provider_utils4.parseProviderOptions)({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await (0, import_provider_utils4.parseProviderOptions)({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
     if (topK != null) {
-      warnings.push({
-
-
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
+    }
+    if (tools == null ? void 0 : tools.length) {
+      warnings.push({ type: "unsupported-setting", setting: "tools" });
+    }
+    if (toolChoice != null) {
+      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
     }
     if (responseFormat != null && responseFormat.type !== "text") {
       warnings.push({
@@ -1243,61 +1119,36 @@ var OpenAICompletionLanguageModel = class {
         details: "JSON response format is not supported."
       });
     }
-    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt
+    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        echo: openaiOptions.echo,
+        logit_bias: openaiOptions.logitBias,
+        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+        suffix: openaiOptions.suffix,
+        user: openaiOptions.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        seed,
+        // prompt:
+        prompt: completionPrompt,
+        // stop sequences:
+        stop: stop.length > 0 ? stop : void 0
+      },
+      warnings
     };
-    switch (type) {
-      case "regular": {
-        if ((_a = mode.tools) == null ? void 0 : _a.length) {
-          throw new import_provider5.UnsupportedFunctionalityError({
-            functionality: "tools"
-          });
-        }
-        if (mode.toolChoice) {
-          throw new import_provider5.UnsupportedFunctionalityError({
-            functionality: "toolChoice"
-          });
-        }
-        return { args: baseArgs, warnings };
-      }
-      case "object-json": {
-        throw new import_provider5.UnsupportedFunctionalityError({
-          functionality: "object-json mode"
-        });
-      }
-      case "object-tool": {
-        throw new import_provider5.UnsupportedFunctionalityError({
-          functionality: "object-tool mode"
-        });
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
-
+    var _a, _b, _c;
+    const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1316,30 +1167,37 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
-      text: choice.text,
+      content: [{ type: "text", text: choice.text }],
       usage: {
-
-
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
-
-
-
-
-
-
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      providerMetadata,
+      warnings
    };
  }
  async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
-
-
+      stream_options: {
+        include_usage: true
+      }
    };
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url({
@@ -1355,17 +1213,20 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1355
1213
|
abortSignal: options.abortSignal,
|
|
1356
1214
|
fetch: this.config.fetch
|
|
1357
1215
|
});
|
|
1358
|
-
const { prompt: rawPrompt, ...rawSettings } = args;
|
|
1359
1216
|
let finishReason = "unknown";
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1217
|
+
const providerMetadata = { openai: {} };
|
|
1218
|
+
const usage = {
|
|
1219
|
+
inputTokens: void 0,
|
|
1220
|
+
outputTokens: void 0,
|
|
1221
|
+
totalTokens: void 0
|
|
1363
1222
|
};
|
|
1364
|
-
let logprobs;
|
|
1365
1223
|
let isFirstChunk = true;
|
|
1366
1224
|
return {
|
|
1367
1225
|
stream: response.pipeThrough(
|
|
1368
1226
|
new TransformStream({
|
|
1227
|
+
start(controller) {
|
|
1228
|
+
controller.enqueue({ type: "stream-start", warnings });
|
|
1229
|
+
},
|
|
1369
1230
|
transform(chunk, controller) {
|
|
1370
1231
|
if (!chunk.success) {
|
|
1371
1232
|
finishReason = "error";
|
|
@@ -1386,127 +1247,140 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1386
1247
|
});
|
|
1387
1248
|
}
|
|
1388
1249
|
if (value.usage != null) {
|
|
1389
|
-
usage =
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
};
|
|
1250
|
+
usage.inputTokens = value.usage.prompt_tokens;
|
|
1251
|
+
usage.outputTokens = value.usage.completion_tokens;
|
|
1252
|
+
usage.totalTokens = value.usage.total_tokens;
|
|
1393
1253
|
}
|
|
1394
1254
|
const choice = value.choices[0];
|
|
1395
1255
|
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
1396
1256
|
finishReason = mapOpenAIFinishReason(choice.finish_reason);
|
|
1397
1257
|
}
|
|
1258
|
+
if ((choice == null ? void 0 : choice.logprobs) != null) {
|
|
1259
|
+
providerMetadata.openai.logprobs = choice.logprobs;
|
|
1260
|
+
}
|
|
1398
1261
|
if ((choice == null ? void 0 : choice.text) != null) {
|
|
1399
1262
|
controller.enqueue({
|
|
1400
|
-
type: "text
|
|
1401
|
-
|
|
1263
|
+
type: "text",
|
|
1264
|
+
text: choice.text
|
|
1402
1265
|
});
|
|
1403
1266
|
}
|
|
1404
|
-
const mappedLogprobs = mapOpenAICompletionLogProbs(
|
|
1405
|
-
choice == null ? void 0 : choice.logprobs
|
|
1406
|
-
);
|
|
1407
|
-
if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
|
|
1408
|
-
if (logprobs === void 0) logprobs = [];
|
|
1409
|
-
logprobs.push(...mappedLogprobs);
|
|
1410
|
-
}
|
|
1411
1267
|
},
|
|
1412
1268
|
flush(controller) {
|
|
1413
1269
|
controller.enqueue({
|
|
1414
1270
|
type: "finish",
|
|
1415
1271
|
finishReason,
|
|
1416
|
-
|
|
1272
|
+
providerMetadata,
|
|
1417
1273
|
usage
|
|
1418
1274
|
});
|
|
1419
1275
|
}
|
|
1420
1276
|
})
|
|
1421
1277
|
),
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
warnings,
|
|
1425
|
-
request: { body: JSON.stringify(body) }
|
|
1278
|
+
request: { body },
|
|
1279
|
+
response: { headers: responseHeaders }
|
|
1426
1280
|
};
|
|
1427
1281
|
}
|
|
1428
1282
|
};
|
|
1429
|
-
var
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1283
|
+
var usageSchema = import_zod5.z.object({
|
|
1284
|
+
prompt_tokens: import_zod5.z.number(),
|
|
1285
|
+
completion_tokens: import_zod5.z.number(),
|
|
1286
|
+
total_tokens: import_zod5.z.number()
|
|
1287
|
+
});
|
|
1288
|
+
var openaiCompletionResponseSchema = import_zod5.z.object({
|
|
1289
|
+
id: import_zod5.z.string().nullish(),
|
|
1290
|
+
created: import_zod5.z.number().nullish(),
|
|
1291
|
+
model: import_zod5.z.string().nullish(),
|
|
1292
|
+
choices: import_zod5.z.array(
|
|
1293
|
+
import_zod5.z.object({
|
|
1294
|
+
text: import_zod5.z.string(),
|
|
1295
|
+
finish_reason: import_zod5.z.string(),
|
|
1296
|
+
logprobs: import_zod5.z.object({
|
|
1297
|
+
tokens: import_zod5.z.array(import_zod5.z.string()),
|
|
1298
|
+
token_logprobs: import_zod5.z.array(import_zod5.z.number()),
|
|
1299
|
+
top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
|
|
1441
1300
|
}).nullish()
|
|
1442
1301
|
})
|
|
1443
1302
|
),
|
|
1444
|
-
usage:
|
|
1445
|
-
prompt_tokens: import_zod3.z.number(),
|
|
1446
|
-
completion_tokens: import_zod3.z.number()
|
|
1447
|
-
})
|
|
1303
|
+
usage: usageSchema.nullish()
|
|
1448
1304
|
});
|
|
1449
|
-
var openaiCompletionChunkSchema =
|
|
1450
|
-
|
|
1451
|
-
id:
|
|
1452
|
-
created:
|
|
1453
|
-
model:
|
|
1454
|
-
choices:
|
|
1455
|
-
|
|
1456
|
-
text:
|
|
1457
|
-
finish_reason:
|
|
1458
|
-
index:
|
|
1459
|
-
logprobs:
|
|
1460
|
-
tokens:
|
|
1461
|
-
token_logprobs:
|
|
1462
|
-
top_logprobs:
|
|
1305
|
+
var openaiCompletionChunkSchema = import_zod5.z.union([
|
|
1306
|
+
import_zod5.z.object({
|
|
1307
|
+
id: import_zod5.z.string().nullish(),
|
|
1308
|
+
created: import_zod5.z.number().nullish(),
|
|
1309
|
+
model: import_zod5.z.string().nullish(),
|
|
1310
|
+
choices: import_zod5.z.array(
|
|
1311
|
+
import_zod5.z.object({
|
|
1312
|
+
text: import_zod5.z.string(),
|
|
1313
|
+
finish_reason: import_zod5.z.string().nullish(),
|
|
1314
|
+
index: import_zod5.z.number(),
|
|
1315
|
+
logprobs: import_zod5.z.object({
|
|
1316
|
+
tokens: import_zod5.z.array(import_zod5.z.string()),
|
|
1317
|
+
token_logprobs: import_zod5.z.array(import_zod5.z.number()),
|
|
1318
|
+
top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
|
|
1463
1319
|
}).nullish()
|
|
1464
1320
|
})
|
|
1465
1321
|
),
|
|
1466
|
-
usage:
|
|
1467
|
-
prompt_tokens: import_zod3.z.number(),
|
|
1468
|
-
completion_tokens: import_zod3.z.number()
|
|
1469
|
-
}).nullish()
|
|
1322
|
+
usage: usageSchema.nullish()
|
|
1470
1323
|
}),
|
|
1471
1324
|
openaiErrorDataSchema
|
|
1472
1325
|
]);
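
Note: the completion model's v2 results replace the old `text`/`rawResponse` fields with a `content` part array, normalized usage names, and logprobs under provider metadata. A minimal consumer sketch assuming the field names added above (the call site and `callOptions` are hypothetical, not part of this diff):

// TypeScript sketch: reading a v2 doGenerate result from the completion model.
const result = await model.doGenerate(callOptions); // model/callOptions assumed
const text = result.content
  .filter((part) => part.type === "text")
  .map((part) => part.text)
  .join("");
// Usage is now inputTokens/outputTokens/totalTokens (was prompt/completion tokens).
console.log(text, result.usage.totalTokens);
// Logprobs moved from the top level into provider metadata.
console.log(result.providerMetadata?.openai.logprobs);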

  // src/openai-embedding-model.ts
- var …
+ var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var …
+ var import_zod7 = require("zod");
+
+ // src/openai-embedding-options.ts
+ var import_zod6 = require("zod");
+ var openaiEmbeddingProviderOptions = import_zod6.z.object({
+   /**
+   The number of dimensions the resulting output embeddings should have.
+   Only supported in text-embedding-3 and later models.
+    */
+   dimensions: import_zod6.z.number().optional(),
+   /**
+   A unique identifier representing your end-user, which can help OpenAI to
+   monitor and detect abuse. Learn more.
+    */
+   user: import_zod6.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
-   constructor(modelId, …
-   this.specificationVersion = " …
+   constructor(modelId, config) {
+     this.specificationVersion = "v2";
+     this.maxEmbeddingsPerCall = 2048;
+     this.supportsParallelCalls = true;
      this.modelId = modelId;
-     this.settings = settings;
      this.config = config;
    }
    get provider() {
      return this.config.provider;
    }
-   get maxEmbeddingsPerCall() {
-     var _a;
-     return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
-   }
-   get supportsParallelCalls() {
-     var _a;
-     return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
-   }
    async doEmbed({
      values,
      headers,
-     abortSignal
+     abortSignal,
+     providerOptions
    }) {
+     var _a;
      if (values.length > this.maxEmbeddingsPerCall) {
-       throw new …
+       throw new import_provider5.TooManyEmbeddingValuesForCallError({
          provider: this.provider,
          modelId: this.modelId,
          maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
          values
        });
      }
-     const …
+     const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
+       provider: "openai",
+       providerOptions,
+       schema: openaiEmbeddingProviderOptions
+     })) != null ? _a : {};
+     const {
+       responseHeaders,
+       value: response,
+       rawValue
+     } = await (0, import_provider_utils5.postJsonToApi)({
        url: this.config.url({
          path: "/embeddings",
          modelId: this.modelId
@@ -1516,8 +1390,8 @@ var OpenAIEmbeddingModel = class {
        model: this.modelId,
        input: values,
        encoding_format: "float",
-       dimensions: …
-       user: …
+       dimensions: openaiOptions.dimensions,
+       user: openaiOptions.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
@@ -1529,18 +1403,18 @@ var OpenAIEmbeddingModel = class {
      return {
        embeddings: response.data.map((item) => item.embedding),
        usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-       …
+       response: { headers: responseHeaders, body: rawValue }
      };
    }
  };
- var openaiTextEmbeddingResponseSchema = …
-   data: …
-   usage: …
+ var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
+   data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
+   usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
 });
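
Note: `maxEmbeddingsPerCall` and `supportsParallelCalls` are no longer constructor settings, and `dimensions`/`user` now travel as provider options validated by `openaiEmbeddingProviderOptions` above. A sketch (the `embedMany` call from the `ai` package is an assumption, not part of this diff):

// TypeScript sketch, assuming the `ai` consumer package.
import { embedMany } from "ai"; // assumed consumer API
import { openai } from "@ai-sdk/openai";

const { embeddings } = await embedMany({
  model: openai.embedding("text-embedding-3-small"),
  values: ["sunny day at the beach"],
  providerOptions: {
    openai: { dimensions: 512, user: "user-123" } // fields from the schema above
  }
});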

  // src/openai-image-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var …
+ var import_zod8 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1552,15 +1426,14 @@ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

  // src/openai-image-model.ts
  var OpenAIImageModel = class {
-   constructor(modelId, …
+   constructor(modelId, config) {
      this.modelId = modelId;
-     this.settings = settings;
      this.config = config;
-     this.specificationVersion = " …
+     this.specificationVersion = "v2";
    }
    get maxImagesPerCall() {
-     var _a …
-     return ( …
+     var _a;
+     return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
    }
    get provider() {
      return this.config.provider;
@@ -1616,24 +1489,78 @@ var OpenAIImageModel = class {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
+     },
+     providerMetadata: {
+       openai: {
+         images: response.data.map(
+           (item) => item.revised_prompt ? {
+             revisedPrompt: item.revised_prompt
+           } : null
+         )
+       }
      }
    };
  }
  };
- var openaiImageResponseSchema = …
-   data: …
+ var openaiImageResponseSchema = import_zod8.z.object({
+   data: import_zod8.z.array(
+     import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+   )
 });
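
Note: revised prompts are now surfaced per image through `providerMetadata.openai.images`, with `null` entries where the API returned no `revised_prompt`. A sketch (the `generateImage` helper from the `ai` package is an assumption):

// TypeScript sketch, assuming the `ai` consumer package.
import { experimental_generateImage as generateImage } from "ai"; // assumed
import { openai } from "@ai-sdk/openai";

const result = await generateImage({
  model: openai.image("dall-e-3"),
  prompt: "A salamander at sunrise"
});
// One entry per returned image, shaped as in the mapping above.
console.log(result.providerMetadata?.openai?.images?.[0]?.revisedPrompt);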

+ // src/openai-tools.ts
+ var import_zod9 = require("zod");
+ var WebSearchPreviewParameters = import_zod9.z.object({});
+ function webSearchPreviewTool({
+   searchContextSize,
+   userLocation
+ } = {}) {
+   return {
+     type: "provider-defined",
+     id: "openai.web_search_preview",
+     args: {
+       searchContextSize,
+       userLocation
+     },
+     parameters: WebSearchPreviewParameters
+   };
+ }
+ var openaiTools = {
+   webSearchPreview: webSearchPreviewTool
+ };
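
Note: this tool factory moved up from the bottom of the bundle (the old copy is deleted further down) and is exposed as `provider.tools`. A usage sketch (the `generateText` call is an assumption; the tool id and args come from the code above, and the `searchContextSize` value is assumed, since the diff does not constrain it):

// TypeScript sketch, assuming the `ai` consumer package.
import { generateText } from "ai"; // assumed consumer API
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai.responses("gpt-4o-mini"),
  prompt: "What happened in tech news today?",
  tools: {
    web_search_preview: openai.tools.webSearchPreview({
      searchContextSize: "high" // assumed value
    })
  }
});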
+
  // src/openai-transcription-model.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var …
- …
+ var import_zod11 = require("zod");
+
+ // src/openai-transcription-options.ts
+ var import_zod10 = require("zod");
+ var openAITranscriptionProviderOptions = import_zod10.z.object({
+   /**
+    * Additional information to include in the transcription response.
+    */
+   include: import_zod10.z.array(import_zod10.z.string()).optional(),
+   /**
+    * The language of the input audio in ISO-639-1 format.
+    */
+   language: import_zod10.z.string().optional(),
+   /**
+    * An optional text to guide the model's style or continue a previous audio segment.
+    */
+   prompt: import_zod10.z.string().optional(),
+   /**
+    * The sampling temperature, between 0 and 1.
+    * @default 0
+    */
+   temperature: import_zod10.z.number().min(0).max(1).default(0).optional(),
+   /**
+    * The timestamp granularities to populate for this transcription.
+    * @default ['segment']
+    */
+   timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).optional()
 });
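
Note: transcription settings are now parsed against the `openAITranscriptionProviderOptions` schema above and appended to the multipart form as snake_case fields. A sketch (the `transcribe` helper from the `ai` package is an assumption):

// TypeScript sketch, assuming the `ai` consumer package.
import { experimental_transcribe as transcribe } from "ai"; // assumed
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("audio.mp3"),
  providerOptions: {
    openai: {
      language: "en", // ISO-639-1, per the schema above
      timestampGranularities: ["word"]
    }
  }
});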
+
+ // src/openai-transcription-model.ts
  var languageMap = {
    afrikaans: "af",
    arabic: "ar",
@@ -1702,17 +1629,16 @@ var OpenAITranscriptionModel = class {
  get provider() {
    return this.config.provider;
  }
- getArgs({
+ async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
-   var _a, _b, _c, _d, _e;
    const warnings = [];
-   const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+   const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
      provider: "openai",
      providerOptions,
-     schema: …
+     schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
@@ -1720,15 +1646,14 @@ var OpenAITranscriptionModel = class {
    formData.append("file", new File([blob], "audio", { type: mediaType }));
    if (openAIOptions) {
      const transcriptionModelOptions = {
-       include: …
-       language: …
-       prompt: …
-       temperature: …
-       timestamp_granularities: …
+       include: openAIOptions.include,
+       language: openAIOptions.language,
+       prompt: openAIOptions.prompt,
+       temperature: openAIOptions.temperature,
+       timestamp_granularities: openAIOptions.timestampGranularities
      };
-     for (const key …
-     …
-     if (value !== void 0) {
+     for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+       if (value != null) {
          formData.append(key, String(value));
        }
      }
@@ -1741,7 +1666,7 @@ var OpenAITranscriptionModel = class {
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-   const { formData, warnings } = this.getArgs(options);
+   const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
@@ -1780,26 +1705,25 @@ var OpenAITranscriptionModel = class {
    };
  }
  };
- var openaiTranscriptionResponseSchema = …
-   text: …
-   language: …
-   duration: …
-   words: …
-   …
-     word: …
-     start: …
-     end: …
+ var openaiTranscriptionResponseSchema = import_zod11.z.object({
+   text: import_zod11.z.string(),
+   language: import_zod11.z.string().nullish(),
+   duration: import_zod11.z.number().nullish(),
+   words: import_zod11.z.array(
+     import_zod11.z.object({
+       word: import_zod11.z.string(),
+       start: import_zod11.z.number(),
+       end: import_zod11.z.number()
      })
    ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
- var …
- var …
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod12 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
- var …
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider6 = require("@ai-sdk/provider");
  function convertToOpenAIResponsesMessages({
    prompt,
    systemMessageMode
@@ -1838,38 +1762,35 @@ function convertToOpenAIResponsesMessages({
    messages.push({
      role: "user",
      content: content.map((part, index) => {
-       var _a, _b, _c …
+       var _a, _b, _c;
        switch (part.type) {
          case "text": {
            return { type: "input_text", text: part.text };
          }
-         case "image": {
-           return {
-             type: "input_image",
-             image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils8.convertUint8ArrayToBase64)(part.image)}`,
-             // OpenAI specific extension: image detail
-             detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-           };
-         }
          case "file": {
-           if (part. …
-           …
-           }
-           default: {
-             throw new import_provider7.UnsupportedFunctionalityError({
-               functionality: "Only PDF files are supported in user messages"
+           if (part.mediaType.startsWith("image/")) {
+             const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+             return {
+               type: "input_image",
+               image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+               // OpenAI specific extension: image detail
+               detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+             };
+           } else if (part.mediaType === "application/pdf") {
+             if (part.data instanceof URL) {
+               throw new import_provider6.UnsupportedFunctionalityError({
+                 functionality: "PDF file parts with URLs"
                });
              }
+             return {
+               type: "input_file",
+               filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+               file_data: `data:application/pdf;base64,${part.data}`
+             };
+           } else {
+             throw new import_provider6.UnsupportedFunctionalityError({
+               functionality: `file part media type ${part.mediaType}`
+             });
           }
         }
       }
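
Note: image parts are now ordinary file parts with an `image/*` media type, and base64 PDF parts map to `input_file` (PDF URLs still throw). A prompt sketch in the v2 message shape the converter above consumes; `pdfBase64` is a hypothetical variable:

// TypeScript sketch of a user message this converter accepts.
const prompt = [
  {
    role: "user" as const,
    content: [
      { type: "text" as const, text: "Summarize this document." },
      {
        type: "file" as const,
        mediaType: "application/pdf",
        data: pdfBase64, // base64 string; URLs are rejected for PDFs
        filename: "report.pdf" // optional; defaults to part-<index>.pdf
      }
    ]
  }
];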
@@ -1938,18 +1859,17 @@ function mapOpenAIResponseFinishReason({
  }

  // src/responses/openai-responses-prepare-tools.ts
- var …
+ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
-   …
+   tools,
+   toolChoice,
    strict
  }) {
-   …
-   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
    const toolWarnings = [];
    if (tools == null) {
-     return { tools: void 0, …
+     return { tools: void 0, toolChoice: void 0, toolWarnings };
    }
-   const toolChoice = mode.toolChoice;
    const openaiTools2 = [];
    for (const tool of tools) {
      switch (tool.type) {
@@ -1982,37 +1902,24 @@ function prepareResponsesTools({
    }
  }
  if (toolChoice == null) {
-   return { tools: openaiTools2, …
+   return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
    case "auto":
    case "none":
    case "required":
-     return { tools: openaiTools2, …
-   case "tool":
-     if (toolChoice.toolName === "web_search_preview") {
-       return {
-         tools: openaiTools2,
-         tool_choice: {
-           type: "web_search_preview"
-         },
-         toolWarnings
-       };
-     }
+     return { tools: openaiTools2, toolChoice: type, toolWarnings };
+   case "tool":
      return {
        tools: openaiTools2,
-       …
-       type: "function",
-       name: toolChoice.toolName
-       },
+       toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
        toolWarnings
      };
-   }
    default: {
      const _exhaustiveCheck = type;
-     throw new …
-       functionality: ` …
+     throw new import_provider7.UnsupportedFunctionalityError({
+       functionality: `tool choice type: ${_exhaustiveCheck}`
      });
    }
  }
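
Note: `prepareResponsesTools` now receives `tools` and `toolChoice` directly instead of a v1 `mode` object, and returns a camelCase `toolChoice` for the caller to map into the request. A sketch of the mapping under the shapes above (inputs are hypothetical; this is an internal, non-exported function):

// TypeScript sketch of the toolChoice mapping implemented above.
const { toolChoice } = prepareResponsesTools({
  tools: [openai.tools.webSearchPreview()], // assumed tool instance
  toolChoice: { type: "tool", toolName: "web_search_preview" },
  strict: true
});
// -> { type: "web_search_preview" }; any other toolName yields
// { type: "function", name: toolName }, and "auto"/"none"/"required" pass through.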
@@ -2021,18 +1928,18 @@
  // src/responses/openai-responses-language-model.ts
  var OpenAIResponsesLanguageModel = class {
    constructor(modelId, config) {
-     this.specificationVersion = " …
-     this. …
-     …
+     this.specificationVersion = "v2";
+     this.supportedUrls = {
+       "image/*": [/^https?:\/\/.*$/]
+     };
      this.modelId = modelId;
      this.config = config;
    }
    get provider() {
      return this.config.provider;
    }
-   getArgs({
-     …
-     maxTokens,
+   async getArgs({
+     maxOutputTokens,
      temperature,
      stopSequences,
      topP,
@@ -2041,24 +1948,19 @@ var OpenAIResponsesLanguageModel = class {
    frequencyPenalty,
    seed,
    prompt,
-   …
+   providerOptions,
+   tools,
+   toolChoice,
    responseFormat
  }) {
-   var _a, _b …
+   var _a, _b;
    const warnings = [];
    const modelConfig = getResponsesModelConfig(this.modelId);
-   const type = mode.type;
    if (topK != null) {
-     warnings.push({
-       type: "unsupported-setting",
-       setting: "topK"
-     });
+     warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (seed != null) {
-     warnings.push({
-       type: "unsupported-setting",
-       setting: "seed"
-     });
+     warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    if (presencePenalty != null) {
      warnings.push({
@@ -2073,19 +1975,16 @@ var OpenAIResponsesLanguageModel = class {
    });
  }
  if (stopSequences != null) {
-   warnings.push({
-     type: "unsupported-setting",
-     setting: "stopSequences"
-   });
+   warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
    prompt,
    systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, …
+ const openaiOptions = await (0, import_provider_utils8.parseProviderOptions)({
    provider: "openai",
-   providerOptions
+   providerOptions,
    schema: openaiResponsesProviderOptionsSchema
  });
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -2094,7 +1993,7 @@ var OpenAIResponsesLanguageModel = class {
    input: messages,
    temperature,
    top_p: topP,
-   max_output_tokens: …
+   max_output_tokens: maxOutputTokens,
    ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
      text: {
        format: responseFormat.schema != null ? {
@@ -2146,208 +2045,178 @@ var OpenAIResponsesLanguageModel = class {
      });
    }
  }
- …
-     return {
-       args: {
-         ...baseArgs,
-         text: {
-           format: mode.schema != null ? {
-             type: "json_schema",
-             strict: isStrict,
-             name: (_c = mode.name) != null ? _c : "response",
-             description: mode.description,
-             schema: mode.schema
-           } : { type: "json_object" }
-         }
-       },
-       warnings
-     };
-   }
-   case "object-tool": {
-     return {
-       args: {
-         ...baseArgs,
-         tool_choice: { type: "function", name: mode.tool.name },
-         tools: [
-           {
-             type: "function",
-             name: mode.tool.name,
-             description: mode.tool.description,
-             parameters: mode.tool.parameters,
-             strict: isStrict
-           }
-         ]
-       },
-       warnings
-     };
-   }
-   default: {
-     const _exhaustiveCheck = type;
-     throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-   }
- }
+ const {
+   tools: openaiTools2,
+   toolChoice: openaiToolChoice,
+   toolWarnings
+ } = prepareResponsesTools({
+   tools,
+   toolChoice,
+   strict: isStrict
+ });
+ return {
+   args: {
+     ...baseArgs,
+     tools: openaiTools2,
+     tool_choice: openaiToolChoice
+   },
+   warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
-   var _a, _b, _c, _d, _e, _f, _g;
-   const { args: body, warnings } = this.getArgs(options);
+   var _a, _b, _c, _d, _e, _f, _g, _h;
+   const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
-   } = await (0, …
+   } = await (0, import_provider_utils8.postJsonToApi)({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
-     headers: (0, …
+     headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
-     successfulResponseHandler: (0, …
-     …
-       id: …
-       created_at: …
-       model: …
-       output: …
-       …
-       type: …
-       role: …
-       content: …
-       …
-       type: …
-       text: …
-       annotations: …
-       …
-       type: …
-       start_index: …
-       end_index: …
-       url: …
-       title: …
+     successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+       import_zod12.z.object({
+         id: import_zod12.z.string(),
+         created_at: import_zod12.z.number(),
+         model: import_zod12.z.string(),
+         output: import_zod12.z.array(
+           import_zod12.z.discriminatedUnion("type", [
+             import_zod12.z.object({
+               type: import_zod12.z.literal("message"),
+               role: import_zod12.z.literal("assistant"),
+               content: import_zod12.z.array(
+                 import_zod12.z.object({
+                   type: import_zod12.z.literal("output_text"),
+                   text: import_zod12.z.string(),
+                   annotations: import_zod12.z.array(
+                     import_zod12.z.object({
+                       type: import_zod12.z.literal("url_citation"),
+                       start_index: import_zod12.z.number(),
+                       end_index: import_zod12.z.number(),
+                       url: import_zod12.z.string(),
+                       title: import_zod12.z.string()
                      })
                    )
                  })
               )
             }),
-             …
-             type: …
-             call_id: …
-             name: …
-             arguments: …
+             import_zod12.z.object({
+               type: import_zod12.z.literal("function_call"),
+               call_id: import_zod12.z.string(),
+               name: import_zod12.z.string(),
+               arguments: import_zod12.z.string()
             }),
-             …
-             type: …
+             import_zod12.z.object({
+               type: import_zod12.z.literal("web_search_call")
             }),
-             …
-             type: …
+             import_zod12.z.object({
+               type: import_zod12.z.literal("computer_call")
             }),
-             …
-             type: …
-             summary: …
-             …
-             type: …
-             text: …
+             import_zod12.z.object({
+               type: import_zod12.z.literal("reasoning"),
+               summary: import_zod12.z.array(
+                 import_zod12.z.object({
+                   type: import_zod12.z.literal("summary_text"),
+                   text: import_zod12.z.string()
                 })
               )
             })
           ])
         ),
-         incomplete_details: …
-         usage: …
+         incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
+         usage: usageSchema2
       })
     ),
     abortSignal: options.abortSignal,
     fetch: this.config.fetch
   });
-   const …
-   const …
-   …
+   const content = [];
+   for (const part of response.output) {
+     switch (part.type) {
+       case "reasoning": {
+         content.push({
+           type: "reasoning",
+           text: part.summary.map((summary) => summary.text).join()
+         });
+         break;
+       }
+       case "message": {
+         for (const contentPart of part.content) {
+           content.push({
+             type: "text",
+             text: contentPart.text
+           });
+           for (const annotation of contentPart.annotations) {
+             content.push({
+               type: "source",
+               sourceType: "url",
+               id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils8.generateId)(),
+               url: annotation.url,
+               title: annotation.title
+             });
+           }
+         }
+         break;
+       }
+       case "function_call": {
+         content.push({
+           type: "tool-call",
+           toolCallType: "function",
+           toolCallId: part.call_id,
+           toolName: part.name,
+           args: part.arguments
+         });
+         break;
+       }
+     }
+   }
   return {
-     …
-     sources: outputTextElements.flatMap(
-       (content) => content.annotations.map((annotation) => {
-         var _a2, _b2, _c2;
-         return {
-           sourceType: "url",
-           id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils9.generateId)(),
-           url: annotation.url,
-           title: annotation.title
-         };
-       })
-     ),
+     content,
     finishReason: mapOpenAIResponseFinishReason({
-       finishReason: ( …
-       hasToolCalls: …
+       finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+       hasToolCalls: content.some((part) => part.type === "tool-call")
     }),
-     toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
-     reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
-       type: "text",
-       text: summary.text
-     })) : void 0,
     usage: {
-       …
-       rawSettings: {}
-     },
-     rawResponse: {
-       headers: responseHeaders,
-       body: rawResponse
-     },
-     request: {
-       body: JSON.stringify(body)
+       inputTokens: response.usage.input_tokens,
+       outputTokens: response.usage.output_tokens,
+       totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+       reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+       cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
     },
+     request: { body },
     response: {
       id: response.id,
       timestamp: new Date(response.created_at * 1e3),
-       modelId: response.model
+       modelId: response.model,
+       headers: responseHeaders,
+       body: rawResponse
     },
     providerMetadata: {
       openai: {
-         responseId: response.id
-         cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
-         reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
+         responseId: response.id
       }
     },
     warnings
   };
  }
  async doStream(options) {
-   const { args: body, warnings } = this.getArgs(options);
-   const { responseHeaders, value: response } = await (0, …
+   const { args: body, warnings } = await this.getArgs(options);
+   const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
     url: this.config.url({
       path: "/responses",
       modelId: this.modelId
     }),
-     headers: (0, …
+     headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
     body: {
       ...body,
       stream: true
     },
     failedResponseHandler: openaiFailedResponseHandler,
-     successfulResponseHandler: (0, …
+     successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
       openaiResponsesChunkSchema
     ),
     abortSignal: options.abortSignal,
@@ -2355,16 +2224,20 @@ var OpenAIResponsesLanguageModel = class {
   });
   const self = this;
   let finishReason = "unknown";
-   …
+   const usage = {
+     inputTokens: void 0,
+     outputTokens: void 0,
+     totalTokens: void 0
+   };
   let responseId = null;
   const ongoingToolCalls = {};
   let hasToolCalls = false;
   return {
     stream: response.pipeThrough(
       new TransformStream({
+         start(controller) {
+           controller.enqueue({ type: "stream-start", warnings });
+         },
         transform(chunk, controller) {
           var _a, _b, _c, _d, _e, _f, _g, _h;
           if (!chunk.success) {
@@ -2408,13 +2281,13 @@ var OpenAIResponsesLanguageModel = class {
           });
         } else if (isTextDeltaChunk(value)) {
           controller.enqueue({
-             type: "text …
-             …
+             type: "text",
+             text: value.delta
           });
         } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
           controller.enqueue({
             type: "reasoning",
-             …
+             text: value.delta
           });
         } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
           ongoingToolCalls[value.output_index] = void 0;
@@ -2431,19 +2304,18 @@ var OpenAIResponsesLanguageModel = class {
             finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
             hasToolCalls
           });
-           …
-           reasoningTokens = ( …
+           usage.inputTokens = value.response.usage.input_tokens;
+           usage.outputTokens = value.response.usage.output_tokens;
+           usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+           usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+           usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
           controller.enqueue({
             type: "source",
-             …
-             title: value.annotation.title
-             }
+             sourceType: "url",
+             id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
+             url: value.annotation.url,
+             title: value.annotation.title
           });
         }
       },
@@ -2451,110 +2323,101 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "finish",
           finishReason,
-           usage …
-           …
-           responseId,
-           cachedPromptTokens,
-           reasoningTokens
-           }
+           usage,
+           providerMetadata: {
+             openai: {
+               responseId
             }
           }
         });
       }
     })
   ),
-   …
-   rawSettings: {}
-   },
-   rawResponse: { headers: responseHeaders },
-   request: { body: JSON.stringify(body) },
-   warnings
+   request: { body },
+   response: { headers: responseHeaders }
  };
  }
  };
- var …
-   input_tokens: …
-   input_tokens_details: …
-   output_tokens: …
-   output_tokens_details: …
+ var usageSchema2 = import_zod12.z.object({
+   input_tokens: import_zod12.z.number(),
+   input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
+   output_tokens: import_zod12.z.number(),
+   output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
 });
- var textDeltaChunkSchema = …
-   type: …
-   delta: …
+ var textDeltaChunkSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.output_text.delta"),
+   delta: import_zod12.z.string()
 });
- var responseFinishedChunkSchema = …
-   type: …
-   response: …
-     incomplete_details: …
-     usage: …
+ var responseFinishedChunkSchema = import_zod12.z.object({
+   type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
+   response: import_zod12.z.object({
+     incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
+     usage: usageSchema2
   })
 });
- var responseCreatedChunkSchema = …
-   type: …
-   response: …
-     id: …
-     created_at: …
-     model: …
+ var responseCreatedChunkSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.created"),
+   response: import_zod12.z.object({
+     id: import_zod12.z.string(),
+     created_at: import_zod12.z.number(),
+     model: import_zod12.z.string()
   })
 });
- var responseOutputItemDoneSchema = …
-   type: …
-   output_index: …
-   item: …
-     …
-     type: …
+ var responseOutputItemDoneSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.output_item.done"),
+   output_index: import_zod12.z.number(),
+   item: import_zod12.z.discriminatedUnion("type", [
+     import_zod12.z.object({
+       type: import_zod12.z.literal("message")
     }),
-     …
-     type: …
-     id: …
-     call_id: …
-     name: …
-     arguments: …
-     status: …
+     import_zod12.z.object({
+       type: import_zod12.z.literal("function_call"),
+       id: import_zod12.z.string(),
+       call_id: import_zod12.z.string(),
+       name: import_zod12.z.string(),
+       arguments: import_zod12.z.string(),
+       status: import_zod12.z.literal("completed")
     })
   ])
 });
- var responseFunctionCallArgumentsDeltaSchema = …
-   type: …
-   item_id: …
-   output_index: …
-   delta: …
+ var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.function_call_arguments.delta"),
+   item_id: import_zod12.z.string(),
+   output_index: import_zod12.z.number(),
+   delta: import_zod12.z.string()
 });
- var responseOutputItemAddedSchema = …
-   type: …
-   output_index: …
-   item: …
-     …
-     type: …
+ var responseOutputItemAddedSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.output_item.added"),
+   output_index: import_zod12.z.number(),
+   item: import_zod12.z.discriminatedUnion("type", [
+     import_zod12.z.object({
+       type: import_zod12.z.literal("message")
     }),
-     …
-     type: …
-     id: …
-     call_id: …
-     name: …
-     arguments: …
+     import_zod12.z.object({
+       type: import_zod12.z.literal("function_call"),
+       id: import_zod12.z.string(),
+       call_id: import_zod12.z.string(),
+       name: import_zod12.z.string(),
+       arguments: import_zod12.z.string()
     })
   ])
 });
- var responseAnnotationAddedSchema = …
-   type: …
-   annotation: …
-     type: …
-     url: …
-     title: …
+ var responseAnnotationAddedSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.output_text.annotation.added"),
+   annotation: import_zod12.z.object({
+     type: import_zod12.z.literal("url_citation"),
+     url: import_zod12.z.string(),
+     title: import_zod12.z.string()
   })
 });
- var responseReasoningSummaryTextDeltaSchema = …
-   type: …
-   item_id: …
-   output_index: …
-   summary_index: …
-   delta: …
+ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
+   type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
+   item_id: import_zod12.z.string(),
+   output_index: import_zod12.z.number(),
+   summary_index: import_zod12.z.number(),
+   delta: import_zod12.z.string()
 });
- var openaiResponsesChunkSchema = …
+ var openaiResponsesChunkSchema = import_zod12.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2563,7 +2426,7 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- …
+ import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
  // fallback for unknown chunks
 ]);
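
Note: per the stream code above, a v2 stream opens with a `stream-start` part carrying warnings, text deltas are `{ type: "text", text }` (previously `text-delta`/`textDelta`), and usage arrives on the final `finish` part. A consumer sketch; the call site and the `streamToAsyncIterable` helper are hypothetical:

// TypeScript sketch of consuming the v2 stream parts emitted above.
const { stream } = await model.doStream(callOptions); // model/callOptions assumed
for await (const part of streamToAsyncIterable(stream)) { // helper assumed
  switch (part.type) {
    case "stream-start": console.warn(part.warnings); break;
    case "text": process.stdout.write(part.text); break;
    case "reasoning": break; // reasoning summary delta in part.text
    case "source": console.log(part.url, part.title); break;
    case "finish": console.log(part.finishReason, part.usage.totalTokens); break;
  }
}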
  function isTextDeltaChunk(chunk) {
@@ -2611,45 +2474,24 @@ function getResponsesModelConfig(modelId) {
    requiredAutoTruncation: false
  };
 }
- var openaiResponsesProviderOptionsSchema = …
-   metadata: …
-   parallelToolCalls: …
-   previousResponseId: …
-   store: …
-   user: …
-   reasoningEffort: …
-   strictSchemas: …
-   instructions: …
-   reasoningSummary: …
+ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
+   metadata: import_zod12.z.any().nullish(),
+   parallelToolCalls: import_zod12.z.boolean().nullish(),
+   previousResponseId: import_zod12.z.string().nullish(),
+   store: import_zod12.z.boolean().nullish(),
+   user: import_zod12.z.string().nullish(),
+   reasoningEffort: import_zod12.z.string().nullish(),
+   strictSchemas: import_zod12.z.boolean().nullish(),
+   instructions: import_zod12.z.string().nullish(),
+   reasoningSummary: import_zod12.z.string().nullish()
 });
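
Note: these responses-model options are parsed with `parseProviderOptions` under the `openai` key. A sketch (the `generateText` call is an assumption; the option names come from the schema above, and the `reasoningSummary` value is assumed since the schema only requires a string):

// TypeScript sketch, assuming the `ai` consumer package.
import { generateText } from "ai"; // assumed consumer API
import { openai } from "@ai-sdk/openai";

await generateText({
  model: openai.responses("o3-mini"),
  prompt: "Prove there are infinitely many primes.",
  providerOptions: {
    openai: {
      reasoningEffort: "low",
      reasoningSummary: "auto", // assumed value
      strictSchemas: true,
      store: false
    }
  }
});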

- // src/openai-tools.ts
- var import_zod8 = require("zod");
- var WebSearchPreviewParameters = import_zod8.z.object({});
- function webSearchPreviewTool({
-   searchContextSize,
-   userLocation
- } = {}) {
-   return {
-     type: "provider-defined",
-     id: "openai.web_search_preview",
-     args: {
-       searchContextSize,
-       userLocation
-     },
-     parameters: WebSearchPreviewParameters
-   };
- }
- var openaiTools = {
-   webSearchPreview: webSearchPreviewTool
- };
-
  // src/openai-speech-model.ts
- var …
- var …
- var OpenAIProviderOptionsSchema = …
-   instructions: …
-   speed: …
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_zod13 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod13.z.object({
+   instructions: import_zod13.z.string().nullish(),
+   speed: import_zod13.z.number().min(0.25).max(4).default(1).nullish()
 });
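
Note: speech requests accept `instructions` and a `speed` constrained to 0.25-4 (default 1) via provider options, per the schema above. A sketch (the `generateSpeech` helper from the `ai` package is an assumption):

// TypeScript sketch, assuming the `ai` consumer package.
import { experimental_generateSpeech as generateSpeech } from "ai"; // assumed
import { openai } from "@ai-sdk/openai";

await generateSpeech({
  model: openai.speech("tts-1"),
  text: "Hello from the alpha!",
  providerOptions: {
    openai: { instructions: "Speak slowly and clearly.", speed: 0.8 }
  }
});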
  var OpenAISpeechModel = class {
    constructor(modelId, config) {
@@ -2660,7 +2502,7 @@ var OpenAISpeechModel = class {
    get provider() {
      return this.config.provider;
    }
-   getArgs({
+   async getArgs({
      text,
      voice = "alloy",
      outputFormat = "mp3",
@@ -2669,7 +2511,7 @@ var OpenAISpeechModel = class {
    providerOptions
  }) {
    const warnings = [];
-   const openAIOptions = (0, …
+   const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
@@ -2710,20 +2552,20 @@ var OpenAISpeechModel = class {
  async doGenerate(options) {
    var _a, _b, _c;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-   const { requestBody, warnings } = this.getArgs(options);
+   const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
-   } = await (0, …
+   } = await (0, import_provider_utils9.postJsonToApi)({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
-     headers: (0, …
+     headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
-     successfulResponseHandler: (0, …
+     successfulResponseHandler: (0, import_provider_utils9.createBinaryResponseHandler)(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
@@ -2745,12 +2587,11 @@ var OpenAISpeechModel = class {

  // src/openai-provider.ts
  function createOpenAI(options = {}) {
-   var _a, _b …
-   const baseURL = (_a = (0, …
-   const …
-   const providerName = (_c = options.name) != null ? _c : "openai";
+   var _a, _b;
+   const baseURL = (_a = (0, import_provider_utils10.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+   const providerName = (_b = options.name) != null ? _b : "openai";
    const getHeaders = () => ({
-     Authorization: `Bearer ${(0, …
+     Authorization: `Bearer ${(0, import_provider_utils10.loadApiKey)({
        apiKey: options.apiKey,
        environmentVariableName: "OPENAI_API_KEY",
        description: "OpenAI"
@@ -2759,27 +2600,25 @@ function createOpenAI(options = {}) {
    "OpenAI-Project": options.project,
    ...options.headers
  });
- const createChatModel = (modelId …
+ const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
    provider: `${providerName}.chat`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
-   compatibility,
    fetch: options.fetch
  });
- const createCompletionModel = (modelId …
+ const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
    provider: `${providerName}.completion`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
-   compatibility,
    fetch: options.fetch
  });
- const createEmbeddingModel = (modelId …
+ const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
    provider: `${providerName}.embedding`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
- const createImageModel = (modelId …
+ const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
    provider: `${providerName}.image`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
@@ -2797,19 +2636,16 @@ function createOpenAI(options = {}) {
    headers: getHeaders,
    fetch: options.fetch
  });
- const createLanguageModel = (modelId …
+ const createLanguageModel = (modelId) => {
    if (new.target) {
      throw new Error(
        "The OpenAI model function cannot be called with the new keyword."
      );
    }
    if (modelId === "gpt-3.5-turbo-instruct") {
-     return createCompletionModel(
-       modelId,
-       settings
-     );
+     return createCompletionModel(modelId);
    }
-   return createChatModel(modelId …
+   return createChatModel(modelId);
  };
  const createResponsesModel = (modelId) => {
    return new OpenAIResponsesLanguageModel(modelId, {
@@ -2819,8 +2655,8 @@ function createOpenAI(options = {}) {
    fetch: options.fetch
  });
  };
- const provider = function(modelId …
-   return createLanguageModel(modelId …
+ const provider = function(modelId) {
+   return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
@@ -2838,10 +2674,7 @@ function createOpenAI(options = {}) {
  provider.tools = openaiTools;
  return provider;
 }
- var openai = createOpenAI( …
-   compatibility: "strict"
-   // strict for OpenAI API
- });
+ var openai = createOpenAI();
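
Note: the `compatibility` option and per-model `settings` arguments are gone; the default export is plain `createOpenAI()` and every model factory takes only a model id. A configuration sketch using only options visible in this diff:

// TypeScript sketch of configuring the v2 provider.
import { createOpenAI } from "@ai-sdk/openai";

const provider = createOpenAI({
  baseURL: "https://api.openai.com/v1", // default when omitted
  apiKey: process.env.OPENAI_API_KEY, // falls back to the OPENAI_API_KEY env var
  name: "openai", // provider name prefix, e.g. "openai.chat"
  project: "proj_...", // sent as the OpenAI-Project header
});

const model = provider("gpt-4o"); // "gpt-3.5-turbo-instruct" routes to the completion model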
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
    createOpenAI,