@ai-sdk/openai 1.3.22 → 2.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +297 -45
- package/dist/index.d.mts +31 -183
- package/dist/index.d.ts +31 -183
- package/dist/index.js +947 -1114
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +942 -1109
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +157 -166
- package/{internal/dist → dist/internal}/index.d.ts +157 -166
- package/{internal/dist → dist/internal}/index.js +917 -1067
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +914 -1068
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
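The headline change for consumers: model settings objects are gone in 2.0, and per-call options move into `providerOptions`, validated by the zod schemas added in this diff. A minimal sketch of the new call shape, assuming the AI SDK 5 alpha `generateText` API (option names may still shift between alpha releases):

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai("o3-mini"), // no settings argument anymore
  prompt: "Summarize this release.",
  providerOptions: {
    // validated against openaiProviderOptions (see the chat-options hunk below)
    openai: { reasoningEffort: "low", store: false },
  },
});

// usage is now inputTokens/outputTokens/totalTokens (plus reasoning/cached details)
console.log(result.usage);
```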
@@ -1,7 +1,6 @@
 // src/openai-chat-language-model.ts
 import {
-  InvalidResponseDataError
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+  InvalidResponseDataError
 } from "@ai-sdk/provider";
 import {
   combineHeaders,
@@ -9,18 +8,18 @@ import {
   createJsonResponseHandler,
   generateId,
   isParsableJson,
+  parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z3 } from "zod";
 
 // src/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import {
+import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false,
   systemMessageMode = "system"
 }) {
   const messages = [];
@@ -61,55 +60,71 @@ function convertToOpenAIChatMessages({
       messages.push({
         role: "user",
         content: content.map((part, index) => {
-          var _a, _b, _c
+          var _a, _b, _c;
           switch (part.type) {
             case "text": {
               return { type: "text", text: part.text };
             }
-            case "image": {
-              return {
-                type: "image_url",
-                image_url: {
-                  url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
-                  // OpenAI specific extension: image detail
-                  detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-                }
-              };
-            }
             case "file": {
-              if (part.
-                type: "input_audio",
-                input_audio: { data: part.data, format: "mp3" }
-              };
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "image_url",
+                  image_url: {
+                    url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                    // OpenAI specific extension: image detail
+                    detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                  }
+                };
+              } else if (part.mediaType.startsWith("audio/")) {
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: "audio file parts with URLs"
+                  });
                 }
+                switch (part.mediaType) {
+                  case "audio/wav": {
+                    return {
+                      type: "input_audio",
+                      input_audio: {
+                        data: convertToBase64(part.data),
+                        format: "wav"
+                      }
+                    };
+                  }
+                  case "audio/mp3":
+                  case "audio/mpeg": {
+                    return {
+                      type: "input_audio",
+                      input_audio: {
+                        data: convertToBase64(part.data),
+                        format: "mp3"
+                      }
+                    };
+                  }
+                  default: {
+                    throw new UnsupportedFunctionalityError({
+                      functionality: `audio content parts with media type ${part.mediaType}`
+                    });
+                  }
                 }
+              } else if (part.mediaType === "application/pdf") {
+                if (part.data instanceof URL) {
                   throw new UnsupportedFunctionalityError({
-                    functionality:
+                    functionality: "PDF file parts with URLs"
                   });
                 }
+                return {
+                  type: "file",
+                  file: {
+                    filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                    file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
+                  }
+                };
+              } else {
+                throw new UnsupportedFunctionalityError({
+                  functionality: `file part media type ${part.mediaType}`
+                });
               }
             }
           }
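The rewritten `case "file"` branch above dispatches on the new `mediaType` field (v1's separate `image` part type and `mimeType` field are gone). A sketch of a user message that exercises all three branches; the message shape follows this diff and is illustrative only:

```ts
const userMessage = {
  role: "user" as const,
  content: [
    { type: "text" as const, text: "Describe the image, transcribe the audio, summarize the PDF." },
    {
      type: "file" as const,
      mediaType: "image/png", // image/* becomes { type: "image_url", ... }
      data: new URL("https://example.com/cat.png"), // image URLs are passed through
    },
    {
      type: "file" as const,
      mediaType: "audio/wav", // becomes { type: "input_audio", format: "wav" }
      data: new Uint8Array([0x52, 0x49, 0x46, 0x46]), // bytes only; audio URLs throw
    },
    {
      type: "file" as const,
      mediaType: "application/pdf", // becomes { type: "file", file_data: base64 data URL }
      data: new Uint8Array([0x25, 0x50, 0x44, 0x46]), // bytes only; PDF URLs throw
      filename: "report.pdf", // defaults to `part-${index}.pdf` when omitted
    },
  ],
};
```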
@@ -139,41 +154,20 @@ function convertToOpenAIChatMessages({
           }
         }
       }
-      }
-      messages.push({
-        role: "assistant",
-        content: text,
-        function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
-      });
-      } else {
-      messages.push({
-        role: "assistant",
-        content: text,
-        tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-      });
-      }
+      messages.push({
+        role: "assistant",
+        content: text,
+        tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+      });
       break;
     }
     case "tool": {
       for (const toolResponse of content) {
-        });
-        } else {
-        messages.push({
-          role: "tool",
-          tool_call_id: toolResponse.toolCallId,
-          content: JSON.stringify(toolResponse.result)
-        });
-        }
+        messages.push({
+          role: "tool",
+          tool_call_id: toolResponse.toolCallId,
+          content: JSON.stringify(toolResponse.result)
+        });
       }
       break;
     }
@@ -186,17 +180,17 @@ function convertToOpenAIChatMessages({
   return { messages, warnings };
 }
 
-// src/
-function
-}
+// src/get-response-metadata.ts
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
 }
 
 // src/map-openai-finish-reason.ts
@@ -216,18 +210,75 @@ function mapOpenAIFinishReason(finishReason) {
   }
 }
 
-// src/openai-
+// src/openai-chat-options.ts
 import { z } from "zod";
+var openaiProviderOptions = z.object({
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: z.record(z.coerce.number(), z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: z.union([z.boolean(), z.number()]).optional(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: z.boolean().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse.
+   */
+  user: z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+  /**
+   * Maximum number of completion tokens to generate. Useful for reasoning models.
+   */
+  maxCompletionTokens: z.number().optional(),
+  /**
+   * Whether to enable persistence in responses API.
+   */
+  store: z.boolean().optional(),
+  /**
+   * Metadata to associate with the request.
+   */
+  metadata: z.record(z.string()).optional(),
+  /**
+   * Parameters for prediction mode.
+   */
+  prediction: z.record(z.any()).optional(),
+  /**
+   * Whether to use structured outputs.
+   *
+   * @default true
+   */
+  structuredOutputs: z.boolean().optional()
+});
+
+// src/openai-error.ts
+import { z as z2 } from "zod";
 import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
-var openaiErrorDataSchema =
-  error:
-  message:
+var openaiErrorDataSchema = z2.object({
+  error: z2.object({
+    message: z2.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type:
-    param:
-    code:
+    type: z2.string().nullish(),
+    param: z2.any().nullish(),
+    code: z2.union([z2.string(), z2.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = createJsonErrorResponseHandler({
@@ -235,76 +286,19 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
-// src/get-response-metadata.ts
-function getResponseMetadata({
-  id,
-  model,
-  created
-}) {
-  return {
-    id: id != null ? id : void 0,
-    modelId: model != null ? model : void 0,
-    timestamp: created != null ? new Date(created * 1e3) : void 0
-  };
-}
-
 // src/openai-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
 function prepareTools({
+  tools,
+  toolChoice,
   structuredOutputs
 }) {
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0,
-  }
-  const toolChoice = mode.toolChoice;
-  if (useLegacyFunctionCalling) {
-    const openaiFunctions = [];
-    for (const tool of tools) {
-      if (tool.type === "provider-defined") {
-        toolWarnings.push({ type: "unsupported-tool", tool });
-      } else {
-        openaiFunctions.push({
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters
-        });
-      }
-    }
-    if (toolChoice == null) {
-      return {
-        functions: openaiFunctions,
-        function_call: void 0,
-        toolWarnings
-      };
-    }
-    const type2 = toolChoice.type;
-    switch (type2) {
-      case "auto":
-      case "none":
-      case void 0:
-        return {
-          functions: openaiFunctions,
-          function_call: void 0,
-          toolWarnings
-        };
-      case "required":
-        throw new UnsupportedFunctionalityError2({
-          functionality: "useLegacyFunctionCalling and toolChoice: required"
-        });
-      default:
-        return {
-          functions: openaiFunctions,
-          function_call: { name: toolChoice.toolName },
-          toolWarnings
-        };
-    }
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
   const openaiTools = [];
   for (const tool of tools) {
@@ -323,18 +317,18 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiTools,
+    return { tools: openaiTools, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiTools,
+      return { tools: openaiTools, toolChoice: type, toolWarnings };
     case "tool":
       return {
         tools: openaiTools,
+        toolChoice: {
           type: "function",
           function: {
             name: toolChoice.toolName
@@ -345,7 +339,7 @@ function prepareTools({
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
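`prepareTools` now emits the standard `tools`/`tool_choice` request fields in every path; the legacy `functions`/`function_call` shape is gone. A sketch of the request fragment produced for a forced tool choice (`{ type: "tool", toolName: "getWeather" }`); the tool itself is a made-up example:

```ts
const requestFragment = {
  tools: [
    {
      type: "function",
      function: {
        name: "getWeather",
        description: "Look up the current weather for a city.",
        parameters: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"],
        },
      },
    },
  ],
  // produced by the "tool" case above
  tool_choice: { type: "function", function: { name: "getWeather" } },
};
```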
@@ -353,32 +347,20 @@ function prepareTools({
 
 // src/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
-  constructor(modelId,
-  this.specificationVersion = "
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
-  }
-  get defaultObjectGenerationMode() {
-    if (isAudioModel(this.modelId)) {
-      return "tool";
-    }
-    return this.supportsStructuredOutputs ? "json" : "tool";
-  }
   get provider() {
     return this.config.provider;
   }
-    return !this.settings.downloadImages;
-  }
-  getArgs({
-    mode,
+  async getArgs({
     prompt,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -387,39 +369,34 @@ var OpenAIChatLanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b, _c
-    const type = mode.type;
+    var _a, _b, _c;
     const warnings = [];
+    const openaiOptions = (_a = await parseProviderOptions({
+      provider: "openai",
+      providerOptions,
+      schema: openaiProviderOptions
+    })) != null ? _a : {};
+    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
         setting: "topK"
       });
     }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
       warnings.push({
         type: "unsupported-setting",
         setting: "responseFormat",
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
-    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "useLegacyFunctionCalling with parallelToolCalls"
-      });
-    }
-    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
-      throw new UnsupportedFunctionalityError3({
-        functionality: "structuredOutputs with useLegacyFunctionCalling"
-      });
-    }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        useLegacyFunctionCalling,
         systemMessageMode: getSystemMessageMode(this.modelId)
       }
     );
@@ -428,35 +405,38 @@ var OpenAIChatLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      logit_bias:
-      logprobs:
-      top_logprobs: typeof
-      user:
-      parallel_tool_calls:
+      logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
-      max_tokens:
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ?
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+        // TODO convert into provider option
+        structuredOutputs && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            strict: true,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" }
+      ) : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
-      // TODO remove in next major version; we auto-map
-      max_completion_tokens:
-      store:
-      metadata:
-      prediction:
-      reasoning_effort:
+      // TODO remove in next major version; we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
       // messages:
       messages
     };
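Note how the single `logprobs` provider option (boolean or number) fans out into OpenAI's `logprobs` flag and `top_logprobs` count in the body above. The same mapping, extracted into a standalone sketch:

```ts
// Mirrors the ternaries in the request body above.
function mapLogprobs(option: boolean | number | undefined) {
  return {
    logprobs: option === true || typeof option === "number" ? true : undefined,
    top_logprobs:
      typeof option === "number" ? option : option === true ? 0 : undefined,
  };
}

mapLogprobs(true);  // { logprobs: true, top_logprobs: 0 }
mapLogprobs(3);     // { logprobs: true, top_logprobs: 3 }
mapLogprobs(false); // { logprobs: undefined, top_logprobs: undefined }
```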
@@ -530,85 +510,27 @@ var OpenAIChatLanguageModel = class {
         });
       }
     }
-    }
-    case "object-json": {
-      return {
-        args: {
-          ...baseArgs,
-          response_format: this.supportsStructuredOutputs && mode.schema != null ? {
-            type: "json_schema",
-            json_schema: {
-              schema: mode.schema,
-              strict: true,
-              name: (_h = mode.name) != null ? _h : "response",
-              description: mode.description
-            }
-          } : { type: "json_object" }
-        },
-        warnings
-      };
-    }
-    case "object-tool": {
-      return {
-        args: useLegacyFunctionCalling ? {
-          ...baseArgs,
-          function_call: {
-            name: mode.tool.name
-          },
-          functions: [
-            {
-              name: mode.tool.name,
-              description: mode.tool.description,
-              parameters: mode.tool.parameters
-            }
-          ]
-        } : {
-          ...baseArgs,
-          tool_choice: {
-            type: "function",
-            function: { name: mode.tool.name }
-          },
-          tools: [
-            {
-              type: "function",
-              function: {
-                name: mode.tool.name,
-                description: mode.tool.description,
-                parameters: mode.tool.parameters,
-                strict: this.supportsStructuredOutputs ? true : void 0
-              }
-            }
-          ]
-        },
-        warnings
-      };
-    }
-    default: {
-      const _exhaustiveCheck = type;
-      throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-    }
-    }
+    const {
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      structuredOutputs
+    });
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -627,105 +549,61 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = body;
     const choice = response.choices[0];
-    const
-    const
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+      content.push({
+        type: "tool-call",
+        toolCallType: "function",
+        toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+        toolName: toolCall.function.name,
+        args: toolCall.function.arguments
+      });
     }
+    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((
-      providerMetadata.openai.
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
-      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
-        {
-          toolCallType: "function",
-          toolCallId: generateId(),
-          toolName: choice.message.function_call.name,
-          args: choice.message.function_call.arguments
-        }
-      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+      },
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      request: { body: JSON.stringify(body) },
-      response: getResponseMetadata(response),
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
   async doStream(options) {
-    const result = await this.doGenerate(options);
-    const simulatedStream = new ReadableStream({
-      start(controller) {
-        controller.enqueue({ type: "response-metadata", ...result.response });
-        if (result.text) {
-          controller.enqueue({
-            type: "text-delta",
-            textDelta: result.text
-          });
-        }
-        if (result.toolCalls) {
-          for (const toolCall of result.toolCalls) {
-            controller.enqueue({
-              type: "tool-call-delta",
-              toolCallType: "function",
-              toolCallId: toolCall.toolCallId,
-              toolName: toolCall.toolName,
-              argsTextDelta: toolCall.args
-            });
-            controller.enqueue({
-              type: "tool-call",
-              ...toolCall
-            });
-          }
-        }
-        controller.enqueue({
-          type: "finish",
-          finishReason: result.finishReason,
-          usage: result.usage,
-          logprobs: result.logprobs,
-          providerMetadata: result.providerMetadata
-        });
-        controller.close();
-      }
-    });
-    return {
-      stream: simulatedStream,
-      rawCall: result.rawCall,
-      rawResponse: result.rawResponse,
-      warnings: result.warnings
-    };
-  }
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
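`doGenerate` now returns a `content` array (text and tool-call parts) plus the expanded v2 usage object. A sketch of how a raw OpenAI usage payload maps onto it (values invented for illustration):

```ts
const rawUsage = {
  prompt_tokens: 120,
  completion_tokens: 48,
  total_tokens: 168,
  completion_tokens_details: { reasoning_tokens: 16 },
  prompt_tokens_details: { cached_tokens: 100 },
};

const usage = {
  inputTokens: rawUsage.prompt_tokens,      // was promptTokens in 1.x
  outputTokens: rawUsage.completion_tokens, // was completionTokens in 1.x
  totalTokens: rawUsage.total_tokens,
  reasoningTokens: rawUsage.completion_tokens_details?.reasoning_tokens,
  cachedInputTokens: rawUsage.prompt_tokens_details?.cached_tokens,
};
```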
@@ -741,22 +619,23 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
-    const { useLegacyFunctionCalling } = this.settings;
     const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -776,60 +655,37 @@ var OpenAIChatLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
-              completionTokens: completion_tokens != null ? completion_tokens : void 0
-              };
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
               }
-              if ((completion_tokens_details == null ? void 0 :
-                providerMetadata.openai.
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-              }
-              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
              }
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
+            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs.content;
+            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
            const delta = choice.delta;
            if (delta.content != null) {
              controller.enqueue({
-                type: "text
+                type: "text",
+                text: delta.content
              });
            }
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
-            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
-              {
-                type: "function",
-                id: generateId(),
-                function: delta.function_call,
-                index: 0
-              }
-            ] : delta.tool_calls;
-            if (mappedToolCalls != null) {
-              for (const toolCallDelta of mappedToolCalls) {
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
@@ -844,7 +700,7 @@ var OpenAIChatLanguageModel = class {
                     message: `Expected 'id' to be a string.`
                   });
                 }
-                if (((
+                if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                   throw new InvalidResponseDataError({
                     data: toolCallDelta,
                     message: `Expected 'function.name' to be a string.`
@@ -855,12 +711,12 @@ var OpenAIChatLanguageModel = class {
                   type: "function",
                   function: {
                     name: toolCallDelta.function.name,
-                    arguments: (
+                    arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                   },
                   hasFinished: false
                 };
                 const toolCall2 = toolCalls[index];
-                if (((
+                if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                   if (toolCall2.function.arguments.length > 0) {
                     controller.enqueue({
                       type: "tool-call-delta",
@@ -874,7 +730,7 @@ var OpenAIChatLanguageModel = class {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (
+                    toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                     toolName: toolCall2.function.name,
                     args: toolCall2.function.arguments
                   });
@@ -887,21 +743,21 @@ var OpenAIChatLanguageModel = class {
                 if (toolCall.hasFinished) {
                   continue;
                 }
-                if (((
-                  toolCall.function.arguments += (
+                if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                 }
                 controller.enqueue({
                   type: "tool-call-delta",
                   toolCallType: "function",
                   toolCallId: toolCall.id,
                   toolName: toolCall.function.name,
-                  argsTextDelta: (
+                  argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                 });
-                if (((
+                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (
+                    toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                     toolName: toolCall.function.name,
                     args: toolCall.function.arguments
                   });
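The v2 stream protocol visible in these hunks: a `stream-start` part now opens the stream carrying the warnings, text arrives as `text` parts (v1's `text-delta`/`textDelta` is gone), and the `finish` part (see the flush handler in the next hunk) carries the full usage object with no NaN fallbacks. A loose sketch of a consumer, with part shapes taken from this diff and types deliberately kept loose:

```ts
async function drain(stream: ReadableStream<{ type: string }>) {
  const reader = stream.getReader();
  for (let r = await reader.read(); !r.done; r = await reader.read()) {
    const part = r.value as any; // illustrative; real types come from the SDK
    switch (part.type) {
      case "stream-start": // new in v2: emitted first, with the request warnings
        if (part.warnings.length > 0) console.warn(part.warnings);
        break;
      case "text": // replaces { type: "text-delta", textDelta }
        process.stdout.write(part.text);
        break;
      case "finish":
        console.log("\n", part.finishReason, part.usage);
        break;
    }
  }
}
```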
@@ -911,125 +767,111 @@ var OpenAIChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a, _b;
             controller.enqueue({
               type: "finish",
               finishReason,
-              usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-              },
+              usage,
               ...providerMetadata != null ? { providerMetadata } : {}
             });
           }
         })
       ),
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiTokenUsageSchema =
-  prompt_tokens:
-  completion_tokens:
+var openaiTokenUsageSchema = z3.object({
+  prompt_tokens: z3.number().nullish(),
+  completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
+  prompt_tokens_details: z3.object({
+    cached_tokens: z3.number().nullish()
   }).nullish(),
-  completion_tokens_details:
-    reasoning_tokens:
-    accepted_prediction_tokens:
-    rejected_prediction_tokens:
+  completion_tokens_details: z3.object({
+    reasoning_tokens: z3.number().nullish(),
+    accepted_prediction_tokens: z3.number().nullish(),
+    rejected_prediction_tokens: z3.number().nullish()
   }).nullish()
 }).nullish();
-var openaiChatResponseSchema =
-  id:
-  created:
-  model:
-  choices:
-    message:
-    role:
-    content:
-      type: z2.literal("function"),
-      function: z2.object({
-        name: z2.string(),
-        arguments: z2.string()
+var openaiChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant").nullish(),
+        content: z3.string().nullish(),
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string().nullish(),
+            type: z3.literal("function"),
+            function: z3.object({
+              name: z3.string(),
+              arguments: z3.string()
             })
           })
         ).nullish()
       }),
-      index:
-      logprobs:
-      content:
-        token:
-        logprob:
-        top_logprobs:
-          token:
-          logprob:
+      index: z3.number(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
              })
            )
          })
-      ).
+        ).nullish()
      }).nullish(),
-      finish_reason:
+      finish_reason: z3.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
});
-var openaiChatChunkSchema =
-  id:
-  created:
-  model:
-  choices:
-    delta:
-    role:
-    content:
-      type: z2.literal("function").nullish(),
-      function: z2.object({
-        name: z2.string().nullish(),
-        arguments: z2.string().nullish()
+var openaiChatChunkSchema = z3.union([
+  z3.object({
+    id: z3.string().nullish(),
+    created: z3.number().nullish(),
+    model: z3.string().nullish(),
+    choices: z3.array(
+      z3.object({
+        delta: z3.object({
+          role: z3.enum(["assistant"]).nullish(),
+          content: z3.string().nullish(),
+          tool_calls: z3.array(
+            z3.object({
+              index: z3.number(),
+              id: z3.string().nullish(),
+              type: z3.literal("function").nullish(),
+              function: z3.object({
+                name: z3.string().nullish(),
+                arguments: z3.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        logprobs:
-        content:
-          token:
-          logprob:
-          top_logprobs:
-            token:
-            logprob:
+        logprobs: z3.object({
+          content: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number(),
+              top_logprobs: z3.array(
+                z3.object({
+                  token: z3.string(),
+                  logprob: z3.number()
                })
              )
            })
-        ).
+          ).nullish()
        }).nullish(),
-        finish_reason:
-        index:
+        finish_reason: z3.string().nullish(),
+        index: z3.number()
      })
    ),
    usage: openaiTokenUsageSchema
@@ -1039,9 +881,6 @@ var openaiChatChunkSchema = z2.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
-function isAudioModel(modelId) {
-  return modelId.startsWith("gpt-4o-audio-preview");
-}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -1083,31 +922,25 @@ var reasoningModels = {
 };
 
 // src/openai-completion-language-model.ts
-import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
-} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders2,
   createEventSourceResponseHandler as createEventSourceResponseHandler2,
   createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z5 } from "zod";
 
 // src/convert-to-openai-completion-prompt.ts
 import {
   InvalidPromptError,
-  UnsupportedFunctionalityError as
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
 } from "@ai-sdk/provider";
 function convertToOpenAICompletionPrompt({
   prompt,
-  inputFormat,
   user = "user",
   assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
   let text = "";
   if (prompt[0].role === "system") {
     text += `${prompt[0].content}
@@ -1129,13 +962,8 @@ function convertToOpenAICompletionPrompt({
         case "text": {
           return part.text;
         }
-        case "image": {
-          throw new UnsupportedFunctionalityError4({
-            functionality: "images"
-          });
-        }
       }
-    }).join("");
+    }).filter(Boolean).join("");
     text += `${user}:
 ${userMessage}
 
@@ -1149,7 +977,7 @@ ${userMessage}
         return part.text;
       }
       case "tool-call": {
-        throw new
+        throw new UnsupportedFunctionalityError3({
           functionality: "tool-call messages"
         });
       }
@@ -1162,7 +990,7 @@ ${assistantMessage}
       break;
     }
     case "tool": {
-      throw new
+      throw new UnsupportedFunctionalityError3({
        functionality: "tool messages"
      });
    }
@@ -1181,37 +1009,68 @@ ${user}:`]
   };
 }
 
-// src/
+// src/openai-completion-options.ts
+import { z as z4 } from "zod";
+var openaiCompletionProviderOptions = z4.object({
+  /**
+  Echo back the prompt in addition to the completion.
+   */
+  echo: z4.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+   */
+  logitBias: z4.record(z4.string(), z4.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+   */
+  suffix: z4.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+   */
+  user: z4.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+   */
+  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
+});
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId,
-  this.specificationVersion = "
-  this.
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   get provider() {
     return this.config.provider;
   }
-  getArgs({
-    mode,
-    inputFormat,
+  async getArgs({
     prompt,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
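For completion models the same migration applies: `echo`, `logitBias`, `suffix`, `user`, and `logprobs` move from constructor settings to provider options, and `getArgs` merges options read under both the `"openai"` key and the provider's own name (via `providerOptionsName`). A sketch, again assuming the AI SDK 5 alpha `generateText` API:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai.completion("gpt-3.5-turbo-instruct"),
  prompt: "function add(a, b) {",
  providerOptions: {
    // validated against openaiCompletionProviderOptions above
    openai: { suffix: "}", echo: false, user: "user-123" },
  },
});
```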
@@ -1219,16 +1078,32 @@ var OpenAICompletionLanguageModel = class {
     presencePenalty,
     stopSequences: userStopSequences,
     responseFormat,
+    tools,
+    toolChoice,
+    seed,
+    providerOptions
   }) {
-    var _a;
-    const type = mode.type;
     const warnings = [];
+    const openaiOptions = {
+      ...await parseProviderOptions2({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await parseProviderOptions2({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
     if (topK != null) {
-      warnings.push({
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
+    }
+    if (tools == null ? void 0 : tools.length) {
+      warnings.push({ type: "unsupported-setting", setting: "tools" });
+    }
+    if (toolChoice != null) {
+      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
     }
     if (responseFormat != null && responseFormat.type !== "text") {
       warnings.push({
@@ -1237,61 +1112,36 @@ var OpenAICompletionLanguageModel = class {
       details: "JSON response format is not supported."
     });
   }
-  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt
+  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
   const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
+  return {
+    args: {
+      // model id:
+      model: this.modelId,
+      // model specific settings:
+      echo: openaiOptions.echo,
+      logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+      suffix: openaiOptions.suffix,
+      user: openaiOptions.user,
+      // standardized settings:
+      max_tokens: maxOutputTokens,
+      temperature,
+      top_p: topP,
+      frequency_penalty: frequencyPenalty,
+      presence_penalty: presencePenalty,
+      seed,
+      // prompt:
+      prompt: completionPrompt,
+      // stop sequences:
+      stop: stop.length > 0 ? stop : void 0
+    },
+    warnings
   };
-  switch (type) {
-    case "regular": {
-      if ((_a = mode.tools) == null ? void 0 : _a.length) {
-        throw new UnsupportedFunctionalityError5({
-          functionality: "tools"
-        });
-      }
-      if (mode.toolChoice) {
-        throw new UnsupportedFunctionalityError5({
-          functionality: "toolChoice"
-        });
-      }
-      return { args: baseArgs, warnings };
-    }
-    case "object-json": {
-      throw new UnsupportedFunctionalityError5({
-        functionality: "object-json mode"
-      });
-    }
-    case "object-tool": {
-      throw new UnsupportedFunctionalityError5({
-        functionality: "object-tool mode"
-      });
-    }
-    default: {
-      const _exhaustiveCheck = type;
-      throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-    }
-  }
 }
 async doGenerate(options) {
-
+  var _a, _b, _c;
+  const { args, warnings } = await this.getArgs(options);
  const {
    responseHeaders,
    value: response,
@@ -1310,30 +1160,37 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
-      text: choice.text,
+      content: [{ type: "text", text: choice.text }],
       usage: {
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      providerMetadata,
+      warnings
    };
  }
  async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
+      stream_options: {
+        include_usage: true
+      }
    };
    const { responseHeaders, value: response } = await postJsonToApi2({
      url: this.config.url({
@@ -1349,17 +1206,20 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             if (!chunk.success) {
               finishReason = "error";
@@ -1380,87 +1240,79 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1380
1240
|
});
|
|
1381
1241
|
}
|
|
1382
1242
|
if (value.usage != null) {
|
|
1383
|
-
usage =
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
};
|
|
1243
|
+
usage.inputTokens = value.usage.prompt_tokens;
|
|
1244
|
+
usage.outputTokens = value.usage.completion_tokens;
|
|
1245
|
+
usage.totalTokens = value.usage.total_tokens;
|
|
1387
1246
|
}
|
|
1388
1247
|
const choice = value.choices[0];
|
|
1389
1248
|
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
1390
1249
|
finishReason = mapOpenAIFinishReason(choice.finish_reason);
|
|
1391
1250
|
}
|
|
1251
|
+
if ((choice == null ? void 0 : choice.logprobs) != null) {
|
|
1252
|
+
providerMetadata.openai.logprobs = choice.logprobs;
|
|
1253
|
+
}
|
|
1392
1254
|
if ((choice == null ? void 0 : choice.text) != null) {
|
|
1393
1255
|
controller.enqueue({
|
|
1394
|
-
type: "text
|
|
1395
|
-
|
|
1256
|
+
type: "text",
|
|
1257
|
+
text: choice.text
|
|
1396
1258
|
});
|
|
1397
1259
|
}
|
|
1398
|
-
const mappedLogprobs = mapOpenAICompletionLogProbs(
|
|
1399
|
-
choice == null ? void 0 : choice.logprobs
|
|
1400
|
-
);
|
|
1401
|
-
if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
|
|
1402
|
-
if (logprobs === void 0) logprobs = [];
|
|
1403
|
-
logprobs.push(...mappedLogprobs);
|
|
1404
|
-
}
|
|
1405
1260
|
},
|
|
1406
1261
|
flush(controller) {
|
|
1407
1262
|
controller.enqueue({
|
|
1408
1263
|
type: "finish",
|
|
1409
1264
|
finishReason,
|
|
1410
|
-
|
|
1265
|
+
providerMetadata,
|
|
1411
1266
|
usage
|
|
1412
1267
|
});
|
|
1413
1268
|
}
|
|
1414
1269
|
})
|
|
1415
1270
|
),
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
warnings,
|
|
1419
|
-
request: { body: JSON.stringify(body) }
|
|
1271
|
+
request: { body },
|
|
1272
|
+
response: { headers: responseHeaders }
|
|
1420
1273
|
};
|
|
1421
1274
|
}
|
|
1422
1275
|
};
|
|
1423
|
-
var
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1276
|
+
var usageSchema = z5.object({
|
|
1277
|
+
prompt_tokens: z5.number(),
|
|
1278
|
+
completion_tokens: z5.number(),
|
|
1279
|
+
total_tokens: z5.number()
|
|
1280
|
+
});
|
|
1281
|
+
var openaiCompletionResponseSchema = z5.object({
|
|
1282
|
+
id: z5.string().nullish(),
|
|
1283
|
+
created: z5.number().nullish(),
|
|
1284
|
+
model: z5.string().nullish(),
|
|
1285
|
+
choices: z5.array(
|
|
1286
|
+
z5.object({
|
|
1287
|
+
text: z5.string(),
|
|
1288
|
+
finish_reason: z5.string(),
|
|
1289
|
+
logprobs: z5.object({
|
|
1290
|
+
tokens: z5.array(z5.string()),
|
|
1291
|
+
token_logprobs: z5.array(z5.number()),
|
|
1292
|
+
top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
|
|
1435
1293
|
}).nullish()
|
|
1436
1294
|
})
|
|
1437
1295
|
),
|
|
1438
|
-
usage:
|
|
1439
|
-
prompt_tokens: z3.number(),
|
|
1440
|
-
completion_tokens: z3.number()
|
|
1441
|
-
})
|
|
1296
|
+
usage: usageSchema.nullish()
|
|
1442
1297
|
});
|
|
1443
|
-
var openaiCompletionChunkSchema =
|
|
1444
|
-
|
|
1445
|
-
id:
|
|
1446
|
-
created:
|
|
1447
|
-
model:
|
|
1448
|
-
choices:
|
|
1449
|
-
|
|
1450
|
-
text:
|
|
1451
|
-
finish_reason:
|
|
1452
|
-
index:
|
|
1453
|
-
logprobs:
|
|
1454
|
-
tokens:
|
|
1455
|
-
token_logprobs:
|
|
1456
|
-
top_logprobs:
|
|
1298
|
+
var openaiCompletionChunkSchema = z5.union([
|
|
1299
|
+
z5.object({
|
|
1300
|
+
id: z5.string().nullish(),
|
|
1301
|
+
created: z5.number().nullish(),
|
|
1302
|
+
model: z5.string().nullish(),
|
|
1303
|
+
choices: z5.array(
|
|
1304
|
+
z5.object({
|
|
1305
|
+
text: z5.string(),
|
|
1306
|
+
finish_reason: z5.string().nullish(),
|
|
1307
|
+
index: z5.number(),
|
|
1308
|
+
logprobs: z5.object({
|
|
1309
|
+
tokens: z5.array(z5.string()),
|
|
1310
|
+
token_logprobs: z5.array(z5.number()),
|
|
1311
|
+
top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
|
|
1457
1312
|
}).nullish()
|
|
1458
1313
|
})
|
|
1459
1314
|
),
|
|
1460
|
-
usage:
|
|
1461
|
-
prompt_tokens: z3.number(),
|
|
1462
|
-
completion_tokens: z3.number()
|
|
1463
|
-
}).nullish()
|
|
1315
|
+
usage: usageSchema.nullish()
|
|
1464
1316
|
}),
|
|
1465
1317
|
openaiErrorDataSchema
|
|
1466
1318
|
]);
|
|
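Beyond the schema rewrite, the hunks above change the completion model's public result shape: usage is reported as inputTokens/outputTokens/totalTokens instead of the v1 promptTokens/completionTokens pair, generated text moves into a content array, logprobs move into providerMetadata.openai, and streams now open with a stream-start part that carries the call warnings. A minimal sketch of what a caller of doGenerate sees, assuming model is an instance of the class above and callOptions is a valid v2 options object (both hypothetical names):

  // TypeScript sketch, not part of the package.
  const result = await model.doGenerate(callOptions);
  // Text now arrives as content parts rather than a flat `text` field.
  const text = result.content
    .filter((part) => part.type === "text")
    .map((part) => part.text)
    .join("");
  console.log(text);
  console.log(result.usage.totalTokens, result.providerMetadata?.openai.logprobs);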
@@ -1472,32 +1324,45 @@ import {
 import {
   combineHeaders as combineHeaders3,
   createJsonResponseHandler as createJsonResponseHandler3,
+  parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z7 } from "zod";
+
+// src/openai-embedding-options.ts
+import { z as z6 } from "zod";
+var openaiEmbeddingProviderOptions = z6.object({
+  /**
+  The number of dimensions the resulting output embeddings should have.
+  Only supported in text-embedding-3 and later models.
+  */
+  dimensions: z6.number().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+  */
+  user: z6.string().optional()
+});
+
+// src/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    var _a;
-    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
-  }
-  get supportsParallelCalls() {
-    var _a;
-    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
-  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
+    var _a;
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
         provider: this.provider,
@@ -1506,7 +1371,16 @@ var OpenAIEmbeddingModel = class {
         values
       });
     }
-    const
+    const openaiOptions = (_a = await parseProviderOptions3({
+      provider: "openai",
+      providerOptions,
+      schema: openaiEmbeddingProviderOptions
+    })) != null ? _a : {};
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi3({
       url: this.config.url({
         path: "/embeddings",
         modelId: this.modelId
@@ -1516,8 +1390,8 @@ var OpenAIEmbeddingModel = class {
         model: this.modelId,
         input: values,
         encoding_format: "float",
-        dimensions:
-        user:
+        dimensions: openaiOptions.dimensions,
+        user: openaiOptions.user
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler3(
@@ -1529,13 +1403,13 @@ var OpenAIEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var openaiTextEmbeddingResponseSchema =
-  data:
-  usage:
+var openaiTextEmbeddingResponseSchema = z7.object({
+  data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
 });

 // src/openai-image-model.ts
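The embedding hunks above move dimensions and user from constructor settings to per-call provider options validated against openaiEmbeddingProviderOptions. A hedged sketch of the resulting call shape, assuming the AI SDK's embed helper is used with this provider (helper and method names may differ in your version):

  import { embed } from "ai";
  import { openai } from "@ai-sdk/openai";

  // dimensions/user now travel with the call instead of the model instance.
  const { embedding, usage } = await embed({
    model: openai.embedding("text-embedding-3-small"),
    value: "sunny day at the beach",
    providerOptions: { openai: { dimensions: 512 } }
  });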
@@ -1544,7 +1418,7 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z8 } from "zod";

 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
@@ -1556,15 +1430,14 @@ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

 // src/openai-image-model.ts
 var OpenAIImageModel = class {
-  constructor(modelId,
+  constructor(modelId, config) {
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
-    this.specificationVersion = "
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
-    var _a
-    return (
+    var _a;
+    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
   }
   get provider() {
     return this.config.provider;
@@ -1620,12 +1493,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
-var openaiImageResponseSchema =
-  data:
+var openaiImageResponseSchema = z8.object({
+  data: z8.array(
+    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+  )
 });

 // src/openai-transcription-model.ts
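The image-model hunks above drop per-instance settings, pin maxImagesPerCall to the modelMaxImagesPerCall table, and surface revised prompts through providerMetadata.openai.images (one entry per image, null when the API returned none). A sketch of reading that metadata, assuming the AI SDK's experimental_generateImage helper exposes the result's providerMetadata (treat the exact result fields as an assumption):

  import { experimental_generateImage as generateImage } from "ai";
  import { openai } from "@ai-sdk/openai";

  const { providerMetadata } = await generateImage({
    model: openai.image("dall-e-3"),
    prompt: "a watercolor lighthouse at dusk"
  });
  // e.g. [{ revisedPrompt: "..." }] or [null]
  console.log(providerMetadata?.openai?.images);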
@@ -1633,17 +1517,39 @@ import {
   combineHeaders as combineHeaders5,
   convertBase64ToUint8Array,
   createJsonResponseHandler as createJsonResponseHandler5,
-  parseProviderOptions,
+  parseProviderOptions as parseProviderOptions4,
   postFormDataToApi
 } from "@ai-sdk/provider-utils";
-import { z as
-
-
-
-
-
-
+import { z as z10 } from "zod";
+
+// src/openai-transcription-options.ts
+import { z as z9 } from "zod";
+var openAITranscriptionProviderOptions = z9.object({
+  /**
+   * Additional information to include in the transcription response.
+   */
+  include: z9.array(z9.string()).optional(),
+  /**
+   * The language of the input audio in ISO-639-1 format.
+   */
+  language: z9.string().optional(),
+  /**
+   * An optional text to guide the model's style or continue a previous audio segment.
+   */
+  prompt: z9.string().optional(),
+  /**
+   * The sampling temperature, between 0 and 1.
+   * @default 0
+   */
+  temperature: z9.number().min(0).max(1).default(0).optional(),
+  /**
+   * The timestamp granularities to populate for this transcription.
+   * @default ['segment']
+   */
+  timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
 });
+
+// src/openai-transcription-model.ts
 var languageMap = {
   afrikaans: "af",
   arabic: "ar",
@@ -1712,17 +1618,16 @@ var OpenAITranscriptionModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     audio,
     mediaType,
     providerOptions
   }) {
-    var _a, _b, _c, _d, _e;
     const warnings = [];
-    const openAIOptions =
+    const openAIOptions = await parseProviderOptions4({
       provider: "openai",
       providerOptions,
-      schema:
+      schema: openAITranscriptionProviderOptions
     });
     const formData = new FormData();
     const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
@@ -1730,15 +1635,14 @@ var OpenAITranscriptionModel = class {
     formData.append("file", new File([blob], "audio", { type: mediaType }));
     if (openAIOptions) {
       const transcriptionModelOptions = {
-        include:
-        language:
-        prompt:
-        temperature:
-        timestamp_granularities:
+        include: openAIOptions.include,
+        language: openAIOptions.language,
+        prompt: openAIOptions.prompt,
+        temperature: openAIOptions.temperature,
+        timestamp_granularities: openAIOptions.timestampGranularities
       };
-      for (const key
-
-        if (value !== void 0) {
+      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+        if (value != null) {
           formData.append(key, String(value));
         }
       }
@@ -1751,7 +1655,7 @@ var OpenAITranscriptionModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { formData, warnings } = this.getArgs(options);
+    const { formData, warnings } = await this.getArgs(options);
     const {
       value: response,
       responseHeaders,
@@ -1790,15 +1694,15 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema =
-  text:
-  language:
-  duration:
-  words:
-
-    word:
-    start:
-    end:
+var openaiTranscriptionResponseSchema = z10.object({
+  text: z10.string(),
+  language: z10.string().nullish(),
+  duration: z10.number().nullish(),
+  words: z10.array(
+    z10.object({
+      word: z10.string(),
+      start: z10.number(),
+      end: z10.number()
     })
   ).nullish()
 });
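The transcription hunks above extract the option set into openAITranscriptionProviderOptions and validate it per call (getArgs becomes async because parseProviderOptions4 is awaited). A sketch of passing those options, assuming the AI SDK's experimental_transcribe helper (names per the SDK; treat as illustrative):

  import { experimental_transcribe as transcribe } from "ai";
  import { openai } from "@ai-sdk/openai";
  import { readFile } from "node:fs/promises";

  const { text } = await transcribe({
    model: openai.transcription("whisper-1"),
    audio: await readFile("audio.mp3"),
    providerOptions: {
      openai: { language: "en", timestampGranularities: ["word"] }
    }
  });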
@@ -1807,13 +1711,13 @@ var openaiTranscriptionResponseSchema = z6.object({
 import {
   combineHeaders as combineHeaders6,
   createBinaryResponseHandler,
-  parseProviderOptions as
+  parseProviderOptions as parseProviderOptions5,
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
-import { z as
-var OpenAIProviderOptionsSchema =
-  instructions:
-  speed:
+import { z as z11 } from "zod";
+var OpenAIProviderOptionsSchema = z11.object({
+  instructions: z11.string().nullish(),
+  speed: z11.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
@@ -1824,7 +1728,7 @@ var OpenAISpeechModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     text,
     voice = "alloy",
     outputFormat = "mp3",
@@ -1833,7 +1737,7 @@ var OpenAISpeechModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions =
+    const openAIOptions = await parseProviderOptions5({
       provider: "openai",
       providerOptions,
       schema: OpenAIProviderOptionsSchema
@@ -1874,7 +1778,7 @@ var OpenAISpeechModel = class {
   async doGenerate(options) {
     var _a, _b, _c;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { requestBody, warnings } = this.getArgs(options);
+    const { requestBody, warnings } = await this.getArgs(options);
     const {
       value: audio,
       responseHeaders,
@@ -1913,16 +1817,15 @@ import {
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler6,
   generateId as generateId2,
-  parseProviderOptions as
+  parseProviderOptions as parseProviderOptions6,
   postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z12 } from "zod";

 // src/responses/convert-to-openai-responses-messages.ts
 import {
-  UnsupportedFunctionalityError as
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
 function convertToOpenAIResponsesMessages({
   prompt,
   systemMessageMode
@@ -1961,38 +1864,35 @@ function convertToOpenAIResponsesMessages({
       messages.push({
         role: "user",
         content: content.map((part, index) => {
-          var _a, _b, _c
+          var _a, _b, _c;
           switch (part.type) {
             case "text": {
               return { type: "input_text", text: part.text };
             }
-            case "image": {
-              return {
-                type: "input_image",
-                image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
-                // OpenAI specific extension: image detail
-                detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-              };
-            }
             case "file": {
-              if (part.
-
-
-
-
-
-
-
-
-
-
-
-              }
-              default: {
-                throw new UnsupportedFunctionalityError6({
-                  functionality: "Only PDF files are supported in user messages"
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "input_image",
+                  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+                  // OpenAI specific extension: image detail
+                  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                };
+              } else if (part.mediaType === "application/pdf") {
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError4({
+                    functionality: "PDF file parts with URLs"
                 });
               }
+              return {
+                type: "input_file",
+                filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                file_data: `data:application/pdf;base64,${part.data}`
+              };
+            } else {
+              throw new UnsupportedFunctionalityError4({
+                functionality: `file part media type ${part.mediaType}`
+              });
             }
           }
         }
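In the converter above, the dedicated image part type is gone: v2 prompts deliver images and PDFs alike as file parts carrying a mediaType and data, URLs are only accepted for images, and any other media type is rejected. A sketch of the message shape this implies (values are hypothetical; pdfBase64 stands in for real base64 data):

  const messages = [
    {
      role: "user" as const,
      content: [
        { type: "text" as const, text: "Summarize this document." },
        {
          type: "file" as const,
          mediaType: "application/pdf",
          filename: "report.pdf",
          data: pdfBase64 // base64 string; PDF URLs are rejected above
        }
      ]
    }
  ];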
@@ -2062,19 +1962,18 @@ function mapOpenAIResponseFinishReason({

 // src/responses/openai-responses-prepare-tools.ts
 import {
-  UnsupportedFunctionalityError as
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError5
 } from "@ai-sdk/provider";
 function prepareResponsesTools({
-
+  tools,
+  toolChoice,
   strict
 }) {
-
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0,
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
-  const toolChoice = mode.toolChoice;
   const openaiTools = [];
   for (const tool of tools) {
     switch (tool.type) {
@@ -2107,37 +2006,24 @@ function prepareResponsesTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiTools,
+    return { tools: openaiTools, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiTools,
-    case "tool":
-      if (toolChoice.toolName === "web_search_preview") {
-        return {
-          tools: openaiTools,
-          tool_choice: {
-            type: "web_search_preview"
-          },
-          toolWarnings
-        };
-      }
+      return { tools: openaiTools, toolChoice: type, toolWarnings };
+    case "tool":
       return {
         tools: openaiTools,
-
-          type: "function",
-          name: toolChoice.toolName
-        },
+        toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
         toolWarnings
       };
-    }
     default: {
       const _exhaustiveCheck = type;
-      throw new
-        functionality: `
+      throw new UnsupportedFunctionalityError5({
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -2146,18 +2032,18 @@ function prepareResponsesTools({
 // src/responses/openai-responses-language-model.ts
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "
-    this.
-
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  getArgs({
-
-    maxTokens,
+  async getArgs({
+    maxOutputTokens,
     temperature,
     stopSequences,
     topP,
@@ -2166,24 +2052,19 @@ var OpenAIResponsesLanguageModel = class {
     frequencyPenalty,
     seed,
     prompt,
-
+    providerOptions,
+    tools,
+    toolChoice,
     responseFormat
   }) {
-    var _a, _b
+    var _a, _b;
     const warnings = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
-    const type = mode.type;
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
     if (seed != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "seed"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "seed" });
     }
     if (presencePenalty != null) {
       warnings.push({
@@ -2198,19 +2079,16 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     if (stopSequences != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "stopSequences"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions =
+    const openaiOptions = await parseProviderOptions6({
       provider: "openai",
-      providerOptions
+      providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
     const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -2219,7 +2097,7 @@ var OpenAIResponsesLanguageModel = class {
       input: messages,
       temperature,
       top_p: topP,
-      max_output_tokens:
+      max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         text: {
           format: responseFormat.schema != null ? {
@@ -2271,66 +2149,27 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return {
-          args: {
-            ...baseArgs,
-            text: {
-              format: mode.schema != null ? {
-                type: "json_schema",
-                strict: isStrict,
-                name: (_c = mode.name) != null ? _c : "response",
-                description: mode.description,
-                schema: mode.schema
-              } : { type: "json_object" }
-            }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            ...baseArgs,
-            tool_choice: { type: "function", name: mode.tool.name },
-            tools: [
-              {
-                type: "function",
-                name: mode.tool.name,
-                description: mode.tool.description,
-                parameters: mode.tool.parameters,
-                strict: isStrict
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
+    const {
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareResponsesTools({
+      tools,
+      toolChoice,
+      strict: isStrict
+    });
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h;
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
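prepareResponsesTools above now takes tools and toolChoice straight from the call options (v1 tunneled both through mode) and hands back a toolChoice value that getArgs serializes as tool_choice. A sketch of the mapping, with hypothetical inputs (the function-tool shape is an assumption based on the surrounding code):

  // "auto" | "none" | "required" pass through unchanged; a named tool
  // becomes { type: "function", name }, except the built-in
  // web_search_preview tool, which maps to { type: "web_search_preview" }.
  const { toolChoice } = prepareResponsesTools({
    tools: [{ type: "function", name: "getWeather", description: "", parameters: {} }],
    toolChoice: { type: "tool", toolName: "getWeather" },
    strict: true
  });
  // -> { type: "function", name: "getWeather" }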
@@ -2344,123 +2183,132 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler6(
-
-        id:
-        created_at:
-        model:
-        output:
-
-
-          type:
-          role:
-          content:
-
-            type:
-            text:
-            annotations:
-
-              type:
-              start_index:
-              end_index:
-              url:
-              title:
+        z12.object({
+          id: z12.string(),
+          created_at: z12.number(),
+          model: z12.string(),
+          output: z12.array(
+            z12.discriminatedUnion("type", [
+              z12.object({
+                type: z12.literal("message"),
+                role: z12.literal("assistant"),
+                content: z12.array(
+                  z12.object({
+                    type: z12.literal("output_text"),
+                    text: z12.string(),
+                    annotations: z12.array(
+                      z12.object({
+                        type: z12.literal("url_citation"),
+                        start_index: z12.number(),
+                        end_index: z12.number(),
+                        url: z12.string(),
+                        title: z12.string()
                      })
                    )
                  })
                )
              }),
-
-        type:
-        call_id:
-        name:
-        arguments:
+              z12.object({
+                type: z12.literal("function_call"),
+                call_id: z12.string(),
+                name: z12.string(),
+                arguments: z12.string()
              }),
-
-        type:
+              z12.object({
+                type: z12.literal("web_search_call")
              }),
-
-        type:
+              z12.object({
+                type: z12.literal("computer_call")
              }),
-
-        type:
-        summary:
-
-          type:
-          text:
+              z12.object({
+                type: z12.literal("reasoning"),
+                summary: z12.array(
+                  z12.object({
+                    type: z12.literal("summary_text"),
+                    text: z12.string()
                  })
                )
              })
            ])
          ),
-          incomplete_details:
-          usage:
+          incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+          usage: usageSchema2
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const
-    const
-
-
-
-
-
-
+    const content = [];
+    for (const part of response.output) {
+      switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
+        case "message": {
+          for (const contentPart of part.content) {
+            content.push({
+              type: "text",
+              text: contentPart.text
+            });
+            for (const annotation of contentPart.annotations) {
+              content.push({
+                type: "source",
+                sourceType: "url",
+                id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+                url: annotation.url,
+                title: annotation.title
+              });
+            }
+          }
+          break;
+        }
+        case "function_call": {
+          content.push({
+            type: "tool-call",
+            toolCallType: "function",
+            toolCallId: part.call_id,
+            toolName: part.name,
+            args: part.arguments
+          });
+          break;
+        }
+      }
+    }
    return {
-
-      sources: outputTextElements.flatMap(
-        (content) => content.annotations.map((annotation) => {
-          var _a2, _b2, _c2;
-          return {
-            sourceType: "url",
-            id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
-            url: annotation.url,
-            title: annotation.title
-          };
-        })
-      ),
+      content,
      finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (
-        hasToolCalls:
+        finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
-      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
-      reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
-        type: "text",
-        text: summary.text
-      })) : void 0,
      usage: {
-
-
-
-
-
-        rawSettings: {}
-      },
-      rawResponse: {
-        headers: responseHeaders,
-        body: rawResponse
-      },
-      request: {
-        body: JSON.stringify(body)
+        inputTokens: response.usage.input_tokens,
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
      },
+      request: { body },
      response: {
        id: response.id,
        timestamp: new Date(response.created_at * 1e3),
-        modelId: response.model
+        modelId: response.model,
+        headers: responseHeaders,
+        body: rawResponse
      },
      providerMetadata: {
        openai: {
-          responseId: response.id
-          cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
-          reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
+          responseId: response.id
        }
      },
      warnings
    };
  }
  async doStream(options) {
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
    const { responseHeaders, value: response } = await postJsonToApi6({
      url: this.config.url({
        path: "/responses",
@@ -2480,16 +2328,20 @@ var OpenAIResponsesLanguageModel = class {
    });
    const self = this;
    let finishReason = "unknown";
-
-
-
-
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
+    };
    let responseId = null;
    const ongoingToolCalls = {};
    let hasToolCalls = false;
    return {
      stream: response.pipeThrough(
        new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h;
            if (!chunk.success) {
@@ -2533,13 +2385,13 @@ var OpenAIResponsesLanguageModel = class {
              });
            } else if (isTextDeltaChunk(value)) {
              controller.enqueue({
-                type: "text
-
+                type: "text",
+                text: value.delta
              });
            } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
              controller.enqueue({
                type: "reasoning",
-
+                text: value.delta
              });
            } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
              ongoingToolCalls[value.output_index] = void 0;
@@ -2556,19 +2408,18 @@ var OpenAIResponsesLanguageModel = class {
                finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
                hasToolCalls
              });
-
-
-
-              reasoningTokens = (
+              usage.inputTokens = value.response.usage.input_tokens;
+              usage.outputTokens = value.response.usage.output_tokens;
+              usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+              usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+              usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
            } else if (isResponseAnnotationAddedChunk(value)) {
              controller.enqueue({
                type: "source",
-
-
-
-
-                title: value.annotation.title
-                }
+                sourceType: "url",
+                id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+                url: value.annotation.url,
+                title: value.annotation.title
              });
            }
          },
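With the rework above, a v2 responses stream always opens with a stream-start part carrying the call warnings, emits text and reasoning deltas plus source parts, and ends with a finish part holding the accumulated usage and providerMetadata.openai.responseId. A sketch of a consumer, assuming a doStream result from this model (model and callOptions are hypothetical) and a runtime where ReadableStream is async-iterable (Node 18+):

  const { stream } = await model.doStream(callOptions);
  for await (const part of stream) {
    if (part.type === "stream-start") console.warn(part.warnings);
    if (part.type === "text") process.stdout.write(part.text);
    if (part.type === "finish") {
      console.log(part.usage.totalTokens, part.providerMetadata?.openai.responseId);
    }
  }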
@@ -2576,110 +2427,101 @@ var OpenAIResponsesLanguageModel = class {
            controller.enqueue({
              type: "finish",
              finishReason,
-              usage
-
-
-
-                responseId,
-                cachedPromptTokens,
-                reasoningTokens
-              }
+              usage,
+              providerMetadata: {
+                openai: {
+                  responseId
                }
              }
            });
          }
        })
      ),
-
-
-        rawSettings: {}
-      },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
    };
  }
};
-var
-  input_tokens:
-  input_tokens_details:
-  output_tokens:
-  output_tokens_details:
+var usageSchema2 = z12.object({
+  input_tokens: z12.number(),
+  input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
+  output_tokens: z12.number(),
+  output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
});
-var textDeltaChunkSchema =
-  type:
-  delta:
+var textDeltaChunkSchema = z12.object({
+  type: z12.literal("response.output_text.delta"),
+  delta: z12.string()
});
-var responseFinishedChunkSchema =
-  type:
-  response:
-    incomplete_details:
-    usage:
+var responseFinishedChunkSchema = z12.object({
+  type: z12.enum(["response.completed", "response.incomplete"]),
+  response: z12.object({
+    incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+    usage: usageSchema2
  })
});
-var responseCreatedChunkSchema =
-  type:
-  response:
-    id:
-    created_at:
-    model:
+var responseCreatedChunkSchema = z12.object({
+  type: z12.literal("response.created"),
+  response: z12.object({
+    id: z12.string(),
+    created_at: z12.number(),
+    model: z12.string()
  })
});
-var responseOutputItemDoneSchema =
-  type:
-  output_index:
-  item:
-
-    type:
+var responseOutputItemDoneSchema = z12.object({
+  type: z12.literal("response.output_item.done"),
+  output_index: z12.number(),
+  item: z12.discriminatedUnion("type", [
+    z12.object({
+      type: z12.literal("message")
    }),
-
-    type:
-    id:
-    call_id:
-    name:
-    arguments:
-    status:
+    z12.object({
+      type: z12.literal("function_call"),
+      id: z12.string(),
+      call_id: z12.string(),
+      name: z12.string(),
+      arguments: z12.string(),
+      status: z12.literal("completed")
    })
  ])
});
-var responseFunctionCallArgumentsDeltaSchema =
-  type:
-  item_id:
-  output_index:
-  delta:
+var responseFunctionCallArgumentsDeltaSchema = z12.object({
+  type: z12.literal("response.function_call_arguments.delta"),
+  item_id: z12.string(),
+  output_index: z12.number(),
+  delta: z12.string()
});
-var responseOutputItemAddedSchema =
-  type:
-  output_index:
-  item:
-
-    type:
+var responseOutputItemAddedSchema = z12.object({
+  type: z12.literal("response.output_item.added"),
+  output_index: z12.number(),
+  item: z12.discriminatedUnion("type", [
+    z12.object({
+      type: z12.literal("message")
    }),
-
-    type:
-    id:
-    call_id:
-    name:
-    arguments:
+    z12.object({
+      type: z12.literal("function_call"),
+      id: z12.string(),
+      call_id: z12.string(),
+      name: z12.string(),
+      arguments: z12.string()
    })
  ])
});
-var responseAnnotationAddedSchema =
-  type:
-  annotation:
-    type:
-    url:
-    title:
+var responseAnnotationAddedSchema = z12.object({
+  type: z12.literal("response.output_text.annotation.added"),
+  annotation: z12.object({
+    type: z12.literal("url_citation"),
+    url: z12.string(),
+    title: z12.string()
  })
});
-var responseReasoningSummaryTextDeltaSchema =
-  type:
-  item_id:
-  output_index:
-  summary_index:
-  delta:
+var responseReasoningSummaryTextDeltaSchema = z12.object({
+  type: z12.literal("response.reasoning_summary_text.delta"),
+  item_id: z12.string(),
+  output_index: z12.number(),
+  summary_index: z12.number(),
+  delta: z12.string()
});
-var openaiResponsesChunkSchema =
+var openaiResponsesChunkSchema = z12.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2688,7 +2530,7 @@ var openaiResponsesChunkSchema = z8.union([
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
-
+  z12.object({ type: z12.string() }).passthrough()
  // fallback for unknown chunks
]);
function isTextDeltaChunk(chunk) {
@@ -2736,16 +2578,16 @@ function getResponsesModelConfig(modelId) {
    requiredAutoTruncation: false
  };
}
-var openaiResponsesProviderOptionsSchema =
-  metadata:
-  parallelToolCalls:
-  previousResponseId:
-  store:
-  user:
-  reasoningEffort:
-  strictSchemas:
-  instructions:
-  reasoningSummary:
+var openaiResponsesProviderOptionsSchema = z12.object({
+  metadata: z12.any().nullish(),
+  parallelToolCalls: z12.boolean().nullish(),
+  previousResponseId: z12.string().nullish(),
+  store: z12.boolean().nullish(),
+  user: z12.string().nullish(),
+  reasoningEffort: z12.string().nullish(),
+  strictSchemas: z12.boolean().nullish(),
+  instructions: z12.string().nullish(),
+  reasoningSummary: z12.string().nullish()
});
export {
  OpenAIChatLanguageModel,
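openaiResponsesProviderOptionsSchema above is the contract for providerOptions.openai on the Responses API path. A sketch of passing a few of these through the AI SDK's generateText (helper names per the SDK; treat as illustrative):

  import { generateText } from "ai";
  import { openai } from "@ai-sdk/openai";

  const { text, providerMetadata } = await generateText({
    model: openai.responses("o3-mini"),
    prompt: "Explain diffusion models in one paragraph.",
    providerOptions: {
      openai: { reasoningEffort: "low", store: false, user: "user-123" }
    }
  });
  console.log(text, providerMetadata?.openai?.responseId);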
@@ -2756,6 +2598,10 @@ export {
  OpenAISpeechModel,
  OpenAITranscriptionModel,
  hasDefaultResponseFormat,
-  modelMaxImagesPerCall
+  modelMaxImagesPerCall,
+  openAITranscriptionProviderOptions,
+  openaiCompletionProviderOptions,
+  openaiEmbeddingProviderOptions,
+  openaiProviderOptions
};
//# sourceMappingURL=index.mjs.map