@ai-sdk/openai 2.0.0-canary.8 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +663 -0
- package/README.md +2 -2
- package/dist/index.d.mts +84 -176
- package/dist/index.d.ts +84 -176
- package/dist/index.js +1323 -627
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1275 -575
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +85 -252
- package/dist/internal/index.d.ts +85 -252
- package/dist/internal/index.js +1315 -584
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1266 -535
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +11 -9
package/dist/internal/index.js
CHANGED
@@ -27,15 +27,19 @@ __export(internal_exports, {
 OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
 OpenAISpeechModel: () => OpenAISpeechModel,
 OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+hasDefaultResponseFormat: () => hasDefaultResponseFormat,
 modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
+openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
+openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
 openaiProviderOptions: () => openaiProviderOptions
 });
 module.exports = __toCommonJS(internal_exports);
 
 // src/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
-var
-var
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_v45 = require("zod/v4");
 
 // src/convert-to-openai-chat-messages.ts
 var import_provider = require("@ai-sdk/provider");
@@ -140,7 +144,7 @@ function convertToOpenAIChatMessages({
 type: "file",
 file: {
 filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
-file_data: `data:application/pdf;base64,${part.data}`
+file_data: `data:application/pdf;base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
 }
 };
 } else {
@@ -169,7 +173,7 @@ function convertToOpenAIChatMessages({
 type: "function",
 function: {
 name: part.toolName,
-arguments: JSON.stringify(part.
+arguments: JSON.stringify(part.input)
 }
 });
 break;
@@ -185,10 +189,23 @@ function convertToOpenAIChatMessages({
 }
 case "tool": {
 for (const toolResponse of content) {
+const output = toolResponse.output;
+let contentValue;
+switch (output.type) {
+case "text":
+case "error-text":
+contentValue = output.value;
+break;
+case "content":
+case "json":
+case "error-json":
+contentValue = JSON.stringify(output.value);
+break;
+}
 messages.push({
 role: "tool",
 tool_call_id: toolResponse.toolCallId,
-content:
+content: contentValue
 });
 }
 break;
@@ -202,17 +219,17 @@ function convertToOpenAIChatMessages({
 return { messages, warnings };
 }
 
-// src/
-function
-
-
-
-
-
-
-
-
-}
+// src/get-response-metadata.ts
+function getResponseMetadata({
+id,
+model,
+created
+}) {
+return {
+id: id != null ? id : void 0,
+modelId: model != null ? model : void 0,
+timestamp: created != null ? new Date(created * 1e3) : void 0
+};
 }
 
 // src/map-openai-finish-reason.ts
@@ -233,15 +250,15 @@ function mapOpenAIFinishReason(finishReason) {
 }
 
 // src/openai-chat-options.ts
-var
-var openaiProviderOptions =
+var import_v4 = require("zod/v4");
+var openaiProviderOptions = import_v4.z.object({
 /**
 * Modify the likelihood of specified tokens appearing in the completion.
 *
 * Accepts a JSON object that maps tokens (specified by their token ID in
 * the GPT tokenizer) to an associated bias value from -100 to 100.
 */
-logitBias:
+logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
 /**
 * Return the log probabilities of the tokens.
 *
@@ -251,50 +268,71 @@ var openaiProviderOptions = import_zod.z.object({
 * Setting to a number will return the log probabilities of the top n
 * tokens that were generated.
 */
-logprobs:
+logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
 /**
 * Whether to enable parallel function calling during tool use. Default to true.
 */
-parallelToolCalls:
+parallelToolCalls: import_v4.z.boolean().optional(),
 /**
 * A unique identifier representing your end-user, which can help OpenAI to
 * monitor and detect abuse.
 */
-user:
+user: import_v4.z.string().optional(),
 /**
 * Reasoning effort for reasoning models. Defaults to `medium`.
 */
-reasoningEffort:
+reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
 /**
 * Maximum number of completion tokens to generate. Useful for reasoning models.
 */
-maxCompletionTokens:
+maxCompletionTokens: import_v4.z.number().optional(),
 /**
 * Whether to enable persistence in responses API.
 */
-store:
+store: import_v4.z.boolean().optional(),
 /**
 * Metadata to associate with the request.
 */
-metadata:
+metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
 /**
 * Parameters for prediction mode.
 */
-prediction:
+prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
+/**
+* Whether to use structured outputs.
+*
+* @default true
+*/
+structuredOutputs: import_v4.z.boolean().optional(),
+/**
+* Service tier for the request.
+* - 'auto': Default service tier
+* - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
+* - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+*
+* @default 'auto'
+*/
+serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
+/**
+* Whether to use strict JSON schema validation.
+*
+* @default false
+*/
+strictJsonSchema: import_v4.z.boolean().optional()
 });
 
 // src/openai-error.ts
-var
+var import_v42 = require("zod/v4");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema =
-error:
-message:
+var openaiErrorDataSchema = import_v42.z.object({
+error: import_v42.z.object({
+message: import_v42.z.string(),
 // The additional information below is handled loosely to support
 // OpenAI-compatible providers that have slightly different error
 // responses:
-type:
-param:
-code:
+type: import_v42.z.string().nullish(),
+param: import_v42.z.any().nullish(),
+code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
 })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
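Note: the `openaiProviderOptions` schema above is what the released provider validates per request, so the new `structuredOutputs`, `serviceTier`, and `strictJsonSchema` settings can be passed per call. A minimal sketch, assuming AI SDK 5's `generateText` and an illustrative model id:

```ts
// Sketch: exercising the new chat provider options (assumes the 'ai' v5 package).
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('o4-mini'), // illustrative model id
  prompt: 'Summarize this changelog entry in one sentence.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low', // 'low' | 'medium' | 'high'
      serviceTier: 'flex', // cheaper but slower; o3 / o4-mini only
      strictJsonSchema: false, // matches the schema's documented default
    },
  },
});
```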
@@ -302,25 +340,101 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
 errorToMessage: (data) => data.error.message
 });
 
-// src/get-response-metadata.ts
-function getResponseMetadata({
-id,
-model,
-created
-}) {
-return {
-id: id != null ? id : void 0,
-modelId: model != null ? model : void 0,
-timestamp: created != null ? new Date(created * 1e3) : void 0
-};
-}
-
 // src/openai-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
+
+// src/tool/file-search.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var import_v43 = require("zod/v4");
+var comparisonFilterSchema = import_v43.z.object({
+key: import_v43.z.string(),
+type: import_v43.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+value: import_v43.z.union([import_v43.z.string(), import_v43.z.number(), import_v43.z.boolean()])
+});
+var compoundFilterSchema = import_v43.z.object({
+type: import_v43.z.enum(["and", "or"]),
+filters: import_v43.z.array(
+import_v43.z.union([comparisonFilterSchema, import_v43.z.lazy(() => compoundFilterSchema)])
+)
+});
+var filtersSchema = import_v43.z.union([comparisonFilterSchema, compoundFilterSchema]);
+var fileSearchArgsSchema = import_v43.z.object({
+/**
+* List of vector store IDs to search through. If not provided, searches all available vector stores.
+*/
+vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
+/**
+* Maximum number of search results to return. Defaults to 10.
+*/
+maxNumResults: import_v43.z.number().optional(),
+/**
+* Ranking options for the search.
+*/
+ranking: import_v43.z.object({
+ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
+}).optional(),
+/**
+* A filter to apply based on file attributes.
+*/
+filters: filtersSchema.optional()
+});
+var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
+id: "openai.file_search",
+name: "file_search",
+inputSchema: import_v43.z.object({
+query: import_v43.z.string()
+})
+});
+
+// src/tool/web-search-preview.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_v44 = require("zod/v4");
+var webSearchPreviewArgsSchema = import_v44.z.object({
+/**
+* Search context size to use for the web search.
+* - high: Most comprehensive context, highest cost, slower response
+* - medium: Balanced context, cost, and latency (default)
+* - low: Least context, lowest cost, fastest response
+*/
+searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
+/**
+* User location information to provide geographically relevant search results.
+*/
+userLocation: import_v44.z.object({
+/**
+* Type of location (always 'approximate')
+*/
+type: import_v44.z.literal("approximate"),
+/**
+* Two-letter ISO country code (e.g., 'US', 'GB')
+*/
+country: import_v44.z.string().optional(),
+/**
+* City name (free text, e.g., 'Minneapolis')
+*/
+city: import_v44.z.string().optional(),
+/**
+* Region name (free text, e.g., 'Minnesota')
+*/
+region: import_v44.z.string().optional(),
+/**
+* IANA timezone (e.g., 'America/Chicago')
+*/
+timezone: import_v44.z.string().optional()
+}).optional()
+});
+var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+id: "openai.web_search_preview",
+name: "web_search_preview",
+inputSchema: import_v44.z.object({})
+});
+
+// src/openai-prepare-tools.ts
 function prepareTools({
 tools,
 toolChoice,
-structuredOutputs
+structuredOutputs,
+strictJsonSchema
 }) {
 tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
 const toolWarnings = [];
@@ -329,18 +443,48 @@ function prepareTools({
 }
 const openaiTools = [];
 for (const tool of tools) {
-
-
-
-
-
-
-
-
-
-
+switch (tool.type) {
+case "function":
+openaiTools.push({
+type: "function",
+function: {
+name: tool.name,
+description: tool.description,
+parameters: tool.inputSchema,
+strict: structuredOutputs ? strictJsonSchema : void 0
+}
+});
+break;
+case "provider-defined":
+switch (tool.id) {
+case "openai.file_search": {
+const args = fileSearchArgsSchema.parse(tool.args);
+openaiTools.push({
+type: "file_search",
+vector_store_ids: args.vectorStoreIds,
+max_num_results: args.maxNumResults,
+ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+filters: args.filters
+});
+break;
+}
+case "openai.web_search_preview": {
+const args = webSearchPreviewArgsSchema.parse(tool.args);
+openaiTools.push({
+type: "web_search_preview",
+search_context_size: args.searchContextSize,
+user_location: args.userLocation
+});
+break;
+}
+default:
+toolWarnings.push({ type: "unsupported-tool", tool });
+break;
 }
-
+break;
+default:
+toolWarnings.push({ type: "unsupported-tool", tool });
+break;
 }
 }
 if (toolChoice == null) {
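Note: `prepareTools` above translates the provider-defined tool ids (`openai.file_search`, `openai.web_search_preview`) into OpenAI's wire format. A sketch of attaching the file search tool from user code, assuming the factories are re-exported as `openai.tools.*` in the released build (the vector store id is a placeholder):

```ts
// Sketch: using the provider-defined file_search tool (assumes openai.tools.* exports).
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'What does the onboarding guide say about SSO?',
  tools: {
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ['vs_...'], // placeholder vector store id
      maxNumResults: 5,
      filters: { key: 'category', type: 'eq', value: 'docs' },
    }),
  },
});
```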
@@ -374,29 +518,18 @@ function prepareTools({
 
 // src/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
-constructor(modelId,
+constructor(modelId, config) {
 this.specificationVersion = "v2";
+this.supportedUrls = {
+"image/*": [/^https?:\/\/.*$/]
+};
 this.modelId = modelId;
-this.settings = settings;
 this.config = config;
 }
-get supportsStructuredOutputs() {
-var _a;
-return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
-}
-get defaultObjectGenerationMode() {
-if (isAudioModel(this.modelId)) {
-return "tool";
-}
-return this.supportsStructuredOutputs ? "json" : "tool";
-}
 get provider() {
 return this.config.provider;
 }
-
-return !this.settings.downloadImages;
-}
-getArgs({
+async getArgs({
 prompt,
 maxOutputTokens,
 temperature,
@@ -411,20 +544,21 @@ var OpenAIChatLanguageModel = class {
 toolChoice,
 providerOptions
 }) {
-var _a, _b;
+var _a, _b, _c, _d;
 const warnings = [];
-const openaiOptions = (_a = (0,
+const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
 provider: "openai",
 providerOptions,
 schema: openaiProviderOptions
 })) != null ? _a : {};
+const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
 if (topK != null) {
 warnings.push({
 type: "unsupported-setting",
 setting: "topK"
 });
 }
-if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !
+if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
 warnings.push({
 type: "unsupported-setting",
 setting: "responseFormat",
@@ -438,6 +572,7 @@ var OpenAIChatLanguageModel = class {
 }
 );
 warnings.push(...messageWarnings);
+const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
 const baseArgs = {
 // model id:
 model: this.modelId,
@@ -453,13 +588,12 @@ var OpenAIChatLanguageModel = class {
 top_p: topP,
 frequency_penalty: frequencyPenalty,
 presence_penalty: presencePenalty,
-
-response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
+response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
 type: "json_schema",
 json_schema: {
 schema: responseFormat.schema,
-strict:
-name: (
+strict: strictJsonSchema,
+name: (_d = responseFormat.name) != null ? _d : "response",
 description: responseFormat.description
 }
 } : { type: "json_object" } : void 0,
@@ -472,6 +606,7 @@ var OpenAIChatLanguageModel = class {
 metadata: openaiOptions.metadata,
 prediction: openaiOptions.prediction,
 reasoning_effort: openaiOptions.reasoningEffort,
+service_tier: openaiOptions.serviceTier,
 // messages:
 messages
 };
@@ -535,16 +670,32 @@ var OpenAIChatLanguageModel = class {
 }
 baseArgs.max_tokens = void 0;
 }
-} else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+} else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
 if (baseArgs.temperature != null) {
 baseArgs.temperature = void 0;
 warnings.push({
 type: "unsupported-setting",
 setting: "temperature",
-details: "temperature is not supported for the
+details: "temperature is not supported for the search preview models and has been removed."
 });
 }
 }
+if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+warnings.push({
+type: "unsupported-setting",
+setting: "serviceTier",
+details: "flex processing is only available for o3 and o4-mini models"
+});
+baseArgs.service_tier = void 0;
+}
+if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+warnings.push({
+type: "unsupported-setting",
+setting: "serviceTier",
+details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+});
+baseArgs.service_tier = void 0;
+}
 const {
 tools: openaiTools,
 toolChoice: openaiToolChoice,
@@ -552,7 +703,8 @@ var OpenAIChatLanguageModel = class {
 } = prepareTools({
 tools,
 toolChoice,
-structuredOutputs
+structuredOutputs,
+strictJsonSchema
 });
 return {
 args: {
@@ -564,21 +716,21 @@ var OpenAIChatLanguageModel = class {
 };
 }
 async doGenerate(options) {
-var _a, _b, _c, _d, _e, _f, _g, _h;
-const { args: body, warnings } = this.getArgs(options);
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+const { args: body, warnings } = await this.getArgs(options);
 const {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils5.postJsonToApi)({
 url: this.config.url({
 path: "/chat/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
 openaiChatResponseSchema
 ),
 abortSignal: options.abortSignal,
@@ -593,33 +745,32 @@ var OpenAIChatLanguageModel = class {
 for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
 content.push({
 type: "tool-call",
-
-toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
 toolName: toolCall.function.name,
-
+input: toolCall.function.arguments
 });
 }
 const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
 const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
 const providerMetadata = { openai: {} };
-if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-}
 if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
 providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
 }
 if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
 providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
 }
-if ((
-providerMetadata.openai.
+if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+providerMetadata.openai.logprobs = choice.logprobs.content;
 }
 return {
 content,
 finishReason: mapOpenAIFinishReason(choice.finish_reason),
 usage: {
-inputTokens: (
-outputTokens: (
+inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
 },
 request: { body },
 response: {
@@ -628,41 +779,41 @@ var OpenAIChatLanguageModel = class {
 body: rawResponse
 },
 warnings,
-logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
 providerMetadata
 };
 }
 async doStream(options) {
-const { args, warnings } = this.getArgs(options);
+const { args, warnings } = await this.getArgs(options);
 const body = {
 ...args,
 stream: true,
-
-
+stream_options: {
+include_usage: true
+}
 };
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
 url: this.config.url({
 path: "/chat/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
 openaiChatChunkSchema
 ),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
-const { messages: rawPrompt, ...rawSettings } = args;
 const toolCalls = [];
 let finishReason = "unknown";
 const usage = {
 inputTokens: void 0,
-outputTokens: void 0
+outputTokens: void 0,
+totalTokens: void 0
 };
-let logprobs;
 let isFirstChunk = true;
+let isActiveText = false;
 const providerMetadata = { openai: {} };
 return {
 stream: response.pipeThrough(
@@ -671,7 +822,10 @@ var OpenAIChatLanguageModel = class {
 controller.enqueue({ type: "stream-start", warnings });
 },
 transform(chunk, controller) {
-var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+if (options.includeRawChunks) {
+controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+}
 if (!chunk.success) {
 finishReason = "error";
 controller.enqueue({ type: "error", error: chunk.error });
@@ -691,48 +845,40 @@ var OpenAIChatLanguageModel = class {
 });
 }
 if (value.usage != null) {
-
-
-
-
-
-
-
-usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-}
-if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-}
-if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
 }
-if ((
-providerMetadata.openai.
+if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
 }
 }
 const choice = value.choices[0];
 if ((choice == null ? void 0 : choice.finish_reason) != null) {
 finishReason = mapOpenAIFinishReason(choice.finish_reason);
 }
+if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+providerMetadata.openai.logprobs = choice.logprobs.content;
+}
 if ((choice == null ? void 0 : choice.delta) == null) {
 return;
 }
 const delta = choice.delta;
 if (delta.content != null) {
+if (!isActiveText) {
+controller.enqueue({ type: "text-start", id: "0" });
+isActiveText = true;
+}
 controller.enqueue({
-type: "text",
-
+type: "text-delta",
+id: "0",
+delta: delta.content
 });
 }
-const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-choice == null ? void 0 : choice.logprobs
-);
-if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-if (logprobs === void 0) logprobs = [];
-logprobs.push(...mappedLogprobs);
-}
 if (delta.tool_calls != null) {
 for (const toolCallDelta of delta.tool_calls) {
 const index = toolCallDelta.index;
@@ -749,39 +895,45 @@ var OpenAIChatLanguageModel = class {
 message: `Expected 'id' to be a string.`
 });
 }
-if (((
+if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
 throw new import_provider3.InvalidResponseDataError({
 data: toolCallDelta,
 message: `Expected 'function.name' to be a string.`
 });
 }
+controller.enqueue({
+type: "tool-input-start",
+id: toolCallDelta.id,
+toolName: toolCallDelta.function.name
+});
 toolCalls[index] = {
 id: toolCallDelta.id,
 type: "function",
 function: {
 name: toolCallDelta.function.name,
-arguments: (
+arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
 },
 hasFinished: false
 };
 const toolCall2 = toolCalls[index];
-if (((
+if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
 if (toolCall2.function.arguments.length > 0) {
 controller.enqueue({
-type: "tool-
-
-
-toolName: toolCall2.function.name,
-argsTextDelta: toolCall2.function.arguments
+type: "tool-input-delta",
+id: toolCall2.id,
+delta: toolCall2.function.arguments
 });
 }
-if ((0,
+if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
+controller.enqueue({
+type: "tool-input-end",
+id: toolCall2.id
+});
 controller.enqueue({
 type: "tool-call",
-
-toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
 toolName: toolCall2.function.name,
-
+input: toolCall2.function.arguments
 });
 toolCall2.hasFinished = true;
 }
@@ -792,23 +944,24 @@ var OpenAIChatLanguageModel = class {
 if (toolCall.hasFinished) {
 continue;
 }
-if (((
-toolCall.function.arguments += (
+if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
 }
 controller.enqueue({
-type: "tool-
-
-
-toolName: toolCall.function.name,
-argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+type: "tool-input-delta",
+id: toolCall.id,
+delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
 });
-if (((
+if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
+controller.enqueue({
+type: "tool-input-end",
+id: toolCall.id
+});
 controller.enqueue({
 type: "tool-call",
-
-toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
 toolName: toolCall.function.name,
-
+input: toolCall.function.arguments
 });
 toolCall.hasFinished = true;
 }
@@ -816,10 +969,12 @@ var OpenAIChatLanguageModel = class {
 }
 },
 flush(controller) {
+if (isActiveText) {
+controller.enqueue({ type: "text-end", id: "0" });
+}
 controller.enqueue({
 type: "finish",
 finishReason,
-logprobs,
 usage,
 ...providerMetadata != null ? { providerMetadata } : {}
 });
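Note: as the hunks above show, the chat stream now emits explicit lifecycle parts (`text-start`/`text-delta`/`text-end` and `tool-input-start`/`tool-input-delta`/`tool-input-end`) instead of bare `text` and `tool-call-delta` parts, and logprobs move into `providerMetadata.openai`. A sketch of the part sequence for a plain text response, typed against the shapes visible in `doStream` above (the concrete values are illustrative):

```ts
// Sketch: the v2 stream part sequence emitted by doStream for a text answer.
type StreamPart =
  | { type: 'stream-start'; warnings: unknown[] }
  | { type: 'text-start'; id: string }
  | { type: 'text-delta'; id: string; delta: string }
  | { type: 'text-end'; id: string }
  | {
      type: 'finish';
      finishReason: string;
      usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number };
    };

const example: StreamPart[] = [
  { type: 'stream-start', warnings: [] },
  { type: 'text-start', id: '0' },
  { type: 'text-delta', id: '0', delta: 'Hello' },
  { type: 'text-delta', id: '0', delta: ' world' },
  { type: 'text-end', id: '0' },
  { type: 'finish', finishReason: 'stop', usage: { inputTokens: 5, outputTokens: 2, totalTokens: 7 } },
];
```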
@@ -831,96 +986,97 @@ var OpenAIChatLanguageModel = class {
 };
 }
 };
-var openaiTokenUsageSchema =
-prompt_tokens:
-completion_tokens:
-
-
+var openaiTokenUsageSchema = import_v45.z.object({
+prompt_tokens: import_v45.z.number().nullish(),
+completion_tokens: import_v45.z.number().nullish(),
+total_tokens: import_v45.z.number().nullish(),
+prompt_tokens_details: import_v45.z.object({
+cached_tokens: import_v45.z.number().nullish()
 }).nullish(),
-completion_tokens_details:
-reasoning_tokens:
-accepted_prediction_tokens:
-rejected_prediction_tokens:
+completion_tokens_details: import_v45.z.object({
+reasoning_tokens: import_v45.z.number().nullish(),
+accepted_prediction_tokens: import_v45.z.number().nullish(),
+rejected_prediction_tokens: import_v45.z.number().nullish()
 }).nullish()
 }).nullish();
-var openaiChatResponseSchema =
-id:
-created:
-model:
-choices:
-
-message:
-role:
-content:
-tool_calls:
-
-id:
-type:
-function:
-name:
-arguments:
+var openaiChatResponseSchema = import_v45.z.object({
+id: import_v45.z.string().nullish(),
+created: import_v45.z.number().nullish(),
+model: import_v45.z.string().nullish(),
+choices: import_v45.z.array(
+import_v45.z.object({
+message: import_v45.z.object({
+role: import_v45.z.literal("assistant").nullish(),
+content: import_v45.z.string().nullish(),
+tool_calls: import_v45.z.array(
+import_v45.z.object({
+id: import_v45.z.string().nullish(),
+type: import_v45.z.literal("function"),
+function: import_v45.z.object({
+name: import_v45.z.string(),
+arguments: import_v45.z.string()
 })
 })
 ).nullish()
 }),
-index:
-logprobs:
-content:
-
-token:
-logprob:
-top_logprobs:
-
-token:
-logprob:
+index: import_v45.z.number(),
+logprobs: import_v45.z.object({
+content: import_v45.z.array(
+import_v45.z.object({
+token: import_v45.z.string(),
+logprob: import_v45.z.number(),
+top_logprobs: import_v45.z.array(
+import_v45.z.object({
+token: import_v45.z.string(),
+logprob: import_v45.z.number()
 })
 )
 })
-).
+).nullish()
 }).nullish(),
-finish_reason:
+finish_reason: import_v45.z.string().nullish()
 })
 ),
 usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema =
-
-id:
-created:
-model:
-choices:
-
-delta:
-role:
-content:
-tool_calls:
-
-index:
-id:
-type:
-function:
-name:
-arguments:
+var openaiChatChunkSchema = import_v45.z.union([
+import_v45.z.object({
+id: import_v45.z.string().nullish(),
+created: import_v45.z.number().nullish(),
+model: import_v45.z.string().nullish(),
+choices: import_v45.z.array(
+import_v45.z.object({
+delta: import_v45.z.object({
+role: import_v45.z.enum(["assistant"]).nullish(),
+content: import_v45.z.string().nullish(),
+tool_calls: import_v45.z.array(
+import_v45.z.object({
+index: import_v45.z.number(),
+id: import_v45.z.string().nullish(),
+type: import_v45.z.literal("function").nullish(),
+function: import_v45.z.object({
+name: import_v45.z.string().nullish(),
+arguments: import_v45.z.string().nullish()
 })
 })
 ).nullish()
 }).nullish(),
-logprobs:
-content:
-
-token:
-logprob:
-top_logprobs:
-
-token:
-logprob:
+logprobs: import_v45.z.object({
+content: import_v45.z.array(
+import_v45.z.object({
+token: import_v45.z.string(),
+logprob: import_v45.z.number(),
+top_logprobs: import_v45.z.array(
+import_v45.z.object({
+token: import_v45.z.string(),
+logprob: import_v45.z.number()
 })
 )
 })
-).
+).nullish()
 }).nullish(),
-finish_reason:
-index:
+finish_reason: import_v45.z.string().nullish(),
+index: import_v45.z.number()
 })
 ),
 usage: openaiTokenUsageSchema
@@ -928,10 +1084,13 @@ var openaiChatChunkSchema = import_zod3.z.union([
 openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-return modelId
+return modelId.startsWith("o");
+}
+function supportsFlexProcessing(modelId) {
+return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
-function
-return modelId.startsWith("gpt-
+function supportsPriorityProcessing(modelId) {
+return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
 var _a, _b;
@@ -953,29 +1112,37 @@ var reasoningModels = {
 "o1-preview-2024-09-12": {
 systemMessageMode: "remove"
 },
+o3: {
+systemMessageMode: "developer"
+},
+"o3-2025-04-16": {
+systemMessageMode: "developer"
+},
 "o3-mini": {
 systemMessageMode: "developer"
 },
 "o3-mini-2025-01-31": {
 systemMessageMode: "developer"
+},
+"o4-mini": {
+systemMessageMode: "developer"
+},
+"o4-mini-2025-04-16": {
+systemMessageMode: "developer"
 }
 };
 
 // src/openai-completion-language-model.ts
-var
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var import_v47 = require("zod/v4");
 
 // src/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
 function convertToOpenAICompletionPrompt({
 prompt,
-inputFormat,
 user = "user",
 assistant = "assistant"
 }) {
-if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-return { prompt: prompt[0].content[0].text };
-}
 let text = "";
 if (prompt[0].role === "system") {
 text += `${prompt[0].content}
@@ -1044,34 +1211,66 @@ ${user}:`]
 };
 }
 
-// src/
-
-
-
-
-
-
-
-
-
-
-
-
+// src/openai-completion-options.ts
+var import_v46 = require("zod/v4");
+var openaiCompletionProviderOptions = import_v46.z.object({
+/**
+Echo back the prompt in addition to the completion.
+*/
+echo: import_v46.z.boolean().optional(),
+/**
+Modify the likelihood of specified tokens appearing in the completion.
+
+Accepts a JSON object that maps tokens (specified by their token ID in
+the GPT tokenizer) to an associated bias value from -100 to 100. You
+can use this tokenizer tool to convert text to token IDs. Mathematically,
+the bias is added to the logits generated by the model prior to sampling.
+The exact effect will vary per model, but values between -1 and 1 should
+decrease or increase likelihood of selection; values like -100 or 100
+should result in a ban or exclusive selection of the relevant token.
+
+As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+token from being generated.
+*/
+logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
+/**
+The suffix that comes after a completion of inserted text.
+*/
+suffix: import_v46.z.string().optional(),
+/**
+A unique identifier representing your end-user, which can help OpenAI to
+monitor and detect abuse. Learn more.
+*/
+user: import_v46.z.string().optional(),
+/**
+Return the log probabilities of the tokens. Including logprobs will increase
+the response size and can slow down response times. However, it can
+be useful to better understand how the model is behaving.
+Setting to true will return the log probabilities of the tokens that
+were generated.
+Setting to a number will return the log probabilities of the top n
+tokens that were generated.
+*/
+logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
+});
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-constructor(modelId,
+constructor(modelId, config) {
 this.specificationVersion = "v2";
-this.
+this.supportedUrls = {
+// No URLs are supported for completion models.
+};
 this.modelId = modelId;
-this.settings = settings;
 this.config = config;
 }
+get providerOptionsName() {
+return this.config.provider.split(".")[0].trim();
+}
 get provider() {
 return this.config.provider;
 }
-getArgs({
-inputFormat,
+async getArgs({
 prompt,
 maxOutputTokens,
 temperature,
@@ -1083,9 +1282,22 @@ var OpenAICompletionLanguageModel = class {
 responseFormat,
 tools,
 toolChoice,
-seed
+seed,
+providerOptions
 }) {
 const warnings = [];
+const openaiOptions = {
+...await (0, import_provider_utils6.parseProviderOptions)({
+provider: "openai",
+providerOptions,
+schema: openaiCompletionProviderOptions
+}),
+...await (0, import_provider_utils6.parseProviderOptions)({
+provider: this.providerOptionsName,
+providerOptions,
+schema: openaiCompletionProviderOptions
+})
+};
 if (topK != null) {
 warnings.push({ type: "unsupported-setting", setting: "topK" });
 }
@@ -1102,18 +1314,18 @@ var OpenAICompletionLanguageModel = class {
 details: "JSON response format is not supported."
 });
 }
-const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt
+const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
 const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
 return {
 args: {
 // model id:
 model: this.modelId,
 // model specific settings:
-echo:
-logit_bias:
-logprobs:
-suffix:
-user:
+echo: openaiOptions.echo,
+logit_bias: openaiOptions.logitBias,
+logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+suffix: openaiOptions.suffix,
+user: openaiOptions.user,
 // standardized settings:
 max_tokens: maxOutputTokens,
 temperature,
@@ -1130,71 +1342,79 @@ var OpenAICompletionLanguageModel = class {
 };
 }
 async doGenerate(options) {
-
+var _a, _b, _c;
+const { args, warnings } = await this.getArgs(options);
 const {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils6.postJsonToApi)({
 url: this.config.url({
 path: "/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
 body: args,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
 openaiCompletionResponseSchema
 ),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
 const choice = response.choices[0];
+const providerMetadata = { openai: {} };
+if (choice.logprobs != null) {
+providerMetadata.openai.logprobs = choice.logprobs;
+}
 return {
 content: [{ type: "text", text: choice.text }],
 usage: {
-inputTokens: response.usage.prompt_tokens,
-outputTokens: response.usage.completion_tokens
+inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
 },
 finishReason: mapOpenAIFinishReason(choice.finish_reason),
-logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
 request: { body: args },
 response: {
 ...getResponseMetadata(response),
 headers: responseHeaders,
 body: rawResponse
 },
+providerMetadata,
 warnings
 };
 }
 async doStream(options) {
-const { args, warnings } = this.getArgs(options);
+const { args, warnings } = await this.getArgs(options);
 const body = {
 ...args,
 stream: true,
-
-
+stream_options: {
+include_usage: true
+}
 };
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
 url: this.config.url({
 path: "/completions",
 modelId: this.modelId
 }),
-headers: (0,
+headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
 body,
 failedResponseHandler: openaiFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(
 openaiCompletionChunkSchema
 ),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
 let finishReason = "unknown";
+const providerMetadata = { openai: {} };
 const usage = {
 inputTokens: void 0,
-outputTokens: void 0
+outputTokens: void 0,
+totalTokens: void 0
 };
-let logprobs;
 let isFirstChunk = true;
 return {
 stream: response.pipeThrough(
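Note: for both the chat and completion models, log probabilities are no longer a top-level result field; `doGenerate`/`doStream` now attach them as `providerMetadata.openai.logprobs`, and `usage` gains `totalTokens`. A sketch of reading them from a result, assuming AI SDK 5's `generateText` surfaces `providerMetadata`:

```ts
// Sketch: reading logprobs from provider metadata (assumes the 'ai' v5 package).
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'), // illustrative completion model
  prompt: 'Say hello.',
  providerOptions: {
    openai: { logprobs: 3 }, // top-3 logprobs, per openaiCompletionProviderOptions
  },
});

console.log(result.usage.totalTokens);
console.log(result.providerMetadata?.openai?.logprobs); // raw OpenAI logprobs payload
```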
@@ -1203,6 +1423,9 @@ var OpenAICompletionLanguageModel = class {
 controller.enqueue({ type: "stream-start", warnings });
 },
 transform(chunk, controller) {
+if (options.includeRawChunks) {
+controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+}
 if (!chunk.success) {
 finishReason = "error";
 controller.enqueue({ type: "error", error: chunk.error });
@@ -1220,34 +1443,36 @@ var OpenAICompletionLanguageModel = class {
 type: "response-metadata",
 ...getResponseMetadata(value)
 });
+controller.enqueue({ type: "text-start", id: "0" });
 }
 if (value.usage != null) {
 usage.inputTokens = value.usage.prompt_tokens;
 usage.outputTokens = value.usage.completion_tokens;
+usage.totalTokens = value.usage.total_tokens;
 }
 const choice = value.choices[0];
 if ((choice == null ? void 0 : choice.finish_reason) != null) {
 finishReason = mapOpenAIFinishReason(choice.finish_reason);
 }
-if ((choice == null ? void 0 : choice.
+if ((choice == null ? void 0 : choice.logprobs) != null) {
+providerMetadata.openai.logprobs = choice.logprobs;
+}
+if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
 controller.enqueue({
-type: "text",
-
+type: "text-delta",
+id: "0",
+delta: choice.text
 });
 }
-const mappedLogprobs = mapOpenAICompletionLogProbs(
-choice == null ? void 0 : choice.logprobs
-);
-if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-if (logprobs === void 0) logprobs = [];
-logprobs.push(...mappedLogprobs);
-}
 },
 flush(controller) {
+if (!isFirstChunk) {
+controller.enqueue({ type: "text-end", id: "0" });
+}
 controller.enqueue({
 type: "finish",
 finishReason,
-
+providerMetadata,
 usage
 });
 }
@@ -1258,78 +1483,89 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var
-
-
-
-
-
-
-
-
-
-
-
+ var usageSchema = import_v47.z.object({
+ prompt_tokens: import_v47.z.number(),
+ completion_tokens: import_v47.z.number(),
+ total_tokens: import_v47.z.number()
+ });
+ var openaiCompletionResponseSchema = import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
- usage:
- prompt_tokens: import_zod4.z.number(),
- completion_tokens: import_zod4.z.number()
- })
+ usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema =
-
- id:
- created:
- model:
- choices:
-
- text:
- finish_reason:
- index:
- logprobs:
- tokens:
- token_logprobs:
- top_logprobs:
+ var openaiCompletionChunkSchema = import_v47.z.union([
+ import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string().nullish(),
+ index: import_v47.z.number(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
- usage:
- prompt_tokens: import_zod4.z.number(),
- completion_tokens: import_zod4.z.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);

  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
- var
- var
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_v49 = require("zod/v4");
+
+ // src/openai-embedding-options.ts
+ var import_v48 = require("zod/v4");
+ var openaiEmbeddingProviderOptions = import_v48.z.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_v48.z.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_v48.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId,
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
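Note: maxEmbeddingsPerCall and supportsParallelCalls become fixed class fields above, and the old constructor settings object is gone; dimensions and user now arrive per call through providerOptions and are validated against openaiEmbeddingProviderOptions. A sketch of the call-site shape under that assumption (model id and values are illustrative; embed is the AI SDK helper):

import { embed } from "ai";
import { openai } from "@ai-sdk/openai";

const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-small"),
  value: "sunny day at the beach",
  providerOptions: {
    // parsed against openaiEmbeddingProviderOptions above
    openai: { dimensions: 512, user: "user-123" },
  },
});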
@@ -1338,25 +1574,30 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
+ const openaiOptions = (_a = await (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
  const {
  responseHeaders,
  value: response,
  rawValue
- } = await (0,
+ } = await (0, import_provider_utils7.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions:
- user:
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
@@ -1369,32 +1610,33 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema =
- data:
- usage:
+ var openaiTextEmbeddingResponseSchema = import_v49.z.object({
+ data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
+ usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
  });

  // src/openai-image-model.ts
- var
- var
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_v410 = require("zod/v4");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId,
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a
- return (
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
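Note: the image hunks above register gpt-image-1 (10 images per call) and introduce hasDefaultResponseFormat so that response_format: "b64_json" is only sent for models that accept it. A sketch of generating with the new model id; experimental_generateImage is the AI SDK entry point, the prompt is illustrative:

import { experimental_generateImage as generateImage } from "ai";
import { openai } from "@ai-sdk/openai";

const { image } = await generateImage({
  model: openai.image("gpt-image-1"), // no response_format is sent for this model
  prompt: "a watercolor lighthouse at dusk",
});
console.log(image.base64.length);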
@@ -1422,22 +1664,22 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0,
+ const { value: response, responseHeaders } = await (0, import_provider_utils8.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
  openaiImageResponseSchema
  ),
  abortSignal,
@@ -1450,24 +1692,57 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema =
- data:
+ var openaiImageResponseSchema = import_v410.z.object({
+ data: import_v410.z.array(
+ import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
+ )
  });

  // src/openai-transcription-model.ts
- var
- var
-
-
-
-
-
-
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_v412 = require("zod/v4");
+
+ // src/openai-transcription-options.ts
+ var import_v411 = require("zod/v4");
+ var openAITranscriptionProviderOptions = import_v411.z.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
+ include: import_v411.z.array(import_v411.z.string()).optional(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
+ language: import_v411.z.string().optional(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
+ prompt: import_v411.z.string().optional(),
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
  });
+
+ // src/openai-transcription-model.ts
  var languageMap = {
  afrikaans: "af",
  arabic: "ar",
@@ -1531,38 +1806,36 @@ var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  audio,
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = (0,
+ const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema:
+ schema: openAITranscriptionProviderOptions
  });
  const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0,
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils9.convertBase64ToUint8Array)(audio)]);
  formData.append("model", this.modelId);
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include:
- language:
- prompt:
- temperature:
- timestamp_granularities:
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
  };
- for (const key
-
- if (value !== void 0) {
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
  formData.append(key, String(value));
  }
  }
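Note: getArgs is now async because parseProviderOptions validates the transcription options against openAITranscriptionProviderOptions before they are copied onto the form data (camelCase timestampGranularities becomes the timestamp_granularities form field). A sketch of passing those options; file name and values are illustrative, experimental_transcribe is the AI SDK helper:

import { readFile } from "node:fs/promises";
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("audio.mp3"),
  providerOptions: {
    openai: { language: "en", timestampGranularities: ["word"] },
  },
});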
@@ -1575,20 +1848,20 @@ var OpenAITranscriptionModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { formData, warnings } = this.getArgs(options);
+ const { formData, warnings } = await this.getArgs(options);
  const {
  value: response,
  responseHeaders,
  rawValue: rawResponse
- } = await (0,
+ } = await (0, import_provider_utils9.postFormDataToApi)({
  url: this.config.url({
  path: "/audio/transcriptions",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  formData,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
  openaiTranscriptionResponseSchema
  ),
  abortSignal: options.abortSignal,
@@ -1614,45 +1887,46 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema =
- text:
- language:
- duration:
- words:
-
- word:
- start:
- end:
+ var openaiTranscriptionResponseSchema = import_v412.z.object({
+ text: import_v412.z.string(),
+ language: import_v412.z.string().nullish(),
+ duration: import_v412.z.number().nullish(),
+ words: import_v412.z.array(
+ import_v412.z.object({
+ word: import_v412.z.string(),
+ start: import_v412.z.number(),
+ end: import_v412.z.number()
  })
  ).nullish()
  });

  // src/openai-speech-model.ts
- var
- var
- var OpenAIProviderOptionsSchema =
- instructions:
- speed:
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_v413 = require("zod/v4");
+ var OpenAIProviderOptionsSchema = import_v413.z.object({
+ instructions: import_v413.z.string().nullish(),
+ speed: import_v413.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  text,
  voice = "alloy",
  outputFormat = "mp3",
  speed,
  instructions,
+ language,
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = (0,
+ const openAIOptions = await (0, import_provider_utils10.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -1685,6 +1959,13 @@ var OpenAISpeechModel = class {
  }
  }
  }
+ if (language) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "language",
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+ });
+ }
  return {
  requestBody,
  warnings
@@ -1693,20 +1974,20 @@ var OpenAISpeechModel = class {
  async doGenerate(options) {
  var _a, _b, _c;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { requestBody, warnings } = this.getArgs(options);
+ const { requestBody, warnings } = await this.getArgs(options);
  const {
  value: audio,
  responseHeaders,
  rawValue: rawResponse
- } = await (0,
+ } = await (0, import_provider_utils10.postJsonToApi)({
  url: this.config.url({
  path: "/audio/speech",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
  body: requestBody,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils10.createBinaryResponseHandler)(),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
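Note: the speech model now accepts a language parameter that is acknowledged with an unsupported-setting warning rather than an error, while instructions and speed continue to flow through OpenAIProviderOptionsSchema. A sketch of the call; experimental_generateSpeech is the AI SDK helper, text and option values are illustrative:

import { experimental_generateSpeech as generateSpeech } from "ai";
import { openai } from "@ai-sdk/openai";

const { audio } = await generateSpeech({
  model: openai.speech("tts-1"),
  text: "Hello from the updated speech model.",
  providerOptions: {
    openai: { instructions: "Speak slowly and clearly.", speed: 0.9 },
  },
});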
@@ -1727,15 +2008,19 @@ var OpenAISpeechModel = class {
  };

  // src/responses/openai-responses-language-model.ts
- var
- var
+ var import_provider8 = require("@ai-sdk/provider");
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_v415 = require("zod/v4");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
-
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_v414 = require("zod/v4");
+ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
+ var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1770,7 +2055,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1782,7 +2067,7 @@ function convertToOpenAIResponsesMessages({
  type: "input_image",
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
  // OpenAI specific extension: image detail
- detail: (
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
@@ -1792,7 +2077,7 @@ function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1807,34 +2092,97 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "assistant": {
+ const reasoningMessages = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
  case "tool-call": {
+ if (part.providerExecuted) {
+ break;
+ }
  messages.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
+ });
+ break;
+ }
+ case "tool-result": {
+ warnings.push({
+ type: "other",
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
  });
  break;
  }
+ case "reasoning": {
+ const providerOptions = await (0, import_provider_utils11.parseProviderOptions)({
+ provider: "openai",
+ providerOptions: part.providerOptions,
+ schema: openaiResponsesReasoningProviderOptionsSchema
+ });
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+ if (reasoningId != null) {
+ const existingReasoningMessage = reasoningMessages[reasoningId];
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({ type: "summary_text", text: part.text });
+ } else if (existingReasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (existingReasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ messages.push(reasoningMessages[reasoningId]);
+ } else {
+ existingReasoningMessage.summary.push(...summaryParts);
+ }
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ break;
+ }
  }
  }
  break;
  }
  case "tool": {
  for (const part of content) {
+ const output = part.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  type: "function_call_output",
  call_id: part.toolCallId,
- output:
+ output: contentValue
  });
  }
  break;
@@ -1847,6 +2195,10 @@ function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
+ var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
+ itemId: import_v414.z.string().nullish(),
+ reasoningEncryptedContent: import_v414.z.string().nullish()
+ });

  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
@@ -1871,7 +2223,7 @@ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
  tools,
  toolChoice,
-
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -1886,12 +2238,23 @@ function prepareResponsesTools({
  type: "function",
  name: tool.name,
  description: tool.description,
- parameters: tool.
- strict:
+ parameters: tool.inputSchema,
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_num_results: args.maxNumResults,
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ filters: args.filters
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools.push({
  type: "web_search_preview",
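Note: prepareResponsesTools now understands an openai.file_search provider-defined tool whose camelCase args (vectorStoreIds, maxNumResults, ranking, filters) are mapped onto the Responses API file_search tool above. A sketch of wiring it up, assuming the provider exposes a matching openai.tools.fileSearch factory; the factory name and the vector store id are assumptions:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o-mini"),
  prompt: "What does the onboarding doc say about SSO?",
  tools: {
    // maps to { type: "file_search", vector_store_ids, max_num_results } above
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ["vs_123"],
      maxNumResults: 5,
    }),
  },
});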
@@ -1921,7 +2284,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -1937,14 +2300,16 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  maxOutputTokens,
  temperature,
  stopSequences,
@@ -1983,17 +2348,17 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0,
+ const openaiOptions = await (0, import_provider_utils12.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2004,7 +2369,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict:
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -2018,9 +2383,18 @@ var OpenAIResponsesLanguageModel = class {
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: {
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -2043,6 +2417,37 @@ var OpenAIResponsesLanguageModel = class {
  details: "topP is not supported for reasoning models"
  });
  }
+ } else {
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningEffort",
+ details: "reasoningEffort is not supported for non-reasoning models"
+ });
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningSummary",
+ details: "reasoningSummary is not supported for non-reasoning models"
+ });
+ }
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ delete baseArgs.service_tier;
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ });
+ delete baseArgs.service_tier;
  }
  const {
  tools: openaiTools,
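Note: the getArgs hunks above add service_tier and include to the request body and then gate them: "flex" is stripped with a warning unless supportsFlexProcessing2 accepts the model, "priority" likewise, and reasoningEffort / reasoningSummary warn on non-reasoning models. A sketch of opting in; model id and option values are illustrative:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text, warnings } = await generateText({
  model: openai.responses("o4-mini"), // flex processing is accepted for o3 / o4-mini
  prompt: "Summarize the trade-offs of flex processing.",
  providerOptions: {
    openai: { serviceTier: "flex", reasoningEffort: "low", reasoningSummary: "auto" },
  },
});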
@@ -2051,7 +2456,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
-
+ strictJsonSchema
  });
  return {
  args: {
@@ -2063,84 +2468,137 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ const { args: body, warnings } = await this.getArgs(options);
+ const url = this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ });
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0,
- url
-
- modelId: this.modelId
- }),
- headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
+ } = await (0, import_provider_utils12.postJsonToApi)({
+ url,
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
-
- id:
- created_at:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ successfulResponseHandler: (0, import_provider_utils12.createJsonResponseHandler)(
+ import_v415.z.object({
+ id: import_v415.z.string(),
+ created_at: import_v415.z.number(),
+ error: import_v415.z.object({
+ code: import_v415.z.string(),
+ message: import_v415.z.string()
+ }).nullish(),
+ model: import_v415.z.string(),
+ output: import_v415.z.array(
+ import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ role: import_v415.z.literal("assistant"),
+ id: import_v415.z.string(),
+ content: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("output_text"),
+ text: import_v415.z.string(),
+ annotations: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("url_citation"),
+ start_index: import_v415.z.number(),
+ end_index: import_v415.z.number(),
+ url: import_v415.z.string(),
+ title: import_v415.z.string()
  })
  )
  })
  )
  }),
-
- type:
- call_id:
- name:
- arguments:
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string(),
+ id: import_v415.z.string()
  }),
-
- type:
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
-
- type:
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
-
- type:
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish(),
+ summary: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("summary_text"),
+ text: import_v415.z.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details:
- usage:
+ incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error) {
+ throw new import_provider8.APICallError({
+ message: response.error.message,
+ url,
+ requestBodyValues: body,
+ statusCode: 400,
+ responseHeaders,
+ responseBody: rawResponse,
+ isRetryable: false
+ });
+ }
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
+ case "reasoning": {
+ if (part.summary.length === 0) {
+ part.summary.push({ type: "summary_text", text: "" });
+ }
+ for (const summary of part.summary) {
+ content.push({
+ type: "reasoning",
+ text: summary.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id,
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
  type: "source",
  sourceType: "url",
- id: (
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils12.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
@@ -2151,10 +2609,51 @@ var OpenAIResponsesLanguageModel = class {
  case "function_call": {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: part.call_id,
  toolName: part.name,
-
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
+ });
+ break;
+ }
+ case "web_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ result: { status: part.status || "completed" },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "computer_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
  });
  break;
  }
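Note: doGenerate now maps every reasoning summary to a reasoning content part whose providerMetadata.openai carries itemId and reasoningEncryptedContent, which is what lets convertToOpenAIResponsesMessages above replay reasoning on a later turn. A sketch of reading that metadata from a result; the access follows the AI SDK v5 result shape, and the model id and option values are illustrative:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("o4-mini"),
  prompt: "Plan a three-step migration.",
  providerOptions: {
    openai: { reasoningSummary: "auto", include: ["reasoning.encrypted_content"] },
  },
});
for (const part of result.reasoning) {
  // each summary part keeps the id of the reasoning item it came from
  console.log(part.providerMetadata?.openai?.itemId, part.text);
}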
@@ -2163,12 +2662,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
  },
  request: { body },
  response: {
@@ -2180,28 +2682,26 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerMetadata: {
  openai: {
- responseId: response.id
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
  }
  },
  warnings
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0,
+ const { args: body, warnings } = await this.getArgs(options);
+ const { responseHeaders, value: response } = await (0, import_provider_utils12.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0,
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0,
+ successfulResponseHandler: (0, import_provider_utils12.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2211,13 +2711,13 @@ var OpenAIResponsesLanguageModel = class {
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
+ const activeReasoning = {};
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2225,7 +2725,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2239,22 +2742,151 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: value.item.call_id
  };
  controller.enqueue({
- type: "tool-
-
+ type: "tool-input-start",
+ id: value.item.call_id,
+ toolName: value.item.name
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "web_search_preview"
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "computer_use"
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-start",
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item.id}:0`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.call_id
+ });
+ controller.enqueue({
+ type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
-
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ result: {
+ type: "web_search_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id
  });
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
+ }
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
  if (toolCall != null) {
  controller.enqueue({
- type: "tool-
-
-
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: value.delta
  });
  }
  } else if (isResponseCreatedChunk(value)) {
@@ -2267,36 +2899,57 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text",
-
+ type: "text-delta",
+ id: value.item_id,
+ delta: value.delta
  });
- } else if (
-
-
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id,
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ });
+ }
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
- type: "
-
-
-
-
+ type: "reasoning-delta",
+ id: `${value.item_id}:${value.summary_index}`,
+ delta: value.delta,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
-
- reasoningTokens = (
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils12.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
+ } else if (isErrorChunk(value)) {
+ controller.enqueue({ type: "error", error: value });
  }
  },
  flush(controller) {
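Note: the streaming transform above now emits raw parts on demand, tool-input-start / tool-input-delta / tool-input-end around tool calls, reasoning-start / reasoning-delta / reasoning-end parts with ids of the form itemId:summaryIndex, and accumulates totalTokens, reasoningTokens and cachedInputTokens on usage instead of provider metadata. A sketch of a consumer; the model id is illustrative and streamText is the AI SDK helper:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("o4-mini"),
  prompt: "Stream a short answer.",
  providerOptions: { openai: { reasoningSummary: "auto" } },
});
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
// includes totalTokens and, when reported, reasoningTokens / cachedInputTokens
console.log(await result.usage);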
@@ -2304,13 +2957,9 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
2304
2957
|
type: "finish",
|
|
2305
2958
|
finishReason,
|
|
2306
2959
|
usage,
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
responseId,
|
|
2311
|
-
cachedPromptTokens,
|
|
2312
|
-
reasoningTokens
|
|
2313
|
-
}
|
|
2960
|
+
providerMetadata: {
|
|
2961
|
+
openai: {
|
|
2962
|
+
responseId
|
|
2314
2963
|
}
|
|
2315
2964
|
}
|
|
2316
2965
|
});
|
|
@@ -2322,87 +2971,141 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var
-  input_tokens:
-  input_tokens_details:
-  output_tokens:
-  output_tokens_details:
+var usageSchema2 = import_v415.z.object({
+  input_tokens: import_v415.z.number(),
+  input_tokens_details: import_v415.z.object({ cached_tokens: import_v415.z.number().nullish() }).nullish(),
+  output_tokens: import_v415.z.number(),
+  output_tokens_details: import_v415.z.object({ reasoning_tokens: import_v415.z.number().nullish() }).nullish()
+});
+var textDeltaChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_text.delta"),
+  item_id: import_v415.z.string(),
+  delta: import_v415.z.string()
 });
-var
-  type:
-
+var errorChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("error"),
+  code: import_v415.z.string(),
+  message: import_v415.z.string(),
+  param: import_v415.z.string().nullish(),
+  sequence_number: import_v415.z.number()
 });
-var responseFinishedChunkSchema =
-  type:
-  response:
-    incomplete_details:
-    usage:
+var responseFinishedChunkSchema = import_v415.z.object({
+  type: import_v415.z.enum(["response.completed", "response.incomplete"]),
+  response: import_v415.z.object({
+    incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
+    usage: usageSchema2
   })
 });
-var responseCreatedChunkSchema =
-  type:
-  response:
-    id:
-    created_at:
-    model:
+var responseCreatedChunkSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.created"),
+  response: import_v415.z.object({
+    id: import_v415.z.string(),
+    created_at: import_v415.z.number(),
+    model: import_v415.z.string()
   })
 });
-var
-  type:
-  output_index:
-  item:
-
-    type:
+var responseOutputItemAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_item.added"),
+  output_index: import_v415.z.number(),
+  item: import_v415.z.discriminatedUnion("type", [
+    import_v415.z.object({
+      type: import_v415.z.literal("message"),
+      id: import_v415.z.string()
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("reasoning"),
+      id: import_v415.z.string(),
+      encrypted_content: import_v415.z.string().nullish()
     }),
-
-    type:
-    id:
-    call_id:
-    name:
-    arguments:
-
+    import_v415.z.object({
+      type: import_v415.z.literal("function_call"),
+      id: import_v415.z.string(),
+      call_id: import_v415.z.string(),
+      name: import_v415.z.string(),
+      arguments: import_v415.z.string()
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("web_search_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.string()
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("computer_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.string()
     })
   ])
 });
-var
-  type:
-
-
-
-
-
-
-
-
-
-
+var responseOutputItemDoneSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_item.done"),
+  output_index: import_v415.z.number(),
+  item: import_v415.z.discriminatedUnion("type", [
+    import_v415.z.object({
+      type: import_v415.z.literal("message"),
+      id: import_v415.z.string()
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("reasoning"),
+      id: import_v415.z.string(),
+      encrypted_content: import_v415.z.string().nullish()
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("function_call"),
+      id: import_v415.z.string(),
+      call_id: import_v415.z.string(),
+      name: import_v415.z.string(),
+      arguments: import_v415.z.string(),
+      status: import_v415.z.literal("completed")
+    }),
+    import_v415.z.object({
+      type: import_v415.z.literal("web_search_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.literal("completed")
     }),
-
-    type:
-    id:
-
-    name: import_zod9.z.string(),
-    arguments: import_zod9.z.string()
+    import_v415.z.object({
+      type: import_v415.z.literal("computer_call"),
+      id: import_v415.z.string(),
+      status: import_v415.z.literal("completed")
     })
   ])
 });
-var
-  type:
-
-
-
-
+var responseFunctionCallArgumentsDeltaSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.function_call_arguments.delta"),
+  item_id: import_v415.z.string(),
+  output_index: import_v415.z.number(),
+  delta: import_v415.z.string()
+});
+var responseAnnotationAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.output_text.annotation.added"),
+  annotation: import_v415.z.object({
+    type: import_v415.z.literal("url_citation"),
+    url: import_v415.z.string(),
+    title: import_v415.z.string()
   })
 });
-var
+var responseReasoningSummaryPartAddedSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.reasoning_summary_part.added"),
+  item_id: import_v415.z.string(),
+  summary_index: import_v415.z.number()
+});
+var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
+  type: import_v415.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_v415.z.string(),
+  summary_index: import_v415.z.number(),
+  delta: import_v415.z.string()
+});
+var openaiResponsesChunkSchema = import_v415.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
+  responseOutputItemAddedSchema,
   responseOutputItemDoneSchema,
   responseFunctionCallArgumentsDeltaSchema,
-  responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
-
+  responseReasoningSummaryPartAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
+  errorChunkSchema,
+  import_v415.z.object({ type: import_v415.z.string() }).loose()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
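The chunk union above ends with `z.object({ type: z.string() }).loose()`, so stream events this version does not model degrade to a permissive object instead of failing validation. A standalone sketch of that pattern with zod v4 (same import path the bundle uses; the chunk payloads are examples):

```ts
import { z } from 'zod/v4';

const textDeltaChunk = z.object({
  type: z.literal('response.output_text.delta'),
  item_id: z.string(),
  delta: z.string(),
});

const chunkSchema = z.union([
  textDeltaChunk,
  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
]);

// Known chunks parse into the strict shape:
console.log(
  chunkSchema.safeParse({
    type: 'response.output_text.delta',
    item_id: 'item_1',
    delta: 'Hello',
  }).success,
); // true

// An unrecognized event still parses via the loose fallback:
console.log(
  chunkSchema.safeParse({ type: 'response.queued', extra: 123 }).success,
); // true
```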
@@ -2411,6 +3114,9 @@ function isTextDeltaChunk(chunk) {
 function isResponseOutputItemDoneChunk(chunk) {
   return chunk.type === "response.output_item.done";
 }
+function isResponseOutputItemDoneReasoningChunk(chunk) {
+  return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+}
 function isResponseFinishedChunk(chunk) {
   return chunk.type === "response.completed" || chunk.type === "response.incomplete";
 }
@@ -2423,11 +3129,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
 function isResponseOutputItemAddedChunk(chunk) {
   return chunk.type === "response.output_item.added";
 }
+function isResponseOutputItemAddedReasoningChunk(chunk) {
+  return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+}
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryPartAddedChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_part.added";
+}
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
+function isErrorChunk(chunk) {
+  return chunk.type === "error";
+}
 function getResponsesModelConfig(modelId) {
-  if (modelId.startsWith("o")) {
+  if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
       return {
         isReasoningModel: true,
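The new `is*Chunk` helpers are plain discriminator checks; in the TypeScript sources they double as type guards that narrow the chunk union. An illustrative sketch of the same pattern (the type names here are invented for the example, not the package's):

```ts
type ErrorChunk = { type: 'error'; code: string; message: string };
type UnknownChunk = { type: string } & Record<string, unknown>;
type Chunk = ErrorChunk | UnknownChunk;

// The `chunk is ErrorChunk` predicate lets callers branch on the
// discriminator and get the narrowed fields without casting.
function isErrorChunk(chunk: Chunk): chunk is ErrorChunk {
  return chunk.type === 'error';
}

declare const chunk: Chunk;
if (isErrorChunk(chunk)) {
  // Narrowed: `code` and `message` are typed here.
  console.log(chunk.code, chunk.message);
}
```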
@@ -2447,15 +3165,24 @@ function getResponsesModelConfig(modelId) {
       requiredAutoTruncation: false
     };
   }
-
-
-
-
-
-
-
-
-
+function supportsFlexProcessing2(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
+function supportsPriorityProcessing2(modelId) {
+  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
+var openaiResponsesProviderOptionsSchema = import_v415.z.object({
+  metadata: import_v415.z.any().nullish(),
+  parallelToolCalls: import_v415.z.boolean().nullish(),
+  previousResponseId: import_v415.z.string().nullish(),
+  store: import_v415.z.boolean().nullish(),
+  user: import_v415.z.string().nullish(),
+  reasoningEffort: import_v415.z.string().nullish(),
+  strictJsonSchema: import_v415.z.boolean().nullish(),
+  instructions: import_v415.z.string().nullish(),
+  reasoningSummary: import_v415.z.string().nullish(),
+  serviceTier: import_v415.z.enum(["auto", "flex", "priority"]).nullish(),
+  include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
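`serviceTier` is validated against `auto | flex | priority`, with `supportsFlexProcessing2`/`supportsPriorityProcessing2` gating which model families may use the non-default tiers. A hypothetical usage sketch through the public provider options (field names come from the schema above; the model id, prompt, and `reasoningSummary` value are assumptions for illustration):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the plot of Hamlet in two sentences.',
  providerOptions: {
    openai: {
      serviceTier: 'flex', // o4-mini passes supportsFlexProcessing2
      reasoningSummary: 'auto',
      include: ['reasoning.encrypted_content'],
    },
  },
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```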
@@ -2466,7 +3193,11 @@ var openaiResponsesProviderOptionsSchema = import_zod9.z.object({
   OpenAIResponsesLanguageModel,
   OpenAISpeechModel,
   OpenAITranscriptionModel,
+  hasDefaultResponseFormat,
   modelMaxImagesPerCall,
+  openAITranscriptionProviderOptions,
+  openaiCompletionProviderOptions,
+  openaiEmbeddingProviderOptions,
   openaiProviderOptions
 });
 //# sourceMappingURL=index.js.map
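These annotated names are what the internal entry point re-exports. A sketch of consuming one of the newly exported provider-options schemas from `@ai-sdk/openai/internal` (the entry point exists per `internal.d.ts` in this release, but internal exports are unstable; that the export is a zod schema accepting a `dimensions` field is an assumption based on the bundle's schema pattern):

```ts
import { openaiEmbeddingProviderOptions } from '@ai-sdk/openai/internal';

// Assumed: the export is a zod schema, so it can validate option objects.
console.log(
  openaiEmbeddingProviderOptions.safeParse({ dimensions: 512 }).success,
);
```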