@zenning/openai 1.6.0 → 2.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3390 +0,0 @@
1
- "use strict";
2
- var __defProp = Object.defineProperty;
3
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
- var __getOwnPropNames = Object.getOwnPropertyNames;
5
- var __hasOwnProp = Object.prototype.hasOwnProperty;
6
- var __export = (target, all) => {
7
- for (var name14 in all)
8
- __defProp(target, name14, { get: all[name14], enumerable: true });
9
- };
10
- var __copyProps = (to, from, except, desc) => {
11
- if (from && typeof from === "object" || typeof from === "function") {
12
- for (let key of __getOwnPropNames(from))
13
- if (!__hasOwnProp.call(to, key) && key !== except)
14
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
- }
16
- return to;
17
- };
18
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
-
20
- // src/internal/index.ts
21
- var internal_exports = {};
22
- __export(internal_exports, {
23
- OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
24
- OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
25
- OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
26
- OpenAIImageModel: () => OpenAIImageModel,
27
- OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
28
- OpenAISpeechModel: () => OpenAISpeechModel,
29
- OpenAITranscriptionModel: () => OpenAITranscriptionModel,
30
- hasDefaultResponseFormat: () => hasDefaultResponseFormat,
31
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
32
- });
33
- module.exports = __toCommonJS(internal_exports);
34
-
35
- // ../provider/dist/index.mjs
36
- var marker = "vercel.ai.error";
37
- var symbol = Symbol.for(marker);
38
- var _a;
39
- var _AISDKError = class _AISDKError2 extends Error {
40
- /**
41
- * Creates an AI SDK Error.
42
- *
43
- * @param {Object} params - The parameters for creating the error.
44
- * @param {string} params.name - The name of the error.
45
- * @param {string} params.message - The error message.
46
- * @param {unknown} [params.cause] - The underlying cause of the error.
47
- */
48
- constructor({
49
- name: name14,
50
- message,
51
- cause
52
- }) {
53
- super(message);
54
- this[_a] = true;
55
- this.name = name14;
56
- this.cause = cause;
57
- }
58
- /**
59
- * Checks if the given error is an AI SDK Error.
60
- * @param {unknown} error - The error to check.
61
- * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
62
- */
63
- static isInstance(error) {
64
- return _AISDKError2.hasMarker(error, marker);
65
- }
66
- static hasMarker(error, marker15) {
67
- const markerSymbol = Symbol.for(marker15);
68
- return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
69
- }
70
- };
71
- _a = symbol;
72
- var AISDKError = _AISDKError;
73
- var name = "AI_APICallError";
74
- var marker2 = `vercel.ai.error.${name}`;
75
- var symbol2 = Symbol.for(marker2);
76
- var _a2;
77
- _a2 = symbol2;
78
- var name2 = "AI_EmptyResponseBodyError";
79
- var marker3 = `vercel.ai.error.${name2}`;
80
- var symbol3 = Symbol.for(marker3);
81
- var _a3;
82
- _a3 = symbol3;
83
- var name3 = "AI_InvalidArgumentError";
84
- var marker4 = `vercel.ai.error.${name3}`;
85
- var symbol4 = Symbol.for(marker4);
86
- var _a4;
87
- _a4 = symbol4;
88
- var name4 = "AI_InvalidPromptError";
89
- var marker5 = `vercel.ai.error.${name4}`;
90
- var symbol5 = Symbol.for(marker5);
91
- var _a5;
92
- var InvalidPromptError = class extends AISDKError {
93
- constructor({
94
- prompt,
95
- message,
96
- cause
97
- }) {
98
- super({ name: name4, message: `Invalid prompt: ${message}`, cause });
99
- this[_a5] = true;
100
- this.prompt = prompt;
101
- }
102
- static isInstance(error) {
103
- return AISDKError.hasMarker(error, marker5);
104
- }
105
- };
106
- _a5 = symbol5;
107
- var name5 = "AI_InvalidResponseDataError";
108
- var marker6 = `vercel.ai.error.${name5}`;
109
- var symbol6 = Symbol.for(marker6);
110
- var _a6;
111
- var InvalidResponseDataError = class extends AISDKError {
112
- constructor({
113
- data,
114
- message = `Invalid response data: ${JSON.stringify(data)}.`
115
- }) {
116
- super({ name: name5, message });
117
- this[_a6] = true;
118
- this.data = data;
119
- }
120
- static isInstance(error) {
121
- return AISDKError.hasMarker(error, marker6);
122
- }
123
- };
124
- _a6 = symbol6;
125
- var name6 = "AI_JSONParseError";
126
- var marker7 = `vercel.ai.error.${name6}`;
127
- var symbol7 = Symbol.for(marker7);
128
- var _a7;
129
- _a7 = symbol7;
130
- var name7 = "AI_LoadAPIKeyError";
131
- var marker8 = `vercel.ai.error.${name7}`;
132
- var symbol8 = Symbol.for(marker8);
133
- var _a8;
134
- _a8 = symbol8;
135
- var name8 = "AI_LoadSettingError";
136
- var marker9 = `vercel.ai.error.${name8}`;
137
- var symbol9 = Symbol.for(marker9);
138
- var _a9;
139
- _a9 = symbol9;
140
- var name9 = "AI_NoContentGeneratedError";
141
- var marker10 = `vercel.ai.error.${name9}`;
142
- var symbol10 = Symbol.for(marker10);
143
- var _a10;
144
- _a10 = symbol10;
145
- var name10 = "AI_NoSuchModelError";
146
- var marker11 = `vercel.ai.error.${name10}`;
147
- var symbol11 = Symbol.for(marker11);
148
- var _a11;
149
- _a11 = symbol11;
150
- var name11 = "AI_TooManyEmbeddingValuesForCallError";
151
- var marker12 = `vercel.ai.error.${name11}`;
152
- var symbol12 = Symbol.for(marker12);
153
- var _a12;
154
- var TooManyEmbeddingValuesForCallError = class extends AISDKError {
155
- constructor(options) {
156
- super({
157
- name: name11,
158
- message: `Too many values for a single embedding call. The ${options.provider} model "${options.modelId}" can only embed up to ${options.maxEmbeddingsPerCall} values per call, but ${options.values.length} values were provided.`
159
- });
160
- this[_a12] = true;
161
- this.provider = options.provider;
162
- this.modelId = options.modelId;
163
- this.maxEmbeddingsPerCall = options.maxEmbeddingsPerCall;
164
- this.values = options.values;
165
- }
166
- static isInstance(error) {
167
- return AISDKError.hasMarker(error, marker12);
168
- }
169
- };
170
- _a12 = symbol12;
171
- var name12 = "AI_TypeValidationError";
172
- var marker13 = `vercel.ai.error.${name12}`;
173
- var symbol13 = Symbol.for(marker13);
174
- var _a13;
175
- _a13 = symbol13;
176
- var name13 = "AI_UnsupportedFunctionalityError";
177
- var marker14 = `vercel.ai.error.${name13}`;
178
- var symbol14 = Symbol.for(marker14);
179
- var _a14;
180
- var UnsupportedFunctionalityError = class extends AISDKError {
181
- constructor({
182
- functionality,
183
- message = `'${functionality}' functionality not supported.`
184
- }) {
185
- super({ name: name13, message });
186
- this[_a14] = true;
187
- this.functionality = functionality;
188
- }
189
- static isInstance(error) {
190
- return AISDKError.hasMarker(error, marker14);
191
- }
192
- };
193
- _a14 = symbol14;
194
-
195
- // src/openai-chat-language-model.ts
196
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
197
- var import_zod2 = require("zod");
198
-
199
- // src/convert-to-openai-chat-messages.ts
200
- var import_provider_utils = require("@ai-sdk/provider-utils");
201
- function convertToOpenAIChatMessages({
202
- prompt,
203
- useLegacyFunctionCalling = false,
204
- systemMessageMode = "system"
205
- }) {
206
- const messages = [];
207
- const warnings = [];
208
- for (const { role, content } of prompt) {
209
- switch (role) {
210
- case "system": {
211
- switch (systemMessageMode) {
212
- case "system": {
213
- messages.push({ role: "system", content });
214
- break;
215
- }
216
- case "developer": {
217
- messages.push({ role: "developer", content });
218
- break;
219
- }
220
- case "remove": {
221
- warnings.push({
222
- type: "other",
223
- message: "system messages are removed for this model"
224
- });
225
- break;
226
- }
227
- default: {
228
- const _exhaustiveCheck = systemMessageMode;
229
- throw new Error(
230
- `Unsupported system message mode: ${_exhaustiveCheck}`
231
- );
232
- }
233
- }
234
- break;
235
- }
236
- case "user": {
237
- if (content.length === 1 && content[0].type === "text") {
238
- messages.push({ role: "user", content: content[0].text });
239
- break;
240
- }
241
- messages.push({
242
- role: "user",
243
- content: content.map((part, index) => {
244
- var _a15, _b, _c, _d;
245
- switch (part.type) {
246
- case "text": {
247
- return { type: "text", text: part.text };
248
- }
249
- case "image": {
250
- return {
251
- type: "image_url",
252
- image_url: {
253
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
254
- // OpenAI specific extension: image detail
255
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
256
- }
257
- };
258
- }
259
- case "file": {
260
- if (part.data instanceof URL) {
261
- throw new UnsupportedFunctionalityError({
262
- functionality: "'File content parts with URL data' functionality not supported."
263
- });
264
- }
265
- switch (part.mimeType) {
266
- case "audio/wav": {
267
- return {
268
- type: "input_audio",
269
- input_audio: { data: part.data, format: "wav" }
270
- };
271
- }
272
- case "audio/mp3":
273
- case "audio/mpeg": {
274
- return {
275
- type: "input_audio",
276
- input_audio: { data: part.data, format: "mp3" }
277
- };
278
- }
279
- case "application/pdf": {
280
- return {
281
- type: "file",
282
- file: {
283
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
284
- file_data: `data:application/pdf;base64,${part.data}`
285
- }
286
- };
287
- }
288
- default: {
289
- throw new UnsupportedFunctionalityError({
290
- functionality: `File content part type ${part.mimeType} in user messages`
291
- });
292
- }
293
- }
294
- }
295
- }
296
- })
297
- });
298
- break;
299
- }
300
- case "assistant": {
301
- let text = "";
302
- const toolCalls = [];
303
- for (const part of content) {
304
- switch (part.type) {
305
- case "text": {
306
- text += part.text;
307
- break;
308
- }
309
- case "tool-call": {
310
- toolCalls.push({
311
- id: part.toolCallId,
312
- type: "function",
313
- function: {
314
- name: part.toolName,
315
- arguments: JSON.stringify(part.args)
316
- }
317
- });
318
- break;
319
- }
320
- }
321
- }
322
- if (useLegacyFunctionCalling) {
323
- if (toolCalls.length > 1) {
324
- throw new UnsupportedFunctionalityError({
325
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
326
- });
327
- }
328
- messages.push({
329
- role: "assistant",
330
- content: text,
331
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
332
- });
333
- } else {
334
- messages.push({
335
- role: "assistant",
336
- content: text,
337
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
338
- });
339
- }
340
- break;
341
- }
342
- case "tool": {
343
- for (const toolResponse of content) {
344
- if (useLegacyFunctionCalling) {
345
- messages.push({
346
- role: "function",
347
- name: toolResponse.toolName,
348
- content: JSON.stringify(toolResponse.result)
349
- });
350
- } else {
351
- messages.push({
352
- role: "tool",
353
- tool_call_id: toolResponse.toolCallId,
354
- content: JSON.stringify(toolResponse.result)
355
- });
356
- }
357
- }
358
- break;
359
- }
360
- default: {
361
- const _exhaustiveCheck = role;
362
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
363
- }
364
- }
365
- }
366
- return { messages, warnings };
367
- }
368
-
369
- // src/map-openai-chat-logprobs.ts
370
- function mapOpenAIChatLogProbsOutput(logprobs) {
371
- var _a15, _b;
372
- return (_b = (_a15 = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a15.map(({ token, logprob, top_logprobs }) => ({
373
- token,
374
- logprob,
375
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
376
- token: token2,
377
- logprob: logprob2
378
- })) : []
379
- }))) != null ? _b : void 0;
380
- }
381
-
382
- // src/map-openai-finish-reason.ts
383
- function mapOpenAIFinishReason(finishReason) {
384
- switch (finishReason) {
385
- case "stop":
386
- return "stop";
387
- case "length":
388
- return "length";
389
- case "content_filter":
390
- return "content-filter";
391
- case "function_call":
392
- case "tool_calls":
393
- return "tool-calls";
394
- default:
395
- return "unknown";
396
- }
397
- }
398
-
399
- // src/openai-error.ts
400
- var import_zod = require("zod");
401
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
402
- var openaiErrorDataSchema = import_zod.z.object({
403
- error: import_zod.z.object({
404
- message: import_zod.z.string(),
405
- // The additional information below is handled loosely to support
406
- // OpenAI-compatible providers that have slightly different error
407
- // responses:
408
- type: import_zod.z.string().nullish(),
409
- param: import_zod.z.any().nullish(),
410
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
411
- })
412
- });
413
- var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
414
- errorSchema: openaiErrorDataSchema,
415
- errorToMessage: (data) => data.error.message
416
- });
417
-
418
- // src/get-response-metadata.ts
419
- function getResponseMetadata({
420
- id,
421
- model,
422
- created
423
- }) {
424
- return {
425
- id: id != null ? id : void 0,
426
- modelId: model != null ? model : void 0,
427
- timestamp: created != null ? new Date(created * 1e3) : void 0
428
- };
429
- }
430
-
431
- // src/openai-prepare-tools.ts
432
- function prepareTools({
433
- mode,
434
- useLegacyFunctionCalling = false,
435
- structuredOutputs
436
- }) {
437
- var _a15;
438
- const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
439
- const toolWarnings = [];
440
- if (tools == null) {
441
- return { tools: void 0, tool_choice: void 0, toolWarnings };
442
- }
443
- const toolChoice = mode.toolChoice;
444
- if (useLegacyFunctionCalling) {
445
- const openaiFunctions = [];
446
- for (const tool of tools) {
447
- if (tool.type === "provider-defined") {
448
- toolWarnings.push({ type: "unsupported-tool", tool });
449
- } else {
450
- openaiFunctions.push({
451
- name: tool.name,
452
- description: tool.description,
453
- parameters: tool.parameters
454
- });
455
- }
456
- }
457
- if (toolChoice == null) {
458
- return {
459
- functions: openaiFunctions,
460
- function_call: void 0,
461
- toolWarnings
462
- };
463
- }
464
- const type2 = toolChoice.type;
465
- switch (type2) {
466
- case "auto":
467
- case "none":
468
- case void 0:
469
- return {
470
- functions: openaiFunctions,
471
- function_call: void 0,
472
- toolWarnings
473
- };
474
- case "required":
475
- throw new UnsupportedFunctionalityError({
476
- functionality: "useLegacyFunctionCalling and toolChoice: required"
477
- });
478
- default:
479
- return {
480
- functions: openaiFunctions,
481
- function_call: { name: toolChoice.toolName },
482
- toolWarnings
483
- };
484
- }
485
- }
486
- const openaiTools = [];
487
- for (const tool of tools) {
488
- if (tool.type === "provider-defined") {
489
- toolWarnings.push({ type: "unsupported-tool", tool });
490
- } else {
491
- openaiTools.push({
492
- type: "function",
493
- function: {
494
- name: tool.name,
495
- description: tool.description,
496
- parameters: tool.parameters,
497
- strict: structuredOutputs ? true : void 0
498
- }
499
- });
500
- }
501
- }
502
- if (toolChoice == null) {
503
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
504
- }
505
- const type = toolChoice.type;
506
- switch (type) {
507
- case "auto":
508
- case "none":
509
- case "required":
510
- return { tools: openaiTools, tool_choice: type, toolWarnings };
511
- case "tool":
512
- return {
513
- tools: openaiTools,
514
- tool_choice: {
515
- type: "function",
516
- function: {
517
- name: toolChoice.toolName
518
- }
519
- },
520
- toolWarnings
521
- };
522
- default: {
523
- const _exhaustiveCheck = type;
524
- throw new UnsupportedFunctionalityError({
525
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
526
- });
527
- }
528
- }
529
- }
530
-
531
- // src/openai-chat-language-model.ts
532
- var OpenAIChatLanguageModel = class {
533
- constructor(modelId, settings, config) {
534
- this.specificationVersion = "v1";
535
- this.modelId = modelId;
536
- this.settings = settings;
537
- this.config = config;
538
- }
539
- get supportsStructuredOutputs() {
540
- var _a15;
541
- return (_a15 = this.settings.structuredOutputs) != null ? _a15 : isReasoningModel(this.modelId);
542
- }
543
- get defaultObjectGenerationMode() {
544
- if (isAudioModel(this.modelId)) {
545
- return "tool";
546
- }
547
- return this.supportsStructuredOutputs ? "json" : "tool";
548
- }
549
- get provider() {
550
- return this.config.provider;
551
- }
552
- get supportsImageUrls() {
553
- return !this.settings.downloadImages;
554
- }
555
- getArgs({
556
- mode,
557
- prompt,
558
- maxTokens,
559
- temperature,
560
- topP,
561
- topK,
562
- frequencyPenalty,
563
- presencePenalty,
564
- stopSequences,
565
- responseFormat,
566
- seed,
567
- providerMetadata
568
- }) {
569
- var _a15, _b, _c, _d, _e, _f, _g, _h;
570
- const type = mode.type;
571
- const warnings = [];
572
- if (topK != null) {
573
- warnings.push({
574
- type: "unsupported-setting",
575
- setting: "topK"
576
- });
577
- }
578
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
579
- warnings.push({
580
- type: "unsupported-setting",
581
- setting: "responseFormat",
582
- details: "JSON response format schema is only supported with structuredOutputs"
583
- });
584
- }
585
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
586
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
587
- throw new UnsupportedFunctionalityError({
588
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
589
- });
590
- }
591
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
592
- throw new UnsupportedFunctionalityError({
593
- functionality: "structuredOutputs with useLegacyFunctionCalling"
594
- });
595
- }
596
- const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
597
- {
598
- prompt,
599
- useLegacyFunctionCalling,
600
- systemMessageMode: getSystemMessageMode(this.modelId)
601
- }
602
- );
603
- warnings.push(...messageWarnings);
604
- const baseArgs = {
605
- // model id:
606
- model: this.modelId,
607
- // model specific settings:
608
- logit_bias: this.settings.logitBias,
609
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
610
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
611
- user: this.settings.user,
612
- parallel_tool_calls: this.settings.parallelToolCalls,
613
- // standardized settings:
614
- max_tokens: maxTokens,
615
- temperature,
616
- top_p: topP,
617
- frequency_penalty: frequencyPenalty,
618
- presence_penalty: presencePenalty,
619
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
620
- type: "json_schema",
621
- json_schema: {
622
- schema: responseFormat.schema,
623
- strict: true,
624
- name: (_a15 = responseFormat.name) != null ? _a15 : "response",
625
- description: responseFormat.description
626
- }
627
- } : { type: "json_object" } : void 0,
628
- stop: stopSequences,
629
- seed,
630
- // openai specific settings:
631
- // TODO remove in next major version; we auto-map maxTokens now
632
- max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
633
- store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
634
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
635
- prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
636
- reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
637
- // messages:
638
- messages
639
- };
640
- if (isReasoningModel(this.modelId)) {
641
- if (baseArgs.temperature != null) {
642
- baseArgs.temperature = void 0;
643
- warnings.push({
644
- type: "unsupported-setting",
645
- setting: "temperature",
646
- details: "temperature is not supported for reasoning models"
647
- });
648
- }
649
- if (baseArgs.top_p != null) {
650
- baseArgs.top_p = void 0;
651
- warnings.push({
652
- type: "unsupported-setting",
653
- setting: "topP",
654
- details: "topP is not supported for reasoning models"
655
- });
656
- }
657
- if (baseArgs.frequency_penalty != null) {
658
- baseArgs.frequency_penalty = void 0;
659
- warnings.push({
660
- type: "unsupported-setting",
661
- setting: "frequencyPenalty",
662
- details: "frequencyPenalty is not supported for reasoning models"
663
- });
664
- }
665
- if (baseArgs.presence_penalty != null) {
666
- baseArgs.presence_penalty = void 0;
667
- warnings.push({
668
- type: "unsupported-setting",
669
- setting: "presencePenalty",
670
- details: "presencePenalty is not supported for reasoning models"
671
- });
672
- }
673
- if (baseArgs.logit_bias != null) {
674
- baseArgs.logit_bias = void 0;
675
- warnings.push({
676
- type: "other",
677
- message: "logitBias is not supported for reasoning models"
678
- });
679
- }
680
- if (baseArgs.logprobs != null) {
681
- baseArgs.logprobs = void 0;
682
- warnings.push({
683
- type: "other",
684
- message: "logprobs is not supported for reasoning models"
685
- });
686
- }
687
- if (baseArgs.top_logprobs != null) {
688
- baseArgs.top_logprobs = void 0;
689
- warnings.push({
690
- type: "other",
691
- message: "topLogprobs is not supported for reasoning models"
692
- });
693
- }
694
- if (baseArgs.max_tokens != null) {
695
- if (baseArgs.max_completion_tokens == null) {
696
- baseArgs.max_completion_tokens = baseArgs.max_tokens;
697
- }
698
- baseArgs.max_tokens = void 0;
699
- }
700
- } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
701
- if (baseArgs.temperature != null) {
702
- baseArgs.temperature = void 0;
703
- warnings.push({
704
- type: "unsupported-setting",
705
- setting: "temperature",
706
- details: "temperature is not supported for the search preview models and has been removed."
707
- });
708
- }
709
- }
710
- switch (type) {
711
- case "regular": {
712
- const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
713
- mode,
714
- useLegacyFunctionCalling,
715
- structuredOutputs: this.supportsStructuredOutputs
716
- });
717
- return {
718
- args: {
719
- ...baseArgs,
720
- tools,
721
- tool_choice,
722
- functions,
723
- function_call
724
- },
725
- warnings: [...warnings, ...toolWarnings]
726
- };
727
- }
728
- case "object-json": {
729
- return {
730
- args: {
731
- ...baseArgs,
732
- response_format: this.supportsStructuredOutputs && mode.schema != null ? {
733
- type: "json_schema",
734
- json_schema: {
735
- schema: mode.schema,
736
- strict: true,
737
- name: (_h = mode.name) != null ? _h : "response",
738
- description: mode.description
739
- }
740
- } : { type: "json_object" }
741
- },
742
- warnings
743
- };
744
- }
745
- case "object-tool": {
746
- return {
747
- args: useLegacyFunctionCalling ? {
748
- ...baseArgs,
749
- function_call: {
750
- name: mode.tool.name
751
- },
752
- functions: [
753
- {
754
- name: mode.tool.name,
755
- description: mode.tool.description,
756
- parameters: mode.tool.parameters
757
- }
758
- ]
759
- } : {
760
- ...baseArgs,
761
- tool_choice: {
762
- type: "function",
763
- function: { name: mode.tool.name }
764
- },
765
- tools: [
766
- {
767
- type: "function",
768
- function: {
769
- name: mode.tool.name,
770
- description: mode.tool.description,
771
- parameters: mode.tool.parameters,
772
- strict: this.supportsStructuredOutputs ? true : void 0
773
- }
774
- }
775
- ]
776
- },
777
- warnings
778
- };
779
- }
780
- default: {
781
- const _exhaustiveCheck = type;
782
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
783
- }
784
- }
785
- }
786
- async doGenerate(options) {
787
- var _a15, _b, _c, _d, _e, _f, _g, _h;
788
- const { args: body, warnings } = this.getArgs(options);
789
- const {
790
- responseHeaders,
791
- value: response,
792
- rawValue: rawResponse
793
- } = await (0, import_provider_utils3.postJsonToApi)({
794
- url: this.config.url({
795
- path: "/chat/completions",
796
- modelId: this.modelId
797
- }),
798
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
799
- body,
800
- failedResponseHandler: openaiFailedResponseHandler,
801
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
802
- openaiChatResponseSchema
803
- ),
804
- abortSignal: options.abortSignal,
805
- fetch: this.config.fetch
806
- });
807
- const { messages: rawPrompt, ...rawSettings } = body;
808
- const choice = response.choices[0];
809
- const completionTokenDetails = (_a15 = response.usage) == null ? void 0 : _a15.completion_tokens_details;
810
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
811
- const providerMetadata = { openai: {} };
812
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
813
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
814
- }
815
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
816
- providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
817
- }
818
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
819
- providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
820
- }
821
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
822
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
823
- }
824
- return {
825
- text: (_c = choice.message.content) != null ? _c : void 0,
826
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
827
- {
828
- toolCallType: "function",
829
- toolCallId: (0, import_provider_utils3.generateId)(),
830
- toolName: choice.message.function_call.name,
831
- args: choice.message.function_call.arguments
832
- }
833
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
834
- var _a16;
835
- return {
836
- toolCallType: "function",
837
- toolCallId: (_a16 = toolCall.id) != null ? _a16 : (0, import_provider_utils3.generateId)(),
838
- toolName: toolCall.function.name,
839
- args: toolCall.function.arguments
840
- };
841
- }),
842
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
843
- usage: {
844
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
845
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
846
- },
847
- rawCall: { rawPrompt, rawSettings },
848
- rawResponse: { headers: responseHeaders, body: rawResponse },
849
- request: { body: JSON.stringify(body) },
850
- response: getResponseMetadata(response),
851
- warnings,
852
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
853
- providerMetadata
854
- };
855
- }
856
- async doStream(options) {
857
- if (this.settings.simulateStreaming) {
858
- const result = await this.doGenerate(options);
859
- const simulatedStream = new ReadableStream({
860
- start(controller) {
861
- controller.enqueue({ type: "response-metadata", ...result.response });
862
- if (result.text) {
863
- controller.enqueue({
864
- type: "text-delta",
865
- textDelta: result.text
866
- });
867
- }
868
- if (result.toolCalls) {
869
- for (const toolCall of result.toolCalls) {
870
- controller.enqueue({
871
- type: "tool-call-delta",
872
- toolCallType: "function",
873
- toolCallId: toolCall.toolCallId,
874
- toolName: toolCall.toolName,
875
- argsTextDelta: toolCall.args
876
- });
877
- controller.enqueue({
878
- type: "tool-call",
879
- ...toolCall
880
- });
881
- }
882
- }
883
- controller.enqueue({
884
- type: "finish",
885
- finishReason: result.finishReason,
886
- usage: result.usage,
887
- logprobs: result.logprobs,
888
- providerMetadata: result.providerMetadata
889
- });
890
- controller.close();
891
- }
892
- });
893
- return {
894
- stream: simulatedStream,
895
- rawCall: result.rawCall,
896
- rawResponse: result.rawResponse,
897
- warnings: result.warnings
898
- };
899
- }
900
- const { args, warnings } = this.getArgs(options);
901
- const body = {
902
- ...args,
903
- stream: true,
904
- // only include stream_options when in strict compatibility mode:
905
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
906
- };
907
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
908
- url: this.config.url({
909
- path: "/chat/completions",
910
- modelId: this.modelId
911
- }),
912
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
913
- body,
914
- failedResponseHandler: openaiFailedResponseHandler,
915
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
916
- openaiChatChunkSchema
917
- ),
918
- abortSignal: options.abortSignal,
919
- fetch: this.config.fetch
920
- });
921
- const { messages: rawPrompt, ...rawSettings } = args;
922
- const toolCalls = [];
923
- let finishReason = "unknown";
924
- let usage = {
925
- promptTokens: void 0,
926
- completionTokens: void 0
927
- };
928
- let logprobs;
929
- let isFirstChunk = true;
930
- const { useLegacyFunctionCalling } = this.settings;
931
- const providerMetadata = { openai: {} };
932
- return {
933
- stream: response.pipeThrough(
934
- new TransformStream({
935
- transform(chunk, controller) {
936
- var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
937
- if (!chunk.success) {
938
- finishReason = "error";
939
- controller.enqueue({ type: "error", error: chunk.error });
940
- return;
941
- }
942
- const value = chunk.value;
943
- if ("error" in value) {
944
- finishReason = "error";
945
- controller.enqueue({ type: "error", error: value.error });
946
- return;
947
- }
948
- if (isFirstChunk) {
949
- isFirstChunk = false;
950
- controller.enqueue({
951
- type: "response-metadata",
952
- ...getResponseMetadata(value)
953
- });
954
- }
955
- if (value.usage != null) {
956
- const {
957
- prompt_tokens,
958
- completion_tokens,
959
- prompt_tokens_details,
960
- completion_tokens_details
961
- } = value.usage;
962
- usage = {
963
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
964
- completionTokens: completion_tokens != null ? completion_tokens : void 0
965
- };
966
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
967
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
968
- }
969
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
970
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
971
- }
972
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
973
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
974
- }
975
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
976
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
977
- }
978
- }
979
- const choice = value.choices[0];
980
- if ((choice == null ? void 0 : choice.finish_reason) != null) {
981
- finishReason = mapOpenAIFinishReason(choice.finish_reason);
982
- }
983
- if ((choice == null ? void 0 : choice.delta) == null) {
984
- return;
985
- }
986
- const delta = choice.delta;
987
- if (delta.content != null) {
988
- controller.enqueue({
989
- type: "text-delta",
990
- textDelta: delta.content
991
- });
992
- }
993
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
994
- choice == null ? void 0 : choice.logprobs
995
- );
996
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
997
- if (logprobs === void 0) logprobs = [];
998
- logprobs.push(...mappedLogprobs);
999
- }
1000
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
1001
- {
1002
- type: "function",
1003
- id: (0, import_provider_utils3.generateId)(),
1004
- function: delta.function_call,
1005
- index: 0
1006
- }
1007
- ] : delta.tool_calls;
1008
- if (mappedToolCalls != null) {
1009
- for (const toolCallDelta of mappedToolCalls) {
1010
- const index = toolCallDelta.index;
1011
- if (toolCalls[index] == null) {
1012
- if (toolCallDelta.type !== "function") {
1013
- throw new InvalidResponseDataError({
1014
- data: toolCallDelta,
1015
- message: `Expected 'function' type.`
1016
- });
1017
- }
1018
- if (toolCallDelta.id == null) {
1019
- throw new InvalidResponseDataError({
1020
- data: toolCallDelta,
1021
- message: `Expected 'id' to be a string.`
1022
- });
1023
- }
1024
- if (((_a15 = toolCallDelta.function) == null ? void 0 : _a15.name) == null) {
1025
- throw new InvalidResponseDataError({
1026
- data: toolCallDelta,
1027
- message: `Expected 'function.name' to be a string.`
1028
- });
1029
- }
1030
- toolCalls[index] = {
1031
- id: toolCallDelta.id,
1032
- type: "function",
1033
- function: {
1034
- name: toolCallDelta.function.name,
1035
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
1036
- },
1037
- hasFinished: false
1038
- };
1039
- const toolCall2 = toolCalls[index];
1040
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
1041
- if (toolCall2.function.arguments.length > 0) {
1042
- controller.enqueue({
1043
- type: "tool-call-delta",
1044
- toolCallType: "function",
1045
- toolCallId: toolCall2.id,
1046
- toolName: toolCall2.function.name,
1047
- argsTextDelta: toolCall2.function.arguments
1048
- });
1049
- }
1050
- if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
1051
- controller.enqueue({
1052
- type: "tool-call",
1053
- toolCallType: "function",
1054
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
1055
- toolName: toolCall2.function.name,
1056
- args: toolCall2.function.arguments
1057
- });
1058
- toolCall2.hasFinished = true;
1059
- }
1060
- }
1061
- continue;
1062
- }
1063
- const toolCall = toolCalls[index];
1064
- if (toolCall.hasFinished) {
1065
- continue;
1066
- }
1067
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
1068
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
1069
- }
1070
- controller.enqueue({
1071
- type: "tool-call-delta",
1072
- toolCallType: "function",
1073
- toolCallId: toolCall.id,
1074
- toolName: toolCall.function.name,
1075
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
1076
- });
1077
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
1078
- controller.enqueue({
1079
- type: "tool-call",
1080
- toolCallType: "function",
1081
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
1082
- toolName: toolCall.function.name,
1083
- args: toolCall.function.arguments
1084
- });
1085
- toolCall.hasFinished = true;
1086
- }
1087
- }
1088
- }
1089
- },
1090
- flush(controller) {
1091
- var _a15, _b;
1092
- controller.enqueue({
1093
- type: "finish",
1094
- finishReason,
1095
- logprobs,
1096
- usage: {
1097
- promptTokens: (_a15 = usage.promptTokens) != null ? _a15 : NaN,
1098
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
1099
- },
1100
- ...providerMetadata != null ? { providerMetadata } : {}
1101
- });
1102
- }
1103
- })
1104
- ),
1105
- rawCall: { rawPrompt, rawSettings },
1106
- rawResponse: { headers: responseHeaders },
1107
- request: { body: JSON.stringify(body) },
1108
- warnings
1109
- };
1110
- }
1111
- };
1112
- var openaiTokenUsageSchema = import_zod2.z.object({
1113
- prompt_tokens: import_zod2.z.number().nullish(),
1114
- completion_tokens: import_zod2.z.number().nullish(),
1115
- prompt_tokens_details: import_zod2.z.object({
1116
- cached_tokens: import_zod2.z.number().nullish()
1117
- }).nullish(),
1118
- completion_tokens_details: import_zod2.z.object({
1119
- reasoning_tokens: import_zod2.z.number().nullish(),
1120
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
1121
- rejected_prediction_tokens: import_zod2.z.number().nullish()
1122
- }).nullish()
1123
- }).nullish();
1124
- var openaiChatResponseSchema = import_zod2.z.object({
1125
- id: import_zod2.z.string().nullish(),
1126
- created: import_zod2.z.number().nullish(),
1127
- model: import_zod2.z.string().nullish(),
1128
- choices: import_zod2.z.array(
1129
- import_zod2.z.object({
1130
- message: import_zod2.z.object({
1131
- role: import_zod2.z.literal("assistant").nullish(),
1132
- content: import_zod2.z.string().nullish(),
1133
- function_call: import_zod2.z.object({
1134
- arguments: import_zod2.z.string(),
1135
- name: import_zod2.z.string()
1136
- }).nullish(),
1137
- tool_calls: import_zod2.z.array(
1138
- import_zod2.z.object({
1139
- id: import_zod2.z.string().nullish(),
1140
- type: import_zod2.z.literal("function"),
1141
- function: import_zod2.z.object({
1142
- name: import_zod2.z.string(),
1143
- arguments: import_zod2.z.string()
1144
- })
1145
- })
1146
- ).nullish()
1147
- }),
1148
- index: import_zod2.z.number(),
1149
- logprobs: import_zod2.z.object({
1150
- content: import_zod2.z.array(
1151
- import_zod2.z.object({
1152
- token: import_zod2.z.string(),
1153
- logprob: import_zod2.z.number(),
1154
- top_logprobs: import_zod2.z.array(
1155
- import_zod2.z.object({
1156
- token: import_zod2.z.string(),
1157
- logprob: import_zod2.z.number()
1158
- })
1159
- )
1160
- })
1161
- ).nullable()
1162
- }).nullish(),
1163
- finish_reason: import_zod2.z.string().nullish()
1164
- })
1165
- ),
1166
- usage: openaiTokenUsageSchema
1167
- });
1168
- var openaiChatChunkSchema = import_zod2.z.union([
1169
- import_zod2.z.object({
1170
- id: import_zod2.z.string().nullish(),
1171
- created: import_zod2.z.number().nullish(),
1172
- model: import_zod2.z.string().nullish(),
1173
- choices: import_zod2.z.array(
1174
- import_zod2.z.object({
1175
- delta: import_zod2.z.object({
1176
- role: import_zod2.z.enum(["assistant"]).nullish(),
1177
- content: import_zod2.z.string().nullish(),
1178
- function_call: import_zod2.z.object({
1179
- name: import_zod2.z.string().optional(),
1180
- arguments: import_zod2.z.string().optional()
1181
- }).nullish(),
1182
- tool_calls: import_zod2.z.array(
1183
- import_zod2.z.object({
1184
- index: import_zod2.z.number(),
1185
- id: import_zod2.z.string().nullish(),
1186
- type: import_zod2.z.literal("function").nullish(),
1187
- function: import_zod2.z.object({
1188
- name: import_zod2.z.string().nullish(),
1189
- arguments: import_zod2.z.string().nullish()
1190
- })
1191
- })
1192
- ).nullish()
1193
- }).nullish(),
1194
- logprobs: import_zod2.z.object({
1195
- content: import_zod2.z.array(
1196
- import_zod2.z.object({
1197
- token: import_zod2.z.string(),
1198
- logprob: import_zod2.z.number(),
1199
- top_logprobs: import_zod2.z.array(
1200
- import_zod2.z.object({
1201
- token: import_zod2.z.string(),
1202
- logprob: import_zod2.z.number()
1203
- })
1204
- )
1205
- })
1206
- ).nullable()
1207
- }).nullish(),
1208
- finish_reason: import_zod2.z.string().nullish(),
1209
- index: import_zod2.z.number()
1210
- })
1211
- ),
1212
- usage: openaiTokenUsageSchema
1213
- }),
1214
- openaiErrorDataSchema
1215
- ]);
1216
- function isReasoningModel(modelId) {
1217
- return modelId.startsWith("o");
1218
- }
1219
- function isAudioModel(modelId) {
1220
- return modelId.startsWith("gpt-4o-audio-preview");
1221
- }
1222
- function getSystemMessageMode(modelId) {
1223
- var _a15, _b;
1224
- if (!isReasoningModel(modelId)) {
1225
- return "system";
1226
- }
1227
- return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
1228
- }
1229
- var reasoningModels = {
1230
- "o1-mini": {
1231
- systemMessageMode: "remove"
1232
- },
1233
- "o1-mini-2024-09-12": {
1234
- systemMessageMode: "remove"
1235
- },
1236
- "o1-preview": {
1237
- systemMessageMode: "remove"
1238
- },
1239
- "o1-preview-2024-09-12": {
1240
- systemMessageMode: "remove"
1241
- },
1242
- o3: {
1243
- systemMessageMode: "developer"
1244
- },
1245
- "o3-2025-04-16": {
1246
- systemMessageMode: "developer"
1247
- },
1248
- "o3-mini": {
1249
- systemMessageMode: "developer"
1250
- },
1251
- "o3-mini-2025-01-31": {
1252
- systemMessageMode: "developer"
1253
- },
1254
- "o4-mini": {
1255
- systemMessageMode: "developer"
1256
- },
1257
- "o4-mini-2025-04-16": {
1258
- systemMessageMode: "developer"
1259
- }
1260
- };
1261
-
1262
- // src/openai-completion-language-model.ts
1263
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
1264
- var import_zod3 = require("zod");
1265
-
1266
- // src/convert-to-openai-completion-prompt.ts
1267
- function convertToOpenAICompletionPrompt({
1268
- prompt,
1269
- inputFormat,
1270
- user = "user",
1271
- assistant = "assistant"
1272
- }) {
1273
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
1274
- return { prompt: prompt[0].content[0].text };
1275
- }
1276
- let text = "";
1277
- if (prompt[0].role === "system") {
1278
- text += `${prompt[0].content}
1279
-
1280
- `;
1281
- prompt = prompt.slice(1);
1282
- }
1283
- for (const { role, content } of prompt) {
1284
- switch (role) {
1285
- case "system": {
1286
- throw new InvalidPromptError({
1287
- message: "Unexpected system message in prompt: ${content}",
1288
- prompt
1289
- });
1290
- }
1291
- case "user": {
1292
- const userMessage = content.map((part) => {
1293
- switch (part.type) {
1294
- case "text": {
1295
- return part.text;
1296
- }
1297
- case "image": {
1298
- throw new UnsupportedFunctionalityError({
1299
- functionality: "images"
1300
- });
1301
- }
1302
- }
1303
- }).join("");
1304
- text += `${user}:
1305
- ${userMessage}
1306
-
1307
- `;
1308
- break;
1309
- }
1310
- case "assistant": {
1311
- const assistantMessage = content.map((part) => {
1312
- switch (part.type) {
1313
- case "text": {
1314
- return part.text;
1315
- }
1316
- case "tool-call": {
1317
- throw new UnsupportedFunctionalityError({
1318
- functionality: "tool-call messages"
1319
- });
1320
- }
1321
- }
1322
- }).join("");
1323
- text += `${assistant}:
1324
- ${assistantMessage}
1325
-
1326
- `;
1327
- break;
1328
- }
1329
- case "tool": {
1330
- throw new UnsupportedFunctionalityError({
1331
- functionality: "tool messages"
1332
- });
1333
- }
1334
- default: {
1335
- const _exhaustiveCheck = role;
1336
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1337
- }
1338
- }
1339
- }
1340
- text += `${assistant}:
1341
- `;
1342
- return {
1343
- prompt: text,
1344
- stopSequences: [`
1345
- ${user}:`]
1346
- };
1347
- }
1348
-
1349
- // src/map-openai-completion-logprobs.ts
1350
- function mapOpenAICompletionLogProbs(logprobs) {
1351
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
1352
- token,
1353
- logprob: logprobs.token_logprobs[index],
1354
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
1355
- ([token2, logprob]) => ({
1356
- token: token2,
1357
- logprob
1358
- })
1359
- ) : []
1360
- }));
1361
- }
1362
-
1363
- // src/openai-completion-language-model.ts
1364
- var OpenAICompletionLanguageModel = class {
1365
- constructor(modelId, settings, config) {
1366
- this.specificationVersion = "v1";
1367
- this.defaultObjectGenerationMode = void 0;
1368
- this.modelId = modelId;
1369
- this.settings = settings;
1370
- this.config = config;
1371
- }
1372
- get provider() {
1373
- return this.config.provider;
1374
- }
1375
- getArgs({
1376
- mode,
1377
- inputFormat,
1378
- prompt,
1379
- maxTokens,
1380
- temperature,
1381
- topP,
1382
- topK,
1383
- frequencyPenalty,
1384
- presencePenalty,
1385
- stopSequences: userStopSequences,
1386
- responseFormat,
1387
- seed
1388
- }) {
1389
- var _a15;
1390
- const type = mode.type;
1391
- const warnings = [];
1392
- if (topK != null) {
1393
- warnings.push({
1394
- type: "unsupported-setting",
1395
- setting: "topK"
1396
- });
1397
- }
1398
- if (responseFormat != null && responseFormat.type !== "text") {
1399
- warnings.push({
1400
- type: "unsupported-setting",
1401
- setting: "responseFormat",
1402
- details: "JSON response format is not supported."
1403
- });
1404
- }
1405
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
1406
- const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1407
- const baseArgs = {
1408
- // model id:
1409
- model: this.modelId,
1410
- // model specific settings:
1411
- echo: this.settings.echo,
1412
- logit_bias: this.settings.logitBias,
1413
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1414
- suffix: this.settings.suffix,
1415
- user: this.settings.user,
1416
- // standardized settings:
1417
- max_tokens: maxTokens,
1418
- temperature,
1419
- top_p: topP,
1420
- frequency_penalty: frequencyPenalty,
1421
- presence_penalty: presencePenalty,
1422
- seed,
1423
- // prompt:
1424
- prompt: completionPrompt,
1425
- // stop sequences:
1426
- stop: stop.length > 0 ? stop : void 0
1427
- };
1428
- switch (type) {
1429
- case "regular": {
1430
- if ((_a15 = mode.tools) == null ? void 0 : _a15.length) {
1431
- throw new UnsupportedFunctionalityError({
1432
- functionality: "tools"
1433
- });
1434
- }
1435
- if (mode.toolChoice) {
1436
- throw new UnsupportedFunctionalityError({
1437
- functionality: "toolChoice"
1438
- });
1439
- }
1440
- return { args: baseArgs, warnings };
1441
- }
1442
- case "object-json": {
1443
- throw new UnsupportedFunctionalityError({
1444
- functionality: "object-json mode"
1445
- });
1446
- }
1447
- case "object-tool": {
1448
- throw new UnsupportedFunctionalityError({
1449
- functionality: "object-tool mode"
1450
- });
1451
- }
1452
- default: {
1453
- const _exhaustiveCheck = type;
1454
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
1455
- }
1456
- }
1457
- }
1458
- async doGenerate(options) {
1459
- const { args, warnings } = this.getArgs(options);
1460
- const {
1461
- responseHeaders,
1462
- value: response,
1463
- rawValue: rawResponse
1464
- } = await (0, import_provider_utils4.postJsonToApi)({
1465
- url: this.config.url({
1466
- path: "/completions",
1467
- modelId: this.modelId
1468
- }),
1469
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1470
- body: args,
1471
- failedResponseHandler: openaiFailedResponseHandler,
1472
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
1473
- openaiCompletionResponseSchema
1474
- ),
1475
- abortSignal: options.abortSignal,
1476
- fetch: this.config.fetch
1477
- });
1478
- const { prompt: rawPrompt, ...rawSettings } = args;
1479
- const choice = response.choices[0];
1480
- return {
1481
- text: choice.text,
1482
- usage: {
1483
- promptTokens: response.usage.prompt_tokens,
1484
- completionTokens: response.usage.completion_tokens
1485
- },
1486
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
1487
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
1488
- rawCall: { rawPrompt, rawSettings },
1489
- rawResponse: { headers: responseHeaders, body: rawResponse },
1490
- response: getResponseMetadata(response),
1491
- warnings,
1492
- request: { body: JSON.stringify(args) }
1493
- };
1494
- }
1495
- async doStream(options) {
1496
- const { args, warnings } = this.getArgs(options);
1497
- const body = {
1498
- ...args,
1499
- stream: true,
1500
- // only include stream_options when in strict compatibility mode:
1501
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1502
- };
1503
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1504
- url: this.config.url({
1505
- path: "/completions",
1506
- modelId: this.modelId
1507
- }),
1508
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1509
- body,
1510
- failedResponseHandler: openaiFailedResponseHandler,
1511
- successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
1512
- openaiCompletionChunkSchema
1513
- ),
1514
- abortSignal: options.abortSignal,
1515
- fetch: this.config.fetch
1516
- });
1517
- const { prompt: rawPrompt, ...rawSettings } = args;
1518
- let finishReason = "unknown";
1519
- let usage = {
1520
- promptTokens: Number.NaN,
1521
- completionTokens: Number.NaN
1522
- };
1523
- let logprobs;
1524
- let isFirstChunk = true;
1525
- return {
1526
- stream: response.pipeThrough(
1527
- new TransformStream({
1528
- transform(chunk, controller) {
1529
- if (!chunk.success) {
1530
- finishReason = "error";
1531
- controller.enqueue({ type: "error", error: chunk.error });
1532
- return;
1533
- }
1534
- const value = chunk.value;
1535
- if ("error" in value) {
1536
- finishReason = "error";
1537
- controller.enqueue({ type: "error", error: value.error });
1538
- return;
1539
- }
1540
- if (isFirstChunk) {
1541
- isFirstChunk = false;
1542
- controller.enqueue({
1543
- type: "response-metadata",
1544
- ...getResponseMetadata(value)
1545
- });
1546
- }
1547
- if (value.usage != null) {
1548
- usage = {
1549
- promptTokens: value.usage.prompt_tokens,
1550
- completionTokens: value.usage.completion_tokens
1551
- };
1552
- }
1553
- const choice = value.choices[0];
1554
- if ((choice == null ? void 0 : choice.finish_reason) != null) {
1555
- finishReason = mapOpenAIFinishReason(choice.finish_reason);
1556
- }
1557
- if ((choice == null ? void 0 : choice.text) != null) {
1558
- controller.enqueue({
1559
- type: "text-delta",
1560
- textDelta: choice.text
1561
- });
1562
- }
1563
- const mappedLogprobs = mapOpenAICompletionLogProbs(
1564
- choice == null ? void 0 : choice.logprobs
1565
- );
1566
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1567
- if (logprobs === void 0) logprobs = [];
1568
- logprobs.push(...mappedLogprobs);
1569
- }
1570
- },
1571
- flush(controller) {
1572
- controller.enqueue({
1573
- type: "finish",
1574
- finishReason,
1575
- logprobs,
1576
- usage
1577
- });
1578
- }
1579
- })
1580
- ),
1581
- rawCall: { rawPrompt, rawSettings },
1582
- rawResponse: { headers: responseHeaders },
1583
- warnings,
1584
- request: { body: JSON.stringify(body) }
1585
- };
1586
- }
1587
- };
1588
- var openaiCompletionResponseSchema = import_zod3.z.object({
1589
- id: import_zod3.z.string().nullish(),
1590
- created: import_zod3.z.number().nullish(),
1591
- model: import_zod3.z.string().nullish(),
1592
- choices: import_zod3.z.array(
1593
- import_zod3.z.object({
1594
- text: import_zod3.z.string(),
1595
- finish_reason: import_zod3.z.string(),
1596
- logprobs: import_zod3.z.object({
1597
- tokens: import_zod3.z.array(import_zod3.z.string()),
1598
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1599
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1600
- }).nullish()
1601
- })
1602
- ),
1603
- usage: import_zod3.z.object({
1604
- prompt_tokens: import_zod3.z.number(),
1605
- completion_tokens: import_zod3.z.number()
1606
- })
1607
- });
1608
- var openaiCompletionChunkSchema = import_zod3.z.union([
1609
- import_zod3.z.object({
1610
- id: import_zod3.z.string().nullish(),
1611
- created: import_zod3.z.number().nullish(),
1612
- model: import_zod3.z.string().nullish(),
1613
- choices: import_zod3.z.array(
1614
- import_zod3.z.object({
1615
- text: import_zod3.z.string(),
1616
- finish_reason: import_zod3.z.string().nullish(),
1617
- index: import_zod3.z.number(),
1618
- logprobs: import_zod3.z.object({
1619
- tokens: import_zod3.z.array(import_zod3.z.string()),
1620
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1621
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1622
- }).nullish()
1623
- })
1624
- ),
1625
- usage: import_zod3.z.object({
1626
- prompt_tokens: import_zod3.z.number(),
1627
- completion_tokens: import_zod3.z.number()
1628
- }).nullish()
1629
- }),
1630
- openaiErrorDataSchema
1631
- ]);
1632
-
1633
- // src/openai-embedding-model.ts
1634
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
1635
- var import_zod4 = require("zod");
1636
- var OpenAIEmbeddingModel = class {
1637
- constructor(modelId, settings, config) {
1638
- this.specificationVersion = "v1";
1639
- this.modelId = modelId;
1640
- this.settings = settings;
1641
- this.config = config;
1642
- }
1643
- get provider() {
1644
- return this.config.provider;
1645
- }
1646
- get maxEmbeddingsPerCall() {
1647
- var _a15;
1648
- return (_a15 = this.settings.maxEmbeddingsPerCall) != null ? _a15 : 2048;
1649
- }
1650
- get supportsParallelCalls() {
1651
- var _a15;
1652
- return (_a15 = this.settings.supportsParallelCalls) != null ? _a15 : true;
1653
- }
1654
- async doEmbed({
1655
- values,
1656
- headers,
1657
- abortSignal
1658
- }) {
1659
- if (values.length > this.maxEmbeddingsPerCall) {
1660
- throw new TooManyEmbeddingValuesForCallError({
1661
- provider: this.provider,
1662
- modelId: this.modelId,
1663
- maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1664
- values
1665
- });
1666
- }
1667
- const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
1668
- url: this.config.url({
1669
- path: "/embeddings",
1670
- modelId: this.modelId
1671
- }),
1672
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
1673
- body: {
1674
- model: this.modelId,
1675
- input: values,
1676
- encoding_format: "float",
1677
- dimensions: this.settings.dimensions,
1678
- user: this.settings.user
1679
- },
1680
- failedResponseHandler: openaiFailedResponseHandler,
1681
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
1682
- openaiTextEmbeddingResponseSchema
1683
- ),
1684
- abortSignal,
1685
- fetch: this.config.fetch
1686
- });
1687
- return {
1688
- embeddings: response.data.map((item) => item.embedding),
1689
- usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1690
- rawResponse: { headers: responseHeaders }
1691
- };
1692
- }
1693
- };
1694
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
1695
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
1696
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
1697
- });
1698
-
1699
- // src/openai-image-model.ts
1700
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
1701
- var import_zod5 = require("zod");
1702
-
1703
- // src/openai-image-settings.ts
1704
- var modelMaxImagesPerCall = {
1705
- "dall-e-3": 1,
1706
- "dall-e-2": 10,
1707
- "gpt-image-1": 10
1708
- };
1709
- var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1710
-
1711
- // src/openai-image-model.ts
1712
- var OpenAIImageModel = class {
1713
- constructor(modelId, settings, config) {
1714
- this.modelId = modelId;
1715
- this.settings = settings;
1716
- this.config = config;
1717
- this.specificationVersion = "v1";
1718
- }
1719
- get maxImagesPerCall() {
1720
- var _a15, _b;
1721
- return (_b = (_a15 = this.settings.maxImagesPerCall) != null ? _a15 : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
1722
- }
1723
- get provider() {
1724
- return this.config.provider;
1725
- }
1726
- async doGenerate({
1727
- prompt,
1728
- n,
1729
- size,
1730
- aspectRatio,
1731
- seed,
1732
- providerOptions,
1733
- headers,
1734
- abortSignal
1735
- }) {
1736
- var _a15, _b, _c, _d;
1737
- const warnings = [];
1738
- if (aspectRatio != null) {
1739
- warnings.push({
1740
- type: "unsupported-setting",
1741
- setting: "aspectRatio",
1742
- details: "This model does not support aspect ratio. Use `size` instead."
1743
- });
1744
- }
1745
- if (seed != null) {
1746
- warnings.push({ type: "unsupported-setting", setting: "seed" });
1747
- }
1748
- const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
1749
- const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
1750
- url: this.config.url({
1751
- path: "/images/generations",
1752
- modelId: this.modelId
1753
- }),
1754
- headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
1755
- body: {
1756
- model: this.modelId,
1757
- prompt,
1758
- n,
1759
- size,
1760
- ...(_d = providerOptions.openai) != null ? _d : {},
1761
- ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1762
- },
1763
- failedResponseHandler: openaiFailedResponseHandler,
1764
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
1765
- openaiImageResponseSchema
1766
- ),
1767
- abortSignal,
1768
- fetch: this.config.fetch
1769
- });
1770
- return {
1771
- images: response.data.map((item) => item.b64_json),
1772
- warnings,
1773
- response: {
1774
- timestamp: currentDate,
1775
- modelId: this.modelId,
1776
- headers: responseHeaders
1777
- }
1778
- };
1779
- }
1780
- };
1781
- var openaiImageResponseSchema = import_zod5.z.object({
1782
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
1783
- });
1784
-
1785
- // src/openai-transcription-model.ts
1786
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
1787
- var import_zod6 = require("zod");
1788
- var openAIProviderOptionsSchema = import_zod6.z.object({
1789
- include: import_zod6.z.array(import_zod6.z.string()).nullish(),
1790
- language: import_zod6.z.string().nullish(),
1791
- prompt: import_zod6.z.string().nullish(),
1792
- temperature: import_zod6.z.number().min(0).max(1).nullish().default(0),
1793
- timestampGranularities: import_zod6.z.array(import_zod6.z.enum(["word", "segment"])).nullish().default(["segment"])
1794
- });
1795
- var languageMap = {
1796
- afrikaans: "af",
1797
- arabic: "ar",
1798
- armenian: "hy",
1799
- azerbaijani: "az",
1800
- belarusian: "be",
1801
- bosnian: "bs",
1802
- bulgarian: "bg",
1803
- catalan: "ca",
1804
- chinese: "zh",
1805
- croatian: "hr",
1806
- czech: "cs",
1807
- danish: "da",
1808
- dutch: "nl",
1809
- english: "en",
1810
- estonian: "et",
1811
- finnish: "fi",
1812
- french: "fr",
1813
- galician: "gl",
1814
- german: "de",
1815
- greek: "el",
1816
- hebrew: "he",
1817
- hindi: "hi",
1818
- hungarian: "hu",
1819
- icelandic: "is",
1820
- indonesian: "id",
1821
- italian: "it",
1822
- japanese: "ja",
1823
- kannada: "kn",
1824
- kazakh: "kk",
1825
- korean: "ko",
1826
- latvian: "lv",
1827
- lithuanian: "lt",
1828
- macedonian: "mk",
1829
- malay: "ms",
1830
- marathi: "mr",
1831
- maori: "mi",
1832
- nepali: "ne",
1833
- norwegian: "no",
1834
- persian: "fa",
1835
- polish: "pl",
1836
- portuguese: "pt",
1837
- romanian: "ro",
1838
- russian: "ru",
1839
- serbian: "sr",
1840
- slovak: "sk",
1841
- slovenian: "sl",
1842
- spanish: "es",
1843
- swahili: "sw",
1844
- swedish: "sv",
1845
- tagalog: "tl",
1846
- tamil: "ta",
1847
- thai: "th",
1848
- turkish: "tr",
1849
- ukrainian: "uk",
1850
- urdu: "ur",
1851
- vietnamese: "vi",
1852
- welsh: "cy"
1853
- };
1854
- var OpenAITranscriptionModel = class {
1855
- constructor(modelId, config) {
1856
- this.modelId = modelId;
1857
- this.config = config;
1858
- this.specificationVersion = "v1";
1859
- }
1860
- get provider() {
1861
- return this.config.provider;
1862
- }
1863
- getArgs({
1864
- audio,
1865
- mediaType,
1866
- providerOptions
1867
- }) {
1868
- var _a15, _b, _c, _d, _e;
1869
- const warnings = [];
1870
- const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
1871
- provider: "openai",
1872
- providerOptions,
1873
- schema: openAIProviderOptionsSchema
1874
- });
1875
- const formData = new FormData();
1876
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
1877
- formData.append("model", this.modelId);
1878
- formData.append("file", new File([blob], "audio", { type: mediaType }));
1879
- if (openAIOptions) {
1880
- const transcriptionModelOptions = {
1881
- include: (_a15 = openAIOptions.include) != null ? _a15 : void 0,
1882
- language: (_b = openAIOptions.language) != null ? _b : void 0,
1883
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1884
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1885
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1886
- };
1887
- for (const key in transcriptionModelOptions) {
1888
- const value = transcriptionModelOptions[key];
1889
- if (value !== void 0) {
1890
- formData.append(key, String(value));
1891
- }
1892
- }
1893
- }
1894
- return {
1895
- formData,
1896
- warnings
1897
- };
1898
- }
1899
- async doGenerate(options) {
1900
- var _a15, _b, _c, _d, _e, _f;
1901
- const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
1902
- const { formData, warnings } = this.getArgs(options);
1903
- const {
1904
- value: response,
1905
- responseHeaders,
1906
- rawValue: rawResponse
1907
- } = await (0, import_provider_utils7.postFormDataToApi)({
1908
- url: this.config.url({
1909
- path: "/audio/transcriptions",
1910
- modelId: this.modelId
1911
- }),
1912
- headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
1913
- formData,
1914
- failedResponseHandler: openaiFailedResponseHandler,
1915
- successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
1916
- openaiTranscriptionResponseSchema
1917
- ),
1918
- abortSignal: options.abortSignal,
1919
- fetch: this.config.fetch
1920
- });
1921
- const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1922
- return {
1923
- text: response.text,
1924
- segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
1925
- text: word.word,
1926
- startSecond: word.start,
1927
- endSecond: word.end
1928
- }))) != null ? _e : [],
1929
- language,
1930
- durationInSeconds: (_f = response.duration) != null ? _f : void 0,
1931
- warnings,
1932
- response: {
1933
- timestamp: currentDate,
1934
- modelId: this.modelId,
1935
- headers: responseHeaders,
1936
- body: rawResponse
1937
- }
1938
- };
1939
- }
1940
- };
1941
- var openaiTranscriptionResponseSchema = import_zod6.z.object({
1942
- text: import_zod6.z.string(),
1943
- language: import_zod6.z.string().nullish(),
1944
- duration: import_zod6.z.number().nullish(),
1945
- words: import_zod6.z.array(
1946
- import_zod6.z.object({
1947
- word: import_zod6.z.string(),
1948
- start: import_zod6.z.number(),
1949
- end: import_zod6.z.number()
1950
- })
1951
- ).nullish()
1952
- });
1953
-
1954
- // src/openai-speech-model.ts
1955
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
1956
- var import_zod7 = require("zod");
1957
- var OpenAIProviderOptionsSchema = import_zod7.z.object({
1958
- instructions: import_zod7.z.string().nullish(),
1959
- speed: import_zod7.z.number().min(0.25).max(4).default(1).nullish()
1960
- });
1961
- var OpenAISpeechModel = class {
1962
- constructor(modelId, config) {
1963
- this.modelId = modelId;
1964
- this.config = config;
1965
- this.specificationVersion = "v1";
1966
- }
1967
- get provider() {
1968
- return this.config.provider;
1969
- }
1970
- getArgs({
1971
- text,
1972
- voice = "alloy",
1973
- outputFormat = "mp3",
1974
- speed,
1975
- instructions,
1976
- providerOptions
1977
- }) {
1978
- const warnings = [];
1979
- const openAIOptions = (0, import_provider_utils8.parseProviderOptions)({
1980
- provider: "openai",
1981
- providerOptions,
1982
- schema: OpenAIProviderOptionsSchema
1983
- });
1984
- const requestBody = {
1985
- model: this.modelId,
1986
- input: text,
1987
- voice,
1988
- response_format: "mp3",
1989
- speed,
1990
- instructions
1991
- };
1992
- if (outputFormat) {
1993
- if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
1994
- requestBody.response_format = outputFormat;
1995
- } else {
1996
- warnings.push({
1997
- type: "unsupported-setting",
1998
- setting: "outputFormat",
1999
- details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
2000
- });
2001
- }
2002
- }
2003
- if (openAIOptions) {
2004
- const speechModelOptions = {};
2005
- for (const key in speechModelOptions) {
2006
- const value = speechModelOptions[key];
2007
- if (value !== void 0) {
2008
- requestBody[key] = value;
2009
- }
2010
- }
2011
- }
2012
- return {
2013
- requestBody,
2014
- warnings
2015
- };
2016
- }
2017
- async doGenerate(options) {
2018
- var _a15, _b, _c;
2019
- const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
2020
- const { requestBody, warnings } = this.getArgs(options);
2021
- const {
2022
- value: audio,
2023
- responseHeaders,
2024
- rawValue: rawResponse
2025
- } = await (0, import_provider_utils8.postJsonToApi)({
2026
- url: this.config.url({
2027
- path: "/audio/speech",
2028
- modelId: this.modelId
2029
- }),
2030
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
2031
- body: requestBody,
2032
- failedResponseHandler: openaiFailedResponseHandler,
2033
- successfulResponseHandler: (0, import_provider_utils8.createBinaryResponseHandler)(),
2034
- abortSignal: options.abortSignal,
2035
- fetch: this.config.fetch
2036
- });
2037
- return {
2038
- audio,
2039
- warnings,
2040
- request: {
2041
- body: JSON.stringify(requestBody)
2042
- },
2043
- response: {
2044
- timestamp: currentDate,
2045
- modelId: this.modelId,
2046
- headers: responseHeaders,
2047
- body: rawResponse
2048
- }
2049
- };
2050
- }
2051
- };
2052
-
2053
- // src/responses/openai-responses-language-model.ts
2054
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
2055
- var import_zod12 = require("zod");
2056
-
2057
- // src/responses/convert-to-openai-responses-messages.ts
2058
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
2059
- function convertToOpenAIResponsesMessages({
2060
- prompt,
2061
- systemMessageMode
2062
- }) {
2063
- const messages = [];
2064
- const warnings = [];
2065
- for (const { role, content } of prompt) {
2066
- switch (role) {
2067
- case "system": {
2068
- switch (systemMessageMode) {
2069
- case "system": {
2070
- messages.push({ role: "system", content });
2071
- break;
2072
- }
2073
- case "developer": {
2074
- messages.push({ role: "developer", content });
2075
- break;
2076
- }
2077
- case "remove": {
2078
- warnings.push({
2079
- type: "other",
2080
- message: "system messages are removed for this model"
2081
- });
2082
- break;
2083
- }
2084
- default: {
2085
- const _exhaustiveCheck = systemMessageMode;
2086
- throw new Error(
2087
- `Unsupported system message mode: ${_exhaustiveCheck}`
2088
- );
2089
- }
2090
- }
2091
- break;
2092
- }
2093
- case "user": {
2094
- messages.push({
2095
- role: "user",
2096
- content: content.map((part, index) => {
2097
- var _a15, _b, _c, _d;
2098
- switch (part.type) {
2099
- case "text": {
2100
- return { type: "input_text", text: part.text };
2101
- }
2102
- case "image": {
2103
- return {
2104
- type: "input_image",
2105
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${(0, import_provider_utils9.convertUint8ArrayToBase64)(part.image)}`,
2106
- // OpenAI specific extension: image detail
2107
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
2108
- };
2109
- }
2110
- case "file": {
2111
- if (part.data instanceof URL) {
2112
- throw new UnsupportedFunctionalityError({
2113
- functionality: "File URLs in user messages"
2114
- });
2115
- }
2116
- switch (part.mimeType) {
2117
- case "application/pdf": {
2118
- return {
2119
- type: "input_file",
2120
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
2121
- file_data: `data:application/pdf;base64,${part.data}`
2122
- };
2123
- }
2124
- default: {
2125
- throw new UnsupportedFunctionalityError({
2126
- functionality: "Only PDF files are supported in user messages"
2127
- });
2128
- }
2129
- }
2130
- }
2131
- }
2132
- })
2133
- });
2134
- break;
2135
- }
2136
- case "assistant": {
2137
- for (const part of content) {
2138
- switch (part.type) {
2139
- case "text": {
2140
- messages.push({
2141
- role: "assistant",
2142
- content: [{ type: "output_text", text: part.text }]
2143
- });
2144
- break;
2145
- }
2146
- case "tool-call": {
2147
- messages.push({
2148
- type: "function_call",
2149
- call_id: part.toolCallId,
2150
- name: part.toolName,
2151
- arguments: JSON.stringify(part.args)
2152
- });
2153
- break;
2154
- }
2155
- }
2156
- }
2157
- break;
2158
- }
2159
- case "tool": {
2160
- for (const part of content) {
2161
- messages.push({
2162
- type: "function_call_output",
2163
- call_id: part.toolCallId,
2164
- output: JSON.stringify(part.result)
2165
- });
2166
- }
2167
- break;
2168
- }
2169
- default: {
2170
- const _exhaustiveCheck = role;
2171
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2172
- }
2173
- }
2174
- }
2175
- return { messages, warnings };
2176
- }
2177
-
2178
- // src/responses/map-openai-responses-finish-reason.ts
2179
- function mapOpenAIResponseFinishReason({
2180
- finishReason,
2181
- hasToolCalls
2182
- }) {
2183
- switch (finishReason) {
2184
- case void 0:
2185
- case null:
2186
- return hasToolCalls ? "tool-calls" : "stop";
2187
- case "max_output_tokens":
2188
- return "length";
2189
- case "content_filter":
2190
- return "content-filter";
2191
- default:
2192
- return hasToolCalls ? "tool-calls" : "unknown";
2193
- }
2194
- }
2195
-
2196
- // src/tool/code-interpreter.ts
2197
- var import_zod8 = require("zod");
2198
- var codeInterpreterArgsSchema = import_zod8.z.object({
2199
- container: import_zod8.z.union([
2200
- import_zod8.z.string(),
2201
- import_zod8.z.object({
2202
- fileIds: import_zod8.z.array(import_zod8.z.string()).optional()
2203
- })
2204
- ]).optional()
2205
- });
2206
-
2207
- // src/tool/file-search.ts
2208
- var import_zod9 = require("zod");
2209
- var comparisonFilterSchema = import_zod9.z.object({
2210
- key: import_zod9.z.string(),
2211
- type: import_zod9.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
2212
- value: import_zod9.z.union([import_zod9.z.string(), import_zod9.z.number(), import_zod9.z.boolean()])
2213
- });
2214
- var compoundFilterSchema = import_zod9.z.object({
2215
- type: import_zod9.z.enum(["and", "or"]),
2216
- filters: import_zod9.z.array(
2217
- import_zod9.z.union([comparisonFilterSchema, import_zod9.z.lazy(() => compoundFilterSchema)])
2218
- )
2219
- });
2220
- var filtersSchema = import_zod9.z.union([comparisonFilterSchema, compoundFilterSchema]);
2221
- var fileSearchArgsSchema = import_zod9.z.object({
2222
- vectorStoreIds: import_zod9.z.array(import_zod9.z.string()).optional(),
2223
- maxNumResults: import_zod9.z.number().optional(),
2224
- ranking: import_zod9.z.object({
2225
- ranker: import_zod9.z.enum(["auto", "default-2024-08-21"]).optional()
2226
- }).optional(),
2227
- filters: filtersSchema.optional()
2228
- });
2229
-
2230
- // src/tool/web-search.ts
2231
- var import_zod10 = require("zod");
2232
- var webSearchArgsSchema = import_zod10.z.object({
2233
- filters: import_zod10.z.object({
2234
- allowedDomains: import_zod10.z.array(import_zod10.z.string()).optional()
2235
- }).optional(),
2236
- searchContextSize: import_zod10.z.enum(["low", "medium", "high"]).optional(),
2237
- userLocation: import_zod10.z.object({
2238
- type: import_zod10.z.literal("approximate"),
2239
- country: import_zod10.z.string().optional(),
2240
- city: import_zod10.z.string().optional(),
2241
- region: import_zod10.z.string().optional(),
2242
- timezone: import_zod10.z.string().optional()
2243
- }).optional()
2244
- });
2245
-
2246
- // src/tool/web-search-preview.ts
2247
- var import_zod11 = require("zod");
2248
- var webSearchPreviewArgsSchema = import_zod11.z.object({
2249
- searchContextSize: import_zod11.z.enum(["low", "medium", "high"]).optional(),
2250
- userLocation: import_zod11.z.object({
2251
- type: import_zod11.z.literal("approximate"),
2252
- country: import_zod11.z.string().optional(),
2253
- city: import_zod11.z.string().optional(),
2254
- region: import_zod11.z.string().optional(),
2255
- timezone: import_zod11.z.string().optional()
2256
- }).optional()
2257
- });
2258
-
2259
- // src/responses/openai-responses-prepare-tools.ts
2260
- function prepareResponsesTools({
2261
- mode,
2262
- strict
2263
- }) {
2264
- var _a15;
2265
- const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
2266
- const toolWarnings = [];
2267
- if (tools == null) {
2268
- return { tools: void 0, tool_choice: void 0, toolWarnings };
2269
- }
2270
- const toolChoice = mode.toolChoice;
2271
- const openaiTools = [];
2272
- for (const tool of tools) {
2273
- switch (tool.type) {
2274
- case "function":
2275
- openaiTools.push({
2276
- type: "function",
2277
- name: tool.name,
2278
- description: tool.description,
2279
- parameters: tool.parameters,
2280
- strict: strict ? true : void 0
2281
- });
2282
- break;
2283
- case "provider-defined":
2284
- switch (tool.id) {
2285
- case "openai.file_search": {
2286
- const args = fileSearchArgsSchema.parse(tool.args);
2287
- openaiTools.push({
2288
- type: "file_search",
2289
- vector_store_ids: args.vectorStoreIds,
2290
- max_num_results: args.maxNumResults,
2291
- ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
2292
- filters: args.filters
2293
- });
2294
- break;
2295
- }
2296
- case "openai.web_search_preview": {
2297
- const args = webSearchPreviewArgsSchema.parse(tool.args);
2298
- openaiTools.push({
2299
- type: "web_search_preview",
2300
- search_context_size: args.searchContextSize,
2301
- user_location: args.userLocation
2302
- });
2303
- break;
2304
- }
2305
- case "openai.web_search": {
2306
- const args = webSearchArgsSchema.parse(tool.args);
2307
- openaiTools.push({
2308
- type: "web_search",
2309
- filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
2310
- search_context_size: args.searchContextSize,
2311
- user_location: args.userLocation
2312
- });
2313
- break;
2314
- }
2315
- case "openai.code_interpreter": {
2316
- const args = codeInterpreterArgsSchema.parse(tool.args);
2317
- openaiTools.push({
2318
- type: "code_interpreter",
2319
- container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
2320
- });
2321
- break;
2322
- }
2323
- default: {
2324
- toolWarnings.push({ type: "unsupported-tool", tool });
2325
- break;
2326
- }
2327
- }
2328
- break;
2329
- default:
2330
- toolWarnings.push({ type: "unsupported-tool", tool });
2331
- break;
2332
- }
2333
- }
2334
- if (toolChoice == null) {
2335
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
2336
- }
2337
- const type = toolChoice.type;
2338
- switch (type) {
2339
- case "auto":
2340
- case "none":
2341
- case "required":
2342
- return { tools: openaiTools, tool_choice: type, toolWarnings };
2343
- case "tool":
2344
- return {
2345
- tools: openaiTools,
2346
- tool_choice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
2347
- toolWarnings
2348
- };
2349
- default: {
2350
- const _exhaustiveCheck = type;
2351
- throw new UnsupportedFunctionalityError({
2352
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
2353
- });
2354
- }
2355
- }
2356
- }
2357
-
2358
- // src/responses/openai-responses-language-model.ts
2359
- var OpenAIResponsesLanguageModel = class {
2360
- constructor(modelId, config) {
2361
- this.specificationVersion = "v1";
2362
- this.defaultObjectGenerationMode = "json";
2363
- this.supportsStructuredOutputs = true;
2364
- this.modelId = modelId;
2365
- this.config = config;
2366
- }
2367
- get provider() {
2368
- return this.config.provider;
2369
- }
2370
- getArgs({
2371
- mode,
2372
- maxTokens,
2373
- temperature,
2374
- stopSequences,
2375
- topP,
2376
- topK,
2377
- presencePenalty,
2378
- frequencyPenalty,
2379
- seed,
2380
- prompt,
2381
- providerMetadata,
2382
- responseFormat
2383
- }) {
2384
- var _a15, _b, _c;
2385
- const warnings = [];
2386
- const modelConfig = getResponsesModelConfig(this.modelId);
2387
- const type = mode.type;
2388
- if (topK != null) {
2389
- warnings.push({
2390
- type: "unsupported-setting",
2391
- setting: "topK"
2392
- });
2393
- }
2394
- if (seed != null) {
2395
- warnings.push({
2396
- type: "unsupported-setting",
2397
- setting: "seed"
2398
- });
2399
- }
2400
- if (presencePenalty != null) {
2401
- warnings.push({
2402
- type: "unsupported-setting",
2403
- setting: "presencePenalty"
2404
- });
2405
- }
2406
- if (frequencyPenalty != null) {
2407
- warnings.push({
2408
- type: "unsupported-setting",
2409
- setting: "frequencyPenalty"
2410
- });
2411
- }
2412
- if (stopSequences != null) {
2413
- warnings.push({
2414
- type: "unsupported-setting",
2415
- setting: "stopSequences"
2416
- });
2417
- }
2418
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
2419
- prompt,
2420
- systemMessageMode: modelConfig.systemMessageMode
2421
- });
2422
- warnings.push(...messageWarnings);
2423
- console.log("providerMetadata", JSON.stringify(providerMetadata));
2424
- const openaiOptions = (0, import_provider_utils10.parseProviderOptions)({
2425
- provider: "openai",
2426
- providerOptions: providerMetadata,
2427
- schema: openaiResponsesProviderOptionsSchema
2428
- });
2429
- const isStrict = (_a15 = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a15 : true;
2430
- console.log("openaiOptions", JSON.stringify(openaiOptions));
2431
- const baseArgs = {
2432
- model: this.modelId,
2433
- input: messages,
2434
- temperature: (openaiOptions == null ? void 0 : openaiOptions.forceNoTemperature) ? void 0 : temperature,
2435
- top_p: topP,
2436
- max_output_tokens: maxTokens,
2437
- ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2438
- text: {
2439
- format: responseFormat.schema != null ? {
2440
- type: "json_schema",
2441
- strict: isStrict,
2442
- name: (_b = responseFormat.name) != null ? _b : "response",
2443
- description: responseFormat.description,
2444
- schema: responseFormat.schema
2445
- } : { type: "json_object" }
2446
- }
2447
- },
2448
- // provider options:
2449
- metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2450
- parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2451
- include: openaiOptions == null ? void 0 : openaiOptions.include,
2452
- previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2453
- store: openaiOptions == null ? void 0 : openaiOptions.store,
2454
- user: openaiOptions == null ? void 0 : openaiOptions.user,
2455
- instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2456
- // model-specific settings:
2457
- ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2458
- reasoning: {
2459
- ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2460
- effort: openaiOptions.reasoningEffort
2461
- },
2462
- ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
2463
- summary: openaiOptions.reasoningSummary
2464
- }
2465
- }
2466
- },
2467
- ...modelConfig.requiredAutoTruncation && {
2468
- truncation: "auto"
2469
- }
2470
- };
2471
- if (modelConfig.isReasoningModel) {
2472
- if (baseArgs.temperature != null) {
2473
- baseArgs.temperature = void 0;
2474
- warnings.push({
2475
- type: "unsupported-setting",
2476
- setting: "temperature",
2477
- details: "temperature is not supported for reasoning models"
2478
- });
2479
- }
2480
- if (baseArgs.top_p != null) {
2481
- baseArgs.top_p = void 0;
2482
- warnings.push({
2483
- type: "unsupported-setting",
2484
- setting: "topP",
2485
- details: "topP is not supported for reasoning models"
2486
- });
2487
- }
2488
- }
2489
- switch (type) {
2490
- case "regular": {
2491
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
2492
- mode,
2493
- strict: isStrict
2494
- // TODO support provider options on tools
2495
- });
2496
- return {
2497
- args: {
2498
- ...baseArgs,
2499
- tools,
2500
- tool_choice
2501
- },
2502
- warnings: [...warnings, ...toolWarnings]
2503
- };
2504
- }
2505
- case "object-json": {
2506
- return {
2507
- args: {
2508
- ...baseArgs,
2509
- text: {
2510
- format: mode.schema != null ? {
2511
- type: "json_schema",
2512
- strict: isStrict,
2513
- name: (_c = mode.name) != null ? _c : "response",
2514
- description: mode.description,
2515
- schema: mode.schema
2516
- } : { type: "json_object" }
2517
- }
2518
- },
2519
- warnings
2520
- };
2521
- }
2522
- case "object-tool": {
2523
- return {
2524
- args: {
2525
- ...baseArgs,
2526
- tool_choice: { type: "function", name: mode.tool.name },
2527
- tools: [
2528
- {
2529
- type: "function",
2530
- name: mode.tool.name,
2531
- description: mode.tool.description,
2532
- parameters: mode.tool.parameters,
2533
- strict: isStrict
2534
- }
2535
- ]
2536
- },
2537
- warnings
2538
- };
2539
- }
2540
- default: {
2541
- const _exhaustiveCheck = type;
2542
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2543
- }
2544
- }
2545
- }
2546
- async doGenerate(options) {
2547
- var _a15, _b, _c, _d, _e, _f, _g;
2548
- const { args: body, warnings } = this.getArgs(options);
2549
- const {
2550
- responseHeaders,
2551
- value: response,
2552
- rawValue: rawResponse
2553
- } = await (0, import_provider_utils10.postJsonToApi)({
2554
- url: this.config.url({
2555
- path: "/responses",
2556
- modelId: this.modelId
2557
- }),
2558
- headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2559
- body,
2560
- failedResponseHandler: openaiFailedResponseHandler,
2561
- successfulResponseHandler: (0, import_provider_utils10.createJsonResponseHandler)(
2562
- import_zod12.z.object({
2563
- id: import_zod12.z.string(),
2564
- created_at: import_zod12.z.number(),
2565
- model: import_zod12.z.string(),
2566
- output: import_zod12.z.array(
2567
- import_zod12.z.discriminatedUnion("type", [
2568
- import_zod12.z.object({
2569
- type: import_zod12.z.literal("message"),
2570
- role: import_zod12.z.literal("assistant"),
2571
- content: import_zod12.z.array(
2572
- import_zod12.z.object({
2573
- type: import_zod12.z.literal("output_text"),
2574
- text: import_zod12.z.string(),
2575
- annotations: import_zod12.z.array(
2576
- import_zod12.z.discriminatedUnion("type", [
2577
- import_zod12.z.object({
2578
- type: import_zod12.z.literal("url_citation"),
2579
- start_index: import_zod12.z.number(),
2580
- end_index: import_zod12.z.number(),
2581
- url: import_zod12.z.string(),
2582
- title: import_zod12.z.string()
2583
- }),
2584
- import_zod12.z.object({
2585
- type: import_zod12.z.literal("file_citation"),
2586
- file_id: import_zod12.z.string(),
2587
- filename: import_zod12.z.string().nullish(),
2588
- index: import_zod12.z.number().nullish(),
2589
- start_index: import_zod12.z.number().nullish(),
2590
- end_index: import_zod12.z.number().nullish(),
2591
- quote: import_zod12.z.string().nullish()
2592
- }),
2593
- import_zod12.z.object({
2594
- type: import_zod12.z.literal("container_file_citation")
2595
- })
2596
- ])
2597
- )
2598
- })
2599
- )
2600
- }),
2601
- import_zod12.z.object({
2602
- type: import_zod12.z.literal("code_interpreter_call")
2603
- }),
2604
- import_zod12.z.object({
2605
- type: import_zod12.z.literal("function_call"),
2606
- call_id: import_zod12.z.string(),
2607
- name: import_zod12.z.string(),
2608
- arguments: import_zod12.z.string()
2609
- }),
2610
- import_zod12.z.object({
2611
- type: import_zod12.z.literal("web_search_call"),
2612
- id: import_zod12.z.string(),
2613
- status: import_zod12.z.string().optional(),
2614
- action: import_zod12.z.discriminatedUnion("type", [
2615
- import_zod12.z.object({
2616
- type: import_zod12.z.literal("search"),
2617
- query: import_zod12.z.string().nullish()
2618
- }),
2619
- import_zod12.z.object({
2620
- type: import_zod12.z.literal("open_page"),
2621
- url: import_zod12.z.string()
2622
- }),
2623
- import_zod12.z.object({
2624
- type: import_zod12.z.literal("find"),
2625
- url: import_zod12.z.string(),
2626
- pattern: import_zod12.z.string()
2627
- })
2628
- ]).nullish()
2629
- }),
2630
- import_zod12.z.object({
2631
- type: import_zod12.z.literal("computer_call"),
2632
- id: import_zod12.z.string(),
2633
- status: import_zod12.z.string().optional()
2634
- }),
2635
- import_zod12.z.object({
2636
- type: import_zod12.z.literal("file_search_call"),
2637
- id: import_zod12.z.string(),
2638
- status: import_zod12.z.string().optional(),
2639
- queries: import_zod12.z.array(import_zod12.z.string()).nullish(),
2640
- results: import_zod12.z.array(
2641
- import_zod12.z.object({
2642
- attributes: import_zod12.z.object({
2643
- file_id: import_zod12.z.string(),
2644
- filename: import_zod12.z.string(),
2645
- score: import_zod12.z.number(),
2646
- text: import_zod12.z.string()
2647
- })
2648
- })
2649
- ).nullish()
2650
- }),
2651
- import_zod12.z.object({
2652
- type: import_zod12.z.literal("reasoning"),
2653
- summary: import_zod12.z.array(
2654
- import_zod12.z.object({
2655
- type: import_zod12.z.literal("summary_text"),
2656
- text: import_zod12.z.string()
2657
- })
2658
- )
2659
- })
2660
- ])
2661
- ),
2662
- incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
2663
- usage: usageSchema
2664
- })
2665
- ),
2666
- abortSignal: options.abortSignal,
2667
- fetch: this.config.fetch
2668
- });
2669
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2670
- const toolCalls = [];
2671
- for (const output of response.output) {
2672
- if (output.type === "function_call") {
2673
- toolCalls.push({
2674
- toolCallType: "function",
2675
- toolCallId: output.call_id,
2676
- toolName: output.name,
2677
- args: output.arguments
2678
- });
2679
- } else if (output.type === "web_search_call") {
2680
- toolCalls.push({
2681
- toolCallType: "function",
2682
- toolCallId: output.id,
2683
- toolName: "web_search_preview",
2684
- args: JSON.stringify({ action: output.action })
2685
- });
2686
- } else if (output.type === "computer_call") {
2687
- toolCalls.push({
2688
- toolCallType: "function",
2689
- toolCallId: output.id,
2690
- toolName: "computer_use",
2691
- args: ""
2692
- });
2693
- } else if (output.type === "file_search_call") {
2694
- toolCalls.push({
2695
- toolCallType: "function",
2696
- toolCallId: output.id,
2697
- toolName: "file_search",
2698
- args: ""
2699
- });
2700
- }
2701
- }
2702
- const reasoningSummary = (_b = (_a15 = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a15.summary) != null ? _b : null;
2703
- const allAnnotations = outputTextElements.flatMap((content) => content.annotations);
2704
- return {
2705
- text: outputTextElements.map((content) => content.text).join("\n"),
2706
- sources: outputTextElements.flatMap(
2707
- (content) => content.annotations.map((annotation) => {
2708
- var _a16, _b2, _c2, _d2, _e2, _f2, _g2, _h, _i;
2709
- if (annotation.type === "url_citation") {
2710
- return {
2711
- sourceType: "url",
2712
- id: (_c2 = (_b2 = (_a16 = this.config).generateId) == null ? void 0 : _b2.call(_a16)) != null ? _c2 : (0, import_provider_utils10.generateId)(),
2713
- url: annotation.url,
2714
- title: annotation.title
2715
- };
2716
- } else if (annotation.type === "file_citation") {
2717
- return {
2718
- sourceType: "url",
2719
- id: (_f2 = (_e2 = (_d2 = this.config).generateId) == null ? void 0 : _e2.call(_d2)) != null ? _f2 : (0, import_provider_utils10.generateId)(),
2720
- url: `file://${annotation.file_id}`,
2721
- title: annotation.quote || annotation.filename || "Document"
2722
- };
2723
- } else {
2724
- return {
2725
- sourceType: "url",
2726
- id: (_i = (_h = (_g2 = this.config).generateId) == null ? void 0 : _h.call(_g2)) != null ? _i : (0, import_provider_utils10.generateId)(),
2727
- url: "",
2728
- title: "Unknown Source"
2729
- };
2730
- }
2731
- })
2732
- ),
2733
- finishReason: mapOpenAIResponseFinishReason({
2734
- finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
2735
- hasToolCalls: toolCalls.length > 0
2736
- }),
2737
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2738
- reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
2739
- type: "text",
2740
- text: summary.text
2741
- })) : void 0,
2742
- usage: {
2743
- promptTokens: response.usage.input_tokens,
2744
- completionTokens: response.usage.output_tokens
2745
- },
2746
- rawCall: {
2747
- rawPrompt: void 0,
2748
- rawSettings: {}
2749
- },
2750
- rawResponse: {
2751
- headers: responseHeaders,
2752
- body: rawResponse
2753
- },
2754
- request: {
2755
- body: JSON.stringify(body)
2756
- },
2757
- response: {
2758
- id: response.id,
2759
- timestamp: new Date(response.created_at * 1e3),
2760
- modelId: response.model
2761
- },
2762
- providerMetadata: {
2763
- openai: {
2764
- responseId: response.id,
2765
- cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
2766
- reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
2767
- }
2768
- },
2769
- warnings
2770
- };
2771
- }
2772
- async doStream(options) {
2773
- const { args: body, warnings } = this.getArgs(options);
2774
- let response;
2775
- let responseHeaders;
2776
- try {
2777
- const result = await (0, import_provider_utils10.postJsonToApi)({
2778
- url: this.config.url({
2779
- path: "/responses",
2780
- modelId: this.modelId
2781
- }),
2782
- headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2783
- body: {
2784
- ...body,
2785
- stream: true
2786
- },
2787
- failedResponseHandler: openaiFailedResponseHandler,
2788
- successfulResponseHandler: (0, import_provider_utils10.createEventSourceResponseHandler)(
2789
- openaiResponsesChunkSchema
2790
- ),
2791
- abortSignal: options.abortSignal,
2792
- fetch: this.config.fetch
2793
- });
2794
- response = result.value;
2795
- responseHeaders = result.responseHeaders;
2796
- } catch (error) {
2797
- console.error("\u274C API request failed:", error);
2798
- throw error;
2799
- }
2800
- const self = this;
2801
- let finishReason = "unknown";
2802
- let promptTokens = NaN;
2803
- let completionTokens = NaN;
2804
- let cachedPromptTokens = null;
2805
- let reasoningTokens = null;
2806
- let responseId = null;
2807
- const ongoingToolCalls = {};
2808
- let hasToolCalls = false;
2809
- return {
2810
- stream: response.pipeThrough(
2811
- new TransformStream({
2812
- transform(chunk, controller) {
2813
- var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
2814
- try {
2815
- if (!chunk.success) {
2816
- console.error("\u274C Chunk parsing failed:", chunk.error);
2817
- finishReason = "error";
2818
- controller.enqueue({ type: "error", error: chunk.error });
2819
- return;
2820
- }
2821
- const value = chunk.value;
2822
- if (isResponseOutputItemAddedChunk(value)) {
2823
- if (value.item.type === "function_call") {
2824
- ongoingToolCalls[value.output_index] = {
2825
- toolName: value.item.name,
2826
- toolCallId: value.item.call_id
2827
- };
2828
- controller.enqueue({
2829
- type: "tool-call-delta",
2830
- toolCallType: "function",
2831
- toolCallId: value.item.call_id,
2832
- toolName: value.item.name,
2833
- argsTextDelta: value.item.arguments
2834
- });
2835
- } else if (value.item.type === "web_search_call") {
2836
- ongoingToolCalls[value.output_index] = {
2837
- toolName: "web_search_preview",
2838
- toolCallId: value.item.id
2839
- };
2840
- controller.enqueue({
2841
- type: "tool-call-delta",
2842
- toolCallType: "function",
2843
- toolCallId: value.item.id,
2844
- toolName: "web_search_preview",
2845
- argsTextDelta: JSON.stringify({ action: value.item.action })
2846
- });
2847
- } else if (value.item.type === "computer_call") {
2848
- ongoingToolCalls[value.output_index] = {
2849
- toolName: "computer_use",
2850
- toolCallId: value.item.id
2851
- };
2852
- controller.enqueue({
2853
- type: "tool-call-delta",
2854
- toolCallType: "function",
2855
- toolCallId: value.item.id,
2856
- toolName: "computer_use",
2857
- argsTextDelta: ""
2858
- });
2859
- } else if (value.item.type === "file_search_call") {
2860
- ongoingToolCalls[value.output_index] = {
2861
- toolName: "file_search",
2862
- toolCallId: value.item.id
2863
- };
2864
- controller.enqueue({
2865
- type: "tool-call-delta",
2866
- toolCallType: "function",
2867
- toolCallId: value.item.id,
2868
- toolName: "file_search",
2869
- argsTextDelta: ""
2870
- });
2871
- }
2872
- } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2873
- console.log("\u{1F527} Function call arguments delta:", JSON.stringify(value, null, 2));
2874
- const toolCall = ongoingToolCalls[value.output_index];
2875
- if (toolCall != null) {
2876
- controller.enqueue({
2877
- type: "tool-call-delta",
2878
- toolCallType: "function",
2879
- toolCallId: toolCall.toolCallId,
2880
- toolName: toolCall.toolName,
2881
- argsTextDelta: value.delta
2882
- });
2883
- }
2884
- } else if (isResponseCreatedChunk(value)) {
2885
- responseId = value.response.id;
2886
- controller.enqueue({
2887
- type: "response-metadata",
2888
- id: value.response.id,
2889
- timestamp: new Date(value.response.created_at * 1e3),
2890
- modelId: value.response.model
2891
- });
2892
- } else if (isTextDeltaChunk(value)) {
2893
- controller.enqueue({
2894
- type: "text-delta",
2895
- textDelta: value.delta
2896
- });
2897
- } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2898
- controller.enqueue({
2899
- type: "reasoning",
2900
- textDelta: value.delta
2901
- });
2902
- } else if (isResponseOutputItemDoneChunk(value)) {
2903
- if (value.item.type === "function_call") {
2904
- ongoingToolCalls[value.output_index] = void 0;
2905
- hasToolCalls = true;
2906
- controller.enqueue({
2907
- type: "tool-call",
2908
- toolCallType: "function",
2909
- toolCallId: value.item.call_id,
2910
- toolName: value.item.name,
2911
- args: value.item.arguments
2912
- });
2913
- } else if (value.item.type === "web_search_call") {
2914
- ongoingToolCalls[value.output_index] = void 0;
2915
- hasToolCalls = true;
2916
- controller.enqueue({
2917
- type: "tool-call",
2918
- toolCallType: "function",
2919
- toolCallId: value.item.id,
2920
- toolName: "web_search_preview",
2921
- args: JSON.stringify({ action: value.item.action })
2922
- });
2923
- } else if (value.item.type === "computer_call") {
2924
- ongoingToolCalls[value.output_index] = void 0;
2925
- hasToolCalls = true;
2926
- controller.enqueue({
2927
- type: "tool-call",
2928
- toolCallType: "function",
2929
- toolCallId: value.item.id,
2930
- toolName: "computer_use",
2931
- args: ""
2932
- });
2933
- } else if (value.item.type === "file_search_call") {
2934
- ongoingToolCalls[value.output_index] = void 0;
2935
- hasToolCalls = true;
2936
- controller.enqueue({
2937
- type: "tool-call",
2938
- toolCallType: "function",
2939
- toolCallId: value.item.id,
2940
- toolName: "file_search",
2941
- args: JSON.stringify({
2942
- queries: value.item.queries,
2943
- results: value.item.results
2944
- })
2945
- });
2946
- }
2947
- } else if (isResponseFinishedChunk(value)) {
2948
- finishReason = mapOpenAIResponseFinishReason({
2949
- finishReason: (_a15 = value.response.incomplete_details) == null ? void 0 : _a15.reason,
2950
- hasToolCalls
2951
- });
2952
- promptTokens = value.response.usage.input_tokens;
2953
- completionTokens = value.response.usage.output_tokens;
2954
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2955
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2956
- } else if (isResponseAnnotationAddedChunk(value)) {
2957
- try {
2958
- if (value.annotation.type === "url_citation") {
2959
- const urlSource = {
2960
- sourceType: "url",
2961
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils10.generateId)(),
2962
- url: String(value.annotation.url || ""),
2963
- title: String(value.annotation.title || "Web Source")
2964
- };
2965
- controller.enqueue({
2966
- type: "source",
2967
- source: urlSource
2968
- });
2969
- } else if (value.annotation.type === "file_citation") {
2970
- const urlSource = {
2971
- sourceType: "url",
2972
- id: (_k = (_j = (_i = self.config).generateId) == null ? void 0 : _j.call(_i)) != null ? _k : (0, import_provider_utils10.generateId)(),
2973
- url: `file://${value.annotation.file_id}`,
2974
- title: String(value.annotation.quote || value.annotation.filename || "Document")
2975
- };
2976
- controller.enqueue({
2977
- type: "source",
2978
- source: urlSource
2979
- });
2980
- }
2981
- } catch (error) {
2982
- console.error("\u274C Error processing annotation:", error);
2983
- }
2984
- } else if (isResponseInProgressChunk(value)) {
2985
- } else if (isResponseContentPartAddedChunk(value)) {
2986
- } else if (isResponseOutputTextDoneChunk(value)) {
2987
- } else if (isResponseContentPartDoneChunk(value)) {
2988
- } else if (isResponseFileSearchCallInProgressChunk(value)) {
2989
- } else if (isResponseFileSearchCallSearchingChunk(value)) {
2990
- } else if (isResponseFileSearchCallCompletedChunk(value)) {
2991
- } else if (isResponseFileSearchCallQueryAddedChunk(value)) {
2992
- } else if (isResponseFileSearchCallResultAddedChunk(value)) {
2993
- } else {
2994
- console.log("\u2753 Unhandled chunk type:", value.type);
2995
- }
2996
- } catch (error) {
2997
- console.error("\u{1F4A5} FATAL ERROR in chunk processing:", {
2998
- error: error instanceof Error ? error.message : String(error),
2999
- stack: error instanceof Error ? error.stack : void 0
3000
- });
3001
- finishReason = "error";
3002
- controller.enqueue({
3003
- type: "error",
3004
- error: error instanceof Error ? error : new Error(String(error))
3005
- });
3006
- }
3007
- },
3008
- flush(controller) {
3009
- controller.enqueue({
3010
- type: "finish",
3011
- finishReason,
3012
- usage: { promptTokens, completionTokens },
3013
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
3014
- providerMetadata: {
3015
- openai: {
3016
- responseId,
3017
- cachedPromptTokens,
3018
- reasoningTokens
3019
- }
3020
- }
3021
- }
3022
- });
3023
- }
3024
- })
3025
- ),
3026
- rawCall: {
3027
- rawPrompt: void 0,
3028
- rawSettings: {}
3029
- },
3030
- rawResponse: { headers: responseHeaders },
3031
- request: { body: JSON.stringify(body) },
3032
- warnings
3033
- };
3034
- }
3035
- };
3036
// Token-usage block carried by `response.completed` / `response.incomplete`
// events. Both *_details sub-objects (and their counters) may be absent or
// null, hence the `.nullish()` wrappers.
var usageSchema = import_zod12.z.object({
  input_tokens: import_zod12.z.number(),
  input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
  output_tokens: import_zod12.z.number(),
  output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
});
// Incremental assistant text ("response.output_text.delta"); forwarded as a
// `text-delta` stream part by the transform.
var textDeltaChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.output_text.delta"),
  delta: import_zod12.z.string()
});
// Terminal stream event: the response either completed or stopped early.
// `incomplete_details.reason` feeds mapOpenAIResponseFinishReason; `usage`
// supplies the final token counts.
var responseFinishedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
  response: import_zod12.z.object({
    incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
    usage: usageSchema
  })
});
// First event of a stream; id / created_at (epoch seconds) / model are
// re-emitted as `response-metadata`.
var responseCreatedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.created"),
  response: import_zod12.z.object({
    id: import_zod12.z.string(),
    created_at: import_zod12.z.number(),
    model: import_zod12.z.string()
  })
});
3061
// "response.output_item.done": an output item finished. Function/tool items
// here carry their final payload, which the transform turns into a completed
// `tool-call` part. Discriminated on `item.type`.
var responseOutputItemDoneSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.output_item.done"),
  output_index: import_zod12.z.number(),
  item: import_zod12.z.discriminatedUnion("type", [
    // Plain assistant message item; its text arrives via output_text deltas.
    import_zod12.z.object({
      type: import_zod12.z.literal("message")
    }),
    // Completed function call with the full `arguments` JSON string.
    import_zod12.z.object({
      type: import_zod12.z.literal("function_call"),
      id: import_zod12.z.string(),
      call_id: import_zod12.z.string(),
      name: import_zod12.z.string(),
      arguments: import_zod12.z.string(),
      status: import_zod12.z.literal("completed")
    }),
    // Built-in web search; `action` describes what the tool did and may be
    // absent entirely.
    import_zod12.z.object({
      type: import_zod12.z.literal("web_search_call"),
      id: import_zod12.z.string(),
      status: import_zod12.z.string(),
      action: import_zod12.z.discriminatedUnion("type", [
        import_zod12.z.object({
          type: import_zod12.z.literal("search"),
          query: import_zod12.z.string().nullish()
        }),
        import_zod12.z.object({
          type: import_zod12.z.literal("open_page"),
          url: import_zod12.z.string()
        }),
        import_zod12.z.object({
          type: import_zod12.z.literal("find"),
          url: import_zod12.z.string(),
          pattern: import_zod12.z.string()
        })
      ]).nullish()
    }),
    // Recognized but otherwise ignored by the transform.
    import_zod12.z.object({
      type: import_zod12.z.literal("code_interpreter_call")
    }),
    import_zod12.z.object({
      type: import_zod12.z.literal("computer_call"),
      id: import_zod12.z.string(),
      status: import_zod12.z.literal("completed")
    }),
    // File search result; `queries`/`results` are forwarded as the tool-call
    // args when present.
    import_zod12.z.object({
      type: import_zod12.z.literal("file_search_call"),
      id: import_zod12.z.string(),
      status: import_zod12.z.literal("completed"),
      queries: import_zod12.z.array(import_zod12.z.string()).nullish(),
      results: import_zod12.z.array(
        import_zod12.z.object({
          attributes: import_zod12.z.object({
            file_id: import_zod12.z.string(),
            filename: import_zod12.z.string(),
            score: import_zod12.z.number(),
            text: import_zod12.z.string()
          })
        })
      ).nullish()
    })
  ])
});
3122
// Streaming fragment of a function call's `arguments` string; routed to the
// in-flight tool call tracked by `output_index`.
var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.function_call_arguments.delta"),
  item_id: import_zod12.z.string(),
  output_index: import_zod12.z.number(),
  delta: import_zod12.z.string()
});
// "response.output_item.added": a new output item opened. For tool items the
// transform registers an ongoing tool call keyed by `output_index` and emits
// an initial `tool-call-delta`. Note: unlike the `done` variant, statuses are
// free-form strings and `arguments` has no `status` field yet.
var responseOutputItemAddedSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.output_item.added"),
  output_index: import_zod12.z.number(),
  item: import_zod12.z.discriminatedUnion("type", [
    import_zod12.z.object({
      type: import_zod12.z.literal("message")
    }),
    import_zod12.z.object({
      type: import_zod12.z.literal("function_call"),
      id: import_zod12.z.string(),
      call_id: import_zod12.z.string(),
      name: import_zod12.z.string(),
      arguments: import_zod12.z.string()
    }),
    // At add-time only a "search" action (if any) is modeled here.
    import_zod12.z.object({
      type: import_zod12.z.literal("web_search_call"),
      id: import_zod12.z.string(),
      status: import_zod12.z.string(),
      action: import_zod12.z.object({
        type: import_zod12.z.literal("search"),
        query: import_zod12.z.string().optional()
      }).nullish()
    }),
    import_zod12.z.object({
      type: import_zod12.z.literal("computer_call"),
      id: import_zod12.z.string(),
      status: import_zod12.z.string()
    }),
    import_zod12.z.object({
      type: import_zod12.z.literal("file_search_call"),
      id: import_zod12.z.string(),
      status: import_zod12.z.string(),
      queries: import_zod12.z.array(import_zod12.z.string()).nullish(),
      results: import_zod12.z.array(
        import_zod12.z.object({
          attributes: import_zod12.z.object({
            file_id: import_zod12.z.string(),
            filename: import_zod12.z.string(),
            score: import_zod12.z.number(),
            text: import_zod12.z.string()
          })
        })
      ).optional()
    })
  ])
});
3174
// Citation attached to output text. `url_citation` maps to a URL source;
// `file_citation` is surfaced as a pseudo `file://<file_id>` source by the
// transform. All positional fields on file citations are optional.
var responseAnnotationAddedSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.output_text.annotation.added"),
  annotation: import_zod12.z.discriminatedUnion("type", [
    import_zod12.z.object({
      type: import_zod12.z.literal("url_citation"),
      url: import_zod12.z.string(),
      title: import_zod12.z.string()
    }),
    import_zod12.z.object({
      type: import_zod12.z.literal("file_citation"),
      file_id: import_zod12.z.string(),
      filename: import_zod12.z.string().nullish(),
      index: import_zod12.z.number().nullish(),
      start_index: import_zod12.z.number().nullish(),
      end_index: import_zod12.z.number().nullish(),
      quote: import_zod12.z.string().nullish()
    })
  ])
});
// Incremental reasoning-summary text; forwarded as a `reasoning` stream part.
var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
  item_id: import_zod12.z.string(),
  output_index: import_zod12.z.number(),
  summary_index: import_zod12.z.number(),
  delta: import_zod12.z.string()
});
3200
// The schemas below describe lifecycle/bookkeeping events that the transform
// recognizes but deliberately ignores (their guard branches are empty).
// Validating them explicitly keeps them from hitting the "unhandled chunk"
// fallback log.
var responseInProgressChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.in_progress"),
  sequence_number: import_zod12.z.number(),
  response: import_zod12.z.any()
});
var responseContentPartAddedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.content_part.added"),
  sequence_number: import_zod12.z.number(),
  item_id: import_zod12.z.string(),
  output_index: import_zod12.z.number(),
  content_index: import_zod12.z.number(),
  part: import_zod12.z.any()
});
// Final full text of one output_text part (the deltas were already streamed).
var responseOutputTextDoneChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.output_text.done"),
  sequence_number: import_zod12.z.number(),
  item_id: import_zod12.z.string(),
  output_index: import_zod12.z.number(),
  content_index: import_zod12.z.number(),
  text: import_zod12.z.string(),
  logprobs: import_zod12.z.array(import_zod12.z.any()).optional()
});
var responseContentPartDoneChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.content_part.done"),
  sequence_number: import_zod12.z.number(),
  item_id: import_zod12.z.string(),
  output_index: import_zod12.z.number(),
  content_index: import_zod12.z.number(),
  part: import_zod12.z.any()
});
// File-search tool progress notifications (in_progress -> searching ->
// completed), plus per-query / per-result additions.
var responseFileSearchCallInProgressChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.file_search_call.in_progress"),
  sequence_number: import_zod12.z.number(),
  output_index: import_zod12.z.number(),
  item_id: import_zod12.z.string()
});
var responseFileSearchCallSearchingChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.file_search_call.searching"),
  sequence_number: import_zod12.z.number(),
  output_index: import_zod12.z.number(),
  item_id: import_zod12.z.string()
});
var responseFileSearchCallCompletedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.file_search_call.completed"),
  sequence_number: import_zod12.z.number(),
  output_index: import_zod12.z.number(),
  item_id: import_zod12.z.string()
});
var responseFileSearchCallQueryAddedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.file_search_call.query.added"),
  sequence_number: import_zod12.z.number(),
  output_index: import_zod12.z.number(),
  item_id: import_zod12.z.string(),
  query: import_zod12.z.string()
});
var responseFileSearchCallResultAddedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.literal("response.file_search_call.result.added"),
  sequence_number: import_zod12.z.number(),
  output_index: import_zod12.z.number(),
  item_id: import_zod12.z.string(),
  result: import_zod12.z.any()
});
3262
// Union of every recognized streaming event. Order matters: zod unions try
// members in sequence, so the permissive `{ type: string }` passthrough MUST
// stay last — it accepts any chunk with a `type` tag so unknown event kinds
// degrade to the "unhandled chunk" log instead of a parse failure.
var openaiResponsesChunkSchema = import_zod12.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  responseInProgressChunkSchema,
  responseContentPartAddedChunkSchema,
  responseOutputTextDoneChunkSchema,
  responseContentPartDoneChunkSchema,
  responseFileSearchCallInProgressChunkSchema,
  responseFileSearchCallSearchingChunkSchema,
  responseFileSearchCallCompletedChunkSchema,
  responseFileSearchCallQueryAddedChunkSchema,
  responseFileSearchCallResultAddedChunkSchema,
  import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
  // fallback for unknown chunks
]);
3283
// --- Streaming chunk type guards ----------------------------------------
// Each guard narrows an already-parsed stream event by its `type` tag so the
// transform can dispatch on event kind without re-inspecting the payload.
// (Declared as hoisted function declarations; the stream transform earlier in
// the file references them.)
function isTextDeltaChunk({ type }) { return type === "response.output_text.delta"; }
function isResponseOutputItemDoneChunk({ type }) { return type === "response.output_item.done"; }
// Matches both terminal outcomes: full completion and early cutoff.
function isResponseFinishedChunk({ type }) { return type === "response.completed" || type === "response.incomplete"; }
function isResponseCreatedChunk({ type }) { return type === "response.created"; }
function isResponseFunctionCallArgumentsDeltaChunk({ type }) { return type === "response.function_call_arguments.delta"; }
function isResponseOutputItemAddedChunk({ type }) { return type === "response.output_item.added"; }
function isResponseAnnotationAddedChunk({ type }) { return type === "response.output_text.annotation.added"; }
function isResponseReasoningSummaryTextDeltaChunk({ type }) { return type === "response.reasoning_summary_text.delta"; }
function isResponseInProgressChunk({ type }) { return type === "response.in_progress"; }
function isResponseContentPartAddedChunk({ type }) { return type === "response.content_part.added"; }
function isResponseOutputTextDoneChunk({ type }) { return type === "response.output_text.done"; }
function isResponseContentPartDoneChunk({ type }) { return type === "response.content_part.done"; }
function isResponseFileSearchCallInProgressChunk({ type }) { return type === "response.file_search_call.in_progress"; }
function isResponseFileSearchCallSearchingChunk({ type }) { return type === "response.file_search_call.searching"; }
function isResponseFileSearchCallCompletedChunk({ type }) { return type === "response.file_search_call.completed"; }
function isResponseFileSearchCallQueryAddedChunk({ type }) { return type === "response.file_search_call.query.added"; }
function isResponseFileSearchCallResultAddedChunk({ type }) { return type === "response.file_search_call.result.added"; }
3334
/**
 * Derive per-model behavior flags for the Responses API from the model id.
 *
 * - Ids starting with "o" are treated as reasoning models. Early o1 variants
 *   ("o1-mini", "o1-preview") do not accept system-style messages at all, so
 *   system messages are removed; other o-series models use the "developer"
 *   role instead.
 * - Everything else is a non-reasoning model using plain "system" messages.
 *
 * @param {string} modelId - Model identifier, e.g. "o1-mini", "gpt-4o".
 * @returns {{isReasoningModel: boolean, systemMessageMode: string, requiredAutoTruncation: boolean}}
 */
function getResponsesModelConfig(modelId) {
  // Non o-series: ordinary model, keep system messages verbatim.
  if (!modelId.startsWith("o")) {
    return {
      isReasoningModel: false,
      systemMessageMode: "system",
      requiredAutoTruncation: false
    };
  }
  const isEarlyO1 = modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview");
  return {
    isReasoningModel: true,
    systemMessageMode: isEarlyO1 ? "remove" : "developer",
    requiredAutoTruncation: false
  };
}
3355
// Provider-specific options accepted for Responses API calls. Every field is
// optional (`.nullish()`); `include` restricts extra payload sections the API
// may attach to responses.
var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
  metadata: import_zod12.z.any().nullish(),
  parallelToolCalls: import_zod12.z.boolean().nullish(),
  include: import_zod12.z.array(
    import_zod12.z.enum([
      "file_search_call.results",
      "code_interpreter_call.output",
      "web_search_call.results",
      "message.input_image.image_url",
      "computer_call_output.output.image_url",
      "reasoning.encrypted_content",
      "message.output_text.logprobs"
    ])
  ).nullish(),
  // Chains this request onto a prior stored response.
  previousResponseId: import_zod12.z.string().nullish(),
  forceNoTemperature: import_zod12.z.boolean().nullish(),
  store: import_zod12.z.boolean().nullish(),
  user: import_zod12.z.string().nullish(),
  reasoningEffort: import_zod12.z.string().nullish(),
  strictSchemas: import_zod12.z.boolean().nullish(),
  instructions: import_zod12.z.string().nullish(),
  reasoningSummary: import_zod12.z.string().nullish()
});
3378
// Annotate the CommonJS export names for ESM import in node:
// (`0 && ...` never executes at runtime; the object literal exists only so
// static analysis of this CJS bundle can discover the named exports.)
0 && (module.exports = {
  OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel,
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAISpeechModel,
  OpenAITranscriptionModel,
  hasDefaultResponseFormat,
  modelMaxImagesPerCall
});
3390
- //# sourceMappingURL=index.js.map