@zenning/openai 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2789 @@
1
+ // src/openai-chat-language-model.ts
2
+ import {
3
+ InvalidResponseDataError,
4
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
5
+ } from "@ai-sdk/provider";
6
+ import {
7
+ combineHeaders,
8
+ createEventSourceResponseHandler,
9
+ createJsonResponseHandler,
10
+ generateId,
11
+ isParsableJson,
12
+ postJsonToApi
13
+ } from "@ai-sdk/provider-utils";
14
+ import { z as z2 } from "zod";
15
+
16
+ // src/convert-to-openai-chat-messages.ts
17
+ import {
18
+ UnsupportedFunctionalityError
19
+ } from "@ai-sdk/provider";
20
+ import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
21
+ function convertToOpenAIChatMessages({
22
+ prompt,
23
+ useLegacyFunctionCalling = false,
24
+ systemMessageMode = "system"
25
+ }) {
26
+ const messages = [];
27
+ const warnings = [];
28
+ for (const { role, content } of prompt) {
29
+ switch (role) {
30
+ case "system": {
31
+ switch (systemMessageMode) {
32
+ case "system": {
33
+ messages.push({ role: "system", content });
34
+ break;
35
+ }
36
+ case "developer": {
37
+ messages.push({ role: "developer", content });
38
+ break;
39
+ }
40
+ case "remove": {
41
+ warnings.push({
42
+ type: "other",
43
+ message: "system messages are removed for this model"
44
+ });
45
+ break;
46
+ }
47
+ default: {
48
+ const _exhaustiveCheck = systemMessageMode;
49
+ throw new Error(
50
+ `Unsupported system message mode: ${_exhaustiveCheck}`
51
+ );
52
+ }
53
+ }
54
+ break;
55
+ }
56
+ case "user": {
57
+ if (content.length === 1 && content[0].type === "text") {
58
+ messages.push({ role: "user", content: content[0].text });
59
+ break;
60
+ }
61
+ messages.push({
62
+ role: "user",
63
+ content: content.map((part, index) => {
64
+ var _a, _b, _c, _d;
65
+ switch (part.type) {
66
+ case "text": {
67
+ return { type: "text", text: part.text };
68
+ }
69
+ case "image": {
70
+ return {
71
+ type: "image_url",
72
+ image_url: {
73
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
74
+ // OpenAI specific extension: image detail
75
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
76
+ }
77
+ };
78
+ }
79
+ case "file": {
80
+ if (part.data instanceof URL) {
81
+ throw new UnsupportedFunctionalityError({
82
+ functionality: "'File content parts with URL data' functionality not supported."
83
+ });
84
+ }
85
+ switch (part.mimeType) {
86
+ case "audio/wav": {
87
+ return {
88
+ type: "input_audio",
89
+ input_audio: { data: part.data, format: "wav" }
90
+ };
91
+ }
92
+ case "audio/mp3":
93
+ case "audio/mpeg": {
94
+ return {
95
+ type: "input_audio",
96
+ input_audio: { data: part.data, format: "mp3" }
97
+ };
98
+ }
99
+ case "application/pdf": {
100
+ return {
101
+ type: "file",
102
+ file: {
103
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
104
+ file_data: `data:application/pdf;base64,${part.data}`
105
+ }
106
+ };
107
+ }
108
+ default: {
109
+ throw new UnsupportedFunctionalityError({
110
+ functionality: `File content part type ${part.mimeType} in user messages`
111
+ });
112
+ }
113
+ }
114
+ }
115
+ }
116
+ })
117
+ });
118
+ break;
119
+ }
120
+ case "assistant": {
121
+ let text = "";
122
+ const toolCalls = [];
123
+ for (const part of content) {
124
+ switch (part.type) {
125
+ case "text": {
126
+ text += part.text;
127
+ break;
128
+ }
129
+ case "tool-call": {
130
+ toolCalls.push({
131
+ id: part.toolCallId,
132
+ type: "function",
133
+ function: {
134
+ name: part.toolName,
135
+ arguments: JSON.stringify(part.args)
136
+ }
137
+ });
138
+ break;
139
+ }
140
+ }
141
+ }
142
+ if (useLegacyFunctionCalling) {
143
+ if (toolCalls.length > 1) {
144
+ throw new UnsupportedFunctionalityError({
145
+ functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
146
+ });
147
+ }
148
+ messages.push({
149
+ role: "assistant",
150
+ content: text,
151
+ function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
152
+ });
153
+ } else {
154
+ messages.push({
155
+ role: "assistant",
156
+ content: text,
157
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
158
+ });
159
+ }
160
+ break;
161
+ }
162
+ case "tool": {
163
+ for (const toolResponse of content) {
164
+ if (useLegacyFunctionCalling) {
165
+ messages.push({
166
+ role: "function",
167
+ name: toolResponse.toolName,
168
+ content: JSON.stringify(toolResponse.result)
169
+ });
170
+ } else {
171
+ messages.push({
172
+ role: "tool",
173
+ tool_call_id: toolResponse.toolCallId,
174
+ content: JSON.stringify(toolResponse.result)
175
+ });
176
+ }
177
+ }
178
+ break;
179
+ }
180
+ default: {
181
+ const _exhaustiveCheck = role;
182
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
183
+ }
184
+ }
185
+ }
186
+ return { messages, warnings };
187
+ }
188
+
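// ---- Editor's note: illustrative usage sketch, not part of the package diff. ----
// How convertToOpenAIChatMessages maps an AI SDK prompt onto OpenAI chat
// messages: single text-part user messages are flattened to a plain string,
// and warnings are only produced when systemMessageMode is "remove".
const exampleConversion = convertToOpenAIChatMessages({
  prompt: [
    { role: "system", content: "You are terse." },
    { role: "user", content: [{ type: "text", text: "Hello" }] }
  ],
  systemMessageMode: "system"
});
// exampleConversion.messages:
//   [{ role: "system", content: "You are terse." },
//    { role: "user", content: "Hello" }]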
189
+ // src/map-openai-chat-logprobs.ts
190
+ function mapOpenAIChatLogProbsOutput(logprobs) {
191
+ var _a, _b;
192
+ return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
193
+ token,
194
+ logprob,
195
+ topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
196
+ token: token2,
197
+ logprob: logprob2
198
+ })) : []
199
+ }))) != null ? _b : void 0;
200
+ }
201
+
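// Editor's note: illustrative sketch, not part of the package diff. The mapper
// renames OpenAI's snake_case logprob fields to the SDK's camelCase shape and
// defaults a missing top_logprobs list to []; a null/undefined input maps to
// undefined rather than an empty array.
const mappedLogprobsExample = mapOpenAIChatLogProbsOutput({
  content: [{ token: "Hi", logprob: -0.1, top_logprobs: null }]
});
// mappedLogprobsExample: [{ token: "Hi", logprob: -0.1, topLogprobs: [] }]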
202
+ // src/map-openai-finish-reason.ts
203
+ function mapOpenAIFinishReason(finishReason) {
204
+ switch (finishReason) {
205
+ case "stop":
206
+ return "stop";
207
+ case "length":
208
+ return "length";
209
+ case "content_filter":
210
+ return "content-filter";
211
+ case "function_call":
212
+ case "tool_calls":
213
+ return "tool-calls";
214
+ default:
215
+ return "unknown";
216
+ }
217
+ }
218
+
219
+ // src/openai-error.ts
220
+ import { z } from "zod";
221
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
222
+ var openaiErrorDataSchema = z.object({
223
+ error: z.object({
224
+ message: z.string(),
225
+ // The additional information below is handled loosely to support
226
+ // OpenAI-compatible providers that have slightly different error
227
+ // responses:
228
+ type: z.string().nullish(),
229
+ param: z.any().nullish(),
230
+ code: z.union([z.string(), z.number()]).nullish()
231
+ })
232
+ });
233
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
234
+ errorSchema: openaiErrorDataSchema,
235
+ errorToMessage: (data) => data.error.message
236
+ });
237
+
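// Editor's note: illustrative sketch, not part of the package diff. A typical
// OpenAI error body accepted by openaiErrorDataSchema; type/param/code are
// deliberately loose so that OpenAI-compatible providers also parse:
const exampleErrorData = openaiErrorDataSchema.parse({
  error: {
    message: "Rate limit reached",
    type: "requests",
    param: null,
    code: "rate_limit_exceeded"
  }
});
// openaiFailedResponseHandler uses exampleErrorData.error.message as the
// message of the error thrown for failed responses.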
238
+ // src/get-response-metadata.ts
239
+ function getResponseMetadata({
240
+ id,
241
+ model,
242
+ created
243
+ }) {
244
+ return {
245
+ id: id != null ? id : void 0,
246
+ modelId: model != null ? model : void 0,
247
+ timestamp: created != null ? new Date(created * 1e3) : void 0
248
+ };
249
+ }
250
+
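// Editor's note: small sketch, not part of the package diff. OpenAI reports
// `created` in Unix seconds, hence the * 1e3 conversion to a JS Date:
const exampleMetadata = getResponseMetadata({
  id: "chatcmpl-123",
  model: "gpt-4o",
  created: 1714000000
});
// exampleMetadata: { id: "chatcmpl-123", modelId: "gpt-4o",
//   timestamp: new Date(1714000000 * 1e3) }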
251
+ // src/openai-prepare-tools.ts
252
+ import {
253
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError2
254
+ } from "@ai-sdk/provider";
255
+ function prepareTools({
256
+ mode,
257
+ useLegacyFunctionCalling = false,
258
+ structuredOutputs
259
+ }) {
260
+ var _a;
261
+ const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
262
+ const toolWarnings = [];
263
+ if (tools == null) {
264
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
265
+ }
266
+ const toolChoice = mode.toolChoice;
267
+ if (useLegacyFunctionCalling) {
268
+ const openaiFunctions = [];
269
+ for (const tool of tools) {
270
+ if (tool.type === "provider-defined") {
271
+ toolWarnings.push({ type: "unsupported-tool", tool });
272
+ } else {
273
+ openaiFunctions.push({
274
+ name: tool.name,
275
+ description: tool.description,
276
+ parameters: tool.parameters
277
+ });
278
+ }
279
+ }
280
+ if (toolChoice == null) {
281
+ return {
282
+ functions: openaiFunctions,
283
+ function_call: void 0,
284
+ toolWarnings
285
+ };
286
+ }
287
+ const type2 = toolChoice.type;
288
+ switch (type2) {
289
+ case "auto":
290
+ case "none":
291
+ case void 0:
292
+ return {
293
+ functions: openaiFunctions,
294
+ function_call: void 0,
295
+ toolWarnings
296
+ };
297
+ case "required":
298
+ throw new UnsupportedFunctionalityError2({
299
+ functionality: "useLegacyFunctionCalling and toolChoice: required"
300
+ });
301
+ default:
302
+ return {
303
+ functions: openaiFunctions,
304
+ function_call: { name: toolChoice.toolName },
305
+ toolWarnings
306
+ };
307
+ }
308
+ }
309
+ const openaiTools = [];
310
+ for (const tool of tools) {
311
+ if (tool.type === "provider-defined") {
312
+ toolWarnings.push({ type: "unsupported-tool", tool });
313
+ } else {
314
+ openaiTools.push({
315
+ type: "function",
316
+ function: {
317
+ name: tool.name,
318
+ description: tool.description,
319
+ parameters: tool.parameters,
320
+ strict: structuredOutputs ? true : void 0
321
+ }
322
+ });
323
+ }
324
+ }
325
+ if (toolChoice == null) {
326
+ return { tools: openaiTools, tool_choice: void 0, toolWarnings };
327
+ }
328
+ const type = toolChoice.type;
329
+ switch (type) {
330
+ case "auto":
331
+ case "none":
332
+ case "required":
333
+ return { tools: openaiTools, tool_choice: type, toolWarnings };
334
+ case "tool":
335
+ return {
336
+ tools: openaiTools,
337
+ tool_choice: {
338
+ type: "function",
339
+ function: {
340
+ name: toolChoice.toolName
341
+ }
342
+ },
343
+ toolWarnings
344
+ };
345
+ default: {
346
+ const _exhaustiveCheck = type;
347
+ throw new UnsupportedFunctionalityError2({
348
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
349
+ });
350
+ }
351
+ }
352
+ }
353
+
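// Editor's note: illustrative sketch, not part of the package diff. Converting
// a single SDK function tool in "regular" mode into the OpenAI tools payload;
// `strict` is only set on the function when structured outputs are enabled:
const preparedTools = prepareTools({
  mode: {
    type: "regular",
    tools: [
      {
        type: "function",
        name: "getWeather",
        description: "Look up the weather for a city",
        parameters: { type: "object", properties: { city: { type: "string" } } }
      }
    ],
    toolChoice: { type: "tool", toolName: "getWeather" }
  },
  structuredOutputs: false
});
// preparedTools.tools[0].function.name === "getWeather"
// preparedTools.tool_choice: { type: "function", function: { name: "getWeather" } }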
354
+ // src/openai-chat-language-model.ts
355
+ var OpenAIChatLanguageModel = class {
356
+ constructor(modelId, settings, config) {
357
+ this.specificationVersion = "v1";
358
+ this.modelId = modelId;
359
+ this.settings = settings;
360
+ this.config = config;
361
+ }
362
+ get supportsStructuredOutputs() {
363
+ var _a;
364
+ return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
365
+ }
366
+ get defaultObjectGenerationMode() {
367
+ if (isAudioModel(this.modelId)) {
368
+ return "tool";
369
+ }
370
+ return this.supportsStructuredOutputs ? "json" : "tool";
371
+ }
372
+ get provider() {
373
+ return this.config.provider;
374
+ }
375
+ get supportsImageUrls() {
376
+ return !this.settings.downloadImages;
377
+ }
378
+ getArgs({
379
+ mode,
380
+ prompt,
381
+ maxTokens,
382
+ temperature,
383
+ topP,
384
+ topK,
385
+ frequencyPenalty,
386
+ presencePenalty,
387
+ stopSequences,
388
+ responseFormat,
389
+ seed,
390
+ providerMetadata
391
+ }) {
392
+ var _a, _b, _c, _d, _e, _f, _g, _h;
393
+ const type = mode.type;
394
+ const warnings = [];
395
+ if (topK != null) {
396
+ warnings.push({
397
+ type: "unsupported-setting",
398
+ setting: "topK"
399
+ });
400
+ }
401
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
402
+ warnings.push({
403
+ type: "unsupported-setting",
404
+ setting: "responseFormat",
405
+ details: "JSON response format schema is only supported with structuredOutputs"
406
+ });
407
+ }
408
+ const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
409
+ if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
410
+ throw new UnsupportedFunctionalityError3({
411
+ functionality: "useLegacyFunctionCalling with parallelToolCalls"
412
+ });
413
+ }
414
+ if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
415
+ throw new UnsupportedFunctionalityError3({
416
+ functionality: "structuredOutputs with useLegacyFunctionCalling"
417
+ });
418
+ }
419
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
420
+ {
421
+ prompt,
422
+ useLegacyFunctionCalling,
423
+ systemMessageMode: getSystemMessageMode(this.modelId)
424
+ }
425
+ );
426
+ warnings.push(...messageWarnings);
427
+ const baseArgs = {
428
+ // model id:
429
+ model: this.modelId,
430
+ // model specific settings:
431
+ logit_bias: this.settings.logitBias,
432
+ logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
433
+ top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
434
+ user: this.settings.user,
435
+ parallel_tool_calls: this.settings.parallelToolCalls,
436
+ // standardized settings:
437
+ max_tokens: maxTokens,
438
+ temperature,
439
+ top_p: topP,
440
+ frequency_penalty: frequencyPenalty,
441
+ presence_penalty: presencePenalty,
442
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
443
+ type: "json_schema",
444
+ json_schema: {
445
+ schema: responseFormat.schema,
446
+ strict: true,
447
+ name: (_a = responseFormat.name) != null ? _a : "response",
448
+ description: responseFormat.description
449
+ }
450
+ } : { type: "json_object" } : void 0,
451
+ stop: stopSequences,
452
+ seed,
453
+ // openai specific settings:
454
+ // TODO remove in next major version; we auto-map maxTokens now
455
+ max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
456
+ store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
457
+ metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
458
+ prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
459
+ reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
460
+ // messages:
461
+ messages
462
+ };
463
+ if (isReasoningModel(this.modelId)) {
464
+ if (baseArgs.temperature != null) {
465
+ baseArgs.temperature = void 0;
466
+ warnings.push({
467
+ type: "unsupported-setting",
468
+ setting: "temperature",
469
+ details: "temperature is not supported for reasoning models"
470
+ });
471
+ }
472
+ if (baseArgs.top_p != null) {
473
+ baseArgs.top_p = void 0;
474
+ warnings.push({
475
+ type: "unsupported-setting",
476
+ setting: "topP",
477
+ details: "topP is not supported for reasoning models"
478
+ });
479
+ }
480
+ if (baseArgs.frequency_penalty != null) {
481
+ baseArgs.frequency_penalty = void 0;
482
+ warnings.push({
483
+ type: "unsupported-setting",
484
+ setting: "frequencyPenalty",
485
+ details: "frequencyPenalty is not supported for reasoning models"
486
+ });
487
+ }
488
+ if (baseArgs.presence_penalty != null) {
489
+ baseArgs.presence_penalty = void 0;
490
+ warnings.push({
491
+ type: "unsupported-setting",
492
+ setting: "presencePenalty",
493
+ details: "presencePenalty is not supported for reasoning models"
494
+ });
495
+ }
496
+ if (baseArgs.logit_bias != null) {
497
+ baseArgs.logit_bias = void 0;
498
+ warnings.push({
499
+ type: "other",
500
+ message: "logitBias is not supported for reasoning models"
501
+ });
502
+ }
503
+ if (baseArgs.logprobs != null) {
504
+ baseArgs.logprobs = void 0;
505
+ warnings.push({
506
+ type: "other",
507
+ message: "logprobs is not supported for reasoning models"
508
+ });
509
+ }
510
+ if (baseArgs.top_logprobs != null) {
511
+ baseArgs.top_logprobs = void 0;
512
+ warnings.push({
513
+ type: "other",
514
+ message: "topLogprobs is not supported for reasoning models"
515
+ });
516
+ }
517
+ if (baseArgs.max_tokens != null) {
518
+ if (baseArgs.max_completion_tokens == null) {
519
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
520
+ }
521
+ baseArgs.max_tokens = void 0;
522
+ }
523
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
524
+ if (baseArgs.temperature != null) {
525
+ baseArgs.temperature = void 0;
526
+ warnings.push({
527
+ type: "unsupported-setting",
528
+ setting: "temperature",
529
+ details: "temperature is not supported for the search preview models and has been removed."
530
+ });
531
+ }
532
+ }
533
+ switch (type) {
534
+ case "regular": {
535
+ const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
536
+ mode,
537
+ useLegacyFunctionCalling,
538
+ structuredOutputs: this.supportsStructuredOutputs
539
+ });
540
+ return {
541
+ args: {
542
+ ...baseArgs,
543
+ tools,
544
+ tool_choice,
545
+ functions,
546
+ function_call
547
+ },
548
+ warnings: [...warnings, ...toolWarnings]
549
+ };
550
+ }
551
+ case "object-json": {
552
+ return {
553
+ args: {
554
+ ...baseArgs,
555
+ response_format: this.supportsStructuredOutputs && mode.schema != null ? {
556
+ type: "json_schema",
557
+ json_schema: {
558
+ schema: mode.schema,
559
+ strict: true,
560
+ name: (_h = mode.name) != null ? _h : "response",
561
+ description: mode.description
562
+ }
563
+ } : { type: "json_object" }
564
+ },
565
+ warnings
566
+ };
567
+ }
568
+ case "object-tool": {
569
+ return {
570
+ args: useLegacyFunctionCalling ? {
571
+ ...baseArgs,
572
+ function_call: {
573
+ name: mode.tool.name
574
+ },
575
+ functions: [
576
+ {
577
+ name: mode.tool.name,
578
+ description: mode.tool.description,
579
+ parameters: mode.tool.parameters
580
+ }
581
+ ]
582
+ } : {
583
+ ...baseArgs,
584
+ tool_choice: {
585
+ type: "function",
586
+ function: { name: mode.tool.name }
587
+ },
588
+ tools: [
589
+ {
590
+ type: "function",
591
+ function: {
592
+ name: mode.tool.name,
593
+ description: mode.tool.description,
594
+ parameters: mode.tool.parameters,
595
+ strict: this.supportsStructuredOutputs ? true : void 0
596
+ }
597
+ }
598
+ ]
599
+ },
600
+ warnings
601
+ };
602
+ }
603
+ default: {
604
+ const _exhaustiveCheck = type;
605
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
606
+ }
607
+ }
608
+ }
609
+ async doGenerate(options) {
610
+ var _a, _b, _c, _d, _e, _f, _g, _h;
611
+ const { args: body, warnings } = this.getArgs(options);
612
+ const {
613
+ responseHeaders,
614
+ value: response,
615
+ rawValue: rawResponse
616
+ } = await postJsonToApi({
617
+ url: this.config.url({
618
+ path: "/chat/completions",
619
+ modelId: this.modelId
620
+ }),
621
+ headers: combineHeaders(this.config.headers(), options.headers),
622
+ body,
623
+ failedResponseHandler: openaiFailedResponseHandler,
624
+ successfulResponseHandler: createJsonResponseHandler(
625
+ openaiChatResponseSchema
626
+ ),
627
+ abortSignal: options.abortSignal,
628
+ fetch: this.config.fetch
629
+ });
630
+ const { messages: rawPrompt, ...rawSettings } = body;
631
+ const choice = response.choices[0];
632
+ const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
633
+ const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
634
+ const providerMetadata = { openai: {} };
635
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
636
+ providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
637
+ }
638
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
639
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
640
+ }
641
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
642
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
643
+ }
644
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
645
+ providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
646
+ }
647
+ return {
648
+ text: (_c = choice.message.content) != null ? _c : void 0,
649
+ toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
650
+ {
651
+ toolCallType: "function",
652
+ toolCallId: generateId(),
653
+ toolName: choice.message.function_call.name,
654
+ args: choice.message.function_call.arguments
655
+ }
656
+ ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
657
+ var _a2;
658
+ return {
659
+ toolCallType: "function",
660
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
661
+ toolName: toolCall.function.name,
662
+ args: toolCall.function.arguments
663
+ };
664
+ }),
665
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
666
+ usage: {
667
+ promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
668
+ completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
669
+ },
670
+ rawCall: { rawPrompt, rawSettings },
671
+ rawResponse: { headers: responseHeaders, body: rawResponse },
672
+ request: { body: JSON.stringify(body) },
673
+ response: getResponseMetadata(response),
674
+ warnings,
675
+ logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
676
+ providerMetadata
677
+ };
678
+ }
679
+ async doStream(options) {
680
+ if (this.settings.simulateStreaming) {
681
+ const result = await this.doGenerate(options);
682
+ const simulatedStream = new ReadableStream({
683
+ start(controller) {
684
+ controller.enqueue({ type: "response-metadata", ...result.response });
685
+ if (result.text) {
686
+ controller.enqueue({
687
+ type: "text-delta",
688
+ textDelta: result.text
689
+ });
690
+ }
691
+ if (result.toolCalls) {
692
+ for (const toolCall of result.toolCalls) {
693
+ controller.enqueue({
694
+ type: "tool-call-delta",
695
+ toolCallType: "function",
696
+ toolCallId: toolCall.toolCallId,
697
+ toolName: toolCall.toolName,
698
+ argsTextDelta: toolCall.args
699
+ });
700
+ controller.enqueue({
701
+ type: "tool-call",
702
+ ...toolCall
703
+ });
704
+ }
705
+ }
706
+ controller.enqueue({
707
+ type: "finish",
708
+ finishReason: result.finishReason,
709
+ usage: result.usage,
710
+ logprobs: result.logprobs,
711
+ providerMetadata: result.providerMetadata
712
+ });
713
+ controller.close();
714
+ }
715
+ });
716
+ return {
717
+ stream: simulatedStream,
718
+ rawCall: result.rawCall,
719
+ rawResponse: result.rawResponse,
720
+ warnings: result.warnings
721
+ };
722
+ }
723
+ const { args, warnings } = this.getArgs(options);
724
+ const body = {
725
+ ...args,
726
+ stream: true,
727
+ // only include stream_options when in strict compatibility mode:
728
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
729
+ };
730
+ const { responseHeaders, value: response } = await postJsonToApi({
731
+ url: this.config.url({
732
+ path: "/chat/completions",
733
+ modelId: this.modelId
734
+ }),
735
+ headers: combineHeaders(this.config.headers(), options.headers),
736
+ body,
737
+ failedResponseHandler: openaiFailedResponseHandler,
738
+ successfulResponseHandler: createEventSourceResponseHandler(
739
+ openaiChatChunkSchema
740
+ ),
741
+ abortSignal: options.abortSignal,
742
+ fetch: this.config.fetch
743
+ });
744
+ const { messages: rawPrompt, ...rawSettings } = args;
745
+ const toolCalls = [];
746
+ let finishReason = "unknown";
747
+ let usage = {
748
+ promptTokens: void 0,
749
+ completionTokens: void 0
750
+ };
751
+ let logprobs;
752
+ let isFirstChunk = true;
753
+ const { useLegacyFunctionCalling } = this.settings;
754
+ const providerMetadata = { openai: {} };
755
+ return {
756
+ stream: response.pipeThrough(
757
+ new TransformStream({
758
+ transform(chunk, controller) {
759
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
760
+ if (!chunk.success) {
761
+ finishReason = "error";
762
+ controller.enqueue({ type: "error", error: chunk.error });
763
+ return;
764
+ }
765
+ const value = chunk.value;
766
+ if ("error" in value) {
767
+ finishReason = "error";
768
+ controller.enqueue({ type: "error", error: value.error });
769
+ return;
770
+ }
771
+ if (isFirstChunk) {
772
+ isFirstChunk = false;
773
+ controller.enqueue({
774
+ type: "response-metadata",
775
+ ...getResponseMetadata(value)
776
+ });
777
+ }
778
+ if (value.usage != null) {
779
+ const {
780
+ prompt_tokens,
781
+ completion_tokens,
782
+ prompt_tokens_details,
783
+ completion_tokens_details
784
+ } = value.usage;
785
+ usage = {
786
+ promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
787
+ completionTokens: completion_tokens != null ? completion_tokens : void 0
788
+ };
789
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
790
+ providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
791
+ }
792
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
793
+ providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
794
+ }
795
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
796
+ providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
797
+ }
798
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
799
+ providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
800
+ }
801
+ }
802
+ const choice = value.choices[0];
803
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
804
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
805
+ }
806
+ if ((choice == null ? void 0 : choice.delta) == null) {
807
+ return;
808
+ }
809
+ const delta = choice.delta;
810
+ if (delta.content != null) {
811
+ controller.enqueue({
812
+ type: "text-delta",
813
+ textDelta: delta.content
814
+ });
815
+ }
816
+ const mappedLogprobs = mapOpenAIChatLogProbsOutput(
817
+ choice == null ? void 0 : choice.logprobs
818
+ );
819
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
820
+ if (logprobs === void 0) logprobs = [];
821
+ logprobs.push(...mappedLogprobs);
822
+ }
823
+ const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
824
+ {
825
+ type: "function",
826
+ id: generateId(),
827
+ function: delta.function_call,
828
+ index: 0
829
+ }
830
+ ] : delta.tool_calls;
831
+ if (mappedToolCalls != null) {
832
+ for (const toolCallDelta of mappedToolCalls) {
833
+ const index = toolCallDelta.index;
834
+ if (toolCalls[index] == null) {
835
+ if (toolCallDelta.type !== "function") {
836
+ throw new InvalidResponseDataError({
837
+ data: toolCallDelta,
838
+ message: `Expected 'function' type.`
839
+ });
840
+ }
841
+ if (toolCallDelta.id == null) {
842
+ throw new InvalidResponseDataError({
843
+ data: toolCallDelta,
844
+ message: `Expected 'id' to be a string.`
845
+ });
846
+ }
847
+ if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
848
+ throw new InvalidResponseDataError({
849
+ data: toolCallDelta,
850
+ message: `Expected 'function.name' to be a string.`
851
+ });
852
+ }
853
+ toolCalls[index] = {
854
+ id: toolCallDelta.id,
855
+ type: "function",
856
+ function: {
857
+ name: toolCallDelta.function.name,
858
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
859
+ },
860
+ hasFinished: false
861
+ };
862
+ const toolCall2 = toolCalls[index];
863
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
864
+ if (toolCall2.function.arguments.length > 0) {
865
+ controller.enqueue({
866
+ type: "tool-call-delta",
867
+ toolCallType: "function",
868
+ toolCallId: toolCall2.id,
869
+ toolName: toolCall2.function.name,
870
+ argsTextDelta: toolCall2.function.arguments
871
+ });
872
+ }
873
+ if (isParsableJson(toolCall2.function.arguments)) {
874
+ controller.enqueue({
875
+ type: "tool-call",
876
+ toolCallType: "function",
877
+ toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
878
+ toolName: toolCall2.function.name,
879
+ args: toolCall2.function.arguments
880
+ });
881
+ toolCall2.hasFinished = true;
882
+ }
883
+ }
884
+ continue;
885
+ }
886
+ const toolCall = toolCalls[index];
887
+ if (toolCall.hasFinished) {
888
+ continue;
889
+ }
890
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
891
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
892
+ }
893
+ controller.enqueue({
894
+ type: "tool-call-delta",
895
+ toolCallType: "function",
896
+ toolCallId: toolCall.id,
897
+ toolName: toolCall.function.name,
898
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
899
+ });
900
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
901
+ controller.enqueue({
902
+ type: "tool-call",
903
+ toolCallType: "function",
904
+ toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
905
+ toolName: toolCall.function.name,
906
+ args: toolCall.function.arguments
907
+ });
908
+ toolCall.hasFinished = true;
909
+ }
910
+ }
911
+ }
912
+ },
913
+ flush(controller) {
914
+ var _a, _b;
915
+ controller.enqueue({
916
+ type: "finish",
917
+ finishReason,
918
+ logprobs,
919
+ usage: {
920
+ promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
921
+ completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
922
+ },
923
+ ...providerMetadata != null ? { providerMetadata } : {}
924
+ });
925
+ }
926
+ })
927
+ ),
928
+ rawCall: { rawPrompt, rawSettings },
929
+ rawResponse: { headers: responseHeaders },
930
+ request: { body: JSON.stringify(body) },
931
+ warnings
932
+ };
933
+ }
934
+ };
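// ---- Editor's note: hypothetical usage sketch, not part of the package diff. ----
// The config object (provider, url, headers, compatibility, fetch) is normally
// supplied by the provider factory; the values below are invented to show the
// wiring only.
const chatModel = new OpenAIChatLanguageModel(
  "gpt-4o",
  { logprobs: false },
  {
    provider: "openai.chat",
    compatibility: "strict", // enables stream_options: { include_usage: true }
    url: ({ path }) => `https://api.openai.com/v1${path}`,
    headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` })
  }
);
// chatModel.doGenerate(...) posts to /chat/completions and maps the response
// (text, tool calls, usage, logprobs) into the LanguageModelV1 result shape;
// doStream pipes SSE chunks through the TransformStream defined above.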
935
+ var openaiTokenUsageSchema = z2.object({
936
+ prompt_tokens: z2.number().nullish(),
937
+ completion_tokens: z2.number().nullish(),
938
+ prompt_tokens_details: z2.object({
939
+ cached_tokens: z2.number().nullish()
940
+ }).nullish(),
941
+ completion_tokens_details: z2.object({
942
+ reasoning_tokens: z2.number().nullish(),
943
+ accepted_prediction_tokens: z2.number().nullish(),
944
+ rejected_prediction_tokens: z2.number().nullish()
945
+ }).nullish()
946
+ }).nullish();
947
+ var openaiChatResponseSchema = z2.object({
948
+ id: z2.string().nullish(),
949
+ created: z2.number().nullish(),
950
+ model: z2.string().nullish(),
951
+ choices: z2.array(
952
+ z2.object({
953
+ message: z2.object({
954
+ role: z2.literal("assistant").nullish(),
955
+ content: z2.string().nullish(),
956
+ function_call: z2.object({
957
+ arguments: z2.string(),
958
+ name: z2.string()
959
+ }).nullish(),
960
+ tool_calls: z2.array(
961
+ z2.object({
962
+ id: z2.string().nullish(),
963
+ type: z2.literal("function"),
964
+ function: z2.object({
965
+ name: z2.string(),
966
+ arguments: z2.string()
967
+ })
968
+ })
969
+ ).nullish()
970
+ }),
971
+ index: z2.number(),
972
+ logprobs: z2.object({
973
+ content: z2.array(
974
+ z2.object({
975
+ token: z2.string(),
976
+ logprob: z2.number(),
977
+ top_logprobs: z2.array(
978
+ z2.object({
979
+ token: z2.string(),
980
+ logprob: z2.number()
981
+ })
982
+ )
983
+ })
984
+ ).nullable()
985
+ }).nullish(),
986
+ finish_reason: z2.string().nullish()
987
+ })
988
+ ),
989
+ usage: openaiTokenUsageSchema
990
+ });
991
+ var openaiChatChunkSchema = z2.union([
992
+ z2.object({
993
+ id: z2.string().nullish(),
994
+ created: z2.number().nullish(),
995
+ model: z2.string().nullish(),
996
+ choices: z2.array(
997
+ z2.object({
998
+ delta: z2.object({
999
+ role: z2.enum(["assistant"]).nullish(),
1000
+ content: z2.string().nullish(),
1001
+ function_call: z2.object({
1002
+ name: z2.string().optional(),
1003
+ arguments: z2.string().optional()
1004
+ }).nullish(),
1005
+ tool_calls: z2.array(
1006
+ z2.object({
1007
+ index: z2.number(),
1008
+ id: z2.string().nullish(),
1009
+ type: z2.literal("function").nullish(),
1010
+ function: z2.object({
1011
+ name: z2.string().nullish(),
1012
+ arguments: z2.string().nullish()
1013
+ })
1014
+ })
1015
+ ).nullish()
1016
+ }).nullish(),
1017
+ logprobs: z2.object({
1018
+ content: z2.array(
1019
+ z2.object({
1020
+ token: z2.string(),
1021
+ logprob: z2.number(),
1022
+ top_logprobs: z2.array(
1023
+ z2.object({
1024
+ token: z2.string(),
1025
+ logprob: z2.number()
1026
+ })
1027
+ )
1028
+ })
1029
+ ).nullable()
1030
+ }).nullish(),
1031
+ finish_reason: z2.string().nullish(),
1032
+ index: z2.number()
1033
+ })
1034
+ ),
1035
+ usage: openaiTokenUsageSchema
1036
+ }),
1037
+ openaiErrorDataSchema
1038
+ ]);
1039
+ function isReasoningModel(modelId) {
1040
+ return modelId.startsWith("o");
1041
+ }
1042
+ function isAudioModel(modelId) {
1043
+ return modelId.startsWith("gpt-4o-audio-preview");
1044
+ }
1045
+ function getSystemMessageMode(modelId) {
1046
+ var _a, _b;
1047
+ if (!isReasoningModel(modelId)) {
1048
+ return "system";
1049
+ }
1050
+ return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
1051
+ }
1052
+ var reasoningModels = {
1053
+ "o1-mini": {
1054
+ systemMessageMode: "remove"
1055
+ },
1056
+ "o1-mini-2024-09-12": {
1057
+ systemMessageMode: "remove"
1058
+ },
1059
+ "o1-preview": {
1060
+ systemMessageMode: "remove"
1061
+ },
1062
+ "o1-preview-2024-09-12": {
1063
+ systemMessageMode: "remove"
1064
+ },
1065
+ o3: {
1066
+ systemMessageMode: "developer"
1067
+ },
1068
+ "o3-2025-04-16": {
1069
+ systemMessageMode: "developer"
1070
+ },
1071
+ "o3-mini": {
1072
+ systemMessageMode: "developer"
1073
+ },
1074
+ "o3-mini-2025-01-31": {
1075
+ systemMessageMode: "developer"
1076
+ },
1077
+ "o4-mini": {
1078
+ systemMessageMode: "developer"
1079
+ },
1080
+ "o4-mini-2025-04-16": {
1081
+ systemMessageMode: "developer"
1082
+ }
1083
+ };
1084
+
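// Editor's note: behavior sketch, not part of the package diff. How the
// system-message mode resolves for a few model ids:
//   getSystemMessageMode("gpt-4o")        -> "system"    (not a reasoning model)
//   getSystemMessageMode("o1-mini")       -> "remove"    (explicit table entry)
//   getSystemMessageMode("o3-mini")       -> "developer" (explicit table entry)
//   getSystemMessageMode("o1-2024-12-17") -> "developer" (reasoning-model default)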
1085
+ // src/openai-completion-language-model.ts
1086
+ import {
1087
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
1088
+ } from "@ai-sdk/provider";
1089
+ import {
1090
+ combineHeaders as combineHeaders2,
1091
+ createEventSourceResponseHandler as createEventSourceResponseHandler2,
1092
+ createJsonResponseHandler as createJsonResponseHandler2,
1093
+ postJsonToApi as postJsonToApi2
1094
+ } from "@ai-sdk/provider-utils";
1095
+ import { z as z3 } from "zod";
1096
+
1097
+ // src/convert-to-openai-completion-prompt.ts
1098
+ import {
1099
+ InvalidPromptError,
1100
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
1101
+ } from "@ai-sdk/provider";
1102
+ function convertToOpenAICompletionPrompt({
1103
+ prompt,
1104
+ inputFormat,
1105
+ user = "user",
1106
+ assistant = "assistant"
1107
+ }) {
1108
+ if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
1109
+ return { prompt: prompt[0].content[0].text };
1110
+ }
1111
+ let text = "";
1112
+ if (prompt[0].role === "system") {
1113
+ text += `${prompt[0].content}
1114
+
1115
+ `;
1116
+ prompt = prompt.slice(1);
1117
+ }
1118
+ for (const { role, content } of prompt) {
1119
+ switch (role) {
1120
+ case "system": {
1121
+ throw new InvalidPromptError({
1122
+ message: "Unexpected system message in prompt: ${content}",
1123
+ prompt
1124
+ });
1125
+ }
1126
+ case "user": {
1127
+ const userMessage = content.map((part) => {
1128
+ switch (part.type) {
1129
+ case "text": {
1130
+ return part.text;
1131
+ }
1132
+ case "image": {
1133
+ throw new UnsupportedFunctionalityError4({
1134
+ functionality: "images"
1135
+ });
1136
+ }
1137
+ }
1138
+ }).join("");
1139
+ text += `${user}:
1140
+ ${userMessage}
1141
+
1142
+ `;
1143
+ break;
1144
+ }
1145
+ case "assistant": {
1146
+ const assistantMessage = content.map((part) => {
1147
+ switch (part.type) {
1148
+ case "text": {
1149
+ return part.text;
1150
+ }
1151
+ case "tool-call": {
1152
+ throw new UnsupportedFunctionalityError4({
1153
+ functionality: "tool-call messages"
1154
+ });
1155
+ }
1156
+ }
1157
+ }).join("");
1158
+ text += `${assistant}:
1159
+ ${assistantMessage}
1160
+
1161
+ `;
1162
+ break;
1163
+ }
1164
+ case "tool": {
1165
+ throw new UnsupportedFunctionalityError4({
1166
+ functionality: "tool messages"
1167
+ });
1168
+ }
1169
+ default: {
1170
+ const _exhaustiveCheck = role;
1171
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1172
+ }
1173
+ }
1174
+ }
1175
+ text += `${assistant}:
1176
+ `;
1177
+ return {
1178
+ prompt: text,
1179
+ stopSequences: [`
1180
+ ${user}:`]
1181
+ };
1182
+ }
1183
+
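// Editor's note: illustrative sketch, not part of the package diff. A chat
// prompt rendered into the plain-text completion format; the generated stop
// sequence prevents the model from continuing as the user:
const renderedPrompt = convertToOpenAICompletionPrompt({
  inputFormat: "messages",
  prompt: [
    { role: "system", content: "Be brief." },
    { role: "user", content: [{ type: "text", text: "Hi" }] }
  ]
});
// renderedPrompt.prompt: "Be brief.\n\nuser:\nHi\n\nassistant:\n"
// renderedPrompt.stopSequences: ["\nuser:"]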
1184
+ // src/map-openai-completion-logprobs.ts
1185
+ function mapOpenAICompletionLogProbs(logprobs) {
1186
+ return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
1187
+ token,
1188
+ logprob: logprobs.token_logprobs[index],
1189
+ topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
1190
+ ([token2, logprob]) => ({
1191
+ token: token2,
1192
+ logprob
1193
+ })
1194
+ ) : []
1195
+ }));
1196
+ }
1197
+
1198
+ // src/openai-completion-language-model.ts
1199
+ var OpenAICompletionLanguageModel = class {
1200
+ constructor(modelId, settings, config) {
1201
+ this.specificationVersion = "v1";
1202
+ this.defaultObjectGenerationMode = void 0;
1203
+ this.modelId = modelId;
1204
+ this.settings = settings;
1205
+ this.config = config;
1206
+ }
1207
+ get provider() {
1208
+ return this.config.provider;
1209
+ }
1210
+ getArgs({
1211
+ mode,
1212
+ inputFormat,
1213
+ prompt,
1214
+ maxTokens,
1215
+ temperature,
1216
+ topP,
1217
+ topK,
1218
+ frequencyPenalty,
1219
+ presencePenalty,
1220
+ stopSequences: userStopSequences,
1221
+ responseFormat,
1222
+ seed
1223
+ }) {
1224
+ var _a;
1225
+ const type = mode.type;
1226
+ const warnings = [];
1227
+ if (topK != null) {
1228
+ warnings.push({
1229
+ type: "unsupported-setting",
1230
+ setting: "topK"
1231
+ });
1232
+ }
1233
+ if (responseFormat != null && responseFormat.type !== "text") {
1234
+ warnings.push({
1235
+ type: "unsupported-setting",
1236
+ setting: "responseFormat",
1237
+ details: "JSON response format is not supported."
1238
+ });
1239
+ }
1240
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
1241
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1242
+ const baseArgs = {
1243
+ // model id:
1244
+ model: this.modelId,
1245
+ // model specific settings:
1246
+ echo: this.settings.echo,
1247
+ logit_bias: this.settings.logitBias,
1248
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1249
+ suffix: this.settings.suffix,
1250
+ user: this.settings.user,
1251
+ // standardized settings:
1252
+ max_tokens: maxTokens,
1253
+ temperature,
1254
+ top_p: topP,
1255
+ frequency_penalty: frequencyPenalty,
1256
+ presence_penalty: presencePenalty,
1257
+ seed,
1258
+ // prompt:
1259
+ prompt: completionPrompt,
1260
+ // stop sequences:
1261
+ stop: stop.length > 0 ? stop : void 0
1262
+ };
1263
+ switch (type) {
1264
+ case "regular": {
1265
+ if ((_a = mode.tools) == null ? void 0 : _a.length) {
1266
+ throw new UnsupportedFunctionalityError5({
1267
+ functionality: "tools"
1268
+ });
1269
+ }
1270
+ if (mode.toolChoice) {
1271
+ throw new UnsupportedFunctionalityError5({
1272
+ functionality: "toolChoice"
1273
+ });
1274
+ }
1275
+ return { args: baseArgs, warnings };
1276
+ }
1277
+ case "object-json": {
1278
+ throw new UnsupportedFunctionalityError5({
1279
+ functionality: "object-json mode"
1280
+ });
1281
+ }
1282
+ case "object-tool": {
1283
+ throw new UnsupportedFunctionalityError5({
1284
+ functionality: "object-tool mode"
1285
+ });
1286
+ }
1287
+ default: {
1288
+ const _exhaustiveCheck = type;
1289
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
1290
+ }
1291
+ }
1292
+ }
1293
+ async doGenerate(options) {
1294
+ const { args, warnings } = this.getArgs(options);
1295
+ const {
1296
+ responseHeaders,
1297
+ value: response,
1298
+ rawValue: rawResponse
1299
+ } = await postJsonToApi2({
1300
+ url: this.config.url({
1301
+ path: "/completions",
1302
+ modelId: this.modelId
1303
+ }),
1304
+ headers: combineHeaders2(this.config.headers(), options.headers),
1305
+ body: args,
1306
+ failedResponseHandler: openaiFailedResponseHandler,
1307
+ successfulResponseHandler: createJsonResponseHandler2(
1308
+ openaiCompletionResponseSchema
1309
+ ),
1310
+ abortSignal: options.abortSignal,
1311
+ fetch: this.config.fetch
1312
+ });
1313
+ const { prompt: rawPrompt, ...rawSettings } = args;
1314
+ const choice = response.choices[0];
1315
+ return {
1316
+ text: choice.text,
1317
+ usage: {
1318
+ promptTokens: response.usage.prompt_tokens,
1319
+ completionTokens: response.usage.completion_tokens
1320
+ },
1321
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
1322
+ logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
1323
+ rawCall: { rawPrompt, rawSettings },
1324
+ rawResponse: { headers: responseHeaders, body: rawResponse },
1325
+ response: getResponseMetadata(response),
1326
+ warnings,
1327
+ request: { body: JSON.stringify(args) }
1328
+ };
1329
+ }
1330
+ async doStream(options) {
1331
+ const { args, warnings } = this.getArgs(options);
1332
+ const body = {
1333
+ ...args,
1334
+ stream: true,
1335
+ // only include stream_options when in strict compatibility mode:
1336
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1337
+ };
1338
+ const { responseHeaders, value: response } = await postJsonToApi2({
1339
+ url: this.config.url({
1340
+ path: "/completions",
1341
+ modelId: this.modelId
1342
+ }),
1343
+ headers: combineHeaders2(this.config.headers(), options.headers),
1344
+ body,
1345
+ failedResponseHandler: openaiFailedResponseHandler,
1346
+ successfulResponseHandler: createEventSourceResponseHandler2(
1347
+ openaiCompletionChunkSchema
1348
+ ),
1349
+ abortSignal: options.abortSignal,
1350
+ fetch: this.config.fetch
1351
+ });
1352
+ const { prompt: rawPrompt, ...rawSettings } = args;
1353
+ let finishReason = "unknown";
1354
+ let usage = {
1355
+ promptTokens: Number.NaN,
1356
+ completionTokens: Number.NaN
1357
+ };
1358
+ let logprobs;
1359
+ let isFirstChunk = true;
1360
+ return {
1361
+ stream: response.pipeThrough(
1362
+ new TransformStream({
1363
+ transform(chunk, controller) {
1364
+ if (!chunk.success) {
1365
+ finishReason = "error";
1366
+ controller.enqueue({ type: "error", error: chunk.error });
1367
+ return;
1368
+ }
1369
+ const value = chunk.value;
1370
+ if ("error" in value) {
1371
+ finishReason = "error";
1372
+ controller.enqueue({ type: "error", error: value.error });
1373
+ return;
1374
+ }
1375
+ if (isFirstChunk) {
1376
+ isFirstChunk = false;
1377
+ controller.enqueue({
1378
+ type: "response-metadata",
1379
+ ...getResponseMetadata(value)
1380
+ });
1381
+ }
1382
+ if (value.usage != null) {
1383
+ usage = {
1384
+ promptTokens: value.usage.prompt_tokens,
1385
+ completionTokens: value.usage.completion_tokens
1386
+ };
1387
+ }
1388
+ const choice = value.choices[0];
1389
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1390
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
1391
+ }
1392
+ if ((choice == null ? void 0 : choice.text) != null) {
1393
+ controller.enqueue({
1394
+ type: "text-delta",
1395
+ textDelta: choice.text
1396
+ });
1397
+ }
1398
+ const mappedLogprobs = mapOpenAICompletionLogProbs(
1399
+ choice == null ? void 0 : choice.logprobs
1400
+ );
1401
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1402
+ if (logprobs === void 0) logprobs = [];
1403
+ logprobs.push(...mappedLogprobs);
1404
+ }
1405
+ },
1406
+ flush(controller) {
1407
+ controller.enqueue({
1408
+ type: "finish",
1409
+ finishReason,
1410
+ logprobs,
1411
+ usage
1412
+ });
1413
+ }
1414
+ })
1415
+ ),
1416
+ rawCall: { rawPrompt, rawSettings },
1417
+ rawResponse: { headers: responseHeaders },
1418
+ warnings,
1419
+ request: { body: JSON.stringify(body) }
1420
+ };
1421
+ }
1422
+ };
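// Editor's note: behavior sketch, not part of the package diff. getArgs merges
// the generated chat-template stop sequence with user-supplied ones, e.g.
// stopSequences: ["END"] yields stop: ["\nuser:", "END"], and omits `stop`
// when the merged list is empty. Note that `logprobs` here uses the
// completions API's integer (top-k) form, unlike the chat model's boolean
// logprobs plus top_logprobs pair.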
1423
+ var openaiCompletionResponseSchema = z3.object({
1424
+ id: z3.string().nullish(),
1425
+ created: z3.number().nullish(),
1426
+ model: z3.string().nullish(),
1427
+ choices: z3.array(
1428
+ z3.object({
1429
+ text: z3.string(),
1430
+ finish_reason: z3.string(),
1431
+ logprobs: z3.object({
1432
+ tokens: z3.array(z3.string()),
1433
+ token_logprobs: z3.array(z3.number()),
1434
+ top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
1435
+ }).nullish()
1436
+ })
1437
+ ),
1438
+ usage: z3.object({
1439
+ prompt_tokens: z3.number(),
1440
+ completion_tokens: z3.number()
1441
+ })
1442
+ });
1443
+ var openaiCompletionChunkSchema = z3.union([
1444
+ z3.object({
1445
+ id: z3.string().nullish(),
1446
+ created: z3.number().nullish(),
1447
+ model: z3.string().nullish(),
1448
+ choices: z3.array(
1449
+ z3.object({
1450
+ text: z3.string(),
1451
+ finish_reason: z3.string().nullish(),
1452
+ index: z3.number(),
1453
+ logprobs: z3.object({
1454
+ tokens: z3.array(z3.string()),
1455
+ token_logprobs: z3.array(z3.number()),
1456
+ top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
1457
+ }).nullish()
1458
+ })
1459
+ ),
1460
+ usage: z3.object({
1461
+ prompt_tokens: z3.number(),
1462
+ completion_tokens: z3.number()
1463
+ }).nullish()
1464
+ }),
1465
+ openaiErrorDataSchema
1466
+ ]);
1467
+
1468
+ // src/openai-embedding-model.ts
1469
+ import {
1470
+ TooManyEmbeddingValuesForCallError
1471
+ } from "@ai-sdk/provider";
1472
+ import {
1473
+ combineHeaders as combineHeaders3,
1474
+ createJsonResponseHandler as createJsonResponseHandler3,
1475
+ postJsonToApi as postJsonToApi3
1476
+ } from "@ai-sdk/provider-utils";
1477
+ import { z as z4 } from "zod";
1478
+ var OpenAIEmbeddingModel = class {
1479
+ constructor(modelId, settings, config) {
1480
+ this.specificationVersion = "v1";
1481
+ this.modelId = modelId;
1482
+ this.settings = settings;
1483
+ this.config = config;
1484
+ }
1485
+ get provider() {
1486
+ return this.config.provider;
1487
+ }
1488
+ get maxEmbeddingsPerCall() {
1489
+ var _a;
1490
+ return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
1491
+ }
1492
+ get supportsParallelCalls() {
1493
+ var _a;
1494
+ return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
1495
+ }
1496
+ async doEmbed({
1497
+ values,
1498
+ headers,
1499
+ abortSignal
1500
+ }) {
1501
+ if (values.length > this.maxEmbeddingsPerCall) {
1502
+ throw new TooManyEmbeddingValuesForCallError({
1503
+ provider: this.provider,
1504
+ modelId: this.modelId,
1505
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1506
+ values
1507
+ });
1508
+ }
1509
+ const { responseHeaders, value: response } = await postJsonToApi3({
1510
+ url: this.config.url({
1511
+ path: "/embeddings",
1512
+ modelId: this.modelId
1513
+ }),
1514
+ headers: combineHeaders3(this.config.headers(), headers),
1515
+ body: {
1516
+ model: this.modelId,
1517
+ input: values,
1518
+ encoding_format: "float",
1519
+ dimensions: this.settings.dimensions,
1520
+ user: this.settings.user
1521
+ },
1522
+ failedResponseHandler: openaiFailedResponseHandler,
1523
+ successfulResponseHandler: createJsonResponseHandler3(
1524
+ openaiTextEmbeddingResponseSchema
1525
+ ),
1526
+ abortSignal,
1527
+ fetch: this.config.fetch
1528
+ });
1529
+ return {
1530
+ embeddings: response.data.map((item) => item.embedding),
1531
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1532
+ rawResponse: { headers: responseHeaders }
1533
+ };
1534
+ }
1535
+ };
1536
+ var openaiTextEmbeddingResponseSchema = z4.object({
1537
+ data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
1538
+ usage: z4.object({ prompt_tokens: z4.number() }).nullish()
1539
+ });
1540
+
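// ---- Editor's note: hypothetical usage sketch, not part of the package diff. ----
// The config values are invented. doEmbed sends all values in one request and
// throws TooManyEmbeddingValuesForCallError past maxEmbeddingsPerCall (2048
// by default):
const embeddingModel = new OpenAIEmbeddingModel(
  "text-embedding-3-small",
  { dimensions: 512 },
  {
    provider: "openai.embedding",
    url: ({ path }) => `https://api.openai.com/v1${path}`,
    headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` })
  }
);
// const { embeddings } = await embeddingModel.doEmbed({ values: ["a", "b"] });
// embeddings: number[][], in the same order as `values`.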
1541
+ // src/openai-image-model.ts
1542
+ import {
1543
+ combineHeaders as combineHeaders4,
1544
+ createJsonResponseHandler as createJsonResponseHandler4,
1545
+ postJsonToApi as postJsonToApi4
1546
+ } from "@ai-sdk/provider-utils";
1547
+ import { z as z5 } from "zod";
1548
+
1549
+ // src/openai-image-settings.ts
1550
+ var modelMaxImagesPerCall = {
1551
+ "dall-e-3": 1,
1552
+ "dall-e-2": 10,
1553
+ "gpt-image-1": 10
1554
+ };
1555
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1556
+
1557
+ // src/openai-image-model.ts
1558
+ var OpenAIImageModel = class {
1559
+ constructor(modelId, settings, config) {
1560
+ this.modelId = modelId;
1561
+ this.settings = settings;
1562
+ this.config = config;
1563
+ this.specificationVersion = "v1";
1564
+ }
1565
+ get maxImagesPerCall() {
1566
+ var _a, _b;
1567
+ return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
1568
+ }
1569
+ get provider() {
1570
+ return this.config.provider;
1571
+ }
1572
+ async doGenerate({
1573
+ prompt,
1574
+ n,
1575
+ size,
1576
+ aspectRatio,
1577
+ seed,
1578
+ providerOptions,
1579
+ headers,
1580
+ abortSignal
1581
+ }) {
1582
+ var _a, _b, _c, _d;
1583
+ const warnings = [];
1584
+ if (aspectRatio != null) {
1585
+ warnings.push({
1586
+ type: "unsupported-setting",
1587
+ setting: "aspectRatio",
1588
+ details: "This model does not support aspect ratio. Use `size` instead."
1589
+ });
1590
+ }
1591
+ if (seed != null) {
1592
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1593
+ }
1594
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1595
+ const { value: response, responseHeaders } = await postJsonToApi4({
1596
+ url: this.config.url({
1597
+ path: "/images/generations",
1598
+ modelId: this.modelId
1599
+ }),
1600
+ headers: combineHeaders4(this.config.headers(), headers),
1601
+ body: {
1602
+ model: this.modelId,
1603
+ prompt,
1604
+ n,
1605
+ size,
1606
+ ...(_d = providerOptions.openai) != null ? _d : {},
1607
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1608
+ },
1609
+ failedResponseHandler: openaiFailedResponseHandler,
1610
+ successfulResponseHandler: createJsonResponseHandler4(
1611
+ openaiImageResponseSchema
1612
+ ),
1613
+ abortSignal,
1614
+ fetch: this.config.fetch
1615
+ });
1616
+ return {
1617
+ images: response.data.map((item) => item.b64_json),
1618
+ warnings,
1619
+ response: {
1620
+ timestamp: currentDate,
1621
+ modelId: this.modelId,
1622
+ headers: responseHeaders
1623
+ }
1624
+ };
1625
+ }
1626
+ };
1627
+ var openaiImageResponseSchema = z5.object({
1628
+ data: z5.array(z5.object({ b64_json: z5.string() }))
1629
+ });
1630
+
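// Editor's note: behavior sketch, not part of the package diff. For dall-e-2
// and dall-e-3 the request forces response_format: "b64_json" so images can be
// returned as base64 strings; gpt-image-1 is in hasDefaultResponseFormat and
// already responds with b64_json, so the parameter is omitted. The per-call
// image count resolves as settings.maxImagesPerCall, then
// modelMaxImagesPerCall[modelId], then 1.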
1631
+ // src/openai-transcription-model.ts
1632
+ import {
1633
+ combineHeaders as combineHeaders5,
1634
+ convertBase64ToUint8Array,
1635
+ createJsonResponseHandler as createJsonResponseHandler5,
1636
+ parseProviderOptions,
1637
+ postFormDataToApi
1638
+ } from "@ai-sdk/provider-utils";
1639
+ import { z as z6 } from "zod";
1640
+ var openAIProviderOptionsSchema = z6.object({
1641
+ include: z6.array(z6.string()).nullish(),
1642
+ language: z6.string().nullish(),
1643
+ prompt: z6.string().nullish(),
1644
+ temperature: z6.number().min(0).max(1).nullish().default(0),
1645
+ timestampGranularities: z6.array(z6.enum(["word", "segment"])).nullish().default(["segment"])
1646
+ });
1647
+ var languageMap = {
1648
+ afrikaans: "af",
1649
+ arabic: "ar",
1650
+ armenian: "hy",
1651
+ azerbaijani: "az",
1652
+ belarusian: "be",
1653
+ bosnian: "bs",
1654
+ bulgarian: "bg",
1655
+ catalan: "ca",
1656
+ chinese: "zh",
1657
+ croatian: "hr",
1658
+ czech: "cs",
1659
+ danish: "da",
1660
+ dutch: "nl",
1661
+ english: "en",
1662
+ estonian: "et",
1663
+ finnish: "fi",
1664
+ french: "fr",
1665
+ galician: "gl",
1666
+ german: "de",
1667
+ greek: "el",
1668
+ hebrew: "he",
1669
+ hindi: "hi",
1670
+ hungarian: "hu",
1671
+ icelandic: "is",
1672
+ indonesian: "id",
1673
+ italian: "it",
1674
+ japanese: "ja",
1675
+ kannada: "kn",
1676
+ kazakh: "kk",
1677
+ korean: "ko",
1678
+ latvian: "lv",
1679
+ lithuanian: "lt",
1680
+ macedonian: "mk",
1681
+ malay: "ms",
1682
+ marathi: "mr",
1683
+ maori: "mi",
1684
+ nepali: "ne",
1685
+ norwegian: "no",
1686
+ persian: "fa",
1687
+ polish: "pl",
1688
+ portuguese: "pt",
1689
+ romanian: "ro",
1690
+ russian: "ru",
1691
+ serbian: "sr",
1692
+ slovak: "sk",
1693
+ slovenian: "sl",
1694
+ spanish: "es",
1695
+ swahili: "sw",
1696
+ swedish: "sv",
1697
+ tagalog: "tl",
1698
+ tamil: "ta",
1699
+ thai: "th",
1700
+ turkish: "tr",
1701
+ ukrainian: "uk",
1702
+ urdu: "ur",
1703
+ vietnamese: "vi",
1704
+ welsh: "cy"
1705
+ };
1706
+ var OpenAITranscriptionModel = class {
1707
+ constructor(modelId, config) {
1708
+ this.modelId = modelId;
1709
+ this.config = config;
1710
+ this.specificationVersion = "v1";
1711
+ }
1712
+ get provider() {
1713
+ return this.config.provider;
1714
+ }
1715
+ getArgs({
1716
+ audio,
1717
+ mediaType,
1718
+ providerOptions
1719
+ }) {
1720
+ var _a, _b, _c, _d, _e;
1721
+ const warnings = [];
1722
+ const openAIOptions = parseProviderOptions({
1723
+ provider: "openai",
1724
+ providerOptions,
1725
+ schema: openAIProviderOptionsSchema
1726
+ });
1727
+ const formData = new FormData();
1728
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
1729
+ formData.append("model", this.modelId);
1730
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
1731
+ if (openAIOptions) {
1732
+ const transcriptionModelOptions = {
1733
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
1734
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
1735
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1736
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1737
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1738
+ };
1739
+ for (const key in transcriptionModelOptions) {
1740
+ const value = transcriptionModelOptions[key];
1741
+ if (value !== void 0) {
1742
+ formData.append(key, String(value));
1743
+ }
1744
+ }
1745
+ }
1746
+ return {
1747
+ formData,
1748
+ warnings
1749
+ };
1750
+ }
1751
+ async doGenerate(options) {
1752
+ var _a, _b, _c, _d, _e, _f;
1753
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1754
+ const { formData, warnings } = this.getArgs(options);
1755
+ const {
1756
+ value: response,
1757
+ responseHeaders,
1758
+ rawValue: rawResponse
1759
+ } = await postFormDataToApi({
1760
+ url: this.config.url({
1761
+ path: "/audio/transcriptions",
1762
+ modelId: this.modelId
1763
+ }),
1764
+ headers: combineHeaders5(this.config.headers(), options.headers),
1765
+ formData,
1766
+ failedResponseHandler: openaiFailedResponseHandler,
1767
+ successfulResponseHandler: createJsonResponseHandler5(
1768
+ openaiTranscriptionResponseSchema
1769
+ ),
1770
+ abortSignal: options.abortSignal,
1771
+ fetch: this.config.fetch
1772
+ });
1773
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1774
+ return {
1775
+ text: response.text,
1776
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
1777
+ text: word.word,
1778
+ startSecond: word.start,
1779
+ endSecond: word.end
1780
+ }))) != null ? _e : [],
1781
+ language,
1782
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
1783
+ warnings,
1784
+ response: {
1785
+ timestamp: currentDate,
1786
+ modelId: this.modelId,
1787
+ headers: responseHeaders,
1788
+ body: rawResponse
1789
+ }
1790
+ };
1791
+ }
1792
+ };
1793
+ var openaiTranscriptionResponseSchema = z6.object({
+   text: z6.string(),
+   language: z6.string().nullish(),
+   duration: z6.number().nullish(),
+   words: z6.array(
+     z6.object({
+       word: z6.string(),
+       start: z6.number(),
+       end: z6.number()
+     })
+   ).nullish()
+ });
+
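+ // Usage sketch (assumes a `config` object carrying the `provider`, `url`,
+ // and `headers` fields the constructor expects; "audio.mp3" is a
+ // placeholder input file):
+ //
+ //   import { readFileSync } from "node:fs";
+ //   const model = new OpenAITranscriptionModel("whisper-1", config);
+ //   const { text, segments, language } = await model.doGenerate({
+ //     audio: readFileSync("audio.mp3"),
+ //     mediaType: "audio/mpeg",
+ //     providerOptions: { openai: { temperature: 0 } }
+ //   });
+ //   // `language` is mapped from the response's language name (e.g.
+ //   // "spanish") to its ISO 639-1 code ("es") via `languageMap`.
+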
+ // src/openai-speech-model.ts
+ import {
+   combineHeaders as combineHeaders6,
+   createBinaryResponseHandler,
+   parseProviderOptions as parseProviderOptions2,
+   postJsonToApi as postJsonToApi5
+ } from "@ai-sdk/provider-utils";
+ import { z as z7 } from "zod";
+ var OpenAIProviderOptionsSchema = z7.object({
+   instructions: z7.string().nullish(),
+   speed: z7.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+   constructor(modelId, config) {
+     this.modelId = modelId;
+     this.config = config;
+     this.specificationVersion = "v1";
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     text,
+     voice = "alloy",
+     outputFormat = "mp3",
+     speed,
+     instructions,
+     providerOptions
+   }) {
+     const warnings = [];
+     const openAIOptions = parseProviderOptions2({
+       provider: "openai",
+       providerOptions,
+       schema: OpenAIProviderOptionsSchema
+     });
+     const requestBody = {
+       model: this.modelId,
+       input: text,
+       voice,
+       response_format: "mp3",
+       speed,
+       instructions
+     };
+     if (outputFormat) {
+       if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+         requestBody.response_format = outputFormat;
+       } else {
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "outputFormat",
+           details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+         });
+       }
+     }
+     if (openAIOptions) {
+       // apply the provider options parsed from the schema (instructions,
+       // speed); options left undefined do not override the request body
+       const speechModelOptions = {
+         instructions: openAIOptions.instructions != null ? openAIOptions.instructions : void 0,
+         speed: openAIOptions.speed != null ? openAIOptions.speed : void 0
+       };
+       for (const key in speechModelOptions) {
+         const value = speechModelOptions[key];
+         if (value !== void 0) {
+           requestBody[key] = value;
+         }
+       }
+     }
+     return {
+       requestBody,
+       warnings
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c;
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { requestBody, warnings } = this.getArgs(options);
+     const {
+       value: audio,
+       responseHeaders,
+       rawValue: rawResponse
+     } = await postJsonToApi5({
+       url: this.config.url({
+         path: "/audio/speech",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders6(this.config.headers(), options.headers),
+       body: requestBody,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createBinaryResponseHandler(),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       audio,
+       warnings,
+       request: {
+         body: JSON.stringify(requestBody)
+       },
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders,
+         body: rawResponse
+       }
+     };
+   }
+ };
+
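+ // Usage sketch (same kind of assumed `config` object as above):
+ //
+ //   const model = new OpenAISpeechModel("tts-1", config);
+ //   const { audio } = await model.doGenerate({
+ //     text: "Hello world",
+ //     voice: "nova",
+ //     outputFormat: "wav",
+ //     providerOptions: { openai: { speed: 1.25 } }
+ //   });
+ //   // `audio` is the raw binary body from createBinaryResponseHandler().
+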
+ // src/responses/openai-responses-language-model.ts
+ import {
+   combineHeaders as combineHeaders7,
+   createEventSourceResponseHandler as createEventSourceResponseHandler3,
+   createJsonResponseHandler as createJsonResponseHandler6,
+   generateId as generateId2,
+   parseProviderOptions as parseProviderOptions3,
+   postJsonToApi as postJsonToApi6
+ } from "@ai-sdk/provider-utils";
+ import { z as z8 } from "zod";
+
+ // src/responses/convert-to-openai-responses-messages.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+ } from "@ai-sdk/provider";
+ import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
+ function convertToOpenAIResponsesMessages({
+   prompt,
+   systemMessageMode
+ }) {
+   const messages = [];
+   const warnings = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         switch (systemMessageMode) {
+           case "system": {
+             messages.push({ role: "system", content });
+             break;
+           }
+           case "developer": {
+             messages.push({ role: "developer", content });
+             break;
+           }
+           case "remove": {
+             warnings.push({
+               type: "other",
+               message: "system messages are removed for this model"
+             });
+             break;
+           }
+           default: {
+             const _exhaustiveCheck = systemMessageMode;
+             throw new Error(
+               `Unsupported system message mode: ${_exhaustiveCheck}`
+             );
+           }
+         }
+         break;
+       }
+       case "user": {
+         messages.push({
+           role: "user",
+           content: content.map((part, index) => {
+             var _a, _b, _c, _d;
+             switch (part.type) {
+               case "text": {
+                 return { type: "input_text", text: part.text };
+               }
+               case "image": {
+                 return {
+                   type: "input_image",
+                   image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
+                   // OpenAI specific extension: image detail
+                   detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+                 };
+               }
+               case "file": {
+                 if (part.data instanceof URL) {
+                   throw new UnsupportedFunctionalityError6({
+                     functionality: "File URLs in user messages"
+                   });
+                 }
+                 switch (part.mimeType) {
+                   case "application/pdf": {
+                     return {
+                       type: "input_file",
+                       filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+                       file_data: `data:application/pdf;base64,${part.data}`
+                     };
+                   }
+                   default: {
+                     throw new UnsupportedFunctionalityError6({
+                       functionality: "Only PDF files are supported in user messages"
+                     });
+                   }
+                 }
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               messages.push({
+                 role: "assistant",
+                 content: [{ type: "output_text", text: part.text }]
+               });
+               break;
+             }
+             case "tool-call": {
+               messages.push({
+                 type: "function_call",
+                 call_id: part.toolCallId,
+                 name: part.toolName,
+                 arguments: JSON.stringify(part.args)
+               });
+               break;
+             }
+           }
+         }
+         break;
+       }
+       case "tool": {
+         for (const part of content) {
+           messages.push({
+             type: "function_call_output",
+             call_id: part.toolCallId,
+             output: JSON.stringify(part.result)
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return { messages, warnings };
+ }
+
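+ // Example (hypothetical input, for illustration): a single user message
+ // with a text part and an image URL converts to one Responses API item:
+ //
+ //   convertToOpenAIResponsesMessages({
+ //     prompt: [{
+ //       role: "user",
+ //       content: [
+ //         { type: "text", text: "Describe this" },
+ //         { type: "image", image: new URL("https://example.com/cat.png") }
+ //       ]
+ //     }],
+ //     systemMessageMode: "system"
+ //   });
+ //   // => { messages: [{ role: "user", content: [
+ //   //      { type: "input_text", text: "Describe this" },
+ //   //      { type: "input_image", image_url: "https://example.com/cat.png",
+ //   //        detail: undefined }
+ //   //    ] }], warnings: [] }
+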
+ // src/responses/map-openai-responses-finish-reason.ts
+ function mapOpenAIResponseFinishReason({
+   finishReason,
+   hasToolCalls
+ }) {
+   switch (finishReason) {
+     case void 0:
+     case null:
+       return hasToolCalls ? "tool-calls" : "stop";
+     case "max_output_tokens":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     default:
+       return hasToolCalls ? "tool-calls" : "unknown";
+   }
+ }
+
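+ // Example mappings: no `incomplete_details.reason` yields "stop" (or
+ // "tool-calls" when function calls were emitted); "max_output_tokens"
+ // yields "length"; "content_filter" yields "content-filter"; any other
+ // reason falls through to "unknown".
+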
+ // src/responses/openai-responses-prepare-tools.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError7
+ } from "@ai-sdk/provider";
+ function prepareResponsesTools({
+   mode,
+   strict
+ }) {
+   var _a;
+   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+   const toolWarnings = [];
+   if (tools == null) {
+     return { tools: void 0, tool_choice: void 0, toolWarnings };
+   }
+   const toolChoice = mode.toolChoice;
+   const openaiTools = [];
+   for (const tool of tools) {
+     switch (tool.type) {
+       case "function":
+         openaiTools.push({
+           type: "function",
+           name: tool.name,
+           description: tool.description,
+           parameters: tool.parameters,
+           strict: strict ? true : void 0
+         });
+         break;
+       case "provider-defined":
+         switch (tool.id) {
+           case "openai.file_search":
+             openaiTools.push({
+               type: "file_search",
+               vector_store_ids: tool.args.vectorStoreIds,
+               max_num_results: tool.args.maxNumResults,
+               ranking: tool.args.ranking,
+               filters: tool.args.filters
+             });
+             break;
+           case "openai.web_search_preview":
+             openaiTools.push({
+               type: "web_search_preview",
+               search_context_size: tool.args.searchContextSize,
+               user_location: tool.args.userLocation
+             });
+             break;
+           case "openai.code_interpreter":
+             openaiTools.push({
+               type: "code_interpreter",
+               container: tool.args.container
+             });
+             break;
+           default:
+             toolWarnings.push({ type: "unsupported-tool", tool });
+             break;
+         }
+         break;
+       default:
+         toolWarnings.push({ type: "unsupported-tool", tool });
+         break;
+     }
+   }
+   if (toolChoice == null) {
+     return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: openaiTools, tool_choice: type, toolWarnings };
+     case "tool": {
+       if (toolChoice.toolName === "web_search_preview") {
+         return {
+           tools: openaiTools,
+           tool_choice: {
+             type: "web_search_preview"
+           },
+           toolWarnings
+         };
+       }
+       if (toolChoice.toolName === "code_interpreter") {
+         return {
+           tools: openaiTools,
+           tool_choice: {
+             type: "code_interpreter"
+           },
+           toolWarnings
+         };
+       }
+       return {
+         tools: openaiTools,
+         tool_choice: {
+           type: "function",
+           name: toolChoice.toolName
+         },
+         toolWarnings
+       };
+     }
+     default: {
+       const _exhaustiveCheck = type;
+       throw new UnsupportedFunctionalityError7({
+         functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+       });
+     }
+   }
+ }
+
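+ // Example (hypothetical tool definition): a "regular" mode with one
+ // function tool and a forced tool choice maps to the Responses format:
+ //
+ //   prepareResponsesTools({
+ //     mode: {
+ //       type: "regular",
+ //       tools: [{ type: "function", name: "getWeather",
+ //                 description: "Weather lookup",
+ //                 parameters: { type: "object", properties: {} } }],
+ //       toolChoice: { type: "tool", toolName: "getWeather" }
+ //     },
+ //     strict: true
+ //   });
+ //   // => { tools: [{ type: "function", name: "getWeather", ..., strict: true }],
+ //   //      tool_choice: { type: "function", name: "getWeather" },
+ //   //      toolWarnings: [] }
+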
+ // src/responses/openai-responses-language-model.ts
+ var OpenAIResponsesLanguageModel = class {
+   constructor(modelId, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = "json";
+     this.supportsStructuredOutputs = true;
+     this.modelId = modelId;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     maxTokens,
+     temperature,
+     stopSequences,
+     topP,
+     topK,
+     presencePenalty,
+     frequencyPenalty,
+     seed,
+     prompt,
+     providerMetadata,
+     responseFormat
+   }) {
+     var _a, _b, _c;
+     const warnings = [];
+     const modelConfig = getResponsesModelConfig(this.modelId);
+     const type = mode.type;
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if (seed != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "seed"
+       });
+     }
+     if (presencePenalty != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "presencePenalty"
+       });
+     }
+     if (frequencyPenalty != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "frequencyPenalty"
+       });
+     }
+     if (stopSequences != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "stopSequences"
+       });
+     }
+     const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+       prompt,
+       systemMessageMode: modelConfig.systemMessageMode
+     });
+     warnings.push(...messageWarnings);
+     const openaiOptions = parseProviderOptions3({
+       provider: "openai",
+       providerOptions: providerMetadata,
+       schema: openaiResponsesProviderOptionsSchema
+     });
+     const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+     const baseArgs = {
+       model: this.modelId,
+       input: messages,
+       temperature: (openaiOptions == null ? void 0 : openaiOptions.forceNoTemperature) ? void 0 : temperature,
+       top_p: topP,
+       max_output_tokens: maxTokens,
+       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+         text: {
+           format: responseFormat.schema != null ? {
+             type: "json_schema",
+             strict: isStrict,
+             name: (_b = responseFormat.name) != null ? _b : "response",
+             description: responseFormat.description,
+             schema: responseFormat.schema
+           } : { type: "json_object" }
+         }
+       },
+       // provider options:
+       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+       previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+       store: openaiOptions == null ? void 0 : openaiOptions.store,
+       user: openaiOptions == null ? void 0 : openaiOptions.user,
+       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+       // model-specific settings:
+       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+         reasoning: {
+           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+             effort: openaiOptions.reasoningEffort
+           },
+           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+             summary: openaiOptions.reasoningSummary
+           }
+         }
+       },
+       ...modelConfig.requiredAutoTruncation && {
+         truncation: "auto"
+       }
+     };
+     if (modelConfig.isReasoningModel) {
+       if (baseArgs.temperature != null) {
+         baseArgs.temperature = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "temperature",
+           details: "temperature is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.top_p != null) {
+         baseArgs.top_p = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "topP",
+           details: "topP is not supported for reasoning models"
+         });
+       }
+     }
+     switch (type) {
+       case "regular": {
+         const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
+           mode,
+           strict: isStrict
+           // TODO support provider options on tools
+         });
+         return {
+           args: {
+             ...baseArgs,
+             tools,
+             tool_choice
+           },
+           warnings: [...warnings, ...toolWarnings]
+         };
+       }
+       case "object-json": {
+         return {
+           args: {
+             ...baseArgs,
+             text: {
+               format: mode.schema != null ? {
+                 type: "json_schema",
+                 strict: isStrict,
+                 name: (_c = mode.name) != null ? _c : "response",
+                 description: mode.description,
+                 schema: mode.schema
+               } : { type: "json_object" }
+             }
+           },
+           warnings
+         };
+       }
+       case "object-tool": {
+         return {
+           args: {
+             ...baseArgs,
+             tool_choice: { type: "function", name: mode.tool.name },
+             tools: [
+               {
+                 type: "function",
+                 name: mode.tool.name,
+                 description: mode.tool.description,
+                 parameters: mode.tool.parameters,
+                 strict: isStrict
+               }
+             ]
+           },
+           warnings
+         };
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f, _g;
+     const { args: body, warnings } = this.getArgs(options);
+     const {
+       responseHeaders,
+       value: response,
+       rawValue: rawResponse
+     } = await postJsonToApi6({
+       url: this.config.url({
+         path: "/responses",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders7(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler6(
+         z8.object({
+           id: z8.string(),
+           created_at: z8.number(),
+           model: z8.string(),
+           output: z8.array(
+             z8.discriminatedUnion("type", [
+               z8.object({
+                 type: z8.literal("message"),
+                 role: z8.literal("assistant"),
+                 content: z8.array(
+                   z8.object({
+                     type: z8.literal("output_text"),
+                     text: z8.string(),
+                     annotations: z8.array(
+                       z8.object({
+                         type: z8.literal("url_citation"),
+                         start_index: z8.number(),
+                         end_index: z8.number(),
+                         url: z8.string(),
+                         title: z8.string()
+                       })
+                     )
+                   })
+                 )
+               }),
+               z8.object({
+                 type: z8.literal("function_call"),
+                 call_id: z8.string(),
+                 name: z8.string(),
+                 arguments: z8.string()
+               }),
+               z8.object({
+                 type: z8.literal("web_search_call")
+               }),
+               z8.object({
+                 type: z8.literal("computer_call")
+               }),
+               z8.object({
+                 type: z8.literal("reasoning"),
+                 summary: z8.array(
+                   z8.object({
+                     type: z8.literal("summary_text"),
+                     text: z8.string()
+                   })
+                 )
+               })
+             ])
+           ),
+           incomplete_details: z8.object({ reason: z8.string() }).nullable(),
+           usage: usageSchema
+         })
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
+     const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
+       toolCallType: "function",
+       toolCallId: output.call_id,
+       toolName: output.name,
+       args: output.arguments
+     }));
+     const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
+     return {
+       text: outputTextElements.map((content) => content.text).join("\n"),
+       sources: outputTextElements.flatMap(
+         (content) => content.annotations.map((annotation) => {
+           var _a2, _b2, _c2;
+           return {
+             sourceType: "url",
+             id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
+             url: annotation.url,
+             title: annotation.title
+           };
+         })
+       ),
+       finishReason: mapOpenAIResponseFinishReason({
+         finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
+         hasToolCalls: toolCalls.length > 0
+       }),
+       toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+       reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
+         type: "text",
+         text: summary.text
+       })) : void 0,
+       usage: {
+         promptTokens: response.usage.input_tokens,
+         completionTokens: response.usage.output_tokens
+       },
+       rawCall: {
+         rawPrompt: void 0,
+         rawSettings: {}
+       },
+       rawResponse: {
+         headers: responseHeaders,
+         body: rawResponse
+       },
+       request: {
+         body: JSON.stringify(body)
+       },
+       response: {
+         id: response.id,
+         timestamp: new Date(response.created_at * 1e3),
+         modelId: response.model
+       },
+       providerMetadata: {
+         openai: {
+           responseId: response.id,
+           cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
+           reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
+         }
+       },
+       warnings
+     };
+   }
+   async doStream(options) {
+     const { args: body, warnings } = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi6({
+       url: this.config.url({
+         path: "/responses",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders7(this.config.headers(), options.headers),
+       body: {
+         ...body,
+         stream: true
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler3(
+         openaiResponsesChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const self = this;
+     let finishReason = "unknown";
+     let promptTokens = NaN;
+     let completionTokens = NaN;
+     let cachedPromptTokens = null;
+     let reasoningTokens = null;
+     let responseId = null;
+     const ongoingToolCalls = {};
+     let hasToolCalls = false;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h;
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if (isResponseOutputItemAddedChunk(value)) {
+               if (value.item.type === "function_call") {
+                 ongoingToolCalls[value.output_index] = {
+                   toolName: value.item.name,
+                   toolCallId: value.item.call_id
+                 };
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: value.item.call_id,
+                   toolName: value.item.name,
+                   argsTextDelta: value.item.arguments
+                 });
+               }
+             } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+               const toolCall = ongoingToolCalls[value.output_index];
+               if (toolCall != null) {
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: toolCall.toolCallId,
+                   toolName: toolCall.toolName,
+                   argsTextDelta: value.delta
+                 });
+               }
+             } else if (isResponseCreatedChunk(value)) {
+               responseId = value.response.id;
+               controller.enqueue({
+                 type: "response-metadata",
+                 id: value.response.id,
+                 timestamp: new Date(value.response.created_at * 1e3),
+                 modelId: value.response.model
+               });
+             } else if (isTextDeltaChunk(value)) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: value.delta
+               });
+             } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+               controller.enqueue({
+                 type: "reasoning",
+                 textDelta: value.delta
+               });
+             } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
+               ongoingToolCalls[value.output_index] = void 0;
+               hasToolCalls = true;
+               controller.enqueue({
+                 type: "tool-call",
+                 toolCallType: "function",
+                 toolCallId: value.item.call_id,
+                 toolName: value.item.name,
+                 args: value.item.arguments
+               });
+             } else if (isResponseFinishedChunk(value)) {
+               finishReason = mapOpenAIResponseFinishReason({
+                 finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+                 hasToolCalls
+               });
+               promptTokens = value.response.usage.input_tokens;
+               completionTokens = value.response.usage.output_tokens;
+               cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
+               reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+             } else if (isResponseAnnotationAddedChunk(value)) {
+               controller.enqueue({
+                 type: "source",
+                 source: {
+                   sourceType: "url",
+                   id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+                   url: value.annotation.url,
+                   title: value.annotation.title
+                 }
+               });
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage: { promptTokens, completionTokens },
+               ...(cachedPromptTokens != null || reasoningTokens != null) && {
+                 providerMetadata: {
+                   openai: {
+                     responseId,
+                     cachedPromptTokens,
+                     reasoningTokens
+                   }
+                 }
+               }
+             });
+           }
+         })
+       ),
+       rawCall: {
+         rawPrompt: void 0,
+         rawSettings: {}
+       },
+       rawResponse: { headers: responseHeaders },
+       request: { body: JSON.stringify(body) },
+       warnings
+     };
+   }
+ };
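+ // Usage sketch (assumes a `config` with `provider`, `url`, and `headers`;
+ // provider options ride along under `providerMetadata.openai`):
+ //
+ //   const model = new OpenAIResponsesLanguageModel("o3-mini", config);
+ //   const result = await model.doGenerate({
+ //     mode: { type: "regular" },
+ //     prompt: [{ role: "user", content: [{ type: "text", text: "Hi" }] }],
+ //     providerMetadata: {
+ //       openai: { reasoningEffort: "low", reasoningSummary: "auto" }
+ //     }
+ //   });
+ //   // result.providerMetadata.openai.responseId can be passed back as
+ //   // `previousResponseId` to continue the conversation server-side.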
+ var usageSchema = z8.object({
+   input_tokens: z8.number(),
+   input_tokens_details: z8.object({ cached_tokens: z8.number().nullish() }).nullish(),
+   output_tokens: z8.number(),
+   output_tokens_details: z8.object({ reasoning_tokens: z8.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = z8.object({
+   type: z8.literal("response.output_text.delta"),
+   delta: z8.string()
+ });
+ var responseFinishedChunkSchema = z8.object({
+   type: z8.enum(["response.completed", "response.incomplete"]),
+   response: z8.object({
+     incomplete_details: z8.object({ reason: z8.string() }).nullish(),
+     usage: usageSchema
+   })
+ });
+ var responseCreatedChunkSchema = z8.object({
+   type: z8.literal("response.created"),
+   response: z8.object({
+     id: z8.string(),
+     created_at: z8.number(),
+     model: z8.string()
+   })
+ });
+ var responseOutputItemDoneSchema = z8.object({
+   type: z8.literal("response.output_item.done"),
+   output_index: z8.number(),
+   item: z8.discriminatedUnion("type", [
+     z8.object({
+       type: z8.literal("message")
+     }),
+     z8.object({
+       type: z8.literal("function_call"),
+       id: z8.string(),
+       call_id: z8.string(),
+       name: z8.string(),
+       arguments: z8.string(),
+       status: z8.literal("completed")
+     })
+   ])
+ });
+ var responseFunctionCallArgumentsDeltaSchema = z8.object({
+   type: z8.literal("response.function_call_arguments.delta"),
+   item_id: z8.string(),
+   output_index: z8.number(),
+   delta: z8.string()
+ });
+ var responseOutputItemAddedSchema = z8.object({
+   type: z8.literal("response.output_item.added"),
+   output_index: z8.number(),
+   item: z8.discriminatedUnion("type", [
+     z8.object({
+       type: z8.literal("message")
+     }),
+     z8.object({
+       type: z8.literal("function_call"),
+       id: z8.string(),
+       call_id: z8.string(),
+       name: z8.string(),
+       arguments: z8.string()
+     })
+   ])
+ });
+ var responseAnnotationAddedSchema = z8.object({
+   type: z8.literal("response.output_text.annotation.added"),
+   annotation: z8.object({
+     type: z8.literal("url_citation"),
+     url: z8.string(),
+     title: z8.string()
+   })
+ });
+ var responseReasoningSummaryTextDeltaSchema = z8.object({
+   type: z8.literal("response.reasoning_summary_text.delta"),
+   item_id: z8.string(),
+   output_index: z8.number(),
+   summary_index: z8.number(),
+   delta: z8.string()
+ });
+ var openaiResponsesChunkSchema = z8.union([
+   textDeltaChunkSchema,
+   responseFinishedChunkSchema,
+   responseCreatedChunkSchema,
+   responseOutputItemDoneSchema,
+   responseFunctionCallArgumentsDeltaSchema,
+   responseOutputItemAddedSchema,
+   responseAnnotationAddedSchema,
+   responseReasoningSummaryTextDeltaSchema,
+   z8.object({ type: z8.string() }).passthrough()
+   // fallback for unknown chunks
+ ]);
+ function isTextDeltaChunk(chunk) {
+   return chunk.type === "response.output_text.delta";
+ }
+ function isResponseOutputItemDoneChunk(chunk) {
+   return chunk.type === "response.output_item.done";
+ }
+ function isResponseFinishedChunk(chunk) {
+   return chunk.type === "response.completed" || chunk.type === "response.incomplete";
+ }
+ function isResponseCreatedChunk(chunk) {
+   return chunk.type === "response.created";
+ }
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
+   return chunk.type === "response.function_call_arguments.delta";
+ }
+ function isResponseOutputItemAddedChunk(chunk) {
+   return chunk.type === "response.output_item.added";
+ }
+ function isResponseAnnotationAddedChunk(chunk) {
+   return chunk.type === "response.output_text.annotation.added";
+ }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+   return chunk.type === "response.reasoning_summary_text.delta";
+ }
+ function getResponsesModelConfig(modelId) {
+   if (modelId.startsWith("o")) {
+     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+       return {
+         isReasoningModel: true,
+         systemMessageMode: "remove",
+         requiredAutoTruncation: false
+       };
+     }
+     return {
+       isReasoningModel: true,
+       systemMessageMode: "developer",
+       requiredAutoTruncation: false
+     };
+   }
+   return {
+     isReasoningModel: false,
+     systemMessageMode: "system",
+     requiredAutoTruncation: false
+   };
+ }
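+ // Examples: "o1-preview" => { isReasoningModel: true, systemMessageMode:
+ // "remove" }; "o3-mini" => systemMessageMode "developer"; "gpt-4o" =>
+ // { isReasoningModel: false, systemMessageMode: "system" }.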
+ var openaiResponsesProviderOptionsSchema = z8.object({
+   metadata: z8.any().nullish(),
+   parallelToolCalls: z8.boolean().nullish(),
+   previousResponseId: z8.string().nullish(),
+   forceNoTemperature: z8.boolean().nullish(),
+   store: z8.boolean().nullish(),
+   user: z8.string().nullish(),
+   reasoningEffort: z8.string().nullish(),
+   strictSchemas: z8.boolean().nullish(),
+   instructions: z8.string().nullish(),
+   reasoningSummary: z8.string().nullish()
+ });
+ export {
+   OpenAIChatLanguageModel,
+   OpenAICompletionLanguageModel,
+   OpenAIEmbeddingModel,
+   OpenAIImageModel,
+   OpenAIResponsesLanguageModel,
+   OpenAISpeechModel,
+   OpenAITranscriptionModel,
+   hasDefaultResponseFormat,
+   modelMaxImagesPerCall
+ };
+ //# sourceMappingURL=index.mjs.map