@ai-sdk/openai 0.0.0-85f9a635-20240518005312

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1033 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines lazy, enumerable getters on `target` for every key in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties from `from` onto `to` (skipping `except` and keys
// already present), preserving each property's enumerability via getters.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Marks a fresh object as an ES module and copies the module's exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public package exports, exposed through lazy getters for CJS consumers.
var src_exports = {};
__export(src_exports, {
  OpenAI: () => OpenAI,
  createOpenAI: () => createOpenAI,
  openai: () => openai
});
module.exports = __toCommonJS(src_exports);
28
+
29
+ // src/openai-facade.ts
30
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
31
+
32
+ // src/openai-chat-language-model.ts
33
+ var import_provider = require("@ai-sdk/provider");
34
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
35
+ var import_zod2 = require("zod");
36
+
37
+ // src/convert-to-openai-chat-messages.ts
38
+ var import_provider_utils = require("@ai-sdk/provider-utils");
39
/**
 * Converts a standardized prompt (system / user / assistant / tool messages)
 * into the message format expected by the OpenAI chat completions API.
 * Throws for unsupported assistant content parts or unknown roles.
 */
function convertToOpenAIChatMessages(prompt) {
  const messages = [];
  for (const { role, content } of prompt) {
    if (role === "system") {
      messages.push({ role: "system", content });
    } else if (role === "user") {
      messages.push({
        role: "user",
        content: content.map((part) => {
          var _a;
          if (part.type === "text") {
            return { type: "text", text: part.text };
          }
          if (part.type === "image") {
            // URL images pass through; binary images become base64 data URLs
            // (mime type defaults to image/jpeg when not provided).
            const url = part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`;
            return { type: "image_url", image_url: { url } };
          }
        })
      });
    } else if (role === "assistant") {
      let text = "";
      const toolCalls = [];
      for (const part of content) {
        if (part.type === "text") {
          text += part.text;
        } else if (part.type === "tool-call") {
          toolCalls.push({
            id: part.toolCallId,
            type: "function",
            function: {
              name: part.toolName,
              arguments: JSON.stringify(part.args)
            }
          });
        } else {
          throw new Error(`Unsupported part: ${part}`);
        }
      }
      messages.push({
        role: "assistant",
        content: text,
        // omit tool_calls entirely when there are none:
        tool_calls: toolCalls.length > 0 ? toolCalls : void 0
      });
    } else if (role === "tool") {
      // each tool result becomes its own "tool" message:
      for (const toolResponse of content) {
        messages.push({
          role: "tool",
          tool_call_id: toolResponse.toolCallId,
          content: JSON.stringify(toolResponse.result)
        });
      }
    } else {
      throw new Error(`Unsupported role: ${role}`);
    }
  }
  return messages;
}
120
+
121
+ // src/map-openai-finish-reason.ts
122
/**
 * Maps an OpenAI finish reason onto the normalized AI-SDK finish-reason
 * vocabulary; anything unrecognized (including null/undefined) maps to "other".
 */
function mapOpenAIFinishReason(finishReason) {
  const mapping = {
    stop: "stop",
    length: "length",
    content_filter: "content-filter",
    function_call: "tool-calls",
    tool_calls: "tool-calls"
  };
  // own-property check so inherited Object.prototype keys never match:
  return Object.prototype.hasOwnProperty.call(mapping, finishReason) ? mapping[finishReason] : "other";
}
137
+
138
// src/openai-error.ts
var import_zod = require("zod");
var import_provider_utils2 = require("@ai-sdk/provider-utils");
// Shape of the JSON error payload returned by the OpenAI API.
var openAIErrorDataSchema = import_zod.z.object({
  error: import_zod.z.object({
    message: import_zod.z.string(),
    type: import_zod.z.string(),
    param: import_zod.z.any().nullable(),
    code: import_zod.z.string().nullable()
  })
});
// Shared response handler that turns failed HTTP responses into errors,
// using the API's own error message as the error text.
var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openAIErrorDataSchema,
  errorToMessage: (data) => data.error.message
});
153
+
154
+ // src/map-openai-chat-logprobs.ts
155
/**
 * Translates OpenAI chat logprobs into the normalized
 * { token, logprob, topLogprobs } shape. Returns undefined when the
 * response carried no logprob content; missing top_logprobs become [].
 */
function mapOpenAIChatLogProbsOutput(logprobs) {
  const content = logprobs == null ? void 0 : logprobs.content;
  if (content == null) {
    return void 0;
  }
  return content.map((entry) => ({
    token: entry.token,
    logprob: entry.logprob,
    topLogprobs: entry.top_logprobs ? entry.top_logprobs.map((top) => ({ token: top.token, logprob: top.logprob })) : []
  }));
}
166
+
167
+ // src/openai-chat-language-model.ts
168
// Language model for the OpenAI chat completions API. Implements the "v1"
// language-model specification: builds request bodies from standardized call
// options, posts them, and normalizes (or streams) the results.
var OpenAIChatLanguageModel = class {
  constructor(modelId, settings, config) {
    // language-model specification version implemented by this class:
    this.specificationVersion = "v1";
    // object generation defaults to tool mode (forced function call):
    this.defaultObjectGenerationMode = "tool";
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body for /chat/completions from the standardized
  // call options. Throws for unsupported generation modes.
  getArgs({
    mode,
    prompt,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed
  }) {
    var _a;
    const type = mode.type;
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      logit_bias: this.settings.logitBias,
      // logprobs are requested when the setting is `true` or a number:
      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number",
      // a numeric setting selects the top-k count; `true` maps to 0:
      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
      user: this.settings.user,
      // standardized settings:
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,
      // messages:
      messages: convertToOpenAIChatMessages(prompt)
    };
    switch (type) {
      case "regular": {
        // an empty tools array is treated the same as "no tools":
        const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
        return {
          ...baseArgs,
          tools: tools == null ? void 0 : tools.map((tool) => ({
            type: "function",
            function: {
              name: tool.name,
              description: tool.description,
              parameters: tool.parameters
            }
          }))
        };
      }
      case "object-json": {
        return {
          ...baseArgs,
          response_format: { type: "json_object" }
        };
      }
      case "object-tool": {
        // force the model to call the single provided tool:
        return {
          ...baseArgs,
          tool_choice: { type: "function", function: { name: mode.tool.name } },
          tools: [
            {
              type: "function",
              function: {
                name: mode.tool.name,
                description: mode.tool.description,
                parameters: mode.tool.parameters
              }
            }
          ]
        };
      }
      case "object-grammar": {
        throw new import_provider.UnsupportedFunctionalityError({
          functionality: "object-grammar mode"
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  // Non-streaming call: posts the request and normalizes the first choice
  // into { text, toolCalls, finishReason, usage, logprobs, ... }.
  async doGenerate(options) {
    var _a, _b;
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
      url: `${this.config.baseURL}/chat/completions`,
      headers: this.config.headers(),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
        openAIChatResponseSchema
      ),
      abortSignal: options.abortSignal
    });
    const { messages: rawPrompt, ...rawSettings } = args;
    const choice = response.choices[0];
    return {
      text: (_a = choice.message.content) != null ? _a : void 0,
      toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
        var _a2;
        return {
          toolCallType: "function",
          // some OpenAI-compatible providers omit tool-call ids; generate one:
          toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
          toolName: toolCall.function.name,
          args: toolCall.function.arguments
        };
      }),
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      usage: {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens
      },
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: [],
      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
    };
  }
  // Streaming call that returns the schema-validated chunk stream as-is,
  // without converting chunks into standardized stream parts.
  async doRawStream(options) {
    const args = this.getArgs(options);
    const { responseHeaders, value: responseBody } = await (0, import_provider_utils3.postJsonToApi)({
      url: `${this.config.baseURL}/chat/completions`,
      headers: this.config.headers(),
      body: {
        ...args,
        stream: true,
        // only include stream_options when in strict compatibility mode:
        stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils3.createEventSourcePassThroughHandler)(
        openaiChatChunkSchema
      ),
      abortSignal: options.abortSignal
    });
    const { messages: rawPrompt, ...rawSettings } = args;
    return {
      stream: responseBody,
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
  // Streaming call: converts server-sent chunks into standardized stream
  // parts (text deltas, tool-call deltas, completed tool calls, and a final
  // "finish" event emitted on flush).
  async doStream(options) {
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
      url: `${this.config.baseURL}/chat/completions`,
      headers: this.config.headers(),
      body: {
        ...args,
        stream: true,
        // only include stream_options when in strict compatibility mode:
        stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
        openaiChatChunkSchema
      ),
      abortSignal: options.abortSignal
    });
    const { messages: rawPrompt, ...rawSettings } = args;
    // tool calls are accumulated across chunks, indexed by the API's
    // tool_call index; usage/finishReason/logprobs are captured for flush:
    const toolCalls = [];
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    let logprobs;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
            if (!chunk.success) {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if (value.usage != null) {
              usage = {
                promptTokens: value.usage.prompt_tokens,
                completionTokens: value.usage.completion_tokens
              };
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
            const delta = choice.delta;
            if (delta.content != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: delta.content
              });
            }
            const mappedLogprobs = mapOpenAIChatLogProbsOutput(
              choice == null ? void 0 : choice.logprobs
            );
            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
              if (logprobs === void 0)
                logprobs = [];
              logprobs.push(...mappedLogprobs);
            }
            if (delta.tool_calls != null) {
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                // first chunk for this index must carry id/type/name;
                // later chunks only append argument text:
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
                    throw new import_provider.InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function' type.`
                    });
                  }
                  if (toolCallDelta.id == null) {
                    throw new import_provider.InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`
                    });
                  }
                  if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
                    throw new import_provider.InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`
                    });
                  }
                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
                    }
                  };
                  continue;
                }
                const toolCall = toolCalls[index];
                if (((_c = toolCallDelta.function) == null ? void 0 : _c.arguments) != null) {
                  toolCall.function.arguments += (_e = (_d = toolCallDelta.function) == null ? void 0 : _d.arguments) != null ? _e : "";
                }
                controller.enqueue({
                  type: "tool-call-delta",
                  toolCallType: "function",
                  toolCallId: toolCall.id,
                  toolName: toolCall.function.name,
                  argsTextDelta: (_f = toolCallDelta.function.arguments) != null ? _f : ""
                });
                // emit the completed tool call once the accumulated
                // arguments parse as JSON:
                if (((_g = toolCall.function) == null ? void 0 : _g.name) == null || ((_h = toolCall.function) == null ? void 0 : _h.arguments) == null || !(0, import_provider_utils3.isParseableJson)(toolCall.function.arguments)) {
                  continue;
                }
                controller.enqueue({
                  type: "tool-call",
                  toolCallType: "function",
                  toolCallId: (_i = toolCall.id) != null ? _i : (0, import_provider_utils3.generateId)(),
                  toolName: toolCall.function.name,
                  args: toolCall.function.arguments
                });
              }
            }
          },
          flush(controller) {
            controller.enqueue({
              type: "finish",
              finishReason,
              logprobs,
              usage
            });
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
};
454
// zod schema for non-streaming chat completion responses; limited to the
// fields this provider actually reads.
var openAIChatResponseSchema = import_zod2.z.object({
  choices: import_zod2.z.array(
    import_zod2.z.object({
      message: import_zod2.z.object({
        role: import_zod2.z.literal("assistant"),
        content: import_zod2.z.string().nullable().optional(),
        tool_calls: import_zod2.z.array(
          import_zod2.z.object({
            // id is tolerated as missing for OpenAI-compatible providers:
            id: import_zod2.z.string().optional().nullable(),
            type: import_zod2.z.literal("function"),
            function: import_zod2.z.object({
              name: import_zod2.z.string(),
              arguments: import_zod2.z.string()
            })
          })
        ).optional()
      }),
      index: import_zod2.z.number(),
      logprobs: import_zod2.z.object({
        content: import_zod2.z.array(
          import_zod2.z.object({
            token: import_zod2.z.string(),
            logprob: import_zod2.z.number(),
            top_logprobs: import_zod2.z.array(
              import_zod2.z.object({
                token: import_zod2.z.string(),
                logprob: import_zod2.z.number()
              })
            )
          })
        ).nullable()
      }).nullable().optional(),
      finish_reason: import_zod2.z.string().optional().nullable()
    })
  ),
  object: import_zod2.z.literal("chat.completion"),
  usage: import_zod2.z.object({
    prompt_tokens: import_zod2.z.number(),
    completion_tokens: import_zod2.z.number()
  })
});
// zod schema for streaming chat completion chunks; usage may be absent on
// most chunks, so it is optional/nullable here.
var openaiChatChunkSchema = import_zod2.z.object({
  object: import_zod2.z.enum([
    "chat.completion.chunk",
    "chat.completion"
    // support for OpenAI-compatible providers such as Perplexity
  ]),
  choices: import_zod2.z.array(
    import_zod2.z.object({
      delta: import_zod2.z.object({
        role: import_zod2.z.enum(["assistant"]).optional(),
        content: import_zod2.z.string().nullable().optional(),
        tool_calls: import_zod2.z.array(
          import_zod2.z.object({
            index: import_zod2.z.number(),
            id: import_zod2.z.string().optional().nullable(),
            type: import_zod2.z.literal("function").optional(),
            function: import_zod2.z.object({
              name: import_zod2.z.string().optional(),
              arguments: import_zod2.z.string().optional()
            })
          })
        ).optional()
      }),
      logprobs: import_zod2.z.object({
        content: import_zod2.z.array(
          import_zod2.z.object({
            token: import_zod2.z.string(),
            logprob: import_zod2.z.number(),
            top_logprobs: import_zod2.z.array(
              import_zod2.z.object({
                token: import_zod2.z.string(),
                logprob: import_zod2.z.number()
              })
            )
          })
        ).nullable()
      }).nullable().optional(),
      finish_reason: import_zod2.z.string().nullable().optional(),
      index: import_zod2.z.number()
    })
  ),
  usage: import_zod2.z.object({
    prompt_tokens: import_zod2.z.number(),
    completion_tokens: import_zod2.z.number()
  }).optional().nullable()
});
541
+
542
+ // src/openai-completion-language-model.ts
543
+ var import_provider3 = require("@ai-sdk/provider");
544
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
545
+ var import_zod3 = require("zod");
546
+
547
+ // src/convert-to-openai-completion-prompt.ts
548
+ var import_provider2 = require("@ai-sdk/provider");
549
/**
 * Converts a standardized prompt into a single text prompt (plus stop
 * sequences) for the OpenAI completions API.
 *
 * @param prompt - standardized message list
 * @param inputFormat - "prompt" enables the raw pass-through fast path
 * @param user - label for user turns (default "user")
 * @param assistant - label for assistant turns (default "assistant")
 * @returns an object with `prompt` and, for chat-style prompts, `stopSequences`
 * @throws InvalidPromptError for system messages after the first position
 * @throws UnsupportedFunctionalityError for images, tool calls, and tool messages
 */
function convertToOpenAICompletionPrompt({
  prompt,
  inputFormat,
  user = "user",
  assistant = "assistant"
}) {
  // Fast path: a raw prompt with a single user text part passes through unchanged.
  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
    return { prompt: prompt[0].content[0].text };
  }
  let text = "";
  // A leading system message becomes a plain preamble paragraph.
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}\n\n`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        throw new import_provider2.InvalidPromptError({
          // bug fix: this was a plain string literal, so the literal text
          // "${content}" was emitted instead of the interpolated value.
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "image": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "images"
              });
            }
          }
        }).join("");
        text += `${user}:\n${userMessage}\n\n`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:\n${assistantMessage}\n\n`;
        break;
      }
      case "tool": {
        throw new import_provider2.UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // The model continues from an open assistant turn; stop before the next
  // user turn so the completion does not hallucinate the user's reply.
  text += `${assistant}:\n`;
  return {
    prompt: text,
    stopSequences: [`\n${user}:`]
  };
}
630
+
631
+ // src/map-openai-completion-logprobs.ts
632
/**
 * Translates completion-style logprobs (parallel arrays of tokens and
 * probabilities) into the normalized { token, logprob, topLogprobs } shape.
 * Returns undefined when no logprobs were returned; missing top_logprobs
 * become [].
 */
function mapOpenAICompletionLogProbs(logprobs) {
  if (logprobs == null) {
    return void 0;
  }
  const { tokens, token_logprobs, top_logprobs } = logprobs;
  return tokens.map((token, index) => {
    const topLogprobs = top_logprobs ? Object.entries(top_logprobs[index]).map((pair) => ({ token: pair[0], logprob: pair[1] })) : [];
    return { token, logprob: token_logprobs[index], topLogprobs };
  });
}
644
+
645
+ // src/openai-completion-language-model.ts
646
// Language model for the OpenAI (legacy) completions API. Implements the
// "v1" language-model specification.
var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
    // language-model specification version implemented by this class:
    this.specificationVersion = "v1";
    // the completions API has no native object-generation support:
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body for /completions from the standardized
  // call options. Throws for unsupported generation modes.
  getArgs({
    mode,
    inputFormat,
    prompt,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed
  }) {
    var _a;
    const type = mode.type;
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      echo: this.settings.echo,
      logit_bias: this.settings.logitBias,
      // a numeric logprobs setting selects the top-k count; `true` maps to 0:
      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
      suffix: this.settings.suffix,
      user: this.settings.user,
      // standardized settings:
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,
      // prompt:
      prompt: completionPrompt,
      // stop sequences:
      stop: stopSequences
    };
    switch (type) {
      case "regular": {
        // the completions API cannot call tools:
        if ((_a = mode.tools) == null ? void 0 : _a.length) {
          throw new import_provider3.UnsupportedFunctionalityError({
            functionality: "tools"
          });
        }
        return baseArgs;
      }
      case "object-json": {
        throw new import_provider3.UnsupportedFunctionalityError({
          functionality: "object-json mode"
        });
      }
      case "object-tool": {
        throw new import_provider3.UnsupportedFunctionalityError({
          functionality: "object-tool mode"
        });
      }
      case "object-grammar": {
        throw new import_provider3.UnsupportedFunctionalityError({
          functionality: "object-grammar mode"
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  // Non-streaming call: posts the request and normalizes the first choice.
  async doGenerate(options) {
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/completions`,
      headers: this.config.headers(),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
        openAICompletionResponseSchema
      ),
      abortSignal: options.abortSignal
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    const choice = response.choices[0];
    return {
      text: choice.text,
      usage: {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
  // Streaming call: converts server-sent chunks into standardized stream
  // parts (text deltas plus a final "finish" event emitted on flush).
  async doStream(options) {
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/completions`,
      headers: this.config.headers(),
      body: {
        // fix: reuse the already-computed args instead of invoking
        // getArgs (and the full prompt conversion) a second time; this
        // also matches the chat model implementation.
        ...args,
        stream: true,
        // only include stream_options when in strict compatibility mode:
        stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    // captured across chunks and emitted in the final "finish" event:
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    let logprobs;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            if (!chunk.success) {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if (value.usage != null) {
              usage = {
                promptTokens: value.usage.prompt_tokens,
                completionTokens: value.usage.completion_tokens
              };
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.text) != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: choice.text
              });
            }
            const mappedLogprobs = mapOpenAICompletionLogProbs(
              choice == null ? void 0 : choice.logprobs
            );
            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
              if (logprobs === void 0)
                logprobs = [];
              logprobs.push(...mappedLogprobs);
            }
          },
          flush(controller) {
            controller.enqueue({
              type: "finish",
              finishReason,
              logprobs,
              usage
            });
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
};
823
// zod schema for non-streaming completion responses; limited to the fields
// this provider actually reads.
var openAICompletionResponseSchema = import_zod3.z.object({
  choices: import_zod3.z.array(
    import_zod3.z.object({
      text: import_zod3.z.string(),
      finish_reason: import_zod3.z.string(),
      logprobs: import_zod3.z.object({
        tokens: import_zod3.z.array(import_zod3.z.string()),
        token_logprobs: import_zod3.z.array(import_zod3.z.number()),
        top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
      }).nullable().optional()
    })
  ),
  usage: import_zod3.z.object({
    prompt_tokens: import_zod3.z.number(),
    completion_tokens: import_zod3.z.number()
  })
});
// zod schema for streaming completion chunks; usage may be absent on most
// chunks, so it is optional/nullable here.
var openaiCompletionChunkSchema = import_zod3.z.object({
  object: import_zod3.z.literal("text_completion"),
  choices: import_zod3.z.array(
    import_zod3.z.object({
      text: import_zod3.z.string(),
      finish_reason: import_zod3.z.enum(["stop", "length", "content_filter"]).optional().nullable(),
      index: import_zod3.z.number(),
      logprobs: import_zod3.z.object({
        tokens: import_zod3.z.array(import_zod3.z.string()),
        token_logprobs: import_zod3.z.array(import_zod3.z.number()),
        top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
      }).nullable().optional()
    })
  ),
  usage: import_zod3.z.object({
    prompt_tokens: import_zod3.z.number(),
    completion_tokens: import_zod3.z.number()
  }).optional().nullable()
});
859
+
860
+ // src/openai-facade.ts
861
// Facade class that creates chat and completion models pre-configured with
// credentials, organization/project headers, and strict compatibility.
var OpenAI = class {
  /**
   * Creates a new OpenAI provider instance.
   */
  constructor(options = {}) {
    var _a, _b;
    // `baseUrl` is the legacy spelling of `baseURL`; both are accepted.
    this.baseURL = (_b = (0, import_provider_utils5.withoutTrailingSlash)((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1";
    this.apiKey = options.apiKey;
    this.organization = options.organization;
    this.project = options.project;
    this.headers = options.headers;
  }
  // Shared configuration (base URL + lazily-evaluated request headers)
  // passed to every model created by this facade.
  get baseConfig() {
    return {
      organization: this.organization,
      baseURL: this.baseURL,
      headers: () => ({
        // falls back to the OPENAI_API_KEY environment variable when no
        // explicit apiKey was provided:
        Authorization: `Bearer ${(0, import_provider_utils5.loadApiKey)({
          apiKey: this.apiKey,
          environmentVariableName: "OPENAI_API_KEY",
          description: "OpenAI"
        })}`,
        "OpenAI-Organization": this.organization,
        "OpenAI-Project": this.project,
        ...this.headers
      })
    };
  }
  // Creates a chat language model for the given model id.
  chat(modelId, settings = {}) {
    return new OpenAIChatLanguageModel(modelId, settings, {
      provider: "openai.chat",
      ...this.baseConfig,
      compatibility: "strict"
    });
  }
  // Creates a (legacy) completion language model for the given model id.
  completion(modelId, settings = {}) {
    return new OpenAICompletionLanguageModel(modelId, settings, {
      provider: "openai.completion",
      ...this.baseConfig,
      compatibility: "strict"
    });
  }
};
904
+
905
+ // src/openai-provider.ts
906
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
907
+
908
+ // src/openai-embedding-model.ts
909
+ var import_provider4 = require("@ai-sdk/provider");
910
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
911
+ var import_zod4 = require("zod");
912
// Embedding model for the OpenAI embeddings API. Implements the "v1"
// embedding-model specification.
var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
    // embedding-model specification version implemented by this class:
    this.specificationVersion = "v1";
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Maximum number of values accepted per API call; defaults to 2048.
  get maxEmbeddingsPerCall() {
    var _a;
    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
  }
  // Whether multiple embed calls may run in parallel; defaults to true.
  get supportsParallelCalls() {
    var _a;
    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
  }
  // Embeds the given values in a single API call, throwing when the batch
  // exceeds maxEmbeddingsPerCall.
  async doEmbed({
    values,
    abortSignal
  }) {
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider4.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: `${this.config.baseURL}/embeddings`,
      headers: this.config.headers(),
      body: {
        model: this.modelId,
        input: values,
        encoding_format: "float",
        dimensions: this.settings.dimensions,
        user: this.settings.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      rawResponse: { headers: responseHeaders }
    };
  }
};
// zod schema for the embeddings response; limited to the fields read above.
var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  data: import_zod4.z.array(
    import_zod4.z.object({
      embedding: import_zod4.z.array(import_zod4.z.number())
    })
  )
});
971
+
972
+ // src/openai-provider.ts
973
// Creates an OpenAI provider: a callable that returns a language model for a
// model id, with .chat/.completion/.embedding factory methods attached.
function createOpenAI(options = {}) {
  var _a, _b, _c;
  // `baseUrl` is the legacy spelling of `baseURL`; both are accepted.
  const baseURL = (_b = (0, import_provider_utils7.withoutTrailingSlash)((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1";
  // defaults to "compatible" so OpenAI-compatible third-party APIs work:
  const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
  const getHeaders = () => ({
    // falls back to the OPENAI_API_KEY environment variable when no
    // explicit apiKey was provided:
    Authorization: `Bearer ${(0, import_provider_utils7.loadApiKey)({
      apiKey: options.apiKey,
      environmentVariableName: "OPENAI_API_KEY",
      description: "OpenAI"
    })}`,
    "OpenAI-Organization": options.organization,
    "OpenAI-Project": options.project,
    ...options.headers
  });
  const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
    provider: "openai.chat",
    baseURL,
    headers: getHeaders,
    compatibility
  });
  const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
    provider: "openai.completion",
    baseURL,
    headers: getHeaders,
    compatibility
  });
  const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
    provider: "openai.embedding",
    baseURL,
    headers: getHeaders
  });
  const provider = function(modelId, settings) {
    if (new.target) {
      throw new Error(
        "The OpenAI model function cannot be called with the new keyword."
      );
    }
    // gpt-3.5-turbo-instruct is routed to the completions API; every other
    // model id uses the chat completions API:
    if (modelId === "gpt-3.5-turbo-instruct") {
      return createCompletionModel(
        modelId,
        settings
      );
    }
    return createChatModel(modelId, settings);
  };
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.embedding = createEmbeddingModel;
  return provider;
}
1023
// Default provider instance targeting the official OpenAI API.
var openai = createOpenAI({
  compatibility: "strict"
  // strict for OpenAI API
});
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  OpenAI,
  createOpenAI,
  openai
});
1033
+ //# sourceMappingURL=index.js.map