@ai-sdk/openai 0.0.0-85f9a635-20240518005312

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,1028 @@
+ // src/openai-facade.ts
+ import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils";
+
+ // src/openai-chat-language-model.ts
+ import {
+   InvalidResponseDataError,
+   UnsupportedFunctionalityError
+ } from "@ai-sdk/provider";
+ import {
+   createEventSourcePassThroughHandler,
+   createEventSourceResponseHandler,
+   createJsonResponseHandler,
+   generateId,
+   isParseableJson,
+   postJsonToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z2 } from "zod";
+
+ // src/convert-to-openai-chat-messages.ts
+ import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ function convertToOpenAIChatMessages(prompt) {
+   const messages = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         messages.push({ role: "system", content });
+         break;
+       }
+       case "user": {
+         messages.push({
+           role: "user",
+           content: content.map((part) => {
+             var _a;
+             switch (part.type) {
+               case "text": {
+                 return { type: "text", text: part.text };
+               }
+               case "image": {
+                 return {
+                   type: "image_url",
+                   image_url: {
+                     url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
+                   }
+                 };
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.args)
+                 }
+               });
+               break;
+             }
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: JSON.stringify(toolResponse.result)
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return messages;
+ }
+
+ // src/map-openai-finish-reason.ts
+ function mapOpenAIFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "other";
+   }
+ }
+
+ // src/openai-error.ts
+ import { z } from "zod";
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+ var openAIErrorDataSchema = z.object({
+   error: z.object({
+     message: z.string(),
+     type: z.string(),
+     param: z.any().nullable(),
+     code: z.string().nullable()
+   })
+ });
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
+   errorSchema: openAIErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ });
+
+ // src/map-openai-chat-logprobs.ts
+ function mapOpenAIChatLogProbsOutput(logprobs) {
+   var _a, _b;
+   return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
+     token,
+     logprob,
+     topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
+       token: token2,
+       logprob: logprob2
+     })) : []
+   }))) != null ? _b : void 0;
+ }
+
+ // src/openai-chat-language-model.ts
+ var OpenAIChatLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = "tool";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     seed
+   }) {
+     var _a;
+     const type = mode.type;
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       logit_bias: this.settings.logitBias,
+       logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number",
+       top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       // messages:
+       messages: convertToOpenAIChatMessages(prompt)
+     };
+     switch (type) {
+       case "regular": {
+         const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+         return {
+           ...baseArgs,
+           tools: tools == null ? void 0 : tools.map((tool) => ({
+             type: "function",
+             function: {
+               name: tool.name,
+               description: tool.description,
+               parameters: tool.parameters
+             }
+           }))
+         };
+       }
+       case "object-json": {
+         return {
+           ...baseArgs,
+           response_format: { type: "json_object" }
+         };
+       }
+       case "object-tool": {
+         return {
+           ...baseArgs,
+           tool_choice: { type: "function", function: { name: mode.tool.name } },
+           tools: [
+             {
+               type: "function",
+               function: {
+                 name: mode.tool.name,
+                 description: mode.tool.description,
+                 parameters: mode.tool.parameters
+               }
+             }
+           ]
+         };
+       }
+       case "object-grammar": {
+         throw new UnsupportedFunctionalityError({
+           functionality: "object-grammar mode"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b;
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: `${this.config.baseURL}/chat/completions`,
+       headers: this.config.headers(),
+       body: args,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         openAIChatResponseSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: (_a = choice.message.content) != null ? _a : void 0,
+       toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+         var _a2;
+         return {
+           toolCallType: "function",
+           toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+           toolName: toolCall.function.name,
+           args: toolCall.function.arguments
+         };
+       }),
+       finishReason: mapOpenAIFinishReason(choice.finish_reason),
+       usage: {
+         promptTokens: response.usage.prompt_tokens,
+         completionTokens: response.usage.completion_tokens
+       },
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: [],
+       logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
+     };
+   }
+   async doRawStream(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: responseBody } = await postJsonToApi({
+       url: `${this.config.baseURL}/chat/completions`,
+       headers: this.config.headers(),
+       body: {
+         ...args,
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourcePassThroughHandler(
+         openaiChatChunkSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     return {
+       stream: responseBody,
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+   async doStream(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: `${this.config.baseURL}/chat/completions`,
+       headers: this.config.headers(),
+       body: {
+         ...args,
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler(
+         openaiChatChunkSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const toolCalls = [];
+     let finishReason = "other";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let logprobs;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+             if (!chunk.success) {
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAIFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.content != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: delta.content
+               });
+             }
+             const mappedLogprobs = mapOpenAIChatLogProbsOutput(
+               choice == null ? void 0 : choice.logprobs
+             );
+             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+               if (logprobs === void 0)
+                 logprobs = [];
+               logprobs.push(...mappedLogprobs);
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                     }
+                   };
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (((_c = toolCallDelta.function) == null ? void 0 : _c.arguments) != null) {
+                   toolCall.function.arguments += (_e = (_d = toolCallDelta.function) == null ? void 0 : _d.arguments) != null ? _e : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: toolCall.id,
+                   toolName: toolCall.function.name,
+                   argsTextDelta: (_f = toolCallDelta.function.arguments) != null ? _f : ""
+                 });
+                 if (((_g = toolCall.function) == null ? void 0 : _g.name) == null || ((_h = toolCall.function) == null ? void 0 : _h.arguments) == null || !isParseableJson(toolCall.function.arguments)) {
+                   continue;
+                 }
+                 controller.enqueue({
+                   type: "tool-call",
+                   toolCallType: "function",
+                   toolCallId: (_i = toolCall.id) != null ? _i : generateId(),
+                   toolName: toolCall.function.name,
+                   args: toolCall.function.arguments
+                 });
+               }
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               logprobs,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+ };
+ var openAIChatResponseSchema = z2.object({
+   choices: z2.array(
+     z2.object({
+       message: z2.object({
+         role: z2.literal("assistant"),
+         content: z2.string().nullable().optional(),
+         tool_calls: z2.array(
+           z2.object({
+             id: z2.string().optional().nullable(),
+             type: z2.literal("function"),
+             function: z2.object({
+               name: z2.string(),
+               arguments: z2.string()
+             })
+           })
+         ).optional()
+       }),
+       index: z2.number(),
+       logprobs: z2.object({
+         content: z2.array(
+           z2.object({
+             token: z2.string(),
+             logprob: z2.number(),
+             top_logprobs: z2.array(
+               z2.object({
+                 token: z2.string(),
+                 logprob: z2.number()
+               })
+             )
+           })
+         ).nullable()
+       }).nullable().optional(),
+       finish_reason: z2.string().optional().nullable()
+     })
+   ),
+   object: z2.literal("chat.completion"),
+   usage: z2.object({
+     prompt_tokens: z2.number(),
+     completion_tokens: z2.number()
+   })
+ });
+ var openaiChatChunkSchema = z2.object({
+   object: z2.enum([
+     "chat.completion.chunk",
+     "chat.completion"
+     // support for OpenAI-compatible providers such as Perplexity
+   ]),
+   choices: z2.array(
+     z2.object({
+       delta: z2.object({
+         role: z2.enum(["assistant"]).optional(),
+         content: z2.string().nullable().optional(),
+         tool_calls: z2.array(
+           z2.object({
+             index: z2.number(),
+             id: z2.string().optional().nullable(),
+             type: z2.literal("function").optional(),
+             function: z2.object({
+               name: z2.string().optional(),
+               arguments: z2.string().optional()
+             })
+           })
+         ).optional()
+       }),
+       logprobs: z2.object({
+         content: z2.array(
+           z2.object({
+             token: z2.string(),
+             logprob: z2.number(),
+             top_logprobs: z2.array(
+               z2.object({
+                 token: z2.string(),
+                 logprob: z2.number()
+               })
+             )
+           })
+         ).nullable()
+       }).nullable().optional(),
+       finish_reason: z2.string().nullable().optional(),
+       index: z2.number()
+     })
+   ),
+   usage: z2.object({
+     prompt_tokens: z2.number(),
+     completion_tokens: z2.number()
+   }).optional().nullable()
+ });
+
+ // src/openai-completion-language-model.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ } from "@ai-sdk/provider";
+ import {
+   createEventSourceResponseHandler as createEventSourceResponseHandler2,
+   createJsonResponseHandler as createJsonResponseHandler2,
+   postJsonToApi as postJsonToApi2
+ } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod";
+
+ // src/convert-to-openai-completion-prompt.ts
+ import {
+   InvalidPromptError,
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+ } from "@ai-sdk/provider";
+ function convertToOpenAICompletionPrompt({
+   prompt,
+   inputFormat,
+   user = "user",
+   assistant = "assistant"
+ }) {
+   if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
+     return { prompt: prompt[0].content[0].text };
+   }
+   let text = "";
+   if (prompt[0].role === "system") {
+     text += `${prompt[0].content}
+
+ `;
+     prompt = prompt.slice(1);
+   }
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         throw new InvalidPromptError({
+           message: `Unexpected system message in prompt: ${content}`,
+           prompt
+         });
+       }
+       case "user": {
+         const userMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "image": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "images"
+               });
+             }
+           }
+         }).join("");
+         text += `${user}:
+ ${userMessage}
+
+ `;
+         break;
+       }
+       case "assistant": {
+         const assistantMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "tool-call": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "tool-call messages"
+               });
+             }
+           }
+         }).join("");
+         text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+         break;
+       }
+       case "tool": {
+         throw new UnsupportedFunctionalityError2({
+           functionality: "tool messages"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   text += `${assistant}:
+ `;
+   return {
+     prompt: text,
+     stopSequences: [`
+ ${user}:`]
+   };
+ }
+
+ // src/map-openai-completion-logprobs.ts
+ function mapOpenAICompletionLogProbs(logprobs) {
+   return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
+     token,
+     logprob: logprobs.token_logprobs[index],
+     topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
+       ([token2, logprob]) => ({
+         token: token2,
+         logprob
+       })
+     ) : []
+   }));
+ }
+
+ // src/openai-completion-language-model.ts
+ var OpenAICompletionLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = void 0;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     inputFormat,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     seed
+   }) {
+     var _a;
+     const type = mode.type;
+     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       echo: this.settings.echo,
+       logit_bias: this.settings.logitBias,
+       logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+       suffix: this.settings.suffix,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       // prompt:
+       prompt: completionPrompt,
+       // stop sequences:
+       stop: stopSequences
+     };
+     switch (type) {
+       case "regular": {
+         if ((_a = mode.tools) == null ? void 0 : _a.length) {
+           throw new UnsupportedFunctionalityError3({
+             functionality: "tools"
+           });
+         }
+         return baseArgs;
+       }
+       case "object-json": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "object-json mode"
+         });
+       }
+       case "object-tool": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "object-tool mode"
+         });
+       }
+       case "object-grammar": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "object-grammar mode"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: `${this.config.baseURL}/completions`,
+       headers: this.config.headers(),
+       body: args,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler2(
+         openAICompletionResponseSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: choice.text,
+       usage: {
+         promptTokens: response.usage.prompt_tokens,
+         completionTokens: response.usage.completion_tokens
+       },
+       finishReason: mapOpenAIFinishReason(choice.finish_reason),
+       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+   async doStream(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: `${this.config.baseURL}/completions`,
+       headers: this.config.headers(),
+       body: {
+         ...args,
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler2(
+         openaiCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     let finishReason = "other";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let logprobs;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             if (!chunk.success) {
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAIFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.text) != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: choice.text
+               });
+             }
+             const mappedLogprobs = mapOpenAICompletionLogProbs(
+               choice == null ? void 0 : choice.logprobs
+             );
+             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+               if (logprobs === void 0)
+                 logprobs = [];
+               logprobs.push(...mappedLogprobs);
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               logprobs,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+ };
+ var openAICompletionResponseSchema = z3.object({
+   choices: z3.array(
+     z3.object({
+       text: z3.string(),
+       finish_reason: z3.string(),
+       logprobs: z3.object({
+         tokens: z3.array(z3.string()),
+         token_logprobs: z3.array(z3.number()),
+         top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+       }).nullable().optional()
+     })
+   ),
+   usage: z3.object({
+     prompt_tokens: z3.number(),
+     completion_tokens: z3.number()
+   })
+ });
+ var openaiCompletionChunkSchema = z3.object({
+   object: z3.literal("text_completion"),
+   choices: z3.array(
+     z3.object({
+       text: z3.string(),
+       finish_reason: z3.enum(["stop", "length", "content_filter"]).optional().nullable(),
+       index: z3.number(),
+       logprobs: z3.object({
+         tokens: z3.array(z3.string()),
+         token_logprobs: z3.array(z3.number()),
+         top_logprobs: z3.array(z3.record(z3.string(), z3.number())).nullable()
+       }).nullable().optional()
+     })
+   ),
+   usage: z3.object({
+     prompt_tokens: z3.number(),
+     completion_tokens: z3.number()
+   }).optional().nullable()
+ });
+
+ // src/openai-facade.ts
+ var OpenAI = class {
+   /**
+    * Creates a new OpenAI provider instance.
+    */
+   constructor(options = {}) {
+     var _a, _b;
+     this.baseURL = (_b = withoutTrailingSlash((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1";
+     this.apiKey = options.apiKey;
+     this.organization = options.organization;
+     this.project = options.project;
+     this.headers = options.headers;
+   }
+   get baseConfig() {
+     return {
+       organization: this.organization,
+       baseURL: this.baseURL,
+       headers: () => ({
+         Authorization: `Bearer ${loadApiKey({
+           apiKey: this.apiKey,
+           environmentVariableName: "OPENAI_API_KEY",
+           description: "OpenAI"
+         })}`,
+         "OpenAI-Organization": this.organization,
+         "OpenAI-Project": this.project,
+         ...this.headers
+       })
+     };
+   }
+   chat(modelId, settings = {}) {
+     return new OpenAIChatLanguageModel(modelId, settings, {
+       provider: "openai.chat",
+       ...this.baseConfig,
+       compatibility: "strict"
+     });
+   }
+   completion(modelId, settings = {}) {
+     return new OpenAICompletionLanguageModel(modelId, settings, {
+       provider: "openai.completion",
+       ...this.baseConfig,
+       compatibility: "strict"
+     });
+   }
+ };
+
+ // src/openai-provider.ts
+ import { loadApiKey as loadApiKey2, withoutTrailingSlash as withoutTrailingSlash2 } from "@ai-sdk/provider-utils";
+
+ // src/openai-embedding-model.ts
+ import {
+   TooManyEmbeddingValuesForCallError
+ } from "@ai-sdk/provider";
+ import {
+   createJsonResponseHandler as createJsonResponseHandler3,
+   postJsonToApi as postJsonToApi3
+ } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod";
+ var OpenAIEmbeddingModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   get maxEmbeddingsPerCall() {
+     var _a;
+     return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
+   }
+   get supportsParallelCalls() {
+     var _a;
+     return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
+   }
+   async doEmbed({
+     values,
+     abortSignal
+   }) {
+     if (values.length > this.maxEmbeddingsPerCall) {
+       throw new TooManyEmbeddingValuesForCallError({
+         provider: this.provider,
+         modelId: this.modelId,
+         maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+         values
+       });
+     }
+     const { responseHeaders, value: response } = await postJsonToApi3({
+       url: `${this.config.baseURL}/embeddings`,
+       headers: this.config.headers(),
+       body: {
+         model: this.modelId,
+         input: values,
+         encoding_format: "float",
+         dimensions: this.settings.dimensions,
+         user: this.settings.user
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler3(
+         openaiTextEmbeddingResponseSchema
+       ),
+       abortSignal
+     });
+     return {
+       embeddings: response.data.map((item) => item.embedding),
+       rawResponse: { headers: responseHeaders }
+     };
+   }
+ };
+ var openaiTextEmbeddingResponseSchema = z4.object({
+   data: z4.array(
+     z4.object({
+       embedding: z4.array(z4.number())
+     })
+   )
+ });
+
+ // src/openai-provider.ts
+ function createOpenAI(options = {}) {
+   var _a, _b, _c;
+   const baseURL = (_b = withoutTrailingSlash2((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1";
+   const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
+   const getHeaders = () => ({
+     Authorization: `Bearer ${loadApiKey2({
+       apiKey: options.apiKey,
+       environmentVariableName: "OPENAI_API_KEY",
+       description: "OpenAI"
+     })}`,
+     "OpenAI-Organization": options.organization,
+     "OpenAI-Project": options.project,
+     ...options.headers
+   });
+   const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+     provider: "openai.chat",
+     baseURL,
+     headers: getHeaders,
+     compatibility
+   });
+   const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
+     provider: "openai.completion",
+     baseURL,
+     headers: getHeaders,
+     compatibility
+   });
+   const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+     provider: "openai.embedding",
+     baseURL,
+     headers: getHeaders
+   });
+   const provider = function(modelId, settings) {
+     if (new.target) {
+       throw new Error(
+         "The OpenAI model function cannot be called with the new keyword."
+       );
+     }
+     if (modelId === "gpt-3.5-turbo-instruct") {
+       return createCompletionModel(
+         modelId,
+         settings
+       );
+     }
+     return createChatModel(modelId, settings);
+   };
+   provider.chat = createChatModel;
+   provider.completion = createCompletionModel;
+   provider.embedding = createEmbeddingModel;
+   return provider;
+ }
+ var openai = createOpenAI({
+   compatibility: "strict"
+   // strict for OpenAI API
+ });
+ export {
+   OpenAI,
+   createOpenAI,
+   openai
+ };
+ //# sourceMappingURL=index.mjs.map
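
For orientation, here is a minimal usage sketch of the API exported above (not part of the published diff). The model ids and the custom endpoint URL are illustrative assumptions, not values from the package:

import { createOpenAI, openai } from "@ai-sdk/openai";

// Default instance (created above with compatibility: "strict").
// Calling the provider as a function routes "gpt-3.5-turbo-instruct"
// to the completion model and any other id to the chat model:
const chatModel = openai("gpt-4"); // illustrative model id
const instructModel = openai("gpt-3.5-turbo-instruct");

// Custom instance, e.g. for an OpenAI-compatible endpoint (hypothetical URL).
// The API key falls back to the OPENAI_API_KEY environment variable when
// omitted, and compatibility defaults to "compatible" in createOpenAI:
const provider = createOpenAI({ baseURL: "https://example.com/v1" });
const embeddingModel = provider.embedding("text-embedding-3-small");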