@ai-sdk/openai 0.0.25 → 0.0.27

This diff shows the contents of publicly available package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the two versions.
@@ -0,0 +1,971 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/internal/index.ts
+ var internal_exports = {};
+ __export(internal_exports, {
+   OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
+   OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
+   OpenAIEmbeddingModel: () => OpenAIEmbeddingModel
+ });
+ module.exports = __toCommonJS(internal_exports);
+
+ // src/openai-chat-language-model.ts
+ var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod2 = require("zod");
+
+ // src/convert-to-openai-chat-messages.ts
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+ function convertToOpenAIChatMessages(prompt) {
+   const messages = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         messages.push({ role: "system", content });
+         break;
+       }
+       case "user": {
+         if (content.length === 1 && content[0].type === "text") {
+           messages.push({ role: "user", content: content[0].text });
+           break;
+         }
+         messages.push({
+           role: "user",
+           content: content.map((part) => {
+             var _a;
+             switch (part.type) {
+               case "text": {
+                 return { type: "text", text: part.text };
+               }
+               case "image": {
+                 return {
+                   type: "image_url",
+                   image_url: {
+                     url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`
+                   }
+                 };
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.args)
+                 }
+               });
+               break;
+             }
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: JSON.stringify(toolResponse.result)
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return messages;
+ }
+
+ // src/map-openai-chat-logprobs.ts
+ function mapOpenAIChatLogProbsOutput(logprobs) {
+   var _a, _b;
+   return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
+     token,
+     logprob,
+     topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
+       token: token2,
+       logprob: logprob2
+     })) : []
+   }))) != null ? _b : void 0;
+ }
+
+ // src/map-openai-finish-reason.ts
+ function mapOpenAIFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "unknown";
+   }
+ }
+
+ // src/openai-error.ts
+ var import_zod = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var openAIErrorDataSchema = import_zod.z.object({
+   error: import_zod.z.object({
+     message: import_zod.z.string(),
+     type: import_zod.z.string(),
+     param: import_zod.z.any().nullable(),
+     code: import_zod.z.string().nullable()
+   })
+ });
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
+   errorSchema: openAIErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ });
+
+ // src/openai-chat-language-model.ts
+ var OpenAIChatLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = "tool";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     seed
+   }) {
+     const type = mode.type;
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       logit_bias: this.settings.logitBias,
+       logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number",
+       top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       // messages:
+       messages: convertToOpenAIChatMessages(prompt)
+     };
+     switch (type) {
+       case "regular": {
+         return { ...baseArgs, ...prepareToolsAndToolChoice(mode) };
+       }
+       case "object-json": {
+         return {
+           ...baseArgs,
+           response_format: { type: "json_object" }
+         };
+       }
+       case "object-tool": {
+         return {
+           ...baseArgs,
+           tool_choice: { type: "function", function: { name: mode.tool.name } },
+           tools: [
+             {
+               type: "function",
+               function: {
+                 name: mode.tool.name,
+                 description: mode.tool.description,
+                 parameters: mode.tool.parameters
+               }
+             }
+           ]
+         };
+       }
+       case "object-grammar": {
+         throw new import_provider.UnsupportedFunctionalityError({
+           functionality: "object-grammar mode"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b;
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: this.config.headers(),
+       body: args,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+         openAIChatResponseSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: (_a = choice.message.content) != null ? _a : void 0,
+       toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+         var _a2;
+         return {
+           toolCallType: "function",
+           toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
+           toolName: toolCall.function.name,
+           args: toolCall.function.arguments
+         };
+       }),
+       finishReason: mapOpenAIFinishReason(choice.finish_reason),
+       usage: {
+         promptTokens: response.usage.prompt_tokens,
+         completionTokens: response.usage.completion_tokens
+       },
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: [],
+       logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
+     };
+   }
+   async doStream(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: this.config.headers(),
+       body: {
+         ...args,
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+         openaiChatChunkSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const toolCalls = [];
+     let finishReason = "other";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let logprobs;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAIFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.content != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: delta.content
+               });
+             }
+             const mappedLogprobs = mapOpenAIChatLogProbsOutput(
+               choice == null ? void 0 : choice.logprobs
+             );
+             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+               if (logprobs === void 0)
+                 logprobs = [];
+               logprobs.push(...mappedLogprobs);
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new import_provider.InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new import_provider.InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+                     throw new import_provider.InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                     }
+                   };
+                   const toolCall2 = toolCalls[index];
+                   if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
+                     controller.enqueue({
+                       type: "tool-call-delta",
+                       toolCallType: "function",
+                       toolCallId: toolCall2.id,
+                       toolName: toolCall2.function.name,
+                       argsTextDelta: toolCall2.function.arguments
+                     });
+                     controller.enqueue({
+                       type: "tool-call",
+                       toolCallType: "function",
+                       toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+                       toolName: toolCall2.function.name,
+                       args: toolCall2.function.arguments
+                     });
+                   }
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+                   toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: toolCall.id,
+                   toolName: toolCall.function.name,
+                   argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                 });
+                 if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallType: "function",
+                     toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+                     toolName: toolCall.function.name,
+                     args: toolCall.function.arguments
+                   });
+                 }
+               }
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               logprobs,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+ };
+ var openAIChatResponseSchema = import_zod2.z.object({
+   choices: import_zod2.z.array(
+     import_zod2.z.object({
+       message: import_zod2.z.object({
+         role: import_zod2.z.literal("assistant"),
+         content: import_zod2.z.string().nullable().optional(),
+         tool_calls: import_zod2.z.array(
+           import_zod2.z.object({
+             id: import_zod2.z.string().optional().nullable(),
+             type: import_zod2.z.literal("function"),
+             function: import_zod2.z.object({
+               name: import_zod2.z.string(),
+               arguments: import_zod2.z.string()
+             })
+           })
+         ).optional()
+       }),
+       index: import_zod2.z.number(),
+       logprobs: import_zod2.z.object({
+         content: import_zod2.z.array(
+           import_zod2.z.object({
+             token: import_zod2.z.string(),
+             logprob: import_zod2.z.number(),
+             top_logprobs: import_zod2.z.array(
+               import_zod2.z.object({
+                 token: import_zod2.z.string(),
+                 logprob: import_zod2.z.number()
+               })
+             )
+           })
+         ).nullable()
+       }).nullable().optional(),
+       finish_reason: import_zod2.z.string().optional().nullable()
+     })
+   ),
+   usage: import_zod2.z.object({
+     prompt_tokens: import_zod2.z.number(),
+     completion_tokens: import_zod2.z.number()
+   })
+ });
+ var openaiChatChunkSchema = import_zod2.z.union([
+   import_zod2.z.object({
+     choices: import_zod2.z.array(
+       import_zod2.z.object({
+         delta: import_zod2.z.object({
+           role: import_zod2.z.enum(["assistant"]).optional(),
+           content: import_zod2.z.string().nullish(),
+           tool_calls: import_zod2.z.array(
+             import_zod2.z.object({
+               index: import_zod2.z.number(),
+               id: import_zod2.z.string().nullish(),
+               type: import_zod2.z.literal("function").optional(),
+               function: import_zod2.z.object({
+                 name: import_zod2.z.string().nullish(),
+                 arguments: import_zod2.z.string().nullish()
+               })
+             })
+           ).nullish()
+         }).nullish(),
+         logprobs: import_zod2.z.object({
+           content: import_zod2.z.array(
+             import_zod2.z.object({
+               token: import_zod2.z.string(),
+               logprob: import_zod2.z.number(),
+               top_logprobs: import_zod2.z.array(
+                 import_zod2.z.object({
+                   token: import_zod2.z.string(),
+                   logprob: import_zod2.z.number()
+                 })
+               )
+             })
+           ).nullable()
+         }).nullish(),
+         finish_reason: import_zod2.z.string().nullable().optional(),
+         index: import_zod2.z.number()
+       })
+     ),
+     usage: import_zod2.z.object({
+       prompt_tokens: import_zod2.z.number(),
+       completion_tokens: import_zod2.z.number()
+     }).nullish()
+   }),
+   openAIErrorDataSchema
+ ]);
+ function prepareToolsAndToolChoice(mode) {
+   var _a;
+   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+   if (tools == null) {
+     return { tools: void 0, tool_choice: void 0 };
+   }
+   const mappedTools = tools.map((tool) => ({
+     type: "function",
+     function: {
+       name: tool.name,
+       description: tool.description,
+       parameters: tool.parameters
+     }
+   }));
+   const toolChoice = mode.toolChoice;
+   if (toolChoice == null) {
+     return { tools: mappedTools, tool_choice: void 0 };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: mappedTools, tool_choice: type };
+     case "tool":
+       return {
+         tools: mappedTools,
+         tool_choice: {
+           type: "function",
+           function: {
+             name: toolChoice.toolName
+           }
+         }
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
+     }
+   }
+ }
+
+ // src/openai-completion-language-model.ts
+ var import_provider3 = require("@ai-sdk/provider");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");
+
+ // src/convert-to-openai-completion-prompt.ts
+ var import_provider2 = require("@ai-sdk/provider");
+ function convertToOpenAICompletionPrompt({
+   prompt,
+   inputFormat,
+   user = "user",
+   assistant = "assistant"
+ }) {
+   if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
+     return { prompt: prompt[0].content[0].text };
+   }
+   let text = "";
+   if (prompt[0].role === "system") {
+     text += `${prompt[0].content}
+
+ `;
+     prompt = prompt.slice(1);
+   }
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         throw new import_provider2.InvalidPromptError({
+           message: `Unexpected system message in prompt: ${content}`,
+           prompt
+         });
+       }
+       case "user": {
+         const userMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "image": {
+               throw new import_provider2.UnsupportedFunctionalityError({
+                 functionality: "images"
+               });
+             }
+           }
+         }).join("");
+         text += `${user}:
+ ${userMessage}
+
+ `;
+         break;
+       }
+       case "assistant": {
+         const assistantMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "tool-call": {
+               throw new import_provider2.UnsupportedFunctionalityError({
+                 functionality: "tool-call messages"
+               });
+             }
+           }
+         }).join("");
+         text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+         break;
+       }
+       case "tool": {
+         throw new import_provider2.UnsupportedFunctionalityError({
+           functionality: "tool messages"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   text += `${assistant}:
+ `;
+   return {
+     prompt: text,
+     stopSequences: [`
+ ${user}:`]
+   };
+ }
+
+ // src/map-openai-completion-logprobs.ts
+ function mapOpenAICompletionLogProbs(logprobs) {
+   return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
+     token,
+     logprob: logprobs.token_logprobs[index],
+     topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
+       ([token2, logprob]) => ({
+         token: token2,
+         logprob
+       })
+     ) : []
+   }));
+ }
+
+ // src/openai-completion-language-model.ts
+ var OpenAICompletionLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = void 0;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     inputFormat,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     seed
+   }) {
+     var _a;
+     const type = mode.type;
+     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       echo: this.settings.echo,
+       logit_bias: this.settings.logitBias,
+       logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+       suffix: this.settings.suffix,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       // prompt:
+       prompt: completionPrompt,
+       // stop sequences:
+       stop: stopSequences
+     };
+     switch (type) {
+       case "regular": {
+         if ((_a = mode.tools) == null ? void 0 : _a.length) {
+           throw new import_provider3.UnsupportedFunctionalityError({
+             functionality: "tools"
+           });
+         }
+         if (mode.toolChoice) {
+           throw new import_provider3.UnsupportedFunctionalityError({
+             functionality: "toolChoice"
+           });
+         }
+         return baseArgs;
+       }
+       case "object-json": {
+         throw new import_provider3.UnsupportedFunctionalityError({
+           functionality: "object-json mode"
+         });
+       }
+       case "object-tool": {
+         throw new import_provider3.UnsupportedFunctionalityError({
+           functionality: "object-tool mode"
+         });
+       }
+       case "object-grammar": {
+         throw new import_provider3.UnsupportedFunctionalityError({
+           functionality: "object-grammar mode"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+       url: `${this.config.baseURL}/completions`,
+       headers: this.config.headers(),
+       body: args,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+         openAICompletionResponseSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: choice.text,
+       usage: {
+         promptTokens: response.usage.prompt_tokens,
+         completionTokens: response.usage.completion_tokens
+       },
+       finishReason: mapOpenAIFinishReason(choice.finish_reason),
+       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+   async doStream(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+       url: `${this.config.baseURL}/completions`,
+       headers: this.config.headers(),
+       body: {
+         ...args,
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
+         openaiCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     let finishReason = "other";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let logprobs;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAIFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.text) != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: choice.text
+               });
+             }
+             const mappedLogprobs = mapOpenAICompletionLogProbs(
+               choice == null ? void 0 : choice.logprobs
+             );
+             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+               if (logprobs === void 0)
+                 logprobs = [];
+               logprobs.push(...mappedLogprobs);
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               logprobs,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+ };
+ var openAICompletionResponseSchema = import_zod3.z.object({
+   choices: import_zod3.z.array(
+     import_zod3.z.object({
+       text: import_zod3.z.string(),
+       finish_reason: import_zod3.z.string(),
+       logprobs: import_zod3.z.object({
+         tokens: import_zod3.z.array(import_zod3.z.string()),
+         token_logprobs: import_zod3.z.array(import_zod3.z.number()),
+         top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+       }).nullable().optional()
+     })
+   ),
+   usage: import_zod3.z.object({
+     prompt_tokens: import_zod3.z.number(),
+     completion_tokens: import_zod3.z.number()
+   })
+ });
+ var openaiCompletionChunkSchema = import_zod3.z.union([
+   import_zod3.z.object({
+     choices: import_zod3.z.array(
+       import_zod3.z.object({
+         text: import_zod3.z.string(),
+         finish_reason: import_zod3.z.string().nullish(),
+         index: import_zod3.z.number(),
+         logprobs: import_zod3.z.object({
+           tokens: import_zod3.z.array(import_zod3.z.string()),
+           token_logprobs: import_zod3.z.array(import_zod3.z.number()),
+           top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+         }).nullable().optional()
+       })
+     ),
+     usage: import_zod3.z.object({
+       prompt_tokens: import_zod3.z.number(),
+       completion_tokens: import_zod3.z.number()
+     }).optional().nullable()
+   }),
+   openAIErrorDataSchema
+ ]);
+
+ // src/openai-embedding-model.ts
+ var import_provider4 = require("@ai-sdk/provider");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_zod4 = require("zod");
+ var OpenAIEmbeddingModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   get maxEmbeddingsPerCall() {
+     var _a;
+     return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
+   }
+   get supportsParallelCalls() {
+     var _a;
+     return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
+   }
+   async doEmbed({
+     values,
+     abortSignal
+   }) {
+     if (values.length > this.maxEmbeddingsPerCall) {
+       throw new import_provider4.TooManyEmbeddingValuesForCallError({
+         provider: this.provider,
+         modelId: this.modelId,
+         maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+         values
+       });
+     }
+     const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+       url: `${this.config.baseURL}/embeddings`,
+       headers: this.config.headers(),
+       body: {
+         model: this.modelId,
+         input: values,
+         encoding_format: "float",
+         dimensions: this.settings.dimensions,
+         user: this.settings.user
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+         openaiTextEmbeddingResponseSchema
+       ),
+       abortSignal
+     });
+     return {
+       embeddings: response.data.map((item) => item.embedding),
+       rawResponse: { headers: responseHeaders }
+     };
+   }
+ };
+ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
+   data: import_zod4.z.array(
+     import_zod4.z.object({
+       embedding: import_zod4.z.array(import_zod4.z.number())
+     })
+   )
+ });
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   OpenAIChatLanguageModel,
+   OpenAICompletionLanguageModel,
+   OpenAIEmbeddingModel
+ });
+ //# sourceMappingURL=index.js.map
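
Usage note (not part of the diff): the new internal entry point exports OpenAIChatLanguageModel, OpenAICompletionLanguageModel, and OpenAIEmbeddingModel directly. The sketch below shows how the chat model could be wired up by hand. The constructor signature and the config fields (provider, headers(), url(), compatibility) are taken from the bundle above; the "@ai-sdk/openai/internal" import specifier and the exact doGenerate call shape are assumptions based on the LanguageModelV1 specification that the class declares ("v1").

// Minimal TypeScript sketch; assumptions are marked inline.
import { OpenAIChatLanguageModel } from "@ai-sdk/openai/internal"; // assumed specifier

const model = new OpenAIChatLanguageModel(
  "gpt-4-turbo", // modelId
  { logprobs: 3, user: "my-app" }, // settings consumed by getArgs above
  {
    provider: "openai.chat",
    // "strict" makes doStream send stream_options: { include_usage: true }:
    compatibility: "strict",
    headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    // the chat model resolves its endpoint through config.url({ path, modelId }):
    url: ({ path }) => `https://api.openai.com/v1${path}`,
  },
);

// doGenerate follows the LanguageModelV1 call options (assumed shape):
const result = await model.doGenerate({
  inputFormat: "prompt",
  mode: { type: "regular" },
  prompt: [{ role: "user", content: [{ type: "text", text: "Hello!" }] }],
});
console.log(result.text, result.usage);

Note that the completion and embedding models build their endpoints from config.baseURL rather than config.url, so a hand-built config for those classes needs baseURL (e.g. "https://api.openai.com/v1") instead.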