@ai-sdk/openai-compatible 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,1074 @@
+ // src/openai-compatible-provider.ts
+ import { withoutTrailingSlash } from "@ai-sdk/provider-utils";
+
+ // src/openai-compatible-chat-language-model.ts
+ import {
+   InvalidResponseDataError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders,
+   createEventSourceResponseHandler,
+   createJsonResponseHandler,
+   generateId,
+   isParsableJson,
+   postJsonToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z2 } from "zod";
+
+ // src/convert-to-openai-compatible-chat-messages.ts
+ import {
+   UnsupportedFunctionalityError
+ } from "@ai-sdk/provider";
+ import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ function convertToOpenAICompatibleChatMessages(prompt) {
+   const messages = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         messages.push({ role: "system", content });
+         break;
+       }
+       case "user": {
+         if (content.length === 1 && content[0].type === "text") {
+           messages.push({ role: "user", content: content[0].text });
+           break;
+         }
+         messages.push({
+           role: "user",
+           content: content.map((part) => {
+             var _a;
+             switch (part.type) {
+               case "text": {
+                 return { type: "text", text: part.text };
+               }
+               case "image": {
+                 return {
+                   type: "image_url",
+                   image_url: {
+                     url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
+                   }
+                 };
+               }
+               case "file": {
+                 throw new UnsupportedFunctionalityError({
+                   functionality: "File content parts in user messages"
+                 });
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.args)
+                 }
+               });
+               break;
+             }
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: JSON.stringify(toolResponse.result)
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return messages;
+ }
+
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+   id,
+   model,
+   created
+ }) {
+   return {
+     id: id != null ? id : void 0,
+     modelId: model != null ? model : void 0,
+     timestamp: created != null ? new Date(created * 1e3) : void 0
+   };
+ }
+
+ // src/openai-compatible-error.ts
+ import { z } from "zod";
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+ var openaiCompatibleErrorDataSchema = z.object({
+   error: z.object({
+     message: z.string(),
+     // The additional information below is handled loosely to support
+     // OpenAI-compatible providers that have slightly different error
+     // responses:
+     type: z.string().nullish(),
+     param: z.any().nullish(),
+     code: z.union([z.string(), z.number()]).nullish()
+   })
+ });
+ var openaiCompatibleFailedResponseHandler = createJsonErrorResponseHandler({
+   errorSchema: openaiCompatibleErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ });
+
+ // src/openai-compatible-prepare-tools.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+ } from "@ai-sdk/provider";
+ function prepareTools({
+   mode
+ }) {
+   var _a;
+   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+   const toolWarnings = [];
+   if (tools == null) {
+     return { tools: void 0, tool_choice: void 0, toolWarnings };
+   }
+   const toolChoice = mode.toolChoice;
+   const openaiCompatTools = [];
+   for (const tool of tools) {
+     if (tool.type === "provider-defined") {
+       toolWarnings.push({ type: "unsupported-tool", tool });
+     } else {
+       openaiCompatTools.push({
+         type: "function",
+         function: {
+           name: tool.name,
+           description: tool.description,
+           parameters: tool.parameters
+         }
+       });
+     }
+   }
+   if (toolChoice == null) {
+     return { tools: openaiCompatTools, tool_choice: void 0, toolWarnings };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: openaiCompatTools, tool_choice: type, toolWarnings };
+     case "tool":
+       return {
+         tools: openaiCompatTools,
+         tool_choice: {
+           type: "function",
+           function: {
+             name: toolChoice.toolName
+           }
+         },
+         toolWarnings
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new UnsupportedFunctionalityError2({
+         functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+       });
+     }
+   }
+ }
+
+ // src/map-openai-compatible-finish-reason.ts
+ function mapOpenAICompatibleFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "unknown";
+   }
+ }
+
+ // src/openai-compatible-chat-language-model.ts
+ var OpenAICompatibleChatLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.supportsStructuredOutputs = false;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get defaultObjectGenerationMode() {
+     return this.config.defaultObjectGenerationMode;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences,
+     responseFormat,
+     seed,
+     stream
+   }) {
+     const type = mode.type;
+     const warnings = [];
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format schema is not supported"
+       });
+     }
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       stop: stopSequences,
+       seed,
+       // response format:
+       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
+       // messages:
+       messages: convertToOpenAICompatibleChatMessages(prompt)
+     };
+     switch (type) {
+       case "regular": {
+         const { tools, tool_choice, toolWarnings } = prepareTools({ mode });
+         return {
+           args: {
+             ...baseArgs,
+             tools,
+             tool_choice
+           },
+           warnings: [...warnings, ...toolWarnings]
+         };
+       }
+       case "object-json": {
+         return {
+           args: {
+             ...baseArgs,
+             response_format: { type: "json_object" }
+           },
+           warnings
+         };
+       }
+       case "object-tool": {
+         return {
+           args: {
+             ...baseArgs,
+             tool_choice: {
+               type: "function",
+               function: { name: mode.tool.name }
+             },
+             tools: [
+               {
+                 type: "function",
+                 function: {
+                   name: mode.tool.name,
+                   description: mode.tool.description,
+                   parameters: mode.tool.parameters
+                 }
+               }
+             ]
+           },
+           warnings
+         };
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f;
+     const { args, warnings } = this.getArgs({ ...options, stream: false });
+     const body = JSON.stringify(args);
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         OpenAICompatibleChatResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: (_a = choice.message.content) != null ? _a : void 0,
+       toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+         var _a2;
+         return {
+           toolCallType: "function",
+           toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+           toolName: toolCall.function.name,
+           args: toolCall.function.arguments
+         };
+       }),
+       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
+       usage: {
+         promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : NaN,
+         completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : NaN
+       },
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       response: getResponseMetadata(response),
+       warnings,
+       request: { body }
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = this.getArgs({ ...options, stream: true });
+     const body = JSON.stringify({ ...args, stream: true });
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: {
+         ...args,
+         stream: true
+       },
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler(
+         OpenAICompatibleChatChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const toolCalls = [];
+     let finishReason = "unknown";
+     let usage = {
+       promptTokens: void 0,
+       completionTokens: void 0
+     };
+     let isFirstChunk = true;
+     let providerMetadata;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error.message });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
+                 completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAICompatibleFinishReason(
+                 choice.finish_reason
+               );
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.content != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: delta.content
+               });
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                     }
+                   };
+                   const toolCall2 = toolCalls[index];
+                   if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+                     if (toolCall2.function.arguments.length > 0) {
+                       controller.enqueue({
+                         type: "tool-call-delta",
+                         toolCallType: "function",
+                         toolCallId: toolCall2.id,
+                         toolName: toolCall2.function.name,
+                         argsTextDelta: toolCall2.function.arguments
+                       });
+                     }
+                     if (isParsableJson(toolCall2.function.arguments)) {
+                       controller.enqueue({
+                         type: "tool-call",
+                         toolCallType: "function",
+                         toolCallId: (_g = toolCall2.id) != null ? _g : generateId(),
+                         toolName: toolCall2.function.name,
+                         args: toolCall2.function.arguments
+                       });
+                     }
+                   }
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
+                   toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: toolCall.id,
+                   toolName: toolCall.function.name,
+                   argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                 });
+                 if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallType: "function",
+                     toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
+                     toolName: toolCall.function.name,
+                     args: toolCall.function.arguments
+                   });
+                 }
+               }
+             }
+           },
+           flush(controller) {
+             var _a, _b;
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage: {
+                 promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
+                 completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+               },
+               ...providerMetadata != null ? { providerMetadata } : {}
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings,
+       request: { body }
+     };
+   }
+ };
+ var OpenAICompatibleChatResponseSchema = z2.object({
+   id: z2.string().nullish(),
+   created: z2.number().nullish(),
+   model: z2.string().nullish(),
+   choices: z2.array(
+     z2.object({
+       message: z2.object({
+         role: z2.literal("assistant").nullish(),
+         content: z2.string().nullish(),
+         tool_calls: z2.array(
+           z2.object({
+             id: z2.string().nullish(),
+             type: z2.literal("function"),
+             function: z2.object({
+               name: z2.string(),
+               arguments: z2.string()
+             })
+           })
+         ).nullish()
+       }),
+       index: z2.number(),
+       finish_reason: z2.string().nullish()
+     })
+   ),
+   usage: z2.object({
+     prompt_tokens: z2.number().nullish(),
+     completion_tokens: z2.number().nullish()
+   }).nullish()
+ });
+ var OpenAICompatibleChatChunkSchema = z2.union([
+   z2.object({
+     id: z2.string().nullish(),
+     created: z2.number().nullish(),
+     model: z2.string().nullish(),
+     choices: z2.array(
+       z2.object({
+         delta: z2.object({
+           role: z2.enum(["assistant"]).nullish(),
+           content: z2.string().nullish(),
+           tool_calls: z2.array(
+             z2.object({
+               index: z2.number(),
+               id: z2.string().nullish(),
+               type: z2.literal("function").optional(),
+               function: z2.object({
+                 name: z2.string().nullish(),
+                 arguments: z2.string().nullish()
+               })
+             })
+           ).nullish()
+         }).nullish(),
+         finish_reason: z2.string().nullable().optional(),
+         index: z2.number()
+       })
+     ),
+     usage: z2.object({
+       prompt_tokens: z2.number().nullish(),
+       completion_tokens: z2.number().nullish()
+     }).nullish()
+   }),
+   openaiCompatibleErrorDataSchema
+ ]);
+
+ // src/openai-compatible-completion-language-model.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders as combineHeaders2,
+   createEventSourceResponseHandler as createEventSourceResponseHandler2,
+   createJsonResponseHandler as createJsonResponseHandler2,
+   postJsonToApi as postJsonToApi2
+ } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod";
+
+ // src/convert-to-openai-compatible-completion-prompt.ts
+ import {
+   InvalidPromptError,
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ } from "@ai-sdk/provider";
+ function convertToOpenAICompatibleCompletionPrompt({
+   prompt,
+   inputFormat,
+   user = "user",
+   assistant = "assistant"
+ }) {
+   if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
+     return { prompt: prompt[0].content[0].text };
+   }
+   let text = "";
+   if (prompt[0].role === "system") {
+     text += `${prompt[0].content}
+
+ `;
+     prompt = prompt.slice(1);
+   }
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         throw new InvalidPromptError({
+           message: `Unexpected system message in prompt: ${content}`,
+           prompt
+         });
+       }
+       case "user": {
+         const userMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "image": {
+               throw new UnsupportedFunctionalityError3({
+                 functionality: "images"
+               });
+             }
+           }
+         }).join("");
+         text += `${user}:
+ ${userMessage}
+
+ `;
+         break;
+       }
+       case "assistant": {
+         const assistantMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "tool-call": {
+               throw new UnsupportedFunctionalityError3({
+                 functionality: "tool-call messages"
+               });
+             }
+           }
+         }).join("");
+         text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+         break;
+       }
+       case "tool": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "tool messages"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   text += `${assistant}:
+ `;
+   return {
+     prompt: text,
+     stopSequences: [`
+ ${user}:`]
+   };
+ }
+
+ // src/openai-compatible-completion-language-model.ts
+ var OpenAICompatibleCompletionLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = void 0;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     inputFormat,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences: userStopSequences,
+     responseFormat,
+     seed
+   }) {
+     var _a;
+     const type = mode.type;
+     const warnings = [];
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if (responseFormat != null && responseFormat.type !== "text") {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format is not supported."
+       });
+     }
+     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
+     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       echo: this.settings.echo,
+       logit_bias: this.settings.logitBias,
+       suffix: this.settings.suffix,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       // prompt:
+       prompt: completionPrompt,
+       // stop sequences:
+       stop: stop.length > 0 ? stop : void 0
+     };
+     switch (type) {
+       case "regular": {
+         if ((_a = mode.tools) == null ? void 0 : _a.length) {
+           throw new UnsupportedFunctionalityError4({
+             functionality: "tools"
+           });
+         }
+         if (mode.toolChoice) {
+           throw new UnsupportedFunctionalityError4({
+             functionality: "toolChoice"
+           });
+         }
+         return { args: baseArgs, warnings };
+       }
+       case "object-json": {
+         throw new UnsupportedFunctionalityError4({
+           functionality: "object-json mode"
+         });
+       }
+       case "object-tool": {
+         throw new UnsupportedFunctionalityError4({
+           functionality: "object-tool mode"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d;
+     const { args, warnings } = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler2(
+         openaiCompatibleCompletionResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: choice.text,
+       usage: {
+         promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
+         completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+       },
+       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       response: getResponseMetadata(response),
+       warnings,
+       request: { body: JSON.stringify(args) }
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = this.getArgs(options);
+     const body = {
+       ...args,
+       stream: true
+     };
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler2(
+         openaiCompatibleCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     let finishReason = "unknown";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let isFirstChunk = true;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAICompatibleFinishReason(
+                 choice.finish_reason
+               );
+             }
+             if ((choice == null ? void 0 : choice.text) != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: choice.text
+               });
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings,
+       request: { body: JSON.stringify(body) }
+     };
+   }
+ };
+ var openaiCompatibleCompletionResponseSchema = z3.object({
+   id: z3.string().nullish(),
+   created: z3.number().nullish(),
+   model: z3.string().nullish(),
+   choices: z3.array(
+     z3.object({
+       text: z3.string(),
+       finish_reason: z3.string()
+     })
+   ),
+   usage: z3.object({
+     prompt_tokens: z3.number(),
+     completion_tokens: z3.number()
+   }).nullish()
+ });
+ var openaiCompatibleCompletionChunkSchema = z3.union([
+   z3.object({
+     id: z3.string().nullish(),
+     created: z3.number().nullish(),
+     model: z3.string().nullish(),
+     choices: z3.array(
+       z3.object({
+         text: z3.string(),
+         finish_reason: z3.string().nullish(),
+         index: z3.number()
+       })
+     ),
+     usage: z3.object({
+       prompt_tokens: z3.number(),
+       completion_tokens: z3.number()
+     }).nullish()
+   }),
+   openaiCompatibleErrorDataSchema
+ ]);
+
+ // src/openai-compatible-embedding-model.ts
+ import {
+   TooManyEmbeddingValuesForCallError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders as combineHeaders3,
+   createJsonResponseHandler as createJsonResponseHandler3,
+   postJsonToApi as postJsonToApi3
+ } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod";
+ var OpenAICompatibleEmbeddingModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   get maxEmbeddingsPerCall() {
+     var _a;
+     return (_a = this.config.maxEmbeddingsPerCall) != null ? _a : 2048;
+   }
+   get supportsParallelCalls() {
+     var _a;
+     return (_a = this.config.supportsParallelCalls) != null ? _a : true;
+   }
+   async doEmbed({
+     values,
+     headers,
+     abortSignal
+   }) {
+     if (values.length > this.maxEmbeddingsPerCall) {
+       throw new TooManyEmbeddingValuesForCallError({
+         provider: this.provider,
+         modelId: this.modelId,
+         maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+         values
+       });
+     }
+     const { responseHeaders, value: response } = await postJsonToApi3({
+       url: this.config.url({
+         path: "/embeddings",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders3(this.config.headers(), headers),
+       body: {
+         model: this.modelId,
+         input: values,
+         encoding_format: "float",
+         dimensions: this.settings.dimensions,
+         user: this.settings.user
+       },
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler3(
+         openaiTextEmbeddingResponseSchema
+       ),
+       abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       embeddings: response.data.map((item) => item.embedding),
+       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
+       rawResponse: { headers: responseHeaders }
+     };
+   }
+ };
+ var openaiTextEmbeddingResponseSchema = z4.object({
+   data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
+   usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ });
+
+ // src/openai-compatible-provider.ts
+ function createOpenAICompatible(options) {
+   if (!options.baseURL) {
+     throw new Error("Base URL is required");
+   }
+   const baseURL = withoutTrailingSlash(options.baseURL);
+   if (!options.name) {
+     throw new Error("Provider name is required");
+   }
+   const providerName = options.name;
+   const getCommonModelConfig = (modelType) => ({
+     provider: `${providerName}.${modelType}`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: () => {
+       var _a;
+       return (_a = options.headers) != null ? _a : {};
+     },
+     fetch: options.fetch
+   });
+   const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
+   const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
+     ...getCommonModelConfig("chat"),
+     defaultObjectGenerationMode: "tool"
+   });
+   const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+     modelId,
+     settings,
+     getCommonModelConfig("completion")
+   );
+   const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+     modelId,
+     settings,
+     getCommonModelConfig("embedding")
+   );
+   const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+   provider.languageModel = createLanguageModel;
+   provider.chatModel = createChatModel;
+   provider.completionModel = createCompletionModel;
+   provider.textEmbeddingModel = createEmbeddingModel;
+   return provider;
+ }
+ export {
+   OpenAICompatibleChatLanguageModel,
+   OpenAICompatibleCompletionLanguageModel,
+   OpenAICompatibleEmbeddingModel,
+   createOpenAICompatible
+ };
+ //# sourceMappingURL=index.mjs.map
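
Usage note: for orientation, a minimal sketch of how the exported createOpenAICompatible factory is called. It exercises only what the diff above shows; the provider name, base URL, model IDs, and environment variable are illustrative placeholders, not values shipped with the package.

import { createOpenAICompatible } from "@ai-sdk/openai-compatible";

// name and baseURL are required (createOpenAICompatible throws otherwise);
// headers and fetch are optional and passed through to every request.
// The endpoint and API key below are placeholder assumptions.
const provider = createOpenAICompatible({
  name: "example",
  baseURL: "http://localhost:1234/v1",
  headers: { Authorization: `Bearer ${process.env.EXAMPLE_API_KEY ?? ""}` }
});

// Calling the provider (or provider.languageModel / provider.chatModel)
// returns a chat model configured with defaultObjectGenerationMode "tool".
const chatModel = provider("example-chat-model");

// Completion and embedding models have their own factories.
const completionModel = provider.completionModel("example-completion-model");
const embeddingModel = provider.textEmbeddingModel("example-embedding-model");

All returned models declare specificationVersion "v1", so they plug into tooling that consumes that language/embedding model interface.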