@ai-sdk/openai-compatible 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1075 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var src_exports = {};
+ __export(src_exports, {
+   OpenAICompatibleChatLanguageModel: () => OpenAICompatibleChatLanguageModel,
+   OpenAICompatibleCompletionLanguageModel: () => OpenAICompatibleCompletionLanguageModel,
+   OpenAICompatibleEmbeddingModel: () => OpenAICompatibleEmbeddingModel,
+   createOpenAICompatible: () => createOpenAICompatible
+ });
+ module.exports = __toCommonJS(src_exports);
+
+ // src/openai-compatible-provider.ts
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+
+ // src/openai-compatible-chat-language-model.ts
+ var import_provider3 = require("@ai-sdk/provider");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod2 = require("zod");
+
+ // src/convert-to-openai-compatible-chat-messages.ts
+ var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+ function convertToOpenAICompatibleChatMessages(prompt) {
+   const messages = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         messages.push({ role: "system", content });
+         break;
+       }
+       case "user": {
+         if (content.length === 1 && content[0].type === "text") {
+           messages.push({ role: "user", content: content[0].text });
+           break;
+         }
+         messages.push({
+           role: "user",
+           content: content.map((part) => {
+             var _a;
+             switch (part.type) {
+               case "text": {
+                 return { type: "text", text: part.text };
+               }
+               case "image": {
+                 return {
+                   type: "image_url",
+                   image_url: {
+                     url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`
+                   }
+                 };
+               }
+               case "file": {
+                 throw new import_provider.UnsupportedFunctionalityError({
+                   functionality: "File content parts in user messages"
+                 });
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.args)
+                 }
+               });
+               break;
+             }
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: JSON.stringify(toolResponse.result)
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return messages;
+ }
+
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+   id,
+   model,
+   created
+ }) {
+   return {
+     id: id != null ? id : void 0,
+     modelId: model != null ? model : void 0,
+     timestamp: created != null ? new Date(created * 1e3) : void 0
+   };
+ }
+
+ // src/openai-compatible-error.ts
+ var import_zod = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var openaiCompatibleErrorDataSchema = import_zod.z.object({
+   error: import_zod.z.object({
+     message: import_zod.z.string(),
+     // The additional information below is handled loosely to support
+     // OpenAI-compatible providers that have slightly different error
+     // responses:
+     type: import_zod.z.string().nullish(),
+     param: import_zod.z.any().nullish(),
+     code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+   })
+ });
+ var openaiCompatibleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
+   errorSchema: openaiCompatibleErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ });
+
+ // src/openai-compatible-prepare-tools.ts
+ var import_provider2 = require("@ai-sdk/provider");
+ function prepareTools({
+   mode
+ }) {
+   var _a;
+   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+   const toolWarnings = [];
+   if (tools == null) {
+     return { tools: void 0, tool_choice: void 0, toolWarnings };
+   }
+   const toolChoice = mode.toolChoice;
+   const openaiCompatTools = [];
+   for (const tool of tools) {
+     if (tool.type === "provider-defined") {
+       toolWarnings.push({ type: "unsupported-tool", tool });
+     } else {
+       openaiCompatTools.push({
+         type: "function",
+         function: {
+           name: tool.name,
+           description: tool.description,
+           parameters: tool.parameters
+         }
+       });
+     }
+   }
+   if (toolChoice == null) {
+     return { tools: openaiCompatTools, tool_choice: void 0, toolWarnings };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: openaiCompatTools, tool_choice: type, toolWarnings };
+     case "tool":
+       return {
+         tools: openaiCompatTools,
+         tool_choice: {
+           type: "function",
+           function: {
+             name: toolChoice.toolName
+           }
+         },
+         toolWarnings
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new import_provider2.UnsupportedFunctionalityError({
+         functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+       });
+     }
+   }
+ }
+
+ // src/map-openai-compatible-finish-reason.ts
+ function mapOpenAICompatibleFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "unknown";
+   }
+ }
+
+ // src/openai-compatible-chat-language-model.ts
+ var OpenAICompatibleChatLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.supportsStructuredOutputs = false;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get defaultObjectGenerationMode() {
+     return this.config.defaultObjectGenerationMode;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences,
+     responseFormat,
+     seed,
+     stream
+   }) {
+     const type = mode.type;
+     const warnings = [];
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format schema is not supported"
+       });
+     }
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       stop: stopSequences,
+       seed,
+       // response format:
+       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
+       // messages:
+       messages: convertToOpenAICompatibleChatMessages(prompt)
+     };
+     switch (type) {
+       case "regular": {
+         const { tools, tool_choice, toolWarnings } = prepareTools({ mode });
+         return {
+           args: {
+             ...baseArgs,
+             tools,
+             tool_choice
+           },
+           warnings: [...warnings, ...toolWarnings]
+         };
+       }
+       case "object-json": {
+         return {
+           args: {
+             ...baseArgs,
+             response_format: { type: "json_object" }
+           },
+           warnings
+         };
+       }
+       case "object-tool": {
+         return {
+           args: {
+             ...baseArgs,
+             tool_choice: {
+               type: "function",
+               function: { name: mode.tool.name }
+             },
+             tools: [
+               {
+                 type: "function",
+                 function: {
+                   name: mode.tool.name,
+                   description: mode.tool.description,
+                   parameters: mode.tool.parameters
+                 }
+               }
+             ]
+           },
+           warnings
+         };
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f;
+     const { args, warnings } = this.getArgs({ ...options, stream: false });
+     const body = JSON.stringify(args);
+     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+         OpenAICompatibleChatResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: (_a = choice.message.content) != null ? _a : void 0,
+       toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+         var _a2;
+         return {
+           toolCallType: "function",
+           toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
+           toolName: toolCall.function.name,
+           args: toolCall.function.arguments
+         };
+       }),
+       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
+       usage: {
+         promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : NaN,
+         completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : NaN
+       },
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       response: getResponseMetadata(response),
+       warnings,
+       request: { body }
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = this.getArgs({ ...options, stream: true });
+     const body = JSON.stringify({ ...args, stream: true });
+     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+       body: {
+         ...args,
+         stream: true
+       },
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+         OpenAICompatibleChatChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { messages: rawPrompt, ...rawSettings } = args;
+     const toolCalls = [];
+     let finishReason = "unknown";
+     let usage = {
+       promptTokens: void 0,
+       completionTokens: void 0
+     };
+     let isFirstChunk = true;
+     let providerMetadata;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error.message });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
+                 completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAICompatibleFinishReason(
+                 choice.finish_reason
+               );
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.content != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: delta.content
+               });
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new import_provider3.InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new import_provider3.InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+                     throw new import_provider3.InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                     }
+                   };
+                   const toolCall2 = toolCalls[index];
+                   if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+                     if (toolCall2.function.arguments.length > 0) {
+                       controller.enqueue({
+                         type: "tool-call-delta",
+                         toolCallType: "function",
+                         toolCallId: toolCall2.id,
+                         toolName: toolCall2.function.name,
+                         argsTextDelta: toolCall2.function.arguments
+                       });
+                     }
+                     if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
+                       controller.enqueue({
+                         type: "tool-call",
+                         toolCallType: "function",
+                         toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
+                         toolName: toolCall2.function.name,
+                         args: toolCall2.function.arguments
+                       });
+                     }
+                   }
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
+                   toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: toolCall.id,
+                   toolName: toolCall.function.name,
+                   argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                 });
+                 if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallType: "function",
+                     toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
+                     toolName: toolCall.function.name,
+                     args: toolCall.function.arguments
+                   });
+                 }
+               }
+             }
+           },
+           flush(controller) {
+             var _a, _b;
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage: {
+                 promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
+                 completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+               },
+               ...providerMetadata != null ? { providerMetadata } : {}
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings,
+       request: { body }
+     };
+   }
+ };
+ var OpenAICompatibleChatResponseSchema = import_zod2.z.object({
+   id: import_zod2.z.string().nullish(),
+   created: import_zod2.z.number().nullish(),
+   model: import_zod2.z.string().nullish(),
+   choices: import_zod2.z.array(
+     import_zod2.z.object({
+       message: import_zod2.z.object({
+         role: import_zod2.z.literal("assistant").nullish(),
+         content: import_zod2.z.string().nullish(),
+         tool_calls: import_zod2.z.array(
+           import_zod2.z.object({
+             id: import_zod2.z.string().nullish(),
+             type: import_zod2.z.literal("function"),
+             function: import_zod2.z.object({
+               name: import_zod2.z.string(),
+               arguments: import_zod2.z.string()
+             })
+           })
+         ).nullish()
+       }),
+       index: import_zod2.z.number(),
+       finish_reason: import_zod2.z.string().nullish()
+     })
+   ),
+   usage: import_zod2.z.object({
+     prompt_tokens: import_zod2.z.number().nullish(),
+     completion_tokens: import_zod2.z.number().nullish()
+   }).nullish()
+ });
+ var OpenAICompatibleChatChunkSchema = import_zod2.z.union([
+   import_zod2.z.object({
+     id: import_zod2.z.string().nullish(),
+     created: import_zod2.z.number().nullish(),
+     model: import_zod2.z.string().nullish(),
+     choices: import_zod2.z.array(
+       import_zod2.z.object({
+         delta: import_zod2.z.object({
+           role: import_zod2.z.enum(["assistant"]).nullish(),
+           content: import_zod2.z.string().nullish(),
+           tool_calls: import_zod2.z.array(
+             import_zod2.z.object({
+               index: import_zod2.z.number(),
+               id: import_zod2.z.string().nullish(),
+               type: import_zod2.z.literal("function").optional(),
+               function: import_zod2.z.object({
+                 name: import_zod2.z.string().nullish(),
+                 arguments: import_zod2.z.string().nullish()
+               })
+             })
+           ).nullish()
+         }).nullish(),
+         finish_reason: import_zod2.z.string().nullable().optional(),
+         index: import_zod2.z.number()
+       })
+     ),
+     usage: import_zod2.z.object({
+       prompt_tokens: import_zod2.z.number().nullish(),
+       completion_tokens: import_zod2.z.number().nullish()
+     }).nullish()
+   }),
+   openaiCompatibleErrorDataSchema
+ ]);
+
+ // src/openai-compatible-completion-language-model.ts
+ var import_provider5 = require("@ai-sdk/provider");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");
+
+ // src/convert-to-openai-compatible-completion-prompt.ts
+ var import_provider4 = require("@ai-sdk/provider");
+ function convertToOpenAICompatibleCompletionPrompt({
+   prompt,
+   inputFormat,
+   user = "user",
+   assistant = "assistant"
+ }) {
+   if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
+     return { prompt: prompt[0].content[0].text };
+   }
+   let text = "";
+   if (prompt[0].role === "system") {
+     text += `${prompt[0].content}
+
+ `;
+     prompt = prompt.slice(1);
+   }
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         throw new import_provider4.InvalidPromptError({
+           message: `Unexpected system message in prompt: ${content}`,
+           prompt
+         });
+       }
+       case "user": {
+         const userMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "image": {
+               throw new import_provider4.UnsupportedFunctionalityError({
+                 functionality: "images"
+               });
+             }
+           }
+         }).join("");
+         text += `${user}:
+ ${userMessage}
+
+ `;
+         break;
+       }
+       case "assistant": {
+         const assistantMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "tool-call": {
+               throw new import_provider4.UnsupportedFunctionalityError({
+                 functionality: "tool-call messages"
+               });
+             }
+           }
+         }).join("");
+         text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+         break;
+       }
+       case "tool": {
+         throw new import_provider4.UnsupportedFunctionalityError({
+           functionality: "tool messages"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   text += `${assistant}:
+ `;
+   return {
+     prompt: text,
+     stopSequences: [`
+ ${user}:`]
+   };
+ }
+
+ // src/openai-compatible-completion-language-model.ts
+ var OpenAICompatibleCompletionLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = void 0;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     inputFormat,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences: userStopSequences,
+     responseFormat,
+     seed
+   }) {
+     var _a;
+     const type = mode.type;
+     const warnings = [];
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if (responseFormat != null && responseFormat.type !== "text") {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format is not supported."
+       });
+     }
+     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
+     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       echo: this.settings.echo,
+       logit_bias: this.settings.logitBias,
+       suffix: this.settings.suffix,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       // prompt:
+       prompt: completionPrompt,
+       // stop sequences:
+       stop: stop.length > 0 ? stop : void 0
+     };
+     switch (type) {
+       case "regular": {
+         if ((_a = mode.tools) == null ? void 0 : _a.length) {
+           throw new import_provider5.UnsupportedFunctionalityError({
+             functionality: "tools"
+           });
+         }
+         if (mode.toolChoice) {
+           throw new import_provider5.UnsupportedFunctionalityError({
+             functionality: "toolChoice"
+           });
+         }
+         return { args: baseArgs, warnings };
+       }
+       case "object-json": {
+         throw new import_provider5.UnsupportedFunctionalityError({
+           functionality: "object-json mode"
+         });
+       }
+       case "object-tool": {
+         throw new import_provider5.UnsupportedFunctionalityError({
+           functionality: "object-tool mode"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = type;
+         throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d;
+     const { args, warnings } = this.getArgs(options);
+     const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+         openaiCompatibleCompletionResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     const choice = response.choices[0];
+     return {
+       text: choice.text,
+       usage: {
+         promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
+         completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+       },
+       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       response: getResponseMetadata(response),
+       warnings,
+       request: { body: JSON.stringify(args) }
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = this.getArgs(options);
+     const body = {
+       ...args,
+       stream: true
+     };
+     const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
+         openaiCompatibleCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const { prompt: rawPrompt, ...rawSettings } = args;
+     let finishReason = "unknown";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let isFirstChunk = true;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAICompatibleFinishReason(
+                 choice.finish_reason
+               );
+             }
+             if ((choice == null ? void 0 : choice.text) != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: choice.text
+               });
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings,
+       request: { body: JSON.stringify(body) }
+     };
+   }
+ };
+ var openaiCompatibleCompletionResponseSchema = import_zod3.z.object({
+   id: import_zod3.z.string().nullish(),
+   created: import_zod3.z.number().nullish(),
+   model: import_zod3.z.string().nullish(),
+   choices: import_zod3.z.array(
+     import_zod3.z.object({
+       text: import_zod3.z.string(),
+       finish_reason: import_zod3.z.string()
+     })
+   ),
+   usage: import_zod3.z.object({
+     prompt_tokens: import_zod3.z.number(),
+     completion_tokens: import_zod3.z.number()
+   }).nullish()
+ });
+ var openaiCompatibleCompletionChunkSchema = import_zod3.z.union([
+   import_zod3.z.object({
+     id: import_zod3.z.string().nullish(),
+     created: import_zod3.z.number().nullish(),
+     model: import_zod3.z.string().nullish(),
+     choices: import_zod3.z.array(
+       import_zod3.z.object({
+         text: import_zod3.z.string(),
+         finish_reason: import_zod3.z.string().nullish(),
+         index: import_zod3.z.number()
+       })
+     ),
+     usage: import_zod3.z.object({
+       prompt_tokens: import_zod3.z.number(),
+       completion_tokens: import_zod3.z.number()
+     }).nullish()
+   }),
+   openaiCompatibleErrorDataSchema
+ ]);
+
+ // src/openai-compatible-embedding-model.ts
+ var import_provider6 = require("@ai-sdk/provider");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_zod4 = require("zod");
+ var OpenAICompatibleEmbeddingModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   get maxEmbeddingsPerCall() {
+     var _a;
+     return (_a = this.config.maxEmbeddingsPerCall) != null ? _a : 2048;
+   }
+   get supportsParallelCalls() {
+     var _a;
+     return (_a = this.config.supportsParallelCalls) != null ? _a : true;
+   }
+   async doEmbed({
+     values,
+     headers,
+     abortSignal
+   }) {
+     if (values.length > this.maxEmbeddingsPerCall) {
+       throw new import_provider6.TooManyEmbeddingValuesForCallError({
+         provider: this.provider,
+         modelId: this.modelId,
+         maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+         values
+       });
+     }
+     const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+       url: this.config.url({
+         path: "/embeddings",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+       body: {
+         model: this.modelId,
+         input: values,
+         encoding_format: "float",
+         dimensions: this.settings.dimensions,
+         user: this.settings.user
+       },
+       failedResponseHandler: openaiCompatibleFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+         openaiTextEmbeddingResponseSchema
+       ),
+       abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       embeddings: response.data.map((item) => item.embedding),
+       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
+       rawResponse: { headers: responseHeaders }
+     };
+   }
+ };
+ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
+   data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
+   usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ });
+
+ // src/openai-compatible-provider.ts
+ function createOpenAICompatible(options) {
+   if (!options.baseURL) {
+     throw new Error("Base URL is required");
+   }
+   const baseURL = (0, import_provider_utils6.withoutTrailingSlash)(options.baseURL);
+   if (!options.name) {
+     throw new Error("Provider name is required");
+   }
+   const providerName = options.name;
+   const getCommonModelConfig = (modelType) => ({
+     provider: `${providerName}.${modelType}`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: () => {
+       var _a;
+       return (_a = options.headers) != null ? _a : {};
+     },
+     fetch: options.fetch
+   });
+   const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
+   const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
+     ...getCommonModelConfig("chat"),
+     defaultObjectGenerationMode: "tool"
+   });
+   const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+     modelId,
+     settings,
+     getCommonModelConfig("completion")
+   );
+   const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+     modelId,
+     settings,
+     getCommonModelConfig("embedding")
+   );
+   const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+   provider.languageModel = createLanguageModel;
+   provider.chatModel = createChatModel;
+   provider.completionModel = createCompletionModel;
+   provider.textEmbeddingModel = createEmbeddingModel;
+   return provider;
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   OpenAICompatibleChatLanguageModel,
+   OpenAICompatibleCompletionLanguageModel,
+   OpenAICompatibleEmbeddingModel,
+   createOpenAICompatible
+ });
+ //# sourceMappingURL=index.js.map
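
For orientation, a minimal usage sketch of the API exported above. The provider name, base URL, model ids, and key are placeholder values; this version has no dedicated API-key option, so authentication goes through the headers option.

  // Minimal sketch: point the provider at any OpenAI-compatible endpoint.
  const { createOpenAICompatible } = require("@ai-sdk/openai-compatible");

  const provider = createOpenAICompatible({
    name: "example-provider", // required; becomes the provider id prefix, e.g. "example-provider.chat"
    baseURL: "https://api.example.com/v1", // required; a trailing slash is stripped
    headers: { Authorization: "Bearer EXAMPLE_KEY" }, // optional; sent with every request
  });

  // The model factories share the same config and differ only in the endpoint they call:
  const chatModel = provider.chatModel("example-chat-model"); // POSTs to <baseURL>/chat/completions
  const completionModel = provider.completionModel("example-completion-model"); // POSTs to <baseURL>/completions
  const embeddingModel = provider.textEmbeddingModel("example-embedding-model"); // POSTs to <baseURL>/embeddings

Calling the provider directly, as in provider("example-chat-model"), is shorthand for provider.languageModel(...), which returns a chat model with defaultObjectGenerationMode set to "tool".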