@ai-sdk/openai-compatible 0.2.14 → 1.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -9,18 +9,18 @@ import {
   createJsonResponseHandler,
   generateId,
   isParsableJson,
+  parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";
 
 // src/convert-to-openai-compatible-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
 function getOpenAIMetadata(message) {
   var _a, _b;
-  return (_b = (_a = message == null ? void 0 : message.providerMetadata) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
+  return (_b = (_a = message == null ? void 0 : message.providerOptions) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
 }
 function convertToOpenAICompatibleChatMessages(prompt) {
   const messages = [];
@@ -43,25 +43,26 @@ function convertToOpenAICompatibleChatMessages(prompt) {
       messages.push({
         role: "user",
         content: content.map((part) => {
-          var _a;
           const partMetadata = getOpenAIMetadata(part);
           switch (part.type) {
             case "text": {
               return { type: "text", text: part.text, ...partMetadata };
             }
-            case "image": {
-              return {
-                type: "image_url",
-                image_url: {
-                  url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
-                },
-                ...partMetadata
-              };
-            }
             case "file": {
-              throw new UnsupportedFunctionalityError({
-                functionality: "File content parts in user messages"
-              });
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "image_url",
+                  image_url: {
+                    url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+                  },
+                  ...partMetadata
+                };
+              } else {
+                throw new UnsupportedFunctionalityError({
+                  functionality: `file part media type ${part.mediaType}`
+                });
+              }
             }
           }
         }),
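
Note on the hunk above: the 1.0.0-alpha converter no longer accepts dedicated `image` parts; images arrive as `file` parts carrying `mediaType` and `data`, with base64 data as a string rather than a `Uint8Array`. A minimal sketch of the prompt shape the new code path handles (all values illustrative):

```js
const prompt = [
  {
    role: "user",
    content: [
      { type: "text", text: "Describe this image." },
      {
        type: "file",
        mediaType: "image/png", // "image/*" falls back to "image/jpeg"
        data: "iVBORw0KGgo...", // base64 string, or a URL instance
      },
    ],
  },
];
```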
@@ -152,17 +153,31 @@ function mapOpenAICompatibleFinishReason(finishReason) {
   }
 }
 
-// src/openai-compatible-error.ts
+// src/openai-compatible-chat-options.ts
 import { z } from "zod";
-var openaiCompatibleErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
+var openaiCompatibleProviderOptions = z.object({
+  /**
+   * A unique identifier representing your end-user, which can help the provider to
+   * monitor and detect abuse.
+   */
+  user: z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: z.enum(["low", "medium", "high"]).optional()
+});
+
+// src/openai-compatible-error.ts
+import { z as z2 } from "zod";
+var openaiCompatibleErrorDataSchema = z2.object({
+  error: z2.object({
+    message: z2.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type: z.string().nullish(),
-    param: z.any().nullish(),
-    code: z.union([z.string(), z.number()]).nullish()
+    type: z2.string().nullish(),
+    param: z2.any().nullish(),
+    code: z2.union([z2.string(), z2.number()]).nullish()
   })
 });
 var defaultOpenAICompatibleErrorStructure = {
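
The former constructor-level chat `settings` (`user`, `reasoningEffort`) are now declared by this `openaiCompatibleProviderOptions` schema and supplied per call. A hedged sketch of the new call-site shape, assuming the AI SDK's `generateText`; the provider name `example` is hypothetical and doubles as the provider-specific options key:

```js
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
import { generateText } from "ai";

// Hypothetical provider; "example" becomes the provider-specific options key.
const example = createOpenAICompatible({
  name: "example",
  baseURL: "https://api.example.com/v1",
});

const { text } = await generateText({
  model: example("my-model-id"),
  prompt: "Hello!",
  providerOptions: {
    // parsed under the generic key...
    "openai-compatible": { reasoningEffort: "low" },
    // ...and under the provider-specific key, which wins on conflict
    example: { user: "user-1234" },
  },
});
```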
@@ -175,16 +190,14 @@ import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
 function prepareTools({
-  mode,
-  structuredOutputs
+  tools,
+  toolChoice
 }) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings };
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
-  const toolChoice = mode.toolChoice;
   const openaiCompatTools = [];
   for (const tool of tools) {
     if (tool.type === "provider-defined") {
@@ -201,29 +214,27 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiCompatTools, tool_choice: void 0, toolWarnings };
+    return { tools: openaiCompatTools, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiCompatTools, tool_choice: type, toolWarnings };
+      return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
     case "tool":
       return {
         tools: openaiCompatTools,
-        tool_choice: {
+        toolChoice: {
           type: "function",
-          function: {
-            name: toolChoice.toolName
-          }
+          function: { name: toolChoice.toolName }
         },
         toolWarnings
       };
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -232,11 +243,10 @@ function prepareTools({
 // src/openai-compatible-chat-language-model.ts
 var OpenAICompatibleChatLanguageModel = class {
   // type inferred via constructor
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     var _a, _b;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
     const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
     this.chunkSchema = createOpenAICompatibleChatChunkSchema(
@@ -245,37 +255,47 @@ var OpenAICompatibleChatLanguageModel = class {
     this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
     this.supportsStructuredOutputs = (_b = config.supportsStructuredOutputs) != null ? _b : false;
   }
-  get defaultObjectGenerationMode() {
-    return this.config.defaultObjectGenerationMode;
-  }
   get provider() {
     return this.config.provider;
   }
   get providerOptionsName() {
     return this.config.provider.split(".")[0].trim();
   }
-  getArgs({
-    mode,
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
+  async getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
     frequencyPenalty,
     presencePenalty,
-    providerMetadata,
+    providerOptions,
     stopSequences,
     responseFormat,
-    seed
+    seed,
+    toolChoice,
+    tools
   }) {
-    var _a, _b, _c, _d, _e;
-    const type = mode.type;
+    var _a, _b, _c;
     const warnings = [];
+    const compatibleOptions = Object.assign(
+      (_a = await parseProviderOptions({
+        provider: "openai-compatible",
+        providerOptions,
+        schema: openaiCompatibleProviderOptions
+      })) != null ? _a : {},
+      (_b = await parseProviderOptions({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompatibleProviderOptions
+      })) != null ? _b : {}
+    );
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
     if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
       warnings.push({
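
For reference, `Object.assign` in the hunk above gives the second `parseProviderOptions` lookup precedence, so an option set under the provider-specific key overrides the same option set under `"openai-compatible"`. Plain `Object.assign` semantics, shown with illustrative values:

```js
const merged = Object.assign(
  { reasoningEffort: "low", user: "generic" }, // "openai-compatible" options
  { user: "provider-specific" }                // provider-named options
);
// merged -> { reasoningEffort: "low", user: "provider-specific" }
```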
@@ -284,90 +304,50 @@ var OpenAICompatibleChatLanguageModel = class {
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
-        type: "json_schema",
-        json_schema: {
-          schema: responseFormat.schema,
-          name: (_a = responseFormat.name) != null ? _a : "response",
-          description: responseFormat.description
-        }
-      } : { type: "json_object" } : void 0,
-      stop: stopSequences,
-      seed,
-      ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
-      reasoning_effort: (_d = (_b = providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName]) == null ? void 0 : _b.reasoningEffort) != null ? _d : (_c = providerMetadata == null ? void 0 : providerMetadata["openai-compatible"]) == null ? void 0 : _c.reasoningEffort,
-      // messages:
-      messages: convertToOpenAICompatibleChatMessages(prompt)
+    const {
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice
+    });
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        user: compatibleOptions.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" } : void 0,
+        stop: stopSequences,
+        seed,
+        ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+        reasoning_effort: compatibleOptions.reasoningEffort,
+        // messages:
+        messages: convertToOpenAICompatibleChatMessages(prompt),
+        // tools:
+        tools: openaiTools,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
     };
-    switch (type) {
-      case "regular": {
-        const { tools, tool_choice, toolWarnings } = prepareTools({
-          mode,
-          structuredOutputs: this.supportsStructuredOutputs
-        });
-        return {
-          args: { ...baseArgs, tools, tool_choice },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: this.supportsStructuredOutputs === true && mode.schema != null ? {
-              type: "json_schema",
-              json_schema: {
-                schema: mode.schema,
-                name: (_e = mode.name) != null ? _e : "response",
-                description: mode.description
-              }
-            } : { type: "json_object" }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            ...baseArgs,
-            tool_choice: {
-              type: "function",
-              function: { name: mode.tool.name }
-            },
-            tools: [
-              {
-                type: "function",
-                function: {
-                  name: mode.tool.name,
-                  description: mode.tool.description,
-                  parameters: mode.tool.parameters
-                }
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
-    const { args, warnings } = this.getArgs({ ...options });
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+    const { args, warnings } = await this.getArgs({ ...options });
     const body = JSON.stringify(args);
     const {
       responseHeaders,
@@ -387,109 +367,66 @@ var OpenAICompatibleChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const choice = responseBody.choices[0];
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    const reasoning = choice.message.reasoning_content;
+    if (reasoning != null && reasoning.length > 0) {
+      content.push({
+        type: "reasoning",
+        text: reasoning
+      });
+    }
+    if (choice.message.tool_calls != null) {
+      for (const toolCall of choice.message.tool_calls) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
+          toolName: toolCall.function.name,
+          args: toolCall.function.arguments
+        });
+      }
+    }
     const providerMetadata = {
       [this.providerOptionsName]: {},
-      ...(_b = (_a = this.config.metadataExtractor) == null ? void 0 : _a.extractMetadata) == null ? void 0 : _b.call(_a, {
+      ...await ((_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
        parsedBody: rawResponse
-      })
+      }))
     };
-    const completionTokenDetails = (_c = responseBody.usage) == null ? void 0 : _c.completion_tokens_details;
-    const promptTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.prompt_tokens_details;
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
+    const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     return {
-      text: (_e = choice.message.content) != null ? _e : void 0,
-      reasoning: (_f = choice.message.reasoning_content) != null ? _f : void 0,
-      toolCalls: (_g = choice.message.tool_calls) == null ? void 0 : _g.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
-        completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
+        inputTokens: (_f = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+        outputTokens: (_h = (_g = responseBody.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0,
+        totalTokens: (_j = (_i = responseBody.usage) == null ? void 0 : _i.total_tokens) != null ? _j : void 0,
+        reasoningTokens: (_m = (_l = (_k = responseBody.usage) == null ? void 0 : _k.completion_tokens_details) == null ? void 0 : _l.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_p = (_o = (_n = responseBody.usage) == null ? void 0 : _n.prompt_tokens_details) == null ? void 0 : _o.cached_tokens) != null ? _p : void 0
       },
       providerMetadata,
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(responseBody),
-      warnings,
-      request: { body }
+      request: { body },
+      response: {
+        ...getResponseMetadata(responseBody),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
     };
   }
   async doStream(options) {
     var _a;
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.reasoning) {
-            if (Array.isArray(result.reasoning)) {
-              for (const part of result.reasoning) {
-                if (part.type === "text") {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
-              }
-            } else {
-              controller.enqueue({
-                type: "reasoning",
-                textDelta: result.reasoning
-              });
-            }
-          }
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        rawCall: result.rawCall,
-        rawResponse: result.rawResponse,
-        warnings: result.warnings
-      };
-    }
-    const { args, warnings } = this.getArgs({ ...options });
+    const { args, warnings } = await this.getArgs({ ...options });
     const body = {
       ...args,
       stream: true,
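
As the hunk above shows, `doGenerate` now returns one ordered `content` array instead of separate `text`/`reasoning`/`toolCalls` fields, and usage moves from NaN-padded `promptTokens`/`completionTokens` to optional `inputTokens`/`outputTokens`/`totalTokens`, with `reasoningTokens` and `cachedInputTokens` promoted out of provider metadata. A sketch of the new result shape, with invented values:

```js
const result = {
  content: [
    { type: "reasoning", text: "Let me think..." },
    { type: "text", text: "The answer is 42." },
    {
      type: "tool-call",
      toolCallType: "function",
      toolCallId: "call_1",
      toolName: "getWeather",
      args: '{"city":"Berlin"}',
    },
  ],
  finishReason: "stop",
  usage: {
    inputTokens: 10,
    outputTokens: 20,
    totalTokens: 30,
    reasoningTokens: 5,   // previously providerMetadata.<name>.reasoningTokens
    cachedInputTokens: 0, // previously providerMetadata.<name>.cachedPromptTokens
  },
  // plus providerMetadata, request, response, warnings
};
```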
@@ -511,10 +448,9 @@ var OpenAICompatibleChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
+    const usage = {
       completionTokens: void 0,
       completionTokensDetails: {
         reasoningTokens: void 0,
@@ -524,13 +460,17 @@ var OpenAICompatibleChatLanguageModel = class {
       promptTokens: void 0,
       promptTokensDetails: {
         cachedTokens: void 0
-      }
+      },
+      totalTokens: void 0
     };
     let isFirstChunk = true;
-    let providerOptionsName = this.providerOptionsName;
+    const providerOptionsName = this.providerOptionsName;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
           transform(chunk, controller) {
            var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
@@ -557,11 +497,13 @@ var OpenAICompatibleChatLanguageModel = class {
             const {
               prompt_tokens,
               completion_tokens,
+              total_tokens,
               prompt_tokens_details,
               completion_tokens_details
             } = value.usage;
             usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
             usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
+            usage.totalTokens = total_tokens != null ? total_tokens : void 0;
             if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
               usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
             }
@@ -588,13 +530,13 @@ var OpenAICompatibleChatLanguageModel = class {
             if (delta.reasoning_content != null) {
               controller.enqueue({
                 type: "reasoning",
-                textDelta: delta.reasoning_content
+                text: delta.reasoning_content
               });
             }
             if (delta.content != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: delta.content
+                type: "text",
+                text: delta.content
               });
             }
             if (delta.tool_calls != null) {
@@ -680,104 +622,100 @@ var OpenAICompatibleChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a2, _b;
+            var _a2, _b, _c, _d, _e;
             const providerMetadata = {
               [providerOptionsName]: {},
               ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
             };
-            if (usage.completionTokensDetails.reasoningTokens != null) {
-              providerMetadata[providerOptionsName].reasoningTokens = usage.completionTokensDetails.reasoningTokens;
-            }
             if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
               providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
             }
             if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
               providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
             }
-            if (usage.promptTokensDetails.cachedTokens != null) {
-              providerMetadata[providerOptionsName].cachedPromptTokens = usage.promptTokensDetails.cachedTokens;
-            }
             controller.enqueue({
               type: "finish",
               finishReason,
               usage: {
-                promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+                inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+                outputTokens: (_b = usage.completionTokens) != null ? _b : void 0,
+                totalTokens: (_c = usage.totalTokens) != null ? _c : void 0,
+                reasoningTokens: (_d = usage.completionTokensDetails.reasoningTokens) != null ? _d : void 0,
+                cachedInputTokens: (_e = usage.promptTokensDetails.cachedTokens) != null ? _e : void 0
               },
               providerMetadata
             });
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body: JSON.stringify(body) }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiCompatibleTokenUsageSchema = z2.object({
-  prompt_tokens: z2.number().nullish(),
-  completion_tokens: z2.number().nullish(),
-  prompt_tokens_details: z2.object({
-    cached_tokens: z2.number().nullish()
+var openaiCompatibleTokenUsageSchema = z3.object({
+  prompt_tokens: z3.number().nullish(),
+  completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
+  prompt_tokens_details: z3.object({
+    cached_tokens: z3.number().nullish()
   }).nullish(),
-  completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish(),
-    accepted_prediction_tokens: z2.number().nullish(),
-    rejected_prediction_tokens: z2.number().nullish()
+  completion_tokens_details: z3.object({
+    reasoning_tokens: z3.number().nullish(),
+    accepted_prediction_tokens: z3.number().nullish(),
+    rejected_prediction_tokens: z3.number().nullish()
   }).nullish()
 }).nullish();
-var OpenAICompatibleChatResponseSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      message: z2.object({
-        role: z2.literal("assistant").nullish(),
-        content: z2.string().nullish(),
-        reasoning_content: z2.string().nullish(),
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string().nullish(),
-            type: z2.literal("function"),
-            function: z2.object({
-              name: z2.string(),
-              arguments: z2.string()
+var OpenAICompatibleChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant").nullish(),
+        content: z3.string().nullish(),
+        reasoning_content: z3.string().nullish(),
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string().nullish(),
+            type: z3.literal("function"),
+            function: z3.object({
+              name: z3.string(),
+              arguments: z3.string()
             })
           })
         ).nullish()
       }),
-      finish_reason: z2.string().nullish()
+      finish_reason: z3.string().nullish()
     })
   ),
   usage: openaiCompatibleTokenUsageSchema
 });
-var createOpenAICompatibleChatChunkSchema = (errorSchema) => z2.union([
-  z2.object({
-    id: z2.string().nullish(),
-    created: z2.number().nullish(),
-    model: z2.string().nullish(),
-    choices: z2.array(
-      z2.object({
-        delta: z2.object({
-          role: z2.enum(["assistant"]).nullish(),
-          content: z2.string().nullish(),
-          reasoning_content: z2.string().nullish(),
-          tool_calls: z2.array(
-            z2.object({
-              index: z2.number(),
-              id: z2.string().nullish(),
-              type: z2.literal("function").nullish(),
-              function: z2.object({
-                name: z2.string().nullish(),
-                arguments: z2.string().nullish()
+var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
+  z3.object({
+    id: z3.string().nullish(),
+    created: z3.number().nullish(),
+    model: z3.string().nullish(),
+    choices: z3.array(
+      z3.object({
+        delta: z3.object({
+          role: z3.enum(["assistant"]).nullish(),
+          content: z3.string().nullish(),
+          reasoning_content: z3.string().nullish(),
+          tool_calls: z3.array(
+            z3.object({
+              index: z3.number(),
+              id: z3.string().nullish(),
+              type: z3.literal("function").nullish(),
+              function: z3.object({
+                name: z3.string().nullish(),
+                arguments: z3.string().nullish()
               })
             })
           ).nullish()
         }).nullish(),
-        finish_reason: z2.string().nullish()
+        finish_reason: z3.string().nullish()
       })
     ),
     usage: openaiCompatibleTokenUsageSchema
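
On the streaming side, the v2 stream now opens with a `stream-start` part carrying the warnings, renames `text-delta`/`textDelta` parts to `text`, and finishes with the same optional usage fields as `doGenerate`. An illustrative part sequence (all values invented):

```js
const parts = [
  { type: "stream-start", warnings: [] },
  { type: "response-metadata", id: "chatcmpl-123", modelId: "my-model" },
  { type: "reasoning", text: "Thinking" },
  { type: "text", text: "Hello" },
  {
    type: "finish",
    finishReason: "stop",
    usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 },
  },
];
```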
@@ -786,17 +724,15 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z2.union([
 ]);
 
 // src/openai-compatible-completion-language-model.ts
-import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
-} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders2,
   createEventSourceResponseHandler as createEventSourceResponseHandler2,
   createJsonErrorResponseHandler as createJsonErrorResponseHandler2,
   createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z5 } from "zod";
 
 // src/convert-to-openai-compatible-completion-prompt.ts
 import {
@@ -805,13 +741,9 @@ import {
 } from "@ai-sdk/provider";
 function convertToOpenAICompatibleCompletionPrompt({
   prompt,
-  inputFormat,
   user = "user",
   assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
   let text = "";
   if (prompt[0].role === "system") {
     text += `${prompt[0].content}
@@ -833,13 +765,8 @@ function convertToOpenAICompatibleCompletionPrompt({
         case "text": {
           return part.text;
         }
-        case "image": {
-          throw new UnsupportedFunctionalityError3({
-            functionality: "images"
-          });
-        }
       }
-    }).join("");
+    }).filter(Boolean).join("");
     text += `${user}:
 ${userMessage}
 
@@ -885,15 +812,38 @@ ${user}:`]
   };
 }
 
+// src/openai-compatible-completion-options.ts
+import { z as z4 } from "zod";
+var openaiCompatibleCompletionProviderOptions = z4.object({
+  /**
+   * Echo back the prompt in addition to the completion.
+   */
+  echo: z4.boolean().optional(),
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: z4.record(z4.string(), z4.number()).optional(),
+  /**
+   * The suffix that comes after a completion of inserted text.
+   */
+  suffix: z4.string().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help providers to
+   * monitor and detect abuse.
+   */
+  user: z4.string().optional()
+});
+
 // src/openai-compatible-completion-language-model.ts
 var OpenAICompatibleCompletionLanguageModel = class {
   // type inferred via constructor
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = void 0;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     var _a;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
     const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
     this.chunkSchema = createOpenAICompatibleCompletionChunkSchema(
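
As with the chat model, the completion model's former `settings` (`echo`, `logitBias`, `suffix`, `user`) move into a provider-options schema parsed per call, here only under the provider-specific key. A hedged call-site sketch, reusing the hypothetical `example` provider from the earlier sketch:

```js
import { generateText } from "ai";

const { text } = await generateText({
  model: example.completionModel("my-completion-model"),
  prompt: "Once upon a time",
  providerOptions: {
    example: {
      echo: true,
      logitBias: { "50256": -100 },
      suffix: "The end.",
      user: "user-1234",
    },
  },
});
```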
@@ -907,11 +857,13 @@ var OpenAICompatibleCompletionLanguageModel = class {
   get providerOptionsName() {
     return this.config.provider.split(".")[0].trim();
   }
-  getArgs({
-    mode,
-    inputFormat,
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
+  async getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -920,16 +872,25 @@ var OpenAICompatibleCompletionLanguageModel = class {
     stopSequences: userStopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    providerOptions,
+    tools,
+    toolChoice
   }) {
     var _a;
-    const type = mode.type;
     const warnings = [];
+    const completionOptions = (_a = await parseProviderOptions2({
+      provider: this.providerOptionsName,
+      providerOptions,
+      schema: openaiCompatibleCompletionProviderOptions
+    })) != null ? _a : {};
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
+    }
+    if (tools == null ? void 0 : tools.length) {
+      warnings.push({ type: "unsupported-setting", setting: "tools" });
+    }
+    if (toolChoice != null) {
+      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
     }
     if (responseFormat != null && responseFormat.type !== "text") {
       warnings.push({
@@ -938,62 +899,36 @@ var OpenAICompatibleCompletionLanguageModel = class {
         details: "JSON response format is not supported."
       });
     }
-    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
+    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      echo: this.settings.echo,
-      logit_bias: this.settings.logitBias,
-      suffix: this.settings.suffix,
-      user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      seed,
-      ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
-      // prompt:
-      prompt: completionPrompt,
-      // stop sequences:
-      stop: stop.length > 0 ? stop : void 0
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        echo: completionOptions.echo,
+        logit_bias: completionOptions.logitBias,
+        suffix: completionOptions.suffix,
+        user: completionOptions.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        seed,
+        ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+        // prompt:
+        prompt: completionPrompt,
+        // stop sequences:
+        stop: stop.length > 0 ? stop : void 0
+      },
+      warnings
     };
-    switch (type) {
-      case "regular": {
-        if ((_a = mode.tools) == null ? void 0 : _a.length) {
-          throw new UnsupportedFunctionalityError4({
-            functionality: "tools"
-          });
-        }
-        if (mode.toolChoice) {
-          throw new UnsupportedFunctionalityError4({
-            functionality: "toolChoice"
-          });
-        }
-        return { args: baseArgs, warnings };
-      }
-      case "object-json": {
-        throw new UnsupportedFunctionalityError4({
-          functionality: "object-json mode"
-        });
-      }
-      case "object-tool": {
-        throw new UnsupportedFunctionalityError4({
-          functionality: "object-tool mode"
-        });
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d;
-    const { args, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f;
+    const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1012,24 +947,30 @@ var OpenAICompatibleCompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
+    const content = [];
+    if (choice.text != null && choice.text.length > 0) {
+      content.push({ type: "text", text: choice.text });
+    }
     return {
-      text: choice.text,
+      content,
       usage: {
-        promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
-        completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+        inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
+        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0,
+        totalTokens: (_f = (_e = response.usage) == null ? void 0 : _e.total_tokens) != null ? _f : void 0
       },
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
-      warnings,
-      request: { body: JSON.stringify(args) }
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
@@ -1050,17 +991,21 @@ var OpenAICompatibleCompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
+            var _a, _b, _c;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1080,10 +1025,9 @@ var OpenAICompatibleCompletionLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              usage = {
-                promptTokens: value.usage.prompt_tokens,
-                completionTokens: value.usage.completion_tokens
-              };
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1093,8 +1037,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
             }
             if ((choice == null ? void 0 : choice.text) != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: choice.text
+                type: "text",
+                text: choice.text
               });
             }
           },
@@ -1107,44 +1051,41 @@ var OpenAICompatibleCompletionLanguageModel = class {
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body: JSON.stringify(body) }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiCompatibleCompletionResponseSchema = z3.object({
-  id: z3.string().nullish(),
-  created: z3.number().nullish(),
-  model: z3.string().nullish(),
-  choices: z3.array(
-    z3.object({
-      text: z3.string(),
-      finish_reason: z3.string()
+var usageSchema = z5.object({
+  prompt_tokens: z5.number(),
+  completion_tokens: z5.number(),
+  total_tokens: z5.number()
+});
+var openaiCompatibleCompletionResponseSchema = z5.object({
+  id: z5.string().nullish(),
+  created: z5.number().nullish(),
+  model: z5.string().nullish(),
+  choices: z5.array(
+    z5.object({
+      text: z5.string(),
+      finish_reason: z5.string()
     })
   ),
-  usage: z3.object({
-    prompt_tokens: z3.number(),
-    completion_tokens: z3.number()
-  }).nullish()
+  usage: usageSchema.nullish()
 });
-var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z3.union([
-  z3.object({
-    id: z3.string().nullish(),
-    created: z3.number().nullish(),
-    model: z3.string().nullish(),
-    choices: z3.array(
-      z3.object({
-        text: z3.string(),
-        finish_reason: z3.string().nullish(),
-        index: z3.number()
+var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
+  z5.object({
+    id: z5.string().nullish(),
+    created: z5.number().nullish(),
+    model: z5.string().nullish(),
+    choices: z5.array(
+      z5.object({
+        text: z5.string(),
+        finish_reason: z5.string().nullish(),
+        index: z5.number()
       })
     ),
-    usage: z3.object({
-      prompt_tokens: z3.number(),
-      completion_tokens: z3.number()
-    }).nullish()
+    usage: usageSchema.nullish()
   }),
   errorSchema
 ]);
@@ -1157,14 +1098,31 @@ import {
   combineHeaders as combineHeaders3,
   createJsonErrorResponseHandler as createJsonErrorResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler3,
+  parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z4 } from "zod";
+import { z as z7 } from "zod";
+
+// src/openai-compatible-embedding-options.ts
+import { z as z6 } from "zod";
+var openaiCompatibleEmbeddingProviderOptions = z6.object({
+  /**
+   * The number of dimensions the resulting output embeddings should have.
+   * Only supported in text-embedding-3 and later models.
+   */
+  dimensions: z6.number().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help providers to
+   * monitor and detect abuse.
+   */
+  user: z6.string().optional()
+});
+
+// src/openai-compatible-embedding-model.ts
 var OpenAICompatibleEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
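
The embedding model follows the same migration: `dimensions` and `user` move from constructor settings to provider options, parsed under both the generic and provider-specific keys (as the next hunk shows). A hedged sketch assuming the AI SDK `embed` helper and the hypothetical `example` provider; the `textEmbeddingModel` method name is illustrative:

```js
import { embed } from "ai";

const { embedding } = await embed({
  model: example.textEmbeddingModel("my-embedding-model"),
  value: "sunny day at the beach",
  providerOptions: {
    "openai-compatible": { dimensions: 512 },
    example: { user: "user-1234" }, // provider-specific key wins on conflict
  },
});
```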
@@ -1178,12 +1136,28 @@ var OpenAICompatibleEmbeddingModel = class {
     var _a;
     return (_a = this.config.supportsParallelCalls) != null ? _a : true;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
-    var _a;
+    var _a, _b, _c;
+    const compatibleOptions = Object.assign(
+      (_a = await parseProviderOptions3({
+        provider: "openai-compatible",
+        providerOptions,
+        schema: openaiCompatibleEmbeddingProviderOptions
+      })) != null ? _a : {},
+      (_b = await parseProviderOptions3({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompatibleEmbeddingProviderOptions
+      })) != null ? _b : {}
+    );
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
         provider: this.provider,
@@ -1192,7 +1166,11 @@ var OpenAICompatibleEmbeddingModel = class {
         values
       });
     }
-    const { responseHeaders, value: response } = await postJsonToApi3({
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi3({
       url: this.config.url({
         path: "/embeddings",
         modelId: this.modelId
@@ -1202,11 +1180,11 @@ var OpenAICompatibleEmbeddingModel = class {
         model: this.modelId,
         input: values,
         encoding_format: "float",
-        dimensions: this.settings.dimensions,
-        user: this.settings.user
+        dimensions: compatibleOptions.dimensions,
+        user: compatibleOptions.user
       },
       failedResponseHandler: createJsonErrorResponseHandler3(
-        (_a = this.config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure
+        (_c = this.config.errorStructure) != null ? _c : defaultOpenAICompatibleErrorStructure
       ),
       successfulResponseHandler: createJsonResponseHandler3(
         openaiTextEmbeddingResponseSchema
@@ -1217,13 +1195,13 @@ var OpenAICompatibleEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-      rawResponse: { headers: responseHeaders }
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = z4.object({
-  data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
-  usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = z7.object({
+  data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
 });
 
 // src/openai-compatible-image-model.ts
@@ -1233,17 +1211,13 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z5 } from "zod";
+import { z as z8 } from "zod";
 var OpenAICompatibleImageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
-    this.specificationVersion = "v1";
-  }
-  get maxImagesPerCall() {
-    var _a;
-    return (_a = this.settings.maxImagesPerCall) != null ? _a : 10;
+    this.specificationVersion = "v2";
+    this.maxImagesPerCall = 10;
   }
   get provider() {
     return this.config.provider;
@@ -1283,8 +1257,7 @@ var OpenAICompatibleImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json",
-        ...this.settings.user ? { user: this.settings.user } : {}
+        response_format: "b64_json"
       },
       failedResponseHandler: createJsonErrorResponseHandler4(
         (_e = this.config.errorStructure) != null ? _e : defaultOpenAICompatibleErrorStructure
@@ -1306,8 +1279,8 @@ var OpenAICompatibleImageModel = class {
     };
   }
 };
-var openaiCompatibleImageResponseSchema = z5.object({
-  data: z5.array(z5.object({ b64_json: z5.string() }))
+var openaiCompatibleImageResponseSchema = z8.object({
+  data: z8.array(z8.object({ b64_json: z8.string() }))
 });
 
 // src/openai-compatible-provider.ts
@@ -1331,27 +1304,20 @@ function createOpenAICompatible(options) {
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
-  const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
-    ...getCommonModelConfig("chat"),
-    defaultObjectGenerationMode: "tool"
-  });
-  const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+  const createLanguageModel = (modelId) => createChatModel(modelId);
+  const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(
     modelId,
-    settings,
-    getCommonModelConfig("completion")
-  );
-  const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
-    modelId,
-    settings,
-    getCommonModelConfig("embedding")
+    getCommonModelConfig("chat")
   );
-  const createImageModel = (modelId, settings = {}) => new OpenAICompatibleImageModel(
+  const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
     modelId,
-    settings,
-    getCommonModelConfig("image")
+    getCommonModelConfig("completion")
   );
-  const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+  const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
+    ...getCommonModelConfig("embedding")
+  });
+  const createImageModel = (modelId) => new OpenAICompatibleImageModel(modelId, getCommonModelConfig("image"));
+  const provider = (modelId) => createLanguageModel(modelId);
   provider.languageModel = createLanguageModel;
   provider.chatModel = createChatModel;
   provider.completionModel = createCompletionModel;
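
With per-model `settings` objects removed from the factory, every model constructor now takes only a model id, and anything previously configured at construction time flows through `providerOptions` at call time. A hedged before/after sketch of the factory (`name`/`baseURL` values illustrative):

```js
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";

const provider = createOpenAICompatible({
  name: "example",
  baseURL: "https://api.example.com/v1",
  apiKey: process.env.EXAMPLE_API_KEY,
});

// 0.2.x: provider.chatModel("model-id", { user: "user-1234" })
// 1.0.0-alpha: no settings argument; pass options per call instead
const model = provider.chatModel("model-id");
```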