@ai-sdk/openai-compatible 1.0.0-canary.1 → 1.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -30,15 +30,14 @@ module.exports = __toCommonJS(src_exports);
 
  // src/openai-compatible-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+ var import_zod3 = require("zod");
 
  // src/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
- var import_provider_utils = require("@ai-sdk/provider-utils");
  function getOpenAIMetadata(message) {
  var _a, _b;
- return (_b = (_a = message == null ? void 0 : message.providerMetadata) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
+ return (_b = (_a = message == null ? void 0 : message.providerOptions) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
  }
  function convertToOpenAICompatibleChatMessages(prompt) {
  const messages = [];
@@ -61,25 +60,26 @@ function convertToOpenAICompatibleChatMessages(prompt) {
  messages.push({
  role: "user",
  content: content.map((part) => {
- var _a;
  const partMetadata = getOpenAIMetadata(part);
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text, ...partMetadata };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`
- },
- ...partMetadata
- };
- }
  case "file": {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "File content parts in user messages"
- });
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+ },
+ ...partMetadata
+ };
+ } else {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
+ }
  }
  }
  }),
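This hunk replaces the dedicated `image` part with the unified `file` part: images now arrive as file parts whose `mediaType` starts with `image/`, the base64 `data` is interpolated directly into the data URL, and non-image media types fail with an `UnsupportedFunctionalityError` naming the offending type. A minimal sketch of a prompt message that exercises the new branch (the prompt values are illustrative, not from this diff):

```ts
// Hypothetical v2 prompt message: images are "file" parts with a
// mediaType; the converter builds the data URL from part.data as-is.
const messages = [
  {
    role: 'user' as const,
    content: [
      { type: 'text' as const, text: 'Describe this picture.' },
      {
        type: 'file' as const,
        mediaType: 'image/png', // the wildcard 'image/*' falls back to 'image/jpeg'
        data: 'iVBORw0KGgoAAAANSUhEUg...', // base64 payload (truncated here)
      },
    ],
  },
];
```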
@@ -170,17 +170,27 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }
  }
 
- // src/openai-compatible-error.ts
+ // src/openai-compatible-chat-options.ts
  var import_zod = require("zod");
- var openaiCompatibleErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiCompatibleProviderOptions = import_zod.z.object({
+ /**
+ * A unique identifier representing your end-user, which can help the provider to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional()
+ });
+
+ // src/openai-compatible-error.ts
+ var import_zod2 = require("zod");
+ var openaiCompatibleErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
  var defaultOpenAICompatibleErrorStructure = {
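The new `openaiCompatibleProviderOptions` schema is what replaces the old constructor-time `settings.user`. A hedged sketch of how the option now flows in per call, assuming the AI SDK `generateText` entry point (not shown in this diff):

```ts
// Sketch: `user` moves from model construction to per-call provider
// options; getArgs parses it under both the generic key and the
// provider-specific key (providerOptionsName).
import { generateText } from 'ai';

const { text } = await generateText({
  model, // a model created from an openai-compatible provider
  prompt: 'Hello!',
  providerOptions: {
    'openai-compatible': { user: 'user-1234' },
  },
});
```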
@@ -191,16 +201,14 @@ var defaultOpenAICompatibleErrorStructure = {
  // src/openai-compatible-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
- mode,
- structuredOutputs
+ tools,
+ toolChoice
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const openaiCompatTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -217,29 +225,27 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiCompatTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiCompatTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiCompatTools, tool_choice: type, toolWarnings };
+ return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: openaiCompatTools,
- tool_choice: {
+ toolChoice: {
  type: "function",
- function: {
- name: toolChoice.toolName
- }
+ function: { name: toolChoice.toolName }
  },
  toolWarnings
  };
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
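`prepareTools` now receives `tools` and `toolChoice` directly from the call options instead of digging them out of `mode`, and it returns a camelCase `toolChoice`; the snake_case `tool_choice` only appears later when `getArgs` assembles the wire request. An illustrative call (the function-tool definition shape is an assumption about the v2 interface, not shown in this diff):

```ts
// Hypothetical invocation of the new prepareTools signature:
const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [
    {
      type: 'function',
      name: 'getWeather',
      description: 'Look up the current weather',
      parameters: { type: 'object', properties: { city: { type: 'string' } } },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
});
// per the 'tool' case above, toolChoice is now:
// { type: 'function', function: { name: 'getWeather' } }
```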
@@ -248,50 +254,59 @@ function prepareTools({
  // src/openai-compatible-chat-language-model.ts
  var OpenAICompatibleChatLanguageModel = class {
  // type inferred via constructor
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
  var _a, _b;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
  this.chunkSchema = createOpenAICompatibleChatChunkSchema(
  errorStructure.errorSchema
  );
- this.failedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)(errorStructure);
+ this.failedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)(errorStructure);
  this.supportsStructuredOutputs = (_b = config.supportsStructuredOutputs) != null ? _b : false;
  }
- get defaultObjectGenerationMode() {
- return this.config.defaultObjectGenerationMode;
- }
  get provider() {
  return this.config.provider;
  }
  get providerOptionsName() {
  return this.config.provider.split(".")[0].trim();
  }
+ async getSupportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
  frequencyPenalty,
  presencePenalty,
- providerMetadata,
+ providerOptions,
  stopSequences,
  responseFormat,
- seed
+ seed,
+ toolChoice,
+ tools
  }) {
- var _a, _b;
- const type = mode.type;
+ var _a, _b, _c;
  const warnings = [];
+ const compatibleOptions = Object.assign(
+ (_a = (0, import_provider_utils.parseProviderOptions)({
+ provider: "openai-compatible",
+ providerOptions,
+ schema: openaiCompatibleProviderOptions
+ })) != null ? _a : {},
+ (_b = (0, import_provider_utils.parseProviderOptions)({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleProviderOptions
+ })) != null ? _b : {}
+ );
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
  warnings.push({
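The constructor drops its middle `settings` argument entirely, so a chat model is now built from a model id plus config, and per-user behavior moves to provider options parsed via `parseProviderOptions`. A hedged construction sketch (provider name, URL, and header values are placeholders, not from this diff):

```ts
// Sketch of the two-argument constructor introduced here:
const model = new OpenAICompatibleChatLanguageModel('my-model-id', {
  provider: 'example.chat', // providerOptionsName becomes 'example'
  url: ({ path }) => `https://api.example.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.EXAMPLE_API_KEY}` }),
  supportsStructuredOutputs: true,
});
```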
@@ -300,118 +315,101 @@ var OpenAICompatibleChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
- stop: stopSequences,
- seed,
- ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
- // messages:
- messages: convertToOpenAICompatibleChatMessages(prompt)
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice
+ });
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ user: compatibleOptions.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ name: (_c = responseFormat.name) != null ? _c : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
+ stop: stopSequences,
+ seed,
+ ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+ // messages:
+ messages: convertToOpenAICompatibleChatMessages(prompt),
+ // tools:
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
  };
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareTools({
- mode,
- structuredOutputs: this.supportsStructuredOutputs
- });
- return {
- args: { ...baseArgs, tools, tool_choice },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs === true && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- name: (_b = mode.name) != null ? _b : "response",
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args, warnings } = this.getArgs({ ...options });
  const body = JSON.stringify(args);
  const {
  responseHeaders,
  value: responseBody,
  rawValue: rawResponse
- } = await (0, import_provider_utils2.postJsonToApi)({
+ } = await (0, import_provider_utils.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: this.failedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils.createJsonResponseHandler)(
  OpenAICompatibleChatResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const choice = responseBody.choices[0];
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ const reasoning = choice.message.reasoning_content;
+ if (reasoning != null && reasoning.length > 0) {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoning
+ });
+ }
+ if (choice.message.tool_calls != null) {
+ for (const toolCall of choice.message.tool_calls) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils.generateId)(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ }
  const providerMetadata = {
  [this.providerOptionsName]: {},
- ...(_b = (_a = this.config.metadataExtractor) == null ? void 0 : _a.extractMetadata) == null ? void 0 : _b.call(_a, {
+ ...(_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
  parsedBody: rawResponse
  })
  };
- const completionTokenDetails = (_c = responseBody.usage) == null ? void 0 : _c.completion_tokens_details;
- const promptTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.prompt_tokens_details;
+ const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
+ const promptTokenDetails = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens_details;
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
  }
@@ -425,106 +423,46 @@ var OpenAICompatibleChatLanguageModel = class {
  providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_e = choice.message.content) != null ? _e : void 0,
- reasoning: (_f = choice.message.reasoning_content) != null ? _f : void 0,
- toolCalls: (_g = choice.message.tool_calls) == null ? void 0 : _g.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
- completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
+ inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
  },
  providerMetadata,
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(responseBody),
- warnings,
- request: { body }
+ request: { body },
+ response: {
+ ...getResponseMetadata(responseBody),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
  var _a;
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.reasoning) {
- if (Array.isArray(result.reasoning)) {
- for (const part of result.reasoning) {
- if (part.type === "text") {
- controller.enqueue({
- type: "reasoning",
- textDelta: part.text
- });
- }
- }
- } else {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- }
- }
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs({ ...options });
- const body = JSON.stringify({ ...args, stream: true });
+ const body = {
+ ...args,
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
+ };
  const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
- body: {
- ...args,
- stream: true
- },
+ headers: (0, import_provider_utils.combineHeaders)(this.config.headers(), options.headers),
+ body,
  failedResponseHandler: this.failedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils.createEventSourceResponseHandler)(
  this.chunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  let usage = {
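`doGenerate` no longer returns separate `text`, `reasoning`, and `toolCalls` fields; everything is pushed into a single ordered `content` array, and usage reports optional `inputTokens`/`outputTokens` instead of NaN-padded `promptTokens`/`completionTokens`. The `simulateStreaming` setting disappears along with the rest of the settings object. Roughly what a result now looks like (values illustrative):

```ts
// Sketch of the reshaped doGenerate result:
const result = {
  content: [
    { type: 'text', text: 'The answer is 42.' },
    { type: 'reasoning', reasoningType: 'text', text: '…' },
    {
      type: 'tool-call',
      toolCallType: 'function',
      toolCallId: 'call_abc123', // generateId() fills in a missing id
      toolName: 'getWeather',
      args: '{"city":"Berlin"}',
    },
  ],
  finishReason: 'stop',
  usage: { inputTokens: 12, outputTokens: 34 }, // undefined when the provider omits usage
};
```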
@@ -544,6 +482,9 @@ var OpenAICompatibleChatLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
  transform(chunk, controller) {
  var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
@@ -601,13 +542,14 @@ var OpenAICompatibleChatLanguageModel = class {
  if (delta.reasoning_content != null) {
  controller.enqueue({
  type: "reasoning",
- textDelta: delta.reasoning_content
+ reasoningType: "text",
+ text: delta.reasoning_content
  });
  }
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  if (delta.tool_calls != null) {
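Stream parts are renamed in the same pass: `text-delta`/`textDelta` becomes `text`/`text`, reasoning deltas gain a `reasoningType`, and every stream now opens with a `stream-start` part carrying the warnings. An illustrative chunk sequence a consumer might observe (field values are placeholders):

```ts
// Sketch of the renamed stream parts emitted by doStream:
const parts = [
  { type: 'stream-start', warnings: [] },
  { type: 'response-metadata', id: 'chatcmpl-123', modelId: 'my-model-id' },
  { type: 'reasoning', reasoningType: 'text', text: 'Thinking about it…' },
  { type: 'text', text: 'Hello' },
  {
    type: 'finish',
    finishReason: 'stop',
    usage: { inputTokens: 5, outputTokens: 2 },
  },
];
```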
@@ -652,11 +594,11 @@ var OpenAICompatibleChatLanguageModel = class {
  argsTextDelta: toolCall2.function.arguments
  });
  }
- if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
+ if ((0, import_provider_utils.isParsableJson)(toolCall2.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -679,11 +621,11 @@ var OpenAICompatibleChatLanguageModel = class {
  toolName: toolCall.function.name,
  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -714,83 +656,81 @@ var OpenAICompatibleChatLanguageModel = class {
  type: "finish",
  finishReason,
  usage: {
- promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+ inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+ outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
  },
  providerMetadata
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompatibleTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiCompatibleTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- reasoning_content: import_zod2.z.string().nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var OpenAICompatibleChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ reasoning_content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- reasoning_content: import_zod2.z.string().nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ reasoning_content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").nullish(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
@@ -799,9 +739,8 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod2.z.union
  ]);
 
  // src/openai-compatible-completion-language-model.ts
- var import_provider5 = require("@ai-sdk/provider");
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
 
  // src/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -835,13 +774,8 @@ function convertToOpenAICompatibleCompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new import_provider4.UnsupportedFunctionalityError({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
  ${userMessage}
 
@@ -887,21 +821,44 @@ ${user}:`]
  };
  }
 
+ // src/openai-compatible-completion-options.ts
+ var import_zod4 = require("zod");
+ var openaiCompatibleCompletionProviderOptions = import_zod4.z.object({
+ /**
+ * Echo back the prompt in addition to the completion.
+ */
+ echo: import_zod4.z.boolean().optional(),
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod4.z.record(import_zod4.z.number(), import_zod4.z.number()).optional(),
+ /**
+ * The suffix that comes after a completion of inserted text.
+ */
+ suffix: import_zod4.z.string().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help providers to
+ * monitor and detect abuse.
+ */
+ user: import_zod4.z.string().optional()
+ });
+
  // src/openai-compatible-completion-language-model.ts
  var OpenAICompatibleCompletionLanguageModel = class {
  // type inferred via constructor
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
  var _a;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
  this.chunkSchema = createOpenAICompatibleCompletionChunkSchema(
  errorStructure.errorSchema
  );
- this.failedResponseHandler = (0, import_provider_utils3.createJsonErrorResponseHandler)(errorStructure);
+ this.failedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)(errorStructure);
  }
  get provider() {
  return this.config.provider;
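The completion model follows the same migration: its former constructor settings (`echo`, `logitBias`, `suffix`, `user`) become a zod-validated provider options object parsed per call. A hedged usage sketch, assuming the AI SDK `generateText` entry point and a provider whose `providerOptionsName` is `example` (both assumptions, not from this diff):

```ts
// Sketch: former completion-model settings passed as provider options.
import { generateText } from 'ai';

const { text } = await generateText({
  model: completionModel, // an openai-compatible completion model
  prompt: 'Write a haiku about diffs.',
  providerOptions: {
    example: {
      echo: false,
      logitBias: { 50256: -100 }, // token id -> bias in [-100, 100]
      suffix: '\n---',
      user: 'user-1234',
    },
  },
});
```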
@@ -909,11 +866,14 @@ var OpenAICompatibleCompletionLanguageModel = class {
  get providerOptionsName() {
  return this.config.provider.split(".")[0].trim();
  }
+ async getSupportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
  getArgs({
- mode,
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -922,16 +882,25 @@ var OpenAICompatibleCompletionLanguageModel = class {
  stopSequences: userStopSequences,
  responseFormat,
  seed,
- providerMetadata
+ providerOptions,
+ tools,
+ toolChoice
  }) {
  var _a;
- const type = mode.type;
  const warnings = [];
+ const completionOptions = (_a = (0, import_provider_utils2.parseProviderOptions)({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleCompletionProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
+ if (tools == null ? void 0 : tools.length) {
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
+ }
+ if (toolChoice != null) {
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
@@ -942,56 +911,30 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- seed,
- ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stop.length > 0 ? stop : void 0
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: completionOptions.echo,
+ logit_bias: completionOptions.logitBias,
+ suffix: completionOptions.suffix,
+ user: completionOptions.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ },
+ warnings
  };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "toolChoice"
- });
- }
- return { args: baseArgs, warnings };
- }
- case "object-json": {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "object-tool mode"
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
  var _a, _b, _c, _d;
@@ -1000,67 +943,77 @@ var OpenAICompatibleCompletionLanguageModel = class {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils2.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: this.failedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
  openaiCompatibleCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
+ const content = [];
+ if (choice.text != null && choice.text.length > 0) {
+ content.push({ type: "text", text: choice.text });
+ }
  return {
- text: choice.text,
+ content,
  usage: {
- promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
- completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+ inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
+ outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
  },
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
- stream: true
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
  };
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: this.failedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
  this.chunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
+ var _a, _b;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
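Both language models now attach `stream_options: { include_usage: true }` to streaming requests when the provider config sets `includeUsage`, so token usage arrives on the final SSE chunk rather than being left undefined. The streaming request body then looks roughly like this (model id and prompt are placeholders):

```ts
// Sketch of the streaming request body when config.includeUsage is set
// (the field is omitted entirely when includeUsage is falsy):
const body = {
  model: 'my-model-id',
  prompt: 'Once upon a time',
  stream: true,
  stream_options: { include_usage: true },
};
```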
@@ -1080,10 +1033,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1093,8 +1044,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  },
@@ -1107,57 +1058,71 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompatibleCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string()
+ var openaiCompatibleCompletionResponseSchema = import_zod5.z.object({
+ id: import_zod5.z.string().nullish(),
+ created: import_zod5.z.number().nullish(),
+ model: import_zod5.z.string().nullish(),
+ choices: import_zod5.z.array(
+ import_zod5.z.object({
+ text: import_zod5.z.string(),
+ finish_reason: import_zod5.z.string()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod5.z.object({
+ prompt_tokens: import_zod5.z.number(),
+ completion_tokens: import_zod5.z.number()
  }).nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod5.z.union([
+ import_zod5.z.object({
+ id: import_zod5.z.string().nullish(),
+ created: import_zod5.z.number().nullish(),
+ model: import_zod5.z.string().nullish(),
+ choices: import_zod5.z.array(
+ import_zod5.z.object({
+ text: import_zod5.z.string(),
+ finish_reason: import_zod5.z.string().nullish(),
+ index: import_zod5.z.number()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod5.z.object({
+ prompt_tokens: import_zod5.z.number(),
+ completion_tokens: import_zod5.z.number()
  }).nullish()
  }),
  errorSchema
  ]);
 
  // src/openai-compatible-embedding-model.ts
- var import_provider6 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_provider5 = require("@ai-sdk/provider");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod7 = require("zod");
+
+ // src/openai-compatible-embedding-options.ts
+ var import_zod6 = require("zod");
+ var openaiCompatibleEmbeddingProviderOptions = import_zod6.z.object({
+ /**
+ * The number of dimensions the resulting output embeddings should have.
+ * Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_zod6.z.number().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help providers to
+ * monitor and detect abuse.
+ */
+ user: import_zod6.z.string().optional()
+ });
+
+ // src/openai-compatible-embedding-model.ts
  var OpenAICompatibleEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
@@ -1171,37 +1136,57 @@ var OpenAICompatibleEmbeddingModel = class {
  var _a;
  return (_a = this.config.supportsParallelCalls) != null ? _a : true;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
- var _a;
+ var _a, _b, _c;
+ const compatibleOptions = Object.assign(
+ (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai-compatible",
+ providerOptions,
+ schema: openaiCompatibleEmbeddingProviderOptions
+ })) != null ? _a : {},
+ (_b = (0, import_provider_utils3.parseProviderOptions)({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleEmbeddingProviderOptions
+ })) != null ? _b : {}
+ );
  if (values.length > this.maxEmbeddingsPerCall) {
- throw new import_provider6.TooManyEmbeddingValuesForCallError({
+ throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
  modelId: this.modelId,
  maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: compatibleOptions.dimensions,
+ user: compatibleOptions.user
  },
- failedResponseHandler: (0, import_provider_utils4.createJsonErrorResponseHandler)(
- (_a = this.config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure
+ failedResponseHandler: (0, import_provider_utils3.createJsonErrorResponseHandler)(
+ (_c = this.config.errorStructure) != null ? _c : defaultOpenAICompatibleErrorStructure
  ),
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
  openaiTextEmbeddingResponseSchema
  ),
  abortSignal,
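The embedding model gets the same treatment: `dimensions` and `user` are now per-call provider options, merged from the generic `openai-compatible` key and the provider-specific key. A hedged sketch assuming the AI SDK `embedMany` helper (not part of this diff):

```ts
// Sketch: per-call embedding options replace the removed constructor settings.
import { embedMany } from 'ai';

const { embeddings } = await embedMany({
  model: embeddingModel, // an openai-compatible embedding model
  values: ['sunny day at the beach'],
  providerOptions: {
    'openai-compatible': { dimensions: 256, user: 'user-1234' },
  },
});
```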
@@ -1210,18 +1195,18 @@ var OpenAICompatibleEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
+ data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
+ usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
  });
 
  // src/openai-compatible-image-model.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_zod8 = require("zod");
  var OpenAICompatibleImageModel = class {
  constructor(modelId, settings, config) {
  this.modelId = modelId;
@@ -1259,12 +1244,12 @@ var OpenAICompatibleImageModel = class {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
+ const { value: response, responseHeaders } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
  body: {
  model: this.modelId,
  prompt,
@@ -1274,10 +1259,10 @@ var OpenAICompatibleImageModel = class {
  response_format: "b64_json",
  ...this.settings.user ? { user: this.settings.user } : {}
  },
- failedResponseHandler: (0, import_provider_utils5.createJsonErrorResponseHandler)(
+ failedResponseHandler: (0, import_provider_utils4.createJsonErrorResponseHandler)(
  (_e = this.config.errorStructure) != null ? _e : defaultOpenAICompatibleErrorStructure
  ),
- successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
  openaiCompatibleImageResponseSchema
  ),
  abortSignal,
@@ -1294,14 +1279,14 @@ var OpenAICompatibleImageModel = class {
  };
  }
  };
- var openaiCompatibleImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiCompatibleImageResponseSchema = import_zod8.z.object({
+ data: import_zod8.z.array(import_zod8.z.object({ b64_json: import_zod8.z.string() }))
  });
 
  // src/openai-compatible-provider.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
  function createOpenAICompatible(options) {
- const baseURL = (0, import_provider_utils6.withoutTrailingSlash)(options.baseURL);
+ const baseURL = (0, import_provider_utils5.withoutTrailingSlash)(options.baseURL);
  const providerName = options.name;
  const getHeaders = () => ({
  ...options.apiKey && { Authorization: `Bearer ${options.apiKey}` },
@@ -1319,27 +1304,24 @@ function createOpenAICompatible(options) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
- const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
- ...getCommonModelConfig("chat"),
- defaultObjectGenerationMode: "tool"
- });
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+ const createLanguageModel = (modelId) => createChatModel(modelId);
+ const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(
  modelId,
- settings,
- getCommonModelConfig("completion")
+ getCommonModelConfig("chat")
  );
- const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+ const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
- getCommonModelConfig("embedding")
+ getCommonModelConfig("completion")
  );
+ const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
+ ...getCommonModelConfig("embedding")
+ });
  const createImageModel = (modelId, settings = {}) => new OpenAICompatibleImageModel(
  modelId,
  settings,
  getCommonModelConfig("image")
  );
- const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+ const provider = (modelId) => createLanguageModel(modelId);
  provider.languageModel = createLanguageModel;
  provider.chatModel = createChatModel;
  provider.completionModel = createCompletionModel;
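With the settings parameter removed from every factory, model creation in the canary API is by id alone. A hedged end-to-end sketch (provider name, base URL, and key are placeholders):

```ts
// Sketch of the canary provider API after this diff:
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
  apiKey: process.env.EXAMPLE_API_KEY,
});

const chatModel = provider('my-chat-model'); // same as provider.chatModel(...)
const completionModel = provider.completionModel('my-completion-model');
// per-call behavior (user, echo, logitBias, …) now travels via providerOptions
```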