@ai-sdk/openai-compatible 1.0.0-canary.1 → 1.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -9,18 +9,18 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
+ parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-to-openai-compatible-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
  function getOpenAIMetadata(message) {
  var _a, _b;
- return (_b = (_a = message == null ? void 0 : message.providerMetadata) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
+ return (_b = (_a = message == null ? void 0 : message.providerOptions) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
  }
  function convertToOpenAICompatibleChatMessages(prompt) {
  const messages = [];
@@ -43,25 +43,26 @@ function convertToOpenAICompatibleChatMessages(prompt) {
  messages.push({
  role: "user",
  content: content.map((part) => {
- var _a;
  const partMetadata = getOpenAIMetadata(part);
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text, ...partMetadata };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
- },
- ...partMetadata
- };
- }
  case "file": {
- throw new UnsupportedFunctionalityError({
- functionality: "File content parts in user messages"
- });
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+ },
+ ...partMetadata
+ };
+ } else {
+ throw new UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
+ }
  }
  }
  }),
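
Note: this hunk folds user-message image parts into the v2 `file` part type. A minimal sketch of the prompt shape the converter now expects (type names follow the LanguageModelV2 prompt spec; values are illustrative):

```ts
const prompt = [
  {
    role: 'user' as const,
    content: [
      { type: 'text' as const, text: 'Describe this image.' },
      {
        // was: { type: 'image', image: Uint8Array | URL, mimeType?: string }
        type: 'file' as const,
        mediaType: 'image/png', // 'image/*' is mapped to 'image/jpeg'
        data: new URL('https://example.com/cat.png'), // or base64 string data
      },
    ],
  },
];
```
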
@@ -152,17 +153,31 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }
  }

- // src/openai-compatible-error.ts
+ // src/openai-compatible-chat-options.ts
  import { z } from "zod";
- var openaiCompatibleErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiCompatibleProviderOptions = z.object({
+ /**
+ * A unique identifier representing your end-user, which can help the provider to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional()
+ });
+
+ // src/openai-compatible-error.ts
+ import { z as z2 } from "zod";
+ var openaiCompatibleErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var defaultOpenAICompatibleErrorStructure = {
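
Note: `user` and `reasoningEffort` move from constructor settings into this new provider-options schema. A minimal usage sketch, assuming an AI SDK version matching this canary (the `example` name, baseURL, and model id are placeholders):

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { generateText } from 'ai';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

const { text } = await generateText({
  model: provider('reasoning-model-id'),
  prompt: 'Prove that the square root of 2 is irrational.',
  providerOptions: {
    // validated against openaiCompatibleProviderOptions above; an entry under
    // the provider's own name ('example') would override these generic values
    'openai-compatible': { user: 'user-1234', reasoningEffort: 'low' },
  },
});
```
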
@@ -175,16 +190,14 @@ import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
  function prepareTools({
- mode,
- structuredOutputs
+ tools,
+ toolChoice
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const openaiCompatTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -201,29 +214,27 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiCompatTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiCompatTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiCompatTools, tool_choice: type, toolWarnings };
+ return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: openaiCompatTools,
- tool_choice: {
+ toolChoice: {
  type: "function",
- function: {
- name: toolChoice.toolName
- }
+ function: { name: toolChoice.toolName }
  },
  toolWarnings
  };
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError2({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
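
Note: `prepareTools` now receives `tools`/`toolChoice` directly (the `mode` object is gone) and returns a camelCase `toolChoice`. A sketch of the mapping for a forced tool choice (shapes simplified and assumed):

```ts
// input (AI SDK side)
const toolChoice = { type: 'tool', toolName: 'getWeather' } as const;

// output (OpenAI-compatible wire format produced above)
const mapped = { type: 'function', function: { name: toolChoice.toolName } };
```
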
@@ -232,11 +243,10 @@ function prepareTools({
  // src/openai-compatible-chat-language-model.ts
  var OpenAICompatibleChatLanguageModel = class {
  // type inferred via constructor
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
  var _a, _b;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
  this.chunkSchema = createOpenAICompatibleChatChunkSchema(
@@ -245,37 +255,47 @@ var OpenAICompatibleChatLanguageModel = class {
  this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
  this.supportsStructuredOutputs = (_b = config.supportsStructuredOutputs) != null ? _b : false;
  }
- get defaultObjectGenerationMode() {
- return this.config.defaultObjectGenerationMode;
- }
  get provider() {
  return this.config.provider;
  }
  get providerOptionsName() {
  return this.config.provider.split(".")[0].trim();
  }
- getArgs({
- mode,
+ async getSupportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
+ async getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
  frequencyPenalty,
  presencePenalty,
- providerMetadata,
+ providerOptions,
  stopSequences,
  responseFormat,
- seed
+ seed,
+ toolChoice,
+ tools
  }) {
- var _a, _b;
- const type = mode.type;
+ var _a, _b, _c;
  const warnings = [];
+ const compatibleOptions = Object.assign(
+ (_a = await parseProviderOptions({
+ provider: "openai-compatible",
+ providerOptions,
+ schema: openaiCompatibleProviderOptions
+ })) != null ? _a : {},
+ (_b = await parseProviderOptions({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleProviderOptions
+ })) != null ? _b : {}
+ );
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
  warnings.push({
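
Note: provider options are parsed twice and merged, so the generic `openai-compatible` namespace acts as a base and the provider's own namespace wins. A plain-object equivalent of the `Object.assign` merge above (names illustrative):

```ts
type CompatibleOptions = {
  user?: string;
  reasoningEffort?: 'low' | 'medium' | 'high';
};

function mergeCompatibleOptions(
  providerOptions: Record<string, CompatibleOptions> | undefined,
  providerOptionsName: string, // e.g. 'example' for provider 'example.chat'
): CompatibleOptions {
  return {
    ...(providerOptions?.['openai-compatible'] ?? {}), // generic base
    ...(providerOptions?.[providerOptionsName] ?? {}), // provider-specific override
  };
}
```
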
@@ -284,89 +304,50 @@ var OpenAICompatibleChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
- stop: stopSequences,
- seed,
- ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
- // messages:
- messages: convertToOpenAICompatibleChatMessages(prompt)
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice
+ });
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ user: compatibleOptions.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ name: (_c = responseFormat.name) != null ? _c : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
+ stop: stopSequences,
+ seed,
+ ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+ reasoning_effort: compatibleOptions.reasoningEffort,
+ // messages:
+ messages: convertToOpenAICompatibleChatMessages(prompt),
+ // tools:
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
  };
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareTools({
- mode,
- structuredOutputs: this.supportsStructuredOutputs
- });
- return {
- args: { ...baseArgs, tools, tool_choice },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs === true && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- name: (_b = mode.name) != null ? _b : "response",
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
- const { args, warnings } = this.getArgs({ ...options });
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ const { args, warnings } = await this.getArgs({ ...options });
  const body = JSON.stringify(args);
  const {
  responseHeaders,
@@ -386,16 +367,38 @@ var OpenAICompatibleChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const choice = responseBody.choices[0];
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ const reasoning = choice.message.reasoning_content;
+ if (reasoning != null && reasoning.length > 0) {
+ content.push({
+ type: "reasoning",
+ text: reasoning
+ });
+ }
+ if (choice.message.tool_calls != null) {
+ for (const toolCall of choice.message.tool_calls) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ }
  const providerMetadata = {
  [this.providerOptionsName]: {},
- ...(_b = (_a = this.config.metadataExtractor) == null ? void 0 : _a.extractMetadata) == null ? void 0 : _b.call(_a, {
+ ...await ((_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
  parsedBody: rawResponse
- })
+ }))
  };
- const completionTokenDetails = (_c = responseBody.usage) == null ? void 0 : _c.completion_tokens_details;
- const promptTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.prompt_tokens_details;
+ const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
+ const promptTokenDetails = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens_details;
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
  }
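
Note: `doGenerate` now assembles a single ordered `content` array (text, reasoning, tool calls) instead of separate `text`/`reasoning`/`toolCalls` result fields. A sketch of consuming the new shape (union simplified and assumed):

```ts
type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'reasoning'; text: string }
  | { type: 'tool-call'; toolCallId: string; toolName: string; args: string };

// concatenate only the text parts, in generation order
function textOf(content: ContentPart[]): string {
  return content
    .filter((part): part is Extract<ContentPart, { type: 'text' }> => part.type === 'text')
    .map((part) => part.text)
    .join('');
}
```
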
@@ -409,87 +412,31 @@ var OpenAICompatibleChatLanguageModel = class {
  providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_e = choice.message.content) != null ? _e : void 0,
- reasoning: (_f = choice.message.reasoning_content) != null ? _f : void 0,
- toolCalls: (_g = choice.message.tool_calls) == null ? void 0 : _g.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
- completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
+ inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
  },
  providerMetadata,
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(responseBody),
- warnings,
- request: { body }
+ request: { body },
+ response: {
+ ...getResponseMetadata(responseBody),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
  var _a;
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.reasoning) {
- if (Array.isArray(result.reasoning)) {
- for (const part of result.reasoning) {
- if (part.type === "text") {
- controller.enqueue({
- type: "reasoning",
- textDelta: part.text
- });
- }
- }
- } else {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- }
- }
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
- const { args, warnings } = this.getArgs({ ...options });
- const body = JSON.stringify({ ...args, stream: true });
+ const { args, warnings } = await this.getArgs({ ...options });
+ const body = {
+ ...args,
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
+ };
  const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
  const { responseHeaders, value: response } = await postJsonToApi({
  url: this.config.url({
@@ -497,10 +444,7 @@ var OpenAICompatibleChatLanguageModel = class {
  modelId: this.modelId
  }),
  headers: combineHeaders(this.config.headers(), options.headers),
- body: {
- ...args,
- stream: true
- },
+ body,
  failedResponseHandler: this.failedResponseHandler,
  successfulResponseHandler: createEventSourceResponseHandler(
  this.chunkSchema
508
452
  abortSignal: options.abortSignal,
509
453
  fetch: this.config.fetch
510
454
  });
511
- const { messages: rawPrompt, ...rawSettings } = args;
512
455
  const toolCalls = [];
513
456
  let finishReason = "unknown";
514
457
  let usage = {
@@ -528,6 +471,9 @@ var OpenAICompatibleChatLanguageModel = class {
528
471
  return {
529
472
  stream: response.pipeThrough(
530
473
  new TransformStream({
474
+ start(controller) {
475
+ controller.enqueue({ type: "stream-start", warnings });
476
+ },
531
477
  // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
532
478
  transform(chunk, controller) {
533
479
  var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
@@ -585,13 +531,13 @@ var OpenAICompatibleChatLanguageModel = class {
585
531
  if (delta.reasoning_content != null) {
586
532
  controller.enqueue({
587
533
  type: "reasoning",
588
- textDelta: delta.reasoning_content
534
+ text: delta.reasoning_content
589
535
  });
590
536
  }
591
537
  if (delta.content != null) {
592
538
  controller.enqueue({
593
- type: "text-delta",
594
- textDelta: delta.content
539
+ type: "text",
540
+ text: delta.content
595
541
  });
596
542
  }
597
543
  if (delta.tool_calls != null) {
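
Note: streamed text parts are renamed from `{ type: 'text-delta', textDelta }` to `{ type: 'text', text }`, and reasoning deltas now carry `text` as well. A sketch of a consumer updated for the new part names (union simplified and assumed):

```ts
type StreamPart =
  | { type: 'stream-start'; warnings: unknown[] } // new: warnings arrive first
  | { type: 'text'; text: string }                // was: 'text-delta' / textDelta
  | { type: 'reasoning'; text: string }           // was: textDelta payload
  | { type: 'finish'; finishReason: string };

function handlePart(part: StreamPart): void {
  switch (part.type) {
    case 'stream-start': console.warn(part.warnings); break;
    case 'text':         process.stdout.write(part.text); break;
    case 'reasoning':    process.stdout.write(part.text); break;
    case 'finish':       console.log('\ndone:', part.finishReason); break;
  }
}
```
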
@@ -698,83 +644,81 @@ var OpenAICompatibleChatLanguageModel = class {
  type: "finish",
  finishReason,
  usage: {
- promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+ inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+ outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
  },
  providerMetadata
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompatibleTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiCompatibleTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- reasoning_content: z2.string().nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var OpenAICompatibleChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ reasoning_content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- reasoning_content: z2.string().nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ reasoning_content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").nullish(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
783
727
  ]);
784
728
 
785
729
  // src/openai-compatible-completion-language-model.ts
786
- import {
787
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
788
- } from "@ai-sdk/provider";
789
730
  import {
790
731
  combineHeaders as combineHeaders2,
791
732
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
792
733
  createJsonErrorResponseHandler as createJsonErrorResponseHandler2,
793
734
  createJsonResponseHandler as createJsonResponseHandler2,
735
+ parseProviderOptions as parseProviderOptions2,
794
736
  postJsonToApi as postJsonToApi2
795
737
  } from "@ai-sdk/provider-utils";
796
- import { z as z3 } from "zod";
738
+ import { z as z5 } from "zod";
797
739
 
798
740
  // src/convert-to-openai-compatible-completion-prompt.ts
799
741
  import {
@@ -830,13 +772,8 @@ function convertToOpenAICompatibleCompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new UnsupportedFunctionalityError3({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
  ${userMessage}

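Note: with the `image` case removed, the `map` callback returns `undefined` for part types the switch no longer handles; the added `.filter(Boolean)` drops those entries before joining. Illustrative:

```ts
const parts = [{ type: 'text', text: 'hi' }, { type: 'file' }] as const;
const rendered = parts
  .map((part) => (part.type === 'text' ? part.text : undefined))
  .filter(Boolean)
  .join(''); // 'hi'
```
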
@@ -882,15 +819,38 @@ ${user}:`]
  };
  }

+ // src/openai-compatible-completion-options.ts
+ import { z as z4 } from "zod";
+ var openaiCompatibleCompletionProviderOptions = z4.object({
+ /**
+ * Echo back the prompt in addition to the completion.
+ */
+ echo: z4.boolean().optional(),
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z4.record(z4.string(), z4.number()).optional(),
+ /**
+ * The suffix that comes after a completion of inserted text.
+ */
+ suffix: z4.string().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help providers to
+ * monitor and detect abuse.
+ */
+ user: z4.string().optional()
+ });
+
  // src/openai-compatible-completion-language-model.ts
  var OpenAICompatibleCompletionLanguageModel = class {
  // type inferred via constructor
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
  var _a;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
  this.chunkSchema = createOpenAICompatibleCompletionChunkSchema(
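
Note: the completion model likewise loses its constructor `settings`; `echo`, `logitBias`, `suffix`, and `user` now arrive per call via `providerOptions`, parsed under the provider's configured name. A hedged sketch (`example` and the model id are placeholders):

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { generateText } from 'ai';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

await generateText({
  model: provider.completionModel('completion-model-id'),
  prompt: 'Say hello.',
  providerOptions: {
    // keyed by the provider name; validated against the schema above
    example: { echo: true, logitBias: { '50256': -100 }, user: 'user-1234' },
  },
});
```
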
@@ -904,11 +864,14 @@ var OpenAICompatibleCompletionLanguageModel = class {
  get providerOptionsName() {
  return this.config.provider.split(".")[0].trim();
  }
- getArgs({
- mode,
+ async getSupportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
+ async getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -917,16 +880,25 @@ var OpenAICompatibleCompletionLanguageModel = class {
  stopSequences: userStopSequences,
  responseFormat,
  seed,
- providerMetadata
+ providerOptions,
+ tools,
+ toolChoice
  }) {
  var _a;
- const type = mode.type;
  const warnings = [];
+ const completionOptions = (_a = await parseProviderOptions2({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleCompletionProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
+ if (tools == null ? void 0 : tools.length) {
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
+ }
+ if (toolChoice != null) {
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
@@ -937,60 +909,34 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- seed,
- ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stop.length > 0 ? stop : void 0
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: completionOptions.echo,
+ logit_bias: completionOptions.logitBias,
+ suffix: completionOptions.suffix,
+ user: completionOptions.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ },
+ warnings
  };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new UnsupportedFunctionalityError4({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new UnsupportedFunctionalityError4({
- functionality: "toolChoice"
- });
- }
- return { args: baseArgs, warnings };
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError4({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new UnsupportedFunctionalityError4({
- functionality: "object-tool mode"
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
  var _a, _b, _c, _d;
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -1009,27 +955,34 @@ var OpenAICompatibleCompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
+ const content = [];
+ if (choice.text != null && choice.text.length > 0) {
+ content.push({ type: "text", text: choice.text });
+ }
  return {
- text: choice.text,
+ content,
  usage: {
- promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
- completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+ inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
+ outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
  },
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
- stream: true
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
  };
  const { responseHeaders, value: response } = await postJsonToApi2({
  url: this.config.url({
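
Note: both chat and completion streaming now honor a config-level `includeUsage` flag. When set, the request body gains `stream_options`; an illustrative shape of the body built above:

```ts
const body = {
  model: 'completion-model-id',
  prompt: 'Hello',
  stream: true,
  // present only when config.includeUsage is truthy; omitted otherwise
  stream_options: { include_usage: true },
};
```
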
@@ -1045,17 +998,20 @@ var OpenAICompatibleCompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
+ var _a, _b;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1075,10 +1031,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1088,8 +1042,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  },
@@ -1102,43 +1056,41 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompatibleCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string()
+ var openaiCompatibleCompletionResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number()
  }).nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number()
  }).nullish()
  }),
  errorSchema
@@ -1152,14 +1104,31 @@ import {
  combineHeaders as combineHeaders3,
  createJsonErrorResponseHandler as createJsonErrorResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z7 } from "zod";
+
+ // src/openai-compatible-embedding-options.ts
+ import { z as z6 } from "zod";
+ var openaiCompatibleEmbeddingProviderOptions = z6.object({
+ /**
+ * The number of dimensions the resulting output embeddings should have.
+ * Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z6.number().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help providers to
+ * monitor and detect abuse.
+ */
+ user: z6.string().optional()
+ });
+
+ // src/openai-compatible-embedding-model.ts
  var OpenAICompatibleEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
@@ -1173,12 +1142,28 @@ var OpenAICompatibleEmbeddingModel = class {
  var _a;
  return (_a = this.config.supportsParallelCalls) != null ? _a : true;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
- var _a;
+ var _a, _b, _c;
+ const compatibleOptions = Object.assign(
+ (_a = await parseProviderOptions3({
+ provider: "openai-compatible",
+ providerOptions,
+ schema: openaiCompatibleEmbeddingProviderOptions
+ })) != null ? _a : {},
+ (_b = await parseProviderOptions3({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleEmbeddingProviderOptions
+ })) != null ? _b : {}
+ );
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
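
Note: the embedding model moves to specification v2 and reads `dimensions`/`user` from per-call provider options, merging the generic namespace with the provider-specific one (the latter wins). A hedged sketch, assuming an `embed` API matching this canary (`example` is a placeholder name):

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { embed } from 'ai';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

await embed({
  model: provider.textEmbeddingModel('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    'openai-compatible': { dimensions: 256 }, // generic base
    example: { user: 'user-1234' },           // provider-specific override
  },
});
```
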
@@ -1187,7 +1172,11 @@ var OpenAICompatibleEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1197,11 +1186,11 @@ var OpenAICompatibleEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: compatibleOptions.dimensions,
+ user: compatibleOptions.user
  },
  failedResponseHandler: createJsonErrorResponseHandler3(
- (_a = this.config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure
+ (_c = this.config.errorStructure) != null ? _c : defaultOpenAICompatibleErrorStructure
  ),
  successfulResponseHandler: createJsonResponseHandler3(
  openaiTextEmbeddingResponseSchema
@@ -1212,13 +1201,13 @@ var OpenAICompatibleEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
  });

  // src/openai-compatible-image-model.ts
@@ -1228,7 +1217,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z8 } from "zod";
  var OpenAICompatibleImageModel = class {
  constructor(modelId, settings, config) {
  this.modelId = modelId;
@@ -1301,8 +1290,8 @@ var OpenAICompatibleImageModel = class {
  };
  }
  };
- var openaiCompatibleImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiCompatibleImageResponseSchema = z8.object({
+ data: z8.array(z8.object({ b64_json: z8.string() }))
  });

  // src/openai-compatible-provider.ts
@@ -1326,27 +1315,24 @@ function createOpenAICompatible(options) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
- const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
- ...getCommonModelConfig("chat"),
- defaultObjectGenerationMode: "tool"
- });
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+ const createLanguageModel = (modelId) => createChatModel(modelId);
+ const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(
  modelId,
- settings,
- getCommonModelConfig("completion")
+ getCommonModelConfig("chat")
  );
- const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+ const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
- getCommonModelConfig("embedding")
+ getCommonModelConfig("completion")
  );
+ const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
+ ...getCommonModelConfig("embedding")
+ });
  const createImageModel = (modelId, settings = {}) => new OpenAICompatibleImageModel(
  modelId,
  settings,
  getCommonModelConfig("image")
  );
- const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+ const provider = (modelId) => createLanguageModel(modelId);
  provider.languageModel = createLanguageModel;
  provider.chatModel = createChatModel;
  provider.completionModel = createCompletionModel;
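
Note: model factories no longer accept a per-model settings object; request-scoped settings move to `providerOptions`. A call-site sketch of the new provider signature (name, baseURL, and model ids are placeholders):

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

// was: provider('model-id', { user: 'user-1234' })
const model = provider('model-id');
const completions = provider.completionModel('completion-model-id');
```
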