@ai-sdk/openai-compatible 1.0.0-canary.1 → 1.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -9,18 +9,18 @@ import {
   createJsonResponseHandler,
   generateId,
   isParsableJson,
+  parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";
 
 // src/convert-to-openai-compatible-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
 function getOpenAIMetadata(message) {
   var _a, _b;
-  return (_b = (_a = message == null ? void 0 : message.providerMetadata) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
+  return (_b = (_a = message == null ? void 0 : message.providerOptions) == null ? void 0 : _a.openaiCompatible) != null ? _b : {};
 }
 function convertToOpenAICompatibleChatMessages(prompt) {
   const messages = [];
@@ -43,25 +43,26 @@ function convertToOpenAICompatibleChatMessages(prompt) {
         messages.push({
           role: "user",
           content: content.map((part) => {
-            var _a;
             const partMetadata = getOpenAIMetadata(part);
             switch (part.type) {
               case "text": {
                 return { type: "text", text: part.text, ...partMetadata };
               }
-              case "image": {
-                return {
-                  type: "image_url",
-                  image_url: {
-                    url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
-                  },
-                  ...partMetadata
-                };
-              }
               case "file": {
-                throw new UnsupportedFunctionalityError({
-                  functionality: "File content parts in user messages"
-                });
+                if (part.mediaType.startsWith("image/")) {
+                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                  return {
+                    type: "image_url",
+                    image_url: {
+                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+                    },
+                    ...partMetadata
+                  };
+                } else {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: `file part media type ${part.mediaType}`
+                  });
+                }
               }
             }
           }),
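
Note: the v2 prompt format drops standalone `image` parts; images now arrive as `file` parts carrying a `mediaType` plus either a URL or an already-base64-encoded `data` string, which is also why `convertUint8ArrayToBase64` is no longer imported. A minimal TypeScript sketch of the mapping above (type and function names are illustrative, not package exports):

// Sketch, assuming `data` is already a base64 string whenever it is not a URL.
type IllustrativeFilePart = { type: "file"; mediaType: string; data: string | URL };

function toImageUrlPart(part: IllustrativeFilePart) {
  if (!part.mediaType.startsWith("image/")) {
    throw new Error(`file part media type ${part.mediaType}`);
  }
  // "image/*" is normalized to a concrete default before building the data URL:
  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  return {
    type: "image_url" as const,
    image_url: {
      url: part.data instanceof URL
        ? part.data.toString()
        : `data:${mediaType};base64,${part.data}`,
    },
  };
}
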
@@ -152,17 +153,27 @@ function mapOpenAICompatibleFinishReason(finishReason) {
   }
 }
 
-// src/openai-compatible-error.ts
+// src/openai-compatible-chat-options.ts
 import { z } from "zod";
-var openaiCompatibleErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
+var openaiCompatibleProviderOptions = z.object({
+  /**
+   * A unique identifier representing your end-user, which can help the provider to
+   * monitor and detect abuse.
+   */
+  user: z.string().optional()
+});
+
+// src/openai-compatible-error.ts
+import { z as z2 } from "zod";
+var openaiCompatibleErrorDataSchema = z2.object({
+  error: z2.object({
+    message: z2.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type: z.string().nullish(),
-    param: z.any().nullish(),
-    code: z.union([z.string(), z.number()]).nullish()
+    type: z2.string().nullish(),
+    param: z2.any().nullish(),
+    code: z2.union([z2.string(), z2.number()]).nullish()
   })
 });
 var defaultOpenAICompatibleErrorStructure = {
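
The new `openai-compatible-chat-options.ts` module replaces the old constructor-level `user` setting with a per-call, schema-validated provider option. A hedged usage sketch with the AI SDK's `generateText` (provider construction is shown at the end of this diff; the model id is a placeholder):

import { generateText } from "ai";

const { text } = await generateText({
  model: provider("example-chat-model"),
  prompt: "Hello!",
  providerOptions: {
    // parsed against openaiCompatibleProviderOptions and sent as the `user` field:
    "openai-compatible": { user: "user-123" },
  },
});
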
@@ -175,16 +186,14 @@ import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
 function prepareTools({
-  mode,
-  structuredOutputs
+  tools,
+  toolChoice
 }) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings };
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
-  const toolChoice = mode.toolChoice;
   const openaiCompatTools = [];
   for (const tool of tools) {
     if (tool.type === "provider-defined") {
@@ -201,29 +210,27 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiCompatTools, tool_choice: void 0, toolWarnings };
+    return { tools: openaiCompatTools, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiCompatTools, tool_choice: type, toolWarnings };
+      return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
     case "tool":
       return {
         tools: openaiCompatTools,
-        tool_choice: {
+        toolChoice: {
           type: "function",
-          function: {
-            name: toolChoice.toolName
-          }
+          function: { name: toolChoice.toolName }
         },
         toolWarnings
       };
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
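
`prepareTools` now receives the standardized `tools` and `toolChoice` call options directly instead of unpacking a `mode` object, and returns a camelCase `toolChoice` that `getArgs` later maps onto the wire-level `tool_choice` field. An illustrative input/output pairing (the tool definition shape is abbreviated and follows the v2 spec as best understood; this is a sketch, not a package fixture):

const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [{
    type: "function",
    name: "getWeather",
    description: "Get the weather for a city",
    parameters: { type: "object", properties: { city: { type: "string" } } },
  }],
  toolChoice: { type: "tool", toolName: "getWeather" },
});
// tools      -> [{ type: "function", function: { name: "getWeather", ... } }]
// toolChoice -> { type: "function", function: { name: "getWeather" } }
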
@@ -232,11 +239,10 @@ function prepareTools({
 // src/openai-compatible-chat-language-model.ts
 var OpenAICompatibleChatLanguageModel = class {
   // type inferred via constructor
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
     var _a, _b;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
     const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
     this.chunkSchema = createOpenAICompatibleChatChunkSchema(
@@ -245,37 +251,47 @@ var OpenAICompatibleChatLanguageModel = class {
     this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
     this.supportsStructuredOutputs = (_b = config.supportsStructuredOutputs) != null ? _b : false;
   }
-  get defaultObjectGenerationMode() {
-    return this.config.defaultObjectGenerationMode;
-  }
   get provider() {
     return this.config.provider;
   }
   get providerOptionsName() {
     return this.config.provider.split(".")[0].trim();
   }
+  async getSupportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
     frequencyPenalty,
     presencePenalty,
-    providerMetadata,
+    providerOptions,
     stopSequences,
     responseFormat,
-    seed
+    seed,
+    toolChoice,
+    tools
   }) {
-    var _a, _b;
-    const type = mode.type;
+    var _a, _b, _c;
     const warnings = [];
+    const compatibleOptions = Object.assign(
+      (_a = parseProviderOptions({
+        provider: "openai-compatible",
+        providerOptions,
+        schema: openaiCompatibleProviderOptions
+      })) != null ? _a : {},
+      (_b = parseProviderOptions({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompatibleProviderOptions
+      })) != null ? _b : {}
+    );
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
     if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
       warnings.push({
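
Because `getArgs` parses the same schema under both the generic `openai-compatible` key and the provider's own name (the first segment of `config.provider`), merging the results with `Object.assign`, the provider-named key wins on conflict. Sketch, for a hypothetical provider registered as `example.chat`:

// Both spellings feed the same merge; `example` takes precedence if both are set:
const viaGenericKey = { "openai-compatible": { user: "user-123" } };
const viaProviderKey = { example: { user: "user-456" } };
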
@@ -284,88 +300,48 @@ var OpenAICompatibleChatLanguageModel = class {
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
-        type: "json_schema",
-        json_schema: {
-          schema: responseFormat.schema,
-          name: (_a = responseFormat.name) != null ? _a : "response",
-          description: responseFormat.description
-        }
-      } : { type: "json_object" } : void 0,
-      stop: stopSequences,
-      seed,
-      ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
-      // messages:
-      messages: convertToOpenAICompatibleChatMessages(prompt)
+    const {
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice
+    });
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        user: compatibleOptions.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" } : void 0,
+        stop: stopSequences,
+        seed,
+        ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+        // messages:
+        messages: convertToOpenAICompatibleChatMessages(prompt),
+        // tools:
+        tools: openaiTools,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
     };
-    switch (type) {
-      case "regular": {
-        const { tools, tool_choice, toolWarnings } = prepareTools({
-          mode,
-          structuredOutputs: this.supportsStructuredOutputs
-        });
-        return {
-          args: { ...baseArgs, tools, tool_choice },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: this.supportsStructuredOutputs === true && mode.schema != null ? {
-              type: "json_schema",
-              json_schema: {
-                schema: mode.schema,
-                name: (_b = mode.name) != null ? _b : "response",
-                description: mode.description
-              }
-            } : { type: "json_object" }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            ...baseArgs,
-            tool_choice: {
-              type: "function",
-              function: { name: mode.tool.name }
-            },
-            tools: [
-              {
-                type: "function",
-                function: {
-                  name: mode.tool.name,
-                  description: mode.tool.description,
-                  parameters: mode.tool.parameters
-                }
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = this.getArgs({ ...options });
     const body = JSON.stringify(args);
     const {
@@ -386,16 +362,39 @@ var OpenAICompatibleChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const choice = responseBody.choices[0];
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    const reasoning = choice.message.reasoning_content;
+    if (reasoning != null && reasoning.length > 0) {
+      content.push({
+        type: "reasoning",
+        reasoningType: "text",
+        text: reasoning
+      });
+    }
+    if (choice.message.tool_calls != null) {
+      for (const toolCall of choice.message.tool_calls) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
+          toolName: toolCall.function.name,
+          args: toolCall.function.arguments
+        });
+      }
+    }
     const providerMetadata = {
       [this.providerOptionsName]: {},
-      ...(_b = (_a = this.config.metadataExtractor) == null ? void 0 : _a.extractMetadata) == null ? void 0 : _b.call(_a, {
+      ...(_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
         parsedBody: rawResponse
       })
     };
-    const completionTokenDetails = (_c = responseBody.usage) == null ? void 0 : _c.completion_tokens_details;
-    const promptTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.prompt_tokens_details;
+    const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
+    const promptTokenDetails = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens_details;
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
       providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
     }
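
`doGenerate` no longer returns separate `text`, `reasoning`, and `toolCalls` fields; it assembles the single ordered `content` array built above. An invented example of the resulting shape:

// Hypothetical content array for a response containing all three part kinds:
const content = [
  { type: "text", text: "It is sunny in Berlin." },
  { type: "reasoning", reasoningType: "text", text: "The user asked about weather..." },
  {
    type: "tool-call",
    toolCallType: "function",
    toolCallId: "call_1", // generateId() is used when the API omits an id
    toolName: "getWeather",
    args: '{"city":"Berlin"}',
  },
];
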
@@ -409,87 +408,31 @@ var OpenAICompatibleChatLanguageModel = class {
       providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
     return {
-      text: (_e = choice.message.content) != null ? _e : void 0,
-      reasoning: (_f = choice.message.reasoning_content) != null ? _f : void 0,
-      toolCalls: (_g = choice.message.tool_calls) == null ? void 0 : _g.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
-        completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
+        inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       providerMetadata,
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(responseBody),
-      warnings,
-      request: { body }
+      request: { body },
+      response: {
+        ...getResponseMetadata(responseBody),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
     };
   }
   async doStream(options) {
     var _a;
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.reasoning) {
-            if (Array.isArray(result.reasoning)) {
-              for (const part of result.reasoning) {
-                if (part.type === "text") {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
-              }
-            } else {
-              controller.enqueue({
-                type: "reasoning",
-                textDelta: result.reasoning
-              });
-            }
-          }
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        rawCall: result.rawCall,
-        rawResponse: result.rawResponse,
-        warnings: result.warnings
-      };
-    }
     const { args, warnings } = this.getArgs({ ...options });
-    const body = JSON.stringify({ ...args, stream: true });
+    const body = {
+      ...args,
+      stream: true,
+      // only include stream_options when in strict compatibility mode:
+      stream_options: this.config.includeUsage ? { include_usage: true } : void 0
+    };
     const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
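
Streaming requests now opt into usage accounting via `stream_options` when the provider config sets `includeUsage`; the `simulateStreaming` setting is gone entirely. A sketch of the resulting request body (model id and message invented):

// Body built by doStream when config.includeUsage is true:
const body = {
  model: "example-chat-model",
  messages: [{ role: "user", content: "Hi" }],
  stream: true,
  stream_options: { include_usage: true }, // undefined, i.e. omitted, when includeUsage is falsy
};
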
@@ -497,10 +440,7 @@ var OpenAICompatibleChatLanguageModel = class {
         modelId: this.modelId
       }),
       headers: combineHeaders(this.config.headers(), options.headers),
-      body: {
-        ...args,
-        stream: true
-      },
+      body,
       failedResponseHandler: this.failedResponseHandler,
       successfulResponseHandler: createEventSourceResponseHandler(
         this.chunkSchema
@@ -508,7 +448,6 @@ var OpenAICompatibleChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     let usage = {
@@ -528,6 +467,9 @@ var OpenAICompatibleChatLanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
           transform(chunk, controller) {
             var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
@@ -585,13 +527,14 @@ var OpenAICompatibleChatLanguageModel = class {
             if (delta.reasoning_content != null) {
               controller.enqueue({
                 type: "reasoning",
-                textDelta: delta.reasoning_content
+                reasoningType: "text",
+                text: delta.reasoning_content
               });
             }
             if (delta.content != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: delta.content
+                type: "text",
+                text: delta.content
               });
             }
             if (delta.tool_calls != null) {
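
Stream parts follow the v2 naming: the stream opens with a `stream-start` part carrying the warnings, text deltas become `{ type: "text", text }` instead of `{ type: "text-delta", textDelta }`, and reasoning deltas gain a `reasoningType`. An invented sequence for a short streamed reply:

// Illustrative part sequence emitted by the new doStream:
const parts = [
  { type: "stream-start", warnings: [] },
  { type: "reasoning", reasoningType: "text", text: "Thinking..." },
  { type: "text", text: "Hello" },
  { type: "text", text: " world" },
  { type: "finish", finishReason: "stop", usage: { inputTokens: 5, outputTokens: 2 } },
];
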
@@ -698,83 +641,81 @@ var OpenAICompatibleChatLanguageModel = class {
               type: "finish",
               finishReason,
               usage: {
-                promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+                inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+                outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
               },
               providerMetadata
             });
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiCompatibleTokenUsageSchema = z2.object({
-  prompt_tokens: z2.number().nullish(),
-  completion_tokens: z2.number().nullish(),
-  prompt_tokens_details: z2.object({
-    cached_tokens: z2.number().nullish()
+var openaiCompatibleTokenUsageSchema = z3.object({
+  prompt_tokens: z3.number().nullish(),
+  completion_tokens: z3.number().nullish(),
+  prompt_tokens_details: z3.object({
+    cached_tokens: z3.number().nullish()
   }).nullish(),
-  completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish(),
-    accepted_prediction_tokens: z2.number().nullish(),
-    rejected_prediction_tokens: z2.number().nullish()
+  completion_tokens_details: z3.object({
+    reasoning_tokens: z3.number().nullish(),
+    accepted_prediction_tokens: z3.number().nullish(),
+    rejected_prediction_tokens: z3.number().nullish()
   }).nullish()
 }).nullish();
-var OpenAICompatibleChatResponseSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      message: z2.object({
-        role: z2.literal("assistant").nullish(),
-        content: z2.string().nullish(),
-        reasoning_content: z2.string().nullish(),
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string().nullish(),
-            type: z2.literal("function"),
-            function: z2.object({
-              name: z2.string(),
-              arguments: z2.string()
+var OpenAICompatibleChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant").nullish(),
+        content: z3.string().nullish(),
+        reasoning_content: z3.string().nullish(),
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string().nullish(),
+            type: z3.literal("function"),
+            function: z3.object({
+              name: z3.string(),
+              arguments: z3.string()
             })
          })
        ).nullish()
      }),
-      finish_reason: z2.string().nullish()
+      finish_reason: z3.string().nullish()
    })
  ),
  usage: openaiCompatibleTokenUsageSchema
 });
-var createOpenAICompatibleChatChunkSchema = (errorSchema) => z2.union([
-  z2.object({
-    id: z2.string().nullish(),
-    created: z2.number().nullish(),
-    model: z2.string().nullish(),
-    choices: z2.array(
-      z2.object({
-        delta: z2.object({
-          role: z2.enum(["assistant"]).nullish(),
-          content: z2.string().nullish(),
-          reasoning_content: z2.string().nullish(),
-          tool_calls: z2.array(
-            z2.object({
-              index: z2.number(),
-              id: z2.string().nullish(),
-              type: z2.literal("function").optional(),
-              function: z2.object({
-                name: z2.string().nullish(),
-                arguments: z2.string().nullish()
+var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
+  z3.object({
+    id: z3.string().nullish(),
+    created: z3.number().nullish(),
+    model: z3.string().nullish(),
+    choices: z3.array(
+      z3.object({
+        delta: z3.object({
+          role: z3.enum(["assistant"]).nullish(),
+          content: z3.string().nullish(),
+          reasoning_content: z3.string().nullish(),
+          tool_calls: z3.array(
+            z3.object({
+              index: z3.number(),
+              id: z3.string().nullish(),
+              type: z3.literal("function").nullish(),
+              function: z3.object({
+                name: z3.string().nullish(),
+                arguments: z3.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        finish_reason: z2.string().nullish()
+        finish_reason: z3.string().nullish()
      })
    ),
    usage: openaiCompatibleTokenUsageSchema
@@ -783,17 +724,15 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z2.union([
 ]);
 
 // src/openai-compatible-completion-language-model.ts
-import {
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
-} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders2,
   createEventSourceResponseHandler as createEventSourceResponseHandler2,
   createJsonErrorResponseHandler as createJsonErrorResponseHandler2,
   createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z5 } from "zod";
 
 // src/convert-to-openai-compatible-completion-prompt.ts
 import {
@@ -830,13 +769,8 @@ function convertToOpenAICompatibleCompletionPrompt({
           case "text": {
             return part.text;
           }
-          case "image": {
-            throw new UnsupportedFunctionalityError3({
-              functionality: "images"
-            });
-          }
         }
-      }).join("");
+      }).filter(Boolean).join("");
       text += `${user}:
 ${userMessage}
 
@@ -882,15 +816,38 @@ ${user}:`]
   };
 }
 
+// src/openai-compatible-completion-options.ts
+import { z as z4 } from "zod";
+var openaiCompatibleCompletionProviderOptions = z4.object({
+  /**
+   * Echo back the prompt in addition to the completion.
+   */
+  echo: z4.boolean().optional(),
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: z4.record(z4.number(), z4.number()).optional(),
+  /**
+   * The suffix that comes after a completion of inserted text.
+   */
+  suffix: z4.string().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help providers to
+   * monitor and detect abuse.
+   */
+  user: z4.string().optional()
+});
+
 // src/openai-compatible-completion-language-model.ts
 var OpenAICompatibleCompletionLanguageModel = class {
   // type inferred via constructor
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.defaultObjectGenerationMode = void 0;
     var _a;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
     const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
     this.chunkSchema = createOpenAICompatibleCompletionChunkSchema(
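
As with the chat model, the completion model's `echo`, `logitBias`, `suffix`, and `user` constructor settings become per-call provider options validated by `openaiCompatibleCompletionProviderOptions`. A hedged sketch (the `example` key stands in for the provider's actual name prefix):

import { generateText } from "ai";

const { text } = await generateText({
  model: provider.completionModel("example-completion-model"),
  prompt: "Write a haiku",
  providerOptions: {
    example: {
      echo: true,
      logitBias: { 50256: -100 }, // token id -> bias in [-100, 100]
      suffix: "\n---",
      user: "user-123",
    },
  },
});
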
@@ -904,11 +861,14 @@ var OpenAICompatibleCompletionLanguageModel = class {
   get providerOptionsName() {
     return this.config.provider.split(".")[0].trim();
   }
+  async getSupportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   getArgs({
-    mode,
     inputFormat,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -917,16 +877,25 @@ var OpenAICompatibleCompletionLanguageModel = class {
     stopSequences: userStopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    providerOptions,
+    tools,
+    toolChoice
   }) {
     var _a;
-    const type = mode.type;
     const warnings = [];
+    const completionOptions = (_a = parseProviderOptions2({
+      provider: this.providerOptionsName,
+      providerOptions,
+      schema: openaiCompatibleCompletionProviderOptions
+    })) != null ? _a : {};
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
+    }
+    if (tools == null ? void 0 : tools.length) {
+      warnings.push({ type: "unsupported-setting", setting: "tools" });
+    }
+    if (toolChoice != null) {
+      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
     }
     if (responseFormat != null && responseFormat.type !== "text") {
       warnings.push({
@@ -937,56 +906,30 @@ var OpenAICompatibleCompletionLanguageModel = class {
     }
     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      echo: this.settings.echo,
-      logit_bias: this.settings.logitBias,
-      suffix: this.settings.suffix,
-      user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      seed,
-      ...providerMetadata == null ? void 0 : providerMetadata[this.providerOptionsName],
-      // prompt:
-      prompt: completionPrompt,
-      // stop sequences:
-      stop: stop.length > 0 ? stop : void 0
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        echo: completionOptions.echo,
+        logit_bias: completionOptions.logitBias,
+        suffix: completionOptions.suffix,
+        user: completionOptions.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        seed,
+        ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+        // prompt:
+        prompt: completionPrompt,
+        // stop sequences:
+        stop: stop.length > 0 ? stop : void 0
+      },
+      warnings
     };
-    switch (type) {
-      case "regular": {
-        if ((_a = mode.tools) == null ? void 0 : _a.length) {
-          throw new UnsupportedFunctionalityError4({
-            functionality: "tools"
-          });
-        }
-        if (mode.toolChoice) {
-          throw new UnsupportedFunctionalityError4({
-            functionality: "toolChoice"
-          });
-        }
-        return { args: baseArgs, warnings };
-      }
-      case "object-json": {
-        throw new UnsupportedFunctionalityError4({
-          functionality: "object-json mode"
-        });
-      }
-      case "object-tool": {
-        throw new UnsupportedFunctionalityError4({
-          functionality: "object-tool mode"
-        });
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
     var _a, _b, _c, _d;
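
Unsupported call options on the completion model (tools, tool choice, and the old object-json/object-tool modes) no longer throw `UnsupportedFunctionalityError`; they are reported as `unsupported-setting` warnings while the request proceeds. The logic reduces to roughly this (inputs invented):

const tools = [{ type: "function", name: "getWeather" }];
const toolChoice = { type: "auto" };
const warnings: Array<{ type: "unsupported-setting"; setting: string }> = [];
if (tools?.length) {
  warnings.push({ type: "unsupported-setting", setting: "tools" });
}
if (toolChoice != null) {
  warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
}
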
@@ -1009,27 +952,34 @@ var OpenAICompatibleCompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
+    const content = [];
+    if (choice.text != null && choice.text.length > 0) {
+      content.push({ type: "text", text: choice.text });
+    }
     return {
-      text: choice.text,
+      content,
       usage: {
-        promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
-        completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+        inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
+        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
      },
      finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
-      warnings,
-      request: { body: JSON.stringify(args) }
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
    };
  }
  async doStream(options) {
    const { args, warnings } = this.getArgs(options);
    const body = {
      ...args,
-      stream: true
+      stream: true,
+      // only include stream_options when in strict compatibility mode:
+      stream_options: this.config.includeUsage ? { include_usage: true } : void 0
    };
    const { responseHeaders, value: response } = await postJsonToApi2({
      url: this.config.url({
@@ -1045,17 +995,20 @@ var OpenAICompatibleCompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
+            var _a, _b;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1075,10 +1028,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
              });
            }
            if (value.usage != null) {
-              usage = {
-                promptTokens: value.usage.prompt_tokens,
-                completionTokens: value.usage.completion_tokens
-              };
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1088,8 +1039,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
            }
            }
            if ((choice == null ? void 0 : choice.text) != null) {
              controller.enqueue({
-                type: "text-delta",
-                textDelta: choice.text
+                type: "text",
+                text: choice.text
              });
            }
@@ -1102,43 +1053,41 @@ var OpenAICompatibleCompletionLanguageModel = class {
          }
        })
      ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body: JSON.stringify(body) }
+      request: { body },
+      response: { headers: responseHeaders }
    };
  }
 };
-var openaiCompatibleCompletionResponseSchema = z3.object({
-  id: z3.string().nullish(),
-  created: z3.number().nullish(),
-  model: z3.string().nullish(),
-  choices: z3.array(
-    z3.object({
-      text: z3.string(),
-      finish_reason: z3.string()
+var openaiCompatibleCompletionResponseSchema = z5.object({
+  id: z5.string().nullish(),
+  created: z5.number().nullish(),
+  model: z5.string().nullish(),
+  choices: z5.array(
+    z5.object({
+      text: z5.string(),
+      finish_reason: z5.string()
    })
  ),
-  usage: z3.object({
-    prompt_tokens: z3.number(),
-    completion_tokens: z3.number()
+  usage: z5.object({
+    prompt_tokens: z5.number(),
+    completion_tokens: z5.number()
  }).nullish()
 });
-var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z3.union([
-  z3.object({
-    id: z3.string().nullish(),
-    created: z3.number().nullish(),
-    model: z3.string().nullish(),
-    choices: z3.array(
-      z3.object({
-        text: z3.string(),
-        finish_reason: z3.string().nullish(),
-        index: z3.number()
+var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
+  z5.object({
+    id: z5.string().nullish(),
+    created: z5.number().nullish(),
+    model: z5.string().nullish(),
+    choices: z5.array(
+      z5.object({
+        text: z5.string(),
+        finish_reason: z5.string().nullish(),
+        index: z5.number()
      })
    ),
-    usage: z3.object({
-      prompt_tokens: z3.number(),
-      completion_tokens: z3.number()
+    usage: z5.object({
+      prompt_tokens: z5.number(),
+      completion_tokens: z5.number()
    }).nullish()
  }),
  errorSchema
@@ -1152,14 +1101,31 @@ import {
   combineHeaders as combineHeaders3,
   createJsonErrorResponseHandler as createJsonErrorResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler3,
+  parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z4 } from "zod";
+import { z as z7 } from "zod";
+
+// src/openai-compatible-embedding-options.ts
+import { z as z6 } from "zod";
+var openaiCompatibleEmbeddingProviderOptions = z6.object({
+  /**
+   * The number of dimensions the resulting output embeddings should have.
+   * Only supported in text-embedding-3 and later models.
+   */
+  dimensions: z6.number().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help providers to
+   * monitor and detect abuse.
+   */
+  user: z6.string().optional()
+});
+
+// src/openai-compatible-embedding-model.ts
 var OpenAICompatibleEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
@@ -1173,12 +1139,28 @@ var OpenAICompatibleEmbeddingModel = class {
     var _a;
     return (_a = this.config.supportsParallelCalls) != null ? _a : true;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
-    var _a;
+    var _a, _b, _c;
+    const compatibleOptions = Object.assign(
+      (_a = parseProviderOptions3({
+        provider: "openai-compatible",
+        providerOptions,
+        schema: openaiCompatibleEmbeddingProviderOptions
+      })) != null ? _a : {},
+      (_b = parseProviderOptions3({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompatibleEmbeddingProviderOptions
+      })) != null ? _b : {}
+    );
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
         provider: this.provider,
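
Embeddings follow the same pattern: `dimensions` and `user` move from constructor settings to per-call provider options, accepted under `openai-compatible` or the provider's own key. A sketch with the AI SDK's `embed` helper, assuming the provider exposes its embedding model as `textEmbeddingModel` (that assignment falls outside this diff; ids are placeholders):

import { embed } from "ai";

const { embedding } = await embed({
  model: provider.textEmbeddingModel("example-embedding-model"),
  value: "sunny day at the beach",
  providerOptions: {
    "openai-compatible": { dimensions: 256, user: "user-123" },
  },
});
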
@@ -1187,7 +1169,11 @@ var OpenAICompatibleEmbeddingModel = class {
         values
       });
     }
-    const { responseHeaders, value: response } = await postJsonToApi3({
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi3({
       url: this.config.url({
         path: "/embeddings",
         modelId: this.modelId
@@ -1197,11 +1183,11 @@ var OpenAICompatibleEmbeddingModel = class {
         model: this.modelId,
         input: values,
         encoding_format: "float",
-        dimensions: this.settings.dimensions,
-        user: this.settings.user
+        dimensions: compatibleOptions.dimensions,
+        user: compatibleOptions.user
       },
       failedResponseHandler: createJsonErrorResponseHandler3(
-        (_a = this.config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure
+        (_c = this.config.errorStructure) != null ? _c : defaultOpenAICompatibleErrorStructure
       ),
       successfulResponseHandler: createJsonResponseHandler3(
         openaiTextEmbeddingResponseSchema
@@ -1212,13 +1198,13 @@ var OpenAICompatibleEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-      rawResponse: { headers: responseHeaders }
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = z4.object({
-  data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
-  usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = z7.object({
+  data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
 });
 
 // src/openai-compatible-image-model.ts
@@ -1228,7 +1214,7 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z5 } from "zod";
+import { z as z8 } from "zod";
 var OpenAICompatibleImageModel = class {
   constructor(modelId, settings, config) {
     this.modelId = modelId;
@@ -1301,8 +1287,8 @@ var OpenAICompatibleImageModel = class {
     };
   }
 };
-var openaiCompatibleImageResponseSchema = z5.object({
-  data: z5.array(z5.object({ b64_json: z5.string() }))
+var openaiCompatibleImageResponseSchema = z8.object({
+  data: z8.array(z8.object({ b64_json: z8.string() }))
 });
 
 // src/openai-compatible-provider.ts
@@ -1326,27 +1312,24 @@ function createOpenAICompatible(options) {
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
-  const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
-    ...getCommonModelConfig("chat"),
-    defaultObjectGenerationMode: "tool"
-  });
-  const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+  const createLanguageModel = (modelId) => createChatModel(modelId);
+  const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(
     modelId,
-    settings,
-    getCommonModelConfig("completion")
+    getCommonModelConfig("chat")
   );
-  const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+  const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
     modelId,
-    settings,
-    getCommonModelConfig("embedding")
+    getCommonModelConfig("completion")
  );
+  const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
+    ...getCommonModelConfig("embedding")
+  });
   const createImageModel = (modelId, settings = {}) => new OpenAICompatibleImageModel(
     modelId,
     settings,
     getCommonModelConfig("image")
   );
-  const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+  const provider = (modelId) => createLanguageModel(modelId);
   provider.languageModel = createLanguageModel;
   provider.chatModel = createChatModel;
   provider.completionModel = createCompletionModel;
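
Taken together with the constructor changes above, the factory no longer threads a `settings` object through model creation (only the image model keeps one), and `defaultObjectGenerationMode` is gone entirely. Creating and using a provider now looks roughly like this (base URL and ids are placeholders):

import { createOpenAICompatible } from "@ai-sdk/openai-compatible";

const provider = createOpenAICompatible({
  name: "example",
  baseURL: "https://api.example.com/v1",
  apiKey: process.env.EXAMPLE_API_KEY,
});

// No second `settings` argument anymore:
const chatModel = provider("example-chat-model");
const completionModel = provider.completionModel("example-completion-model");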