@ai-sdk/openai-compatible 3.0.0-beta.7 → 3.0.0-beta.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-beta.9
4
+
5
+ ### Patch Changes
6
+
7
+ - 74d520f: feat: migrate providers to support new top-level `reasoning` parameter
8
+
9
+ ## 3.0.0-beta.8
10
+
11
+ ### Patch Changes
12
+
13
+ - Updated dependencies [3887c70]
14
+ - @ai-sdk/provider-utils@5.0.0-beta.6
15
+ - @ai-sdk/provider@4.0.0-beta.4
16
+
3
17
  ## 3.0.0-beta.7
4
18
 
5
19
  ### Patch Changes
@@ -324,13 +338,13 @@
324
338
  Before
325
339
 
326
340
  ```ts
327
- model.textEmbeddingModel('my-model-id');
341
+ model.textEmbeddingModel("my-model-id");
328
342
  ```
329
343
 
330
344
  After
331
345
 
332
346
  ```ts
333
- model.embeddingModel('my-model-id');
347
+ model.embeddingModel("my-model-id");
334
348
  ```
335
349
 
336
350
  - 2625a04: feat(openai): update spec for mcp approval
@@ -545,13 +559,13 @@
545
559
  Before
546
560
 
547
561
  ```ts
548
- model.textEmbeddingModel('my-model-id');
562
+ model.textEmbeddingModel("my-model-id");
549
563
  ```
550
564
 
551
565
  After
552
566
 
553
567
  ```ts
554
- model.embeddingModel('my-model-id');
568
+ model.embeddingModel("my-model-id");
555
569
  ```
556
570
 
557
571
  - Updated dependencies [8d9e8ad]
@@ -987,7 +1001,7 @@
987
1001
 
988
1002
  ```js
989
1003
  await generateImage({
990
- model: luma.image('photon-flash-1', {
1004
+ model: luma.image("photon-flash-1", {
991
1005
  maxImagesPerCall: 5,
992
1006
  pollIntervalMillis: 500,
993
1007
  }),
@@ -1000,7 +1014,7 @@
1000
1014
 
1001
1015
  ```js
1002
1016
  await generateImage({
1003
- model: luma.image('photon-flash-1'),
1017
+ model: luma.image("photon-flash-1"),
1004
1018
  prompt,
1005
1019
  n: 10,
1006
1020
  maxImagesPerCall: 5,
@@ -1269,7 +1283,7 @@
1269
1283
 
1270
1284
  ```js
1271
1285
  await generateImage({
1272
- model: luma.image('photon-flash-1', {
1286
+ model: luma.image("photon-flash-1", {
1273
1287
  maxImagesPerCall: 5,
1274
1288
  pollIntervalMillis: 500,
1275
1289
  }),
@@ -1282,7 +1296,7 @@
1282
1296
 
1283
1297
  ```js
1284
1298
  await generateImage({
1285
- model: luma.image('photon-flash-1'),
1299
+ model: luma.image("photon-flash-1"),
1286
1300
  prompt,
1287
1301
  n: 10,
1288
1302
  maxImagesPerCall: 5,
package/dist/index.js CHANGED
@@ -441,6 +441,7 @@ var OpenAICompatibleChatLanguageModel = class {
441
441
  topK,
442
442
  frequencyPenalty,
443
443
  presencePenalty,
444
+ reasoning,
444
445
  providerOptions,
445
446
  stopSequences,
446
447
  responseFormat,
@@ -448,7 +449,7 @@ var OpenAICompatibleChatLanguageModel = class {
448
449
  toolChoice,
449
450
  tools
450
451
  }) {
451
- var _a, _b, _c, _d, _e;
452
+ var _a, _b, _c, _d, _e, _f;
452
453
  const warnings = [];
453
454
  const deprecatedOptions = await (0, import_provider_utils2.parseProviderOptions)({
454
455
  provider: "openai-compatible",
@@ -525,7 +526,7 @@ var OpenAICompatibleChatLanguageModel = class {
525
526
  ).includes(key)
526
527
  )
527
528
  ),
528
- reasoning_effort: compatibleOptions.reasoningEffort,
529
+ reasoning_effort: (_f = compatibleOptions.reasoningEffort) != null ? _f : (0, import_provider_utils2.isCustomReasoning)(reasoning) && reasoning !== "none" ? reasoning : void 0,
529
530
  verbosity: compatibleOptions.textVerbosity,
530
531
  // messages:
531
532
  messages: convertToOpenAICompatibleChatMessages(prompt),
@@ -1665,7 +1666,7 @@ function toCamelCase(str) {
1665
1666
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
1666
1667
 
1667
1668
  // src/version.ts
1668
- var VERSION = true ? "3.0.0-beta.7" : "0.0.0-test";
1669
+ var VERSION = true ? "3.0.0-beta.9" : "0.0.0-test";
1669
1670
 
1670
1671
  // src/openai-compatible-provider.ts
1671
1672
  function createOpenAICompatible(options) {