@ai-sdk/google 4.0.0-beta.12 → 4.0.0-beta.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
1
1
  # @ai-sdk/google
2
2
 
3
+ ## 4.0.0-beta.14
4
+
5
+ ### Patch Changes
6
+
7
+ - 3887c70: feat(provider): add new top-level reasoning parameter to spec and support it in `generateText` and `streamText`
8
+ - Updated dependencies [3887c70]
9
+ - @ai-sdk/provider-utils@5.0.0-beta.6
10
+ - @ai-sdk/provider@4.0.0-beta.4
11
+
12
+ ## 4.0.0-beta.13
13
+
14
+ ### Patch Changes
15
+
16
+ - Updated dependencies [776b617]
17
+ - @ai-sdk/provider-utils@5.0.0-beta.5
18
+ - @ai-sdk/provider@4.0.0-beta.3
19
+
3
20
  ## 4.0.0-beta.12
4
21
 
5
22
  ### Patch Changes
@@ -405,13 +422,13 @@
405
422
  Before
406
423
 
407
424
  ```ts
408
- model.textEmbeddingModel('my-model-id');
425
+ model.textEmbeddingModel("my-model-id");
409
426
  ```
410
427
 
411
428
  After
412
429
 
413
430
  ```ts
414
- model.embeddingModel('my-model-id');
431
+ model.embeddingModel("my-model-id");
415
432
  ```
416
433
 
417
434
 - 2625a04: feat(openai): update spec for mcp approval
@@ -720,13 +737,13 @@
720
737
  Before
721
738
 
722
739
  ```ts
723
- model.textEmbeddingModel('my-model-id');
740
+ model.textEmbeddingModel("my-model-id");
724
741
  ```
725
742
 
726
743
  After
727
744
 
728
745
  ```ts
729
- model.embeddingModel('my-model-id');
746
+ model.embeddingModel("my-model-id");
730
747
  ```
731
748
 
732
749
  - Updated dependencies [8d9e8ad]
package/dist/index.js CHANGED
@@ -30,7 +30,7 @@ module.exports = __toCommonJS(index_exports);
30
30
  var import_provider_utils16 = require("@ai-sdk/provider-utils");
31
31
 
32
32
  // src/version.ts
33
- var VERSION = true ? "4.0.0-beta.12" : "0.0.0-test";
33
+ var VERSION = true ? "4.0.0-beta.14" : "0.0.0-test";
34
34
 
35
35
  // src/google-generative-ai-embedding-model.ts
36
36
  var import_provider = require("@ai-sdk/provider");
@@ -977,6 +977,7 @@ var GoogleGenerativeAILanguageModel = class {
977
977
  seed,
978
978
  tools,
979
979
  toolChoice,
980
+ reasoning,
980
981
  providerOptions
981
982
  }) {
982
983
  var _a;
@@ -1016,6 +1017,12 @@ var GoogleGenerativeAILanguageModel = class {
1016
1017
  toolChoice,
1017
1018
  modelId: this.modelId
1018
1019
  });
1020
+ const resolvedThinking = resolveThinkingConfig({
1021
+ reasoning,
1022
+ modelId: this.modelId,
1023
+ warnings
1024
+ });
1025
+ const thinkingConfig = (googleOptions == null ? void 0 : googleOptions.thinkingConfig) || resolvedThinking ? { ...resolvedThinking, ...googleOptions == null ? void 0 : googleOptions.thinkingConfig } : void 0;
1019
1026
  return {
1020
1027
  args: {
1021
1028
  generationConfig: {
@@ -1039,7 +1046,7 @@ var GoogleGenerativeAILanguageModel = class {
1039
1046
  },
1040
1047
  // provider options:
1041
1048
  responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
1042
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
1049
+ thinkingConfig,
1043
1050
  ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
1044
1051
  mediaResolution: googleOptions.mediaResolution
1045
1052
  },
@@ -1470,6 +1477,75 @@ var GoogleGenerativeAILanguageModel = class {
1470
1477
  };
1471
1478
  }
1472
1479
  };
1480
+ function isGemini3Model(modelId) {
1481
+ return /gemini-3[\.\-]/i.test(modelId) || /gemini-3$/i.test(modelId);
1482
+ }
1483
+ function getMaxOutputTokensForGemini25Model() {
1484
+ return 65536;
1485
+ }
1486
+ function getMaxThinkingTokensForGemini25Model(modelId) {
1487
+ const id = modelId.toLowerCase();
1488
+ if (id.includes("2.5-pro") || id.includes("gemini-3-pro-image")) {
1489
+ return 32768;
1490
+ }
1491
+ return 24576;
1492
+ }
1493
+ function resolveThinkingConfig({
1494
+ reasoning,
1495
+ modelId,
1496
+ warnings
1497
+ }) {
1498
+ if (!(0, import_provider_utils6.isCustomReasoning)(reasoning)) {
1499
+ return void 0;
1500
+ }
1501
+ if (isGemini3Model(modelId) && !modelId.includes("gemini-3-pro-image")) {
1502
+ return resolveGemini3ThinkingConfig({ reasoning, warnings });
1503
+ }
1504
+ return resolveGemini25ThinkingConfig({ reasoning, modelId, warnings });
1505
+ }
1506
+ function resolveGemini3ThinkingConfig({
1507
+ reasoning,
1508
+ warnings
1509
+ }) {
1510
+ if (reasoning === "none") {
1511
+ return { thinkingLevel: "minimal" };
1512
+ }
1513
+ const thinkingLevel = (0, import_provider_utils6.mapReasoningToProviderEffort)({
1514
+ reasoning,
1515
+ effortMap: {
1516
+ minimal: "minimal",
1517
+ low: "low",
1518
+ medium: "medium",
1519
+ high: "high",
1520
+ xhigh: "high"
1521
+ },
1522
+ warnings
1523
+ });
1524
+ if (thinkingLevel == null) {
1525
+ return void 0;
1526
+ }
1527
+ return { thinkingLevel };
1528
+ }
1529
+ function resolveGemini25ThinkingConfig({
1530
+ reasoning,
1531
+ modelId,
1532
+ warnings
1533
+ }) {
1534
+ if (reasoning === "none") {
1535
+ return { thinkingBudget: 0 };
1536
+ }
1537
+ const thinkingBudget = (0, import_provider_utils6.mapReasoningToProviderBudget)({
1538
+ reasoning,
1539
+ maxOutputTokens: getMaxOutputTokensForGemini25Model(),
1540
+ maxReasoningBudget: getMaxThinkingTokensForGemini25Model(modelId),
1541
+ minReasoningBudget: 0,
1542
+ warnings
1543
+ });
1544
+ if (thinkingBudget == null) {
1545
+ return void 0;
1546
+ }
1547
+ return { thinkingBudget };
1548
+ }
1473
1549
  function getToolCallsFromParts({
1474
1550
  parts,
1475
1551
  generateId: generateId3,