@ai-sdk/google 4.0.0-beta.32 → 4.0.0-beta.34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.js +10 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +10 -6
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +9 -5
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +9 -5
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
- package/src/google-generative-ai-language-model.ts +27 -20
- package/src/google-generative-ai-options.ts +3 -2
package/dist/internal/index.mjs
CHANGED
|
@@ -674,9 +674,10 @@ var googleLanguageModelOptions = lazySchema2(
     /**
      * Optional. When set to true, function call arguments will be streamed
      * incrementally via partialArgs in streaming responses. Only supported
-     * on the Vertex AI API (not the Gemini API)
+     * on the Vertex AI API (not the Gemini API) and only for Gemini 3+
+     * models.
      *
-     * @default
+     * @default false
      *
      * https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#streaming-fc
      */
@@ -1221,7 +1222,7 @@ var GoogleGenerativeAILanguageModel = class {
     toolChoice,
     reasoning,
     providerOptions
-  }) {
+  }, { isStreaming = false } = {}) {
     var _a, _b;
     const warnings = [];
     const providerOptionsName = this.config.provider.includes("vertex") ? "vertex" : "google";
@@ -1277,7 +1278,7 @@
       warnings
     });
     const thinkingConfig = (googleOptions == null ? void 0 : googleOptions.thinkingConfig) || resolvedThinking ? { ...resolvedThinking, ...googleOptions == null ? void 0 : googleOptions.thinkingConfig } : void 0;
-    const streamFunctionCallArguments = isVertexProvider ? (_a = googleOptions == null ? void 0 : googleOptions.streamFunctionCallArguments) != null ? _a :
+    const streamFunctionCallArguments = isStreaming && isVertexProvider ? (_a = googleOptions == null ? void 0 : googleOptions.streamFunctionCallArguments) != null ? _a : false : void 0;
     const toolConfig = googleToolConfig || streamFunctionCallArguments || (googleOptions == null ? void 0 : googleOptions.retrievalConfig) ? {
       ...googleToolConfig,
       ...streamFunctionCallArguments && {
@@ -1515,7 +1516,10 @@ var GoogleGenerativeAILanguageModel = class {
|
|
|
1515
1516
|
};
|
|
1516
1517
|
}
|
|
1517
1518
|
async doStream(options) {
|
|
1518
|
-
const { args, warnings, providerOptionsName } = await this.getArgs(
|
|
1519
|
+
const { args, warnings, providerOptionsName } = await this.getArgs(
|
|
1520
|
+
options,
|
|
1521
|
+
{ isStreaming: true }
|
|
1522
|
+
);
|
|
1519
1523
|
const headers = combineHeaders(
|
|
1520
1524
|
await resolve(this.config.headers),
|
|
1521
1525
|
options.headers
|