@ai-sdk/google 4.0.0-beta.33 → 4.0.0-beta.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ai-sdk/google",
-  "version": "4.0.0-beta.33",
+  "version": "4.0.0-beta.35",
   "license": "Apache-2.0",
   "sideEffects": false,
   "main": "./dist/index.js",
@@ -37,7 +37,7 @@
   },
   "dependencies": {
     "@ai-sdk/provider": "4.0.0-beta.10",
-    "@ai-sdk/provider-utils": "5.0.0-beta.17"
+    "@ai-sdk/provider-utils": "5.0.0-beta.18"
   },
   "devDependencies": {
     "@types/node": "20.17.24",
@@ -7,13 +7,11 @@ import {
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
-  convertUint8ArrayToBase64,
   createJsonResponseHandler,
   delay,
   type FetchFunction,
   lazySchema,
   parseProviderOptions,
-  postJsonToApi,
   zodSchema,
   getFromApi,
 } from '@ai-sdk/provider-utils';
@@ -42,10 +42,7 @@ import {
   GoogleGenerativeAIModelId,
   googleLanguageModelOptions,
 } from './google-generative-ai-options';
-import {
-  GoogleGenerativeAIContentPart,
-  GoogleGenerativeAIProviderMetadata,
-} from './google-generative-ai-prompt';
+import { GoogleGenerativeAIProviderMetadata } from './google-generative-ai-prompt';
 import { prepareTools } from './google-prepare-tools';
 import { GoogleJSONAccumulator, PartialArg } from './google-json-accumulator';
 import { mapGoogleGenerativeAIFinishReason } from './map-google-generative-ai-finish-reason';
@@ -88,22 +85,25 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV4 {
     return this.config.supportedUrls?.() ?? {};
   }
 
-  private async getArgs({
-    prompt,
-    maxOutputTokens,
-    temperature,
-    topP,
-    topK,
-    frequencyPenalty,
-    presencePenalty,
-    stopSequences,
-    responseFormat,
-    seed,
-    tools,
-    toolChoice,
-    reasoning,
-    providerOptions,
-  }: LanguageModelV4CallOptions) {
+  private async getArgs(
+    {
+      prompt,
+      maxOutputTokens,
+      temperature,
+      topP,
+      topK,
+      frequencyPenalty,
+      presencePenalty,
+      stopSequences,
+      responseFormat,
+      seed,
+      tools,
+      toolChoice,
+      reasoning,
+      providerOptions,
+    }: LanguageModelV4CallOptions,
+    { isStreaming = false }: { isStreaming?: boolean } = {},
+  ) {
     const warnings: SharedV4Warning[] = [];
 
     const providerOptionsName = this.config.provider.includes('vertex')
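
Note: the hunk above widens getArgs from a single destructured parameter to two, the second being an optional options object that defaults to {}. A minimal standalone sketch of the same TypeScript pattern (the names here are illustrative, not taken from the package):

    // The trailing `= {}` keeps existing one-argument call sites compiling;
    // `isStreaming` then falls back to false inside the destructuring.
    function getArgs(
      { prompt }: { prompt: string },
      { isStreaming = false }: { isStreaming?: boolean } = {},
    ) {
      return { prompt, isStreaming };
    }

    getArgs({ prompt: 'hi' });                        // isStreaming: false
    getArgs({ prompt: 'hi' }, { isStreaming: true }); // isStreaming: true
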
@@ -184,9 +184,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV4 {
       ? { ...resolvedThinking, ...googleOptions?.thinkingConfig }
       : undefined;
 
-    const streamFunctionCallArguments = isVertexProvider
-      ? (googleOptions?.streamFunctionCallArguments ?? true)
-      : undefined;
+    const streamFunctionCallArguments =
+      isStreaming && isVertexProvider
+        ? (googleOptions?.streamFunctionCallArguments ?? false)
+        : undefined;
 
     const toolConfig =
       googleToolConfig ||
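
Note: this hunk changes both the gating and the default of streamFunctionCallArguments: the value is now only computed for streaming calls on Vertex (isStreaming && isVertexProvider, rather than isVertexProvider alone), and an unset option now falls back to false instead of true. A hedged sketch of the new resolution logic in isolation (in the real code these inputs come from the provider config and the parsed provider options):

    function resolveStreamFunctionCallArguments(
      isStreaming: boolean,
      isVertexProvider: boolean,
      optionValue: boolean | undefined,
    ): boolean | undefined {
      // undefined means the field is omitted from the request body entirely.
      return isStreaming && isVertexProvider ? (optionValue ?? false) : undefined;
    }

    resolveStreamFunctionCallArguments(true, true, undefined); // false: the new default
    resolveStreamFunctionCallArguments(true, true, true);      // true: explicit opt-in
    resolveStreamFunctionCallArguments(false, true, true);     // undefined: non-streaming call
    resolveStreamFunctionCallArguments(true, false, true);     // undefined: Gemini API
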
@@ -478,7 +479,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV4 {
   async doStream(
     options: LanguageModelV4CallOptions,
   ): Promise<LanguageModelV4StreamResult> {
-    const { args, warnings, providerOptionsName } = await this.getArgs(options);
+    const { args, warnings, providerOptionsName } = await this.getArgs(
+      options,
+      { isStreaming: true },
+    );
 
     const headers = combineHeaders(
       await resolve(this.config.headers),
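
Note: only doStream passes { isStreaming: true }. doGenerate is untouched by this diff, so it keeps the one-argument call and picks up the isStreaming = false default, which means non-streaming requests no longer carry the streamFunctionCallArguments field at all. A compressed sketch of the two call sites (hypothetical class with reduced signatures):

    class Sketch {
      private getArgs(options: object, { isStreaming = false } = {}) {
        return { options, isStreaming };
      }
      doGenerate(options: object) {
        return this.getArgs(options); // isStreaming: false, field omitted
      }
      doStream(options: object) {
        return this.getArgs(options, { isStreaming: true });
      }
    }
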
@@ -1408,17 +1412,10 @@ const responseSchema = lazySchema(() =>
   ),
 );
 
-type ContentSchema = NonNullable<
-  InferSchema<typeof responseSchema>['candidates'][number]['content']
->;
 export type GroundingMetadataSchema = NonNullable<
   InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']
 >;
 
-type GroundingChunkSchema = NonNullable<
-  GroundingMetadataSchema['groundingChunks']
->[number];
-
 export type UrlContextMetadataSchema = NonNullable<
   InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']
 >;
@@ -192,9 +192,10 @@ export const googleLanguageModelOptions = lazySchema(() =>
      /**
       * Optional. When set to true, function call arguments will be streamed
       * incrementally via partialArgs in streaming responses. Only supported
-      * on the Vertex AI API (not the Gemini API).
+      * on the Vertex AI API (not the Gemini API) and only for Gemini 3+
+      * models.
       *
-      * @default true
+      * @default false
       *
       * https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#streaming-fc
       */
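
Note: combined with the @default change above, incremental function-call argument streaming is now opt-in. A hedged usage sketch, assuming the AI SDK's streamText and tool helpers from the ai package and the @ai-sdk/google-vertex provider; the model id is illustrative (per the doc comment, Gemini 3+ only), and the providerOptions key is assumed to be 'google' (the providerOptionsName branch earlier in this diff suggests Vertex instances may use a different key):

    import { streamText, tool } from 'ai';
    import { vertex } from '@ai-sdk/google-vertex';
    import { z } from 'zod';

    const result = streamText({
      model: vertex('gemini-3-pro-preview'), // illustrative Gemini 3+ model id
      prompt: 'What is the weather in Berlin?',
      tools: {
        weather: tool({
          description: 'Get the weather for a city',
          inputSchema: z.object({ city: z.string() }),
          execute: async ({ city }) => ({ city, tempC: 21 }),
        }),
      },
      providerOptions: {
        // Opt back in: as of this release the flag defaults to false.
        google: { streamFunctionCallArguments: true },
      },
    });

    for await (const part of result.fullStream) {
      if (part.type === 'tool-input-delta') {
        console.log(part); // incremental JSON fragments of the tool-call arguments
      }
    }
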