@ai-sdk/google-vertex 5.0.0-beta.40 → 5.0.0-beta.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/anthropic/edge/index.js +1 -1
- package/dist/anthropic/edge/index.mjs +1 -1
- package/dist/edge/index.js +1 -1
- package/dist/edge/index.mjs +1 -1
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/dist/maas/edge/index.js +1 -1
- package/dist/maas/edge/index.mjs +1 -1
- package/docs/16-google-vertex.mdx +63 -0
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
|
@@ -32,7 +32,7 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
|
32
32
|
var import_provider_utils = require("@ai-sdk/provider-utils");
|
|
33
33
|
|
|
34
34
|
// src/version.ts
|
|
35
|
-
var VERSION = true ? "5.0.0-beta.40" : "0.0.0-test";
|
|
35
|
+
var VERSION = true ? "5.0.0-beta.41" : "0.0.0-test";
|
|
36
36
|
|
|
37
37
|
// src/edge/google-vertex-auth-edge.ts
|
|
38
38
|
var loadCredentials = async () => {
|
|
@@ -10,7 +10,7 @@ import {
|
|
|
10
10
|
} from "@ai-sdk/provider-utils";
|
|
11
11
|
|
|
12
12
|
// src/version.ts
|
|
13
|
-
var VERSION = true ? "5.0.0-beta.40" : "0.0.0-test";
|
|
13
|
+
var VERSION = true ? "5.0.0-beta.41" : "0.0.0-test";
|
|
14
14
|
|
|
15
15
|
// src/edge/google-vertex-auth-edge.ts
|
|
16
16
|
var loadCredentials = async () => {
|
package/dist/edge/index.js
CHANGED
|
@@ -33,7 +33,7 @@ var import_internal3 = require("@ai-sdk/google/internal");
|
|
|
33
33
|
var import_provider_utils5 = require("@ai-sdk/provider-utils");
|
|
34
34
|
|
|
35
35
|
// src/version.ts
|
|
36
|
-
var VERSION = true ? "5.0.0-beta.40" : "0.0.0-test";
|
|
36
|
+
var VERSION = true ? "5.0.0-beta.41" : "0.0.0-test";
|
|
37
37
|
|
|
38
38
|
// src/google-vertex-embedding-model.ts
|
|
39
39
|
var import_provider = require("@ai-sdk/provider");
|
package/dist/edge/index.mjs
CHANGED
package/dist/index.js
CHANGED
|
@@ -55,7 +55,7 @@ var import_internal3 = require("@ai-sdk/google/internal");
|
|
|
55
55
|
var import_provider_utils5 = require("@ai-sdk/provider-utils");
|
|
56
56
|
|
|
57
57
|
// src/version.ts
|
|
58
|
-
var VERSION = true ? "5.0.0-beta.40" : "0.0.0-test";
|
|
58
|
+
var VERSION = true ? "5.0.0-beta.41" : "0.0.0-test";
|
|
59
59
|
|
|
60
60
|
// src/google-vertex-embedding-model.ts
|
|
61
61
|
var import_provider = require("@ai-sdk/provider");
|
package/dist/index.mjs
CHANGED
package/dist/maas/edge/index.js
CHANGED
|
@@ -32,7 +32,7 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
|
32
32
|
var import_provider_utils = require("@ai-sdk/provider-utils");
|
|
33
33
|
|
|
34
34
|
// src/version.ts
|
|
35
|
-
var VERSION = true ? "5.0.0-beta.40" : "0.0.0-test";
|
|
35
|
+
var VERSION = true ? "5.0.0-beta.41" : "0.0.0-test";
|
|
36
36
|
|
|
37
37
|
// src/edge/google-vertex-auth-edge.ts
|
|
38
38
|
var loadCredentials = async () => {
|
package/dist/maas/edge/index.mjs
CHANGED
|
@@ -10,7 +10,7 @@ import {
|
|
|
10
10
|
} from "@ai-sdk/provider-utils";
|
|
11
11
|
|
|
12
12
|
// src/version.ts
|
|
13
|
-
var VERSION = true ? "5.0.0-beta.40" : "0.0.0-test";
|
|
13
|
+
var VERSION = true ? "5.0.0-beta.41" : "0.0.0-test";
|
|
14
14
|
|
|
15
15
|
// src/edge/google-vertex-auth-edge.ts
|
|
16
16
|
var loadCredentials = async () => {
|
|
@@ -345,6 +345,15 @@ The following optional provider options are available for Google Vertex models:
|
|
|
345
345
|
|
|
346
346
|
Consult [Google's Documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls) for usage details.
|
|
347
347
|
|
|
348
|
+
- **streamFunctionCallArguments** _boolean_
|
|
349
|
+
|
|
350
|
+
Optional. When set to true, function call arguments will be streamed
|
|
351
|
+
incrementally in streaming responses. This enables `tool-input-delta` events
|
|
352
|
+
to arrive as the model generates function call arguments, reducing perceived
|
|
353
|
+
latency for tool calls. Defaults to `true` for Vertex AI providers. Only supported on the Vertex AI API (not the Gemini API).
|
|
354
|
+
|
|
355
|
+
Consult [Google's Documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#streaming-fc) for details.
|
|
356
|
+
|
|
348
357
|
You can use Google Vertex language models to generate text with the `generateText` function:
|
|
349
358
|
|
|
350
359
|
```ts highlight="1,4"
|
|
@@ -454,6 +463,60 @@ const result = await generateText({
|
|
|
454
463
|
|
|
455
464
|
The optional `retrievalConfig.latLng` provider option provides location context for queries about nearby places. This configuration applies to any grounding tools that support location context.
|
|
456
465
|
|
|
466
|
+
#### Streaming Function Call Arguments
|
|
467
|
+
|
|
468
|
+
For Gemini 3 Pro and later models on Vertex AI, you can stream function call
|
|
469
|
+
arguments as they are generated by setting `streamFunctionCallArguments` to
|
|
470
|
+
`true`. This reduces perceived latency when functions need to be called, as
|
|
471
|
+
`tool-input-delta` events arrive incrementally instead of waiting for the
|
|
472
|
+
complete arguments. This option is `true` by default and you can opt out by
|
|
473
|
+
setting it to false.
|
|
474
|
+
|
|
475
|
+
```ts
|
|
476
|
+
import { vertex } from '@ai-sdk/google-vertex';
|
|
477
|
+
import { type GoogleLanguageModelOptions } from '@ai-sdk/google';
|
|
478
|
+
import { streamText } from 'ai';
|
|
479
|
+
import { z } from 'zod';
|
|
480
|
+
|
|
481
|
+
const result = streamText({
|
|
482
|
+
model: vertex('gemini-3.1-pro-preview'),
|
|
483
|
+
prompt: 'What is the weather in Boston and San Francisco?',
|
|
484
|
+
tools: {
|
|
485
|
+
getWeather: {
|
|
486
|
+
description: 'Get the current weather in a given location',
|
|
487
|
+
inputSchema: z.object({
|
|
488
|
+
location: z.string().describe('City name'),
|
|
489
|
+
}),
|
|
490
|
+
},
|
|
491
|
+
},
|
|
492
|
+
providerOptions: {
|
|
493
|
+
vertex: {
|
|
494
|
+
streamFunctionCallArguments: false,
|
|
495
|
+
} satisfies GoogleLanguageModelOptions,
|
|
496
|
+
},
|
|
497
|
+
});
|
|
498
|
+
|
|
499
|
+
for await (const part of result.fullStream) {
|
|
500
|
+
switch (part.type) {
|
|
501
|
+
case 'tool-input-start':
|
|
502
|
+
console.log(`Tool call started: ${part.toolName}`);
|
|
503
|
+
break;
|
|
504
|
+
case 'tool-input-delta':
|
|
505
|
+
process.stdout.write(part.delta);
|
|
506
|
+
break;
|
|
507
|
+
case 'tool-call':
|
|
508
|
+
console.log(`Tool call complete: ${part.toolName}`, part.input);
|
|
509
|
+
break;
|
|
510
|
+
}
|
|
511
|
+
}
|
|
512
|
+
```
|
|
513
|
+
|
|
514
|
+
<Note>
|
|
515
|
+
This feature is only available on the Vertex AI API. It is not supported on
|
|
516
|
+
the Gemini API. When used with the Google Generative AI provider, a warning
|
|
517
|
+
will be emitted and the option will be ignored.
|
|
518
|
+
</Note>
|
|
519
|
+
|
|
457
520
|
#### Reasoning (Thinking Tokens)
|
|
458
521
|
|
|
459
522
|
Google Vertex AI, through its support for Gemini models, can also emit "thinking" tokens, representing the model's reasoning process. The AI SDK exposes these as reasoning information.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@ai-sdk/google-vertex",
|
|
3
|
-
"version": "5.0.0-beta.40",
|
|
3
|
+
"version": "5.0.0-beta.41",
|
|
4
4
|
"license": "Apache-2.0",
|
|
5
5
|
"sideEffects": false,
|
|
6
6
|
"main": "./dist/index.js",
|
|
@@ -61,7 +61,7 @@
|
|
|
61
61
|
"dependencies": {
|
|
62
62
|
"google-auth-library": "^10.5.0",
|
|
63
63
|
"@ai-sdk/anthropic": "4.0.0-beta.23",
|
|
64
|
-
"@ai-sdk/google": "4.0.0-beta.
|
|
64
|
+
"@ai-sdk/google": "4.0.0-beta.32",
|
|
65
65
|
"@ai-sdk/openai-compatible": "3.0.0-beta.21",
|
|
66
66
|
"@ai-sdk/provider": "4.0.0-beta.10",
|
|
67
67
|
"@ai-sdk/provider-utils": "5.0.0-beta.16"
|