@ai-sdk/google 3.0.47 → 3.0.48

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -7,7 +7,7 @@ import {
 } from "@ai-sdk/provider-utils";
 
 // src/version.ts
-var VERSION = true ? "3.0.47" : "0.0.0-test";
+var VERSION = true ? "3.0.48" : "0.0.0-test";
 
 // src/google-generative-ai-embedding-model.ts
 import {
@@ -53,6 +53,15 @@ import {
   zodSchema as zodSchema2
 } from "@ai-sdk/provider-utils";
 import { z as z2 } from "zod/v4";
+var googleEmbeddingContentPartSchema = z2.union([
+  z2.object({ text: z2.string() }),
+  z2.object({
+    inlineData: z2.object({
+      mimeType: z2.string(),
+      data: z2.string()
+    })
+  })
+]);
 var googleEmbeddingModelOptions = lazySchema2(
   () => zodSchema2(
     z2.object({
@@ -82,7 +91,13 @@ var googleEmbeddingModelOptions = lazySchema2(
         "QUESTION_ANSWERING",
         "FACT_VERIFICATION",
         "CODE_RETRIEVAL_QUERY"
-      ]).optional()
+      ]).optional(),
+      /**
+       * Optional. Multimodal content parts for embedding non-text content
+       * (images, video, PDF, audio). When provided, these parts are used
+       * instead of the text values in the embedding request.
+       */
+      content: z2.array(googleEmbeddingContentPartSchema).min(1).optional()
     })
   )
 );
@@ -105,6 +120,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
     abortSignal,
     providerOptions
   }) {
+    var _a;
     const googleOptions = await parseProviderOptions({
       provider: "google",
       providerOptions,
@@ -122,7 +138,10 @@ var GoogleGenerativeAIEmbeddingModel = class {
       await resolve(this.config.headers),
       headers
     );
+    const multimodalContent = (_a = googleOptions == null ? void 0 : googleOptions.content) != null ? _a : [];
     if (values.length === 1) {
+      const textPart = values[0] ? [{ text: values[0] }] : [];
+      const parts = multimodalContent.length > 0 ? [...textPart, ...multimodalContent] : [{ text: values[0] }];
       const {
         responseHeaders: responseHeaders2,
         value: response2,
@@ -133,7 +152,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
         body: {
           model: `models/${this.modelId}`,
           content: {
-            parts: [{ text: values[0] }]
+            parts
           },
           outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
           taskType: googleOptions == null ? void 0 : googleOptions.taskType
@@ -160,12 +179,18 @@ var GoogleGenerativeAIEmbeddingModel = class {
       url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
       headers: mergedHeaders,
       body: {
-        requests: values.map((value) => ({
-          model: `models/${this.modelId}`,
-          content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
-          taskType: googleOptions == null ? void 0 : googleOptions.taskType
-        }))
+        requests: values.map((value) => {
+          const textPart = value ? [{ text: value }] : [];
+          return {
+            model: `models/${this.modelId}`,
+            content: {
+              role: "user",
+              parts: multimodalContent.length > 0 ? [...textPart, ...multimodalContent] : [{ text: value }]
+            },
+            outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+            taskType: googleOptions == null ? void 0 : googleOptions.taskType
+          };
+        })
       },
       failedResponseHandler: googleFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(