@ai-sdk/google 3.0.48 → 3.0.50

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
package/dist/index.mjs CHANGED
@@ -7,7 +7,7 @@ import {
 } from "@ai-sdk/provider-utils";
 
 // src/version.ts
-var VERSION = true ? "3.0.48" : "0.0.0-test";
+var VERSION = true ? "3.0.50" : "0.0.0-test";
 
 // src/google-generative-ai-embedding-model.ts
 import {
@@ -93,11 +93,15 @@ var googleEmbeddingModelOptions = lazySchema2(
         "CODE_RETRIEVAL_QUERY"
       ]).optional(),
       /**
-       * Optional. Multimodal content parts for embedding non-text content
-       * (images, video, PDF, audio). When provided, these parts are used
-       * instead of the text values in the embedding request.
+       * Optional. Per-value multimodal content parts for embedding non-text
+       * content (images, video, PDF, audio). Each entry corresponds to the
+       * embedding value at the same index and its parts are merged with the
+       * text value in the request. Use `null` for entries that are text-only.
+       *
+       * The array length must match the number of values being embedded. In
+       * the case of a single embedding, the array length must be 1.
       */
-      content: z2.array(googleEmbeddingContentPartSchema).min(1).optional()
+      content: z2.array(z2.array(googleEmbeddingContentPartSchema).min(1).nullable()).optional()
     })
   )
 );
@@ -120,7 +124,6 @@ var GoogleGenerativeAIEmbeddingModel = class {
     abortSignal,
     providerOptions
   }) {
-    var _a;
     const googleOptions = await parseProviderOptions({
       provider: "google",
       providerOptions,
@@ -138,10 +141,16 @@ var GoogleGenerativeAIEmbeddingModel = class {
       await resolve(this.config.headers),
       headers
     );
-    const multimodalContent = (_a = googleOptions == null ? void 0 : googleOptions.content) != null ? _a : [];
+    const multimodalContent = googleOptions == null ? void 0 : googleOptions.content;
+    if (multimodalContent != null && multimodalContent.length !== values.length) {
+      throw new Error(
+        `The number of multimodal content entries (${multimodalContent.length}) must match the number of values (${values.length}).`
+      );
+    }
     if (values.length === 1) {
+      const valueParts = multimodalContent == null ? void 0 : multimodalContent[0];
       const textPart = values[0] ? [{ text: values[0] }] : [];
-      const parts = multimodalContent.length > 0 ? [...textPart, ...multimodalContent] : [{ text: values[0] }];
+      const parts = valueParts != null ? [...textPart, ...valueParts] : [{ text: values[0] }];
       const {
         responseHeaders: responseHeaders2,
         value: response2,
@@ -179,13 +188,14 @@ var GoogleGenerativeAIEmbeddingModel = class {
       url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
       headers: mergedHeaders,
       body: {
-        requests: values.map((value) => {
+        requests: values.map((value, index) => {
+          const valueParts = multimodalContent == null ? void 0 : multimodalContent[index];
           const textPart = value ? [{ text: value }] : [];
           return {
             model: `models/${this.modelId}`,
             content: {
               role: "user",
-              parts: multimodalContent.length > 0 ? [...textPart, ...multimodalContent] : [{ text: value }]
+              parts: valueParts != null ? [...textPart, ...valueParts] : [{ text: value }]
             },
             outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
             taskType: googleOptions == null ? void 0 : googleOptions.taskType
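
Taken together, these hunks repurpose the `content` provider option: instead of one flat list of multimodal parts appended to every request, `content` is now an array with one entry per embedding value, where `content[i]` supplies the extra parts for `values[i]` and `null` marks a text-only value; its length must equal the number of values or the model throws. A minimal usage sketch of the new shape follows. The `embedMany` call, the model id, and the `inlineData` part format are illustrative assumptions; the actual part shape is defined by `googleEmbeddingContentPartSchema`, which is not part of this diff.

import { google } from "@ai-sdk/google";
import { embedMany } from "ai";

// Hypothetical base64 payload for the image embedded alongside the first value.
const base64Image = "<base64-encoded PNG bytes>";

const { embeddings } = await embedMany({
  model: google.textEmbedding("gemini-embedding-001"),
  values: ["photo of my cat", "plain text query"],
  providerOptions: {
    google: {
      // One entry per value; use null for values with no extra parts.
      content: [
        [{ inlineData: { mimeType: "image/png", data: base64Image } }],
        null
      ]
    }
  }
});

Under 3.0.48 the option was a single flat part array shared by every request; that shape no longer passes the new nested schema, so callers should wrap each value's parts in its own array (or pass `null`) when upgrading.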