@langchain/google-common 0.2.14 → 0.2.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -242,6 +242,12 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "labels", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
@@ -55,6 +55,7 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
     speechConfig: GoogleSpeechConfig;
     streamUsage: boolean;
     streaming: boolean;
+    labels?: Record<string, string>;
     protected connection: ChatConnection<AuthOptions>;
     protected streamedConnection: ChatConnection<AuthOptions>;
     constructor(fields?: ChatGoogleBaseInput<AuthOptions>);
@@ -238,6 +238,12 @@ export class ChatGoogleBase extends BaseChatModel {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "labels", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
@@ -282,12 +282,7 @@ class GoogleAIConnection extends GoogleHostConnection {
     get computedLocation() {
         switch (this.apiName) {
             case "google":
-                if (this.modelName.startsWith("gemini-2.5-flash-lite")) {
-                    return "global";
-                }
-                else {
-                    return super.computedLocation;
-                }
+                return super.computedLocation;
             case "anthropic":
                 return "us-east5";
             default:
@@ -375,7 +370,13 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
         }
     }
     async formatData(input, parameters) {
-        return this.api.formatData(input, parameters);
+        // Filter out labels for non-Vertex AI platforms (labels are only supported on Vertex AI)
+        let filteredParameters = parameters;
+        if (parameters.labels && this.platform !== "gcp") {
+            const { labels, ...paramsWithoutLabels } = parameters;
+            filteredParameters = paramsWithoutLabels;
+        }
+        return this.api.formatData(input, filteredParameters);
     }
 }
 exports.AbstractGoogleLLMConnection = AbstractGoogleLLMConnection;
@@ -2,7 +2,7 @@ import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/ba
 import { AsyncCaller, AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
 import { BaseRunManager } from "@langchain/core/callbacks/manager";
 import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
-import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GoogleAIModelRequestParams, GoogleRawResponse, GoogleAIAPI, VertexModelFamily, GoogleAIAPIConfig } from "./types.js";
+import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GoogleAIModelRequestParams, GoogleRawResponse, GoogleAIAPI, VertexModelFamily, GoogleAIAPIConfig, GoogleModelParams } from "./types.js";
 import { GoogleAbstractedClient, GoogleAbstractedClientOps, GoogleAbstractedClientOpsMethod } from "./auth.js";
 export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse> {
     caller: AsyncCaller;
@@ -64,7 +64,7 @@ export declare abstract class GoogleAIConnection<CallOptions extends AsyncCaller
     buildUrlVertexLocation(): Promise<string>;
     buildUrlVertex(): Promise<string>;
     buildUrl(): Promise<string>;
-    abstract formatData(input: InputType, parameters: GoogleAIModelRequestParams): Promise<unknown>;
+    abstract formatData(input: InputType, parameters: GoogleModelParams): Promise<unknown>;
     request(input: InputType, parameters: GoogleAIModelRequestParams, options: CallOptions, runManager?: BaseRunManager): Promise<ResponseType>;
 }
 export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptions> extends GoogleAIConnection<BaseLanguageModelCallOptions, MessageType, AuthOptions, GoogleLLMResponse> {
@@ -276,12 +276,7 @@ export class GoogleAIConnection extends GoogleHostConnection {
     get computedLocation() {
         switch (this.apiName) {
             case "google":
-                if (this.modelName.startsWith("gemini-2.5-flash-lite")) {
-                    return "global";
-                }
-                else {
-                    return super.computedLocation;
-                }
+                return super.computedLocation;
             case "anthropic":
                 return "us-east5";
             default:
@@ -368,7 +363,13 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
         }
     }
     async formatData(input, parameters) {
-        return this.api.formatData(input, parameters);
+        // Filter out labels for non-Vertex AI platforms (labels are only supported on Vertex AI)
+        let filteredParameters = parameters;
+        if (parameters.labels && this.platform !== "gcp") {
+            const { labels, ...paramsWithoutLabels } = parameters;
+            filteredParameters = paramsWithoutLabels;
+        }
+        return this.api.formatData(input, filteredParameters);
     }
 }
 export class GoogleRequestCallbackHandler extends BaseCallbackHandler {
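
The filter shown in this hunk only affects request formatting: when the connection targets a platform other than Vertex AI (`"gcp"`), any `labels` value is removed before the payload is handed to the API formatter. Below is a minimal standalone sketch of that rule, not the package's internal API; `stripLabelsForPlatform` and the parameter shape are invented for illustration.

```typescript
// Illustration only: mirrors the filtering rule used by formatData above.
// "gcp" is Vertex AI; any other platform (e.g. "gai" for AI Studio) drops labels.
type RequestParams = { labels?: Record<string, string>; temperature?: number };

function stripLabelsForPlatform(parameters: RequestParams, platform: string): RequestParams {
  if (parameters.labels && platform !== "gcp") {
    const { labels, ...paramsWithoutLabels } = parameters;
    return paramsWithoutLabels;
  }
  return parameters;
}

console.log(stripLabelsForPlatform({ labels: { team: "research" }, temperature: 0 }, "gcp"));
// labels are kept for Vertex AI
console.log(stripLabelsForPlatform({ labels: { team: "research" }, temperature: 0 }, "gai"));
// labels are removed for AI Studio
```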
@@ -16,19 +16,63 @@ class EmbeddingsConnection extends connection_js_1.GoogleAIConnection {
             value: void 0
         });
     }
-    async buildUrlMethod() {
+    buildUrlMethodAiStudio() {
+        return "embedContent";
+    }
+    buildUrlMethodVertex() {
         return "predict";
     }
+    async buildUrlMethod() {
+        switch (this.platform) {
+            case "gcp":
+                return this.buildUrlMethodVertex();
+            case "gai":
+                return this.buildUrlMethodAiStudio();
+            default:
+                throw new Error(`Unknown platform when building method: ${this.platform}`);
+        }
+    }
     get modelPublisher() {
         // All the embedding models are currently published by "google"
         return "google";
     }
-    async formatData(input, parameters) {
+    formatDataAiStudio(input, parameters) {
+        const parts = input.map((instance) => ({
+            text: instance.content,
+        }));
+        const content = {
+            parts,
+        };
+        const outputDimensionality = parameters?.outputDimensionality;
+        const ret = {
+            content,
+            outputDimensionality,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    formatDataVertex(input, parameters) {
         return {
             instances: input,
             parameters,
         };
     }
+    async formatData(input, parameters) {
+        switch (this.platform) {
+            case "gcp":
+                return this.formatDataVertex(input, parameters);
+            case "gai":
+                return this.formatDataAiStudio(input, parameters);
+            default:
+                throw new Error(`Unknown platform to format embeddings ${this.platform}`);
+        }
+    }
 }
 /**
  * Enables calls to Google APIs for generating
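
For the same embedding input, the connection now builds two different request bodies depending on the platform: Vertex AI keeps the `predict` endpoint with an `instances` array, while AI Studio uses `embedContent` with a single `content` object. The sketch below is illustrative only (plain object literals, no package internals), and the `outputDimensionality` value is an assumed example.

```typescript
// Illustration only: the two request shapes the connection builds for the same input.
const input = [{ content: "hello world" }];
const outputDimensionality = 256; // assumed example value

// Vertex AI ("gcp"): "predict" method with an instances array.
const vertexBody = { instances: input, parameters: { outputDimensionality } };

// AI Studio ("gai"): "embedContent" method with a single content object.
const aiStudioBody = {
  content: { parts: input.map((instance) => ({ text: instance.content })) },
  outputDimensionality,
};

console.log(JSON.stringify(vertexBody));
console.log(JSON.stringify(aiStudioBody));
```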
@@ -43,6 +87,12 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "dimensions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
@@ -50,6 +100,7 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
             value: void 0
         });
         this.model = fields.model;
+        this.dimensions = fields.dimensions ?? fields.outputDimensionality;
         this.connection = new EmbeddingsConnection({ ...fields, ...this }, this.caller, this.buildClient(fields), false);
     }
     buildApiKeyClient(apiKey) {
@@ -67,6 +118,37 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
             return this.buildAbstractedClient(fields);
         }
     }
+    buildParameters() {
+        const ret = {
+            outputDimensionality: this.dimensions,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    vertexResponseToValues(response) {
+        const predictions = response?.data?.predictions ?? [];
+        return predictions.map((prediction) => prediction.embeddings.values);
+    }
+    aiStudioResponseToValues(response) {
+        const value = response?.data?.embedding?.values ?? [];
+        return [value];
+    }
+    responseToValues(response) {
+        switch (this.connection.platform) {
+            case "gcp":
+                return this.vertexResponseToValues(response);
+            case "gai":
+                return this.aiStudioResponseToValues(response);
+            default:
+                throw new Error(`Unknown response platform: ${this.connection.platform}`);
+        }
+    }
     /**
      * Takes an array of documents as input and returns a promise that
      * resolves to a 2D array of embeddings for each document. It splits the
@@ -76,17 +158,19 @@ class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
      * @returns A promise that resolves to a 2D array of embeddings for each document.
      */
     async embedDocuments(documents) {
+        // Vertex "text-" models could do up 5 documents at once,
+        // but the "gemini-embedding-001" can only do 1.
+        // AI Studio can only do a chunk size of 1.
+        // TODO: Make this configurable
+        const chunkSize = 1;
         const instanceChunks = (0, chunk_array_1.chunkArray)(documents.map((document) => ({
             content: document,
-        })), 5); // Vertex AI accepts max 5 instances per prediction
-        const parameters = {};
+        })), chunkSize);
+        const parameters = this.buildParameters();
         const options = {};
         const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
-        const result = responses
-            ?.map((response) => response?.data?.predictions?.map(
-            // eslint-disable-next-line @typescript-eslint/no-explicit-any
-            (result) => result.embeddings?.values) ?? [])
-            .flat() ?? [];
+        const result = responses?.map((response) => this.responseToValues(response)).flat() ??
+            [];
         return result;
     }
     /**
@@ -1,58 +1,23 @@
-import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
-import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
+import { Embeddings } from "@langchain/core/embeddings";
 import { GoogleAbstractedClient } from "./auth.js";
-import { GoogleConnectionParams, GoogleResponse } from "./types.js";
-/**
- * Defines the parameters required to initialize a
- * GoogleEmbeddings instance. It extends EmbeddingsParams and
- * GoogleConnectionParams.
- */
-export interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
-    model: string;
-}
-/**
- * Defines additional options specific to the
- * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
- */
-export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {
-}
-/**
- * Represents an instance for generating embeddings using the Google
- * Vertex AI API. It contains the content to be embedded.
- */
-export interface GoogleEmbeddingsInstance {
-    content: string;
-}
-/**
- * Defines the structure of the embeddings results returned by the Google
- * Vertex AI API. It extends GoogleBasePrediction and contains the
- * embeddings and their statistics.
- */
-export interface GoogleEmbeddingsResponse extends GoogleResponse {
-    data: {
-        predictions: {
-            embeddings: {
-                statistics: {
-                    token_count: number;
-                    truncated: boolean;
-                };
-                values: number[];
-            };
-        }[];
-    };
-}
+import { BaseGoogleEmbeddingsParams, GoogleConnectionParams, GoogleEmbeddingsResponse, VertexEmbeddingsParameters, VertexEmbeddingsResponse, AIStudioEmbeddingsResponse } from "./types.js";
 /**
  * Enables calls to Google APIs for generating
  * text embeddings.
  */
 export declare abstract class BaseGoogleEmbeddings<AuthOptions> extends Embeddings implements BaseGoogleEmbeddingsParams<AuthOptions> {
     model: string;
+    dimensions?: number;
     private connection;
     constructor(fields: BaseGoogleEmbeddingsParams<AuthOptions>);
     abstract buildAbstractedClient(fields?: GoogleConnectionParams<AuthOptions>): GoogleAbstractedClient;
     buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
     buildApiKey(fields?: GoogleConnectionParams<AuthOptions>): string | undefined;
     buildClient(fields?: GoogleConnectionParams<AuthOptions>): GoogleAbstractedClient;
+    buildParameters(): VertexEmbeddingsParameters;
+    vertexResponseToValues(response: VertexEmbeddingsResponse): number[][];
+    aiStudioResponseToValues(response: AIStudioEmbeddingsResponse): number[][];
+    responseToValues(response: GoogleEmbeddingsResponse): number[][];
     /**
      * Takes an array of documents as input and returns a promise that
      * resolves to a 2D array of embeddings for each document. It splits the
@@ -13,19 +13,63 @@ class EmbeddingsConnection extends GoogleAIConnection {
             value: void 0
         });
     }
-    async buildUrlMethod() {
+    buildUrlMethodAiStudio() {
+        return "embedContent";
+    }
+    buildUrlMethodVertex() {
         return "predict";
     }
+    async buildUrlMethod() {
+        switch (this.platform) {
+            case "gcp":
+                return this.buildUrlMethodVertex();
+            case "gai":
+                return this.buildUrlMethodAiStudio();
+            default:
+                throw new Error(`Unknown platform when building method: ${this.platform}`);
+        }
+    }
     get modelPublisher() {
         // All the embedding models are currently published by "google"
         return "google";
     }
-    async formatData(input, parameters) {
+    formatDataAiStudio(input, parameters) {
+        const parts = input.map((instance) => ({
+            text: instance.content,
+        }));
+        const content = {
+            parts,
+        };
+        const outputDimensionality = parameters?.outputDimensionality;
+        const ret = {
+            content,
+            outputDimensionality,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    formatDataVertex(input, parameters) {
         return {
             instances: input,
             parameters,
         };
     }
+    async formatData(input, parameters) {
+        switch (this.platform) {
+            case "gcp":
+                return this.formatDataVertex(input, parameters);
+            case "gai":
+                return this.formatDataAiStudio(input, parameters);
+            default:
+                throw new Error(`Unknown platform to format embeddings ${this.platform}`);
+        }
+    }
 }
 /**
  * Enables calls to Google APIs for generating
@@ -40,6 +84,12 @@ export class BaseGoogleEmbeddings extends Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "dimensions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
@@ -47,6 +97,7 @@ export class BaseGoogleEmbeddings extends Embeddings {
             value: void 0
         });
         this.model = fields.model;
+        this.dimensions = fields.dimensions ?? fields.outputDimensionality;
         this.connection = new EmbeddingsConnection({ ...fields, ...this }, this.caller, this.buildClient(fields), false);
     }
     buildApiKeyClient(apiKey) {
@@ -64,6 +115,37 @@ export class BaseGoogleEmbeddings extends Embeddings {
             return this.buildAbstractedClient(fields);
         }
     }
+    buildParameters() {
+        const ret = {
+            outputDimensionality: this.dimensions,
+        };
+        // Remove undefined attributes
+        let key;
+        for (key in ret) {
+            if (ret[key] === undefined) {
+                delete ret[key];
+            }
+        }
+        return ret;
+    }
+    vertexResponseToValues(response) {
+        const predictions = response?.data?.predictions ?? [];
+        return predictions.map((prediction) => prediction.embeddings.values);
+    }
+    aiStudioResponseToValues(response) {
+        const value = response?.data?.embedding?.values ?? [];
+        return [value];
+    }
+    responseToValues(response) {
+        switch (this.connection.platform) {
+            case "gcp":
+                return this.vertexResponseToValues(response);
+            case "gai":
+                return this.aiStudioResponseToValues(response);
+            default:
+                throw new Error(`Unknown response platform: ${this.connection.platform}`);
+        }
+    }
     /**
      * Takes an array of documents as input and returns a promise that
      * resolves to a 2D array of embeddings for each document. It splits the
@@ -73,17 +155,19 @@ export class BaseGoogleEmbeddings extends Embeddings {
      * @returns A promise that resolves to a 2D array of embeddings for each document.
      */
     async embedDocuments(documents) {
+        // Vertex "text-" models could do up 5 documents at once,
+        // but the "gemini-embedding-001" can only do 1.
+        // AI Studio can only do a chunk size of 1.
+        // TODO: Make this configurable
+        const chunkSize = 1;
         const instanceChunks = chunkArray(documents.map((document) => ({
             content: document,
-        })), 5); // Vertex AI accepts max 5 instances per prediction
-        const parameters = {};
+        })), chunkSize);
+        const parameters = this.buildParameters();
         const options = {};
         const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
-        const result = responses
-            ?.map((response) => response?.data?.predictions?.map(
-            // eslint-disable-next-line @typescript-eslint/no-explicit-any
-            (result) => result.embeddings?.values) ?? [])
-            .flat() ?? [];
+        const result = responses?.map((response) => this.responseToValues(response)).flat() ??
+            [];
         return result;
     }
     /**
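
With the new `dimensions` field wired through `buildParameters()`, callers can request truncated embedding vectors, and each document is now sent as its own request. A hedged usage sketch follows; it assumes a concrete `VertexAIEmbeddings` subclass from `@langchain/google-vertexai` (only the abstract `BaseGoogleEmbeddings` appears in this diff) and omits credential/project configuration.

```typescript
// Hedged usage sketch: VertexAIEmbeddings is assumed to exist in
// @langchain/google-vertexai and to accept the fields added in this diff;
// auth setup is omitted.
import { VertexAIEmbeddings } from "@langchain/google-vertexai";

const embeddings = new VertexAIEmbeddings({
  model: "gemini-embedding-001",
  // Output vectors are truncated to this many values ("outputDimensionality" is an alias).
  dimensions: 256,
});

// Each document now goes out as its own request (chunk size 1).
const vectors = await embeddings.embedDocuments(["doc one", "doc two"]);
console.log(vectors.length, vectors[0].length); // 2 documents, each truncated to 256 values if the model supports it
```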
package/dist/types.d.ts CHANGED
@@ -2,6 +2,8 @@ import type { BaseLLMParams } from "@langchain/core/language_models/llms";
 import type { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
 import { BaseMessage, BaseMessageChunk, MessageContent } from "@langchain/core/messages";
 import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
+import { EmbeddingsParams } from "@langchain/core/embeddings";
+import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
 import type { JsonStream } from "./utils/stream.js";
 import { MediaManager } from "./experimental/utils/media_core.js";
 import { AnthropicResponseData, AnthropicAPIConfig } from "./types-anthropic.js";
@@ -144,7 +146,7 @@ export type GoogleSpeechSimplifiedLanguage = GoogleSpeechVoiceLanguage | GoogleS
  * It can either be the voice (or voices), or the voice or voices with language configuration
  */
 export type GoogleSpeechConfigSimplified = GoogleSpeechVoice | GoogleSpeechSimplifiedLanguage;
-export interface GoogleAIModelParams {
+export interface GoogleModelParams {
     /** Model to use */
     model?: string;
     /**
@@ -152,6 +154,8 @@ export interface GoogleAIModelParams {
      * Alias for `model`
      */
     modelName?: string;
+}
+export interface GoogleAIModelParams extends GoogleModelParams {
     /** Sampling temperature to use */
     temperature?: number;
     /**
@@ -259,6 +263,23 @@ export interface GoogleAIModelParams {
      * The modalities of the response.
      */
     responseModalities?: GoogleAIModelModality[];
+    /**
+     * Custom metadata labels to associate with the request.
+     * Only supported on Vertex AI (Google Cloud Platform).
+     * Labels are key-value pairs where both keys and values must be strings.
+     *
+     * Example:
+     * ```typescript
+     * {
+     *   labels: {
+     *     "team": "research",
+     *     "component": "frontend",
+     *     "environment": "production"
+     *   }
+     * }
+     * ```
+     */
+    labels?: Record<string, string>;
     /**
      * Speech generation configuration.
      * You can use either Google's definition of the speech configuration,
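
A hedged usage sketch for the new `labels` option follows. It assumes the Vertex AI-backed `ChatVertexAI` subclass from `@langchain/google-vertexai` (not part of this diff) and an example model name; on AI Studio the labels are silently dropped, as shown in the connection changes earlier in this diff. Labels supplied in call options are also consulted, per the `copyAIModelParamsInto` change later in the diff.

```typescript
// Hedged usage sketch: ChatVertexAI (from @langchain/google-vertexai) and the
// model name are assumptions here. Labels only reach the request on Vertex AI.
import { ChatVertexAI } from "@langchain/google-vertexai";

const model = new ChatVertexAI({
  model: "gemini-2.0-flash",
  labels: {
    team: "research",
    environment: "production",
  },
});

const response = await model.invoke("Hello!");
console.log(response.content);
```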
@@ -526,6 +547,10 @@ export interface GeminiRequest {
     safetySettings?: GeminiSafetySetting[];
     generationConfig?: GeminiGenerationConfig;
     cachedContent?: string;
+    /**
+     * Custom metadata labels to associate with the API call.
+     */
+    labels?: Record<string, string>;
 }
 export interface GeminiResponseCandidate {
     content: {
@@ -630,3 +655,81 @@ export interface GoogleAIAPIParams {
     apiName?: string;
     apiConfig?: GoogleAIAPIConfig;
 }
+/**
+ * Defines the parameters required to initialize a
+ * GoogleEmbeddings instance. It extends EmbeddingsParams and
+ * GoogleConnectionParams.
+ */
+export interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
+    model: string;
+    /**
+     * Used to specify output embedding size.
+     * If set, output embeddings will be truncated to the size specified.
+     */
+    dimensions?: number;
+    /**
+     * An alias for "dimensions"
+     */
+    outputDimensionality?: number;
+}
+/**
+ * Defines additional options specific to the
+ * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
+ */
+export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {
+}
+export type GoogleEmbeddingsTaskType = "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | string;
+/**
+ * Represents an instance for generating embeddings using the Google
+ * Vertex AI API. It contains the content to be embedded.
+ */
+export interface VertexEmbeddingsInstance {
+    content: string;
+    taskType?: GoogleEmbeddingsTaskType;
+    title?: string;
+}
+export interface VertexEmbeddingsParameters extends GoogleModelParams {
+    autoTruncate?: boolean;
+    outputDimensionality?: number;
+}
+export interface VertexEmbeddingsRequest {
+    instances: VertexEmbeddingsInstance[];
+    parameters?: VertexEmbeddingsParameters;
+}
+export interface AIStudioEmbeddingsRequest {
+    content: {
+        parts: GeminiPartText[];
+    };
+    model?: string;
+    taskType?: GoogleEmbeddingsTaskType;
+    title?: string;
+    outputDimensionality?: number;
+}
+export type GoogleEmbeddingsRequest = VertexEmbeddingsRequest | AIStudioEmbeddingsRequest;
+export interface VertexEmbeddingsResponsePrediction {
+    embeddings: {
+        statistics: {
+            token_count: number;
+            truncated: boolean;
+        };
+        values: number[];
+    };
+}
+/**
+ * Defines the structure of the embeddings results returned by the Google
+ * Vertex AI API. It extends GoogleBasePrediction and contains the
+ * embeddings and their statistics.
+ */
+export interface VertexEmbeddingsResponse extends GoogleResponse {
+    data: {
+        predictions: VertexEmbeddingsResponsePrediction[];
+    };
+}
+export interface AIStudioEmbeddingsResponse extends GoogleResponse {
+    data: {
+        embedding: {
+            values: number[];
+        };
+    };
+}
+export type GoogleEmbeddingsResponse = VertexEmbeddingsResponse | AIStudioEmbeddingsResponse;
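
The two new response interfaces differ in shape, which is why `responseToValues` in the embeddings class branches per platform. The snippet below is an illustration only: sample payloads matching the new types, both normalized to the same `number[][]` result.

```typescript
// Illustration only: sample payloads matching VertexEmbeddingsResponse and
// AIStudioEmbeddingsResponse, normalized the way responseToValues does.
const vertexResponse = {
  data: {
    predictions: [
      { embeddings: { statistics: { token_count: 3, truncated: false }, values: [0.1, 0.2] } },
    ],
  },
};
const aiStudioResponse = { data: { embedding: { values: [0.3, 0.4] } } };

const fromVertex: number[][] = vertexResponse.data.predictions.map((p) => p.embeddings.values);
const fromAiStudio: number[][] = [aiStudioResponse.data.embedding.values];
console.log(fromVertex, fromAiStudio); // [[0.1, 0.2]] [[0.3, 0.4]]
```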
@@ -180,6 +180,7 @@ function copyAIModelParamsInto(params, options, target) {
     if (options?.cachedContent) {
         ret.cachedContent = options.cachedContent;
     }
+    ret.labels = options?.labels ?? params?.labels ?? target?.labels;
     return ret;
 }
 function modelToFamily(modelName) {
@@ -171,6 +171,7 @@ export function copyAIModelParamsInto(params, options, target) {
     if (options?.cachedContent) {
         ret.cachedContent = options.cachedContent;
     }
+    ret.labels = options?.labels ?? params?.labels ?? target?.labels;
     return ret;
 }
 export function modelToFamily(modelName) {
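
The line added to `copyAIModelParamsInto` establishes a precedence order for labels: call-time options win, then the params object, then any value already present on the target. A minimal sketch of that merge rule follows; `resolveLabels` is an invented helper name, not part of the package.

```typescript
// Minimal sketch of the labels merge order introduced above (illustration only).
type Labels = Record<string, string> | undefined;

const resolveLabels = (options: Labels, params: Labels, target: Labels): Labels =>
  options ?? params ?? target;

console.log(resolveLabels({ run: "eval-42" }, { team: "research" }, undefined)); // { run: "eval-42" }
console.log(resolveLabels(undefined, { team: "research" }, undefined)); // { team: "research" }
```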
@@ -1408,6 +1408,9 @@ function getGeminiAPI(config) {
         if (parameters.cachedContent) {
             ret.cachedContent = parameters.cachedContent;
         }
+        if (parameters.labels && Object.keys(parameters.labels).length > 0) {
+            ret.labels = parameters.labels;
+        }
         return ret;
     }
     return {
@@ -1398,6 +1398,9 @@ export function getGeminiAPI(config) {
         if (parameters.cachedContent) {
             ret.cachedContent = parameters.cachedContent;
         }
+        if (parameters.labels && Object.keys(parameters.labels).length > 0) {
+            ret.labels = parameters.labels;
+        }
         return ret;
     }
     return {
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-common",
-  "version": "0.2.14",
+  "version": "0.2.16",
   "description": "Core types and classes for Google services.",
   "type": "module",
   "engines": {