@langchain/google-common 0.0.22 → 0.0.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/chat_models.cjs CHANGED
@@ -57,10 +57,23 @@ class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
         return input
             .map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1], this.useSystemInstruction))
             .reduce((acc, cur) => {
-            // Filter out the system content, since those don't belong
-            // in the actual content.
-            const hasNoSystem = cur.every((content) => content.role !== "system");
-            return hasNoSystem ? [...acc, ...cur] : acc;
+            // Filter out the system content
+            if (cur.every((content) => content.role === "system")) {
+                return acc;
+            }
+            // Combine adjacent function messages
+            if (cur[0]?.role === "function" &&
+                acc.length > 0 &&
+                acc[acc.length - 1].role === "function") {
+                acc[acc.length - 1].parts = [
+                    ...acc[acc.length - 1].parts,
+                    ...cur[0].parts,
+                ];
+            }
+            else {
+                acc.push(...cur);
+            }
+            return acc;
         }, []);
     }
     formatSystemInstruction(input, _parameters) {
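In both compiled copies of this `reduce` (CJS here, ESM below), the old pass only filtered out system content; the new one also folds a function-role message into the previous entry when that entry is function-role too, so consecutive tool results reach Gemini as a single `function` turn. A minimal standalone sketch of the new behavior, using a simplified stand-in for the package's `GeminiContent` type:

```typescript
// Simplified stand-in for the package's GeminiContent type (illustration only).
type Content = {
  role: "user" | "model" | "function" | "system";
  parts: { text: string }[];
};

function mergeContents(chunks: Content[][]): Content[] {
  return chunks.reduce<Content[]>((acc, cur) => {
    // Drop chunks that are entirely system-role content.
    if (cur.every((content) => content.role === "system")) {
      return acc;
    }
    const head = cur[0];
    const prev = acc[acc.length - 1];
    // Fold a function-role chunk into a preceding function-role entry,
    // so back-to-back tool results become one "function" turn.
    if (head?.role === "function" && prev?.role === "function") {
      prev.parts = [...prev.parts, ...head.parts];
    } else {
      acc.push(...cur);
    }
    return acc;
  }, []);
}

// Two consecutive tool results collapse into a single entry:
const merged = mergeContents([
  [{ role: "function", parts: [{ text: "result A" }] }],
  [{ role: "function", parts: [{ text: "result B" }] }],
]);
console.log(merged.length, merged[0].parts.length); // 1 2
```

Mutating `prev.parts` in place is safe here because `acc` is the reducer's own accumulator, never the caller's input.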
package/dist/chat_models.d.ts CHANGED
@@ -3,15 +3,14 @@ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { BaseChatModel, LangSmithParams, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
 import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
 import { AIMessageChunk } from "@langchain/core/messages";
-import { BaseLanguageModelInput, StructuredOutputMethodOptions, ToolDefinition } from "@langchain/core/language_models/base";
+import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
 import type { z } from "zod";
-import { Runnable, RunnableToolLike } from "@langchain/core/runnables";
+import { Runnable } from "@langchain/core/runnables";
 import { AsyncCaller } from "@langchain/core/utils/async_caller";
-import { StructuredToolInterface } from "@langchain/core/tools";
 import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
 import { AbstractGoogleLLMConnection } from "./connection.js";
 import { GoogleAbstractedClient } from "./auth.js";
-import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams } from "./types.js";
+import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams, GoogleAIToolType } from "./types.js";
 declare class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<BaseMessage[], AuthOptions> {
     convertSystemMessageToHumanContent: boolean | undefined;
     constructor(fields: GoogleAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming: boolean);
@@ -56,7 +55,7 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
     buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
     buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
     get platform(): GooglePlatformType;
-    bindTools(tools: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[], kwargs?: Partial<GoogleAIBaseLanguageModelCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleAIBaseLanguageModelCallOptions>;
+    bindTools(tools: GoogleAIToolType[], kwargs?: Partial<GoogleAIBaseLanguageModelCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleAIBaseLanguageModelCallOptions>;
     _llmType(): string;
     /**
      * Get the parameters used to invoke the model
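With this change, `bindTools` accepts anything in the new `GoogleAIToolType` union (defined in the `types.d.ts` hunk further down): LangChain tool objects, OpenAI-style tool definitions, or raw `GeminiTool` objects. A hedged sketch of what now typechecks, assuming a concrete subclass such as `ChatVertexAI` from `@langchain/google-vertexai`:

```typescript
import { z } from "zod";
import { tool } from "@langchain/core/tools";
// ChatVertexAI is one concrete ChatGoogleBase subclass; any platform
// package's chat model should behave the same way here.
import { ChatVertexAI } from "@langchain/google-vertexai";

const getWeather = tool(async ({ city }) => `Sunny in ${city}`, {
  name: "get_weather",
  description: "Look up the current weather for a city.",
  schema: z.object({ city: z.string() }),
});

const model = new ChatVertexAI({ model: "gemini-1.5-pro" });

// A LangChain tool instance...
const boundLangChain = model.bindTools([getWeather]);

// ...and a raw GeminiTool now both satisfy GoogleAIToolType[]:
const boundGemini = model.bindTools([
  {
    functionDeclarations: [
      {
        name: "get_weather",
        description: "Look up the current weather for a city.",
        parameters: { type: "object", properties: { city: { type: "string" } } },
      },
    ],
  },
]);
```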
package/dist/chat_models.js CHANGED
@@ -54,10 +54,23 @@ class ChatConnection extends AbstractGoogleLLMConnection {
         return input
             .map((msg, i) => baseMessageToContent(msg, input[i - 1], this.useSystemInstruction))
             .reduce((acc, cur) => {
-            // Filter out the system content, since those don't belong
-            // in the actual content.
-            const hasNoSystem = cur.every((content) => content.role !== "system");
-            return hasNoSystem ? [...acc, ...cur] : acc;
+            // Filter out the system content
+            if (cur.every((content) => content.role === "system")) {
+                return acc;
+            }
+            // Combine adjacent function messages
+            if (cur[0]?.role === "function" &&
+                acc.length > 0 &&
+                acc[acc.length - 1].role === "function") {
+                acc[acc.length - 1].parts = [
+                    ...acc[acc.length - 1].parts,
+                    ...cur[0].parts,
+                ];
+            }
+            else {
+                acc.push(...cur);
+            }
+            return acc;
         }, []);
     }
     formatSystemInstruction(input, _parameters) {
package/dist/connection.cjs CHANGED
@@ -2,6 +2,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.AbstractGoogleLLMConnection = exports.GoogleAIConnection = exports.GoogleHostConnection = exports.GoogleConnection = void 0;
 const env_1 = require("@langchain/core/utils/env");
+const function_calling_1 = require("@langchain/core/utils/function_calling");
 const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.cjs");
 class GoogleConnection {
     constructor(caller, client, streaming) {
@@ -221,16 +222,11 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
     formatSystemInstruction(_input, _parameters) {
         return {};
     }
-    // Borrowed from the OpenAI invocation params test
-    isStructuredToolArray(tools) {
-        return (tools !== undefined &&
-            tools.every((tool) => Array.isArray(tool.lc_namespace)));
-    }
     structuredToolToFunctionDeclaration(tool) {
         const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(tool.schema);
         return {
             name: tool.name,
-            description: tool.description,
+            description: tool.description ?? `A function available to call.`,
             parameters: jsonSchema,
         };
     }
@@ -246,11 +242,13 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
         if (!tools || tools.length === 0) {
             return [];
         }
-        if (this.isStructuredToolArray(tools)) {
+        if (tools.every(function_calling_1.isLangChainTool)) {
            return this.structuredToolsToGeminiTools(tools);
         }
         else {
-            if (tools.length === 1 && !tools[0].functionDeclarations?.length) {
+            if (tools.length === 1 &&
+                (!("functionDeclarations" in tools[0]) ||
+                    !tools[0].functionDeclarations?.length)) {
                 return [];
             }
             return tools;
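The replaced guard is a behavioral fix as well as a rename: the old `lc_namespace` check only recognized class-based `StructuredTool` instances, while `isLangChainTool` also accepts plain `StructuredToolParams`-shaped objects, which is what makes the new `description` fallback above necessary. A small sketch of the difference, assuming the helper's documented behavior:

```typescript
import { z } from "zod";
import { isLangChainTool } from "@langchain/core/utils/function_calling";

// A plain tool-shaped object with no lc_namespace:
const plainTool = {
  name: "get_weather",
  description: "Look up the current weather for a city.",
  schema: z.object({ city: z.string() }),
};

// The old lc_namespace-based check would have rejected it...
console.log(Array.isArray((plainTool as { lc_namespace?: unknown }).lc_namespace)); // false

// ...but the new guard accepts it, so formatTools routes it
// through structuredToolsToGeminiTools.
console.log(isLangChainTool(plainTool)); // true
```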
package/dist/connection.d.ts CHANGED
@@ -1,6 +1,6 @@
 import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
 import { AsyncCaller, AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
-import { StructuredToolInterface } from "@langchain/core/tools";
+import { StructuredToolParams } from "@langchain/core/tools";
 import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GeminiContent, GeminiGenerationConfig, GeminiRequest, GeminiSafetySetting, GeminiTool, GeminiFunctionDeclaration, GoogleAIModelRequestParams } from "./types.js";
 import { GoogleAbstractedClient, GoogleAbstractedClientOpsMethod } from "./auth.js";
 export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse> {
@@ -28,7 +28,7 @@ export declare abstract class GoogleHostConnection<CallOptions extends AsyncCall
     get computedPlatformType(): GooglePlatformType;
     buildMethod(): GoogleAbstractedClientOpsMethod;
 }
-export declare abstract class GoogleAIConnection<CallOptions extends BaseLanguageModelCallOptions, MessageType, AuthOptions> extends GoogleHostConnection<CallOptions, GoogleLLMResponse, AuthOptions> implements GoogleAIBaseLLMInput<AuthOptions> {
+export declare abstract class GoogleAIConnection<CallOptions extends AsyncCallerCallOptions, InputType, AuthOptions, ResponseType extends GoogleResponse> extends GoogleHostConnection<CallOptions, ResponseType, AuthOptions> implements GoogleAIBaseLLMInput<AuthOptions> {
     model: string;
     modelName: string;
     client: GoogleAbstractedClient;
@@ -39,19 +39,18 @@ export declare abstract class GoogleAIConnection<CallOptions extends BaseLanguag
     buildUrlGenerativeLanguage(): Promise<string>;
     buildUrlVertex(): Promise<string>;
     buildUrl(): Promise<string>;
-    abstract formatData(input: MessageType, parameters: GoogleAIModelRequestParams): unknown;
-    request(input: MessageType, parameters: GoogleAIModelRequestParams, options: CallOptions): Promise<GoogleLLMResponse>;
+    abstract formatData(input: InputType, parameters: GoogleAIModelRequestParams): unknown;
+    request(input: InputType, parameters: GoogleAIModelRequestParams, options: CallOptions): Promise<ResponseType>;
 }
-export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptions> extends GoogleAIConnection<BaseLanguageModelCallOptions, MessageType, AuthOptions> {
+export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptions> extends GoogleAIConnection<BaseLanguageModelCallOptions, MessageType, AuthOptions, GoogleLLMResponse> {
     buildUrlMethodGemini(): Promise<string>;
     buildUrlMethod(): Promise<string>;
     abstract formatContents(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiContent[];
     formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiGenerationConfig;
     formatSafetySettings(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiSafetySetting[];
     formatSystemInstruction(_input: MessageType, _parameters: GoogleAIModelRequestParams): GeminiContent;
-    isStructuredToolArray(tools?: unknown[]): tools is StructuredToolInterface[];
-    structuredToolToFunctionDeclaration(tool: StructuredToolInterface): GeminiFunctionDeclaration;
-    structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[];
+    structuredToolToFunctionDeclaration(tool: StructuredToolParams): GeminiFunctionDeclaration;
+    structuredToolsToGeminiTools(tools: StructuredToolParams[]): GeminiTool[];
     formatTools(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiTool[];
     formatToolConfig(parameters: GoogleAIModelRequestParams): GeminiRequest["toolConfig"] | undefined;
     formatData(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiRequest;
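The widened generics are what make non-chat connections possible: `AbstractGoogleLLMConnection` pins `ResponseType` to `GoogleLLMResponse`, while the new `EmbeddingsConnection` added later in this diff can type `request()` against an embeddings-shaped response. A schematic sketch of the pattern; the names and signatures below are simplified stand-ins, not the package's real declarations:

```typescript
// Schematic sketch only: simplified from the declarations above.
declare abstract class GoogleAIConnection<CallOptions, InputType, AuthOptions, ResponseType> {
  abstract formatData(input: InputType, parameters: unknown): unknown;
  request(input: InputType, parameters: unknown, options: CallOptions): Promise<ResponseType>;
}

// Hypothetical response shape, echoing GoogleEmbeddingsResponse further down.
interface EmbeddingsResponse {
  data: { predictions: { embeddings: { values: number[] } }[] };
}

// A non-chat connection can now pick its own input and response types,
// which the old three-parameter GoogleAIConnection could not express:
declare class SketchEmbeddingsConnection<AuthOptions> extends GoogleAIConnection<
  { signal?: AbortSignal },
  { content: string }[],
  AuthOptions,
  EmbeddingsResponse
> {
  formatData(input: { content: string }[], parameters: unknown): unknown;
}
```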
package/dist/connection.js CHANGED
@@ -1,4 +1,5 @@
 import { getRuntimeEnvironment } from "@langchain/core/utils/env";
+import { isLangChainTool } from "@langchain/core/utils/function_calling";
 import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
 export class GoogleConnection {
     constructor(caller, client, streaming) {
@@ -215,16 +216,11 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
     formatSystemInstruction(_input, _parameters) {
         return {};
     }
-    // Borrowed from the OpenAI invocation params test
-    isStructuredToolArray(tools) {
-        return (tools !== undefined &&
-            tools.every((tool) => Array.isArray(tool.lc_namespace)));
-    }
     structuredToolToFunctionDeclaration(tool) {
         const jsonSchema = zodToGeminiParameters(tool.schema);
         return {
             name: tool.name,
-            description: tool.description,
+            description: tool.description ?? `A function available to call.`,
             parameters: jsonSchema,
         };
     }
@@ -240,11 +236,13 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
         if (!tools || tools.length === 0) {
             return [];
         }
-        if (this.isStructuredToolArray(tools)) {
+        if (tools.every(isLangChainTool)) {
             return this.structuredToolsToGeminiTools(tools);
         }
         else {
-            if (tools.length === 1 && !tools[0].functionDeclarations?.length) {
+            if (tools.length === 1 &&
+                (!("functionDeclarations" in tools[0]) ||
+                    !tools[0].functionDeclarations?.length)) {
                 return [];
             }
             return tools;
package/dist/embeddings.cjs ADDED
@@ -0,0 +1,116 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.BaseGoogleEmbeddings = void 0;
+const embeddings_1 = require("@langchain/core/embeddings");
+const chunk_array_1 = require("@langchain/core/utils/chunk_array");
+const env_1 = require("@langchain/core/utils/env");
+const connection_js_1 = require("./connection.cjs");
+const auth_js_1 = require("./auth.cjs");
+class EmbeddingsConnection extends connection_js_1.GoogleAIConnection {
+    constructor(fields, caller, client, streaming) {
+        super(fields, caller, client, streaming);
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+    }
+    async buildUrlMethod() {
+        return "predict";
+    }
+    formatData(input, parameters) {
+        return {
+            instances: input,
+            parameters,
+        };
+    }
+}
+/**
+ * Enables calls to the Google Cloud's Vertex AI API to access
+ * the embeddings generated by Large Language Models.
+ *
+ * To use, you will need to have one of the following authentication
+ * methods in place:
+ * - You are logged into an account permitted to the Google Cloud project
+ * using Vertex AI.
+ * - You are running this on a machine using a service account permitted to
+ * the Google Cloud project using Vertex AI.
+ * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
+ * path of a credentials file for a service account permitted to the
+ * Google Cloud project using Vertex AI.
+ * @example
+ * ```typescript
+ * const model = new GoogleEmbeddings();
+ * const res = await model.embedQuery(
+ *   "What would be a good company name for a company that makes colorful socks?"
+ * );
+ * console.log({ res });
+ * ```
+ */
+class BaseGoogleEmbeddings extends embeddings_1.Embeddings {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "connection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.model = fields.model;
+        this.connection = new EmbeddingsConnection({ ...fields, ...this }, this.caller, this.buildClient(fields), false);
+    }
+    buildApiKeyClient(apiKey) {
+        return new auth_js_1.ApiKeyGoogleAuth(apiKey);
+    }
+    buildApiKey(fields) {
+        return fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
+    }
+    buildClient(fields) {
+        const apiKey = this.buildApiKey(fields);
+        if (apiKey) {
+            return this.buildApiKeyClient(apiKey);
+        }
+        else {
+            return this.buildAbstractedClient(fields);
+        }
+    }
+    /**
+     * Takes an array of documents as input and returns a promise that
+     * resolves to a 2D array of embeddings for each document. It splits the
+     * documents into chunks and makes requests to the Google Vertex AI API to
+     * generate embeddings.
+     * @param documents An array of documents to be embedded.
+     * @returns A promise that resolves to a 2D array of embeddings for each document.
+     */
+    async embedDocuments(documents) {
+        const instanceChunks = (0, chunk_array_1.chunkArray)(documents.map((document) => ({
+            content: document,
+        })), 5); // Vertex AI accepts max 5 instances per prediction
+        const parameters = {};
+        const options = {};
+        const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
+        const result = responses
+            ?.map((response) => response?.data?.predictions?.map((result) => result.embeddings.values) ?? [])
+            .flat() ?? [];
+        return result;
+    }
+    /**
+     * Takes a document as input and returns a promise that resolves to an
+     * embedding for the document. It calls the embedDocuments method with the
+     * document as the input.
+     * @param document A document to be embedded.
+     * @returns A promise that resolves to an embedding for the document.
+     */
+    async embedQuery(document) {
+        const data = await this.embedDocuments([document]);
+        return data[0];
+    }
+}
+exports.BaseGoogleEmbeddings = BaseGoogleEmbeddings;
package/dist/embeddings.d.ts ADDED
@@ -0,0 +1,91 @@
+import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings";
+import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
+import { GoogleAbstractedClient } from "./auth.js";
+import { GoogleConnectionParams, GoogleResponse } from "./types.js";
+/**
+ * Defines the parameters required to initialize a
+ * GoogleEmbeddings instance. It extends EmbeddingsParams and
+ * GoogleConnectionParams.
+ */
+export interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
+    model: string;
+}
+/**
+ * Defines additional options specific to the
+ * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
+ */
+export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {
+}
+/**
+ * Represents an instance for generating embeddings using the Google
+ * Vertex AI API. It contains the content to be embedded.
+ */
+export interface GoogleEmbeddingsInstance {
+    content: string;
+}
+/**
+ * Defines the structure of the embeddings results returned by the Google
+ * Vertex AI API. It extends GoogleBasePrediction and contains the
+ * embeddings and their statistics.
+ */
+export interface GoogleEmbeddingsResponse extends GoogleResponse {
+    data: {
+        predictions: {
+            embeddings: {
+                statistics: {
+                    token_count: number;
+                    truncated: boolean;
+                };
+                values: number[];
+            };
+        }[];
+    };
+}
+/**
+ * Enables calls to the Google Cloud's Vertex AI API to access
+ * the embeddings generated by Large Language Models.
+ *
+ * To use, you will need to have one of the following authentication
+ * methods in place:
+ * - You are logged into an account permitted to the Google Cloud project
+ * using Vertex AI.
+ * - You are running this on a machine using a service account permitted to
+ * the Google Cloud project using Vertex AI.
+ * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
+ * path of a credentials file for a service account permitted to the
+ * Google Cloud project using Vertex AI.
+ * @example
+ * ```typescript
+ * const model = new GoogleEmbeddings();
+ * const res = await model.embedQuery(
+ *   "What would be a good company name for a company that makes colorful socks?"
+ * );
+ * console.log({ res });
+ * ```
+ */
+export declare abstract class BaseGoogleEmbeddings<AuthOptions> extends Embeddings implements BaseGoogleEmbeddingsParams<AuthOptions> {
+    model: string;
+    private connection;
+    constructor(fields: BaseGoogleEmbeddingsParams<AuthOptions>);
+    abstract buildAbstractedClient(fields?: GoogleConnectionParams<AuthOptions>): GoogleAbstractedClient;
+    buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
+    buildApiKey(fields?: GoogleConnectionParams<AuthOptions>): string | undefined;
+    buildClient(fields?: GoogleConnectionParams<AuthOptions>): GoogleAbstractedClient;
+    /**
+     * Takes an array of documents as input and returns a promise that
+     * resolves to a 2D array of embeddings for each document. It splits the
+     * documents into chunks and makes requests to the Google Vertex AI API to
+     * generate embeddings.
+     * @param documents An array of documents to be embedded.
+     * @returns A promise that resolves to a 2D array of embeddings for each document.
+     */
+    embedDocuments(documents: string[]): Promise<number[][]>;
+    /**
+     * Takes a document as input and returns a promise that resolves to an
+     * embedding for the document. It calls the embedDocuments method with the
+     * document as the input.
+     * @param document A document to be embedded.
+     * @returns A promise that resolves to an embedding for the document.
+     */
+    embedQuery(document: string): Promise<number[]>;
+}
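`BaseGoogleEmbeddings` is abstract so that platform packages can supply the auth client via `buildAbstractedClient`; the `GoogleEmbeddings` class in the JSDoc example reads like a placeholder for such a subclass. A hypothetical minimal subclass, assuming the API-key path is taken so `buildAbstractedClient` is never reached, and assuming `textembedding-gecko` as a valid Vertex AI model name:

```typescript
import {
  BaseGoogleEmbeddings,
  type GoogleAbstractedClient,
  type GoogleConnectionParams,
} from "@langchain/google-common";

// Hypothetical subclass for illustration; real platform packages
// (e.g. @langchain/google-vertexai) wire in a full auth implementation.
class ApiKeyOnlyEmbeddings extends BaseGoogleEmbeddings<unknown> {
  buildAbstractedClient(
    _fields?: GoogleConnectionParams<unknown>
  ): GoogleAbstractedClient {
    // Only reached when buildClient finds no API key.
    throw new Error("Set GOOGLE_API_KEY or implement a real auth client.");
  }
}

// "textembedding-gecko" is an assumed model name for the example.
const embeddings = new ApiKeyOnlyEmbeddings({ model: "textembedding-gecko" });
const vectors = await embeddings.embedDocuments(["hello", "world"]);
console.log(vectors.length); // 2 — one embedding per input, batched 5 at a time
```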
package/dist/embeddings.js ADDED
@@ -0,0 +1,112 @@
+import { Embeddings } from "@langchain/core/embeddings";
+import { chunkArray } from "@langchain/core/utils/chunk_array";
+import { getEnvironmentVariable } from "@langchain/core/utils/env";
+import { GoogleAIConnection } from "./connection.js";
+import { ApiKeyGoogleAuth } from "./auth.js";
+class EmbeddingsConnection extends GoogleAIConnection {
+    constructor(fields, caller, client, streaming) {
+        super(fields, caller, client, streaming);
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+    }
+    async buildUrlMethod() {
+        return "predict";
+    }
+    formatData(input, parameters) {
+        return {
+            instances: input,
+            parameters,
+        };
+    }
+}
+/**
+ * Enables calls to the Google Cloud's Vertex AI API to access
+ * the embeddings generated by Large Language Models.
+ *
+ * To use, you will need to have one of the following authentication
+ * methods in place:
+ * - You are logged into an account permitted to the Google Cloud project
+ * using Vertex AI.
+ * - You are running this on a machine using a service account permitted to
+ * the Google Cloud project using Vertex AI.
+ * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
+ * path of a credentials file for a service account permitted to the
+ * Google Cloud project using Vertex AI.
+ * @example
+ * ```typescript
+ * const model = new GoogleEmbeddings();
+ * const res = await model.embedQuery(
+ *   "What would be a good company name for a company that makes colorful socks?"
+ * );
+ * console.log({ res });
+ * ```
+ */
+export class BaseGoogleEmbeddings extends Embeddings {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "connection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.model = fields.model;
+        this.connection = new EmbeddingsConnection({ ...fields, ...this }, this.caller, this.buildClient(fields), false);
+    }
+    buildApiKeyClient(apiKey) {
+        return new ApiKeyGoogleAuth(apiKey);
+    }
+    buildApiKey(fields) {
+        return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
+    }
+    buildClient(fields) {
+        const apiKey = this.buildApiKey(fields);
+        if (apiKey) {
+            return this.buildApiKeyClient(apiKey);
+        }
+        else {
+            return this.buildAbstractedClient(fields);
+        }
+    }
+    /**
+     * Takes an array of documents as input and returns a promise that
+     * resolves to a 2D array of embeddings for each document. It splits the
+     * documents into chunks and makes requests to the Google Vertex AI API to
+     * generate embeddings.
+     * @param documents An array of documents to be embedded.
+     * @returns A promise that resolves to a 2D array of embeddings for each document.
+     */
+    async embedDocuments(documents) {
+        const instanceChunks = chunkArray(documents.map((document) => ({
+            content: document,
+        })), 5); // Vertex AI accepts max 5 instances per prediction
+        const parameters = {};
+        const options = {};
+        const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
+        const result = responses
+            ?.map((response) => response?.data?.predictions?.map((result) => result.embeddings.values) ?? [])
+            .flat() ?? [];
+        return result;
+    }
+    /**
+     * Takes a document as input and returns a promise that resolves to an
+     * embedding for the document. It calls the embedDocuments method with the
+     * document as the input.
+     * @param document A document to be embedded.
+     * @returns A promise that resolves to an embedding for the document.
+     */
+    async embedQuery(document) {
+        const data = await this.embedDocuments([document]);
+        return data[0];
+    }
+}
package/dist/index.cjs CHANGED
@@ -16,6 +16,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
 Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./chat_models.cjs"), exports);
 __exportStar(require("./llms.cjs"), exports);
+__exportStar(require("./embeddings.cjs"), exports);
 __exportStar(require("./auth.cjs"), exports);
 __exportStar(require("./connection.cjs"), exports);
 __exportStar(require("./types.cjs"), exports);
package/dist/index.d.ts CHANGED
@@ -1,5 +1,6 @@
 export * from "./chat_models.js";
 export * from "./llms.js";
+export * from "./embeddings.js";
 export * from "./auth.js";
 export * from "./connection.js";
 export * from "./types.js";
package/dist/index.js CHANGED
@@ -1,5 +1,6 @@
 export * from "./chat_models.js";
 export * from "./llms.js";
+export * from "./embeddings.js";
 export * from "./auth.js";
 export * from "./connection.js";
 export * from "./types.js";
package/dist/types.d.ts CHANGED
@@ -1,6 +1,5 @@
 import type { BaseLLMParams } from "@langchain/core/language_models/llms";
-import { StructuredToolInterface } from "@langchain/core/tools";
-import type { BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
+import type { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
 import type { JsonStream } from "./utils/stream.js";
 /**
  * Parameters needed to setup the client connection.
@@ -36,6 +35,7 @@ export interface GoogleConnectionParams<AuthOptions> extends GoogleClientParams<
 export interface GoogleAISafetySetting {
     category: string;
     threshold: string;
+    method?: string;
 }
 export type GoogleAIResponseMimeType = "text/plain" | "application/json";
 export interface GoogleAIModelParams {
@@ -91,11 +91,12 @@ export interface GoogleAIModelParams {
      */
     streaming?: boolean;
 }
+export type GoogleAIToolType = BindToolsInput | GeminiTool;
 /**
  * The params which can be passed to the API at request time.
  */
 export interface GoogleAIModelRequestParams extends GoogleAIModelParams {
-    tools?: StructuredToolInterface[] | GeminiTool[];
+    tools?: GoogleAIToolType[];
     /**
      * Force the model to use tools in a specific way.
      *
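Two `types.d.ts` additions stand out: `GoogleAIToolType` is the union that `bindTools` and `convertToGeminiTools` now share, and `GoogleAISafetySetting` gains an optional `method`, which appears to correspond to the Vertex AI option for probability- versus severity-based threshold scoring. A hedged example of passing it through; the value names follow the Vertex AI REST API, not anything this package validates:

```typescript
import { ChatVertexAI } from "@langchain/google-vertexai";

const model = new ChatVertexAI({
  model: "gemini-1.5-pro",
  safetySettings: [
    {
      category: "HARM_CATEGORY_DANGEROUS_CONTENT",
      threshold: "BLOCK_MEDIUM_AND_ABOVE",
      // New in this release; assumption: "SEVERITY" selects
      // severity-based scoring per the Vertex AI REST API.
      method: "SEVERITY",
    },
  ],
});
```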
package/dist/utils/common.cjs CHANGED
@@ -36,35 +36,35 @@ function processToolChoice(toolChoice, allowedFunctionNames) {
     }
     throw new Error("Object inputs for tool_choice not supported.");
 }
-function convertToGeminiTools(structuredTools) {
-    const tools = [
+function convertToGeminiTools(tools) {
+    const geminiTools = [
         {
             functionDeclarations: [],
         },
     ];
-    structuredTools.forEach((tool) => {
+    tools.forEach((tool) => {
         if ("functionDeclarations" in tool &&
             Array.isArray(tool.functionDeclarations)) {
             const funcs = tool.functionDeclarations;
-            tools[0].functionDeclarations?.push(...funcs);
+            geminiTools[0].functionDeclarations?.push(...funcs);
         }
-        else if ((0, function_calling_1.isStructuredTool)(tool)) {
+        else if ((0, function_calling_1.isLangChainTool)(tool)) {
             const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(tool.schema);
-            tools[0].functionDeclarations?.push({
+            geminiTools[0].functionDeclarations?.push({
                 name: tool.name,
-                description: tool.description,
+                description: tool.description ?? `A function available to call.`,
                 parameters: jsonSchema,
             });
         }
         else if ((0, base_1.isOpenAITool)(tool)) {
-            tools[0].functionDeclarations?.push({
+            geminiTools[0].functionDeclarations?.push({
                 name: tool.function.name,
                 description: tool.function.description ?? `A function available to call.`,
                 parameters: (0, zod_to_gemini_parameters_js_1.jsonSchemaToGeminiParameters)(tool.function.parameters),
             });
         }
     });
-    return tools;
+    return geminiTools;
 }
 exports.convertToGeminiTools = convertToGeminiTools;
 function copyAIModelParamsInto(params, options, target) {
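`convertToGeminiTools` now funnels every supported input shape into a single `GeminiTool` whose `functionDeclarations` are concatenated, applying the same description fallback in the LangChain branch that the OpenAI branch already had. A sketch with mixed inputs; the root import path is an assumption, since inside the package the helper lives in `utils/common.js`:

```typescript
import { z } from "zod";
// Import path assumed for illustration; adjust to however your build
// resolves the package's utils.
import { convertToGeminiTools } from "@langchain/google-common";

const geminiTools = convertToGeminiTools([
  // StructuredToolParams shape, no description: hits the isLangChainTool
  // branch and gets the "A function available to call." fallback.
  { name: "get_weather", schema: z.object({ city: z.string() }) },
  // OpenAI-style definition: hits the isOpenAITool branch.
  {
    type: "function",
    function: {
      name: "get_time",
      parameters: { type: "object", properties: {} },
    },
  },
]);

// Everything lands in one GeminiTool:
console.log(geminiTools.length); // 1
console.log(geminiTools[0].functionDeclarations?.map((f) => f.name));
// => ["get_weather", "get_time"]
```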
package/dist/utils/common.d.ts CHANGED
@@ -1,9 +1,6 @@
-import { StructuredToolInterface } from "@langchain/core/tools";
-import { ToolDefinition } from "@langchain/core/language_models/base";
-import { RunnableToolLike } from "@langchain/core/runnables";
-import type { GeminiTool, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleLLMModelFamily } from "../types.js";
+import type { GeminiTool, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIToolType, GoogleLLMModelFamily } from "../types.js";
 export declare function copyAIModelParams(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined): GoogleAIModelRequestParams;
-export declare function convertToGeminiTools(structuredTools: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[]): GeminiTool[];
+export declare function convertToGeminiTools(tools: GoogleAIToolType[]): GeminiTool[];
 export declare function copyAIModelParamsInto(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined, target: GoogleAIModelParams): GoogleAIModelRequestParams;
 export declare function modelToFamily(modelName: string | undefined): GoogleLLMModelFamily;
 export declare function validateModelParams(params: GoogleAIModelParams | undefined): void;
package/dist/utils/common.js CHANGED
@@ -1,5 +1,5 @@
-import { isOpenAITool, } from "@langchain/core/language_models/base";
-import { isStructuredTool } from "@langchain/core/utils/function_calling";
+import { isOpenAITool } from "@langchain/core/language_models/base";
+import { isLangChainTool } from "@langchain/core/utils/function_calling";
 import { isModelGemini, validateGeminiParams } from "./gemini.js";
 import { jsonSchemaToGeminiParameters, zodToGeminiParameters, } from "./zod_to_gemini_parameters.js";
 export function copyAIModelParams(params, options) {
@@ -32,35 +32,35 @@ function processToolChoice(toolChoice, allowedFunctionNames) {
     }
     throw new Error("Object inputs for tool_choice not supported.");
 }
-export function convertToGeminiTools(structuredTools) {
-    const tools = [
+export function convertToGeminiTools(tools) {
+    const geminiTools = [
         {
             functionDeclarations: [],
         },
     ];
-    structuredTools.forEach((tool) => {
+    tools.forEach((tool) => {
         if ("functionDeclarations" in tool &&
             Array.isArray(tool.functionDeclarations)) {
             const funcs = tool.functionDeclarations;
-            tools[0].functionDeclarations?.push(...funcs);
+            geminiTools[0].functionDeclarations?.push(...funcs);
         }
-        else if (isStructuredTool(tool)) {
+        else if (isLangChainTool(tool)) {
             const jsonSchema = zodToGeminiParameters(tool.schema);
-            tools[0].functionDeclarations?.push({
+            geminiTools[0].functionDeclarations?.push({
                 name: tool.name,
-                description: tool.description,
+                description: tool.description ?? `A function available to call.`,
                 parameters: jsonSchema,
             });
         }
         else if (isOpenAITool(tool)) {
-            tools[0].functionDeclarations?.push({
+            geminiTools[0].functionDeclarations?.push({
                 name: tool.function.name,
                 description: tool.function.description ?? `A function available to call.`,
                 parameters: jsonSchemaToGeminiParameters(tool.function.parameters),
             });
         }
     });
-    return tools;
+    return geminiTools;
 }
 export function copyAIModelParamsInto(params, options, target) {
     const ret = target || {};
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-common",
-  "version": "0.0.22",
+  "version": "0.0.24",
   "description": "Core types and classes for Google services.",
   "type": "module",
   "engines": {
@@ -40,7 +40,7 @@
   "author": "LangChain",
   "license": "MIT",
   "dependencies": {
-    "@langchain/core": ">=0.2.16 <0.3.0",
+    "@langchain/core": ">=0.2.21 <0.3.0",
     "uuid": "^10.0.0",
     "zod-to-json-schema": "^3.22.4"
   },
@@ -63,7 +63,7 @@
     "jest": "^29.5.0",
     "jest-environment-node": "^29.6.4",
     "prettier": "^2.8.3",
-    "release-it": "^15.10.1",
+    "release-it": "^17.6.0",
     "rollup": "^4.5.2",
     "ts-jest": "^29.1.0",
     "typescript": "<5.2.0",