@langchain/google-genai 0.0.9 → 0.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -55,7 +55,7 @@ const model = new ChatGoogleGenerativeAI({
  modelName: "gemini-pro",
  maxOutputTokens: 2048,
  });
- const response = await mode.invoke(new HumanMessage("Hello world!"));
+ const response = await model.invoke(new HumanMessage("Hello world!"));
  ```

  #### Multimodal inputs
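For readers scanning the diff: the README change above only fixes a variable-name typo (`mode` → `model`) in the quickstart snippet. A minimal, self-contained version of the corrected example is sketched below; the model name, token limit, and prompt are illustrative, and `GOOGLE_API_KEY` is assumed to be set in the environment.

```ts
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HumanMessage } from "@langchain/core/messages";

// Picks up GOOGLE_API_KEY from the environment unless apiKey is passed explicitly.
const model = new ChatGoogleGenerativeAI({
  modelName: "gemini-pro",
  maxOutputTokens: 2048,
});

// `invoke` replaces the older `.call()` pattern updated elsewhere in this diff.
const response = await model.invoke([new HumanMessage("Hello world!")]);
console.log(response.content);
```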
package/dist/chat_models.cjs CHANGED
@@ -30,7 +30,7 @@ const utils_js_1 = require("./utils.cjs");
  * ]
  * })
  * ];
- * const res = await model.call(questions);
+ * const res = await model.invoke(questions);
  * console.log({ res });
  * ```
  */
@@ -44,7 +44,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
  };
  }
  get _isMultimodalModel() {
- return this.modelName.includes("vision");
+ return this.model.includes("vision");
  }
  constructor(fields) {
  super(fields ?? {});
@@ -60,6 +60,12 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
  writable: true,
  value: "gemini-pro"
  });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "gemini-pro"
+ });
  Object.defineProperty(this, "temperature", {
  enumerable: true,
  configurable: true,
@@ -115,7 +121,10 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
  value: void 0
  });
  this.modelName =
- fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
+ fields?.model?.replace(/^models\//, "") ??
+ fields?.modelName?.replace(/^models\//, "") ??
+ this.model;
+ this.model = this.modelName;
  this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
  if (this.maxOutputTokens && this.maxOutputTokens < 0) {
  throw new Error("`maxOutputTokens` must be a positive integer");
@@ -152,7 +161,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
  }
  this.streaming = fields?.streaming ?? this.streaming;
  this.client = new generative_ai_1.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
- model: this.modelName,
+ model: this.model,
  safetySettings: this.safetySettings,
  generationConfig: {
  candidateCount: 1,
package/dist/chat_models.d.ts CHANGED
@@ -14,9 +14,17 @@ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
  /**
  * Model Name to use
  *
+ * Alias for `model`
+ *
  * Note: The format must follow the pattern - `{model}`
  */
  modelName?: string;
+ /**
+ * Model Name to use
+ *
+ * Note: The format must follow the pattern - `{model}`
+ */
+ model?: string;
  /**
  * Controls the randomness of the output.
  *
@@ -104,7 +112,7 @@ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
  * ]
  * })
  * ];
- * const res = await model.call(questions);
+ * const res = await model.invoke(questions);
  * console.log({ res });
  * ```
  */
@@ -115,6 +123,7 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel implements Goo
  [key: string]: string;
  } | undefined;
  modelName: string;
+ model: string;
  temperature?: number;
  maxOutputTokens?: number;
  topP?: number;
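Taken together, the chat-model hunks above add `model` as the preferred constructor field, keep `modelName` as a documented alias, strip any leading `models/` prefix, and mirror the final value onto both properties (which is what `_isMultimodalModel` now reads via `this.model`). A minimal sketch of the two equivalent constructions; model names are illustrative and `GOOGLE_API_KEY` is assumed to be set.

```ts
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// New preferred field name.
const vision = new ChatGoogleGenerativeAI({
  model: "gemini-pro-vision",
});

// Legacy alias, normalized the same way (a leading "models/" is stripped).
const legacy = new ChatGoogleGenerativeAI({
  modelName: "models/gemini-pro",
});

console.log(vision.model, vision.modelName); // "gemini-pro-vision" for both
console.log(legacy.model, legacy.modelName); // "gemini-pro" for both
```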
package/dist/chat_models.js CHANGED
@@ -27,7 +27,7 @@ import { convertBaseMessagesToContent, convertResponseContentToChatGenerationChu
  * ]
  * })
  * ];
- * const res = await model.call(questions);
+ * const res = await model.invoke(questions);
  * console.log({ res });
  * ```
  */
@@ -41,7 +41,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
  };
  }
  get _isMultimodalModel() {
- return this.modelName.includes("vision");
+ return this.model.includes("vision");
  }
  constructor(fields) {
  super(fields ?? {});
@@ -57,6 +57,12 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
  writable: true,
  value: "gemini-pro"
  });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "gemini-pro"
+ });
  Object.defineProperty(this, "temperature", {
  enumerable: true,
  configurable: true,
@@ -112,7 +118,10 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
  value: void 0
  });
  this.modelName =
- fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
+ fields?.model?.replace(/^models\//, "") ??
+ fields?.modelName?.replace(/^models\//, "") ??
+ this.model;
+ this.model = this.modelName;
  this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
  if (this.maxOutputTokens && this.maxOutputTokens < 0) {
  throw new Error("`maxOutputTokens` must be a positive integer");
@@ -149,7 +158,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
  }
  this.streaming = fields?.streaming ?? this.streaming;
  this.client = new GenerativeAI(this.apiKey).getGenerativeModel({
- model: this.modelName,
+ model: this.model,
  safetySettings: this.safetySettings,
  generationConfig: {
  candidateCount: 1,
package/dist/embeddings.cjs CHANGED
@@ -4,6 +4,7 @@ exports.GoogleGenerativeAIEmbeddings = void 0;
  const generative_ai_1 = require("@google/generative-ai");
  const env_1 = require("@langchain/core/utils/env");
  const embeddings_1 = require("@langchain/core/embeddings");
+ const chunk_array_1 = require("@langchain/core/utils/chunk_array");
  /**
  * Class that extends the Embeddings class and provides methods for
  * generating embeddings using the Google Palm API.
@@ -40,6 +41,12 @@ class GoogleGenerativeAIEmbeddings extends embeddings_1.Embeddings {
  writable: true,
  value: "embedding-001"
  });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "embedding-001"
+ });
  Object.defineProperty(this, "taskType", {
  enumerable: true,
  configurable: true,
@@ -58,6 +65,12 @@ class GoogleGenerativeAIEmbeddings extends embeddings_1.Embeddings {
  writable: true,
  value: true
  });
+ Object.defineProperty(this, "maxBatchSize", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 100
+ }); // Max batch size for embedDocuments set by GenerativeModel client's batchEmbedContents call
  Object.defineProperty(this, "client", {
  enumerable: true,
  configurable: true,
@@ -65,7 +78,10 @@ class GoogleGenerativeAIEmbeddings extends embeddings_1.Embeddings {
  value: void 0
  });
  this.modelName =
- fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
+ fields?.model?.replace(/^models\//, "") ??
+ fields?.modelName?.replace(/^models\//, "") ??
+ this.modelName;
+ this.model = this.modelName;
  this.taskType = fields?.taskType ?? this.taskType;
  this.title = fields?.title ?? this.title;
  if (this.title && this.taskType !== "RETRIEVAL_DOCUMENT") {
@@ -79,7 +95,7 @@ class GoogleGenerativeAIEmbeddings extends embeddings_1.Embeddings {
  "GoogleGenerativeAIEmbeddings constructor");
  }
  this.client = new generative_ai_1.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
- model: this.modelName,
+ model: this.model,
  });
  }
  _convertToContent(text) {
@@ -96,11 +112,20 @@ class GoogleGenerativeAIEmbeddings extends embeddings_1.Embeddings {
  return res.embedding.values ?? [];
  }
  async _embedDocumentsContent(documents) {
- const req = {
- requests: documents.map((doc) => this._convertToContent(doc)),
- };
- const res = await this.client.batchEmbedContents(req);
- return res.embeddings.map((e) => e.values || []) ?? [];
+ const batchEmbedChunks = (0, chunk_array_1.chunkArray)(documents, this.maxBatchSize);
+ const batchEmbedRequests = batchEmbedChunks.map((chunk) => ({
+ requests: chunk.map((doc) => this._convertToContent(doc)),
+ }));
+ const responses = await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)));
+ const embeddings = responses.flatMap((res, idx) => {
+ if (res.status === "fulfilled") {
+ return res.value.embeddings.map((e) => e.values || []);
+ }
+ else {
+ return Array(batchEmbedChunks[idx].length).fill([]);
+ }
+ });
+ return embeddings;
  }
  /**
  * Method that takes a document as input and returns a promise that
package/dist/embeddings.d.ts CHANGED
@@ -8,9 +8,17 @@ export interface GoogleGenerativeAIEmbeddingsParams extends EmbeddingsParams {
  /**
  * Model Name to use
  *
+ * Alias for `model`
+ *
  * Note: The format must follow the pattern - `{model}`
  */
  modelName?: string;
+ /**
+ * Model Name to use
+ *
+ * Note: The format must follow the pattern - `{model}`
+ */
+ model?: string;
  /**
  * Type of task for which the embedding will be used
  *
@@ -57,9 +65,11 @@ export interface GoogleGenerativeAIEmbeddingsParams extends EmbeddingsParams {
  export declare class GoogleGenerativeAIEmbeddings extends Embeddings implements GoogleGenerativeAIEmbeddingsParams {
  apiKey?: string;
  modelName: string;
+ model: string;
  taskType?: TaskType;
  title?: string;
  stripNewLines: boolean;
+ maxBatchSize: number;
  private client;
  constructor(fields?: GoogleGenerativeAIEmbeddingsParams);
  private _convertToContent;
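The embeddings class gets the same `model`/`modelName` aliasing as the chat model, plus a `maxBatchSize` field (default 100) that caps how many documents go into a single `batchEmbedContents` request. A rough usage sketch under those assumptions follows; the text content and document count are illustrative, `GOOGLE_API_KEY` is assumed to be set, and note the diff only shows the `_embedDocumentsContent` helper that `embedDocuments` is expected to route through.

```ts
import { GoogleGenerativeAIEmbeddings } from "@langchain/google-genai";

const embeddings = new GoogleGenerativeAIEmbeddings({
  model: "embedding-001", // `modelName` remains a supported alias
});

// With maxBatchSize = 100, 250 documents should be split into
// three batchEmbedContents requests (100 + 100 + 50).
const texts = Array.from({ length: 250 }, (_, i) => `Document number ${i}`);
const vectors = await embeddings.embedDocuments(texts);
console.log(vectors.length); // 250
```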
package/dist/embeddings.js CHANGED
@@ -1,6 +1,7 @@
  import { GoogleGenerativeAI } from "@google/generative-ai";
  import { getEnvironmentVariable } from "@langchain/core/utils/env";
  import { Embeddings } from "@langchain/core/embeddings";
+ import { chunkArray } from "@langchain/core/utils/chunk_array";
  /**
  * Class that extends the Embeddings class and provides methods for
  * generating embeddings using the Google Palm API.
@@ -37,6 +38,12 @@ export class GoogleGenerativeAIEmbeddings extends Embeddings {
  writable: true,
  value: "embedding-001"
  });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "embedding-001"
+ });
  Object.defineProperty(this, "taskType", {
  enumerable: true,
  configurable: true,
@@ -55,6 +62,12 @@ export class GoogleGenerativeAIEmbeddings extends Embeddings {
  writable: true,
  value: true
  });
+ Object.defineProperty(this, "maxBatchSize", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 100
+ }); // Max batch size for embedDocuments set by GenerativeModel client's batchEmbedContents call
  Object.defineProperty(this, "client", {
  enumerable: true,
  configurable: true,
@@ -62,7 +75,10 @@ export class GoogleGenerativeAIEmbeddings extends Embeddings {
  value: void 0
  });
  this.modelName =
- fields?.modelName?.replace(/^models\//, "") ?? this.modelName;
+ fields?.model?.replace(/^models\//, "") ??
+ fields?.modelName?.replace(/^models\//, "") ??
+ this.modelName;
+ this.model = this.modelName;
  this.taskType = fields?.taskType ?? this.taskType;
  this.title = fields?.title ?? this.title;
  if (this.title && this.taskType !== "RETRIEVAL_DOCUMENT") {
@@ -76,7 +92,7 @@ export class GoogleGenerativeAIEmbeddings extends Embeddings {
  "GoogleGenerativeAIEmbeddings constructor");
  }
  this.client = new GoogleGenerativeAI(this.apiKey).getGenerativeModel({
- model: this.modelName,
+ model: this.model,
  });
  }
  _convertToContent(text) {
@@ -93,11 +109,20 @@ export class GoogleGenerativeAIEmbeddings extends Embeddings {
  return res.embedding.values ?? [];
  }
  async _embedDocumentsContent(documents) {
- const req = {
- requests: documents.map((doc) => this._convertToContent(doc)),
- };
- const res = await this.client.batchEmbedContents(req);
- return res.embeddings.map((e) => e.values || []) ?? [];
+ const batchEmbedChunks = chunkArray(documents, this.maxBatchSize);
+ const batchEmbedRequests = batchEmbedChunks.map((chunk) => ({
+ requests: chunk.map((doc) => this._convertToContent(doc)),
+ }));
+ const responses = await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)));
+ const embeddings = responses.flatMap((res, idx) => {
+ if (res.status === "fulfilled") {
+ return res.value.embeddings.map((e) => e.values || []);
+ }
+ else {
+ return Array(batchEmbedChunks[idx].length).fill([]);
+ }
+ });
+ return embeddings;
  }
  /**
  * Method that takes a document as input and returns a promise that
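The rewritten `_embedDocumentsContent` above chunks the input with `chunkArray`, issues one `batchEmbedContents` call per chunk, and wraps the calls in `Promise.allSettled` so a single failed batch no longer rejects the whole run; the documents from a rejected batch come back as empty vectors, keeping the output aligned with the input. A standalone sketch of that pattern is below; `embedBatch` and the local `chunkArray` are hypothetical stand-ins for the real client call and the `@langchain/core/utils/chunk_array` helper.

```ts
// Split an array into consecutive chunks of at most `size` elements.
const chunkArray = <T>(arr: T[], size: number): T[][] =>
  arr.reduce<T[][]>((chunks, item, i) => {
    const idx = Math.floor(i / size);
    (chunks[idx] ??= []).push(item);
    return chunks;
  }, []);

async function embedAll(
  docs: string[],
  embedBatch: (batch: string[]) => Promise<number[][]>, // stand-in for client.batchEmbedContents
  maxBatchSize = 100
): Promise<number[][]> {
  const batches = chunkArray(docs, maxBatchSize);
  // allSettled: one failing batch does not abort the others.
  const settled = await Promise.allSettled(batches.map((b) => embedBatch(b)));
  // Keep the output aligned with the input: rejected batches yield empty vectors.
  return settled.flatMap((res, i) =>
    res.status === "fulfilled" ? res.value : batches[i].map(() => [] as number[])
  );
}
```

One consequence visible in the diff is that batch failures are silent: callers receive zero-length vectors for the affected documents rather than an error.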
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@langchain/google-genai",
- "version": "0.0.9",
+ "version": "0.0.11",
  "description": "Sample integration for LangChain.js",
  "type": "module",
  "engines": {
@@ -24,7 +24,7 @@
  "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
  "lint": "yarn lint:eslint && yarn lint:dpdm",
  "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
- "clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 yarn create-entrypoints -- --pre",
+ "clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 yarn lc-build --config ./langchain.config.js --create-entrypoints --pre",
  "prepack": "yarn build",
  "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
  "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",