@langchain/google-genai 0.0.3 → 0.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +163 -0
- package/dist/chat_models.cjs +7 -3
- package/dist/chat_models.d.ts +2 -2
- package/dist/chat_models.js +7 -3
- package/dist/utils.cjs +3 -1
- package/dist/utils.js +3 -1
- package/package.json +2 -2
package/README.md
ADDED

@@ -0,0 +1,163 @@

# @langchain/google-genai

This package contains the LangChain.js integrations for Gemini through their generative-ai SDK.

## Installation

```bash npm2yarn
npm install @langchain/google-genai
```

This package, along with the main LangChain package, depends on [`@langchain/core`](https://npmjs.com/package/@langchain/core/).
If you are using this package with other LangChain packages, you should make sure that all of the packages depend on the same instance of @langchain/core.
You can do so by adding the appropriate fields to your project's `package.json` like this:

```json
{
  "name": "your-project",
  "version": "0.0.0",
  "dependencies": {
    "@langchain/google-genai": "^0.0.0",
    "langchain": "0.0.207"
  },
  "resolutions": {
    "@langchain/core": "0.1.2"
  },
  "overrides": {
    "@langchain/core": "0.1.2"
  },
  "pnpm": {
    "overrides": {
      "@langchain/core": "0.1.2"
    }
  }
}
```

The field you need depends on the package manager you're using, but we recommend adding a field for the common `yarn`, `npm`, and `pnpm` package managers to maximize compatibility.

## Chat Models

This package contains the `ChatGoogleGenerativeAI` class, which is the recommended way to interface with the Google Gemini series of models.

To use, install the requirements and configure your environment:

```bash
export GOOGLE_API_KEY=your-api-key
```

Then initialize:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HumanMessage } from "@langchain/core/messages";

const model = new ChatGoogleGenerativeAI({
  modelName: "gemini-pro",
  maxOutputTokens: 2048,
});
const response = await model.invoke([new HumanMessage("Hello world!")]);
```
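Streaming works through the standard LangChain runnable `.stream()` method. A minimal usage sketch (not part of the original README; it reuses the `model` instance from above):

```typescript
// Hypothetical sketch: iterate over AIMessageChunks as tokens arrive.
const chunks = await model.stream([new HumanMessage("Tell me a short poem.")]);
for await (const chunk of chunks) {
  // Each chunk's content holds the partial text for that step.
  console.log(chunk.content);
}
```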
#### Multimodal inputs

Gemini vision model supports image inputs when providing a single chat message. Example:

```bash npm2yarn
npm install @langchain/core
```

```typescript
import fs from "fs";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HumanMessage } from "@langchain/core/messages";

const vision = new ChatGoogleGenerativeAI({
  modelName: "gemini-pro-vision",
  maxOutputTokens: 2048,
});
const image = fs.readFileSync("./hotdog.jpg").toString("base64");
const input = [
  new HumanMessage({
    content: [
      {
        type: "text",
        text: "Describe the following image.",
      },
      {
        type: "image_url",
        image_url: `data:image/png;base64,${image}`,
      },
    ],
  }),
];

const res = await vision.invoke(input);
```

The value of `image_url` can be any of the following:

- A public image URL
- An accessible gcs file (e.g., "gcs://path/to/file.png")
- A base64 encoded image (e.g., `data:image/png;base64,abcd124`)
- A PIL image
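For example, a public URL can be passed in place of the base64 data. This is a hypothetical variation on the example above based on the list of accepted values (whether the remote URL is actually fetched is up to the underlying SDK/integration):

```typescript
// Hypothetical sketch: reference a hosted image directly by URL,
// reusing the `vision` model instance from the example above.
const urlRes = await vision.invoke([
  new HumanMessage({
    content: [
      { type: "text", text: "What is in this image?" },
      { type: "image_url", image_url: "https://example.com/hotdog.jpg" },
    ],
  }),
]);
```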
## Embeddings

This package also adds support for Google's embeddings models.

```typescript
import { GoogleGenerativeAIEmbeddings } from "@langchain/google-genai";
import { TaskType } from "@google/generative-ai";

const embeddings = new GoogleGenerativeAIEmbeddings({
  modelName: "embedding-001", // 768 dimensions
  taskType: TaskType.RETRIEVAL_DOCUMENT,
  title: "Document title",
});

const res = await embeddings.embedQuery("OK Google");
```
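For batch embedding, the standard `embedDocuments` method from LangChain's `Embeddings` interface applies. A usage sketch (not part of the original README; it reuses the `embeddings` instance above):

```typescript
// Sketch: embed several texts at once; returns one vector per input string.
const vectors = await embeddings.embedDocuments([
  "LangChain.js integrates with Gemini.",
  "Embeddings map text to numeric vectors.",
]);
console.log(vectors.length); // 2
console.log(vectors[0].length); // 768 for embedding-001
```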
## Development

To develop the Google GenAI package, you'll need to follow these instructions:

### Install dependencies

```bash
yarn install
```

### Build the package

```bash
yarn build
```

Or from the repo root:

```bash
yarn build --filter=@langchain/google-genai
```

### Run tests

Test files should live within a `tests/` directory in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should end in `.int.test.ts`:

```bash
$ yarn test
$ yarn test:int
```

### Lint & Format

Run the linter & formatter to ensure your code is up to standard:

```bash
yarn lint && yarn format
```

### Adding new entrypoints

If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to `scripts/create-entrypoints.js` and run `yarn build` to generate the new entrypoint.
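As a sketch of the first option (the module name here is hypothetical), re-exporting from `src/index.ts` looks like:

```typescript
// src/index.ts — re-export a (hypothetical) new module so the build
// scripts pick it up and ship it as part of the package entrypoint.
export * from "./my_new_module.js";
```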
package/dist/chat_models.cjs
CHANGED

```diff
@@ -129,6 +129,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
         if (this.topK && this.topK < 0) {
             throw new Error("`topK` must be a positive integer");
         }
+        this.stopSequences = fields?.stopSequences ?? this.stopSequences;
         this.apiKey = fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
         if (!this.apiKey) {
             throw new Error("Please set an API key for Google GenerativeAI " +
@@ -162,7 +163,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
     _llmType() {
         return "googlegenerativeai";
     }
-    async _generate(messages, options,
+    async _generate(messages, options, runManager) {
         const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
         const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             let output;
@@ -181,9 +182,11 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
             }
             return output;
         });
-        return (0, utils_js_1.mapGenerateContentResultToChatResult)(res.response);
+        const generationResult = (0, utils_js_1.mapGenerateContentResultToChatResult)(res.response);
+        await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");
+        return generationResult;
     }
-    async *_streamResponseChunks(messages, options,
+    async *_streamResponseChunks(messages, options, runManager) {
         const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
         const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             const { stream } = await this.client.generateContentStream({
@@ -197,6 +200,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
                 continue;
             }
             yield chunk;
+            await runManager?.handleLLMNewToken(chunk.text ?? "");
         }
     }
 }
```
package/dist/chat_models.d.ts
CHANGED

```diff
@@ -125,6 +125,6 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel implements Goo
     constructor(fields?: GoogleGenerativeAIChatInput);
     _combineLLMOutput(): never[];
     _llmType(): string;
-    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"],
-    _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"],
+    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+    _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
```
package/dist/chat_models.js
CHANGED

```diff
@@ -126,6 +126,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
         if (this.topK && this.topK < 0) {
             throw new Error("`topK` must be a positive integer");
         }
+        this.stopSequences = fields?.stopSequences ?? this.stopSequences;
         this.apiKey = fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
         if (!this.apiKey) {
             throw new Error("Please set an API key for Google GenerativeAI " +
@@ -159,7 +160,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
     _llmType() {
         return "googlegenerativeai";
     }
-    async _generate(messages, options,
+    async _generate(messages, options, runManager) {
         const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
         const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             let output;
@@ -178,9 +179,11 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
             }
             return output;
         });
-        return mapGenerateContentResultToChatResult(res.response);
+        const generationResult = mapGenerateContentResultToChatResult(res.response);
+        await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");
+        return generationResult;
     }
-    async *_streamResponseChunks(messages, options,
+    async *_streamResponseChunks(messages, options, runManager) {
         const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
         const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             const { stream } = await this.client.generateContentStream({
@@ -194,6 +197,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
                 continue;
             }
             yield chunk;
+            await runManager?.handleLLMNewToken(chunk.text ?? "");
         }
     }
 }
```
package/dist/utils.cjs
CHANGED

```diff
@@ -128,7 +128,7 @@ function mapGenerateContentResultToChatResult(response) {
         message: new messages_1.AIMessage({
             content: text,
             name: content === null ? undefined : content.role,
-            additional_kwargs:
+            additional_kwargs: generationInfo,
         }),
         generationInfo,
     };
@@ -149,6 +149,8 @@ function convertResponseContentToChatGenerationChunk(response) {
         message: new messages_1.AIMessageChunk({
             content: text,
             name: content === null ? undefined : content.role,
+            // Each chunk can have unique "generationInfo", and merging strategy is unclear,
+            // so leave blank for now.
             additional_kwargs: {},
         }),
         generationInfo,
```
package/dist/utils.js
CHANGED

```diff
@@ -121,7 +121,7 @@ export function mapGenerateContentResultToChatResult(response) {
         message: new AIMessage({
             content: text,
             name: content === null ? undefined : content.role,
-            additional_kwargs:
+            additional_kwargs: generationInfo,
         }),
         generationInfo,
     };
@@ -141,6 +141,8 @@ export function convertResponseContentToChatGenerationChunk(response) {
         message: new AIMessageChunk({
             content: text,
             name: content === null ? undefined : content.role,
+            // Each chunk can have unique "generationInfo", and merging strategy is unclear,
+            // so leave blank for now.
             additional_kwargs: {},
         }),
         generationInfo,
```
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-genai",
-  "version": "0.0.3",
+  "version": "0.0.5",
   "description": "Sample integration for LangChain.js",
   "type": "module",
   "engines": {
@@ -35,7 +35,7 @@
   "license": "MIT",
   "dependencies": {
     "@google/generative-ai": "^0.1.0",
-    "@langchain/core": "~0.1.
+    "@langchain/core": "~0.1.3"
   },
   "devDependencies": {
     "@jest/globals": "^29.5.0",
```