@langchain/google-genai 0.0.6 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +26 -0
- package/dist/chat_models.d.ts +3 -0
- package/dist/chat_models.js +26 -0
- package/dist/utils.cjs +5 -4
- package/dist/utils.js +5 -4
- package/package.json +8 -6
package/dist/chat_models.cjs
CHANGED
|
@@ -102,6 +102,12 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
|
|
|
102
102
|
writable: true,
|
|
103
103
|
value: void 0
|
|
104
104
|
});
|
|
105
|
+
Object.defineProperty(this, "streaming", {
|
|
106
|
+
enumerable: true,
|
|
107
|
+
configurable: true,
|
|
108
|
+
writable: true,
|
|
109
|
+
value: false
|
|
110
|
+
});
|
|
105
111
|
Object.defineProperty(this, "client", {
|
|
106
112
|
enumerable: true,
|
|
107
113
|
configurable: true,
|
|
@@ -144,6 +150,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
|
|
|
144
150
|
throw new Error("The categories in `safetySettings` array must be unique");
|
|
145
151
|
}
|
|
146
152
|
}
|
|
153
|
+
this.streaming = fields?.streaming ?? this.streaming;
|
|
147
154
|
this.client = new generative_ai_1.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
|
|
148
155
|
model: this.modelName,
|
|
149
156
|
safetySettings: this.safetySettings,
|
|
@@ -165,6 +172,25 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
|
|
|
165
172
|
}
|
|
166
173
|
async _generate(messages, options, runManager) {
|
|
167
174
|
const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
|
|
175
|
+
// Handle streaming
|
|
176
|
+
if (this.streaming) {
|
|
177
|
+
const tokenUsage = {};
|
|
178
|
+
const stream = this._streamResponseChunks(messages, options, runManager);
|
|
179
|
+
const finalChunks = {};
|
|
180
|
+
for await (const chunk of stream) {
|
|
181
|
+
const index = chunk.generationInfo?.completion ?? 0;
|
|
182
|
+
if (finalChunks[index] === undefined) {
|
|
183
|
+
finalChunks[index] = chunk;
|
|
184
|
+
}
|
|
185
|
+
else {
|
|
186
|
+
finalChunks[index] = finalChunks[index].concat(chunk);
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
const generations = Object.entries(finalChunks)
|
|
190
|
+
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
|
|
191
|
+
.map(([_, value]) => value);
|
|
192
|
+
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };
|
|
193
|
+
}
|
|
168
194
|
const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
|
|
169
195
|
let output;
|
|
170
196
|
try {
|
package/dist/chat_models.d.ts
CHANGED
|
@@ -76,6 +76,8 @@ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
|
|
|
76
76
|
* Google API key to use
|
|
77
77
|
*/
|
|
78
78
|
apiKey?: string;
|
|
79
|
+
/** Whether to stream the results or not */
|
|
80
|
+
streaming?: boolean;
|
|
79
81
|
}
|
|
80
82
|
/**
|
|
81
83
|
* A class that wraps the Google Palm chat model.
|
|
@@ -120,6 +122,7 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel implements Goo
|
|
|
120
122
|
stopSequences: string[];
|
|
121
123
|
safetySettings?: SafetySetting[];
|
|
122
124
|
apiKey?: string;
|
|
125
|
+
streaming: boolean;
|
|
123
126
|
private client;
|
|
124
127
|
get _isMultimodalModel(): boolean;
|
|
125
128
|
constructor(fields?: GoogleGenerativeAIChatInput);
|
package/dist/chat_models.js
CHANGED
|
@@ -99,6 +99,12 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
|
|
|
99
99
|
writable: true,
|
|
100
100
|
value: void 0
|
|
101
101
|
});
|
|
102
|
+
Object.defineProperty(this, "streaming", {
|
|
103
|
+
enumerable: true,
|
|
104
|
+
configurable: true,
|
|
105
|
+
writable: true,
|
|
106
|
+
value: false
|
|
107
|
+
});
|
|
102
108
|
Object.defineProperty(this, "client", {
|
|
103
109
|
enumerable: true,
|
|
104
110
|
configurable: true,
|
|
@@ -141,6 +147,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
|
|
|
141
147
|
throw new Error("The categories in `safetySettings` array must be unique");
|
|
142
148
|
}
|
|
143
149
|
}
|
|
150
|
+
this.streaming = fields?.streaming ?? this.streaming;
|
|
144
151
|
this.client = new GenerativeAI(this.apiKey).getGenerativeModel({
|
|
145
152
|
model: this.modelName,
|
|
146
153
|
safetySettings: this.safetySettings,
|
|
@@ -162,6 +169,25 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
|
|
|
162
169
|
}
|
|
163
170
|
async _generate(messages, options, runManager) {
|
|
164
171
|
const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
|
|
172
|
+
// Handle streaming
|
|
173
|
+
if (this.streaming) {
|
|
174
|
+
const tokenUsage = {};
|
|
175
|
+
const stream = this._streamResponseChunks(messages, options, runManager);
|
|
176
|
+
const finalChunks = {};
|
|
177
|
+
for await (const chunk of stream) {
|
|
178
|
+
const index = chunk.generationInfo?.completion ?? 0;
|
|
179
|
+
if (finalChunks[index] === undefined) {
|
|
180
|
+
finalChunks[index] = chunk;
|
|
181
|
+
}
|
|
182
|
+
else {
|
|
183
|
+
finalChunks[index] = finalChunks[index].concat(chunk);
|
|
184
|
+
}
|
|
185
|
+
}
|
|
186
|
+
const generations = Object.entries(finalChunks)
|
|
187
|
+
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
|
|
188
|
+
.map(([_, value]) => value);
|
|
189
|
+
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };
|
|
190
|
+
}
|
|
165
191
|
const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
|
|
166
192
|
let output;
|
|
167
193
|
try {
|
package/dist/utils.cjs
CHANGED
|
@@ -24,6 +24,7 @@ function convertAuthorToRole(author) {
|
|
|
24
24
|
* we will convert them to human messages and merge with following
|
|
25
25
|
* */
|
|
26
26
|
case "ai":
|
|
27
|
+
case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
|
|
27
28
|
return "model";
|
|
28
29
|
case "system":
|
|
29
30
|
case "human":
|
|
@@ -122,12 +123,12 @@ function mapGenerateContentResultToChatResult(response) {
|
|
|
122
123
|
}
|
|
123
124
|
const [candidate] = response.candidates;
|
|
124
125
|
const { content, ...generationInfo } = candidate;
|
|
125
|
-
const text = content.parts[0]?.text ?? "";
|
|
126
|
+
const text = content?.parts[0]?.text ?? "";
|
|
126
127
|
const generation = {
|
|
127
128
|
text,
|
|
128
129
|
message: new messages_1.AIMessage({
|
|
129
130
|
content: text,
|
|
130
|
-
name: content.role,
|
|
131
|
+
name: !content ? undefined : content.role,
|
|
131
132
|
additional_kwargs: generationInfo,
|
|
132
133
|
}),
|
|
133
134
|
generationInfo,
|
|
@@ -143,12 +144,12 @@ function convertResponseContentToChatGenerationChunk(response) {
|
|
|
143
144
|
}
|
|
144
145
|
const [candidate] = response.candidates;
|
|
145
146
|
const { content, ...generationInfo } = candidate;
|
|
146
|
-
const text = content.parts[0]?.text ?? "";
|
|
147
|
+
const text = content?.parts[0]?.text ?? "";
|
|
147
148
|
return new outputs_1.ChatGenerationChunk({
|
|
148
149
|
text,
|
|
149
150
|
message: new messages_1.AIMessageChunk({
|
|
150
151
|
content: text,
|
|
151
|
-
name: content.role,
|
|
152
|
+
name: !content ? undefined : content.role,
|
|
152
153
|
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
|
|
153
154
|
// so leave blank for now.
|
|
154
155
|
additional_kwargs: {},
|
package/dist/utils.js
CHANGED
|
@@ -20,6 +20,7 @@ export function convertAuthorToRole(author) {
|
|
|
20
20
|
* we will convert them to human messages and merge with following
|
|
21
21
|
* */
|
|
22
22
|
case "ai":
|
|
23
|
+
case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
|
|
23
24
|
return "model";
|
|
24
25
|
case "system":
|
|
25
26
|
case "human":
|
|
@@ -115,12 +116,12 @@ export function mapGenerateContentResultToChatResult(response) {
|
|
|
115
116
|
}
|
|
116
117
|
const [candidate] = response.candidates;
|
|
117
118
|
const { content, ...generationInfo } = candidate;
|
|
118
|
-
const text = content.parts[0]?.text ?? "";
|
|
119
|
+
const text = content?.parts[0]?.text ?? "";
|
|
119
120
|
const generation = {
|
|
120
121
|
text,
|
|
121
122
|
message: new AIMessage({
|
|
122
123
|
content: text,
|
|
123
|
-
name: content.role,
|
|
124
|
+
name: !content ? undefined : content.role,
|
|
124
125
|
additional_kwargs: generationInfo,
|
|
125
126
|
}),
|
|
126
127
|
generationInfo,
|
|
@@ -135,12 +136,12 @@ export function convertResponseContentToChatGenerationChunk(response) {
|
|
|
135
136
|
}
|
|
136
137
|
const [candidate] = response.candidates;
|
|
137
138
|
const { content, ...generationInfo } = candidate;
|
|
138
|
-
const text = content.parts[0]?.text ?? "";
|
|
139
|
+
const text = content?.parts[0]?.text ?? "";
|
|
139
140
|
return new ChatGenerationChunk({
|
|
140
141
|
text,
|
|
141
142
|
message: new AIMessageChunk({
|
|
142
143
|
content: text,
|
|
143
|
-
name: content.role,
|
|
144
|
+
name: !content ? undefined : content.role,
|
|
144
145
|
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
|
|
145
146
|
// so leave blank for now.
|
|
146
147
|
additional_kwargs: {},
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@langchain/google-genai",
|
|
3
|
-
"version": "0.0.6",
|
|
3
|
+
"version": "0.0.8",
|
|
4
4
|
"description": "Sample integration for LangChain.js",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"engines": {
|
|
@@ -19,21 +19,23 @@
|
|
|
19
19
|
"build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rm -rf dist-cjs",
|
|
20
20
|
"build:watch": "node scripts/create-entrypoints.js && tsc --outDir dist/ --watch",
|
|
21
21
|
"build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js",
|
|
22
|
-
"lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
|
|
23
|
-
"lint:fix": "yarn lint --fix",
|
|
22
|
+
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
|
|
23
|
+
"lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
|
|
24
|
+
"lint": "yarn lint:eslint && yarn lint:dpdm",
|
|
25
|
+
"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
|
|
24
26
|
"clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 node scripts/create-entrypoints.js pre",
|
|
25
27
|
"prepack": "yarn build",
|
|
26
28
|
"test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
|
|
27
29
|
"test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
|
|
28
30
|
"test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
|
|
29
31
|
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
|
|
30
|
-
"format": "prettier --write \"src\"",
|
|
31
|
-
"format:check": "prettier --check \"src\""
|
|
32
|
+
"format": "prettier --config .prettierrc --write \"src\" \"scripts\"",
|
|
33
|
+
"format:check": "prettier --config .prettierrc --check \"src\" \"scripts\""
|
|
32
34
|
},
|
|
33
35
|
"author": "LangChain",
|
|
34
36
|
"license": "MIT",
|
|
35
37
|
"dependencies": {
|
|
36
|
-
"@google/generative-ai": "^0.1.0",
|
|
38
|
+
"@google/generative-ai": "^0.1.3",
|
|
37
39
|
"@langchain/core": "~0.1.5"
|
|
38
40
|
},
|
|
39
41
|
"devDependencies": {
|