@langchain/google-genai 0.0.4 → 0.0.6

This diff compares the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/README.md CHANGED
```diff
@@ -21,14 +21,14 @@ You can do so by adding appropriate field to your project's `package.json` like
     "langchain": "0.0.207"
   },
   "resolutions": {
-    "@langchain/core": "0.1.1"
+    "@langchain/core": "0.1.5"
   },
   "overrides": {
-    "@langchain/core": "0.1.1"
+    "@langchain/core": "0.1.5"
   },
   "pnpm": {
     "overrides": {
-      "@langchain/core": "0.1.1"
+      "@langchain/core": "0.1.5"
     }
   }
 }
```
package/dist/chat_models.cjs CHANGED
```diff
@@ -163,7 +163,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
     _llmType() {
         return "googlegenerativeai";
     }
-    async _generate(messages, options, _runManager) {
+    async _generate(messages, options, runManager) {
         const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
         const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             let output;
@@ -182,9 +182,11 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
             }
             return output;
         });
-        return (0, utils_js_1.mapGenerateContentResultToChatResult)(res.response);
+        const generationResult = (0, utils_js_1.mapGenerateContentResultToChatResult)(res.response);
+        await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");
+        return generationResult;
     }
-    async *_streamResponseChunks(messages, options, _runManager) {
+    async *_streamResponseChunks(messages, options, runManager) {
         const prompt = (0, utils_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
         const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             const { stream } = await this.client.generateContentStream({
@@ -198,6 +200,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
                 continue;
             }
             yield chunk;
+            await runManager?.handleLLMNewToken(chunk.text ?? "");
         }
     }
 }
```
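The practical effect of threading a live `runManager` through `_generate` is that callback handlers now receive the generated text via `handleLLMNewToken` even on non-streaming calls, as a single event carrying the whole completion. A minimal sketch of observing this from user code, assuming the `gemini-pro` model name and a `GOOGLE_API_KEY` environment variable:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

async function main() {
  // Model name is illustrative; the API key is read from GOOGLE_API_KEY.
  const model = new ChatGoogleGenerativeAI({ modelName: "gemini-pro" });

  // After this change, a plain (non-streaming) call also reports the
  // generated text through handleLLMNewToken: one event, full completion.
  await model.invoke("Say hello", {
    callbacks: [
      {
        handleLLMNewToken(token: string) {
          process.stdout.write(token);
        },
      },
    ],
  });
}

main().catch(console.error);
```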
package/dist/chat_models.d.ts CHANGED
```diff
@@ -125,6 +125,6 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel implements Goo
     constructor(fields?: GoogleGenerativeAIChatInput);
     _combineLLMOutput(): never[];
     _llmType(): string;
-    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
-    _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+    _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
```
package/dist/chat_models.js CHANGED
```diff
@@ -160,7 +160,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
     _llmType() {
         return "googlegenerativeai";
     }
-    async _generate(messages, options, _runManager) {
+    async _generate(messages, options, runManager) {
         const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
         const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             let output;
@@ -179,9 +179,11 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
             }
             return output;
         });
-        return mapGenerateContentResultToChatResult(res.response);
+        const generationResult = mapGenerateContentResultToChatResult(res.response);
+        await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");
+        return generationResult;
     }
-    async *_streamResponseChunks(messages, options, _runManager) {
+    async *_streamResponseChunks(messages, options, runManager) {
         const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
         const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
             const { stream } = await this.client.generateContentStream({
@@ -195,6 +197,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
                 continue;
            }
             yield chunk;
+            await runManager?.handleLLMNewToken(chunk.text ?? "");
         }
     }
 }
```
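The streaming path gets the matching fix: each chunk yielded by `_streamResponseChunks` is now also forwarded to `handleLLMNewToken`, so tracers and token-level handlers fire as chunks arrive instead of staying silent. A sketch of a streaming call under the same assumptions as above:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

async function main() {
  const model = new ChatGoogleGenerativeAI({ modelName: "gemini-pro" });

  const stream = await model.stream("Write a haiku about version bumps", {
    callbacks: [
      {
        // Fires once per streamed chunk now that runManager is threaded through.
        handleLLMNewToken(token: string) {
          console.log("token:", JSON.stringify(token));
        },
      },
    ],
  });

  // Chunks are still yielded to the caller exactly as before.
  for await (const chunk of stream) {
    process.stdout.write(String(chunk.content));
  }
}

main().catch(console.error);
```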
package/package.json CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-genai",
-  "version": "0.0.4",
+  "version": "0.0.6",
   "description": "Sample integration for LangChain.js",
   "type": "module",
   "engines": {
@@ -23,7 +23,6 @@
     "lint:fix": "yarn lint --fix",
     "clean": "rm -rf dist/ && NODE_OPTIONS=--max-old-space-size=4096 node scripts/create-entrypoints.js pre",
     "prepack": "yarn build",
-    "release": "release-it --only-version --config .release-it.json",
     "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
     "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
     "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
@@ -35,7 +34,7 @@
   "license": "MIT",
   "dependencies": {
     "@google/generative-ai": "^0.1.0",
-    "@langchain/core": "~0.1.2"
+    "@langchain/core": "~0.1.5"
   },
   "devDependencies": {
     "@jest/globals": "^29.5.0",
@@ -57,6 +56,7 @@
     "jest": "^29.5.0",
     "jest-environment-node": "^29.6.4",
     "prettier": "^2.8.3",
+    "release-it": "^15.10.1",
     "rollup": "^4.5.2",
     "ts-jest": "^29.1.0",
     "typescript": "<5.2.0"
```