@langchain/google-genai 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -690,15 +690,15 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
             const genAIUsageMetadata = response.usageMetadata;
             if (!usageMetadata) {
                 usageMetadata = {
-                    input_tokens: genAIUsageMetadata.promptTokenCount,
-                    output_tokens: genAIUsageMetadata.candidatesTokenCount,
-                    total_tokens: genAIUsageMetadata.totalTokenCount,
+                    input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                    output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                    total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
                 };
             }
             else {
                 // Under the hood, LangChain combines the prompt tokens. Google returns the updated
                 // total each time, so we need to find the difference between the tokens.
-                const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+                const outputTokenDiff = (genAIUsageMetadata.candidatesTokenCount ?? 0) -
                     usageMetadata.output_tokens;
                 usageMetadata = {
                     input_tokens: 0,
@@ -687,15 +687,15 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
             const genAIUsageMetadata = response.usageMetadata;
             if (!usageMetadata) {
                 usageMetadata = {
-                    input_tokens: genAIUsageMetadata.promptTokenCount,
-                    output_tokens: genAIUsageMetadata.candidatesTokenCount,
-                    total_tokens: genAIUsageMetadata.totalTokenCount,
+                    input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                    output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                    total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
                 };
             }
             else {
                 // Under the hood, LangChain combines the prompt tokens. Google returns the updated
                 // total each time, so we need to find the difference between the tokens.
-                const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+                const outputTokenDiff = (genAIUsageMetadata.candidatesTokenCount ?? 0) -
                     usageMetadata.output_tokens;
                 usageMetadata = {
                     input_tokens: 0,
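
Both hunks above are the same change in the package's CJS and ESM builds of `ChatGoogleGenerativeAI`. Google's usage-metadata fields are optional, so before the `?? 0` guards a response that omitted a count turned the streaming token arithmetic into `NaN`. A minimal TypeScript sketch of that failure mode; the `GenAIUsageMetadata` interface is an assumption based on the field names visible in the diff:

```ts
// Assumed shape, inferred from the field names in the diff: every count is
// optional because Google may omit it on some responses or stream chunks.
interface GenAIUsageMetadata {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
}

// A chunk that carries prompt/total counts but no candidate count.
const meta: GenAIUsageMetadata = { promptTokenCount: 12, totalTokenCount: 12 };
const priorOutputTokens = 0; // stands in for usageMetadata.output_tokens

// Pre-0.1.11: undefined - 0 evaluates to NaN, which poisons every total
// this value is later summed into.
const before = meta.candidatesTokenCount! - priorOutputTokens; // NaN

// Post-0.1.11: a missing count is treated as zero tokens, so the
// arithmetic stays well-defined.
const after = (meta.candidatesTokenCount ?? 0) - priorOutputTokens; // 0

console.log({ before, after }); // { before: NaN, after: 0 }
```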
@@ -276,6 +276,13 @@ function mapGenerateContentResultToChatResult(response, extra) {
     };
     return {
         generations: [generation],
+        llmOutput: {
+            tokenUsage: {
+                promptTokens: extra?.usageMetadata?.input_tokens,
+                completionTokens: extra?.usageMetadata?.output_tokens,
+                totalTokens: extra?.usageMetadata?.total_tokens,
+            },
+        },
     };
 }
 exports.mapGenerateContentResultToChatResult = mapGenerateContentResultToChatResult;
@@ -269,6 +269,13 @@ export function mapGenerateContentResultToChatResult(response, extra) {
     };
     return {
         generations: [generation],
+        llmOutput: {
+            tokenUsage: {
+                promptTokens: extra?.usageMetadata?.input_tokens,
+                completionTokens: extra?.usageMetadata?.output_tokens,
+                totalTokens: extra?.usageMetadata?.total_tokens,
+            },
+        },
     };
 }
 export function convertResponseContentToChatGenerationChunk(response, extra) {
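
The second pair of hunks (again one per build) adds an `llmOutput.tokenUsage` block that mirrors LangChain's snake_case `UsageMetadata` into the camelCase shape that `llmOutput` consumers such as callback handlers conventionally read. A sketch of just that mapping, with the input type assumed for illustration:

```ts
// Assumed input type: LangChain's UsageMetadata uses snake_case keys.
interface UsageMetadata {
  input_tokens: number;
  output_tokens: number;
  total_tokens: number;
}

// The new llmOutput block performs essentially this mapping; if `extra` or
// its usageMetadata is absent, all three fields come out undefined.
function toTokenUsage(usage?: UsageMetadata) {
  return {
    promptTokens: usage?.input_tokens,
    completionTokens: usage?.output_tokens,
    totalTokens: usage?.total_tokens,
  };
}

console.log(toTokenUsage({ input_tokens: 9, output_tokens: 17, total_tokens: 26 }));
// => { promptTokens: 9, completionTokens: 17, totalTokens: 26 }
console.log(toTokenUsage(undefined));
// => { promptTokens: undefined, completionTokens: undefined, totalTokens: undefined }
```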
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-genai",
-  "version": "0.1.9",
+  "version": "0.1.11",
   "description": "Google Generative AI integration for LangChain.js",
   "type": "module",
   "engines": {