@langchain/google-genai 0.1.9 → 0.1.10
- package/dist/chat_models.cjs +4 -4
- package/dist/chat_models.js +4 -4
- package/package.json +1 -1
package/dist/chat_models.cjs
CHANGED
@@ -690,15 +690,15 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
         const genAIUsageMetadata = response.usageMetadata;
         if (!usageMetadata) {
             usageMetadata = {
-                input_tokens: genAIUsageMetadata.promptTokenCount,
-                output_tokens: genAIUsageMetadata.candidatesTokenCount,
-                total_tokens: genAIUsageMetadata.totalTokenCount,
+                input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
             };
         }
         else {
             // Under the hood, LangChain combines the prompt tokens. Google returns the updated
             // total each time, so we need to find the difference between the tokens.
-            const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+            const outputTokenDiff = (genAIUsageMetadata.candidatesTokenCount ?? 0) -
                 usageMetadata.output_tokens;
             usageMetadata = {
                 input_tokens: 0,
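The substance of the change, here and in the identical ESM diff below, is the `?? 0` guard: every token-count field on Gemini's usageMetadata is optional, and when one was absent the old code stored undefined or subtracted it, turning the accumulated totals into NaN. A minimal sketch of the failure mode (standalone TypeScript, not code from the package):

// A Gemini response can omit any of its token counts.
function tokenCount(): number | undefined {
  return undefined;
}

const candidatesTokenCount = tokenCount();

// Before 0.1.10: arithmetic on a missing count yields NaN,
// which poisons every total it is folded into.
console.log((candidatesTokenCount as number) - 5); // NaN

// After 0.1.10: nullish coalescing pins a missing count to 0.
console.log((candidatesTokenCount ?? 0) - 5); // -5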
package/dist/chat_models.js
CHANGED
@@ -687,15 +687,15 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
         const genAIUsageMetadata = response.usageMetadata;
         if (!usageMetadata) {
             usageMetadata = {
-                input_tokens: genAIUsageMetadata.promptTokenCount,
-                output_tokens: genAIUsageMetadata.candidatesTokenCount,
-                total_tokens: genAIUsageMetadata.totalTokenCount,
+                input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
             };
         }
         else {
             // Under the hood, LangChain combines the prompt tokens. Google returns the updated
             // total each time, so we need to find the difference between the tokens.
-            const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+            const outputTokenDiff = (genAIUsageMetadata.candidatesTokenCount ?? 0) -
                 usageMetadata.output_tokens;
             usageMetadata = {
                 input_tokens: 0,
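The last guarded line sits in the streaming branch. As the in-code comment explains, Google reports a cumulative candidatesTokenCount on each chunk while LangChain accumulates per-chunk deltas, so the handler subtracts the output tokens already recorded. A simplified, self-contained sketch of that differencing logic (the accumulateOutputTokens helper is hypothetical, not the package's implementation):

interface UsageMetadata {
  input_tokens: number;
  output_tokens: number;
  total_tokens: number;
}

// Fold one chunk's cumulative candidate-token total into a running record.
// Google resends the updated total, so only the difference is new output.
function accumulateOutputTokens(
  running: UsageMetadata,
  cumulativeCandidateTokens: number | undefined
): UsageMetadata {
  // Mirrors the patched line: a missing count is treated as 0
  // before differencing, instead of producing NaN.
  const outputTokenDiff =
    (cumulativeCandidateTokens ?? 0) - running.output_tokens;
  return {
    input_tokens: running.input_tokens,
    output_tokens: running.output_tokens + outputTokenDiff,
    total_tokens: running.total_tokens + outputTokenDiff,
  };
}

let usage: UsageMetadata = { input_tokens: 0, output_tokens: 0, total_tokens: 0 };
for (const cumulative of [3, 7, 12]) {
  usage = accumulateOutputTokens(usage, cumulative);
}
console.log(usage.output_tokens); // 12 -- the deltas 3, 4, 5 sum back to the total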