@langchain/google-genai 0.1.8 → 0.1.10

package/dist/chat_models.cjs CHANGED
@@ -507,8 +507,8 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
             throw new Error("`maxOutputTokens` must be a positive integer");
         }
         this.temperature = fields?.temperature ?? this.temperature;
-        if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
-            throw new Error("`temperature` must be in the range of [0.0,1.0]");
+        if (this.temperature && (this.temperature < 0 || this.temperature > 2)) {
+            throw new Error("`temperature` must be in the range of [0.0,2.0]");
         }
         this.topP = fields?.topP ?? this.topP;
         if (this.topP && this.topP < 0) {
@@ -690,15 +690,15 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
         const genAIUsageMetadata = response.usageMetadata;
         if (!usageMetadata) {
             usageMetadata = {
-                input_tokens: genAIUsageMetadata.promptTokenCount,
-                output_tokens: genAIUsageMetadata.candidatesTokenCount,
-                total_tokens: genAIUsageMetadata.totalTokenCount,
+                input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
             };
         }
         else {
             // Under the hood, LangChain combines the prompt tokens. Google returns the updated
             // total each time, so we need to find the difference between the tokens.
-            const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+            const outputTokenDiff = (genAIUsageMetadata.candidatesTokenCount ?? 0) -
                 usageMetadata.output_tokens;
             usageMetadata = {
                 input_tokens: 0,
package/dist/chat_models.d.ts CHANGED
@@ -48,7 +48,7 @@ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<G
     /**
      * Controls the randomness of the output.
      *
-     * Values can range from [0.0,1.0], inclusive. A value closer to 1.0
+     * Values can range from [0.0,2.0], inclusive. A value closer to 2.0
      * will produce responses that are more varied and creative, while
      * a value closer to 0.0 will typically result in less surprising
      * responses from the model.
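The doc comment and the runtime check now agree on the wider range. As a rough sketch of what this allows, assuming a GOOGLE_API_KEY in the environment and an illustrative model name (neither appears in this diff):

```ts
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Accepted as of 0.1.10: any temperature in [0.0, 2.0].
// Under 0.1.8 this constructor threw:
//   Error: `temperature` must be in the range of [0.0,1.0]
const varied = new ChatGoogleGenerativeAI({
  model: "gemini-1.5-flash", // illustrative model name
  temperature: 1.5, // closer to 2.0 -> more varied, creative output
});

// Values outside [0.0, 2.0] are still rejected at construction time:
// new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash", temperature: 2.5 });
// -> Error: `temperature` must be in the range of [0.0,2.0]
```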
package/dist/chat_models.js CHANGED
@@ -504,8 +504,8 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
             throw new Error("`maxOutputTokens` must be a positive integer");
         }
         this.temperature = fields?.temperature ?? this.temperature;
-        if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
-            throw new Error("`temperature` must be in the range of [0.0,1.0]");
+        if (this.temperature && (this.temperature < 0 || this.temperature > 2)) {
+            throw new Error("`temperature` must be in the range of [0.0,2.0]");
         }
         this.topP = fields?.topP ?? this.topP;
         if (this.topP && this.topP < 0) {
@@ -687,15 +687,15 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
         const genAIUsageMetadata = response.usageMetadata;
         if (!usageMetadata) {
             usageMetadata = {
-                input_tokens: genAIUsageMetadata.promptTokenCount,
-                output_tokens: genAIUsageMetadata.candidatesTokenCount,
-                total_tokens: genAIUsageMetadata.totalTokenCount,
+                input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
             };
         }
         else {
             // Under the hood, LangChain combines the prompt tokens. Google returns the updated
             // total each time, so we need to find the difference between the tokens.
-            const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+            const outputTokenDiff = (genAIUsageMetadata.candidatesTokenCount ?? 0) -
                 usageMetadata.output_tokens;
             usageMetadata = {
                 input_tokens: 0,
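Both builds carry the same fix: when streaming, Google can omit token counts from a chunk's usage metadata, and without the `?? 0` guards that `undefined` flows into the subtraction and turns every later total into `NaN`. Below is a self-contained sketch of the accumulation pattern, using simplified stand-in types rather than the real SDK shapes; the tail of the `else` branch, truncated in the hunks above, is filled in here as a plausible completion, not a verbatim copy:

```ts
// Simplified stand-ins for the SDK's usage metadata and LangChain's
// UsageMetadata; field names mirror the diff above.
interface GenAIUsageMetadata {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
}

interface UsageMetadata {
  input_tokens: number;
  output_tokens: number;
  total_tokens: number;
}

function accumulateUsage(
  prev: UsageMetadata | undefined,
  chunk: GenAIUsageMetadata
): UsageMetadata {
  if (!prev) {
    // First chunk: take the counts directly, defaulting absent fields to 0
    // so a missing count cannot poison later arithmetic.
    return {
      input_tokens: chunk.promptTokenCount ?? 0,
      output_tokens: chunk.candidatesTokenCount ?? 0,
      total_tokens: chunk.totalTokenCount ?? 0,
    };
  }
  // Later chunks: Google reports running totals, so only the difference
  // from what was already recorded is newly generated output.
  const outputTokenDiff =
    (chunk.candidatesTokenCount ?? 0) - prev.output_tokens;
  return {
    input_tokens: 0, // prompt tokens were counted on the first chunk
    output_tokens: outputTokenDiff, // hypothetical completion of the truncated hunk
    total_tokens: outputTokenDiff,
  };
}

// Before the fix, a chunk with no candidatesTokenCount produced
// undefined - prev.output_tokens === NaN, corrupting every later total.
```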
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-genai",
-  "version": "0.1.8",
+  "version": "0.1.10",
   "description": "Google Generative AI integration for LangChain.js",
   "type": "module",
   "engines": {