@langchain/google-common 0.2.6 → 0.2.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/types.d.ts +19 -1
- package/dist/utils/gemini.cjs +43 -17
- package/dist/utils/gemini.js +43 -17
- package/package.json +3 -3
package/dist/types.d.ts
CHANGED
|
@@ -451,10 +451,28 @@ interface GeminiResponsePromptFeedback {
|
|
|
451
451
|
blockReason?: string;
|
|
452
452
|
safetyRatings: GeminiSafetyRating[];
|
|
453
453
|
}
|
|
454
|
+
export type ModalityEnum = "TEXT" | "IMAGE" | "VIDEO" | "AUDIO" | "DOCUMENT" | string;
|
|
455
|
+
export interface ModalityTokenCount {
|
|
456
|
+
modality: ModalityEnum;
|
|
457
|
+
tokenCount: number;
|
|
458
|
+
}
|
|
459
|
+
export interface GenerateContentResponseUsageMetadata {
|
|
460
|
+
promptTokenCount: number;
|
|
461
|
+
toolUsePromptTokenCount: number;
|
|
462
|
+
cachedContentTokenCount: number;
|
|
463
|
+
thoughtsTokenCount: number;
|
|
464
|
+
candidatesTokenCount: number;
|
|
465
|
+
totalTokenCount: number;
|
|
466
|
+
promptTokensDetails: ModalityTokenCount[];
|
|
467
|
+
toolUsePromptTokensDetails: ModalityTokenCount[];
|
|
468
|
+
cacheTokensDetails: ModalityTokenCount[];
|
|
469
|
+
candidatesTokensDetails: ModalityTokenCount[];
|
|
470
|
+
[key: string]: unknown;
|
|
471
|
+
}
|
|
454
472
|
export interface GenerateContentResponseData {
|
|
455
473
|
candidates: GeminiResponseCandidate[];
|
|
456
474
|
promptFeedback: GeminiResponsePromptFeedback;
|
|
457
|
-
usageMetadata:
|
|
475
|
+
usageMetadata: GenerateContentResponseUsageMetadata;
|
|
458
476
|
}
|
|
459
477
|
export type GoogleLLMModelFamily = null | "palm" | "gemini" | "gemma";
|
|
460
478
|
export type VertexModelFamily = GoogleLLMModelFamily | "claude";
|
package/dist/utils/gemini.cjs
CHANGED
|
@@ -639,6 +639,42 @@ function getGeminiAPI(config) {
|
|
|
639
639
|
content,
|
|
640
640
|
};
|
|
641
641
|
}
|
|
642
|
+
function addModalityCounts(modalityTokenCounts, details) {
|
|
643
|
+
modalityTokenCounts?.forEach((modalityTokenCount) => {
|
|
644
|
+
const { modality, tokenCount } = modalityTokenCount;
|
|
645
|
+
const modalityLc = modality.toLowerCase();
|
|
646
|
+
const currentCount = details[modalityLc] ?? 0;
|
|
647
|
+
// eslint-disable-next-line no-param-reassign
|
|
648
|
+
details[modalityLc] = currentCount + tokenCount;
|
|
649
|
+
});
|
|
650
|
+
}
|
|
651
|
+
function responseToUsageMetadata(response) {
|
|
652
|
+
if ("usageMetadata" in response.data) {
|
|
653
|
+
const data = response?.data;
|
|
654
|
+
const usageMetadata = data?.usageMetadata;
|
|
655
|
+
const input_tokens = usageMetadata.promptTokenCount ?? 0;
|
|
656
|
+
const candidatesTokenCount = usageMetadata.candidatesTokenCount ?? 0;
|
|
657
|
+
const thoughtsTokenCount = usageMetadata.thoughtsTokenCount ?? 0;
|
|
658
|
+
const output_tokens = candidatesTokenCount + thoughtsTokenCount;
|
|
659
|
+
const total_tokens = usageMetadata.totalTokenCount ?? input_tokens + output_tokens;
|
|
660
|
+
const input_token_details = {};
|
|
661
|
+
addModalityCounts(usageMetadata.promptTokensDetails, input_token_details);
|
|
662
|
+
const output_token_details = {};
|
|
663
|
+
addModalityCounts(usageMetadata?.candidatesTokensDetails, output_token_details);
|
|
664
|
+
if (typeof usageMetadata?.thoughtsTokenCount === "number") {
|
|
665
|
+
output_token_details.reasoning = usageMetadata.thoughtsTokenCount;
|
|
666
|
+
}
|
|
667
|
+
const ret = {
|
|
668
|
+
input_tokens,
|
|
669
|
+
output_tokens,
|
|
670
|
+
total_tokens,
|
|
671
|
+
input_token_details,
|
|
672
|
+
output_token_details,
|
|
673
|
+
};
|
|
674
|
+
return ret;
|
|
675
|
+
}
|
|
676
|
+
return undefined;
|
|
677
|
+
}
|
|
642
678
|
function responseToGenerationInfo(response) {
|
|
643
679
|
const data =
|
|
644
680
|
// eslint-disable-next-line no-nested-ternary
|
|
@@ -671,11 +707,7 @@ function getGeminiAPI(config) {
|
|
|
671
707
|
// Only add the usage_metadata on the last chunk
|
|
672
708
|
// sent while streaming (see issue 8102).
|
|
673
709
|
if (typeof finish_reason === "string") {
|
|
674
|
-
ret.usage_metadata = {
|
|
675
|
-
prompt_token_count: data.usageMetadata?.promptTokenCount,
|
|
676
|
-
candidates_token_count: data.usageMetadata?.candidatesTokenCount,
|
|
677
|
-
total_token_count: data.usageMetadata?.totalTokenCount,
|
|
678
|
-
};
|
|
710
|
+
ret.usage_metadata = responseToUsageMetadata(response);
|
|
679
711
|
}
|
|
680
712
|
return ret;
|
|
681
713
|
}
|
|
@@ -852,15 +884,7 @@ function getGeminiAPI(config) {
|
|
|
852
884
|
const kwargs = combineAdditionalKwargs(gen.content);
|
|
853
885
|
const lastContent = gen.content[gen.content.length - 1];
|
|
854
886
|
// Add usage metadata
|
|
855
|
-
|
|
856
|
-
if ("usageMetadata" in response.data) {
|
|
857
|
-
usageMetadata = {
|
|
858
|
-
input_tokens: response.data.usageMetadata.promptTokenCount,
|
|
859
|
-
output_tokens: response.data.usageMetadata
|
|
860
|
-
.candidatesTokenCount,
|
|
861
|
-
total_tokens: response.data.usageMetadata.totalTokenCount,
|
|
862
|
-
};
|
|
863
|
-
}
|
|
887
|
+
const usage_metadata = responseToUsageMetadata(response);
|
|
864
888
|
// Add thinking / reasoning
|
|
865
889
|
// if (gen.reasoning && gen.reasoning.length > 0) {
|
|
866
890
|
// kwargs.reasoning_content = combineContent(gen.reasoning, true);
|
|
@@ -869,7 +893,7 @@ function getGeminiAPI(config) {
|
|
|
869
893
|
const message = new messages_1.AIMessageChunk({
|
|
870
894
|
content: combinedContent,
|
|
871
895
|
additional_kwargs: kwargs,
|
|
872
|
-
usage_metadata
|
|
896
|
+
usage_metadata,
|
|
873
897
|
tool_calls: combinedToolCalls.tool_calls,
|
|
874
898
|
invalid_tool_calls: combinedToolCalls.invalid_tool_calls,
|
|
875
899
|
});
|
|
@@ -1064,10 +1088,12 @@ function getGeminiAPI(config) {
|
|
|
1064
1088
|
}
|
|
1065
1089
|
}
|
|
1066
1090
|
// Add thinking configuration if explicitly set
|
|
1067
|
-
if (typeof parameters.maxReasoningTokens !== "undefined"
|
|
1091
|
+
if (typeof parameters.maxReasoningTokens !== "undefined" &&
|
|
1092
|
+
parameters.maxReasoningTokens !== 0) {
|
|
1068
1093
|
ret.thinkingConfig = {
|
|
1069
1094
|
thinkingBudget: parameters.maxReasoningTokens,
|
|
1070
|
-
|
|
1095
|
+
// TODO: Expose this configuration to the user once google fully supports it
|
|
1096
|
+
includeThoughts: false,
|
|
1071
1097
|
};
|
|
1072
1098
|
}
|
|
1073
1099
|
// Remove any undefined properties, so we don't send them
|
package/dist/utils/gemini.js
CHANGED
|
@@ -634,6 +634,42 @@ export function getGeminiAPI(config) {
|
|
|
634
634
|
content,
|
|
635
635
|
};
|
|
636
636
|
}
|
|
637
|
+
function addModalityCounts(modalityTokenCounts, details) {
|
|
638
|
+
modalityTokenCounts?.forEach((modalityTokenCount) => {
|
|
639
|
+
const { modality, tokenCount } = modalityTokenCount;
|
|
640
|
+
const modalityLc = modality.toLowerCase();
|
|
641
|
+
const currentCount = details[modalityLc] ?? 0;
|
|
642
|
+
// eslint-disable-next-line no-param-reassign
|
|
643
|
+
details[modalityLc] = currentCount + tokenCount;
|
|
644
|
+
});
|
|
645
|
+
}
|
|
646
|
+
function responseToUsageMetadata(response) {
|
|
647
|
+
if ("usageMetadata" in response.data) {
|
|
648
|
+
const data = response?.data;
|
|
649
|
+
const usageMetadata = data?.usageMetadata;
|
|
650
|
+
const input_tokens = usageMetadata.promptTokenCount ?? 0;
|
|
651
|
+
const candidatesTokenCount = usageMetadata.candidatesTokenCount ?? 0;
|
|
652
|
+
const thoughtsTokenCount = usageMetadata.thoughtsTokenCount ?? 0;
|
|
653
|
+
const output_tokens = candidatesTokenCount + thoughtsTokenCount;
|
|
654
|
+
const total_tokens = usageMetadata.totalTokenCount ?? input_tokens + output_tokens;
|
|
655
|
+
const input_token_details = {};
|
|
656
|
+
addModalityCounts(usageMetadata.promptTokensDetails, input_token_details);
|
|
657
|
+
const output_token_details = {};
|
|
658
|
+
addModalityCounts(usageMetadata?.candidatesTokensDetails, output_token_details);
|
|
659
|
+
if (typeof usageMetadata?.thoughtsTokenCount === "number") {
|
|
660
|
+
output_token_details.reasoning = usageMetadata.thoughtsTokenCount;
|
|
661
|
+
}
|
|
662
|
+
const ret = {
|
|
663
|
+
input_tokens,
|
|
664
|
+
output_tokens,
|
|
665
|
+
total_tokens,
|
|
666
|
+
input_token_details,
|
|
667
|
+
output_token_details,
|
|
668
|
+
};
|
|
669
|
+
return ret;
|
|
670
|
+
}
|
|
671
|
+
return undefined;
|
|
672
|
+
}
|
|
637
673
|
function responseToGenerationInfo(response) {
|
|
638
674
|
const data =
|
|
639
675
|
// eslint-disable-next-line no-nested-ternary
|
|
@@ -666,11 +702,7 @@ export function getGeminiAPI(config) {
|
|
|
666
702
|
// Only add the usage_metadata on the last chunk
|
|
667
703
|
// sent while streaming (see issue 8102).
|
|
668
704
|
if (typeof finish_reason === "string") {
|
|
669
|
-
ret.usage_metadata = {
|
|
670
|
-
prompt_token_count: data.usageMetadata?.promptTokenCount,
|
|
671
|
-
candidates_token_count: data.usageMetadata?.candidatesTokenCount,
|
|
672
|
-
total_token_count: data.usageMetadata?.totalTokenCount,
|
|
673
|
-
};
|
|
705
|
+
ret.usage_metadata = responseToUsageMetadata(response);
|
|
674
706
|
}
|
|
675
707
|
return ret;
|
|
676
708
|
}
|
|
@@ -847,15 +879,7 @@ export function getGeminiAPI(config) {
|
|
|
847
879
|
const kwargs = combineAdditionalKwargs(gen.content);
|
|
848
880
|
const lastContent = gen.content[gen.content.length - 1];
|
|
849
881
|
// Add usage metadata
|
|
850
|
-
|
|
851
|
-
if ("usageMetadata" in response.data) {
|
|
852
|
-
usageMetadata = {
|
|
853
|
-
input_tokens: response.data.usageMetadata.promptTokenCount,
|
|
854
|
-
output_tokens: response.data.usageMetadata
|
|
855
|
-
.candidatesTokenCount,
|
|
856
|
-
total_tokens: response.data.usageMetadata.totalTokenCount,
|
|
857
|
-
};
|
|
858
|
-
}
|
|
882
|
+
const usage_metadata = responseToUsageMetadata(response);
|
|
859
883
|
// Add thinking / reasoning
|
|
860
884
|
// if (gen.reasoning && gen.reasoning.length > 0) {
|
|
861
885
|
// kwargs.reasoning_content = combineContent(gen.reasoning, true);
|
|
@@ -864,7 +888,7 @@ export function getGeminiAPI(config) {
|
|
|
864
888
|
const message = new AIMessageChunk({
|
|
865
889
|
content: combinedContent,
|
|
866
890
|
additional_kwargs: kwargs,
|
|
867
|
-
usage_metadata
|
|
891
|
+
usage_metadata,
|
|
868
892
|
tool_calls: combinedToolCalls.tool_calls,
|
|
869
893
|
invalid_tool_calls: combinedToolCalls.invalid_tool_calls,
|
|
870
894
|
});
|
|
@@ -1059,10 +1083,12 @@ export function getGeminiAPI(config) {
|
|
|
1059
1083
|
}
|
|
1060
1084
|
}
|
|
1061
1085
|
// Add thinking configuration if explicitly set
|
|
1062
|
-
if (typeof parameters.maxReasoningTokens !== "undefined"
|
|
1086
|
+
if (typeof parameters.maxReasoningTokens !== "undefined" &&
|
|
1087
|
+
parameters.maxReasoningTokens !== 0) {
|
|
1063
1088
|
ret.thinkingConfig = {
|
|
1064
1089
|
thinkingBudget: parameters.maxReasoningTokens,
|
|
1065
|
-
|
|
1090
|
+
// TODO: Expose this configuration to the user once google fully supports it
|
|
1091
|
+
includeThoughts: false,
|
|
1066
1092
|
};
|
|
1067
1093
|
}
|
|
1068
1094
|
// Remove any undefined properties, so we don't send them
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@langchain/google-common",
|
|
3
|
-
"version": "0.2.6",
|
|
3
|
+
"version": "0.2.7",
|
|
4
4
|
"description": "Core types and classes for Google services.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"engines": {
|
|
@@ -36,11 +36,11 @@
|
|
|
36
36
|
"zod-to-json-schema": "^3.22.4"
|
|
37
37
|
},
|
|
38
38
|
"peerDependencies": {
|
|
39
|
-
"@langchain/core": ">=0.3.
|
|
39
|
+
"@langchain/core": ">=0.3.55 <0.4.0"
|
|
40
40
|
},
|
|
41
41
|
"devDependencies": {
|
|
42
42
|
"@jest/globals": "^29.5.0",
|
|
43
|
-
"@langchain/core": "0.3.
|
|
43
|
+
"@langchain/core": "0.3.55",
|
|
44
44
|
"@langchain/scripts": ">=0.1.0 <0.2.0",
|
|
45
45
|
"@swc/core": "^1.3.90",
|
|
46
46
|
"@swc/jest": "^0.2.29",
|