langchain-google-genai 2.1.6__tar.gz → 2.1.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-google-genai has been flagged as potentially problematic.
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/PKG-INFO +2 -2
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/chat_models.py +29 -43
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/pyproject.toml +5 -5
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/LICENSE +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/README.md +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/__init__.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/_common.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/_enums.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/_function_utils.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/_genai_extension.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/_image_utils.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/embeddings.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/genai_aqa.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/google_vector_store.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/llms.py +0 -0
- {langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.6
+Version: 2.1.7
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: filetype (>=1.2.0,<2.0.0)
 Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
-Requires-Dist: langchain-core (>=0.3.
+Requires-Dist: langchain-core (>=0.3.68,<0.4.0)
 Requires-Dist: pydantic (>=2,<3)
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
{langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/langchain_google_genai/chat_models.py
RENAMED
@@ -72,7 +72,7 @@ from langchain_core.messages import (
     ToolMessage,
     is_data_content_block,
 )
-from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.ai import UsageMetadata, add_usage, subtract_usage
 from langchain_core.messages.tool import invalid_tool_call, tool_call, tool_call_chunk
 from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
 from langchain_core.output_parsers.base import OutputParserLike
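The only change here is the import line: the hand-rolled token arithmetic later in the file is replaced by langchain-core's add_usage and subtract_usage helpers. For orientation, here is a rough sketch of the UsageMetadata shape these helpers operate on, inferred from the field names in this diff (langchain-core's real TypedDict may carry additional keys):

# Approximate shape of langchain_core.messages.ai.UsageMetadata,
# inferred from the fields used in this diff.
from typing import TypedDict


class InputTokenDetails(TypedDict, total=False):
    cache_read: int  # tokens served from a context cache


class OutputTokenDetails(TypedDict, total=False):
    reasoning: int  # "thought" tokens emitted by reasoning models


class UsageMetadata(TypedDict, total=False):
    input_tokens: int
    output_tokens: int
    total_tokens: int
    input_token_details: InputTokenDetails
    output_token_details: OutputTokenDetails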
@@ -716,35 +716,43 @@ def _response_to_result(
     """Converts a PaLM API response into a LangChain ChatResult."""
     llm_output = {"prompt_feedback": proto.Message.to_dict(response.prompt_feedback)}
 
-    # previous usage metadata needs to be subtracted because gemini api returns
-    # already-accumulated token counts with each chunk
-    prev_input_tokens = prev_usage["input_tokens"] if prev_usage else 0
-    prev_output_tokens = prev_usage["output_tokens"] if prev_usage else 0
-    prev_total_tokens = prev_usage["total_tokens"] if prev_usage else 0
-
     # Get usage metadata
     try:
         input_tokens = response.usage_metadata.prompt_token_count
-        output_tokens = response.usage_metadata.candidates_token_count
-        total_tokens = response.usage_metadata.total_token_count
         thought_tokens = response.usage_metadata.thoughts_token_count
+        output_tokens = response.usage_metadata.candidates_token_count + thought_tokens
+        total_tokens = response.usage_metadata.total_token_count
         cache_read_tokens = response.usage_metadata.cached_content_token_count
         if input_tokens + output_tokens + cache_read_tokens + total_tokens > 0:
             if thought_tokens > 0:
-                lc_usage = UsageMetadata(
-                    input_tokens=input_tokens - prev_input_tokens,
-                    output_tokens=output_tokens - prev_output_tokens,
-                    total_tokens=total_tokens - prev_total_tokens,
+                cumulative_usage = UsageMetadata(
+                    input_tokens=input_tokens,
+                    output_tokens=output_tokens,
+                    total_tokens=total_tokens,
                     input_token_details={"cache_read": cache_read_tokens},
                     output_token_details={"reasoning": thought_tokens},
                 )
             else:
-                lc_usage = UsageMetadata(
-                    input_tokens=input_tokens - prev_input_tokens,
-                    output_tokens=output_tokens - prev_output_tokens,
-                    total_tokens=total_tokens - prev_total_tokens,
+                cumulative_usage = UsageMetadata(
+                    input_tokens=input_tokens,
+                    output_tokens=output_tokens,
+                    total_tokens=total_tokens,
                     input_token_details={"cache_read": cache_read_tokens},
                 )
+            # previous usage metadata needs to be subtracted because gemini api returns
+            # already-accumulated token counts with each chunk
+            lc_usage = subtract_usage(cumulative_usage, prev_usage)
+            if prev_usage and cumulative_usage["input_tokens"] < prev_usage.get(
+                "input_tokens", 0
+            ):
+                # Gemini 1.5 and 2.0 return a lower cumulative count of prompt tokens
+                # in the final chunk. We take this count to be ground truth because
+                # it's consistent with the reported total tokens. So we need to
+                # ensure this chunk compensates (the subtract_usage function floors
+                # at zero).
+                lc_usage["input_tokens"] = cumulative_usage[
+                    "input_tokens"
+                ] - prev_usage.get("input_tokens", 0)
         else:
             lc_usage = None
     except AttributeError:
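Two behavioral changes are visible in this hunk: thought tokens are now counted into output_tokens, and the per-chunk delta is computed with subtract_usage instead of manual subtraction. Since the Gemini API reports cumulative token counts on every streamed chunk, per-chunk usage is the difference between consecutive cumulative totals. A minimal sketch of that idea, using subtract_with_floor as a hypothetical stand-in for langchain-core's subtract_usage (which clamps negative results to zero):

# subtract_with_floor is a hypothetical stand-in for
# langchain_core.messages.ai.subtract_usage: it turns cumulative
# counts into per-chunk deltas, flooring each delta at zero.
def subtract_with_floor(current, previous):
    previous = previous or {}
    return {
        key: max(current.get(key, 0) - previous.get(key, 0), 0)
        for key in ("input_tokens", "output_tokens", "total_tokens")
    }

# Cumulative counts as Gemini might report them across three chunks.
chunks = [
    {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
    {"input_tokens": 10, "output_tokens": 12, "total_tokens": 22},
    {"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
]

prev = None
for cumulative in chunks:
    delta = subtract_with_floor(cumulative, prev)
    print(delta)  # per-chunk usage: only the newly generated tokens
    prev = cumulative

The extra branch after subtract_usage in the hunk above exists because that zero floor would silently swallow the case where the final chunk reports a lower cumulative prompt count than earlier chunks (seen with Gemini 1.5 and 2.0); the patch writes a negative input-token delta there so the per-chunk values still sum to the final cumulative total.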
@@ -1522,7 +1530,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             metadata=self.default_metadata,
         )
 
-        prev_usage_metadata: UsageMetadata | None = None
+        prev_usage_metadata: UsageMetadata | None = None  # cumulative usage
         for chunk in response:
            _chat_result = _response_to_result(
                chunk, stream=True, prev_usage=prev_usage_metadata
@@ -1530,21 +1538,10 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
             message = cast(AIMessageChunk, gen.message)
 
-            curr_usage_metadata: UsageMetadata | dict[str, int] = (
-                message.usage_metadata or {}
-            )
-
             prev_usage_metadata = (
                 message.usage_metadata
                 if prev_usage_metadata is None
-                else UsageMetadata(
-                    input_tokens=prev_usage_metadata.get("input_tokens", 0)
-                    + curr_usage_metadata.get("input_tokens", 0),
-                    output_tokens=prev_usage_metadata.get("output_tokens", 0)
-                    + curr_usage_metadata.get("output_tokens", 0),
-                    total_tokens=prev_usage_metadata.get("total_tokens", 0)
-                    + curr_usage_metadata.get("total_tokens", 0),
-                )
+                else add_usage(prev_usage_metadata, message.usage_metadata)
             )
 
             if run_manager:
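The removed hand-rolled sum only carried input_tokens, output_tokens, and total_tokens, silently dropping the detail dicts; add_usage is expected to merge those too. A rough sketch of the accumulation pattern, with accumulate() as a simplified stand-in for langchain-core's add_usage (assumed behavior: field-wise addition, including nested detail dicts):

# accumulate() is a simplified stand-in for
# langchain_core.messages.ai.add_usage. Unlike the removed hand-rolled
# version, it also sums nested detail dicts, so fields like
# output_token_details["reasoning"] are no longer lost.
def accumulate(left, right):
    merged = {}
    for key in set(left) | set(right):
        a, b = left.get(key, 0), right.get(key, 0)
        if isinstance(a, dict) or isinstance(b, dict):
            merged[key] = accumulate(a or {}, b or {})
        else:
            merged[key] = a + b
    return merged

running = None
for chunk_usage in (
    {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
    {"input_tokens": 0, "output_tokens": 7, "total_tokens": 7,
     "output_token_details": {"reasoning": 3}},
):
    running = chunk_usage if running is None else accumulate(running, chunk_usage)

print(running)
# field-wise sums: input_tokens=10, output_tokens=12, total_tokens=22,
# and output_token_details={"reasoning": 3}

The same simplification is applied to the async streaming path below.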
@@ -1594,7 +1591,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 tool_choice=tool_choice,
                 **kwargs,
             )
-        prev_usage_metadata: UsageMetadata | None = None
+        prev_usage_metadata: UsageMetadata | None = None  # cumulative usage
         async for chunk in await _achat_with_retry(
             request=request,
             generation_method=self.async_client.stream_generate_content,
@@ -1607,21 +1604,10 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
             message = cast(AIMessageChunk, gen.message)
 
-            curr_usage_metadata: UsageMetadata | dict[str, int] = (
-                message.usage_metadata or {}
-            )
-
             prev_usage_metadata = (
                 message.usage_metadata
                 if prev_usage_metadata is None
-                else UsageMetadata(
-                    input_tokens=prev_usage_metadata.get("input_tokens", 0)
-                    + curr_usage_metadata.get("input_tokens", 0),
-                    output_tokens=prev_usage_metadata.get("output_tokens", 0)
-                    + curr_usage_metadata.get("output_tokens", 0),
-                    total_tokens=prev_usage_metadata.get("total_tokens", 0)
-                    + curr_usage_metadata.get("total_tokens", 0),
-                )
+                else add_usage(prev_usage_metadata, message.usage_metadata)
             )
 
             if run_manager:
{langchain_google_genai-2.1.6 → langchain_google_genai-2.1.7}/pyproject.toml
RENAMED

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "2.1.6"
+version = "2.1.7"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = "^0.3.
+langchain-core = "^0.3.68"
 google-ai-generativelanguage = "^0.6.18"
 pydantic = ">=2,<3"
 filetype = "^1.2.0"
@@ -29,7 +29,7 @@ pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
 pytest-retry = "^1.7.0"
 numpy = ">=1.26.2"
-langchain-tests = "0.3.
+langchain-tests = "0.3.20"
 
 [tool.codespell]
 ignore-words-list = "rouge"
@@ -58,7 +58,7 @@ ruff = "^0.1.5"
 
 [tool.poetry.group.typing.dependencies]
 mypy = "^1.10"
-types-requests = "^2.
+types-requests = "^2.31.0"
 types-google-cloud-ndb = "^2.2.0.1"
 types-protobuf = "^4.24.0.20240302"
 numpy = ">=1.26.2"
@@ -68,7 +68,7 @@ numpy = ">=1.26.2"
 optional = true
 
 [tool.poetry.group.dev.dependencies]
-types-requests = "^2.31.0
+types-requests = "^2.31.0"
 types-google-cloud-ndb = "^2.2.0.1"
 
 [tool.ruff.lint]
All remaining files are renamed from the 2.1.6 to the 2.1.7 directory without content changes: LICENSE, README.md, and langchain_google_genai/__init__.py, _common.py, _enums.py, _function_utils.py, _genai_extension.py, _image_utils.py, embeddings.py, genai_aqa.py, google_vector_store.py, llms.py, and py.typed.