langchain-google-genai 2.1.2__tar.gz → 2.1.4__tar.gz
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/PKG-INFO +19 -3
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/README.md +16 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_common.py +11 -1
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_function_utils.py +2 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/chat_models.py +206 -28
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/embeddings.py +14 -8
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/llms.py +26 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/pyproject.toml +6 -6
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/LICENSE +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/__init__.py +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_enums.py +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_genai_extension.py +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_image_utils.py +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/genai_aqa.py +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/google_vector_store.py +0 -0
- {langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.2
+Version: 2.1.4
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -12,8 +12,8 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: filetype (>=1.2.0,<2.0.0)
-Requires-Dist: google-ai-generativelanguage (>=0.6.
-Requires-Dist: langchain-core (>=0.3.
+Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
+Requires-Dist: langchain-core (>=0.3.52,<0.4.0)
 Requires-Dist: pydantic (>=2,<3)
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
@@ -116,6 +116,22 @@ chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
 ```
 
+#### Thinking support
+
+The Gemini 2.5 Flash model supports reasoning through its thoughts.
+
+```
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-04-17", thinking_budget=1024)
+
+response = llm.invoke(
+    "How many O's are in Google? Please tell me how you double checked the result"
+)
+
+assert response.usage_metadata["output_token_details"]["reasoning"] > 0
+```
+
 ## Embeddings
 
 This package also adds support for google's embeddings models.
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/README.md
RENAMED

@@ -95,6 +95,22 @@ chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
 ```
 
+#### Thinking support
+
+The Gemini 2.5 Flash model supports reasoning through its thoughts.
+
+```
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-04-17", thinking_budget=1024)
+
+response = llm.invoke(
+    "How many O's are in Google? Please tell me how you double checked the result"
+)
+
+assert response.usage_metadata["output_token_details"]["reasoning"] > 0
+```
+
 ## Embeddings
 
 This package also adds support for google's embeddings models.
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_common.py
RENAMED
@@ -1,3 +1,4 @@
+import os
 from importlib import metadata
 from typing import Any, Dict, List, Optional, Tuple, TypedDict
 
@@ -7,6 +8,9 @@ from pydantic import BaseModel, Field, SecretStr
 
 from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory, Modality
 
+_TELEMETRY_TAG = "remote_reasoning_engine"
+_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
+
 
 class GoogleGenerativeAIError(Exception):
     """
@@ -36,7 +40,7 @@ Supported examples:
         "the GOOGLE_API_KEY envvar"
     temperature: float = 0.7
     """Run inference with this temperature. Must by in the closed interval
-    [0.0, 1.0]."""
+    [0.0, 2.0]."""
     top_p: Optional[float] = None
     """Decode using nucleus sampling: consider the smallest set of tokens whose
     probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
@@ -76,6 +80,10 @@ Supported examples:
         default=None, description=("A list of modalities of the response")
     )
 
+    thinking_budget: Optional[int] = Field(
+        default=None, description="Indicates the thinking budget in tokens."
+    )
+
     safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
     """The default safety settings to use for all generations.
 
@@ -124,6 +132,8 @@ def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]:
     client_library_version = (
        f"{langchain_version}-{module}" if module else langchain_version
    )
+    if os.environ.get(_TELEMETRY_ENV_VARIABLE_NAME):
+        client_library_version += f"+{_TELEMETRY_TAG}"
     return client_library_version, f"langchain-google-genai/{client_library_version}"
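To make the telemetry hook concrete, here is a minimal sketch of how the new environment variable changes the reported user agent (the variable's value below is illustrative; only its presence matters):

```python
import os

from langchain_google_genai._common import get_user_agent

# Set only inside a Google Cloud Agent Engine runtime; value is illustrative.
os.environ["GOOGLE_CLOUD_AGENT_ENGINE_ID"] = "1234567890"

version, user_agent = get_user_agent(module="ChatGoogleGenerativeAI")
# version now ends with the telemetry tag, e.g.
# "2.1.4-ChatGoogleGenerativeAI+remote_reasoning_engine"
print(user_agent)  # "langchain-google-genai/<version>"
```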
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_function_utils.py
RENAMED

@@ -384,6 +384,8 @@ def _get_items_from_schema(schema: Union[Dict, List, str]) -> Dict[str, Any]:
     items["type_"] = _get_type_from_schema(schema)
     if items["type_"] == glm.Type.OBJECT and "properties" in schema:
         items["properties"] = _get_properties_from_schema_any(schema["properties"])
+    if items["type_"] == glm.Type.ARRAY and "items" in schema:
+        items["items"] = _format_json_schema_to_gapic(schema["items"])
     if "title" in schema or "description" in schema:
         items["description"] = (
             schema.get("description") or schema.get("title") or ""
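The two added lines mean nested arrays in a JSON schema are now recursed into rather than silently dropped. A hedged sketch of a tool schema that benefits (the tool name and fields are hypothetical):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Hypothetical tool: "points" is an array of arrays of numbers. Before this
# fix, the inner "items" schema was lost when converting to the gapic types.
tool = {
    "type": "function",
    "function": {
        "name": "plot_points",
        "description": "Plot a list of (x, y) points.",
        "parameters": {
            "type": "object",
            "properties": {
                "points": {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                }
            },
            "required": ["points"],
        },
    },
}

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash").bind_tools([tool])
```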
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/chat_models.py
RENAMED
@@ -1,10 +1,13 @@
 from __future__ import annotations
 
 import asyncio
+import base64
 import json
 import logging
+import mimetypes
 import uuid
 import warnings
+from difflib import get_close_matches
 from operator import itemgetter
 from typing import (
     Any,
@@ -22,6 +25,7 @@ from typing import (
     cast,
 )
 
+import filetype  # type: ignore[import]
 import google.api_core
 
 # TODO: remove ignore once the google package is published with types
@@ -61,6 +65,7 @@ from langchain_core.messages import (
     HumanMessage,
     SystemMessage,
     ToolMessage,
+    is_data_content_block,
 )
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import invalid_tool_call, tool_call, tool_call_chunk
@@ -73,7 +78,9 @@ from langchain_core.output_parsers.openai_tools import (
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable, RunnableConfig, RunnablePassthrough
 from langchain_core.tools import BaseTool
+from langchain_core.utils import get_pydantic_field_names
 from langchain_core.utils.function_calling import convert_to_openai_tool
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -112,9 +119,6 @@ from langchain_google_genai._image_utils import (
 
 from . import _genai_extension as genaix
 
-WARNED_STRUCTURED_OUTPUT_JSON_MODE = False
-
-
 logger = logging.getLogger(__name__)
@@ -239,7 +243,7 @@ async def _achat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
         return await _achat_with_retry(**kwargs)
 
 
-def _is_openai_parts_format(part: dict) -> bool:
+def _is_lc_content_block(part: dict) -> bool:
     return "type" in part
@@ -254,10 +258,29 @@ def _convert_to_parts(
         if isinstance(part, str):
             parts.append(Part(text=part))
         elif isinstance(part, Mapping):
-
-            if _is_openai_parts_format(part):
+            if _is_lc_content_block(part):
                 if part["type"] == "text":
                     parts.append(Part(text=part["text"]))
+                elif is_data_content_block(part):
+                    if part["source_type"] == "url":
+                        bytes_ = image_loader._bytes_from_url(part["url"])
+                    elif part["source_type"] == "base64":
+                        bytes_ = base64.b64decode(part["data"])
+                    else:
+                        raise ValueError("source_type must be url or base64.")
+                    inline_data: dict = {"data": bytes_}
+                    if "mime_type" in part:
+                        inline_data["mime_type"] = part["mime_type"]
+                    else:
+                        source = cast(str, part.get("url") or part.get("data"))
+                        mime_type, _ = mimetypes.guess_type(source)
+                        if not mime_type:
+                            kind = filetype.guess(bytes_)
+                            if kind:
+                                mime_type = kind.mime
+                        if mime_type:
+                            inline_data["mime_type"] = mime_type
+                    parts.append(Part(inline_data=inline_data))
                 elif part["type"] == "image_url":
                     img_url = part["image_url"]
                     if isinstance(img_url, dict):
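The new `is_data_content_block` branch accepts LangChain's standard multimodal content blocks (introduced in langchain-core 0.3.52, matching the dependency bump above). A minimal sketch, assuming a local `photo.png`:

```python
import base64

from langchain_core.messages import HumanMessage

# Assumed local file for illustration.
with open("photo.png", "rb") as f:
    data = base64.b64encode(f.read()).decode()

# mime_type is optional: if omitted, it is guessed from the URL/data via
# mimetypes, then from the raw bytes via filetype.
message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        {"type": "image", "source_type": "base64", "data": data, "mime_type": "image/png"},
    ]
)
```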
@@ -442,7 +465,7 @@ def _parse_response_candidate(
     try:
         text: Optional[str] = part.text
         # Remove erroneous newline character if present
-        if text is not None:
+        if not streaming and text is not None:
             text = text.rstrip("\n")
     except AttributeError:
         text = None
@@ -600,14 +623,24 @@ def _response_to_result(
             input_tokens = response.usage_metadata.prompt_token_count
             output_tokens = response.usage_metadata.candidates_token_count
             total_tokens = response.usage_metadata.total_token_count
+            thought_tokens = response.usage_metadata.thoughts_token_count
             cache_read_tokens = response.usage_metadata.cached_content_token_count
             if input_tokens + output_tokens + cache_read_tokens + total_tokens > 0:
-                lc_usage = UsageMetadata(
-                    input_tokens=input_tokens - prev_input_tokens,
-                    output_tokens=output_tokens - prev_output_tokens,
-                    total_tokens=total_tokens - prev_total_tokens,
-                    input_token_details={"cache_read": cache_read_tokens},
-                )
+                if thought_tokens > 0:
+                    lc_usage = UsageMetadata(
+                        input_tokens=input_tokens - prev_input_tokens,
+                        output_tokens=output_tokens - prev_output_tokens,
+                        total_tokens=total_tokens - prev_total_tokens,
+                        input_token_details={"cache_read": cache_read_tokens},
+                        output_token_details={"reasoning": thought_tokens},
+                    )
+                else:
+                    lc_usage = UsageMetadata(
+                        input_tokens=input_tokens - prev_input_tokens,
+                        output_tokens=output_tokens - prev_output_tokens,
+                        total_tokens=total_tokens - prev_total_tokens,
+                        input_token_details={"cache_read": cache_read_tokens},
+                    )
             else:
                 lc_usage = None
         except AttributeError:
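With a thinking-enabled model, the resulting `AIMessage.usage_metadata` now reports thought tokens under `output_token_details["reasoning"]`. A sketch of the shape (all numbers illustrative):

```python
# Shape of usage_metadata when thoughts_token_count > 0; numbers are made up.
usage_metadata = {
    "input_tokens": 18,
    "output_tokens": 249,
    "total_tokens": 267,
    "input_token_details": {"cache_read": 0},
    "output_token_details": {"reasoning": 128},
}
```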
@@ -709,7 +742,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
    .. code-block:: python

        AIMessageChunk(content='J', response_metadata={'finish_reason': 'STOP', 'safety_ratings': []}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 1, 'total_tokens': 19})
-        AIMessageChunk(content="'adore programmer.
+        AIMessageChunk(content="'adore programmer. \\n", response_metadata={'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23})

    .. code-block:: python
@@ -739,6 +772,109 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
            # batch:
            # await llm.abatch([messages])

+    Context Caching:
+        Context caching allows you to store and reuse content (e.g., PDFs, images) for faster processing.
+        The `cached_content` parameter accepts a cache name created via the Google Generative AI API.
+        Below are two examples: caching a single file directly and caching multiple files using `Part`.
+
+        Single File Example:
+        This caches a single file and queries it.
+
+        .. code-block:: python
+
+            from google import genai
+            from google.genai import types
+            import time
+            from langchain_google_genai import ChatGoogleGenerativeAI
+            from langchain_core.messages import HumanMessage
+
+            client = genai.Client()
+
+            # Upload file
+            file = client.files.upload(file="./example_file")
+            while file.state.name == 'PROCESSING':
+                time.sleep(2)
+                file = client.files.get(name=file.name)
+
+            # Create cache
+            model = 'models/gemini-1.5-flash-001'
+            cache = client.caches.create(
+                model=model,
+                config=types.CreateCachedContentConfig(
+                    display_name='Cached Content',
+                    system_instruction=(
+                        'You are an expert content analyzer, and your job is to answer '
+                        'the user\'s query based on the file you have access to.'
+                    ),
+                    contents=[file],
+                    ttl="300s",
+                )
+            )
+
+            # Query with LangChain
+            llm = ChatGoogleGenerativeAI(
+                model=model,
+                cached_content=cache.name,
+            )
+            message = HumanMessage(content="Summarize the main points of the content.")
+            llm.invoke([message])
+
+        Multiple Files Example:
+        This caches two files using `Part` and queries them together.
+
+        .. code-block:: python
+
+            from google import genai
+            from google.genai.types import CreateCachedContentConfig, Content, Part
+            import time
+            from langchain_google_genai import ChatGoogleGenerativeAI
+            from langchain_core.messages import HumanMessage
+
+            client = genai.Client()
+
+            # Upload files
+            file_1 = client.files.upload(file="./file1")
+            while file_1.state.name == 'PROCESSING':
+                time.sleep(2)
+                file_1 = client.files.get(name=file_1.name)
+
+            file_2 = client.files.upload(file="./file2")
+            while file_2.state.name == 'PROCESSING':
+                time.sleep(2)
+                file_2 = client.files.get(name=file_2.name)
+
+            # Create cache with multiple files
+            contents = [
+                Content(
+                    role="user",
+                    parts=[
+                        Part.from_uri(file_uri=file_1.uri, mime_type=file_1.mime_type),
+                        Part.from_uri(file_uri=file_2.uri, mime_type=file_2.mime_type),
+                    ],
+                )
+            ]
+            model = "gemini-1.5-flash-001"
+            cache = client.caches.create(
+                model=model,
+                config=CreateCachedContentConfig(
+                    display_name='Cached Contents',
+                    system_instruction=(
+                        'You are an expert content analyzer, and your job is to answer '
+                        'the user\'s query based on the files you have access to.'
+                    ),
+                    contents=contents,
+                    ttl="300s",
+                )
+            )
+
+            # Query with LangChain
+            llm = ChatGoogleGenerativeAI(
+                model=model,
+                cached_content=cache.name,
+            )
+            message = HumanMessage(content="Provide a summary of the key information across both files.")
+            llm.invoke([message])
+
    Tool calling:
        .. code-block:: python
@@ -842,7 +978,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):

    .. code-block:: python

-        'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms.
+        'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms.'

    Token usage:
    .. code-block:: python
@@ -891,6 +1027,31 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
        ``cachedContents/{cachedContent}``.
    """

+    model_kwargs: dict[str, Any] = Field(default_factory=dict)
+    """Holds any unexpected initialization parameters."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        """Needed for arg validation."""
+        # Get all valid field names, including aliases
+        valid_fields = set()
+        for field_name, field_info in self.model_fields.items():
+            valid_fields.add(field_name)
+            if hasattr(field_info, "alias") and field_info.alias is not None:
+                valid_fields.add(field_info.alias)
+
+        # Check for unrecognized arguments
+        for arg in kwargs:
+            if arg not in valid_fields:
+                suggestions = get_close_matches(arg, valid_fields, n=1)
+                suggestion = (
+                    f" Did you mean: '{suggestions[0]}'?" if suggestions else ""
+                )
+                logger.warning(
+                    f"Unexpected argument '{arg}' "
+                    f"provided to ChatGoogleGenerativeAI.{suggestion}"
+                )
+        super().__init__(**kwargs)
+
    model_config = ConfigDict(
        populate_by_name=True,
    )
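A quick sketch of what the new `__init__` validation and `model_kwargs` field buy you (the misspelling is deliberate, the model name illustrative):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Logs a warning instead of failing hard:
#   Unexpected argument 'temprature' provided to ChatGoogleGenerativeAI. Did you mean: 'temperature'?
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temprature=0.2)

# The stray kwarg is captured by build_extra (next hunk) rather than raising.
print(llm.model_kwargs)  # {'temprature': 0.2}
```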
@@ -915,6 +1076,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
    def is_lc_serializable(self) -> bool:
        return True

+    @model_validator(mode="before")
+    @classmethod
+    def build_extra(cls, values: dict[str, Any]) -> Any:
+        """Build extra kwargs from additional params that were passed in."""
+        all_required_field_names = get_pydantic_field_names(cls)
+        values = _build_model_kwargs(values, all_required_field_names)
+        return values
+
    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validates params and passes them to google-generativeai package."""
@@ -927,12 +1096,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
        if self.top_k is not None and self.top_k <= 0:
            raise ValueError("top_k must be positive")

-        if not self.model.startswith("models/"):
+        if not any(
+            self.model.startswith(prefix) for prefix in ("models/", "tunedModels/")
+        ):
            self.model = f"models/{self.model}"

        additional_headers = self.additional_headers or {}
        self.default_metadata = tuple(additional_headers.items())
-        client_info = get_client_info("ChatGoogleGenerativeAI")
+        client_info = get_client_info(f"ChatGoogleGenerativeAI:{self.model}")
        google_api_key = None
        if not self.credentials:
            if isinstance(self.google_api_key, SecretStr):
@@ -964,12 +1135,17 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
        # this check ensures that async client is only initialized
        # within an asyncio event loop to avoid the error
        if not self.async_client_running and _is_event_loop_running():
+            # async clients don't support "rest" transport
+            # https://github.com/googleapis/gapic-generator-python/issues/1962
+            transport = self.transport
+            if transport == "rest":
+                transport = "grpc_asyncio"
            self.async_client_running = genaix.build_generative_async_service(
                credentials=self.credentials,
                api_key=google_api_key,
-                client_info=get_client_info("ChatGoogleGenerativeAI"),
+                client_info=get_client_info(f"ChatGoogleGenerativeAI:{self.model}"),
                client_options=self.client_options,
-                transport=self.transport,
+                transport=transport,
            )
        return self.async_client_running
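In practice this means a model configured for REST keeps that transport for synchronous calls, while async calls fall back to gRPC. A minimal sketch (model name illustrative):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", transport="rest")

# llm.invoke(...) uses REST; `await llm.ainvoke(...)` builds its async client
# with transport="grpc_asyncio", since async gapic clients don't support REST.
```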
@@ -983,6 +1159,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
            "n": self.n,
            "safety_settings": self.safety_settings,
            "response_modalities": self.response_modalities,
+            "thinking_budget": self.thinking_budget,
        }

    def invoke(
@@ -1026,9 +1203,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)
+        models_prefix = "models/"
+        ls_model_name = (
+            self.model[len(models_prefix) :]
+            if self.model and self.model.startswith(models_prefix)
+            else self.model
+        )
        ls_params = LangSmithParams(
            ls_provider="google_genai",
-            ls_model_name=self.model,
+            ls_model_name=ls_model_name,
            ls_model_type="chat",
            ls_temperature=params.get("temperature", self.temperature),
        )
@@ -1053,6 +1236,9 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
            "top_k": self.top_k,
            "top_p": self.top_p,
            "response_modalities": self.response_modalities,
+            "thinking_config": {"thinking_budget": self.thinking_budget}
+            if self.thinking_budget is not None
+            else None,
        }.items()
        if v is not None
    }
@@ -1406,14 +1592,6 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                tools=[schema], first_tool_only=True
            )
        else:
-            global WARNED_STRUCTURED_OUTPUT_JSON_MODE
-            warnings.warn(
-                "ChatGoogleGenerativeAI.with_structured_output with dict schema has "
-                "changed recently to align with behavior of other LangChain chat "
-                "models. More context: "
-                "https://github.com/langchain-ai/langchain-google/pull/772"
-            )
-            WARNED_STRUCTURED_OUTPUT_JSON_MODE = True
            parser = JsonOutputKeyToolsParser(key_name=tool_name, first_tool_only=True)
            tool_choice = tool_name if self._supports_tool_choice else None
            try:
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/embeddings.py
RENAMED
@@ -6,6 +6,7 @@ from typing import Any, Dict, List, Optional
 from google.ai.generativelanguage_v1beta.types import (
     BatchEmbedContentsRequest,
     EmbedContentRequest,
+    EmbedContentResponse,
 )
 from langchain_core.embeddings import Embeddings
 from langchain_core.utils import secret_from_env
@@ -239,7 +240,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
        title: Optional[str] = None,
        output_dimensionality: Optional[int] = None,
    ) -> List[float]:
-        """Embed a text
+        """Embed a text, using the non-batch endpoint:
+        https://ai.google.dev/api/rest/v1/models/embedContent#EmbedContentRequest

        Args:
            text: The text to embed.
@@ -247,15 +249,19 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
            title: An optional title for the text.
                Only applicable when TaskType is RETRIEVAL_DOCUMENT.
            output_dimensionality: Optional reduced dimension for the output embedding.
-                https://ai.google.dev/api/rest/v1/models/batchEmbedContents#EmbedContentRequest

        Returns:
            Embedding for the text.
        """
        task_type = self.task_type or "RETRIEVAL_QUERY"
-
-
-
-
-
-
+        try:
+            request: EmbedContentRequest = self._prepare_request(
+                text=text,
+                task_type=task_type,
+                title=title,
+                output_dimensionality=output_dimensionality,
+            )
+            result: EmbedContentResponse = self.client.embed_content(request)
+        except Exception as e:
+            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+        return list(result.embedding.values)
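For reference, a minimal sketch of the call path this hunk changes: `embed_query` now issues a single `EmbedContentRequest` and wraps failures in `GoogleGenerativeAIError` (model name illustrative):

```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings

embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

# One embedContent call per query, rather than the batch endpoint.
vector = embeddings.embed_query("What is the meaning of life?")
print(len(vector))
```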
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/llms.py
RENAMED
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import logging
+from difflib import get_close_matches
 from typing import Any, Iterator, List, Optional
 
 from langchain_core.callbacks import (
@@ -17,6 +19,8 @@ from langchain_google_genai._common import (
 )
 from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
 
+logger = logging.getLogger(__name__)
+
 
 class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     """Google GenerativeAI models.
@@ -33,6 +37,28 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
        populate_by_name=True,
    )

+    def __init__(self, **kwargs: Any) -> None:
+        """Needed for arg validation."""
+        # Get all valid field names, including aliases
+        valid_fields = set()
+        for field_name, field_info in self.model_fields.items():
+            valid_fields.add(field_name)
+            if hasattr(field_info, "alias") and field_info.alias is not None:
+                valid_fields.add(field_info.alias)
+
+        # Check for unrecognized arguments
+        for arg in kwargs:
+            if arg not in valid_fields:
+                suggestions = get_close_matches(arg, valid_fields, n=1)
+                suggestion = (
+                    f" Did you mean: '{suggestions[0]}'?" if suggestions else ""
+                )
+                logger.warning(
+                    f"Unexpected argument '{arg}' "
+                    f"provided to GoogleGenerativeAI.{suggestion}"
+                )
+        super().__init__(**kwargs)
+
    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validates params and passes them to google-generativeai package."""
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/pyproject.toml
RENAMED

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "2.1.2"
+version = "2.1.4"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -12,8 +12,8 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = "^0.3.
-google-ai-generativelanguage = "^0.6.
+langchain-core = "^0.3.52"
+google-ai-generativelanguage = "^0.6.18"
 pydantic = ">=2,<3"
 filetype = "^1.2.0"
@@ -27,8 +27,8 @@ pytest-mock = "^3.10.0"
 syrupy = "^4.0.2"
 pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
-numpy = "
-langchain-tests = "0.3.
+numpy = ">=1.26.2"
+langchain-tests = "0.3.19"
@@ -60,7 +60,7 @@ mypy = "^1.10"
 types-requests = "^2.28.11.5"
 types-google-cloud-ndb = "^2.2.0.1"
 types-protobuf = "^4.24.0.20240302"
-numpy = "
+numpy = ">=1.26.2"
 
 [tool.poetry.group.dev]
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/LICENSE
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/__init__.py
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_enums.py
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_genai_extension.py
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/_image_utils.py
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/genai_aqa.py
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/google_vector_store.py
RENAMED
File without changes
{langchain_google_genai-2.1.2 → langchain_google_genai-2.1.4}/langchain_google_genai/py.typed
RENAMED
File without changes