langchain-google-genai 3.0.0rc1__tar.gz → 3.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/PKG-INFO +4 -4
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/README.md +2 -2
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/__init__.py +7 -1
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_common.py +67 -33
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_compat.py +4 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_enums.py +2 -1
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/chat_models.py +23 -5
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/embeddings.py +22 -4
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/genai_aqa.py +14 -3
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/google_vector_store.py +19 -9
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/llms.py +1 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/pyproject.toml +3 -3
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_chat_models.py +43 -7
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_standard.py +32 -52
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_chat_models.py +56 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_embeddings.py +31 -1
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_imports.py +1 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_llms.py +46 -1
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/LICENSE +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_function_utils.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_genai_extension.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_image_utils.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/py.typed +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/__init__.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/conftest.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/.env.example +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/__init__.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/terraform/main.tf +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_callbacks.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_compile.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_embeddings.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_function_call.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_llms.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_tools.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/__init__.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/__snapshots__/test_standard.ambr +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_chat_models_protobuf_fix.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_common.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_function_utils.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_genai_aqa.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_google_vector_store.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_standard.py +0 -0
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/PKG-INFO
RENAMED
@@ -1,13 +1,13 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 3.0.0rc1
+Version: 3.0.1
 Summary: An integration package connecting Google's genai package and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
 Project-URL: repository, https://github.com/langchain-ai/langchain-google
 Requires-Python: <4.0.0,>=3.10.0
-Requires-Dist: langchain-core<2.0.0,>=1.0.
+Requires-Dist: langchain-core<2.0.0,>=1.0.0
 Requires-Dist: google-ai-generativelanguage<1.0.0,>=0.7.0
 Requires-Dist: pydantic<3.0.0,>=2.0.0
 Requires-Dist: filetype<2.0.0,>=1.2.0
@@ -131,7 +131,7 @@ Some Gemini models supports both text and inline image outputs.
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image")
 
 response = llm.invoke(
     "Generate an image of a cat and say meow",
@@ -179,7 +179,7 @@ from langchain_core.prompts import ChatPromptTemplate
 from langchain_google_genai import ChatGoogleGenerativeAI, Modality
 
 llm = ChatGoogleGenerativeAI(
-    model="gemini-2.5-flash-image
+    model="gemini-2.5-flash-image",
     response_modalities=[Modality.TEXT, Modality.IMAGE],
 )
 
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/README.md
RENAMED
@@ -116,7 +116,7 @@ Some Gemini models supports both text and inline image outputs.
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image")
 
 response = llm.invoke(
     "Generate an image of a cat and say meow",
@@ -164,7 +164,7 @@ from langchain_core.prompts import ChatPromptTemplate
 from langchain_google_genai import ChatGoogleGenerativeAI, Modality
 
 llm = ChatGoogleGenerativeAI(
-    model="gemini-2.5-flash-image
+    model="gemini-2.5-flash-image",
     response_modalities=[Modality.TEXT, Modality.IMAGE],
 )
 
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/__init__.py
RENAMED
@@ -57,7 +57,12 @@ The package also supports creating embeddings with Google's models, useful for t
 
 """  # noqa: E501
 
-from langchain_google_genai._enums import
+from langchain_google_genai._enums import (
+    HarmBlockThreshold,
+    HarmCategory,
+    MediaResolution,
+    Modality,
+)
 from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
 from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
 from langchain_google_genai.genai_aqa import (
@@ -84,4 +89,5 @@ __all__ = [
     "HarmBlockThreshold",
     "HarmCategory",
     "Modality",
+    "MediaResolution",
 ]
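With the expanded import block, `MediaResolution` joins the other enums at the package root and in `__all__`. A minimal sketch of the new import surface:

```python
# All four enums resolve from the package root after this change.
from langchain_google_genai import (
    HarmBlockThreshold,
    HarmCategory,
    MediaResolution,
    Modality,
)
```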
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_common.py
RENAMED
@@ -6,7 +6,12 @@ from google.api_core.gapic_v1.client_info import ClientInfo
 from langchain_core.utils import secret_from_env
 from pydantic import BaseModel, Field, SecretStr
 
-from langchain_google_genai._enums import
+from langchain_google_genai._enums import (
+    HarmBlockThreshold,
+    HarmCategory,
+    MediaResolution,
+    Modality,
+)
 
 _TELEMETRY_TAG = "remote_reasoning_engine"
 _TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
@@ -19,83 +24,112 @@ class GoogleGenerativeAIError(Exception):
 class _BaseGoogleGenerativeAI(BaseModel):
     """Base class for Google Generative AI LLMs."""
 
-    model: str = Field(
-        ...,
-        description="""The name of the model to use.
-Examples:
-    - gemini-2.5-flash
-    - models/text-bison-001""",
-    )
+    model: str = Field(...)
     """Model name to use."""
+
     google_api_key: Optional[SecretStr] = Field(
         alias="api_key", default_factory=secret_from_env("GOOGLE_API_KEY", default=None)
     )
     """Google AI API key.
-
+
+    If not specified will be read from env var `GOOGLE_API_KEY`.
+    """
 
     credentials: Any = None
-    "The default custom credentials (google.auth.credentials.Credentials) to use
-
+    """The default custom credentials (`google.auth.credentials.Credentials`) to use
+    when making API calls.
+
+    If not provided, credentials will be ascertained from the `GOOGLE_API_KEY` env var.
+    """
 
     temperature: float = 0.7
-    """Run inference with this temperature. Must be within
-
+    """Run inference with this temperature. Must be within `[0.0, 2.0]`.
+
+    If unset, will default to `0.7`.
+    """
 
     top_p: Optional[float] = None
     """Decode using nucleus sampling: consider the smallest set of tokens whose
-    probability sum is at least
+    probability sum is at least `top_p`.
+
+    Must be within `[0.0, 1.0]`.
+    """
 
     top_k: Optional[int] = None
-    """Decode using top-k sampling: consider the set of
-
+    """Decode using top-k sampling: consider the set of `top_k` most probable tokens.
+
+    Must be positive.
+    """
 
     max_output_tokens: Optional[int] = Field(default=None, alias="max_tokens")
     """Maximum number of tokens to include in a candidate. Must be greater than zero.
+
     If unset, will use the model's default value, which varies by model.
-
+
+    See https://ai.google.dev/gemini-api/docs/models for model-specific limits.
+    """
 
     n: int = 1
-    """Number of chat completions to generate for each prompt.
-
+    """Number of chat completions to generate for each prompt.
+
+    Note that the API may not return the full `n` completions if duplicates are
+    generated.
+    """
 
     max_retries: int = Field(default=6, alias="retries")
-    """The maximum number of retries to make when generating.
-
+    """The maximum number of retries to make when generating.
+
+    If unset, will default to `6`.
+    """
 
     timeout: Optional[float] = Field(default=None, alias="request_timeout")
     """The maximum number of seconds to wait for a response."""
 
     client_options: Optional[Dict] = Field(
         default=None,
-        description=(
-            "A dictionary of client options to pass to the Google API client, "
-            "such as `api_endpoint`."
-        ),
     )
+    """"A dictionary of client options to pass to the Google API client, such as
+    `api_endpoint`.
+    """
+
+    base_url: Optional[str] = Field(
+        default=None,
+    )
+    """The base URL to use for the API client.
+
+    Alias of `client_options['api_endpoint']`.
+    """
+
     transport: Optional[str] = Field(
         default=None,
-        description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
         alias="api_transport",
     )
+    """A string, one of: `[rest, grpc, grpc_asyncio]`."""
+
     additional_headers: Optional[Dict[str, str]] = Field(
         default=None,
-        description=(
-            "A key-value dictionary representing additional headers for the model call"
-        ),
     )
+    """"Key-value dictionary representing additional headers for the model call"""
+
     response_modalities: Optional[List[Modality]] = Field(
-        default=None,
+        default=None,
     )
+    """A list of modalities of the response"""
 
     thinking_budget: Optional[int] = Field(
-        default=None,
+        default=None,
+    )
+    """Indicates the thinking budget in tokens."""
+
+    media_resolution: Optional[MediaResolution] = Field(
+        default=None,
     )
+    """Media resolution for the input media."""
 
     include_thoughts: Optional[bool] = Field(
         default=None,
-        description="Indicates whether to include thoughts in the response.",
     )
+    """Indicates whether to include thoughts in the response."""
 
     safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
     """The default safety settings to use for all generations.
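The new `base_url` field is a shorthand for `client_options['api_endpoint']`; the client builders in `chat_models.py` and `embeddings.py` below merge it in only when `api_endpoint` is not already set. A minimal sketch, assuming a hypothetical proxy endpoint:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Equivalent to client_options={"api_endpoint": "https://example.com"};
# an explicit client_options["api_endpoint"] still takes precedence.
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",  # illustrative model choice
    base_url="https://example.com",  # hypothetical endpoint
)
```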
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_compat.py
RENAMED
@@ -143,6 +143,10 @@ def _convert_from_v1_to_generativelanguage_v1beta(
         # TextContentBlock
         if block_dict["type"] == "text":
             new_block = {"text": block_dict.get("text", "")}
+            if (
+                thought_signature := (block_dict.get("extras") or {}).get("signature")  # type: ignore[attr-defined]
+            ) and model_provider == "google_genai":
+                new_block["thought_signature"] = thought_signature
             new_content.append(new_block)
     # Citations are only handled on output. Can't pass them back :/
 
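The added branch keeps Gemini thought signatures attached to text blocks when v1 content is converted back to the `generativelanguage` wire format. A sketch of the round trip, mirroring the `test_compat` unit test added later in this diff:

```python
from langchain_google_genai._compat import (
    _convert_from_v1_to_generativelanguage_v1beta,
)

# A v1 text block carrying a signature in `extras` keeps it as
# `thought_signature`, but only for the "google_genai" provider.
block = {"type": "text", "text": "foo", "extras": {"signature": "bar"}}
result = _convert_from_v1_to_generativelanguage_v1beta([block], "google_genai")
assert result == [{"text": "foo", "thought_signature": "bar"}]
```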
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/_enums.py
RENAMED
@@ -3,5 +3,6 @@ import google.ai.generativelanguage_v1beta as genai
 HarmBlockThreshold = genai.SafetySetting.HarmBlockThreshold
 HarmCategory = genai.HarmCategory
 Modality = genai.GenerationConfig.Modality
+MediaResolution = genai.GenerationConfig.MediaResolution
 
-__all__ = ["HarmBlockThreshold", "HarmCategory", "Modality"]
+__all__ = ["HarmBlockThreshold", "HarmCategory", "Modality", "MediaResolution"]
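`MediaResolution` is a thin alias over the proto enum, so it plugs directly into the `media_resolution` field added in `_common.py`. A minimal sketch using the member the new integration test relies on:

```python
from langchain_google_genai import ChatGoogleGenerativeAI, MediaResolution

# Lower input-media resolution trades image detail for fewer input
# tokens (the new integration test expects roughly a 3x reduction).
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",  # illustrative model choice
    media_resolution=MediaResolution.MEDIA_RESOLUTION_LOW,
)
```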
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/chat_models.py
RENAMED
@@ -199,7 +199,7 @@ def _chat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
 
     Args:
         generation_method (Callable): The chat generation method to be executed.
-        **kwargs
+        **kwargs: Additional keyword arguments to pass to the generation method.
 
     Returns:
         Any: The result from the chat generation method.
@@ -256,7 +256,7 @@ async def _achat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
 
     Args:
         generation_method (Callable): The chat generation method to be executed.
-        **kwargs
+        **kwargs: Additional keyword arguments to pass to the generation method.
 
     Returns:
         Any: The result from the chat generation method.
@@ -360,7 +360,7 @@ def _convert_to_parts(
             if "mime_type" in part:
                 inline_data["mime_type"] = part["mime_type"]
             else:
-                # Guess
+                # Guess MIME type based on data field if not provided
                 source = cast(
                     "str",
                     part.get("url") or part.get("base64") or part.get("data"),
@@ -1912,11 +1912,17 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         else:
             google_api_key = self.google_api_key
         transport: Optional[str] = self.transport
+
+        # Merge base_url into client_options if provided
+        client_options = self.client_options or {}
+        if self.base_url and "api_endpoint" not in client_options:
+            client_options = {**client_options, "api_endpoint": self.base_url}
+
         self.client = genaix.build_generative_service(
             credentials=self.credentials,
             api_key=google_api_key,
             client_info=client_info,
-            client_options=
+            client_options=client_options,
             transport=transport,
         )
         self.async_client_running = None
@@ -1941,11 +1947,17 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         transport = self.transport
         if transport == "rest":
             transport = "grpc_asyncio"
+
+        # Merge base_url into client_options if provided
+        client_options = self.client_options or {}
+        if self.base_url and "api_endpoint" not in client_options:
+            client_options = {**client_options, "api_endpoint": self.base_url}
+
         self.async_client_running = genaix.build_generative_async_service(
             credentials=self.credentials,
             api_key=google_api_key,
             client_info=get_client_info(f"ChatGoogleGenerativeAI:{self.model}"),
-            client_options=
+            client_options=client_options,
             transport=transport,
         )
         return self.async_client_running
@@ -1960,6 +1972,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             "n": self.n,
             "safety_settings": self.safety_settings,
             "response_modalities": self.response_modalities,
+            "media_resolution": self.media_resolution,
             "thinking_budget": self.thinking_budget,
             "include_thoughts": self.include_thoughts,
         }
@@ -2077,6 +2090,11 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         gapic_response_schema = _dict_to_gapic_schema(response_schema)
         if gapic_response_schema is not None:
             gen_config["response_schema"] = gapic_response_schema
+
+        media_resolution = kwargs.get("media_resolution", self.media_resolution)
+        if media_resolution is not None:
+            gen_config["media_resolution"] = media_resolution
+
         return GenerationConfig(**gen_config)
 
     def _generate(
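Because the generation config reads `media_resolution` from the call kwargs before falling back to the instance field, the setting can be overridden per request, as the new integration test does. A sketch (the prompt is a placeholder):

```python
from langchain_google_genai import ChatGoogleGenerativeAI, MediaResolution

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")  # illustrative model

# The per-call kwarg wins over the model-level media_resolution field.
response = llm.invoke(
    "Describe this image",  # placeholder prompt
    media_resolution=MediaResolution.MEDIA_RESOLUTION_LOW,
)
```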
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/embeddings.py
RENAMED
@@ -87,6 +87,14 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             "such as `api_endpoint`."
         ),
     )
+    base_url: Optional[str] = Field(
+        default=None,
+    )
+    """The base URL to use for the API client.
+
+    Alias of `client_options['api_endpoint']`.
+    """
+
     transport: Optional[str] = Field(
         default=None,
         description="A string, one of: [``'rest'``, ``'grpc'``, ``'grpc_asyncio'``].",
@@ -109,11 +117,16 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         if not any(self.model.startswith(prefix) for prefix in ("models/",)):
             self.model = f"models/{self.model}"
 
+        # Merge base_url into client_options if provided
+        client_options = self.client_options or {}
+        if self.base_url and "api_endpoint" not in client_options:
+            client_options = {**client_options, "api_endpoint": self.base_url}
+
         self.client = build_generative_service(
             credentials=self.credentials,
             api_key=google_api_key,
             client_info=client_info,
-            client_options=
+            client_options=client_options,
             transport=self.transport,
         )
         # Always defer async client initialization to first async call.
@@ -137,11 +150,16 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         if transport == "rest":
             transport = "grpc_asyncio"
 
+        # Merge base_url into client_options if provided
+        client_options = self.client_options or {}
+        if self.base_url and "api_endpoint" not in client_options:
+            client_options = {**client_options, "api_endpoint": self.base_url}
+
         self.async_client = build_generative_async_service(
             credentials=self.credentials,
             api_key=google_api_key,
             client_info=client_info,
-            client_options=
+            client_options=client_options,
             transport=transport,
         )
         return self.async_client
@@ -242,7 +260,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             batch_size: [int] The batch size of embeddings to send to the model
             task_type: `task_type <https://ai.google.dev/api/embeddings#tasktype>`__
             titles: An optional list of titles for texts provided.
-
+                Only applicable when TaskType is ``'RETRIEVAL_DOCUMENT'``.
             output_dimensionality: Optional `reduced dimension for the output embedding <https://ai.google.dev/api/embeddings#EmbedContentRequest>`__.
 
         Returns:
@@ -293,7 +311,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             text: The text to embed.
             task_type: `task_type <https://ai.google.dev/api/embeddings#tasktype>`__
             title: An optional title for the text.
-
+                Only applicable when TaskType is ``'RETRIEVAL_DOCUMENT'``.
             output_dimensionality: Optional `reduced dimension for the output embedding <https://ai.google.dev/api/embeddings#EmbedContentRequest>`__.
 
         Returns:
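The embeddings client gains the same `base_url` shorthand, merged into `client_options` in both the sync and async builders. A minimal sketch with a hypothetical endpoint:

```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings

# Routes requests through a custom endpoint; equivalent to passing
# client_options={"api_endpoint": "https://example.com"}.
embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001",  # illustrative model choice
    base_url="https://example.com",  # hypothetical endpoint
)
```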
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/genai_aqa.py
RENAMED
@@ -103,19 +103,30 @@ class GenAIAqa(RunnableSerializable[AqaInput, AqaOutput]):
     # google.generativeai installed.
     answer_style: int = 1
 
-    def __init__(
+    def __init__(
+        self,
+        *,
+        answer_style: int = genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
+        safety_settings: Optional[List[genai.SafetySetting]] = None,
+        temperature: Optional[float] = None,
+        **kwargs: Any,
+    ) -> None:
         """Construct a Google Generative AI AQA model.
 
         All arguments are optional.
 
         Args:
             answer_style: See
-
+                `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`.
             safety_settings: See `google.ai.generativelanguage.SafetySetting`.
             temperature: 0.0 to 1.0.
         """
         super().__init__(**kwargs)
-        self._client = _AqaModel(
+        self._client = _AqaModel(
+            answer_style=answer_style,
+            safety_settings=safety_settings,
+            temperature=temperature,
+        )
 
     def invoke(
         self, input: AqaInput, config: Optional[RunnableConfig] = None, **kwargs: Any
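With the keyword-only signature reconstructed above, constructing the AQA runnable looks like the following sketch (the values are illustrative, and the `genai` import mirrors the module's own usage):

```python
import google.ai.generativelanguage as genai

from langchain_google_genai import GenAIAqa

# All constructor arguments are optional keyword arguments.
aqa = GenAIAqa(
    answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
    temperature=0.5,  # illustrative value in [0.0, 1.0]
)
```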
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/google_vector_store.py
RENAMED
@@ -262,9 +262,9 @@ class GoogleVectorStore(VectorStore):
             document.
 
         Raises:
-            DoesNotExistsException
-                server. In this case, consider using
-
+            DoesNotExistsException: If the IDs do not match to anything on Google
+                server. In this case, consider using ``create_corpus`` or
+                ``create_document`` to create one.
         """
         super().__init__(**kwargs)
         self._retriever = _SemanticRetriever.from_ids(corpus_id, document_id)
@@ -352,7 +352,7 @@ class GoogleVectorStore(VectorStore):
             Document.
 
         Raises:
-            DoesNotExistsException
+            DoesNotExistsException: If the IDs do not match to anything at
                 Google server.
         """
         if corpus_id is None or document_id is None:
@@ -463,16 +463,22 @@ class GoogleVectorStore(VectorStore):
         """
         return lambda score: score
 
-    def as_aqa(
+    def as_aqa(
+        self,
+        *,
+        answer_style: int = 1,
+        safety_settings: Optional[List[Any]] = None,
+        temperature: Optional[float] = None,
+    ) -> Runnable[str, AqaOutput]:
         """Construct a Google Generative AI AQA engine.
 
         All arguments are optional.
 
         Args:
             answer_style: See
-
-            safety_settings: See
-            temperature: 0.0
+                ``google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle``.
+            safety_settings: See ``google.ai.generativelanguage.SafetySetting``.
+            temperature: Value between 0.0 and 1.0 controlling randomness.
         """
         return (
             RunnablePassthrough[str]()
@@ -481,7 +487,11 @@ class GoogleVectorStore(VectorStore):
                 "passages": self.as_retriever(),
             }
             | RunnableLambda(_toAqaInput)
-            | GenAIAqa(
+            | GenAIAqa(
+                answer_style=answer_style,
+                safety_settings=safety_settings,
+                temperature=temperature,
+            )
         )
 
 
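The reconstructed `as_aqa` now threads its keyword-only arguments through to `GenAIAqa`. A sketch of the intended call shape (IDs and values are placeholders):

```python
from langchain_google_genai.google_vector_store import GoogleVectorStore

store = GoogleVectorStore(corpus_id="my-corpus")  # placeholder corpus ID
aqa = store.as_aqa(temperature=0.2)  # keyword-only arguments
answer = aqa.invoke("What is attributed question answering?")
```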
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/langchain_google_genai/llms.py
RENAMED
@@ -77,6 +77,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
             timeout=self.timeout,
             model=self.model,
             client_options=self.client_options,
+            base_url=self.base_url,
             transport=self.transport,
             additional_headers=self.additional_headers,
             safety_settings=self.safety_settings,
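`GoogleGenerativeAI` simply forwards the new `base_url` to its underlying chat client. A sketch mirroring the new unit test; note the LLM wrapper returns a plain string rather than a message:

```python
from langchain_google_genai.llms import GoogleGenerativeAI

llm = GoogleGenerativeAI(
    model="gemini-2.5-flash",  # illustrative model choice
    base_url="https://example.com",  # hypothetical endpoint
)
result: str = llm.invoke("test")  # completions come back as plain strings
```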
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/pyproject.toml
RENAMED
@@ -6,14 +6,14 @@ build-backend = "pdm.backend"
 
 [project]
 name = "langchain-google-genai"
-version = "3.0.0rc1"
+version = "3.0.1"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 requires-python = ">=3.10.0,<4.0.0"
 readme = "README.md"
 repository = "https://github.com/langchain-ai/langchain-google"
 dependencies = [
-    "langchain-core>=1.0.
+    "langchain-core>=1.0.0,<2.0.0",
     "google-ai-generativelanguage>=0.7.0,<1.0.0",
     "pydantic>=2.0.0,<3.0.0",
     "filetype>=1.2.0,<2.0.0",
@@ -49,7 +49,7 @@ test = [
     "pytest-socket>=0.7.0,<1.0.0",
     "numpy>=1.26.4; python_version<'3.13'",
    "numpy>=2.1.0; python_version>='3.13'",
-    "langchain-tests>=1.0.
+    "langchain-tests>=1.0.0,<2.0.0",
 ]
 test_integration = [
     "pytest>=8.4.0,<9.0.0",
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_chat_models.py
RENAMED
@@ -23,6 +23,7 @@ from langchain_google_genai import (
     ChatGoogleGenerativeAI,
     HarmBlockThreshold,
     HarmCategory,
+    MediaResolution,
     Modality,
 )
 
@@ -286,6 +287,7 @@ def _check_thinking_output(content: list, output_version: str) -> None:
         assert isinstance(block[thinking_key], str)
 
 
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("output_version", ["v0", "v1"])
 def test_chat_google_genai_invoke_thinking_include_thoughts(
     output_version: str,
@@ -557,6 +559,42 @@ def test_chat_google_genai_multimodal(
     assert len(response.content.strip()) > 0
 
 
+@pytest.mark.parametrize(
+    "message",
+    [
+        HumanMessage(
+            content=[
+                {
+                    "type": "text",
+                    "text": "Guess what's in this picture! You have 3 guesses.",
+                },
+                {
+                    "type": "image_url",
+                    "image_url": "https://picsum.photos/seed/picsum/200/300",
+                },
+            ]
+        ),
+    ],
+)
+def test_chat_google_genai_invoke_media_resolution(message: BaseMessage) -> None:
+    """Test invoke vision model with `media_resolution` set to low and without."""
+    llm = ChatGoogleGenerativeAI(model=_VISION_MODEL)
+    result = llm.invoke([message])
+    result_low_res = llm.invoke(
+        [message], media_resolution=MediaResolution.MEDIA_RESOLUTION_LOW
+    )
+
+    assert isinstance(result_low_res, AIMessage)
+    _check_usage_metadata(result_low_res)
+
+    assert result.usage_metadata is not None
+    assert result_low_res.usage_metadata is not None
+    assert (
+        result_low_res.usage_metadata["input_tokens"]
+        < result.usage_metadata["input_tokens"] / 3
+    )
+
+
 def test_chat_google_genai_single_call_with_history() -> None:
     model = ChatGoogleGenerativeAI(model=_MODEL)
     text_question1, text_answer1 = "How much is 2+2?", "4"
@@ -570,21 +608,19 @@ def test_chat_google_genai_single_call_with_history() -> None:
 
 
 @pytest.mark.parametrize(
-
-    [
+    "model_name",
+    [_MODEL, "models/gemini-2.5-pro"],
 )
 def test_chat_google_genai_system_message(
-    model_name: str,
+    model_name: str,
 ) -> None:
     """Test system message handling in ChatGoogleGenerativeAI.
 
-
-
-    Useful since I think some models (e.g. Gemini Pro) do not like system messages?
+    Tests that system messages are properly converted to system instructions
+    for different models.
     """
     model = ChatGoogleGenerativeAI(
         model=model_name,
-        convert_system_message_to_human=convert_system_message_to_human,
     )
     text_question1, text_answer1 = "How much is 2+2?", "4"
     text_question2 = "How much is 3+3?"
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/integration_tests/test_standard.py
RENAMED
@@ -1,12 +1,10 @@
 """Standard LangChain interface tests."""
 
-import
+import os
 from typing import Literal
 
-import httpx
 import pytest
 from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import HumanMessage
 from langchain_core.rate_limiters import InMemoryRateLimiter
 from langchain_core.tools import BaseTool
 from langchain_tests.integration_tests import ChatModelIntegrationTests
@@ -16,6 +14,14 @@ from langchain_google_genai import ChatGoogleGenerativeAI
 rate_limiter = InMemoryRateLimiter(requests_per_second=0.25)
 
 
+def _has_multimodal_secrets() -> bool:
+    """Check if integration test secrets are available.
+
+    Returns `True` if running in an environment with access to secrets.
+    """
+    return bool(os.environ.get("LANGCHAIN_TESTS_USER_AGENT"))
+
+
 class TestGeminiFlashStandard(ChatModelIntegrationTests):
     @property
     def chat_model_class(self) -> type[BaseChatModel]:
@@ -48,55 +54,29 @@ class TestGeminiFlashStandard(ChatModelIntegrationTests):
     def supports_audio_inputs(self) -> bool:
         return True
 
-    @pytest.mark.xfail(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        message = HumanMessage(
-            content=[
-                {"type": "text", "text": "describe the weather in this image"},
-                {
-                    "type": "image",
-                    "source_type": "base64",
-                    "mime_type": "image/jpeg",
-                    "data": image_data,
-                },
-            ],
-        )
-        _ = model.invoke([message])
-
-        # Standard format, URL
-        if self.supports_image_urls:
-            message = HumanMessage(
-                content=[
-                    {"type": "text", "text": "describe the weather in this image"},
-                    {
-                        "type": "image",
-                        "source_type": "url",
-                        "url": image_url,
-                    },
-                ],
-            )
-            _ = model.invoke([message])
+    @pytest.mark.xfail(
+        not _has_multimodal_secrets(),
+        reason=(
+            "Multimodal tests require integration secrets (user agent to fetch "
+            "external resources)"
+        ),
+        run=False,
+    )
+    def test_audio_inputs(self, model: BaseChatModel) -> None:
+        """Skip audio tests in PR context - requires external resource fetching."""
+        super().test_audio_inputs(model)
+
+    @pytest.mark.xfail(
+        not _has_multimodal_secrets(),
+        reason=(
+            "Multimodal tests require integration secrets (user agent to fetch "
+            "external resources)"
+        ),
+        run=False,
+    )
+    def test_pdf_inputs(self, model: BaseChatModel) -> None:
+        """Skip PDF tests in PR context - requires external resource fetching."""
+        super().test_pdf_inputs(model)
 
 
 class TestGeminiProStandard(ChatModelIntegrationTests):
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_chat_models.py
RENAMED
@@ -27,6 +27,7 @@ from langchain_core.messages import (
     SystemMessage,
     ToolMessage,
 )
+from langchain_core.messages import content as types
 from langchain_core.messages.block_translators.google_genai import (
     _convert_to_v1_from_genai,
 )
@@ -36,6 +37,9 @@ from pydantic import SecretStr
 from pydantic_core._pydantic_core import ValidationError
 
 from langchain_google_genai import HarmBlockThreshold, HarmCategory, Modality
+from langchain_google_genai._compat import (
+    _convert_from_v1_to_generativelanguage_v1beta,
+)
 from langchain_google_genai.chat_models import (
     ChatGoogleGenerativeAI,
     _chat_with_retry,
@@ -431,6 +435,46 @@ def test_additional_headers_support(headers: Optional[dict[str, str]]) -> None:
     assert "ChatGoogleGenerativeAI" in call_client_info.user_agent
 
 
+def test_base_url_support() -> None:
+    """Test that `base_url` is properly merged into `client_options`."""
+    mock_client = Mock()
+    mock_generate_content = Mock()
+    mock_generate_content.return_value = GenerateContentResponse(
+        candidates=[Candidate(content=Content(parts=[Part(text="test response")]))]
+    )
+    mock_client.return_value.generate_content = mock_generate_content
+    base_url = "https://example.com"
+    param_api_key = "[secret]"
+    param_secret_api_key = SecretStr(param_api_key)
+    param_transport = "rest"
+
+    with patch(
+        "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient",
+        mock_client,
+    ):
+        chat = ChatGoogleGenerativeAI(
+            model=MODEL_NAME,
+            google_api_key=param_secret_api_key,
+            base_url=base_url,
+            transport=param_transport,
+        )
+
+        response = chat.invoke("test")
+        assert response.content == "test response"
+
+    mock_client.assert_called_once_with(
+        transport=param_transport,
+        client_options=ANY,
+        client_info=ANY,
+    )
+    call_client_options = mock_client.call_args_list[0].kwargs["client_options"]
+    assert call_client_options.api_key == param_api_key
+    assert call_client_options.api_endpoint == base_url
+    call_client_info = mock_client.call_args_list[0].kwargs["client_info"]
+    assert "langchain-google-genai" in call_client_info.user_agent
+    assert "ChatGoogleGenerativeAI" in call_client_info.user_agent
+
+
 def test_default_metadata_field_alias() -> None:
     """Test 'default_metadata' and 'default_metadata_input' fields work correctly."""
     # Test with default_metadata_input field name (alias) - should accept None without
@@ -1859,3 +1903,15 @@ def test_chat_google_genai_invoke_with_audio_mocked() -> None:
     assert audio_block["type"] == "audio"
     assert "base64" in audio_block
     assert audio_block["base64"] == base64.b64encode(wav_bytes).decode()
+
+
+def test_compat() -> None:
+    block: types.TextContentBlock = {"type": "text", "text": "foo"}
+    result = _convert_from_v1_to_generativelanguage_v1beta([block], "google_genai")
+    expected = [{"text": "foo"}]
+    assert result == expected
+
+    block = {"type": "text", "text": "foo", "extras": {"signature": "bar"}}
+    result = _convert_from_v1_to_generativelanguage_v1beta([block], "google_genai")
+    expected = [{"text": "foo", "thought_signature": "bar"}]
+    assert result == expected
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_embeddings.py
RENAMED
@@ -1,6 +1,6 @@
 """Test embeddings model integration."""
 
-from unittest.mock import MagicMock, patch
+from unittest.mock import ANY, MagicMock, patch
 
 import pytest
 from google.ai.generativelanguage_v1beta.types import (
@@ -160,3 +160,33 @@ def test_embed_documents_with_numerous_texts() -> None:
     )
     mock_embed.assert_called_with(request)
     assert mock_embed.call_count == test_corpus_size / test_batch_size
+
+
+def test_base_url_support() -> None:
+    """Test that base_url is properly merged into client_options."""
+    base_url = "https://example.com"
+    param_api_key = "[secret]"
+    param_secret_api_key = SecretStr(param_api_key)
+    param_transport = "rest"
+
+    with patch(
+        "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient"
+    ) as mock_client:
+        _ = GoogleGenerativeAIEmbeddings(
+            model=f"models/{MODEL_NAME}",
+            google_api_key=param_secret_api_key,
+            base_url=base_url,
+            transport=param_transport,
+        )
+
+    mock_client.assert_called_once_with(
+        transport=param_transport,
+        client_options=ANY,
+        client_info=ANY,
+    )
+    call_client_options = mock_client.call_args_list[0].kwargs["client_options"]
+    assert call_client_options.api_key == param_api_key
+    assert call_client_options.api_endpoint == base_url
+    call_client_info = mock_client.call_args_list[0].kwargs["client_info"]
+    assert "langchain-google-genai" in call_client_info.user_agent
+    assert "GoogleGenerativeAIEmbeddings" in call_client_info.user_agent
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.1}/tests/unit_tests/test_llms.py
RENAMED
@@ -1,4 +1,12 @@
-from unittest.mock import patch
+from unittest.mock import ANY, Mock, patch
+
+from google.ai.generativelanguage_v1beta.types import (
+    Candidate,
+    Content,
+    GenerateContentResponse,
+    Part,
+)
+from pydantic import SecretStr
 
 from langchain_google_genai.llms import GoogleGenerativeAI
 
@@ -47,3 +55,40 @@ def test_tracing_params() -> None:
     call_args = mock_warning.call_args[0][0]
     assert "Unexpected argument 'safety_setting'" in call_args
     assert "Did you mean: 'safety_settings'?" in call_args
+
+
+def test_base_url_support() -> None:
+    """Test that base_url is properly passed through to ChatGoogleGenerativeAI."""
+    mock_client = Mock()
+    mock_generate_content = Mock()
+    mock_generate_content.return_value = GenerateContentResponse(
+        candidates=[Candidate(content=Content(parts=[Part(text="test response")]))]
+    )
+    mock_client.return_value.generate_content = mock_generate_content
+    base_url = "https://example.com"
+    param_api_key = "[secret]"
+    param_secret_api_key = SecretStr(param_api_key)
+    param_transport = "rest"
+
+    with patch(
+        "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient",
+        mock_client,
+    ):
+        llm = GoogleGenerativeAI(
+            model=MODEL_NAME,
+            google_api_key=param_secret_api_key,
+            base_url=base_url,
+            transport=param_transport,
+        )
+
+        response = llm.invoke("test")
+        assert response == "test response"
+
+    mock_client.assert_called_once_with(
+        transport=param_transport,
+        client_options=ANY,
+        client_info=ANY,
+    )
+    call_client_options = mock_client.call_args_list[0].kwargs["client_options"]
+    assert call_client_options.api_key == param_api_key
+    assert call_client_options.api_endpoint == base_url