langchain-google-genai 2.1.1__tar.gz → 2.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of langchain-google-genai was flagged as a potentially problematic release.
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/PKG-INFO +4 -4
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/README.md +2 -2
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_common.py +1 -1
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_function_utils.py +54 -20
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/chat_models.py +286 -26
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/embeddings.py +14 -8
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/llms.py +26 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/pyproject.toml +5 -5
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/LICENSE +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/__init__.py +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_enums.py +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_genai_extension.py +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_image_utils.py +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/genai_aqa.py +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/google_vector_store.py +0 -0
- {langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/PKG-INFO RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.1
+Version: 2.1.3
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: filetype (>=1.2.0,<2.0.0)
 Requires-Dist: google-ai-generativelanguage (>=0.6.16,<0.7.0)
-Requires-Dist: langchain-core (>=0.3.
+Requires-Dist: langchain-core (>=0.3.52,<0.4.0)
 Requires-Dist: pydantic (>=2,<3)
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
@@ -98,7 +98,7 @@ meow_str = response.content[1]
 
 #### Multimodal Outputs in Chains
 
-
+```
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.prompts import ChatPromptTemplate
 
@@ -114,7 +114,7 @@ prompt = ChatPromptTemplate(
 )
 chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
-
+```
 
 ## Embeddings
 
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/README.md RENAMED

@@ -77,7 +77,7 @@ meow_str = response.content[1]
 
 #### Multimodal Outputs in Chains
 
-
+```
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.prompts import ChatPromptTemplate
 
@@ -93,7 +93,7 @@ prompt = ChatPromptTemplate(
 )
 chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
-
+```
 
 ## Embeddings
 
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_common.py RENAMED

@@ -36,7 +36,7 @@ Supported examples:
         "the GOOGLE_API_KEY envvar"
     temperature: float = 0.7
     """Run inference with this temperature. Must by in the closed interval
-    [0.0,
+    [0.0, 2.0]."""
     top_p: Optional[float] = None
     """Decode using nucleus sampling: consider the smallest set of tokens whose
     probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
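The only substantive change in `_common.py` is the wider documented temperature range. A minimal sketch of what the new closed interval [0.0, 2.0] permits — the model name is illustrative, not part of the diff:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Values above 1.0 are now documented as valid; they make sampling more diverse.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=1.5)
```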
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_function_utils.py RENAMED

@@ -61,18 +61,22 @@ _ALLOWED_SCHEMA_FIELDS_SET = set(_ALLOWED_SCHEMA_FIELDS)
 _FunctionDeclarationLike = Union[
     BaseTool, Type[BaseModel], gapic.FunctionDeclaration, Callable, Dict[str, Any]
 ]
+_GoogleSearchRetrievalLike = Union[
+    gapic.GoogleSearchRetrieval,
+    Dict[str, Any],
+]
 
 
 class _ToolDict(TypedDict):
     function_declarations: Sequence[_FunctionDeclarationLike]
+    google_search_retrieval: Optional[_GoogleSearchRetrievalLike]
 
 
 # Info: This means one tool=Sequence of FunctionDeclaration
 # The dict should be gapic.Tool like. {"function_declarations": [ { "name": ...}.
 # OpenAI like dict is not be accepted. {{'type': 'function', 'function': {'name': ...}
-
-
-]
+_ToolType = Union[gapic.Tool, _ToolDict, _FunctionDeclarationLike]
+_ToolsType = Sequence[_ToolType]
 
 
 def _format_json_schema_to_gapic(schema: Dict[str, Any]) -> Dict[str, Any]:
@@ -122,7 +126,7 @@ def _format_dict_to_function_declaration(
 
 # Info: gapic.Tool means function_declarations and proto.Message.
 def convert_to_genai_function_declarations(
-    tools:
+    tools: _ToolsType,
 ) -> gapic.Tool:
     if not isinstance(tools, collections.abc.Sequence):
         logger.warning(
@@ -132,24 +136,54 @@ def convert_to_genai_function_declarations(
         tools = [tools]
     gapic_tool = gapic.Tool()
     for tool in tools:
-        if
-
-
-
-
+        if any(f in gapic_tool for f in ["google_search_retrieval"]):
+            raise ValueError(
+                "Providing multiple google_search_retrieval"
+                " or mixing with function_declarations is not supported"
+            )
+        if isinstance(tool, (gapic.Tool)):
+            rt: gapic.Tool = (
+                tool if isinstance(tool, gapic.Tool) else tool._raw_tool  # type: ignore
+            )
+            if "google_search_retrieval" in rt:
+                gapic_tool.google_search_retrieval = rt.google_search_retrieval
+            if "function_declarations" in rt:
+                gapic_tool.function_declarations.extend(rt.function_declarations)
+            if "google_search" in rt:
+                gapic_tool.google_search = rt.google_search
         elif isinstance(tool, dict):
-
-            if not
-
-
-
-
-            if function_declarations:
-                fds = [
-                    _format_to_gapic_function_declaration(fd)
-                    for fd in function_declarations
+            # not _ToolDictLike
+            if not any(
+                f in tool
+                for f in [
+                    "function_declarations",
+                    "google_search_retrieval",
                 ]
-
+            ):
+                fd = _format_to_gapic_function_declaration(tool)  # type: ignore[arg-type]
+                gapic_tool.function_declarations.append(fd)
+                continue
+            # _ToolDictLike
+            tool = cast(_ToolDict, tool)
+            if "function_declarations" in tool:
+                function_declarations = tool["function_declarations"]
+                if not isinstance(
+                    tool["function_declarations"], collections.abc.Sequence
+                ):
+                    raise ValueError(
+                        "function_declarations should be a list"
+                        f"got '{type(function_declarations)}'"
+                    )
+                if function_declarations:
+                    fds = [
+                        _format_to_gapic_function_declaration(fd)
+                        for fd in function_declarations
+                    ]
+                    gapic_tool.function_declarations.extend(fds)
+            if "google_search_retrieval" in tool:
+                gapic_tool.google_search_retrieval = gapic.GoogleSearchRetrieval(
+                    tool["google_search_retrieval"]
+                )
         else:
             fd = _format_to_gapic_function_declaration(tool)  # type: ignore[arg-type]
             gapic_tool.function_declarations.append(fd)
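`convert_to_genai_function_declarations` now also understands `gapic.Tool`-style dicts carrying `google_search_retrieval`, passes through `google_search` on prebuilt `gapic.Tool` objects, and rejects mixing search retrieval with function declarations. A hedged sketch of the two accepted dict shapes — this is a private module, so treat the import path as illustrative:

```python
from langchain_google_genai._function_utils import (
    convert_to_genai_function_declarations,
)

# gapic.Tool-like dict with plain function declarations (not OpenAI-style):
fn_tool = {
    "function_declarations": [
        {"name": "get_weather", "description": "Look up the current weather."}
    ]
}
weather = convert_to_genai_function_declarations([fn_tool])

# The new _GoogleSearchRetrievalLike shape; combining it with
# function_declarations across tools raises ValueError.
search = convert_to_genai_function_declarations([{"google_search_retrieval": {}}])
```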
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/chat_models.py RENAMED

@@ -1,10 +1,13 @@
 from __future__ import annotations
 
 import asyncio
+import base64
 import json
 import logging
+import mimetypes
 import uuid
 import warnings
+from difflib import get_close_matches
 from operator import itemgetter
 from typing import (
     Any,
@@ -22,6 +25,7 @@ from typing import (
     cast,
 )
 
+import filetype  # type: ignore[import]
 import google.api_core
 
 # TODO: remove ignore once the google package is published with types
@@ -32,6 +36,7 @@ from google.ai.generativelanguage_v1beta import (
 from google.ai.generativelanguage_v1beta.types import (
     Blob,
     Candidate,
+    CodeExecution,
     Content,
     FileData,
     FunctionCall,
@@ -45,9 +50,7 @@ from google.ai.generativelanguage_v1beta.types import (
     ToolConfig,
     VideoMetadata,
 )
-from google.ai.generativelanguage_v1beta.types import (
-    Tool as GoogleTool,
-)
+from google.ai.generativelanguage_v1beta.types import Tool as GoogleTool
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
@@ -62,6 +65,7 @@ from langchain_core.messages import (
     HumanMessage,
     SystemMessage,
     ToolMessage,
+    is_data_content_block,
 )
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import invalid_tool_call, tool_call, tool_call_chunk
@@ -72,7 +76,7 @@ from langchain_core.output_parsers.openai_tools import (
     parse_tool_calls,
 )
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
-from langchain_core.runnables import Runnable, RunnablePassthrough
+from langchain_core.runnables import Runnable, RunnableConfig, RunnablePassthrough
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from pydantic import (
@@ -113,9 +117,6 @@ from langchain_google_genai._image_utils import (
 
 from . import _genai_extension as genaix
 
-WARNED_STRUCTURED_OUTPUT_JSON_MODE = False
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -240,7 +241,7 @@ async def _achat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
     return await _achat_with_retry(**kwargs)
 
 
-def _is_openai_parts_format(part: dict) -> bool:
+def _is_lc_content_block(part: dict) -> bool:
     return "type" in part
 
 
@@ -255,10 +256,29 @@ def _convert_to_parts(
         if isinstance(part, str):
             parts.append(Part(text=part))
         elif isinstance(part, Mapping):
-
-            if _is_openai_parts_format(part):
+            if _is_lc_content_block(part):
                 if part["type"] == "text":
                     parts.append(Part(text=part["text"]))
+                elif is_data_content_block(part):
+                    if part["source_type"] == "url":
+                        bytes_ = image_loader._bytes_from_url(part["url"])
+                    elif part["source_type"] == "base64":
+                        bytes_ = base64.b64decode(part["data"])
+                    else:
+                        raise ValueError("source_type must be url or base64.")
+                    inline_data: dict = {"data": bytes_}
+                    if "mime_type" in part:
+                        inline_data["mime_type"] = part["mime_type"]
+                    else:
+                        source = cast(str, part.get("url") or part.get("data"))
+                        mime_type, _ = mimetypes.guess_type(source)
+                        if not mime_type:
+                            kind = filetype.guess(bytes_)
+                            if kind:
+                                mime_type = kind.mime
+                        if mime_type:
+                            inline_data["mime_type"] = mime_type
+                    parts.append(Part(inline_data=inline_data))
                 elif part["type"] == "image_url":
                     img_url = part["image_url"]
                     if isinstance(img_url, dict):
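With this hunk, `_convert_to_parts` accepts LangChain's standard data content blocks: base64 payloads are decoded, URLs fetched, and a missing `mime_type` is inferred via `mimetypes` and then `filetype`. A minimal sketch, assuming a local `photo.png` and an illustrative model name:

```python
import base64

from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")

with open("photo.png", "rb") as f:
    data = base64.b64encode(f.read()).decode()

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        # mime_type is optional; the new code falls back to sniffing it.
        {"type": "image", "source_type": "base64", "data": data},
    ]
)
llm.invoke([message])
```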
@@ -458,6 +478,41 @@ def _parse_response_candidate(
         elif text:
             raise Exception("Unexpected content type")
 
+        if hasattr(part, "executable_code") and part.executable_code is not None:
+            if part.executable_code.code and part.executable_code.language:
+                code_message = {
+                    "type": "executable_code",
+                    "executable_code": part.executable_code.code,
+                    "language": part.executable_code.language,
+                }
+                if not content:
+                    content = [code_message]
+                elif isinstance(content, str):
+                    content = [content, code_message]
+                elif isinstance(content, list):
+                    content.append(code_message)
+                else:
+                    raise Exception("Unexpected content type")
+
+        if (
+            hasattr(part, "code_execution_result")
+            and part.code_execution_result is not None
+        ):
+            if part.code_execution_result.output:
+                execution_result = {
+                    "type": "code_execution_result",
+                    "code_execution_result": part.code_execution_result.output,
+                }
+
+                if not content:
+                    content = [execution_result]
+                elif isinstance(content, str):
+                    content = [content, execution_result]
+                elif isinstance(content, list):
+                    content.append(execution_result)
+                else:
+                    raise Exception("Unexpected content type")
+
         if part.inline_data.mime_type.startswith("image/"):
             image_format = part.inline_data.mime_type[6:]
             message = {
@@ -521,6 +576,16 @@ def _parse_response_candidate(
         )
     if content is None:
         content = ""
+    if any(isinstance(item, dict) and "executable_code" in item for item in content):
+        warnings.warn(
+            """
+        ⚠️ Warning: Output may vary each run.
+        - 'executable_code': Always present.
+        - 'execution_result' & 'image_url': May be absent for some queries.
+
+        Validate before using in production.
+        """
+        )
 
     if streaming:
         return AIMessageChunk(
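After these parsing changes, assistant content can interleave text with `executable_code` and `code_execution_result` dicts. An illustrative consumer loop over the shapes built above — the literal values are invented, and as the new warning itself says, the result block is not guaranteed for every query:

```python
# Shapes mirror the dicts assembled in _parse_response_candidate (values invented):
content = [
    {"type": "executable_code", "executable_code": "print(2 ** 20)", "language": "PYTHON"},
    {"type": "code_execution_result", "code_execution_result": "1048576\n"},
]
for block in content:
    if isinstance(block, dict) and block.get("type") == "executable_code":
        print("model ran:\n", block["executable_code"])
    elif isinstance(block, dict) and block.get("type") == "code_execution_result":
        print("output:", block["code_execution_result"])
```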
@@ -575,6 +640,8 @@ def _response_to_result(
         generation_info = {}
         if candidate.finish_reason:
             generation_info["finish_reason"] = candidate.finish_reason.name
+            # Add model_name in last chunk
+            generation_info["model_name"] = response.model_version
         generation_info["safety_ratings"] = [
             proto.Message.to_dict(safety_rating, use_integers_for_enums=False)
             for safety_rating in candidate.safety_ratings
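Downstream, the serving model version now rides along in `response_metadata` on the final chunk. A quick hedged check (`llm` as in the earlier sketches; the version string is illustrative):

```python
resp = llm.invoke("Hello")
print(resp.response_metadata.get("finish_reason"))  # e.g. "STOP"
print(resp.response_metadata.get("model_name"))     # e.g. "gemini-1.5-flash-002"
```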
@@ -663,7 +730,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         .. code-block:: python
 
             AIMessageChunk(content='J', response_metadata={'finish_reason': 'STOP', 'safety_ratings': []}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 1, 'total_tokens': 19})
-            AIMessageChunk(content="'adore programmer.
+            AIMessageChunk(content="'adore programmer. \\n", response_metadata={'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23})
 
         .. code-block:: python
 
@@ -693,6 +760,109 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             # batch:
             # await llm.abatch([messages])
 
+    Context Caching:
+        Context caching allows you to store and reuse content (e.g., PDFs, images) for faster processing.
+        The `cached_content` parameter accepts a cache name created via the Google Generative AI API.
+        Below are two examples: caching a single file directly and caching multiple files using `Part`.
+
+        Single File Example:
+            This caches a single file and queries it.
+
+            .. code-block:: python
+
+                from google import genai
+                from google.genai import types
+                import time
+                from langchain_google_genai import ChatGoogleGenerativeAI
+                from langchain_core.messages import HumanMessage
+
+                client = genai.Client()
+
+                # Upload file
+                file = client.files.upload(file="./example_file")
+                while file.state.name == 'PROCESSING':
+                    time.sleep(2)
+                    file = client.files.get(name=file.name)
+
+                # Create cache
+                model = 'models/gemini-1.5-flash-001'
+                cache = client.caches.create(
+                    model=model,
+                    config=types.CreateCachedContentConfig(
+                        display_name='Cached Content',
+                        system_instruction=(
+                            'You are an expert content analyzer, and your job is to answer '
+                            'the user\'s query based on the file you have access to.'
+                        ),
+                        contents=[file],
+                        ttl="300s",
+                    )
+                )
+
+                # Query with LangChain
+                llm = ChatGoogleGenerativeAI(
+                    model=model,
+                    cached_content=cache.name,
+                )
+                message = HumanMessage(content="Summarize the main points of the content.")
+                llm.invoke([message])
+
+        Multiple Files Example:
+            This caches two files using `Part` and queries them together.
+
+            .. code-block:: python
+
+                from google import genai
+                from google.genai.types import CreateCachedContentConfig, Content, Part
+                import time
+                from langchain_google_genai import ChatGoogleGenerativeAI
+                from langchain_core.messages import HumanMessage
+
+                client = genai.Client()
+
+                # Upload files
+                file_1 = client.files.upload(file="./file1")
+                while file_1.state.name == 'PROCESSING':
+                    time.sleep(2)
+                    file_1 = client.files.get(name=file_1.name)
+
+                file_2 = client.files.upload(file="./file2")
+                while file_2.state.name == 'PROCESSING':
+                    time.sleep(2)
+                    file_2 = client.files.get(name=file_2.name)
+
+                # Create cache with multiple files
+                contents = [
+                    Content(
+                        role="user",
+                        parts=[
+                            Part.from_uri(file_uri=file_1.uri, mime_type=file_1.mime_type),
+                            Part.from_uri(file_uri=file_2.uri, mime_type=file_2.mime_type),
+                        ],
+                    )
+                ]
+                model = "gemini-1.5-flash-001"
+                cache = client.caches.create(
+                    model=model,
+                    config=CreateCachedContentConfig(
+                        display_name='Cached Contents',
+                        system_instruction=(
+                            'You are an expert content analyzer, and your job is to answer '
+                            'the user\'s query based on the files you have access to.'
+                        ),
+                        contents=contents,
+                        ttl="300s",
+                    )
+                )
+
+                # Query with LangChain
+                llm = ChatGoogleGenerativeAI(
+                    model=model,
+                    cached_content=cache.name,
+                )
+                message = HumanMessage(content="Provide a summary of the key information across both files.")
+                llm.invoke([message])
+
     Tool calling:
         .. code-block:: python
 
@@ -736,6 +906,16 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
               'args': {'location': 'New York City, NY'},
               'id': '634582de-5186-4e4b-968b-f192f0a93678'}]
 
+    Use Search with Gemini 2:
+        .. code-block:: python
+
+            from google.ai.generativelanguage_v1beta.types import Tool as GenAITool
+            llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp")
+            resp = llm.invoke(
+                "When is the next total solar eclipse in US?",
+                tools=[GenAITool(google_search={})],
+            )
+
     Structured output:
         .. code-block:: python
 
@@ -786,7 +966,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
 
         .. code-block:: python
 
-            'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms.
+            'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms.'
 
     Token usage:
         .. code-block:: python
@@ -835,6 +1015,28 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         ``cachedContents/{cachedContent}``.
     """
 
+    def __init__(self, **kwargs: Any) -> None:
+        """Needed for arg validation."""
+        # Get all valid field names, including aliases
+        valid_fields = set()
+        for field_name, field_info in self.model_fields.items():
+            valid_fields.add(field_name)
+            if hasattr(field_info, "alias") and field_info.alias is not None:
+                valid_fields.add(field_info.alias)
+
+        # Check for unrecognized arguments
+        for arg in kwargs:
+            if arg not in valid_fields:
+                suggestions = get_close_matches(arg, valid_fields, n=1)
+                suggestion = (
+                    f" Did you mean: '{suggestions[0]}'?" if suggestions else ""
+                )
+                logger.warning(
+                    f"Unexpected argument '{arg}' "
+                    f"provided to ChatGoogleGenerativeAI.{suggestion}"
+                )
+        super().__init__(**kwargs)
+
     model_config = ConfigDict(
         populate_by_name=True,
     )
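The effect of the new constructor: misspelled kwargs are no longer dropped silently but logged with a `difflib`-based suggestion before Pydantic takes over. A sketch — the misspelling below is deliberate:

```python
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temprature=0.2)
# logs: Unexpected argument 'temprature' provided to ChatGoogleGenerativeAI.
#       Did you mean: 'temperature'?
```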
@@ -847,6 +1049,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     def _llm_type(self) -> str:
         return "chat-google-generative-ai"
 
+    @property
+    def _supports_code_execution(self) -> bool:
+        return (
+            "gemini-1.5-pro" in self.model
+            or "gemini-1.5-flash" in self.model
+            or "gemini-2" in self.model
+        )
+
     @classmethod
     def is_lc_serializable(self) -> bool:
         return True
@@ -863,7 +1073,9 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         if self.top_k is not None and self.top_k <= 0:
             raise ValueError("top_k must be positive")
 
-        if not
+        if not any(
+            self.model.startswith(prefix) for prefix in ("models/", "tunedModels/")
+        ):
             self.model = f"models/{self.model}"
 
         additional_headers = self.additional_headers or {}
@@ -900,12 +1112,17 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         # this check ensures that async client is only initialized
         # within an asyncio event loop to avoid the error
         if not self.async_client_running and _is_event_loop_running():
+            # async clients don't support "rest" transport
+            # https://github.com/googleapis/gapic-generator-python/issues/1962
+            transport = self.transport
+            if transport == "rest":
+                transport = "grpc_asyncio"
             self.async_client_running = genaix.build_generative_async_service(
                 credentials=self.credentials,
                 api_key=google_api_key,
                 client_info=get_client_info("ChatGoogleGenerativeAI"),
                 client_options=self.client_options,
-                transport=self.transport,
+                transport=transport,
             )
         return self.async_client_running
@@ -921,6 +1138,42 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             "response_modalities": self.response_modalities,
         }
 
+    def invoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        *,
+        code_execution: Optional[bool] = None,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> BaseMessage:
+        """
+        Enable code execution. Supported on: gemini-1.5-pro, gemini-1.5-flash,
+        gemini-2.0-flash, and gemini-2.0-pro. When enabled, the model can execute
+        code to solve problems.
+        """
+
+        """Override invoke to add code_execution parameter."""
+
+        if code_execution is not None:
+            if not self._supports_code_execution:
+                raise ValueError(
+                    f"Code execution is only supported on Gemini 1.5 Pro, \
+                    Gemini 1.5 Flash, "
+                    f"Gemini 2.0 Flash, and Gemini 2.0 Pro models. \
+                    Current model: {self.model}"
+                )
+            if "tools" not in kwargs:
+                code_execution_tool = GoogleTool(code_execution=CodeExecution())
+                kwargs["tools"] = [code_execution_tool]
+
+            else:
+                raise ValueError(
+                    "Tools are already defined." "code_execution tool can't be defined"
+                )
+
+        return super().invoke(input, config, stop=stop, **kwargs)
+
     def _get_ls_params(
         self, stop: Optional[List[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
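A minimal end-to-end sketch of the new `code_execution` flag, assuming a Gemini 2.0 model name; per the checks above, unsupported models raise `ValueError`, as does combining the flag with explicit `tools`:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
response = llm.invoke(
    "What is 3**9 + 7? Write and run Python to verify.",
    code_execution=True,  # injects GoogleTool(code_execution=CodeExecution())
)
for block in response.content:
    if isinstance(block, dict) and block.get("type") == "code_execution_result":
        print(block["code_execution_result"])
```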
@@ -1199,8 +1452,12 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 "Must specify at most one of tool_choice and tool_config, received "
                 f"both:\n\n{tool_choice=}\n\n{tool_config=}"
             )
+
         formatted_tools = None
-        if tools:
+        code_execution_tool = GoogleTool(code_execution=CodeExecution())
+        if tools == [code_execution_tool]:
+            formatted_tools = tools
+        elif tools:
             formatted_tools = [convert_to_genai_function_declarations(tools)]
         elif functions:
             formatted_tools = [convert_to_genai_function_declarations(functions)]
@@ -1226,9 +1483,20 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                     f"be specified if 'tools' is specified."
                 )
                 raise ValueError(msg)
-            all_names = [
-
-
+            all_names: List[str] = []
+            for t in formatted_tools:
+                if hasattr(t, "function_declarations"):
+                    t_with_declarations = cast(Any, t)
+                    all_names.extend(
+                        f.name for f in t_with_declarations.function_declarations
+                    )
+                elif isinstance(t, GoogleTool) and hasattr(t, "code_execution"):
+                    continue
+                else:
+                    raise TypeError(
+                        f"Tool {t} doesn't have function_declarations attribute"
+                    )
+
             tool_config = _tool_choice_to_tool_config(tool_choice, all_names)
 
         formatted_tool_config = None
@@ -1291,14 +1559,6 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 tools=[schema], first_tool_only=True
             )
         else:
-            global WARNED_STRUCTURED_OUTPUT_JSON_MODE
-            warnings.warn(
-                "ChatGoogleGenerativeAI.with_structured_output with dict schema has "
-                "changed recently to align with behavior of other LangChain chat "
-                "models. More context: "
-                "https://github.com/langchain-ai/langchain-google/pull/772"
-            )
-            WARNED_STRUCTURED_OUTPUT_JSON_MODE = True
             parser = JsonOutputKeyToolsParser(key_name=tool_name, first_tool_only=True)
         tool_choice = tool_name if self._supports_tool_choice else None
         try:
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/embeddings.py RENAMED

@@ -6,6 +6,7 @@ from typing import Any, Dict, List, Optional
 from google.ai.generativelanguage_v1beta.types import (
     BatchEmbedContentsRequest,
     EmbedContentRequest,
+    EmbedContentResponse,
 )
 from langchain_core.embeddings import Embeddings
 from langchain_core.utils import secret_from_env
@@ -239,7 +240,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         title: Optional[str] = None,
         output_dimensionality: Optional[int] = None,
     ) -> List[float]:
-        """Embed a text
+        """Embed a text, using the non-batch endpoint:
+        https://ai.google.dev/api/rest/v1/models/embedContent#EmbedContentRequest
 
         Args:
             text: The text to embed.
@@ -247,15 +249,19 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             title: An optional title for the text.
                 Only applicable when TaskType is RETRIEVAL_DOCUMENT.
             output_dimensionality: Optional reduced dimension for the output embedding.
-                https://ai.google.dev/api/rest/v1/models/batchEmbedContents#EmbedContentRequest
 
         Returns:
             Embedding for the text.
         """
         task_type = self.task_type or "RETRIEVAL_QUERY"
-
-
-
-
-
-
+        try:
+            request: EmbedContentRequest = self._prepare_request(
+                text=text,
+                task_type=task_type,
+                title=title,
+                output_dimensionality=output_dimensionality,
+            )
+            result: EmbedContentResponse = self.client.embed_content(request)
+        except Exception as e:
+            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+        return list(result.embedding.values)
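`embed_query` now goes through the single-content `embedContent` endpoint and wraps any failure in `GoogleGenerativeAIError`. A hedged sketch, assuming `GOOGLE_API_KEY` is set and the `models/embedding-001` name is available:

```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings

embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
# output_dimensionality asks the API for a reduced-size vector.
vector = embeddings.embed_query("hello world", output_dimensionality=256)
print(len(vector))
```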
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/llms.py RENAMED

@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import logging
+from difflib import get_close_matches
 from typing import Any, Iterator, List, Optional
 
 from langchain_core.callbacks import (
@@ -17,6 +19,8 @@ from langchain_google_genai._common import (
 )
 from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
 
+logger = logging.getLogger(__name__)
+
 
 class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     """Google GenerativeAI models.
@@ -33,6 +37,28 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         populate_by_name=True,
     )
 
+    def __init__(self, **kwargs: Any) -> None:
+        """Needed for arg validation."""
+        # Get all valid field names, including aliases
+        valid_fields = set()
+        for field_name, field_info in self.model_fields.items():
+            valid_fields.add(field_name)
+            if hasattr(field_info, "alias") and field_info.alias is not None:
+                valid_fields.add(field_info.alias)
+
+        # Check for unrecognized arguments
+        for arg in kwargs:
+            if arg not in valid_fields:
+                suggestions = get_close_matches(arg, valid_fields, n=1)
+                suggestion = (
+                    f" Did you mean: '{suggestions[0]}'?" if suggestions else ""
+                )
+                logger.warning(
+                    f"Unexpected argument '{arg}' "
+                    f"provided to GoogleGenerativeAI.{suggestion}"
+                )
+        super().__init__(**kwargs)
+
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validates params and passes them to google-generativeai package."""
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/pyproject.toml RENAMED

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "2.1.1"
+version = "2.1.3"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = "^0.3.
+langchain-core = "^0.3.52"
 google-ai-generativelanguage = "^0.6.16"
 pydantic = ">=2,<3"
 filetype = "^1.2.0"
@@ -27,8 +27,8 @@ pytest-mock = "^3.10.0"
 syrupy = "^4.0.2"
 pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
-numpy = "
-langchain-tests = "0.3.
+numpy = ">=1.26.2"
+langchain-tests = "0.3.18"
 
 [tool.codespell]
 ignore-words-list = "rouge"
@@ -60,7 +60,7 @@ mypy = "^1.10"
 types-requests = "^2.28.11.5"
 types-google-cloud-ndb = "^2.2.0.1"
 types-protobuf = "^4.24.0.20240302"
-numpy = "
+numpy = ">=1.26.2"
 
 
 [tool.poetry.group.dev]

{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/LICENSE RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/__init__.py RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_enums.py RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_genai_extension.py RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/_image_utils.py RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/genai_aqa.py RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/google_vector_store.py RENAMED
File without changes
{langchain_google_genai-2.1.1 → langchain_google_genai-2.1.3}/langchain_google_genai/py.typed RENAMED
File without changes