langchain-google-genai 2.1.10__tar.gz → 2.1.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-google-genai might be problematic.
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/PKG-INFO +43 -39
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/README.md +35 -24
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/__init__.py +3 -3
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/_common.py +28 -17
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/_function_utils.py +59 -59
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/_genai_extension.py +35 -21
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/_image_utils.py +10 -9
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/chat_models.py +459 -254
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/embeddings.py +62 -15
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/genai_aqa.py +15 -15
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/google_vector_store.py +26 -16
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/llms.py +9 -8
- langchain_google_genai-2.1.12/pyproject.toml +139 -0
- langchain_google_genai-2.1.12/tests/__init__.py +0 -0
- langchain_google_genai-2.1.12/tests/conftest.py +50 -0
- langchain_google_genai-2.1.12/tests/integration_tests/.env.example +1 -0
- langchain_google_genai-2.1.12/tests/integration_tests/__init__.py +0 -0
- langchain_google_genai-2.1.12/tests/integration_tests/terraform/main.tf +12 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_callbacks.py +31 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_chat_models.py +894 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_compile.py +6 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_embeddings.py +144 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_function_call.py +90 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_llms.py +97 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_standard.py +141 -0
- langchain_google_genai-2.1.12/tests/integration_tests/test_tools.py +37 -0
- langchain_google_genai-2.1.12/tests/unit_tests/__init__.py +0 -0
- langchain_google_genai-2.1.12/tests/unit_tests/__snapshots__/test_standard.ambr +63 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_chat_models.py +1116 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_chat_models_protobuf_fix.py +132 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_common.py +31 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_embeddings.py +160 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_function_utils.py +1405 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_genai_aqa.py +95 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_google_vector_store.py +440 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_imports.py +20 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_llms.py +47 -0
- langchain_google_genai-2.1.12/tests/unit_tests/test_standard.py +40 -0
- langchain_google_genai-2.1.10/pyproject.toml +0 -109
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/LICENSE +0 -0
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/_enums.py +0 -0
- {langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/py.typed +0 -0
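The PKG-INFO hunk below loosens the old parenthesized pins (for example `google-ai-generativelanguage (>=0.6.18,<0.7.0)` becomes `google-ai-generativelanguage<1,>=0.7`) and drops the `<4.0` Python cap. A quick way to confirm what actually got installed after upgrading is a small standard-library sketch; only the distribution name is taken from this diff, everything else is generic:

```python
# Sketch: inspect the installed distribution after `pip install -U langchain-google-genai`.
from importlib.metadata import PackageNotFoundError, requires, version

dist = "langchain-google-genai"
try:
    print("installed version:", version(dist))        # expect 2.1.12 after upgrading
    for requirement in requires(dist) or []:
        print("declared requirement:", requirement)   # e.g. google-ai-generativelanguage<1,>=0.7
except PackageNotFoundError:
    print(f"{dist} is not installed in this environment")
```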
{langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/PKG-INFO
RENAMED
@@ -1,22 +1,16 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.10
+Version: 2.1.12
 Summary: An integration package connecting Google's genai package and LangChain
-Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
-Requires-Python: >=3.9,<4.0
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: filetype (>=1.2.0,<2.0.0)
-Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
-Requires-Dist: langchain-core (>=0.3.75,<0.4.0)
-Requires-Dist: pydantic (>=2,<3)
-Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
+Project-URL: repository, https://github.com/langchain-ai/langchain-google
+Requires-Python: >=3.9
+Requires-Dist: langchain-core>=0.3.75
+Requires-Dist: google-ai-generativelanguage<1,>=0.7
+Requires-Dist: pydantic<3,>=2
+Requires-Dist: filetype<2,>=1.2
 Description-Content-Type: text/markdown
 
 # langchain-google-genai
@@ -62,6 +56,9 @@ This package provides LangChain support for Google Gemini models (via the offici
 
 ```bash
 pip install -U langchain-google-genai
+
+# or, with uv:
+uv add langchain-google-genai
 ````
 
 ---
@@ -79,7 +76,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 response = llm.invoke("Sing a ballad of LangChain.")
 print(response.content)
 ```
@@ -88,22 +85,30 @@ print(response.content)
 
 ## Chat Models
 
-
+See the LangChain documentation for general information about [Chat Models](https://docs.langchain.com/oss/python/langchain/models).
+
+The main interface for the Gemini chat models is `ChatGoogleGenerativeAI`.
 
 ### Multimodal Inputs
 
-Gemini
+Most Gemini models support image inputs.
 
 ```python
 from langchain_core.messages import HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 
 message = HumanMessage(
     content=[
-        {
-
+        {
+            "type": "text",
+            "text": "What's in this image?"
+        },
+        {
+            "type": "image_url",
+            "image_url": "https://picsum.photos/seed/picsum/200/300"
+        },
     ]
 )
 
@@ -111,7 +116,7 @@ response = llm.invoke([message])
 print(response.content)
 ```
 
-
+`image_url` can be:
 
 - A public image URL
 - A Google Cloud Storage path (`gcs://...`)
@@ -121,38 +126,44 @@ print(response.content)
 
 ### Multimodal Outputs
 
-
+Some Gemini models supports both text and inline image outputs.
 
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image-preview")
 
 response = llm.invoke(
     "Generate an image of a cat and say meow",
     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
 )
 
-image_base64 = response.content[
-meow_text = response.content[
+image_base64 = response.content[1].get("image_url").get("url").split(",")[-1]
+meow_text = response.content[0]
 print(meow_text)
+# In Jupyter, display the image:
+from base64 import b64decode
+from IPython.display import Image, display
+
+img_bytes = b64decode(image_base64)
+display(Image(data=img_bytes))
 ```
 
 ---
 
 ### Audio Output
 
-```
+```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="
-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-tts")
+
 response = llm.invoke(
     "Please say The quick brown fox jumps over the lazy dog",
     generation_config=dict(response_modalities=["AUDIO"]),
 )
 
-# Base64 encoded binary data of the
+# Base64 encoded binary data of the audio
 wav_data = response.additional_kwargs.get("audio")
 with open("output.wav", "wb") as f:
     f.write(wav_data)
@@ -162,15 +173,13 @@ with open("output.wav", "wb") as f:
 
 ### Multimodal Outputs in Chains
 
-You can use Gemini models in a LangChain chain:
-
 ```python
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_google_genai import ChatGoogleGenerativeAI, Modality
 
 llm = ChatGoogleGenerativeAI(
-    model="
+    model="gemini-2.5-flash-image-preview",
     response_modalities=[Modality.TEXT, Modality.IMAGE],
 )
 
@@ -186,13 +195,11 @@ response = chain.invoke("cat")
 
 ### Thinking Support
 
-Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
-
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
 llm = ChatGoogleGenerativeAI(
-    model="models/gemini-2.5-flash
+    model="models/gemini-2.5-flash",
     thinking_budget=1024
 )
 
@@ -207,8 +214,6 @@ print("Reasoning tokens used:", reasoning_score)
 
 ## Embeddings
 
-You can use Gemini embeddings in LangChain:
-
 ```python
 from langchain_google_genai import GoogleGenerativeAIEmbeddings
 
@@ -257,5 +262,4 @@ print("Answerable probability:", response.answerable_probability)
 
 - [LangChain Documentation](https://docs.langchain.com/)
 - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
-- [Gemini Model Documentation](https://ai.google.dev/)
-
+- [Gemini Model Documentation](https://ai.google.dev/gemini-api/docs)
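The multimodal-output change above now indexes the generated image out of `response.content[1]` as a base64 data URL and displays it with IPython. Outside a notebook, the same payload can simply be written to disk; this is a sketch that assumes `image_base64` was extracted exactly as in the README snippet above:

```python
# Sketch: persist the image produced by the README's multimodal-output example.
# Assumes `image_base64` holds the base64 payload split out of the data URL,
# exactly as shown in the updated README above.
import base64

def save_generated_image(image_base64: str, path: str = "generated_cat.png") -> str:
    """Decode the base64 payload and write it to a local image file."""
    with open(path, "wb") as handle:
        handle.write(base64.b64decode(image_base64))
    return path
```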
{langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/README.md
RENAMED
@@ -41,6 +41,9 @@ This package provides LangChain support for Google Gemini models (via the offici
 
 ```bash
 pip install -U langchain-google-genai
+
+# or, with uv:
+uv add langchain-google-genai
 ````
 
 ---
@@ -58,7 +61,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 response = llm.invoke("Sing a ballad of LangChain.")
 print(response.content)
 ```
@@ -67,22 +70,30 @@ print(response.content)
 
 ## Chat Models
 
-
+See the LangChain documentation for general information about [Chat Models](https://docs.langchain.com/oss/python/langchain/models).
+
+The main interface for the Gemini chat models is `ChatGoogleGenerativeAI`.
 
 ### Multimodal Inputs
 
-Gemini
+Most Gemini models support image inputs.
 
 ```python
 from langchain_core.messages import HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 
 message = HumanMessage(
     content=[
-        {
-
+        {
+            "type": "text",
+            "text": "What's in this image?"
+        },
+        {
+            "type": "image_url",
+            "image_url": "https://picsum.photos/seed/picsum/200/300"
+        },
     ]
 )
 
@@ -90,7 +101,7 @@ response = llm.invoke([message])
 print(response.content)
 ```
 
-
+`image_url` can be:
 
 - A public image URL
 - A Google Cloud Storage path (`gcs://...`)
@@ -100,38 +111,44 @@ print(response.content)
 
 ### Multimodal Outputs
 
-
+Some Gemini models supports both text and inline image outputs.
 
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image-preview")
 
 response = llm.invoke(
     "Generate an image of a cat and say meow",
     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
 )
 
-image_base64 = response.content[
-meow_text = response.content[
+image_base64 = response.content[1].get("image_url").get("url").split(",")[-1]
+meow_text = response.content[0]
 print(meow_text)
+# In Jupyter, display the image:
+from base64 import b64decode
+from IPython.display import Image, display
+
+img_bytes = b64decode(image_base64)
+display(Image(data=img_bytes))
 ```
 
 ---
 
 ### Audio Output
 
-```
+```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="
-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-tts")
+
 response = llm.invoke(
     "Please say The quick brown fox jumps over the lazy dog",
     generation_config=dict(response_modalities=["AUDIO"]),
 )
 
-# Base64 encoded binary data of the
+# Base64 encoded binary data of the audio
 wav_data = response.additional_kwargs.get("audio")
 with open("output.wav", "wb") as f:
     f.write(wav_data)
@@ -141,15 +158,13 @@ with open("output.wav", "wb") as f:
 
 ### Multimodal Outputs in Chains
 
-You can use Gemini models in a LangChain chain:
-
 ```python
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_google_genai import ChatGoogleGenerativeAI, Modality
 
 llm = ChatGoogleGenerativeAI(
-    model="
+    model="gemini-2.5-flash-image-preview",
     response_modalities=[Modality.TEXT, Modality.IMAGE],
 )
 
@@ -165,13 +180,11 @@ response = chain.invoke("cat")
 
 ### Thinking Support
 
-Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
-
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
 llm = ChatGoogleGenerativeAI(
-    model="models/gemini-2.5-flash
+    model="models/gemini-2.5-flash",
    thinking_budget=1024
 )
 
@@ -186,8 +199,6 @@ print("Reasoning tokens used:", reasoning_score)
 
 ## Embeddings
 
-You can use Gemini embeddings in LangChain:
-
 ```python
 from langchain_google_genai import GoogleGenerativeAIEmbeddings
 
@@ -236,4 +247,4 @@ print("Answerable probability:", response.answerable_probability)
 
 - [LangChain Documentation](https://docs.langchain.com/)
 - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
-- [Gemini Model Documentation](https://ai.google.dev/)
+- [Gemini Model Documentation](https://ai.google.dev/gemini-api/docs)
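The README's embeddings section (visible as context in the hunks above) only shows the `GoogleGenerativeAIEmbeddings` import. A minimal usage sketch follows; the model name is an assumption, not something this diff specifies, so substitute whichever embedding model your project uses:

```python
# Sketch only: "models/gemini-embedding-001" is an assumed model name, not taken
# from this diff. `embed_query` is the standard LangChain Embeddings method.
from langchain_google_genai import GoogleGenerativeAIEmbeddings

embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
vector = embeddings.embed_query("What changed between 2.1.10 and 2.1.12?")
print(len(vector))  # dimensionality of the returned embedding vector
```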
{langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-"""**LangChain Google Generative AI Integration
+"""**LangChain Google Generative AI Integration**.
 
 This module integrates Google's Generative AI models, specifically the Gemini series, with the LangChain framework. It provides classes for interacting with chat models and generating embeddings, leveraging Google's advanced AI capabilities.
 
@@ -76,12 +76,12 @@ __all__ = [
     "AqaOutput",
     "ChatGoogleGenerativeAI",
     "DoesNotExistsException",
+    "DoesNotExistsException",
     "GenAIAqa",
-    "GoogleGenerativeAIEmbeddings",
     "GoogleGenerativeAI",
+    "GoogleGenerativeAIEmbeddings",
     "GoogleVectorStore",
     "HarmBlockThreshold",
     "HarmCategory",
     "Modality",
-    "DoesNotExistsException",
 ]
{langchain_google_genai-2.1.10 → langchain_google_genai-2.1.12}/langchain_google_genai/_common.py
RENAMED
@@ -13,19 +13,17 @@ _TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
 
 
 class GoogleGenerativeAIError(Exception):
-    """
-    Custom exception class for errors associated with the `Google GenAI` API.
-    """
+    """Custom exception class for errors associated with the `Google GenAI` API."""
 
 
 class _BaseGoogleGenerativeAI(BaseModel):
-    """Base class for Google Generative AI LLMs"""
+    """Base class for Google Generative AI LLMs."""
 
     model: str = Field(
         ...,
         description="""The name of the model to use.
 Examples:
-    - gemini-2.5-
+    - gemini-2.5-flash
     - models/text-bison-001""",
     )
     """Model name to use."""
@@ -34,28 +32,37 @@ Examples:
     )
     """Google AI API key.
     If not specified will be read from env var ``GOOGLE_API_KEY``."""
+
     credentials: Any = None
     "The default custom credentials (google.auth.credentials.Credentials) to use "
     "when making API calls. If not provided, credentials will be ascertained from "
     "the GOOGLE_API_KEY envvar"
+
     temperature: float = 0.7
-    """Run inference with this temperature. Must be within ``[0.0, 2.0]``.
+    """Run inference with this temperature. Must be within ``[0.0, 2.0]``. If unset,
+    will default to ``0.7``."""
+
     top_p: Optional[float] = None
     """Decode using nucleus sampling: consider the smallest set of tokens whose
-
+    probability sum is at least ``top_p``. Must be within ``[0.0, 1.0]``."""
+
     top_k: Optional[int] = None
     """Decode using top-k sampling: consider the set of ``top_k`` most probable tokens.
-
+    Must be positive."""
+
     max_output_tokens: Optional[int] = Field(default=None, alias="max_tokens")
     """Maximum number of tokens to include in a candidate. Must be greater than zero.
-
+    If unset, will default to ``64``."""
+
     n: int = 1
     """Number of chat completions to generate for each prompt. Note that the API may
-
-
-
+    not return the full ``n`` completions if duplicates are generated."""
+
+    max_retries: int = Field(default=6, alias="retries")
+    """The maximum number of retries to make when generating. If unset, will default
+    to ``6``."""
 
-    timeout: Optional[float] = None
+    timeout: Optional[float] = Field(default=None, alias="request_timeout")
     """The maximum number of seconds to wait for a response."""
 
     client_options: Optional[Dict] = Field(
@@ -68,6 +75,7 @@ Examples:
     transport: Optional[str] = Field(
         default=None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
+        alias="api_transport",
     )
     additional_headers: Optional[Dict[str, str]] = Field(
         default=None,
@@ -89,9 +97,9 @@ Examples:
     )
 
     safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
-    """The default safety settings to use for all generations.
-
-    For example:
+    """The default safety settings to use for all generations.
+
+    For example:
 
         .. code-block:: python
             from google.generativeai.types.safety_types import HarmBlockThreshold, HarmCategory
@@ -127,6 +135,7 @@ def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]:
     Args:
         module (Optional[str]):
             Optional. The module for a custom user agent header.
+
     Returns:
         Tuple[str, str]
     """
@@ -148,11 +157,13 @@ def get_client_info(module: Optional[str] = None) -> "ClientInfo":
     Args:
         module (Optional[str]):
            Optional. The module for a custom user agent header.
+
     Returns:
         ``google.api_core.gapic_v1.client_info.ClientInfo``
     """
     client_library_version, user_agent = get_user_agent(module)
-
+    # TODO: remove ignore once google-auth has types.
+    return ClientInfo(  # type: ignore[no-untyped-call]
         client_library_version=client_library_version,
         user_agent=user_agent,
     )