langchain-google-genai 2.1.8__tar.gz → 2.1.10__tar.gz

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.


Files changed (16)
  1. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/PKG-INFO +23 -22
  2. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/README.md +21 -20
  3. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/__init__.py +3 -3
  4. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/_common.py +2 -2
  5. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/_function_utils.py +9 -2
  6. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/chat_models.py +174 -17
  7. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/embeddings.py +2 -2
  8. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/llms.py +1 -1
  9. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/pyproject.toml +5 -5
  10. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/LICENSE +0 -0
  11. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/_enums.py +0 -0
  12. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/_genai_extension.py +0 -0
  13. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/_image_utils.py +0 -0
  14. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/genai_aqa.py +0 -0
  15. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/google_vector_store.py +0 -0
  16. {langchain_google_genai-2.1.8 → langchain_google_genai-2.1.10}/langchain_google_genai/py.typed +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langchain-google-genai
- Version: 2.1.8
+ Version: 2.1.10
  Summary: An integration package connecting Google's genai package and LangChain
  Home-page: https://github.com/langchain-ai/langchain-google
  License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Requires-Dist: filetype (>=1.2.0,<2.0.0)
  Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
- Requires-Dist: langchain-core (>=0.3.68,<0.4.0)
+ Requires-Dist: langchain-core (>=0.3.75,<0.4.0)
  Requires-Dist: pydantic (>=2,<3)
  Project-URL: Repository, https://github.com/langchain-ai/langchain-google
  Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
@@ -29,16 +29,20 @@ This package enables seamless access to Google Gemini's chat, vision, embeddings

  ## Table of Contents

- - [Overview](#overview)
- - [Installation](#installation)
- - [Quickstart](#quickstart)
- - [Chat Models](#chat-models)
- - [Multimodal Inputs](#multimodal-inputs)
- - [Multimodal Outputs](#multimodal-outputs)
- - [Multimodal Outputs in Chains](#multimodal-outputs-in-chains)
- - [Thinking Support](#thinking-support)
- - [Embeddings](#embeddings)
- - [Semantic Retrieval (RAG)](#semantic-retrieval-rag)
+ - [langchain-google-genai](#langchain-google-genai)
+   - [Table of Contents](#table-of-contents)
+   - [Overview](#overview)
+   - [Installation](#installation)
+   - [Quickstart](#quickstart)
+   - [Chat Models](#chat-models)
+   - [Multimodal Inputs](#multimodal-inputs)
+   - [Multimodal Outputs](#multimodal-outputs)
+   - [Audio Output](#audio-output)
+   - [Multimodal Outputs in Chains](#multimodal-outputs-in-chains)
+   - [Thinking Support](#thinking-support)
+   - [Embeddings](#embeddings)
+   - [Semantic Retrieval (RAG)](#semantic-retrieval-rag)
+   - [Resources](#resources)

  ---

@@ -109,9 +113,9 @@ print(response.content)

  ✅ `image_url` can be:

- * A public image URL
- * A Google Cloud Storage path (`gcs://...`)
- * A base64-encoded image (e.g., `data:image/png;base64,...`)
+ - A public image URL
+ - A Google Cloud Storage path (`gcs://...`)
+ - A base64-encoded image (e.g., `data:image/png;base64,...`)

  ---

@@ -208,7 +212,7 @@ You can use Gemini embeddings in LangChain:
  ```python
  from langchain_google_genai import GoogleGenerativeAIEmbeddings

- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
  vector = embeddings.embed_query("hello, world!")
  print(vector)
  ```
@@ -249,12 +253,9 @@ print("Answerable probability:", response.answerable_probability)

  ---

-
  ## Resources

- * [LangChain Documentation](https://docs.langchain.com/)
- * [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
- * [Gemini Model Documentation](https://ai.google.dev/)
-
-
+ - [LangChain Documentation](https://docs.langchain.com/)
+ - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
+ - [Gemini Model Documentation](https://ai.google.dev/)

README.md

@@ -8,16 +8,20 @@ This package enables seamless access to Google Gemini's chat, vision, embeddings

  ## Table of Contents

- - [Overview](#overview)
- - [Installation](#installation)
- - [Quickstart](#quickstart)
- - [Chat Models](#chat-models)
- - [Multimodal Inputs](#multimodal-inputs)
- - [Multimodal Outputs](#multimodal-outputs)
- - [Multimodal Outputs in Chains](#multimodal-outputs-in-chains)
- - [Thinking Support](#thinking-support)
- - [Embeddings](#embeddings)
- - [Semantic Retrieval (RAG)](#semantic-retrieval-rag)
+ - [langchain-google-genai](#langchain-google-genai)
+   - [Table of Contents](#table-of-contents)
+   - [Overview](#overview)
+   - [Installation](#installation)
+   - [Quickstart](#quickstart)
+   - [Chat Models](#chat-models)
+   - [Multimodal Inputs](#multimodal-inputs)
+   - [Multimodal Outputs](#multimodal-outputs)
+   - [Audio Output](#audio-output)
+   - [Multimodal Outputs in Chains](#multimodal-outputs-in-chains)
+   - [Thinking Support](#thinking-support)
+   - [Embeddings](#embeddings)
+   - [Semantic Retrieval (RAG)](#semantic-retrieval-rag)
+   - [Resources](#resources)

  ---

@@ -88,9 +92,9 @@ print(response.content)

  ✅ `image_url` can be:

- * A public image URL
- * A Google Cloud Storage path (`gcs://...`)
- * A base64-encoded image (e.g., `data:image/png;base64,...`)
+ - A public image URL
+ - A Google Cloud Storage path (`gcs://...`)
+ - A base64-encoded image (e.g., `data:image/png;base64,...`)

  ---

@@ -187,7 +191,7 @@ You can use Gemini embeddings in LangChain:
  ```python
  from langchain_google_genai import GoogleGenerativeAIEmbeddings

- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
  vector = embeddings.embed_query("hello, world!")
  print(vector)
  ```
@@ -228,11 +232,8 @@ print("Answerable probability:", response.answerable_probability)

  ---

-
  ## Resources

- * [LangChain Documentation](https://docs.langchain.com/)
- * [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
- * [Gemini Model Documentation](https://ai.google.dev/)
-
-
+ - [LangChain Documentation](https://docs.langchain.com/)
+ - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
+ - [Gemini Model Documentation](https://ai.google.dev/)
langchain_google_genai/__init__.py

@@ -30,7 +30,7 @@ After setting up your environment with the required API key, you can interact wi

  from langchain_google_genai import ChatGoogleGenerativeAI

- llm = ChatGoogleGenerativeAI(model="gemini-pro")
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro")
  llm.invoke("Sing a ballad of LangChain.")

  **Using LLMs**
@@ -41,7 +41,7 @@ The package also supports generating text with Google's models.

  from langchain_google_genai import GoogleGenerativeAI

- llm = GoogleGenerativeAI(model="gemini-pro")
+ llm = GoogleGenerativeAI(model="gemini-2.5-pro")
  llm.invoke("Once upon a time, a library called LangChain")

  **Embedding Generation**
@@ -52,7 +52,7 @@ The package also supports creating embeddings with Google's models, useful for t

  from langchain_google_genai import GoogleGenerativeAIEmbeddings

- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
  embeddings.embed_query("hello, world!")

  """ # noqa: E501
langchain_google_genai/_common.py

@@ -24,8 +24,8 @@ class _BaseGoogleGenerativeAI(BaseModel):
      model: str = Field(
          ...,
          description="""The name of the model to use.
- Supported examples:
-     - gemini-pro
+ Examples:
+     - gemini-2.5-pro
      - models/text-bison-001""",
      )
      """Model name to use."""
langchain_google_genai/_function_utils.py

@@ -322,7 +322,7 @@ def _get_properties_from_schema_any(schema: Any) -> Dict[str, Any]:


  def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
-     properties = {}
+     properties: Dict[str, Dict[str, Union[str, int, Dict, List]]] = {}
      for k, v in schema.items():
          if not isinstance(k, str):
              logger.warning(f"Key '{k}' is not supported in schema, type={type(k)}")
@@ -331,7 +331,14 @@ def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
          logger.warning(f"Value '{v}' is not supported in schema, ignoring v={v}")
          continue
      properties_item: Dict[str, Union[str, int, Dict, List]] = {}
-     if v.get("type") or v.get("anyOf") or v.get("type_"):
+     if v.get("anyOf") and all(
+         anyOf_type.get("type") != "null" for anyOf_type in v.get("anyOf", [])
+     ):
+         properties_item["anyOf"] = [
+             _format_json_schema_to_gapic(anyOf_type)
+             for anyOf_type in v.get("anyOf", [])
+         ]
+     elif v.get("type") or v.get("anyOf") or v.get("type_"):
          item_type_ = _get_type_from_schema(v)
          properties_item["type_"] = item_type_
          if _is_nullable_schema(v):
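The new branch preserves non-nullable `anyOf` unions instead of collapsing them into a single `type_`. A hedged sketch of the two schema shapes involved (both property definitions below are hypothetical illustrations, not library fixtures):

```python
# A property whose anyOf alternatives are all non-null now has each
# alternative converted and kept under "anyOf".
price_union = {
    "anyOf": [
        {"type": "number"},
        {"type": "string"},
    ],
    "description": "Price as a number or a formatted string",
}

# Nullable unions such as Optional[int] -> anyOf [integer, null] still take
# the old path, because the guard requires every alternative to be
# non-"null".
price_nullable = {
    "anyOf": [
        {"type": "integer"},
        {"type": "null"},
    ],
}
```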
langchain_google_genai/chat_models.py

@@ -6,6 +6,7 @@ import io
  import json
  import logging
  import mimetypes
+ import time
  import uuid
  import warnings
  import wave
@@ -153,7 +154,12 @@ class ChatGoogleGenerativeAIError(GoogleGenerativeAIError):
      """


- def _create_retry_decorator() -> Callable[[Any], Any]:
+ def _create_retry_decorator(
+     max_retries: int = 6,
+     wait_exponential_multiplier: float = 2.0,
+     wait_exponential_min: float = 1.0,
+     wait_exponential_max: float = 60.0,
+ ) -> Callable[[Any], Any]:
      """
      Creates and returns a preconfigured tenacity retry decorator.

@@ -165,15 +171,14 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
          Callable[[Any], Any]: A retry decorator configured for handling specific
          Google API exceptions.
      """
-     multiplier = 2
-     min_seconds = 1
-     max_seconds = 60
-     max_retries = 2
-
      return retry(
          reraise=True,
          stop=stop_after_attempt(max_retries),
-         wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
+         wait=wait_exponential(
+             multiplier=wait_exponential_multiplier,
+             min=wait_exponential_min,
+             max=wait_exponential_max,
+         ),
          retry=(
              retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
              | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
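The reworked decorator raises the default attempt count from 2 to 6 and makes the backoff window configurable. For reference, a standalone sketch of the equivalent policy using tenacity's public API; `call_gemini` is a hypothetical placeholder for the wrapped generation call:

```python
import google.api_core.exceptions
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


@retry(
    reraise=True,
    # Mirrors the new defaults: 6 attempts, exponential backoff
    # bounded between 1 and 60 seconds.
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=2.0, min=1.0, max=60.0),
    retry=retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
    | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable),
)
def call_gemini() -> str:
    ...  # hypothetical stand-in for the underlying API call
```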
@@ -198,13 +203,17 @@ def _chat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
      Returns:
          Any: The result from the chat generation method.
      """
-     retry_decorator = _create_retry_decorator()
+     retry_decorator = _create_retry_decorator(
+         max_retries=kwargs.get("max_retries", 6),
+         wait_exponential_multiplier=kwargs.get("wait_exponential_multiplier", 2.0),
+         wait_exponential_min=kwargs.get("wait_exponential_min", 1.0),
+         wait_exponential_max=kwargs.get("wait_exponential_max", 60.0),
+     )

      @retry_decorator
      def _chat_with_retry(**kwargs: Any) -> Any:
          try:
              return generation_method(**kwargs)
-         # Do not retry for these errors.
          except google.api_core.exceptions.FailedPrecondition as exc:
              if "location is not supported" in exc.message:
                  error_msg = (
@@ -218,6 +227,13 @@ def _chat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
              raise ChatGoogleGenerativeAIError(
                  f"Invalid argument provided to Gemini: {e}"
              ) from e
+         except google.api_core.exceptions.ResourceExhausted as e:
+             # Handle quota-exceeded error with recommended retry delay
+             if hasattr(e, "retry_after") and e.retry_after < kwargs.get(
+                 "wait_exponential_max", 60.0
+             ):
+                 time.sleep(e.retry_after)
+             raise e
          except Exception as e:
              raise e
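The added `ResourceExhausted` handler sleeps for a server-recommended delay before re-raising, so the surrounding retry decorator resumes on schedule rather than hammering a throttled quota. A minimal sketch of the same pattern in isolation; `call_api` is a hypothetical stand-in:

```python
import time
from typing import Any, Callable

import google.api_core.exceptions


def call_with_quota_backoff(call_api: Callable[[], Any], max_wait: float = 60.0) -> Any:
    """Honor a server-suggested retry delay on quota errors, then re-raise."""
    try:
        return call_api()
    except google.api_core.exceptions.ResourceExhausted as e:
        retry_after = getattr(e, "retry_after", None)
        if retry_after is not None and retry_after < max_wait:
            time.sleep(retry_after)  # wait out the recommended pause
        raise  # let the outer retry decorator schedule the next attempt
```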
 
@@ -682,7 +698,9 @@ def _parse_response_candidate(
      )
      if content is None:
          content = ""
-     if any(isinstance(item, dict) and "executable_code" in item for item in content):
+     if isinstance(content, list) and any(
+         isinstance(item, dict) and "executable_code" in item for item in content
+     ):
          warnings.warn(
              """
              ⚠️ Warning: Output may vary each run.
@@ -830,7 +848,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):

          from langchain_google_genai import ChatGoogleGenerativeAI

-         llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-001")
+         llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
          llm.invoke("Write me a ballad about LangChain")

      Invoke:
@@ -1040,7 +1058,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
      .. code-block:: python

          from google.ai.generativelanguage_v1beta.types import Tool as GenAITool
-         llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp")
+         llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
          resp = llm.invoke(
              "When is the next total solar eclipse in US?",
              tools=[GenAITool(google_search={})],
@@ -1098,6 +1116,144 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):

          'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms.'

+ PDF input:
+     .. code-block:: python
+
+         import base64
+         from langchain_core.messages import HumanMessage
+
+         pdf_bytes = open("/path/to/your/test.pdf", 'rb').read()
+         pdf_base64 = base64.b64encode(pdf_bytes).decode('utf-8')
+
+         message = HumanMessage(
+             content=[
+                 {"type": "text", "text": "describe the document in a sentence"},
+                 {
+                     "type": "file",
+                     "source_type": "base64",
+                     "mime_type": "application/pdf",
+                     "data": pdf_base64
+                 }
+             ]
+         )
+         ai_msg = llm.invoke([message])
+         ai_msg.content
+
+     .. code-block:: python
+
+         'This research paper describes a system developed for SemEval-2025 Task 9, which aims to automate the detection of food hazards from recall reports, addressing the class imbalance problem by leveraging LLM-based data augmentation techniques and transformer-based models to improve performance.'
+
+ Video input:
+     .. code-block:: python
+
+         import base64
+         from langchain_core.messages import HumanMessage
+
+         video_bytes = open("/path/to/your/video.mp4", 'rb').read()
+         video_base64 = base64.b64encode(video_bytes).decode('utf-8')
+
+         message = HumanMessage(
+             content=[
+                 {"type": "text", "text": "describe what's in this video in a sentence"},
+                 {
+                     "type": "file",
+                     "source_type": "base64",
+                     "mime_type": "video/mp4",
+                     "data": video_base64
+                 }
+             ]
+         )
+         ai_msg = llm.invoke([message])
+         ai_msg.content
+
+     .. code-block:: python
+
+         'Tom and Jerry, along with a turkey, engage in a chaotic Thanksgiving-themed adventure involving a corn-on-the-cob chase, maze antics, and a disastrous attempt to prepare a turkey dinner.'
+
+     You can also pass YouTube URLs directly:
+
+     .. code-block:: python
+
+         from langchain_core.messages import HumanMessage
+
+         message = HumanMessage(
+             content=[
+                 {"type": "text", "text": "summarize the video in 3 sentences."},
+                 {
+                     "type": "media",
+                     "file_uri": "https://www.youtube.com/watch?v=9hE5-98ZeCg",
+                     "mime_type": "video/mp4",
+                 }
+             ]
+         )
+         ai_msg = llm.invoke([message])
+         ai_msg.content
+
+     .. code-block:: python
+
+         'The video is a demo of multimodal live streaming in Gemini 2.0. The narrator is sharing his screen in AI Studio and asks if the AI can see it. The AI then reads text that is highlighted on the screen, defines the word “multimodal,” and summarizes everything that was seen and heard.'
+
+ Audio input:
+     .. code-block:: python
+
+         import base64
+         from langchain_core.messages import HumanMessage
+
+         audio_bytes = open("/path/to/your/audio.mp3", 'rb').read()
+         audio_base64 = base64.b64encode(audio_bytes).decode('utf-8')
+
+         message = HumanMessage(
+             content=[
+                 {"type": "text", "text": "summarize this audio in a sentence"},
+                 {
+                     "type": "file",
+                     "source_type": "base64",
+                     "mime_type": "audio/mp3",
+                     "data": audio_base64
+                 }
+             ]
+         )
+         ai_msg = llm.invoke([message])
+         ai_msg.content
+
+     .. code-block:: python
+
+         "In this episode of the Made by Google podcast, Stephen Johnson and Simon Tokumine discuss NotebookLM, a tool designed to help users understand complex material in various modalities, with a focus on its unexpected uses, the development of audio overviews, and the implementation of new features like mind maps and source discovery."
+
+ File upload (URI-based):
+     You can also upload files to Google's servers and reference them by URI.
+     This works for PDFs, images, videos, and audio files.
+
+     .. code-block:: python
+
+         import time
+         from google import genai
+         from langchain_core.messages import HumanMessage
+
+         client = genai.Client()
+
+         myfile = client.files.upload(file="/path/to/your/sample.pdf")
+         while myfile.state.name == "PROCESSING":
+             time.sleep(2)
+             myfile = client.files.get(name=myfile.name)
+
+         message = HumanMessage(
+             content=[
+                 {"type": "text", "text": "What is in the document?"},
+                 {
+                     "type": "media",
+                     "file_uri": myfile.uri,
+                     "mime_type": "application/pdf",
+                 },
+             ]
+         )
+         ai_msg = llm.invoke([message])
+         ai_msg.content
+
+     .. code-block:: python
+
+         "This research paper assesses and mitigates multi-turn jailbreak vulnerabilities in large language models using the Crescendo attack study, evaluating attack success rates and mitigation strategies like prompt hardening and LLM-as-guardrail."
+
  Token usage:
      .. code-block:: python

@@ -1127,8 +1283,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):

      client: Any = Field(default=None, exclude=True) #: :meta private:
      async_client_running: Any = Field(default=None, exclude=True) #: :meta private:
-     default_metadata: Sequence[Tuple[str, str]] = Field(
-         default_factory=list
+     default_metadata: Optional[Sequence[Tuple[str, str]]] = Field(
+         default=None, alias="default_metadata_input"
      ) #: :meta private:

      convert_system_message_to_human: bool = False
1545
1701
  )
1546
1702
 
1547
1703
  if run_manager:
1548
- run_manager.on_llm_new_token(gen.text)
1704
+ run_manager.on_llm_new_token(gen.text, chunk=gen)
1549
1705
  yield gen
1550
1706
 
1551
1707
  async def _astream(
@@ -1611,7 +1767,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
1611
1767
  )
1612
1768
 
1613
1769
  if run_manager:
1614
- await run_manager.on_llm_new_token(gen.text)
1770
+ await run_manager.on_llm_new_token(gen.text, chunk=gen)
1615
1771
  yield gen
1616
1772
 
1617
1773
  def _prepare_request(
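Forwarding `chunk=gen` means streaming callbacks now receive the whole `ChatGenerationChunk` alongside the token text, matching the `on_llm_new_token` signature in langchain-core. A minimal handler sketch, assuming a current langchain-core:

```python
from typing import Any, Optional, Union

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk


class ChunkLogger(BaseCallbackHandler):
    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        **kwargs: Any,
    ) -> None:
        # With chunk populated, per-token message fields such as usage
        # metadata become visible, not just the raw text.
        if isinstance(chunk, ChatGenerationChunk):
            print(token, chunk.message.usage_metadata)
        else:
            print(token)
```

Registered via `config={"callbacks": [ChunkLogger()]}` on `stream()`, this prints each token together with whatever metadata the chunk carries.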
@@ -1816,7 +1972,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
          tools: A list of tool definitions to bind to this chat model.
              Can be a pydantic model, callable, or BaseTool. Pydantic
              models, callables, and BaseTools will be automatically converted to
-             their schema dictionary representation.
+             their schema dictionary representation. Tools with Union types in
+             their arguments are now supported and converted to `anyOf` schemas.
          **kwargs: Any additional parameters to pass to the
              :class:`~langchain.runnable.Runnable` constructor.
      """
langchain_google_genai/embeddings.py

@@ -40,7 +40,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):

          from langchain_google_genai import GoogleGenerativeAIEmbeddings

-         embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+         embeddings = GoogleGenerativeAIEmbeddings(model="gemini-embedding-001")
          embeddings.embed_query("What's our Q1 revenue?")
      """

@@ -49,7 +49,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
      model: str = Field(
          ...,
          description="The name of the embedding model to use. "
-         "Example: ``'models/embedding-001'``",
+         "Example: ``'models/gemini-embedding-001'``",
      )
      task_type: Optional[str] = Field(
          default=None,
langchain_google_genai/llms.py

@@ -29,7 +29,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
      .. code-block:: python

          from langchain_google_genai import GoogleGenerativeAI
-         llm = GoogleGenerativeAI(model="gemini-pro")
+         llm = GoogleGenerativeAI(model="gemini-2.5-pro")
      """

      client: Any = None #: :meta private:
pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "langchain-google-genai"
- version = "2.1.8"
+ version = "2.1.10"
  description = "An integration package connecting Google's genai package and LangChain"
  authors = []
  readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"

  [tool.poetry.dependencies]
  python = ">=3.9,<4.0"
- langchain-core = "^0.3.68"
+ langchain-core = "^0.3.75"
  google-ai-generativelanguage = "^0.6.18"
  pydantic = ">=2,<3"
  filetype = "^1.2.0"
@@ -23,7 +23,7 @@ optional = true
  [tool.poetry.group.test.dependencies]
  pytest = "^7.3.0"
  freezegun = "^1.2.2"
- pytest-mock = "^3.10.0"
+ pytest-mock = "^3.14.1"
  syrupy = "^4.0.2"
  pytest-watcher = "^0.3.4"
  pytest-asyncio = "^0.21.1"
@@ -53,11 +53,11 @@ pytest = "^7.3.0"
  optional = true

  [tool.poetry.group.lint.dependencies]
- ruff = "^0.1.5"
+ ruff = "^0.12.10"


  [tool.poetry.group.typing.dependencies]
- mypy = "^1.10"
+ mypy = "^1.17.1"
  types-requests = "^2.31.0"
  types-google-cloud-ndb = "^2.2.0.1"
  types-protobuf = "^4.24.0.20240302"