langchain-google-genai 2.1.4__tar.gz → 2.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-google-genai might be problematic; see the package registry's advisory page for more details.

Files changed (16)
  1. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/PKG-INFO +2 -2
  2. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/_common.py +5 -0
  3. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/chat_models.py +81 -14
  4. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/pyproject.toml +3 -2
  5. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/LICENSE +0 -0
  6. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/README.md +0 -0
  7. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/__init__.py +0 -0
  8. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/_enums.py +0 -0
  9. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/_function_utils.py +0 -0
  10. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/_genai_extension.py +0 -0
  11. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/_image_utils.py +0 -0
  12. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/embeddings.py +0 -0
  13. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/genai_aqa.py +0 -0
  14. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/google_vector_store.py +0 -0
  15. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/llms.py +0 -0
  16. {langchain_google_genai-2.1.4 → langchain_google_genai-2.1.5}/langchain_google_genai/py.typed +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: langchain-google-genai
3
- Version: 2.1.4
3
+ Version: 2.1.5
4
4
  Summary: An integration package connecting Google's genai package and LangChain
5
5
  Home-page: https://github.com/langchain-ai/langchain-google
6
6
  License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
13
13
  Classifier: Programming Language :: Python :: 3.12
14
14
  Requires-Dist: filetype (>=1.2.0,<2.0.0)
15
15
  Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
16
- Requires-Dist: langchain-core (>=0.3.52,<0.4.0)
16
+ Requires-Dist: langchain-core (>=0.3.62,<0.4.0)
17
17
  Requires-Dist: pydantic (>=2,<3)
18
18
  Project-URL: Repository, https://github.com/langchain-ai/langchain-google
19
19
  Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
@@ -84,6 +84,11 @@ Supported examples:
84
84
  default=None, description="Indicates the thinking budget in tokens."
85
85
  )
86
86
 
87
+ include_thoughts: Optional[bool] = Field(
88
+ default=None,
89
+ description="Indicates whether to include thoughts in the response.",
90
+ )
91
+
87
92
  safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
88
93
  """The default safety settings to use for all generations.
89
94
 
@@ -247,6 +247,23 @@ def _is_lc_content_block(part: dict) -> bool:
247
247
  return "type" in part
248
248
 
249
249
 
250
+ def _is_openai_image_block(block: dict) -> bool:
251
+ """Check if the block contains image data in OpenAI Chat Completions format."""
252
+ if block.get("type") == "image_url":
253
+ if (
254
+ (set(block.keys()) <= {"type", "image_url", "detail"})
255
+ and (image_url := block.get("image_url"))
256
+ and isinstance(image_url, dict)
257
+ ):
258
+ url = image_url.get("url")
259
+ if isinstance(url, str):
260
+ return True
261
+ else:
262
+ return False
263
+
264
+ return False
265
+
266
+
250
267
  def _convert_to_parts(
251
268
  raw_content: Union[str, Sequence[Union[str, dict]]],
252
269
  ) -> List[Part]:
@@ -334,14 +351,28 @@ def _convert_to_parts(
334
351
  return parts
335
352
 
336
353
 
337
- def _convert_tool_message_to_part(
354
+ def _convert_tool_message_to_parts(
338
355
  message: ToolMessage | FunctionMessage, name: Optional[str] = None
339
- ) -> Part:
356
+ ) -> list[Part]:
340
357
  """Converts a tool or function message to a google part."""
341
358
  # Legacy agent stores tool name in message.additional_kwargs instead of message.name
342
359
  name = message.name or name or message.additional_kwargs.get("name")
343
360
  response: Any
344
- if not isinstance(message.content, str):
361
+ parts: list[Part] = []
362
+ if isinstance(message.content, list):
363
+ media_blocks = []
364
+ other_blocks = []
365
+ for block in message.content:
366
+ if isinstance(block, dict) and (
367
+ is_data_content_block(block) or _is_openai_image_block(block)
368
+ ):
369
+ media_blocks.append(block)
370
+ else:
371
+ other_blocks.append(block)
372
+ parts.extend(_convert_to_parts(media_blocks))
373
+ response = other_blocks
374
+
375
+ elif not isinstance(message.content, str):
345
376
  response = message.content
346
377
  else:
347
378
  try:
@@ -356,7 +387,8 @@ def _convert_tool_message_to_part(
356
387
  ),
357
388
  )
358
389
  )
359
- return part
390
+ parts.append(part)
391
+ return parts
360
392
 
361
393
 
362
394
  def _get_ai_message_tool_messages_parts(
@@ -374,8 +406,10 @@ def _get_ai_message_tool_messages_parts(
374
406
  break
375
407
  if message.tool_call_id in tool_calls_ids:
376
408
  tool_call = tool_calls_ids[message.tool_call_id]
377
- part = _convert_tool_message_to_part(message, name=tool_call.get("name"))
378
- parts.append(part)
409
+ message_parts = _convert_tool_message_to_parts(
410
+ message, name=tool_call.get("name")
411
+ )
412
+ parts.extend(message_parts)
379
413
  # remove the id from the dict, so that we do not iterate over it again
380
414
  tool_calls_ids.pop(message.tool_call_id)
381
415
  return parts
@@ -442,7 +476,7 @@ def _parse_chat_history(
442
476
  system_instruction = None
443
477
  elif isinstance(message, FunctionMessage):
444
478
  role = "user"
445
- parts = [_convert_tool_message_to_part(message)]
479
+ parts = _convert_tool_message_to_parts(message)
446
480
  else:
447
481
  raise ValueError(
448
482
  f"Unexpected message with type {type(message)} at the position {i}."
@@ -470,7 +504,21 @@ def _parse_response_candidate(
470
504
  except AttributeError:
471
505
  text = None
472
506
 
473
- if text is not None:
507
+ if part.thought:
508
+ thinking_message = {
509
+ "type": "thinking",
510
+ "thinking": part.text,
511
+ }
512
+ if not content:
513
+ content = [thinking_message]
514
+ elif isinstance(content, str):
515
+ content = [thinking_message, content]
516
+ elif isinstance(content, list):
517
+ content.append(thinking_message)
518
+ else:
519
+ raise Exception("Unexpected content type")
520
+
521
+ elif text is not None:
474
522
  if not content:
475
523
  content = text
476
524
  elif isinstance(content, str) and text:
@@ -658,6 +706,13 @@ def _response_to_result(
658
706
  proto.Message.to_dict(safety_rating, use_integers_for_enums=False)
659
707
  for safety_rating in candidate.safety_ratings
660
708
  ]
709
+ try:
710
+ if candidate.grounding_metadata:
711
+ generation_info["grounding_metadata"] = proto.Message.to_dict(
712
+ candidate.grounding_metadata
713
+ )
714
+ except AttributeError:
715
+ pass
661
716
  message = _parse_response_candidate(candidate, streaming=stream)
662
717
  message.usage_metadata = lc_usage
663
718
  if stream:
@@ -712,7 +767,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
712
767
 
713
768
  from langchain_google_genai import ChatGoogleGenerativeAI
714
769
 
715
- llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
770
+ llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-001")
716
771
  llm.invoke("Write me a ballad about LangChain")
717
772
 
718
773
  Invoke:
@@ -797,7 +852,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
797
852
  file = client.files.get(name=file.name)
798
853
 
799
854
  # Create cache
800
- model = 'models/gemini-1.5-flash-001'
855
+ model = 'models/gemini-1.5-flash-latest'
801
856
  cache = client.caches.create(
802
857
  model=model,
803
858
  config=types.CreateCachedContentConfig(
@@ -853,7 +908,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
853
908
  ],
854
909
  )
855
910
  ]
856
- model = "gemini-1.5-flash-001"
911
+ model = "gemini-1.5-flash-latest"
857
912
  cache = client.caches.create(
858
913
  model=model,
859
914
  config=CreateCachedContentConfig(
@@ -1034,7 +1089,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
1034
1089
  """Needed for arg validation."""
1035
1090
  # Get all valid field names, including aliases
1036
1091
  valid_fields = set()
1037
- for field_name, field_info in self.model_fields.items():
1092
+ for field_name, field_info in self.__class__.model_fields.items():
1038
1093
  valid_fields.add(field_name)
1039
1094
  if hasattr(field_info, "alias") and field_info.alias is not None:
1040
1095
  valid_fields.add(field_info.alias)
@@ -1160,6 +1215,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
1160
1215
  "safety_settings": self.safety_settings,
1161
1216
  "response_modalities": self.response_modalities,
1162
1217
  "thinking_budget": self.thinking_budget,
1218
+ "include_thoughts": self.include_thoughts,
1163
1219
  }
1164
1220
 
1165
1221
  def invoke(
@@ -1236,8 +1292,19 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
1236
1292
  "top_k": self.top_k,
1237
1293
  "top_p": self.top_p,
1238
1294
  "response_modalities": self.response_modalities,
1239
- "thinking_config": {"thinking_budget": self.thinking_budget}
1240
- if self.thinking_budget is not None
1295
+ "thinking_config": (
1296
+ (
1297
+ {"thinking_budget": self.thinking_budget}
1298
+ if self.thinking_budget is not None
1299
+ else {}
1300
+ )
1301
+ | (
1302
+ {"include_thoughts": self.include_thoughts}
1303
+ if self.include_thoughts is not None
1304
+ else {}
1305
+ )
1306
+ )
1307
+ if self.thinking_budget is not None or self.include_thoughts is not None
1241
1308
  else None,
1242
1309
  }.items()
1243
1310
  if v is not None
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "langchain-google-genai"
3
- version = "2.1.4"
3
+ version = "2.1.5"
4
4
  description = "An integration package connecting Google's genai package and LangChain"
5
5
  authors = []
6
6
  readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"
12
12
 
13
13
  [tool.poetry.dependencies]
14
14
  python = ">=3.9,<4.0"
15
- langchain-core = "^0.3.52"
15
+ langchain-core = "^0.3.62"
16
16
  google-ai-generativelanguage = "^0.6.18"
17
17
  pydantic = ">=2,<3"
18
18
  filetype = "^1.2.0"
@@ -27,6 +27,7 @@ pytest-mock = "^3.10.0"
27
27
  syrupy = "^4.0.2"
28
28
  pytest-watcher = "^0.3.4"
29
29
  pytest-asyncio = "^0.21.1"
30
+ pytest-retry = "^1.7.0"
30
31
  numpy = ">=1.26.2"
31
32
  langchain-tests = "0.3.19"
32
33