langchain-google-genai 2.1.3__tar.gz → 2.1.5__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of langchain-google-genai has been flagged as potentially problematic.

Files changed (16)
  1. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/PKG-INFO +19 -3
  2. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/README.md +16 -0
  3. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_common.py +15 -0
  4. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_function_utils.py +2 -0
  5. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/chat_models.py +122 -22
  6. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/pyproject.toml +5 -4
  7. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/LICENSE +0 -0
  8. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/__init__.py +0 -0
  9. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_enums.py +0 -0
  10. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_genai_extension.py +0 -0
  11. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_image_utils.py +0 -0
  12. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/embeddings.py +0 -0
  13. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/genai_aqa.py +0 -0
  14. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/google_vector_store.py +0 -0
  15. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/llms.py +0 -0
  16. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.3
+Version: 2.1.5
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -12,8 +12,8 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: filetype (>=1.2.0,<2.0.0)
-Requires-Dist: google-ai-generativelanguage (>=0.6.16,<0.7.0)
-Requires-Dist: langchain-core (>=0.3.52,<0.4.0)
+Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
+Requires-Dist: langchain-core (>=0.3.62,<0.4.0)
 Requires-Dist: pydantic (>=2,<3)
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
@@ -116,6 +116,22 @@ chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
 ```
 
+#### Thinking support
+
+Gemini 2.5 Flash model supports reasoning through their thoughts
+
+```
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-04-17", thinking_budget=1024)
+
+response = llm.invoke(
+    "How many O's are in Google? Please tell me how you double checked the result"
+)
+
+assert response.usage_metadata["output_token_details"]["reasoning"] > 0
+```
+
 ## Embeddings
 
 This package also adds support for google's embeddings models.
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/README.md

@@ -95,6 +95,22 @@ chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
 ```
 
+#### Thinking support
+
+Gemini 2.5 Flash model supports reasoning through their thoughts
+
+```
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-04-17", thinking_budget=1024)
+
+response = llm.invoke(
+    "How many O's are in Google? Please tell me how you double checked the result"
+)
+
+assert response.usage_metadata["output_token_details"]["reasoning"] > 0
+```
+
 ## Embeddings
 
 This package also adds support for google's embeddings models.
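Note: this release also adds an `include_thoughts` flag alongside `thinking_budget` (see the `_common.py` diff below). A minimal sketch combining the two, assuming the preview model named above is still served; the "thinking" block shape follows the `chat_models.py` changes further down:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="models/gemini-2.5-flash-preview-04-17",  # assumed still available
    thinking_budget=1024,
    include_thoughts=True,
)
response = llm.invoke("How many O's are in Google?")

# Thought parts come back as content blocks of type "thinking".
thoughts = [
    block["thinking"]
    for block in response.content
    if isinstance(block, dict) and block.get("type") == "thinking"
]
```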
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_common.py

@@ -1,3 +1,4 @@
+import os
 from importlib import metadata
 from typing import Any, Dict, List, Optional, Tuple, TypedDict
 
@@ -7,6 +8,9 @@ from pydantic import BaseModel, Field, SecretStr
 
 from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory, Modality
 
+_TELEMETRY_TAG = "remote_reasoning_engine"
+_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
+
 
 class GoogleGenerativeAIError(Exception):
     """
@@ -76,6 +80,15 @@ Supported examples:
         default=None, description=("A list of modalities of the response")
     )
 
+    thinking_budget: Optional[int] = Field(
+        default=None, description="Indicates the thinking budget in tokens."
+    )
+
+    include_thoughts: Optional[bool] = Field(
+        default=None,
+        description="Indicates whether to include thoughts in the response.",
+    )
+
     safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
     """The default safety settings to use for all generations.
 
@@ -124,6 +137,8 @@ def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]:
     client_library_version = (
         f"{langchain_version}-{module}" if module else langchain_version
     )
+    if os.environ.get(_TELEMETRY_ENV_VARIABLE_NAME):
+        client_library_version += f"+{_TELEMETRY_TAG}"
     return client_library_version, f"langchain-google-genai/{client_library_version}"
 
 
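A sketch of the telemetry change: when `GOOGLE_CLOUD_AGENT_ENGINE_ID` is set to any non-empty value, `get_user_agent` appends the `remote_reasoning_engine` tag to both returned strings. `_common` is a private module, so importing from it is shown only for illustration:

```python
import os

from langchain_google_genai._common import get_user_agent

os.environ["GOOGLE_CLOUD_AGENT_ENGINE_ID"] = "1234"  # any non-empty value
client_lib_version, user_agent = get_user_agent("ChatGoogleGenerativeAI")
assert client_lib_version.endswith("+remote_reasoning_engine")
assert user_agent.startswith("langchain-google-genai/")
```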
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/_function_utils.py

@@ -384,6 +384,8 @@ def _get_items_from_schema(schema: Union[Dict, List, str]) -> Dict[str, Any]:
         items["type_"] = _get_type_from_schema(schema)
         if items["type_"] == glm.Type.OBJECT and "properties" in schema:
             items["properties"] = _get_properties_from_schema_any(schema["properties"])
+        if items["type_"] == glm.Type.ARRAY and "items" in schema:
+            items["items"] = _format_json_schema_to_gapic(schema["items"])
         if "title" in schema or "description" in schema:
             items["description"] = (
                 schema.get("description") or schema.get("title") or ""
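The new `ARRAY` branch recurses into nested `items`, so tool schemas with arrays of arrays no longer lose their inner element type. A sketch with a hypothetical nested-list tool argument:

```python
from pydantic import BaseModel, Field

from langchain_google_genai import ChatGoogleGenerativeAI


class SumMatrix(BaseModel):
    """Sum every entry of a matrix."""

    rows: list[list[int]] = Field(description="Matrix rows")  # nested array items


llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-001")
llm_with_tool = llm.bind_tools([SumMatrix])  # inner "items" now survive conversion
```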
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/langchain_google_genai/chat_models.py

@@ -78,7 +78,9 @@ from langchain_core.output_parsers.openai_tools import (
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable, RunnableConfig, RunnablePassthrough
 from langchain_core.tools import BaseTool
+from langchain_core.utils import get_pydantic_field_names
 from langchain_core.utils.function_calling import convert_to_openai_tool
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -245,6 +247,23 @@ def _is_lc_content_block(part: dict) -> bool:
     return "type" in part
 
 
+def _is_openai_image_block(block: dict) -> bool:
+    """Check if the block contains image data in OpenAI Chat Completions format."""
+    if block.get("type") == "image_url":
+        if (
+            (set(block.keys()) <= {"type", "image_url", "detail"})
+            and (image_url := block.get("image_url"))
+            and isinstance(image_url, dict)
+        ):
+            url = image_url.get("url")
+            if isinstance(url, str):
+                return True
+        else:
+            return False
+
+    return False
+
+
 def _convert_to_parts(
     raw_content: Union[str, Sequence[Union[str, dict]]],
 ) -> List[Part]:
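For reference, a block of the following shape is what the new `_is_openai_image_block` helper accepts; the data URL here is a placeholder, and `detail` is the only extra key allowed:

```python
# Minimal sketch of the OpenAI Chat Completions image shape.
openai_image_block = {
    "type": "image_url",
    "image_url": {"url": "data:image/png;base64,<BASE64_PNG>"},  # placeholder URL
    "detail": "low",  # optional
}
```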
@@ -332,14 +351,28 @@
     return parts
 
 
-def _convert_tool_message_to_part(
+def _convert_tool_message_to_parts(
     message: ToolMessage | FunctionMessage, name: Optional[str] = None
-) -> Part:
+) -> list[Part]:
     """Converts a tool or function message to a google part."""
     # Legacy agent stores tool name in message.additional_kwargs instead of message.name
     name = message.name or name or message.additional_kwargs.get("name")
     response: Any
-    if not isinstance(message.content, str):
+    parts: list[Part] = []
+    if isinstance(message.content, list):
+        media_blocks = []
+        other_blocks = []
+        for block in message.content:
+            if isinstance(block, dict) and (
+                is_data_content_block(block) or _is_openai_image_block(block)
+            ):
+                media_blocks.append(block)
+            else:
+                other_blocks.append(block)
+        parts.extend(_convert_to_parts(media_blocks))
+        response = other_blocks
+
+    elif not isinstance(message.content, str):
         response = message.content
     else:
         try:
@@ -354,7 +387,8 @@
             ),
         )
     )
-    return part
+    parts.append(part)
+    return parts
 
 
 def _get_ai_message_tool_messages_parts(
@@ -372,8 +406,10 @@
             break
         if message.tool_call_id in tool_calls_ids:
            tool_call = tool_calls_ids[message.tool_call_id]
-            part = _convert_tool_message_to_part(message, name=tool_call.get("name"))
-            parts.append(part)
+            message_parts = _convert_tool_message_to_parts(
+                message, name=tool_call.get("name")
+            )
+            parts.extend(message_parts)
             # remove the id from the dict, so that we do not iterate over it again
             tool_calls_ids.pop(message.tool_call_id)
     return parts
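With these changes a tool result may mix media and text blocks: media blocks become inline-data Parts, and everything else is folded into the function response payload. A sketch (the data URL and tool-call id are hypothetical):

```python
from langchain_core.messages import ToolMessage

msg = ToolMessage(
    tool_call_id="call_1",  # must match a tool call on the preceding AIMessage
    content=[
        # Media block -> converted to a Part via _convert_to_parts.
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64_PNG>"}},
        # Non-media block -> kept in the function_response payload.
        {"type": "text", "text": "Rendered the requested chart."},
    ],
)
```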
@@ -440,7 +476,7 @@
             system_instruction = None
         elif isinstance(message, FunctionMessage):
             role = "user"
-            parts = [_convert_tool_message_to_part(message)]
+            parts = _convert_tool_message_to_parts(message)
         else:
             raise ValueError(
                 f"Unexpected message with type {type(message)} at the position {i}."
@@ -463,12 +499,26 @@
         try:
             text: Optional[str] = part.text
             # Remove erroneous newline character if present
-            if text is not None:
+            if not streaming and text is not None:
                 text = text.rstrip("\n")
         except AttributeError:
             text = None
 
-        if text is not None:
+        if part.thought:
+            thinking_message = {
+                "type": "thinking",
+                "thinking": part.text,
+            }
+            if not content:
+                content = [thinking_message]
+            elif isinstance(content, str):
+                content = [thinking_message, content]
+            elif isinstance(content, list):
+                content.append(thinking_message)
+            else:
+                raise Exception("Unexpected content type")
+
+        elif text is not None:
             if not content:
                 content = text
             elif isinstance(content, str) and text:
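With thoughts enabled, the parsed `AIMessage.content` can interleave plain strings with `{"type": "thinking", ...}` dicts. A small helper sketch for splitting them, assuming only the block shape shown above:

```python
def split_thoughts(content):
    """Separate "thinking" blocks from the rest of an AIMessage's content."""
    if isinstance(content, str):
        return [], [content]
    thoughts, rest = [], []
    for block in content:
        if isinstance(block, dict) and block.get("type") == "thinking":
            thoughts.append(block["thinking"])
        else:
            rest.append(block)
    return thoughts, rest
```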
@@ -621,14 +671,24 @@
             input_tokens = response.usage_metadata.prompt_token_count
             output_tokens = response.usage_metadata.candidates_token_count
             total_tokens = response.usage_metadata.total_token_count
+            thought_tokens = response.usage_metadata.thoughts_token_count
             cache_read_tokens = response.usage_metadata.cached_content_token_count
             if input_tokens + output_tokens + cache_read_tokens + total_tokens > 0:
-                lc_usage = UsageMetadata(
-                    input_tokens=input_tokens - prev_input_tokens,
-                    output_tokens=output_tokens - prev_output_tokens,
-                    total_tokens=total_tokens - prev_total_tokens,
-                    input_token_details={"cache_read": cache_read_tokens},
-                )
+                if thought_tokens > 0:
+                    lc_usage = UsageMetadata(
+                        input_tokens=input_tokens - prev_input_tokens,
+                        output_tokens=output_tokens - prev_output_tokens,
+                        total_tokens=total_tokens - prev_total_tokens,
+                        input_token_details={"cache_read": cache_read_tokens},
+                        output_token_details={"reasoning": thought_tokens},
+                    )
+                else:
+                    lc_usage = UsageMetadata(
+                        input_tokens=input_tokens - prev_input_tokens,
+                        output_tokens=output_tokens - prev_output_tokens,
+                        total_tokens=total_tokens - prev_total_tokens,
+                        input_token_details={"cache_read": cache_read_tokens},
+                    )
             else:
                 lc_usage = None
         except AttributeError:
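On the consumer side, reasoning tokens are only reported when thought tokens were actually spent. A sketch, assuming the preview model accepts `thinking_budget`:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="models/gemini-2.5-flash-preview-04-17",  # assumed available
    thinking_budget=512,
)
response = llm.invoke("Briefly, why is the sky blue?")

usage = response.usage_metadata or {}
# "output_token_details" carries {"reasoning": ...} only when thought_tokens > 0.
reasoning_tokens = usage.get("output_token_details", {}).get("reasoning", 0)
```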
@@ -646,6 +706,13 @@
             proto.Message.to_dict(safety_rating, use_integers_for_enums=False)
             for safety_rating in candidate.safety_ratings
         ]
+        try:
+            if candidate.grounding_metadata:
+                generation_info["grounding_metadata"] = proto.Message.to_dict(
+                    candidate.grounding_metadata
+                )
+        except AttributeError:
+            pass
         message = _parse_response_candidate(candidate, streaming=stream)
         message.usage_metadata = lc_usage
         if stream:
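`grounding_metadata` is attached to `generation_info`, which is reachable through `generate()` rather than `invoke()`. A sketch reusing the `llm` from the previous snippet; the key stays absent unless the response actually carried grounding data:

```python
from langchain_core.messages import HumanMessage

result = llm.generate([[HumanMessage("What happened in the news today?")]])
gen_info = result.generations[0][0].generation_info or {}
grounding = gen_info.get("grounding_metadata")  # dict when present, else None
```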
@@ -700,7 +767,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
 
             from langchain_google_genai import ChatGoogleGenerativeAI
 
-            llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
+            llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-001")
             llm.invoke("Write me a ballad about LangChain")
 
     Invoke:
@@ -785,7 +852,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
            file = client.files.get(name=file.name)
 
            # Create cache
-           model = 'models/gemini-1.5-flash-001'
+           model = 'models/gemini-1.5-flash-latest'
            cache = client.caches.create(
                model=model,
                config=types.CreateCachedContentConfig(
@@ -841,7 +908,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                ],
            )
        ]
-        model = "gemini-1.5-flash-001"
+        model = "gemini-1.5-flash-latest"
         cache = client.caches.create(
             model=model,
             config=CreateCachedContentConfig(
@@ -1015,11 +1082,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
        ``cachedContents/{cachedContent}``.
    """
 
+    model_kwargs: dict[str, Any] = Field(default_factory=dict)
+    """Holds any unexpected initialization parameters."""
+
     def __init__(self, **kwargs: Any) -> None:
         """Needed for arg validation."""
         # Get all valid field names, including aliases
         valid_fields = set()
-        for field_name, field_info in self.model_fields.items():
+        for field_name, field_info in self.__class__.model_fields.items():
             valid_fields.add(field_name)
             if hasattr(field_info, "alias") and field_info.alias is not None:
                 valid_fields.add(field_info.alias)
@@ -1061,6 +1131,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     def is_lc_serializable(self) -> bool:
         return True
 
+    @model_validator(mode="before")
+    @classmethod
+    def build_extra(cls, values: dict[str, Any]) -> Any:
+        """Build extra kwargs from additional params that were passed in."""
+        all_required_field_names = get_pydantic_field_names(cls)
+        values = _build_model_kwargs(values, all_required_field_names)
+        return values
+
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validates params and passes them to google-generativeai package."""
1080
1158
 
1081
1159
  additional_headers = self.additional_headers or {}
1082
1160
  self.default_metadata = tuple(additional_headers.items())
1083
- client_info = get_client_info("ChatGoogleGenerativeAI")
1161
+ client_info = get_client_info(f"ChatGoogleGenerativeAI:{self.model}")
1084
1162
  google_api_key = None
1085
1163
  if not self.credentials:
1086
1164
  if isinstance(self.google_api_key, SecretStr):
@@ -1120,7 +1198,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
1120
1198
  self.async_client_running = genaix.build_generative_async_service(
1121
1199
  credentials=self.credentials,
1122
1200
  api_key=google_api_key,
1123
- client_info=get_client_info("ChatGoogleGenerativeAI"),
1201
+ client_info=get_client_info(f"ChatGoogleGenerativeAI:{self.model}"),
1124
1202
  client_options=self.client_options,
1125
1203
  transport=transport,
1126
1204
  )
@@ -1136,6 +1214,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             "n": self.n,
             "safety_settings": self.safety_settings,
             "response_modalities": self.response_modalities,
+            "thinking_budget": self.thinking_budget,
+            "include_thoughts": self.include_thoughts,
         }
 
     def invoke(
@@ -1179,9 +1259,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = self._get_invocation_params(stop=stop, **kwargs)
+        models_prefix = "models/"
+        ls_model_name = (
+            self.model[len(models_prefix) :]
+            if self.model and self.model.startswith(models_prefix)
+            else self.model
+        )
         ls_params = LangSmithParams(
             ls_provider="google_genai",
-            ls_model_name=self.model,
+            ls_model_name=ls_model_name,
             ls_model_type="chat",
             ls_temperature=params.get("temperature", self.temperature),
         )
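The prefix strip only affects the name reported to LangSmith. A self-contained sketch of the same logic:

```python
def ls_model_name(model: str) -> str:
    """Strip the "models/" prefix for tracing, mirroring the diff above."""
    prefix = "models/"
    return model[len(prefix):] if model.startswith(prefix) else model


assert ls_model_name("models/gemini-2.0-flash-001") == "gemini-2.0-flash-001"
assert ls_model_name("gemini-2.0-flash-001") == "gemini-2.0-flash-001"
```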
@@ -1206,6 +1292,20 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 "top_k": self.top_k,
                 "top_p": self.top_p,
                 "response_modalities": self.response_modalities,
+                "thinking_config": (
+                    (
+                        {"thinking_budget": self.thinking_budget}
+                        if self.thinking_budget is not None
+                        else {}
+                    )
+                    | (
+                        {"include_thoughts": self.include_thoughts}
+                        if self.include_thoughts is not None
+                        else {}
+                    )
+                )
+                if self.thinking_budget is not None or self.include_thoughts is not None
+                else None,
             }.items()
             if v is not None
         }
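The merge above only produces a `thinking_config` when at least one of the two knobs is set. Replicated standalone for clarity (the dict `|` operator needs Python 3.9+, which matches this package's floor):

```python
thinking_budget, include_thoughts = 1024, True

thinking_config = (
    ({"thinking_budget": thinking_budget} if thinking_budget is not None else {})
    | ({"include_thoughts": include_thoughts} if include_thoughts is not None else {})
) if (thinking_budget is not None or include_thoughts is not None) else None

assert thinking_config == {"thinking_budget": 1024, "include_thoughts": True}
```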
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.5}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "2.1.3"
+version = "2.1.5"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -12,8 +12,8 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = "^0.3.52"
-google-ai-generativelanguage = "^0.6.16"
+langchain-core = "^0.3.62"
+google-ai-generativelanguage = "^0.6.18"
 pydantic = ">=2,<3"
 filetype = "^1.2.0"
 
@@ -27,8 +27,9 @@ pytest-mock = "^3.10.0"
 syrupy = "^4.0.2"
 pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
+pytest-retry = "^1.7.0"
 numpy = ">=1.26.2"
-langchain-tests = "0.3.18"
+langchain-tests = "0.3.19"
 
 [tool.codespell]
 ignore-words-list = "rouge"