langchain-google-genai 2.1.3__tar.gz → 2.1.4__tar.gz

This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.


Files changed (16)
  1. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/PKG-INFO +18 -2
  2. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/README.md +16 -0
  3. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_common.py +10 -0
  4. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_function_utils.py +2 -0
  5. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/chat_models.py +43 -10
  6. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/pyproject.toml +3 -3
  7. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/LICENSE +0 -0
  8. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/__init__.py +0 -0
  9. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_enums.py +0 -0
  10. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_genai_extension.py +0 -0
  11. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_image_utils.py +0 -0
  12. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/embeddings.py +0 -0
  13. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/genai_aqa.py +0 -0
  14. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/google_vector_store.py +0 -0
  15. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/llms.py +0 -0
  16. {langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.3
+Version: 2.1.4
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -12,7 +12,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: filetype (>=1.2.0,<2.0.0)
-Requires-Dist: google-ai-generativelanguage (>=0.6.16,<0.7.0)
+Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
 Requires-Dist: langchain-core (>=0.3.52,<0.4.0)
 Requires-Dist: pydantic (>=2,<3)
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
@@ -116,6 +116,22 @@ chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
 ```
 
+#### Thinking support
+
+The Gemini 2.5 Flash model supports reasoning through its thoughts:
+
+```
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-04-17", thinking_budget=1024)
+
+response = llm.invoke(
+    "How many O's are in Google? Please tell me how you double checked the result"
+)
+
+assert response.usage_metadata["output_token_details"]["reasoning"] > 0
+```
+
 ## Embeddings
 
 This package also adds support for google's embeddings models.
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/README.md

@@ -95,6 +95,22 @@ chain = {"animal": RunnablePassthrough()} | prompt | llm
 res = chain.invoke("cat")
 ```
 
+#### Thinking support
+
+The Gemini 2.5 Flash model supports reasoning through its thoughts:
+
+```
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-04-17", thinking_budget=1024)
+
+response = llm.invoke(
+    "How many O's are in Google? Please tell me how you double checked the result"
+)
+
+assert response.usage_metadata["output_token_details"]["reasoning"] > 0
+```
+
 ## Embeddings
 
 This package also adds support for google's embeddings models.
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_common.py

@@ -1,3 +1,4 @@
+import os
 from importlib import metadata
 from typing import Any, Dict, List, Optional, Tuple, TypedDict
 
@@ -7,6 +8,9 @@ from pydantic import BaseModel, Field, SecretStr
 
 from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory, Modality
 
+_TELEMETRY_TAG = "remote_reasoning_engine"
+_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
+
 
 class GoogleGenerativeAIError(Exception):
     """
@@ -76,6 +80,10 @@ Supported examples:
         default=None, description=("A list of modalities of the response")
     )
 
+    thinking_budget: Optional[int] = Field(
+        default=None, description="Indicates the thinking budget in tokens."
+    )
+
     safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
     """The default safety settings to use for all generations.
 
@@ -124,6 +132,8 @@ def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]:
     client_library_version = (
         f"{langchain_version}-{module}" if module else langchain_version
    )
+    if os.environ.get(_TELEMETRY_ENV_VARIABLE_NAME):
+        client_library_version += f"+{_TELEMETRY_TAG}"
     return client_library_version, f"langchain-google-genai/{client_library_version}"
 
 
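Taken together, the two _common.py changes tag outbound telemetry when the library runs inside a Google Cloud Agent Engine runtime, detected purely via the environment variable. A minimal standalone sketch of the resulting behavior (a reimplementation for illustration, not the package's actual code):

```python
import os

_TELEMETRY_TAG = "remote_reasoning_engine"
_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"


def user_agent(client_library_version: str) -> str:
    # Append the telemetry tag when the Agent Engine env variable is set.
    if os.environ.get(_TELEMETRY_ENV_VARIABLE_NAME):
        client_library_version += f"+{_TELEMETRY_TAG}"
    return f"langchain-google-genai/{client_library_version}"


os.environ[_TELEMETRY_ENV_VARIABLE_NAME] = "my-engine-id"  # hypothetical ID
assert user_agent("2.1.4") == "langchain-google-genai/2.1.4+remote_reasoning_engine"
```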
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/_function_utils.py

@@ -384,6 +384,8 @@ def _get_items_from_schema(schema: Union[Dict, List, str]) -> Dict[str, Any]:
         items["type_"] = _get_type_from_schema(schema)
         if items["type_"] == glm.Type.OBJECT and "properties" in schema:
             items["properties"] = _get_properties_from_schema_any(schema["properties"])
+        if items["type_"] == glm.Type.ARRAY and "items" in schema:
+            items["items"] = _format_json_schema_to_gapic(schema["items"])
         if "title" in schema or "description" in schema:
             items["description"] = (
                 schema.get("description") or schema.get("title") or ""
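The new ARRAY branch makes the schema conversion recurse into nested arrays. An illustrative JSON schema of the shape that was previously mishandled (a plain dict for illustration; the real code emits google.ai.generativelanguage Schema protos):

```python
# JSON schema for list[list[int]]. Before this fix, conversion recursed
# into OBJECT properties but not into a nested ARRAY, so the inner
# "items" below was silently dropped from the gapic Schema.
schema = {
    "type": "array",
    "items": {
        "type": "array",
        "items": {"type": "integer"},  # now preserved by the recursive call
    },
}
```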
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/langchain_google_genai/chat_models.py

@@ -78,7 +78,9 @@ from langchain_core.output_parsers.openai_tools import (
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable, RunnableConfig, RunnablePassthrough
 from langchain_core.tools import BaseTool
+from langchain_core.utils import get_pydantic_field_names
 from langchain_core.utils.function_calling import convert_to_openai_tool
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -463,7 +465,7 @@ def _parse_response_candidate(
         try:
             text: Optional[str] = part.text
             # Remove erroneous newline character if present
-            if text is not None:
+            if not streaming and text is not None:
                 text = text.rstrip("\n")
         except AttributeError:
             text = None
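The new guard matters because, while streaming, each parsed part holds only a fragment of the final text; stripping a fragment's trailing newline deletes a real separator between chunks. A toy illustration of the failure mode (not the package's code):

```python
chunks = ["line one\n", "line two"]

# Old behavior: per-chunk rstrip swallowed the newline between chunks.
assert "".join(c.rstrip("\n") for c in chunks) == "line oneline two"

# New behavior: chunks pass through untouched while streaming, so the
# accumulated text keeps its interior newlines.
assert "".join(chunks) == "line one\nline two"
```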
@@ -621,14 +623,24 @@ def _response_to_result(
         input_tokens = response.usage_metadata.prompt_token_count
         output_tokens = response.usage_metadata.candidates_token_count
         total_tokens = response.usage_metadata.total_token_count
+        thought_tokens = response.usage_metadata.thoughts_token_count
         cache_read_tokens = response.usage_metadata.cached_content_token_count
         if input_tokens + output_tokens + cache_read_tokens + total_tokens > 0:
-            lc_usage = UsageMetadata(
-                input_tokens=input_tokens - prev_input_tokens,
-                output_tokens=output_tokens - prev_output_tokens,
-                total_tokens=total_tokens - prev_total_tokens,
-                input_token_details={"cache_read": cache_read_tokens},
-            )
+            if thought_tokens > 0:
+                lc_usage = UsageMetadata(
+                    input_tokens=input_tokens - prev_input_tokens,
+                    output_tokens=output_tokens - prev_output_tokens,
+                    total_tokens=total_tokens - prev_total_tokens,
+                    input_token_details={"cache_read": cache_read_tokens},
+                    output_token_details={"reasoning": thought_tokens},
+                )
+            else:
+                lc_usage = UsageMetadata(
+                    input_tokens=input_tokens - prev_input_tokens,
+                    output_tokens=output_tokens - prev_output_tokens,
+                    total_tokens=total_tokens - prev_total_tokens,
+                    input_token_details={"cache_read": cache_read_tokens},
+                )
         else:
             lc_usage = None
     except AttributeError:
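The thoughts_token_count field is new in google-ai-generativelanguage 0.6.18 (hence the dependency bump) and, when positive, is surfaced as output_token_details["reasoning"] on LangChain's UsageMetadata. A hedged usage sketch (the model name is the preview id from the README and may change):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="models/gemini-2.5-flash-preview-04-17",  # assumed thinking-capable
    thinking_budget=1024,
)
response = llm.invoke("How many O's are in Google?")

usage = response.usage_metadata or {}
# The "reasoning" key is only present when thought tokens were spent.
reasoning = usage.get("output_token_details", {}).get("reasoning", 0)
print(f"{reasoning} reasoning tokens of {usage.get('output_tokens')} output tokens")
```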
@@ -1015,6 +1027,9 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ``cachedContents/{cachedContent}``.
     """
 
+    model_kwargs: dict[str, Any] = Field(default_factory=dict)
+    """Holds any unexpected initialization parameters."""
+
     def __init__(self, **kwargs: Any) -> None:
         """Needed for arg validation."""
         # Get all valid field names, including aliases
@@ -1061,6 +1076,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     def is_lc_serializable(self) -> bool:
         return True
 
+    @model_validator(mode="before")
+    @classmethod
+    def build_extra(cls, values: dict[str, Any]) -> Any:
+        """Build extra kwargs from additional params that were passed in."""
+        all_required_field_names = get_pydantic_field_names(cls)
+        values = _build_model_kwargs(values, all_required_field_names)
+        return values
+
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validates params and passes them to google-generativeai package."""
@@ -1080,7 +1103,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
 
         additional_headers = self.additional_headers or {}
         self.default_metadata = tuple(additional_headers.items())
-        client_info = get_client_info("ChatGoogleGenerativeAI")
+        client_info = get_client_info(f"ChatGoogleGenerativeAI:{self.model}")
         google_api_key = None
         if not self.credentials:
             if isinstance(self.google_api_key, SecretStr):
@@ -1120,7 +1143,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         self.async_client_running = genaix.build_generative_async_service(
             credentials=self.credentials,
             api_key=google_api_key,
-            client_info=get_client_info("ChatGoogleGenerativeAI"),
+            client_info=get_client_info(f"ChatGoogleGenerativeAI:{self.model}"),
             client_options=self.client_options,
             transport=transport,
         )
@@ -1136,6 +1159,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             "n": self.n,
             "safety_settings": self.safety_settings,
             "response_modalities": self.response_modalities,
+            "thinking_budget": self.thinking_budget,
         }
 
     def invoke(
@@ -1179,9 +1203,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = self._get_invocation_params(stop=stop, **kwargs)
+        models_prefix = "models/"
+        ls_model_name = (
+            self.model[len(models_prefix) :]
+            if self.model and self.model.startswith(models_prefix)
+            else self.model
+        )
         ls_params = LangSmithParams(
             ls_provider="google_genai",
-            ls_model_name=self.model,
+            ls_model_name=ls_model_name,
             ls_model_type="chat",
             ls_temperature=params.get("temperature", self.temperature),
         )
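The effect is that LangSmith traces record the bare model id rather than the fully qualified resource name; the transformation is just a guarded prefix strip:

```python
model = "models/gemini-2.5-flash-preview-04-17"
prefix = "models/"
# Strip the resource prefix only when present; pass other ids through.
ls_model_name = model[len(prefix):] if model.startswith(prefix) else model
assert ls_model_name == "gemini-2.5-flash-preview-04-17"
```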
@@ -1206,6 +1236,9 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             "top_k": self.top_k,
             "top_p": self.top_p,
             "response_modalities": self.response_modalities,
+            "thinking_config": {"thinking_budget": self.thinking_budget}
+            if self.thinking_budget is not None
+            else None,
         }.items()
         if v is not None
     }
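The surrounding dict comprehension drops None values, so thinking_config only reaches the API when a budget was actually set. A minimal sketch of that filtering (standalone illustration, not the package's code):

```python
def generation_config(thinking_budget=None, temperature=None):
    # Mirror of the dict comprehension above: keys with None values
    # are filtered out before the config is sent to the API.
    raw = {
        "temperature": temperature,
        "thinking_config": {"thinking_budget": thinking_budget}
        if thinking_budget is not None
        else None,
    }
    return {k: v for k, v in raw.items() if v is not None}


assert generation_config(thinking_budget=1024) == {
    "thinking_config": {"thinking_budget": 1024}
}
assert generation_config() == {}
```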
{langchain_google_genai-2.1.3 → langchain_google_genai-2.1.4}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "2.1.3"
+version = "2.1.4"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -13,7 +13,7 @@ license = "MIT"
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
 langchain-core = "^0.3.52"
-google-ai-generativelanguage = "^0.6.16"
+google-ai-generativelanguage = "^0.6.18"
 pydantic = ">=2,<3"
 filetype = "^1.2.0"
 
@@ -28,7 +28,7 @@ syrupy = "^4.0.2"
 pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
 numpy = ">=1.26.2"
-langchain-tests = "0.3.18"
+langchain-tests = "0.3.19"
 
 [tool.codespell]
 ignore-words-list = "rouge"