langchain-google-genai 2.1.0__tar.gz → 2.1.2__tar.gz

This diff shows the contents of the publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two versions.

Note: this release of langchain-google-genai has been flagged as potentially problematic.

Files changed (16)
  1. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/PKG-INFO +39 -2
  2. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/README.md +37 -0
  3. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/__init__.py +2 -1
  4. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/_common.py +5 -2
  5. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/_enums.py +2 -1
  6. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/_function_utils.py +54 -20
  7. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/chat_models.py +154 -12
  8. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/pyproject.toml +3 -3
  9. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/LICENSE +0 -0
  10. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/_genai_extension.py +0 -0
  11. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/_image_utils.py +0 -0
  12. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/embeddings.py +0 -0
  13. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/genai_aqa.py +0 -0
  14. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/google_vector_store.py +0 -0
  15. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/llms.py +0 -0
  16. {langchain_google_genai-2.1.0 → langchain_google_genai-2.1.2}/langchain_google_genai/py.typed +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langchain-google-genai
- Version: 2.1.0
+ Version: 2.1.2
  Summary: An integration package connecting Google's genai package and LangChain
  Home-page: https://github.com/langchain-ai/langchain-google
  License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Requires-Dist: filetype (>=1.2.0,<2.0.0)
  Requires-Dist: google-ai-generativelanguage (>=0.6.16,<0.7.0)
- Requires-Dist: langchain-core (>=0.3.43,<0.4.0)
+ Requires-Dist: langchain-core (>=0.3.49,<0.4.0)
  Requires-Dist: pydantic (>=2,<3)
  Project-URL: Repository, https://github.com/langchain-ai/langchain-google
  Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
@@ -77,7 +77,44 @@ The value of `image_url` can be any of the following:
  - An accessible gcs file (e.g., "gcs://path/to/file.png")
  - A base64 encoded image (e.g., `data:image/png;base64,abcd124`)

+ #### Multimodal outputs

+ Gemini 2.0 Flash Experimental model supports text output with inline images
+
+ ```
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp-image-generation")
+ # example
+ response = llm.invoke(
+     "Generate an image of a cat and say meow",
+     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
+ )
+
+ # Base64 encoded binary data of the image
+ image_base64 = response.content[0].get("image_url").get("url").split(",")[-1]
+ meow_str = response.content[1]
+ ```
+
+ #### Multimodal Outputs in Chains
+
+ ```
+ from langchain_core.runnables import RunnablePassthrough
+ from langchain_core.prompts import ChatPromptTemplate
+
+ from langchain_google_genai import ChatGoogleGenerativeAI, Modality
+
+ llm = ChatGoogleGenerativeAI(
+     model="models/gemini-2.0-flash-exp-image-generation",
+     response_modalities=[Modality.TEXT, Modality.IMAGE],
+ )
+
+ prompt = ChatPromptTemplate(
+     [("human", "Generate an image of {animal} and tell me the sound of the animal")]
+ )
+ chain = {"animal": RunnablePassthrough()} | prompt | llm
+ res = chain.invoke("cat")
+ ```

  ## Embeddings

@@ -56,7 +56,44 @@ The value of `image_url` can be any of the following:
  - An accessible gcs file (e.g., "gcs://path/to/file.png")
  - A base64 encoded image (e.g., `data:image/png;base64,abcd124`)

+ #### Multimodal outputs

+ Gemini 2.0 Flash Experimental model supports text output with inline images
+
+ ```
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp-image-generation")
+ # example
+ response = llm.invoke(
+     "Generate an image of a cat and say meow",
+     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
+ )
+
+ # Base64 encoded binary data of the image
+ image_base64 = response.content[0].get("image_url").get("url").split(",")[-1]
+ meow_str = response.content[1]
+ ```
+
+ #### Multimodal Outputs in Chains
+
+ ```
+ from langchain_core.runnables import RunnablePassthrough
+ from langchain_core.prompts import ChatPromptTemplate
+
+ from langchain_google_genai import ChatGoogleGenerativeAI, Modality
+
+ llm = ChatGoogleGenerativeAI(
+     model="models/gemini-2.0-flash-exp-image-generation",
+     response_modalities=[Modality.TEXT, Modality.IMAGE],
+ )
+
+ prompt = ChatPromptTemplate(
+     [("human", "Generate an image of {animal} and tell me the sound of the animal")]
+ )
+ chain = {"animal": RunnablePassthrough()} | prompt | llm
+ res = chain.invoke("cat")
+ ```

  ## Embeddings

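The `image_url` entries returned in `response.content` are data URLs of the form `data:image/png;base64,<payload>`, so the payload can be decoded with the standard library. A minimal sketch, reusing the `response` object from the README example above (the output filename is illustrative):

```python
import base64

# First content block holds the generated image as a base64 data URL.
data_url = response.content[0]["image_url"]["url"]

# Drop the "data:image/png;base64," prefix and decode the remainder.
image_bytes = base64.b64decode(data_url.split(",")[-1])

with open("cat.png", "wb") as f:
    f.write(image_bytes)
```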
@@ -55,7 +55,7 @@ embeddings.embed_query("hello, world!")
  ```
  """  # noqa: E501

- from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory
+ from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory, Modality
  from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
  from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
  from langchain_google_genai.genai_aqa import (
@@ -80,5 +80,6 @@ __all__ = [
      "GoogleVectorStore",
      "HarmBlockThreshold",
      "HarmCategory",
+     "Modality",
      "DoesNotExistsException",
  ]
@@ -1,11 +1,11 @@
  from importlib import metadata
- from typing import Any, Dict, Optional, Tuple, TypedDict
+ from typing import Any, Dict, List, Optional, Tuple, TypedDict

  from google.api_core.gapic_v1.client_info import ClientInfo
  from langchain_core.utils import secret_from_env
  from pydantic import BaseModel, Field, SecretStr

- from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory
+ from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory, Modality


  class GoogleGenerativeAIError(Exception):
@@ -72,6 +72,9 @@ Supported examples:
              "A key-value dictionary representing additional headers for the model call"
          ),
      )
+     response_modalities: Optional[List[Modality]] = Field(
+         default=None, description=("A list of modalities of the response")
+     )

      safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
      """The default safety settings to use for all generations.
@@ -2,5 +2,6 @@ import google.ai.generativelanguage_v1beta as genai

  HarmBlockThreshold = genai.SafetySetting.HarmBlockThreshold
  HarmCategory = genai.HarmCategory
+ Modality = genai.GenerationConfig.Modality

- __all__ = ["HarmBlockThreshold", "HarmCategory"]
+ __all__ = ["HarmBlockThreshold", "HarmCategory", "Modality"]
@@ -61,18 +61,22 @@ _ALLOWED_SCHEMA_FIELDS_SET = set(_ALLOWED_SCHEMA_FIELDS)
  _FunctionDeclarationLike = Union[
      BaseTool, Type[BaseModel], gapic.FunctionDeclaration, Callable, Dict[str, Any]
  ]
+ _GoogleSearchRetrievalLike = Union[
+     gapic.GoogleSearchRetrieval,
+     Dict[str, Any],
+ ]


  class _ToolDict(TypedDict):
      function_declarations: Sequence[_FunctionDeclarationLike]
+     google_search_retrieval: Optional[_GoogleSearchRetrievalLike]


  # Info: This means one tool=Sequence of FunctionDeclaration
  # The dict should be gapic.Tool like. {"function_declarations": [ { "name": ...}.
  # OpenAI like dict is not be accepted. {{'type': 'function', 'function': {'name': ...}
- _ToolsType = Union[
-     gapic.Tool, _ToolDict, _FunctionDeclarationLike, Sequence[_FunctionDeclarationLike]
- ]
+ _ToolType = Union[gapic.Tool, _ToolDict, _FunctionDeclarationLike]
+ _ToolsType = Sequence[_ToolType]


  def _format_json_schema_to_gapic(schema: Dict[str, Any]) -> Dict[str, Any]:
@@ -122,7 +126,7 @@ def _format_dict_to_function_declaration(

  # Info: gapic.Tool means function_declarations and proto.Message.
  def convert_to_genai_function_declarations(
-     tools: Sequence[_ToolsType],
+     tools: _ToolsType,
  ) -> gapic.Tool:
      if not isinstance(tools, collections.abc.Sequence):
          logger.warning(
@@ -132,24 +136,54 @@ def convert_to_genai_function_declarations(
          tools = [tools]
      gapic_tool = gapic.Tool()
      for tool in tools:
-         if isinstance(tool, gapic.Tool):
-             gapic_tool.function_declarations.extend(tool.function_declarations)  # type: ignore[union-attr]
-         elif isinstance(tool, dict) and "function_declarations" not in tool:
-             fd = _format_to_gapic_function_declaration(tool)
-             gapic_tool.function_declarations.append(fd)
+         if any(f in gapic_tool for f in ["google_search_retrieval"]):
+             raise ValueError(
+                 "Providing multiple google_search_retrieval"
+                 " or mixing with function_declarations is not supported"
+             )
+         if isinstance(tool, (gapic.Tool)):
+             rt: gapic.Tool = (
+                 tool if isinstance(tool, gapic.Tool) else tool._raw_tool  # type: ignore
+             )
+             if "google_search_retrieval" in rt:
+                 gapic_tool.google_search_retrieval = rt.google_search_retrieval
+             if "function_declarations" in rt:
+                 gapic_tool.function_declarations.extend(rt.function_declarations)
+             if "google_search" in rt:
+                 gapic_tool.google_search = rt.google_search
          elif isinstance(tool, dict):
-             function_declarations = cast(_ToolDict, tool)["function_declarations"]
-             if not isinstance(function_declarations, collections.abc.Sequence):
-                 raise ValueError(
-                     "function_declarations should be a list"
-                     f"got '{type(function_declarations)}'"
-                 )
-             if function_declarations:
-                 fds = [
-                     _format_to_gapic_function_declaration(fd)
-                     for fd in function_declarations
+             # not _ToolDictLike
+             if not any(
+                 f in tool
+                 for f in [
+                     "function_declarations",
+                     "google_search_retrieval",
                  ]
-                 gapic_tool.function_declarations.extend(fds)
+             ):
+                 fd = _format_to_gapic_function_declaration(tool)  # type: ignore[arg-type]
+                 gapic_tool.function_declarations.append(fd)
+                 continue
+             # _ToolDictLike
+             tool = cast(_ToolDict, tool)
+             if "function_declarations" in tool:
+                 function_declarations = tool["function_declarations"]
+                 if not isinstance(
+                     tool["function_declarations"], collections.abc.Sequence
+                 ):
+                     raise ValueError(
+                         "function_declarations should be a list"
+                         f"got '{type(function_declarations)}'"
+                     )
+                 if function_declarations:
+                     fds = [
+                         _format_to_gapic_function_declaration(fd)
+                         for fd in function_declarations
+                     ]
+                     gapic_tool.function_declarations.extend(fds)
+             if "google_search_retrieval" in tool:
+                 gapic_tool.google_search_retrieval = gapic.GoogleSearchRetrieval(
+                     tool["google_search_retrieval"]
+                 )
          else:
              fd = _format_to_gapic_function_declaration(tool)  # type: ignore[arg-type]
              gapic_tool.function_declarations.append(fd)
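Beyond function declarations, the reworked converter now carries a `google_search_retrieval` entry through to the resulting `gapic.Tool`, either from an existing `gapic.Tool` or from a plain dict; mixing it with `function_declarations` raises a `ValueError`. A minimal sketch of the dict form (the empty retrieval config is illustrative):

```python
import google.ai.generativelanguage_v1beta as gapic

from langchain_google_genai._function_utils import (
    convert_to_genai_function_declarations,
)

# A dict-style tool that only carries a google_search_retrieval entry.
search_tool = {"google_search_retrieval": {}}

gapic_tool = convert_to_genai_function_declarations([search_tool])
assert isinstance(gapic_tool.google_search_retrieval, gapic.GoogleSearchRetrieval)
```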
@@ -32,6 +32,7 @@ from google.ai.generativelanguage_v1beta import (
  from google.ai.generativelanguage_v1beta.types import (
      Blob,
      Candidate,
+     CodeExecution,
      Content,
      FileData,
      FunctionCall,
@@ -45,9 +46,7 @@ from google.ai.generativelanguage_v1beta.types import (
      ToolConfig,
      VideoMetadata,
  )
- from google.ai.generativelanguage_v1beta.types import (
-     Tool as GoogleTool,
- )
+ from google.ai.generativelanguage_v1beta.types import Tool as GoogleTool
  from langchain_core.callbacks.manager import (
      AsyncCallbackManagerForLLMRun,
      CallbackManagerForLLMRun,
@@ -72,7 +71,7 @@ from langchain_core.output_parsers.openai_tools import (
      parse_tool_calls,
  )
  from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
- from langchain_core.runnables import Runnable, RunnablePassthrough
+ from langchain_core.runnables import Runnable, RunnableConfig, RunnablePassthrough
  from langchain_core.tools import BaseTool
  from langchain_core.utils.function_calling import convert_to_openai_tool
  from pydantic import (
@@ -106,7 +105,10 @@ from langchain_google_genai._function_utils import (
      is_basemodel_subclass_safe,
      tool_to_dict,
  )
- from langchain_google_genai._image_utils import ImageBytesLoader
+ from langchain_google_genai._image_utils import (
+     ImageBytesLoader,
+     image_bytes_to_b64_string,
+ )

  from . import _genai_extension as genaix

@@ -430,7 +432,7 @@ def _parse_chat_history(
  def _parse_response_candidate(
      response_candidate: Candidate, streaming: bool = False
  ) -> AIMessage:
-     content: Union[None, str, List[str]] = None
+     content: Union[None, str, List[Union[str, dict]]] = None
      additional_kwargs = {}
      tool_calls = []
      invalid_tool_calls = []
@@ -455,6 +457,61 @@ def _parse_response_candidate(
          elif text:
              raise Exception("Unexpected content type")

+         if hasattr(part, "executable_code") and part.executable_code is not None:
+             if part.executable_code.code and part.executable_code.language:
+                 code_message = {
+                     "type": "executable_code",
+                     "executable_code": part.executable_code.code,
+                     "language": part.executable_code.language,
+                 }
+                 if not content:
+                     content = [code_message]
+                 elif isinstance(content, str):
+                     content = [content, code_message]
+                 elif isinstance(content, list):
+                     content.append(code_message)
+                 else:
+                     raise Exception("Unexpected content type")
+
+         if (
+             hasattr(part, "code_execution_result")
+             and part.code_execution_result is not None
+         ):
+             if part.code_execution_result.output:
+                 execution_result = {
+                     "type": "code_execution_result",
+                     "code_execution_result": part.code_execution_result.output,
+                 }
+
+                 if not content:
+                     content = [execution_result]
+                 elif isinstance(content, str):
+                     content = [content, execution_result]
+                 elif isinstance(content, list):
+                     content.append(execution_result)
+                 else:
+                     raise Exception("Unexpected content type")
+
+         if part.inline_data.mime_type.startswith("image/"):
+             image_format = part.inline_data.mime_type[6:]
+             message = {
+                 "type": "image_url",
+                 "image_url": {
+                     "url": image_bytes_to_b64_string(
+                         part.inline_data.data, image_format=image_format
+                     )
+                 },
+             }
+
+             if not content:
+                 content = [message]
+             elif isinstance(content, str) and message:
+                 content = [content, message]
+             elif isinstance(content, list) and message:
+                 content.append(message)
+             elif message:
+                 raise Exception("Unexpected content type")
+
          if part.function_call:
              function_call = {"name": part.function_call.name}
              # dump to match other function calling llm for now
@@ -498,6 +555,16 @@ def _parse_response_candidate(
                  )
      if content is None:
          content = ""
+     if any(isinstance(item, dict) and "executable_code" in item for item in content):
+         warnings.warn(
+             """
+             ⚠️ Warning: Output may vary each run.
+             - 'executable_code': Always present.
+             - 'execution_result' & 'image_url': May be absent for some queries.
+
+             Validate before using in production.
+             """
+         )

      if streaming:
          return AIMessageChunk(
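With these branches in place, `AIMessage.content` can be a list mixing plain text with `executable_code`, `code_execution_result`, and `image_url` dicts, and the warning above fires whenever executable code is present. A minimal sketch of unpacking such a mixed result (assumes `response` is an `AIMessage` returned by a code-execution or image-generation call):

```python
# Walk the mixed content list and dispatch on each block's type.
blocks = response.content if isinstance(response.content, list) else [response.content]
for block in blocks:
    if isinstance(block, str):
        print("text:", block)
    elif block.get("type") == "executable_code":
        print(f"code ({block['language']}):\n{block['executable_code']}")
    elif block.get("type") == "code_execution_result":
        print("result:", block["code_execution_result"])
    elif block.get("type") == "image_url":
        print("image data URL:", block["image_url"]["url"][:40], "...")
```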
@@ -552,6 +619,8 @@ def _response_to_result(
          generation_info = {}
          if candidate.finish_reason:
              generation_info["finish_reason"] = candidate.finish_reason.name
+             # Add model_name in last chunk
+             generation_info["model_name"] = response.model_version
          generation_info["safety_ratings"] = [
              proto.Message.to_dict(safety_rating, use_integers_for_enums=False)
              for safety_rating in candidate.safety_ratings
@@ -713,6 +782,16 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
              'args': {'location': 'New York City, NY'},
              'id': '634582de-5186-4e4b-968b-f192f0a93678'}]

+     Use Search with Gemini 2:
+         .. code-block:: python
+
+             from google.ai.generativelanguage_v1beta.types import Tool as GenAITool
+             llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp")
+             resp = llm.invoke(
+                 "When is the next total solar eclipse in US?",
+                 tools=[GenAITool(google_search={})],
+             )
+
      Structured output:
          .. code-block:: python

@@ -824,6 +903,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
      def _llm_type(self) -> str:
          return "chat-google-generative-ai"

+     @property
+     def _supports_code_execution(self) -> bool:
+         return (
+             "gemini-1.5-pro" in self.model
+             or "gemini-1.5-flash" in self.model
+             or "gemini-2" in self.model
+         )
+
      @classmethod
      def is_lc_serializable(self) -> bool:
          return True
@@ -831,8 +918,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
      @model_validator(mode="after")
      def validate_environment(self) -> Self:
          """Validates params and passes them to google-generativeai package."""
-         if self.temperature is not None and not 0 <= self.temperature <= 1:
-             raise ValueError("temperature must be in the range [0.0, 1.0]")
+         if self.temperature is not None and not 0 <= self.temperature <= 2.0:
+             raise ValueError("temperature must be in the range [0.0, 2.0]")

          if self.top_p is not None and not 0 <= self.top_p <= 1:
              raise ValueError("top_p must be in the range [0.0, 1.0]")
@@ -895,8 +982,45 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
              "top_k": self.top_k,
              "n": self.n,
              "safety_settings": self.safety_settings,
+             "response_modalities": self.response_modalities,
          }

+     def invoke(
+         self,
+         input: LanguageModelInput,
+         config: Optional[RunnableConfig] = None,
+         *,
+         code_execution: Optional[bool] = None,
+         stop: Optional[list[str]] = None,
+         **kwargs: Any,
+     ) -> BaseMessage:
+         """
+         Enable code execution. Supported on: gemini-1.5-pro, gemini-1.5-flash,
+         gemini-2.0-flash, and gemini-2.0-pro. When enabled, the model can execute
+         code to solve problems.
+         """
+
+         """Override invoke to add code_execution parameter."""
+
+         if code_execution is not None:
+             if not self._supports_code_execution:
+                 raise ValueError(
+                     f"Code execution is only supported on Gemini 1.5 Pro, \
+                     Gemini 1.5 Flash, "
+                     f"Gemini 2.0 Flash, and Gemini 2.0 Pro models. \
+                     Current model: {self.model}"
+                 )
+             if "tools" not in kwargs:
+                 code_execution_tool = GoogleTool(code_execution=CodeExecution())
+                 kwargs["tools"] = [code_execution_tool]
+
+             else:
+                 raise ValueError(
+                     "Tools are already defined." "code_execution tool can't be defined"
+                 )
+
+         return super().invoke(input, config, stop=stop, **kwargs)
+
      def _get_ls_params(
          self, stop: Optional[List[str]] = None, **kwargs: Any
      ) -> LangSmithParams:
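A minimal usage sketch of the new `code_execution` flag (assumes `GOOGLE_API_KEY` is set; the prompt is illustrative). Under the hood it binds a `GoogleTool(code_execution=CodeExecution())`; unsupported models raise `ValueError`, as does combining the flag with an explicit `tools` argument:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")

response = llm.invoke(
    "Use Python to compute the 20th Fibonacci number.",
    code_execution=True,
)

# The content list may now include executable_code and
# code_execution_result blocks alongside the final text.
print(response.content)
```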
@@ -928,6 +1052,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                  "max_output_tokens": self.max_output_tokens,
                  "top_k": self.top_k,
                  "top_p": self.top_p,
+                 "response_modalities": self.response_modalities,
              }.items()
              if v is not None
          }
@@ -1174,8 +1299,12 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                  "Must specify at most one of tool_choice and tool_config, received "
                  f"both:\n\n{tool_choice=}\n\n{tool_config=}"
              )
+
          formatted_tools = None
-         if tools:
+         code_execution_tool = GoogleTool(code_execution=CodeExecution())
+         if tools == [code_execution_tool]:
+             formatted_tools = tools
+         elif tools:
              formatted_tools = [convert_to_genai_function_declarations(tools)]
          elif functions:
              formatted_tools = [convert_to_genai_function_declarations(functions)]
@@ -1201,9 +1330,20 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                      f"be specified if 'tools' is specified."
                  )
                  raise ValueError(msg)
-             all_names = [
-                 f.name for t in formatted_tools for f in t.function_declarations
-             ]
+             all_names: List[str] = []
+             for t in formatted_tools:
+                 if hasattr(t, "function_declarations"):
+                     t_with_declarations = cast(Any, t)
+                     all_names.extend(
+                         f.name for f in t_with_declarations.function_declarations
+                     )
+                 elif isinstance(t, GoogleTool) and hasattr(t, "code_execution"):
+                     continue
+                 else:
+                     raise TypeError(
+                         f"Tool {t} doesn't have function_declarations attribute"
+                     )
+
              tool_config = _tool_choice_to_tool_config(tool_choice, all_names)

          formatted_tool_config = None
@@ -1256,6 +1396,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
          include_raw: bool = False,
          **kwargs: Any,
      ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
+         _ = kwargs.pop("method", None)
+         _ = kwargs.pop("strict", None)
          if kwargs:
              raise ValueError(f"Received unsupported arguments {kwargs}")
          tool_name = _get_tool_name(schema)  # type: ignore[arg-type]
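`with_structured_output` now drops the `method` and `strict` keyword arguments instead of rejecting them, so callers that pass provider-agnostic options no longer hit the `Received unsupported arguments` error. A minimal sketch with a hypothetical `Person` schema (assumes `GOOGLE_API_KEY` is set):

```python
from pydantic import BaseModel

from langchain_google_genai import ChatGoogleGenerativeAI


class Person(BaseModel):
    name: str
    age: int


llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")

# `method=` was rejected with "Received unsupported arguments" in 2.1.0;
# it is now ignored and tool calling is used as before.
structured_llm = llm.with_structured_output(Person, method="function_calling")
result = structured_llm.invoke("Extract: Alice is 30 years old.")
```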
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "langchain-google-genai"
- version = "2.1.0"
+ version = "2.1.2"
  description = "An integration package connecting Google's genai package and LangChain"
  authors = []
  readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"

  [tool.poetry.dependencies]
  python = ">=3.9,<4.0"
- langchain-core = "^0.3.43"
+ langchain-core = "^0.3.49"
  google-ai-generativelanguage = "^0.6.16"
  pydantic = ">=2,<3"
  filetype = "^1.2.0"
@@ -28,7 +28,7 @@ syrupy = "^4.0.2"
  pytest-watcher = "^0.3.4"
  pytest-asyncio = "^0.21.1"
  numpy = "^1.26.2"
- langchain-tests = "0.3.14"
+ langchain-tests = "0.3.17"

  [tool.codespell]
  ignore-words-list = "rouge"