langchain-google-genai 3.0.0a1__tar.gz → 3.0.0rc1__tar.gz

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
Files changed (42)
  1. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/PKG-INFO +4 -4
  2. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/README.md +2 -2
  3. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_compat.py +43 -5
  4. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/pyproject.toml +3 -13
  5. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_callbacks.py +2 -2
  6. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_chat_models.py +112 -127
  7. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_function_call.py +4 -4
  8. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_llms.py +7 -7
  9. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_tools.py +3 -1
  10. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_chat_models.py +36 -36
  11. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_embeddings.py +15 -13
  12. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_llms.py +9 -7
  13. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/LICENSE +0 -0
  14. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/__init__.py +0 -0
  15. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_common.py +0 -0
  16. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_enums.py +0 -0
  17. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_function_utils.py +0 -0
  18. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_genai_extension.py +0 -0
  19. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_image_utils.py +0 -0
  20. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/chat_models.py +0 -0
  21. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/embeddings.py +0 -0
  22. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/genai_aqa.py +0 -0
  23. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/google_vector_store.py +0 -0
  24. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/llms.py +0 -0
  25. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/py.typed +0 -0
  26. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/__init__.py +0 -0
  27. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/conftest.py +0 -0
  28. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/.env.example +0 -0
  29. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/__init__.py +0 -0
  30. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/terraform/main.tf +0 -0
  31. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_compile.py +0 -0
  32. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_embeddings.py +0 -0
  33. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_standard.py +0 -0
  34. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/__init__.py +0 -0
  35. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/__snapshots__/test_standard.ambr +0 -0
  36. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_chat_models_protobuf_fix.py +0 -0
  37. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_common.py +0 -0
  38. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_function_utils.py +0 -0
  39. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_genai_aqa.py +0 -0
  40. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_google_vector_store.py +0 -0
  41. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_imports.py +0 -0
  42. {langchain_google_genai-3.0.0a1 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_standard.py +0 -0
PKG-INFO
@@ -1,13 +1,13 @@
  Metadata-Version: 2.1
  Name: langchain-google-genai
- Version: 3.0.0a1
+ Version: 3.0.0rc1
  Summary: An integration package connecting Google's genai package and LangChain
  License: MIT
  Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
  Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
  Project-URL: repository, https://github.com/langchain-ai/langchain-google
  Requires-Python: <4.0.0,>=3.10.0
- Requires-Dist: langchain-core<2.0.0,>=1.0.0a4
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0rc2
  Requires-Dist: google-ai-generativelanguage<1.0.0,>=0.7.0
  Requires-Dist: pydantic<3.0.0,>=2.0.0
  Requires-Dist: filetype<2.0.0,>=1.2.0
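
Both bumps move from an alpha to a release-candidate identifier. Under PEP 440, `a1` sorts before `rc1`, and both sort before the final release, so the existing `<2.0.0` upper bounds still behave as expected. A quick check (assuming the `packaging` library is available):

```python
from packaging.version import Version

# PEP 440 pre-release ordering: alpha < release candidate < final release.
assert Version("3.0.0a1") < Version("3.0.0rc1") < Version("3.0.0")
assert Version("1.0.0a4") < Version("1.0.0rc2") < Version("1.0.0")
```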
@@ -76,7 +76,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
  ```python
  from langchain_google_genai import ChatGoogleGenerativeAI

- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")
  response = llm.invoke("Sing a ballad of LangChain.")
  print(response.content)
  ```
@@ -97,7 +97,7 @@ Most Gemini models support image inputs.
  from langchain_core.messages import HumanMessage
  from langchain_google_genai import ChatGoogleGenerativeAI

- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")

  message = HumanMessage(
      content=[
README.md
@@ -61,7 +61,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
  ```python
  from langchain_google_genai import ChatGoogleGenerativeAI

- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")
  response = llm.invoke("Sing a ballad of LangChain.")
  print(response.content)
  ```
@@ -82,7 +82,7 @@ Most Gemini models support image inputs.
  from langchain_core.messages import HumanMessage
  from langchain_google_genai import ChatGoogleGenerativeAI

- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")

  message = HumanMessage(
      content=[
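
Both image-input hunks end mid-example at `content=[`. For orientation, a typical multimodal message in this README pairs a text part with an image part; the continuation below is an illustrative reconstruction (placeholder URL), not the README's verbatim text:

```python
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")

message = HumanMessage(
    content=[
        {"type": "text", "text": "What's shown in this image?"},
        {"type": "image_url", "image_url": "https://example.com/picture.png"},
    ]
)
response = llm.invoke([message])
print(response.content)
```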
langchain_google_genai/_compat.py
@@ -1,7 +1,7 @@
  """Go from v1 content blocks to generativelanguage_v1beta format."""

  import json
- from typing import Any, Optional
+ from typing import Any, Optional, cast

  from langchain_core.messages import content as types

@@ -150,8 +150,6 @@ def _convert_from_v1_to_generativelanguage_v1beta(
          elif block_dict["type"] == "reasoning" and model_provider == "google_genai":
              # Google requires passing back the thought_signature when available.
              # Signatures are only provided when function calling is enabled.
-             # If no signature is available, we skip the reasoning block as it cannot
-             # be properly serialized back to the API.
              if "extras" in block_dict and isinstance(block_dict["extras"], dict):
                  extras = block_dict["extras"]
                  if "signature" in extras:
@@ -242,7 +240,47 @@ def _convert_from_v1_to_generativelanguage_v1beta(
                  }
                  new_content.append(function_call)

-         # NonStandardContentBlock
-         # TODO: Handle new server tools
+         elif block_dict["type"] == "server_tool_call":
+             if block_dict.get("name") == "code_interpreter":
+                 # LangChain v0 format
+                 args = cast(dict, block_dict.get("args", {}))
+                 executable_code = {
+                     "type": "executable_code",
+                     "executable_code": args.get("code", ""),
+                     "language": args.get("language", ""),
+                     "id": block_dict.get("id", ""),
+                 }
+                 # Google generativelanguage format
+                 new_content.append(
+                     {
+                         "executable_code": {
+                             "language": executable_code["language"],
+                             "code": executable_code["executable_code"],
+                         }
+                     }
+                 )
+
+         elif block_dict["type"] == "server_tool_result":
+             extras = cast(dict, block_dict.get("extras", {}))
+             if extras.get("block_type") == "code_execution_result":
+                 # LangChain v0 format
+                 code_execution_result = {
+                     "type": "code_execution_result",
+                     "code_execution_result": block_dict.get("output", ""),
+                     "outcome": extras.get("outcome", ""),
+                     "tool_call_id": block_dict.get("tool_call_id", ""),
+                 }
+                 # Google generativelanguage format
+                 new_content.append(
+                     {
+                         "code_execution_result": {
+                             "outcome": code_execution_result["outcome"],
+                             "output": code_execution_result["code_execution_result"],
+                         }
+                     }
+                 )
+
+         elif block_dict["type"] == "non_standard":
+             new_content.append(block_dict["value"])

      return new_content
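
The added branches replace the old `# TODO: Handle new server tools` stub: a v1 `server_tool_call` named `code_interpreter` is serialized back as a generativelanguage `executable_code` part, a v1 `server_tool_result` carrying a `code_execution_result` becomes a `code_execution_result` part, and `non_standard` blocks pass their raw `value` through. A self-contained sketch of the same mapping (the helper name is hypothetical; the real logic lives inside `_convert_from_v1_to_generativelanguage_v1beta`):

```python
from typing import Any, cast

def convert_server_tool_blocks(blocks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Mirror the new v1 -> generativelanguage mapping for server tools."""
    new_content: list[dict[str, Any]] = []
    for block_dict in blocks:
        if block_dict["type"] == "server_tool_call":
            if block_dict.get("name") == "code_interpreter":
                args = cast(dict, block_dict.get("args", {}))
                new_content.append(
                    {
                        "executable_code": {
                            "language": args.get("language", ""),
                            "code": args.get("code", ""),
                        }
                    }
                )
        elif block_dict["type"] == "server_tool_result":
            extras = cast(dict, block_dict.get("extras", {}))
            if extras.get("block_type") == "code_execution_result":
                new_content.append(
                    {
                        "code_execution_result": {
                            "outcome": extras.get("outcome", ""),
                            "output": block_dict.get("output", ""),
                        }
                    }
                )
        elif block_dict["type"] == "non_standard":
            new_content.append(block_dict["value"])
    return new_content

# Round-trip example for a code_interpreter call and its result:
blocks = [
    {
        "type": "server_tool_call",
        "name": "code_interpreter",
        "id": "call_1",
        "args": {"code": "print(3**3)", "language": "PYTHON"},
    },
    {
        "type": "server_tool_result",
        "tool_call_id": "call_1",
        "output": "27",
        "extras": {"block_type": "code_execution_result", "outcome": "OUTCOME_OK"},
    },
]
print(convert_server_tool_blocks(blocks))
```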
pyproject.toml
@@ -6,14 +6,14 @@ build-backend = "pdm.backend"

  [project]
  name = "langchain-google-genai"
- version = "3.0.0a1"
+ version = "3.0.0rc1"
  description = "An integration package connecting Google's genai package and LangChain"
  authors = []
  requires-python = ">=3.10.0,<4.0.0"
  readme = "README.md"
  repository = "https://github.com/langchain-ai/langchain-google"
  dependencies = [
-     "langchain-core>=1.0.0a4,<2.0.0",
+     "langchain-core>=1.0.0rc2,<2.0.0",
      "google-ai-generativelanguage>=0.7.0,<1.0.0",
      "pydantic>=2.0.0,<3.0.0",
      "filetype>=1.2.0,<2.0.0",
@@ -49,23 +49,13 @@ test = [
      "pytest-socket>=0.7.0,<1.0.0",
      "numpy>=1.26.4; python_version<'3.13'",
      "numpy>=2.1.0; python_version>='3.13'",
-     "langchain-tests>=1.0.0a1,<2.0.0",
+     "langchain-tests>=1.0.0rc1,<2.0.0",
  ]
  test_integration = [
      "pytest>=8.4.0,<9.0.0",
  ]
  dev = []

- [tool.uv.sources.langchain-core]
- git = "https://github.com/langchain-ai/langchain.git"
- subdirectory = "libs/core"
- branch = "mdrxy/genai"
-
- [tool.uv.sources.langchain-tests]
- git = "https://github.com/langchain-ai/langchain.git"
- subdirectory = "libs/standard-tests"
- branch = "mdrxy/genai"
-
  [tool.ruff]
  fix = true

tests/integration_tests/test_callbacks.py
@@ -7,7 +7,7 @@ from langchain_core.prompts import PromptTemplate

  from langchain_google_genai import ChatGoogleGenerativeAI

- model_names = ["gemini-2.5-flash"]
+ MODEL_NAMES = ["gemini-flash-lite-latest"]


  class StreamingLLMCallbackHandler(BaseCallbackHandler):
@@ -25,7 +25,7 @@ class StreamingLLMCallbackHandler(BaseCallbackHandler):

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_streaming_callback(model_name: str) -> None:
      prompt_template = "Tell me details about the Company {name} with 2 bullet point?"
tests/integration_tests/test_chat_models.py
@@ -26,7 +26,7 @@ from langchain_google_genai import (
      Modality,
  )

- _MODEL = "models/gemini-2.5-flash"
+ _MODEL = "gemini-flash-lite-latest"
  _VISION_MODEL = "models/gemini-2.0-flash-001"
  _IMAGE_OUTPUT_MODEL = "models/gemini-2.0-flash-exp-image-generation"
  _AUDIO_OUTPUT_MODEL = "models/gemini-2.5-flash-preview-tts"
@@ -263,6 +263,29 @@ def test_chat_google_genai_invoke_thinking() -> None:
      assert result.usage_metadata["output_token_details"]["reasoning"] > 0


+ def _check_thinking_output(content: list, output_version: str) -> None:
+     if output_version == "v0":
+         thinking_key = "thinking"
+         assert isinstance(content[-1], str)
+
+     else:
+         # v1
+         thinking_key = "reasoning"
+         assert isinstance(content[-1], dict)
+         assert content[-1].get("type") == "text"
+         assert isinstance(content[-1].get("text"), str)
+
+     assert isinstance(content, list)
+     thinking_blocks = [
+         item
+         for item in content
+         if isinstance(item, dict) and item.get("type") == thinking_key
+     ]
+     assert thinking_blocks
+     for block in thinking_blocks:
+         assert isinstance(block[thinking_key], str)
+
+
  @pytest.mark.parametrize("output_version", ["v0", "v1"])
  def test_chat_google_genai_invoke_thinking_include_thoughts(
      output_version: str,
@@ -280,49 +303,31 @@ def test_chat_google_genai_invoke_thinking_include_thoughts(
          ),
      }

-     result = llm.invoke([input_message])
+     full: AIMessageChunk | None = None
+     for chunk in llm.stream([input_message]):
+         assert isinstance(chunk, AIMessageChunk)
+         full = chunk if full is None else full + chunk

-     assert isinstance(result, AIMessage)
-     content = result.content
+     assert isinstance(full, AIMessage)

-     response_metadata = result.response_metadata
+     response_metadata = full.response_metadata
      model_provider = response_metadata.get("model_provider", "google_genai")
      assert model_provider == "google_genai"

-     if output_version == "v0":
-         assert isinstance(content[0], dict)
-         assert content[0].get("type") == "thinking"
-         assert isinstance(content[0].get("thinking"), str)
-
-         assert isinstance(content[1], str)
-
-         _check_usage_metadata(result)
-
-         assert result.usage_metadata is not None
-         if (
-             "output_token_details" in result.usage_metadata
-             and "reasoning" in result.usage_metadata["output_token_details"]
-         ):
-             assert result.usage_metadata["output_token_details"]["reasoning"] > 0
-
-         # We don't test passing back in here as it's covered in the next test
-         # (Google requires function declaration)
-     else:
-         # v1
-         assert isinstance(content, list)
-         assert len(content) == 2
-         assert isinstance(content[0], dict)
-         assert content[0].get("type") == "reasoning"
-         assert isinstance(content[0].get("reasoning"), str)
-
-         assert isinstance(content[1], dict)
-         assert content[1].get("type") == "text"
-         assert isinstance(content[1].get("text"), str)
-
-         _check_usage_metadata(result)
+     _check_thinking_output(cast(list, full.content), output_version)
+     _check_usage_metadata(full)
+     assert full.usage_metadata is not None
+     if (
+         "output_token_details" in full.usage_metadata
+         and "reasoning" in full.usage_metadata["output_token_details"]
+     ):
+         assert full.usage_metadata["output_token_details"]["reasoning"] > 0

-         # We don't test passing back in here as it's covered in the next test
-         # (Google requires function declaration)
+     # Test we can pass back in
+     next_message = {"role": "user", "content": "Thanks!"}
+     result = llm.invoke([input_message, full, next_message])
+     assert isinstance(result, AIMessage)
+     _check_thinking_output(cast(list, result.content), output_version)


  @pytest.mark.flaky(retries=5, delay=1)
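
For reference, the content shapes that `_check_thinking_output` accepts differ between the two output versions; the literals below are illustrative placeholders, not captured model output:

```python
# v0: thought blocks use the "thinking" key and the final item is a bare string.
content_v0 = [
    {"type": "thinking", "thinking": "step-by-step reasoning trace"},
    "final answer",
]

# v1: blocks use the "reasoning" key and the final item is a typed text block.
content_v1 = [
    {"type": "reasoning", "reasoning": "step-by-step reasoning trace"},
    {"type": "text", "text": "final answer"},
]
```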
@@ -896,42 +901,47 @@ def test_model_methods_without_eventloop(is_async: bool, use_streaming: bool) ->
      assert isinstance(invoke_result, AIMessage)


- @pytest.mark.parametrize("use_streaming", [False, True])
- def test_search_builtin(use_streaming: bool) -> None:
-     llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-001").bind_tools(
-         [{"google_search": {}}]
-     )
+ def _check_web_search_output(message: AIMessage, output_version: str) -> None:
+     assert "grounding_metadata" in message.response_metadata
+
+     # Lazy parsing
+     content_blocks = message.content_blocks
+     text_blocks = [block for block in content_blocks if block["type"] == "text"]
+     assert len(text_blocks) == 1
+     text_block = text_blocks[0]
+     assert text_block["annotations"]
+
+     if output_version == "v1":
+         text_blocks = [block for block in message.content if block["type"] == "text"]  # type: ignore[misc,index]
+         assert len(text_blocks) == 1
+         text_block = text_blocks[0]
+         assert text_block["annotations"]
+
+
+ @pytest.mark.parametrize("output_version", ["v0", "v1"])
+ def test_search_builtin(output_version: str) -> None:
+     llm = ChatGoogleGenerativeAI(
+         model="models/gemini-2.0-flash-001", output_version=output_version
+     ).bind_tools([{"google_search": {}}])
      input_message = {
          "role": "user",
          "content": "What is today's news?",
      }

-     if use_streaming:
-         # Test streaming
-         full: Optional[BaseMessageChunk] = None
-         for chunk in llm.stream([input_message]):
-             assert isinstance(chunk, AIMessageChunk)
-             full = chunk if full is None else full + chunk
-         assert isinstance(full, AIMessageChunk)
-         assert "grounding_metadata" in full.response_metadata
-
-         # Test we can process chat history without raising errors
-         next_message = {
-             "role": "user",
-             "content": "Tell me more about that last story.",
-         }
-         _ = llm.invoke([input_message, full, next_message])
-     else:
-         # Test invoke
-         response = llm.invoke([input_message])
-         assert "grounding_metadata" in response.response_metadata
+     full: Optional[BaseMessageChunk] = None
+     for chunk in llm.stream([input_message]):
+         assert isinstance(chunk, AIMessageChunk)
+         full = chunk if full is None else full + chunk
+     assert isinstance(full, AIMessageChunk)
+     _check_web_search_output(full, output_version)

-         # Test we can process chat history without raising errors
-         next_message = {
-             "role": "user",
-             "content": "Tell me more about that last story.",
-         }
-         _ = llm.invoke([input_message, response, next_message])
+     # Test we can process chat history without raising errors
+     next_message = {
+         "role": "user",
+         "content": "Tell me more about that last story.",
+     }
+     response = llm.invoke([input_message, full, next_message])
+     _check_web_search_output(response, output_version)


  @pytest.mark.parametrize("use_streaming", [False, True])
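
The `full = chunk if full is None else full + chunk` idiom used throughout these tests relies on `AIMessageChunk` supporting `+`, which merges streamed chunks into one message that can then be passed back as chat history. A minimal self-contained illustration:

```python
from langchain_core.messages import AIMessageChunk

# Fold a stream of chunks into a single message, as the tests do with llm.stream().
full = None
for chunk in (AIMessageChunk(content="Hel"), AIMessageChunk(content="lo")):
    full = chunk if full is None else full + chunk

assert full is not None
assert full.content == "Hello"
```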
@@ -1055,70 +1065,45 @@ def test_search_builtin_with_citations(use_streaming: bool) -> None:
      assert isinstance(google_metadata, dict)


+ def _check_code_execution_output(message: AIMessage, output_version: str) -> None:
+     if output_version == "v0":
+         blocks = [block for block in message.content if isinstance(block, dict)]
+         expected_block_types = {"executable_code", "code_execution_result"}
+         assert {block.get("type") for block in blocks} == expected_block_types
+
+     else:
+         # v1
+         expected_block_types = {"server_tool_call", "server_tool_result", "text"}
+         assert {block["type"] for block in message.content} == expected_block_types  # type: ignore[index]
+
+     # Lazy parsing
+     expected_block_types = {"server_tool_call", "server_tool_result", "text"}
+     assert {block["type"] for block in message.content_blocks} == expected_block_types
+
+
  @pytest.mark.filterwarnings("ignore::UserWarning")
- @pytest.mark.parametrize("use_streaming", [False, True])
- def test_code_execution_builtin(use_streaming: bool) -> None:
-     llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-001").bind_tools(
-         [{"code_execution": {}}]
-     )
+ @pytest.mark.parametrize("output_version", ["v0", "v1"])
+ def test_code_execution_builtin(output_version: str) -> None:
+     llm = ChatGoogleGenerativeAI(
+         model="models/gemini-2.0-flash-001", output_version=output_version
+     ).bind_tools([{"code_execution": {}}])
      input_message = {
          "role": "user",
          "content": "What is 3^3?",
      }

-     if use_streaming:
-         # Test streaming mode
-         full: Optional[BaseMessageChunk] = None
-         for chunk in llm.stream([input_message]):
-             assert isinstance(chunk, AIMessageChunk)
-             full = chunk if full is None else full + chunk
-         assert isinstance(full, AIMessageChunk)
-
-         # Check raw content still has legacy format (backward compatibility)
-         blocks = [block for block in full.content if isinstance(block, dict)]
-         expected_block_types = {"executable_code", "code_execution_result"}
-         assert {block.get("type") for block in blocks} == expected_block_types
+     full: Optional[BaseMessageChunk] = None
+     for chunk in llm.stream([input_message]):
+         assert isinstance(chunk, AIMessageChunk)
+         full = chunk if full is None else full + chunk
+     assert isinstance(full, AIMessageChunk)

-         content_blocks = full.content_blocks
-         standard_blocks = [block for block in content_blocks if isinstance(block, dict)]
-         standard_types = {block.get("type") for block in standard_blocks}
-         assert (
-             "server_tool_call" in standard_types or "executable_code" in standard_types
-         )
-         assert (
-             "server_tool_result" in standard_types
-             or "code_execution_result" in standard_types
-         )
+     _check_code_execution_output(full, output_version)

-         # Test passing back in chat history without raising errors
-         next_message = {
-             "role": "user",
-             "content": "Can you show me the calculation again with comments?",
-         }
-         _ = llm.invoke([input_message, full, next_message])
-     else:
-         # Invoke
-         response = llm.invoke([input_message])
-         blocks = [block for block in response.content if isinstance(block, dict)]
-
-         # Check raw content still has legacy format (backward compatibility)
-         expected_block_types = {"executable_code", "code_execution_result"}
-         assert {block.get("type") for block in blocks} == expected_block_types
-
-         content_blocks = response.content_blocks
-         standard_blocks = [block for block in content_blocks if isinstance(block, dict)]
-         standard_types = {block.get("type") for block in standard_blocks}
-         assert (
-             "server_tool_call" in standard_types or "executable_code" in standard_types
-         )
-         assert (
-             "server_tool_result" in standard_types
-             or "code_execution_result" in standard_types
-         )
-
-         # Test passing back in chat history without raising errors
-         next_message = {
-             "role": "user",
-             "content": "Can you show me the calculation again with comments?",
-         }
-         _ = llm.invoke([input_message, response, next_message])
+     # Test passing back in chat history without raising errors
+     next_message = {
+         "role": "user",
+         "content": "Can you show me the calculation again with comments?",
+     }
+     response = llm.invoke([input_message, full, next_message])
+     _check_code_execution_output(response, output_version)
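
Condensed from the rewritten test: enabling the code-execution builtin now also threads `output_version` through the constructor. The sketch below mirrors the test's v1 assertions and needs a configured `GOOGLE_API_KEY` plus network access to actually run:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="models/gemini-2.0-flash-001", output_version="v1"
).bind_tools([{"code_execution": {}}])

response = llm.invoke([{"role": "user", "content": "What is 3^3?"}])
# Per the v1 branch of _check_code_execution_output, exactly these block types appear:
assert {block["type"] for block in response.content} == {
    "server_tool_call",
    "server_tool_result",
    "text",
}
```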
tests/integration_tests/test_function_call.py
@@ -11,12 +11,12 @@ from langchain_google_genai.chat_models import (
      ChatGoogleGenerativeAI,
  )

- model_names = ["gemini-2.5-flash"]
+ MODEL_NAMES = ["gemini-flash-lite-latest"]


  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_function_call(model_name: str) -> None:
      functions = [
@@ -50,7 +50,7 @@ def test_function_call(model_name: str) -> None:

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_tool_call(model_name: str) -> None:
      @tool
@@ -79,7 +79,7 @@ class MyModel(BaseModel):

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_pydantic_call(model_name: str) -> None:
      llm = ChatGoogleGenerativeAI(model=model_name).bind(functions=[MyModel])
tests/integration_tests/test_llms.py
@@ -10,12 +10,12 @@ from langchain_core.outputs import LLMResult

  from langchain_google_genai import GoogleGenerativeAI, HarmBlockThreshold, HarmCategory

- model_names = ["gemini-2.5-flash"]
+ MODEL_NAMES = ["gemini-flash-lite-latest"]


  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_google_generativeai_call(model_name: str) -> None:
      """Test valid call to Google GenerativeAI text API."""
@@ -31,7 +31,7 @@ def test_google_generativeai_call(model_name: str) -> None:

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_google_generativeai_generate(model_name: str) -> None:
      llm = GoogleGenerativeAI(temperature=0.3, model=model_name)
@@ -47,7 +47,7 @@ def test_google_generativeai_generate(model_name: str) -> None:

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  async def test_google_generativeai_agenerate(model_name: str) -> None:
      llm = GoogleGenerativeAI(temperature=0, model=model_name)
@@ -57,7 +57,7 @@ async def test_google_generativeai_agenerate(model_name: str) -> None:

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_generativeai_stream(model_name: str) -> None:
      llm = GoogleGenerativeAI(temperature=0, model=model_name)
@@ -67,7 +67,7 @@ def test_generativeai_stream(model_name: str) -> None:

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_generativeai_get_num_tokens_gemini(model_name: str) -> None:
      llm = GoogleGenerativeAI(temperature=0, model=model_name)
@@ -77,7 +77,7 @@ def test_generativeai_get_num_tokens_gemini(model_name: str) -> None:

  @pytest.mark.parametrize(
      "model_name",
-     model_names,
+     MODEL_NAMES,
  )
  def test_safety_settings_gemini(model_name: str) -> None:
      # test with blocked prompt
tests/integration_tests/test_tools.py
@@ -2,6 +2,8 @@ from langchain_core.tools import tool

  from langchain_google_genai import ChatGoogleGenerativeAI

+ MODEL = "gemini-flash-lite-latest"
+

  @tool
  def check_weather(location: str) -> str:
@@ -25,7 +27,7 @@ def test_multiple_tools() -> None:
      tools = [check_weather, check_live_traffic, check_tennis_score]

      model = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL,
      )

      model_with_tools = model.bind_tools(tools)
tests/unit_tests/test_chat_models.py
@@ -45,11 +45,13 @@ from langchain_google_genai.chat_models import (
      _response_to_result,
  )

+ MODEL_NAME = "gemini-flash-lite-latest"
+

  def test_integration_initialization() -> None:
      """Test chat model initialization."""
      llm = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("..."),
          top_k=2,
          top_p=1,
@@ -59,27 +61,27 @@ def test_integration_initialization() -> None:
      ls_params = llm._get_ls_params()
      assert ls_params == {
          "ls_provider": "google_genai",
-         "ls_model_name": "gemini-2.5-flash",
+         "ls_model_name": MODEL_NAME,
          "ls_model_type": "chat",
          "ls_temperature": 0.7,
      }

      llm = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("..."),
          max_output_tokens=10,
      )
      ls_params = llm._get_ls_params()
      assert ls_params == {
          "ls_provider": "google_genai",
-         "ls_model_name": "gemini-2.5-flash",
+         "ls_model_name": MODEL_NAME,
          "ls_model_type": "chat",
          "ls_temperature": 0.7,
          "ls_max_tokens": 10,
      }

      ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          api_key=SecretStr("..."),
          top_k=2,
          top_p=1,
@@ -91,13 +93,13 @@ def test_integration_initialization() -> None:
      with warnings.catch_warnings():
          warnings.simplefilter("ignore", UserWarning)
          llm = ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash",
+             model=MODEL_NAME,
              google_api_key=SecretStr("..."),
              safety_setting={
                  "HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_LOW_AND_ABOVE"
              },  # Invalid arg
          )
-     assert llm.model == "models/gemini-2.5-flash"
+     assert llm.model == f"models/{MODEL_NAME}"
      mock_warning.assert_called_once()
      call_args = mock_warning.call_args[0][0]
      assert "Unexpected argument 'safety_setting'" in call_args
@@ -112,7 +114,7 @@ def test_safety_settings_initialization() -> None:

      # Test initialization with safety_settings
      llm = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("test-key"),
          temperature=0.7,
          safety_settings=safety_settings,
@@ -121,7 +123,7 @@ def test_safety_settings_initialization() -> None:
      # Verify the safety_settings are stored correctly
      assert llm.safety_settings == safety_settings
      assert llm.temperature == 0.7
-     assert llm.model == "models/gemini-2.5-flash"
+     assert llm.model == f"models/{MODEL_NAME}"


  def test_initialization_inside_threadpool() -> None:
@@ -130,30 +132,28 @@ def test_initialization_inside_threadpool() -> None:
      with ThreadPoolExecutor() as executor:
          executor.submit(
              ChatGoogleGenerativeAI,
-             model="gemini-2.5-flash",
+             model=MODEL_NAME,
              google_api_key=SecretStr("secret-api-key"),
          ).result()


  def test_client_transport() -> None:
      """Test client transport configuration."""
-     model = ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key="fake-key")
+     model = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key="fake-key")
      assert model.client.transport.kind == "grpc"

      model = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash", google_api_key="fake-key", transport="rest"
+         model=MODEL_NAME, google_api_key="fake-key", transport="rest"
      )
      assert model.client.transport.kind == "rest"

      async def check_async_client() -> None:
-         model = ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash", google_api_key="fake-key"
-         )
+         model = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key="fake-key")
          assert model.async_client.transport.kind == "grpc_asyncio"

          # Test auto conversion of transport to "grpc_asyncio" from "rest"
          model = ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash", google_api_key="fake-key", transport="rest"
+             model=MODEL_NAME, google_api_key="fake-key", transport="rest"
          )
          assert model.async_client.transport.kind == "grpc_asyncio"

@@ -162,7 +162,7 @@ def test_client_transport() -> None:

  def test_initalization_without_async() -> None:
      chat = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("secret-api-key"),
      )
      assert chat.async_client is None
@@ -171,7 +171,7 @@ def test_initalization_without_async() -> None:
  def test_initialization_with_async() -> None:
      async def initialize_chat_with_async_client() -> ChatGoogleGenerativeAI:
          model = ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash",
+             model=MODEL_NAME,
              google_api_key=SecretStr("secret-api-key"),
          )
          _ = model.async_client
@@ -183,7 +183,7 @@ def test_initialization_with_async() -> None:

  def test_api_key_is_string() -> None:
      chat = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("secret-api-key"),
      )
      assert isinstance(chat.google_api_key, SecretStr)
@@ -193,7 +193,7 @@ def test_api_key_masked_when_passed_via_constructor(
      capsys: pytest.CaptureFixture,
  ) -> None:
      chat = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("secret-api-key"),
      )
      print(chat.google_api_key, end="")  # noqa: T201
@@ -399,7 +399,7 @@ def test_additional_headers_support(headers: Optional[dict[str, str]]) -> None:
          mock_client,
      ):
          chat = ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash",
+             model=MODEL_NAME,
              google_api_key=param_secret_api_key,
              client_options=param_client_options,
              transport=param_transport,
@@ -437,7 +437,7 @@ def test_default_metadata_field_alias() -> None:
      # error
      # This is the main issue: LangSmith Playground passes None to default_metadata_input
      chat1 = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("test-key"),
          default_metadata_input=None,
      )
@@ -448,7 +448,7 @@ def test_default_metadata_field_alias() -> None:
      # Test with empty list for default_metadata_input (should not cause validation
      # error)
      chat2 = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("test-key"),
          default_metadata_input=[],
      )
@@ -457,7 +457,7 @@ def test_default_metadata_field_alias() -> None:

      # Test with tuple for default_metadata_input (should not cause validation error)
      chat3 = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("test-key"),
          default_metadata_input=[("X-Test", "test")],
      )
@@ -813,7 +813,7 @@ def test_parse_response_candidate_includes_model_name() -> None:


  def test_serialize() -> None:
-     llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key="test-key")
+     llm = ChatGoogleGenerativeAI(model=MODEL_NAME, google_api_key="test-key")
      serialized = dumps(llm)
      llm_loaded = loads(
          serialized,
@@ -852,20 +852,20 @@ def test__convert_tool_message_to_parts__sets_tool_name(
  def test_temperature_range_pydantic_validation() -> None:
      """Test that temperature is in the range [0.0, 2.0]."""
      with pytest.raises(ValidationError):
-         ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=2.1)
+         ChatGoogleGenerativeAI(model=MODEL_NAME, temperature=2.1)

      with pytest.raises(ValidationError):
-         ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=-0.1)
+         ChatGoogleGenerativeAI(model=MODEL_NAME, temperature=-0.1)

      llm = ChatGoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          google_api_key=SecretStr("..."),
          temperature=1.5,
      )
      ls_params = llm._get_ls_params()
      assert ls_params == {
          "ls_provider": "google_genai",
-         "ls_model_name": "gemini-2.5-flash",
+         "ls_model_name": MODEL_NAME,
          "ls_model_type": "chat",
          "ls_temperature": 1.5,
      }
@@ -874,30 +874,30 @@ def test_temperature_range_pydantic_validation() -> None:
  def test_temperature_range_model_validation() -> None:
      """Test that temperature is in the range [0.0, 2.0]."""
      with pytest.raises(ValueError):
-         ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=2.5)
+         ChatGoogleGenerativeAI(model=MODEL_NAME, temperature=2.5)

      with pytest.raises(ValueError):
-         ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=-0.5)
+         ChatGoogleGenerativeAI(model=MODEL_NAME, temperature=-0.5)


  def test_model_kwargs() -> None:
      """Test we can transfer unknown params to model_kwargs."""
      llm = ChatGoogleGenerativeAI(
-         model="my-model",
+         model=MODEL_NAME,
          convert_system_message_to_human=True,
          model_kwargs={"foo": "bar"},
      )
-     assert llm.model == "models/my-model"
+     assert llm.model == f"models/{MODEL_NAME}"
      assert llm.convert_system_message_to_human is True
      assert llm.model_kwargs == {"foo": "bar"}

      with pytest.warns(match="transferred to model_kwargs"):
          llm = ChatGoogleGenerativeAI(
-             model="my-model",
+             model=MODEL_NAME,
              convert_system_message_to_human=True,
              foo="bar",
          )
-     assert llm.model == "models/my-model"
+     assert llm.model == f"models/{MODEL_NAME}"
      assert llm.convert_system_message_to_human is True
      assert llm.model_kwargs == {"foo": "bar"}

@@ -1507,7 +1507,7 @@ def test_thinking_config_merging_with_generation_config() -> None:
          mock_retry.return_value = mock_response

          llm = ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash",
+             model=MODEL_NAME,
              google_api_key=SecretStr("test-key"),
          )

tests/unit_tests/test_embeddings.py
@@ -14,6 +14,8 @@ from pydantic import SecretStr

  from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings

+ MODEL_NAME = "gemini-embedding-001"
+

  def test_integration_initialization() -> None:
      """Test chat model initialization."""
@@ -21,7 +23,7 @@ def test_integration_initialization() -> None:
          "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient"
      ) as mock_prediction_service:
          _ = GoogleGenerativeAIEmbeddings(
-             model="models/gemini-embedding-001",
+             model=f"models/{MODEL_NAME}",
              google_api_key=SecretStr("..."),
          )
          mock_prediction_service.assert_called_once()
@@ -34,7 +36,7 @@ def test_integration_initialization() -> None:
          "langchain_google_genai._genai_extension.v1betaGenerativeServiceClient"
      ) as mock_prediction_service:
          _ = GoogleGenerativeAIEmbeddings(
-             model="models/gemini-embedding-001",
+             model=f"models/{MODEL_NAME}",
              google_api_key=SecretStr("..."),
              task_type="retrieval_document",
          )
@@ -43,7 +45,7 @@ def test_integration_initialization() -> None:

  def test_api_key_is_string() -> None:
      embeddings = GoogleGenerativeAIEmbeddings(
-         model="models/gemini-embedding-001",
+         model=f"models/{MODEL_NAME}",
          google_api_key=SecretStr("secret-api-key"),
      )
      assert isinstance(embeddings.google_api_key, SecretStr)
@@ -53,7 +55,7 @@ def test_api_key_masked_when_passed_via_constructor(
      capsys: pytest.CaptureFixture,
  ) -> None:
      embeddings = GoogleGenerativeAIEmbeddings(
-         model="models/gemini-embedding-001",
+         model=f"models/{MODEL_NAME}",
          google_api_key=SecretStr("secret-api-key"),
      )
      print(embeddings.google_api_key, end="")  # noqa: T201
@@ -72,13 +74,13 @@ def test_embed_query() -> None:
      )
      mock_prediction_service.return_value.embed_content = mock_embed
      llm = GoogleGenerativeAIEmbeddings(
-         model="models/embedding-test",
+         model=f"models/{MODEL_NAME}",
          google_api_key=SecretStr("test-key"),
          task_type="classification",
      )
      llm.embed_query("test text", output_dimensionality=524)
      request = EmbedContentRequest(
-         model="models/embedding-test",
+         model=f"models/{MODEL_NAME}",
          content={"parts": [{"text": "test text"}]},
          task_type="CLASSIFICATION",
          output_dimensionality=524,
@@ -97,22 +99,22 @@ def test_embed_documents() -> None:
      mock_prediction_service.return_value.batch_embed_contents = mock_embed

      llm = GoogleGenerativeAIEmbeddings(
-         model="models/embedding-test",
+         model=f"models/{MODEL_NAME}",
          google_api_key=SecretStr("test-key"),
      )

      llm.embed_documents(["test text", "test text2"], titles=["title1", "title2"])
      request = BatchEmbedContentsRequest(
-         model="models/embedding-test",
+         model=f"models/{MODEL_NAME}",
          requests=[
              EmbedContentRequest(
-                 model="models/embedding-test",
+                 model=f"models/{MODEL_NAME}",
                  content={"parts": [{"text": "test text"}]},
                  task_type="RETRIEVAL_DOCUMENT",
                  title="title1",
              ),
              EmbedContentRequest(
-                 model="models/embedding-test",
+                 model=f"models/{MODEL_NAME}",
                  content={"parts": [{"text": "test text2"}]},
                  task_type="RETRIEVAL_DOCUMENT",
                  title="title2",
@@ -135,7 +137,7 @@ def test_embed_documents_with_numerous_texts() -> None:
      mock_prediction_service.return_value.batch_embed_contents = mock_embed

      llm = GoogleGenerativeAIEmbeddings(
-         model="models/embedding-test",
+         model=f"models/{MODEL_NAME}",
          google_api_key=SecretStr("test-key"),
      )

@@ -145,10 +147,10 @@ def test_embed_documents_with_numerous_texts() -> None:
          titles=["title1" for _ in range(test_corpus_size)],
      )
      request = BatchEmbedContentsRequest(
-         model="models/embedding-test",
+         model=f"models/{MODEL_NAME}",
          requests=[
              EmbedContentRequest(
-                 model="models/embedding-test",
+                 model=f"models/{MODEL_NAME}",
                  content={"parts": [{"text": "test text"}]},
                  task_type="RETRIEVAL_DOCUMENT",
                  title="title1",
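
The embeddings tests now derive every model string from a single `MODEL_NAME` constant. Outside the mocked tests, the same construction works directly (the key below is a placeholder; a real call requires valid credentials):

```python
from pydantic import SecretStr

from langchain_google_genai import GoogleGenerativeAIEmbeddings

MODEL_NAME = "gemini-embedding-001"

embeddings = GoogleGenerativeAIEmbeddings(
    model=f"models/{MODEL_NAME}",
    google_api_key=SecretStr("secret-api-key"),  # placeholder, not a real key
)
# The key is wrapped in SecretStr, so printing it masks the value.
assert isinstance(embeddings.google_api_key, SecretStr)
```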
tests/unit_tests/test_llms.py
@@ -2,20 +2,22 @@ from unittest.mock import patch

  from langchain_google_genai.llms import GoogleGenerativeAI

+ MODEL_NAME = "gemini-flash-lite-latest"
+

  def test_tracing_params() -> None:
      # Test standard tracing params
-     llm = GoogleGenerativeAI(model="gemini-2.5-flash", google_api_key="foo")
+     llm = GoogleGenerativeAI(model=MODEL_NAME, google_api_key="foo")
      ls_params = llm._get_ls_params()
      assert ls_params == {
          "ls_provider": "google_genai",
          "ls_model_type": "llm",
-         "ls_model_name": "gemini-2.5-flash",
+         "ls_model_name": MODEL_NAME,
          "ls_temperature": 0.7,
      }

      llm = GoogleGenerativeAI(
-         model="gemini-2.5-flash",
+         model=MODEL_NAME,
          temperature=0.1,
          max_output_tokens=10,
          google_api_key="foo",
@@ -24,7 +26,7 @@ def test_tracing_params() -> None:
      assert ls_params == {
          "ls_provider": "google_genai",
          "ls_model_type": "llm",
-         "ls_model_name": "gemini-2.5-flash",
+         "ls_model_name": MODEL_NAME,
          "ls_temperature": 0.1,
          "ls_max_tokens": 10,
      }
@@ -32,15 +34,15 @@ def test_tracing_params() -> None:
      # Test initialization with an invalid argument to check warning
      with patch("langchain_google_genai.llms.logger.warning") as mock_warning:
          llm = GoogleGenerativeAI(
-             model="gemini-2.5-flash",
+             model=MODEL_NAME,
              google_api_key="foo",
              safety_setting={
                  "HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_LOW_AND_ABOVE"
              },  # Invalid arg
          )
-         assert llm.model == "models/gemini-2.5-flash"
+         assert llm.model == f"models/{MODEL_NAME}"
          ls_params = llm._get_ls_params()
-         assert ls_params.get("ls_model_name") == "gemini-2.5-flash"
+         assert ls_params.get("ls_model_name") == MODEL_NAME
          mock_warning.assert_called_once()
          call_args = mock_warning.call_args[0][0]
          assert "Unexpected argument 'safety_setting'" in call_args