langchain-google-genai 2.1.12__tar.gz → 3.0.0rc1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/PKG-INFO +8 -8
  2. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/README.md +2 -2
  3. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_common.py +2 -1
  4. langchain_google_genai-3.0.0rc1/langchain_google_genai/_compat.py +286 -0
  5. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_function_utils.py +30 -3
  6. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_genai_extension.py +25 -6
  7. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/chat_models.py +405 -79
  8. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/embeddings.py +4 -16
  9. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/pyproject.toml +21 -22
  10. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_callbacks.py +9 -2
  11. langchain_google_genai-3.0.0rc1/tests/integration_tests/test_chat_models.py +1109 -0
  12. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_function_call.py +21 -12
  13. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_llms.py +28 -12
  14. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_standard.py +2 -2
  15. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_tools.py +5 -3
  16. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/__snapshots__/test_standard.ambr +2 -2
  17. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_chat_models.py +772 -27
  18. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_embeddings.py +15 -13
  19. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_llms.py +9 -7
  20. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_standard.py +2 -2
  21. langchain_google_genai-2.1.12/tests/integration_tests/test_chat_models.py +0 -894
  22. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/LICENSE +0 -0
  23. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/__init__.py +0 -0
  24. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_enums.py +0 -0
  25. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/_image_utils.py +0 -0
  26. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/genai_aqa.py +0 -0
  27. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/google_vector_store.py +0 -0
  28. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/llms.py +0 -0
  29. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/langchain_google_genai/py.typed +0 -0
  30. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/__init__.py +0 -0
  31. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/conftest.py +0 -0
  32. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/.env.example +0 -0
  33. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/__init__.py +0 -0
  34. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/terraform/main.tf +0 -0
  35. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_compile.py +0 -0
  36. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/integration_tests/test_embeddings.py +0 -0
  37. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/__init__.py +0 -0
  38. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_chat_models_protobuf_fix.py +0 -0
  39. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_common.py +0 -0
  40. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_function_utils.py +0 -0
  41. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_genai_aqa.py +0 -0
  42. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_google_vector_store.py +0 -0
  43. {langchain_google_genai-2.1.12 → langchain_google_genai-3.0.0rc1}/tests/unit_tests/test_imports.py +0 -0
@@ -1,16 +1,16 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: langchain-google-genai
3
- Version: 2.1.12
3
+ Version: 3.0.0rc1
4
4
  Summary: An integration package connecting Google's genai package and LangChain
5
5
  License: MIT
6
6
  Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
7
7
  Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
8
8
  Project-URL: repository, https://github.com/langchain-ai/langchain-google
9
- Requires-Python: >=3.9
10
- Requires-Dist: langchain-core>=0.3.75
11
- Requires-Dist: google-ai-generativelanguage<1,>=0.7
12
- Requires-Dist: pydantic<3,>=2
13
- Requires-Dist: filetype<2,>=1.2
9
+ Requires-Python: <4.0.0,>=3.10.0
10
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0rc2
11
+ Requires-Dist: google-ai-generativelanguage<1.0.0,>=0.7.0
12
+ Requires-Dist: pydantic<3.0.0,>=2.0.0
13
+ Requires-Dist: filetype<2.0.0,>=1.2.0
14
14
  Description-Content-Type: text/markdown
15
15
 
16
16
  # langchain-google-genai
@@ -76,7 +76,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
76
76
  ```python
77
77
  from langchain_google_genai import ChatGoogleGenerativeAI
78
78
 
79
- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
79
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")
80
80
  response = llm.invoke("Sing a ballad of LangChain.")
81
81
  print(response.content)
82
82
  ```
@@ -97,7 +97,7 @@ Most Gemini models support image inputs.
97
97
  from langchain_core.messages import HumanMessage
98
98
  from langchain_google_genai import ChatGoogleGenerativeAI
99
99
 
100
- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
100
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")
101
101
 
102
102
  message = HumanMessage(
103
103
  content=[
@@ -61,7 +61,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
61
61
  ```python
62
62
  from langchain_google_genai import ChatGoogleGenerativeAI
63
63
 
64
- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
64
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")
65
65
  response = llm.invoke("Sing a ballad of LangChain.")
66
66
  print(response.content)
67
67
  ```
@@ -82,7 +82,7 @@ Most Gemini models support image inputs.
82
82
  from langchain_core.messages import HumanMessage
83
83
  from langchain_google_genai import ChatGoogleGenerativeAI
84
84
 
85
- llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
85
+ llm = ChatGoogleGenerativeAI(model="gemini-flash-latest")
86
86
 
87
87
  message = HumanMessage(
88
88
  content=[
@@ -52,7 +52,8 @@ Examples:
52
52
 
53
53
  max_output_tokens: Optional[int] = Field(default=None, alias="max_tokens")
54
54
  """Maximum number of tokens to include in a candidate. Must be greater than zero.
55
- If unset, will default to ``64``."""
55
+ If unset, will use the model's default value, which varies by model.
56
+ See https://ai.google.dev/gemini-api/docs/models for model-specific limits."""
56
57
 
57
58
  n: int = 1
58
59
  """Number of chat completions to generate for each prompt. Note that the API may
@@ -0,0 +1,286 @@
1
+ """Go from v1 content blocks to generativelanguage_v1beta format."""
2
+
3
+ import json
4
+ from typing import Any, Optional, cast
5
+
6
+ from langchain_core.messages import content as types
7
+
8
+
9
def translate_citations_to_grounding_metadata(
    citations: list[types.Citation], web_search_queries: Optional[list[str]] = None
) -> dict[str, Any]:
    """Build Google AI grounding metadata from LangChain ``Citation`` blocks.

    Args:
        citations: Citation content blocks to translate.
        web_search_queries: Search queries that produced the grounding data.
            When omitted, queries are recovered from each citation's extras
            and deduplicated in first-seen order.

    Returns:
        Google AI grounding metadata with ``webSearchQueries``,
        ``groundingChunks`` and ``groundingSupports`` keys, or an empty
        dict when there are no citations.
    """
    if not citations:
        return {}

    # One grounding chunk per distinct URL, numbered in first-seen order.
    chunk_index_by_url: dict[str, int] = {}
    grounding_chunks: list[dict[str, Any]] = []
    for cit in citations:
        uri = cit.get("url")
        if uri and uri not in chunk_index_by_url:
            chunk_index_by_url[uri] = len(grounding_chunks)
            grounding_chunks.append(
                {"web": {"uri": uri, "title": cit.get("title", "")}}
            )

    # Citations sharing the same (start, end, text) segment collapse into
    # a single grounding support entry.
    grouped: dict[
        tuple[Optional[int], Optional[int], Optional[str]], list[types.Citation]
    ] = {}
    for cit in citations:
        segment_key = (
            cit.get("start_index"),
            cit.get("end_index"),
            cit.get("cited_text"),
        )
        grouped.setdefault(segment_key, []).append(cit)

    grounding_supports: list[dict[str, Any]] = []
    for (start, end, text), members in grouped.items():
        # A support entry needs a fully-specified, non-empty segment.
        if start is None or end is None or not text:
            continue

        indices: list[int] = []
        scores: list = []
        for cit in members:
            uri = cit.get("url")
            if uri and uri in chunk_index_by_url:
                indices.append(chunk_index_by_url[uri])

            # Confidence scores ride along in provider-specific extras.
            meta = cit.get("extras", {}).get("google_ai_metadata", {})
            scores.extend(meta.get("confidence_scores", []))

        support: dict[str, Any] = {
            "segment": {
                "startIndex": start,
                "endIndex": end,
                "text": text,
            },
            "groundingChunkIndices": indices,
        }
        if scores:
            support["confidenceScores"] = scores
        grounding_supports.append(support)

    if web_search_queries is None:
        # Recover queries from extras; dedupe while preserving order.
        collected: list[str] = []
        for cit in citations:
            meta = cit.get("extras", {}).get("google_ai_metadata", {})
            collected.extend(meta.get("web_search_queries", []))
        web_search_queries = list(dict.fromkeys(collected))

    return {
        "webSearchQueries": web_search_queries,
        "groundingChunks": grounding_chunks,
        "groundingSupports": grounding_supports,
    }
121
+
122
+
123
+ def _convert_from_v1_to_generativelanguage_v1beta(
124
+ content: list[types.ContentBlock], model_provider: str | None
125
+ ) -> list[dict[str, Any]]:
126
+ """Convert v1 content blocks to `google.ai.generativelanguage_v1beta.types.Content`.
127
+
128
+ Args:
129
+ content: List of v1 `ContentBlock` objects.
130
+ model_provider: The model provider name that generated the v1 content.
131
+
132
+ Returns:
133
+ List of dictionaries in `google.ai.generativelanguage_v1beta.types.Content`
134
+ format, ready to be sent to the API.
135
+ """
136
+ new_content: list = []
137
+ for block in content:
138
+ if not isinstance(block, dict) or "type" not in block:
139
+ continue
140
+
141
+ block_dict = dict(block) # (For typing)
142
+
143
+ # TextContentBlock
144
+ if block_dict["type"] == "text":
145
+ new_block = {"text": block_dict.get("text", "")}
146
+ new_content.append(new_block)
147
+ # Citations are only handled on output. Can't pass them back :/
148
+
149
+ # ReasoningContentBlock -> thinking
150
+ elif block_dict["type"] == "reasoning" and model_provider == "google_genai":
151
+ # Google requires passing back the thought_signature when available.
152
+ # Signatures are only provided when function calling is enabled.
153
+ if "extras" in block_dict and isinstance(block_dict["extras"], dict):
154
+ extras = block_dict["extras"]
155
+ if "signature" in extras:
156
+ new_block = {
157
+ "thought": True,
158
+ "text": block_dict.get("reasoning", ""),
159
+ "thought_signature": extras["signature"],
160
+ }
161
+ new_content.append(new_block)
162
+ # else: skip reasoning blocks without signatures
163
+ # TODO: log a warning?
164
+ # else: skip reasoning blocks without extras
165
+ # TODO: log a warning?
166
+
167
+ # ImageContentBlock
168
+ elif block_dict["type"] == "image":
169
+ if base64 := block_dict.get("base64"):
170
+ new_block = {
171
+ "inline_data": {
172
+ "mime_type": block_dict.get("mime_type", "image/jpeg"),
173
+ "data": base64.encode("utf-8")
174
+ if isinstance(base64, str)
175
+ else base64,
176
+ }
177
+ }
178
+ new_content.append(new_block)
179
+ elif url := block_dict.get("url") and model_provider == "google_genai":
180
+ # Google file service
181
+ new_block = {
182
+ "file_data": {
183
+ "mime_type": block_dict.get("mime_type", "image/jpeg"),
184
+ "file_uri": block_dict[str(url)],
185
+ }
186
+ }
187
+ new_content.append(new_block)
188
+
189
+ # TODO: AudioContentBlock -> audio once models support passing back in
190
+
191
+ # FileContentBlock (documents)
192
+ elif block_dict["type"] == "file":
193
+ if base64 := block_dict.get("base64"):
194
+ new_block = {
195
+ "inline_data": {
196
+ "mime_type": block_dict.get(
197
+ "mime_type", "application/octet-stream"
198
+ ),
199
+ "data": base64.encode("utf-8")
200
+ if isinstance(base64, str)
201
+ else base64,
202
+ }
203
+ }
204
+ new_content.append(new_block)
205
+ elif url := block_dict.get("url") and model_provider == "google_genai":
206
+ # Google file service
207
+ new_block = {
208
+ "file_data": {
209
+ "mime_type": block_dict.get(
210
+ "mime_type", "application/octet-stream"
211
+ ),
212
+ "file_uri": block_dict[str(url)],
213
+ }
214
+ }
215
+ new_content.append(new_block)
216
+
217
+ # ToolCall -> FunctionCall
218
+ elif block_dict["type"] == "tool_call":
219
+ function_call = {
220
+ "function_call": {
221
+ "name": block_dict.get("name", ""),
222
+ "args": block_dict.get("args", {}),
223
+ }
224
+ }
225
+ new_content.append(function_call)
226
+
227
+ # ToolCallChunk -> FunctionCall
228
+ elif block_dict["type"] == "tool_call_chunk":
229
+ try:
230
+ args_str = block_dict.get("args") or "{}"
231
+ input_ = json.loads(args_str) if isinstance(args_str, str) else args_str
232
+ except json.JSONDecodeError:
233
+ input_ = {}
234
+
235
+ function_call = {
236
+ "function_call": {
237
+ "name": block_dict.get("name", "no_tool_name_present"),
238
+ "args": input_,
239
+ }
240
+ }
241
+ new_content.append(function_call)
242
+
243
+ elif block_dict["type"] == "server_tool_call":
244
+ if block_dict.get("name") == "code_interpreter":
245
+ # LangChain v0 format
246
+ args = cast(dict, block_dict.get("args", {}))
247
+ executable_code = {
248
+ "type": "executable_code",
249
+ "executable_code": args.get("code", ""),
250
+ "language": args.get("language", ""),
251
+ "id": block_dict.get("id", ""),
252
+ }
253
+ # Google generativelanguage format
254
+ new_content.append(
255
+ {
256
+ "executable_code": {
257
+ "language": executable_code["language"],
258
+ "code": executable_code["executable_code"],
259
+ }
260
+ }
261
+ )
262
+
263
+ elif block_dict["type"] == "server_tool_result":
264
+ extras = cast(dict, block_dict.get("extras", {}))
265
+ if extras.get("block_type") == "code_execution_result":
266
+ # LangChain v0 format
267
+ code_execution_result = {
268
+ "type": "code_execution_result",
269
+ "code_execution_result": block_dict.get("output", ""),
270
+ "outcome": extras.get("outcome", ""),
271
+ "tool_call_id": block_dict.get("tool_call_id", ""),
272
+ }
273
+ # Google generativelanguage format
274
+ new_content.append(
275
+ {
276
+ "code_execution_result": {
277
+ "outcome": code_execution_result["outcome"],
278
+ "output": code_execution_result["code_execution_result"],
279
+ }
280
+ }
281
+ )
282
+
283
+ elif block_dict["type"] == "non_standard":
284
+ new_content.append(block_dict["value"])
285
+
286
+ return new_content
@@ -330,8 +330,10 @@ def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
330
330
  continue
331
331
  properties_item: Dict[str, Union[str, int, Dict, List]] = {}
332
332
 
333
- # Get description from original schema before any modifications
334
- description = v.get("description")
333
+ # Preserve description and other schema properties before manipulation
334
+ original_description = v.get("description")
335
+ original_enum = v.get("enum")
336
+ original_items = v.get("items")
335
337
 
336
338
  if v.get("anyOf") and all(
337
339
  anyOf_type.get("type") != "null" for anyOf_type in v.get("anyOf", [])
@@ -354,11 +356,34 @@ def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
354
356
  if any_of_types and item_type_ in [glm.Type.ARRAY, glm.Type.OBJECT]:
355
357
  json_type_ = "array" if item_type_ == glm.Type.ARRAY else "object"
356
358
  # Use Index -1 for consistency with `_get_nullable_type_from_schema`
357
- v = [val for val in any_of_types if val.get("type") == json_type_][-1]
359
+ filtered_schema = [
360
+ val for val in any_of_types if val.get("type") == json_type_
361
+ ][-1]
362
+ # Merge filtered schema with original properties to preserve enum/items
363
+ v = filtered_schema.copy()
364
+ if original_enum and not v.get("enum"):
365
+ v["enum"] = original_enum
366
+ if original_items and not v.get("items"):
367
+ v["items"] = original_items
368
+ elif any_of_types:
369
+ # For other types (like strings with enums), find the non-null schema
370
+ # and preserve enum/items from the original anyOf structure
371
+ non_null_schemas = [
372
+ val for val in any_of_types if val.get("type") != "null"
373
+ ]
374
+ if non_null_schemas:
375
+ filtered_schema = non_null_schemas[-1]
376
+ v = filtered_schema.copy()
377
+ if original_enum and not v.get("enum"):
378
+ v["enum"] = original_enum
379
+ if original_items and not v.get("items"):
380
+ v["items"] = original_items
358
381
 
359
382
  if v.get("enum"):
360
383
  properties_item["enum"] = v["enum"]
361
384
 
385
+ # Prefer description from the filtered schema, fall back to original
386
+ description = v.get("description") or original_description
362
387
  if description and isinstance(description, str):
363
388
  properties_item["description"] = description
364
389
 
@@ -415,6 +440,8 @@ def _get_items_from_schema(schema: Union[Dict, List, str]) -> Dict[str, Any]:
415
440
  items["description"] = (
416
441
  schema.get("description") or schema.get("title") or ""
417
442
  )
443
+ if "enum" in schema:
444
+ items["enum"] = schema["enum"]
418
445
  if _is_nullable_schema(schema):
419
446
  items["nullable"] = True
420
447
  if "required" in schema:
@@ -632,22 +632,41 @@ def generate_answer(
632
632
  )
633
633
 
634
634
 
635
- # TODO: Use candidate.finish_message when that field is launched.
636
- # For now, we derive this message from other existing fields.
637
635
def _get_finish_message(candidate: genai.Candidate) -> str:
    """Get a human-readable finish message from the candidate.

    Prefers the official ``finish_message`` field when the API populates
    it; otherwise falls back to a manual mapping of finish reasons to
    descriptive messages.
    """
    # Use the official field when available
    official_message = getattr(candidate, "finish_message", None)
    if official_message:
        return official_message

    # Fallback: map each known finish reason to a description.
    reason_messages: Dict[int, str] = {
        genai.Candidate.FinishReason.STOP: "Generation completed successfully",
        genai.Candidate.FinishReason.MAX_TOKENS: (
            "Maximum token in context window reached"
        ),
        genai.Candidate.FinishReason.SAFETY: "Blocked because of safety",
        genai.Candidate.FinishReason.RECITATION: "Blocked because of recitation",
        genai.Candidate.FinishReason.LANGUAGE: "Unsupported language detected",
        genai.Candidate.FinishReason.BLOCKLIST: "Content hit forbidden terms",
        genai.Candidate.FinishReason.PROHIBITED_CONTENT: (
            "Inappropriate content detected"
        ),
        genai.Candidate.FinishReason.SPII: "Sensitive personal information detected",
        genai.Candidate.FinishReason.IMAGE_SAFETY: "Image safety violation",
        genai.Candidate.FinishReason.MALFORMED_FUNCTION_CALL: "Malformed function call",
        genai.Candidate.FinishReason.UNEXPECTED_TOOL_CALL: "Unexpected tool call",
        genai.Candidate.FinishReason.OTHER: "Other generation issue",
        genai.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED: (
            "Unspecified finish reason"
        ),
    }
    # Unknown/future enum values still get a generic error string.
    return reason_messages.get(candidate.finish_reason, "Unexpected generation error")
651
670
 
652
671
 
653
672
  def _convert_to_metadata(metadata: Dict[str, Any]) -> List[genai.CustomMetadata]: