langchain-google-genai 2.1.11__tar.gz → 3.0.0a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-google-genai might be problematic. Click here for more details.

Files changed (45) hide show
  1. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/PKG-INFO +43 -30
  2. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/README.md +35 -24
  3. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/__init__.py +3 -3
  4. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/_common.py +29 -17
  5. langchain_google_genai-3.0.0a1/langchain_google_genai/_compat.py +248 -0
  6. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/_function_utils.py +77 -59
  7. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/_genai_extension.py +60 -27
  8. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/_image_utils.py +10 -9
  9. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/chat_models.py +803 -297
  10. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/embeddings.py +15 -22
  11. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/genai_aqa.py +15 -15
  12. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/google_vector_store.py +26 -16
  13. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/llms.py +8 -7
  14. langchain_google_genai-3.0.0a1/pyproject.toml +148 -0
  15. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/conftest.py +6 -20
  16. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_callbacks.py +14 -7
  17. langchain_google_genai-3.0.0a1/tests/integration_tests/test_chat_models.py +1124 -0
  18. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_compile.py +0 -1
  19. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_embeddings.py +1 -2
  20. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_function_call.py +22 -13
  21. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_llms.py +31 -18
  22. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_standard.py +8 -8
  23. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/test_tools.py +5 -5
  24. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/__snapshots__/test_standard.ambr +4 -4
  25. langchain_google_genai-3.0.0a1/tests/unit_tests/test_chat_models.py +1861 -0
  26. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_chat_models_protobuf_fix.py +3 -3
  27. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_embeddings.py +12 -10
  28. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_function_utils.py +94 -95
  29. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_llms.py +9 -9
  30. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_standard.py +7 -9
  31. langchain_google_genai-2.1.11/pyproject.toml +0 -84
  32. langchain_google_genai-2.1.11/tests/integration_tests/test_chat_models.py +0 -887
  33. langchain_google_genai-2.1.11/tests/unit_tests/test_chat_models.py +0 -916
  34. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/LICENSE +0 -0
  35. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/_enums.py +0 -0
  36. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/langchain_google_genai/py.typed +0 -0
  37. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/__init__.py +0 -0
  38. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/.env.example +0 -0
  39. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/__init__.py +0 -0
  40. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/integration_tests/terraform/main.tf +0 -0
  41. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/__init__.py +0 -0
  42. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_common.py +0 -0
  43. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_genai_aqa.py +0 -0
  44. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_google_vector_store.py +0 -0
  45. {langchain_google_genai-2.1.11 → langchain_google_genai-3.0.0a1}/tests/unit_tests/test_imports.py +0 -0
@@ -1,14 +1,16 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: langchain-google-genai
3
- Version: 2.1.11
3
+ Version: 3.0.0a1
4
4
  Summary: An integration package connecting Google's genai package and LangChain
5
5
  License: MIT
6
6
  Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
7
- Requires-Python: >=3.9
8
- Requires-Dist: langchain-core>=0.3.75
9
- Requires-Dist: google-ai-generativelanguage<1,>=0.7
10
- Requires-Dist: pydantic<3,>=2
11
- Requires-Dist: filetype<2,>=1.2
7
+ Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
8
+ Project-URL: repository, https://github.com/langchain-ai/langchain-google
9
+ Requires-Python: <4.0.0,>=3.10.0
10
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0a4
11
+ Requires-Dist: google-ai-generativelanguage<1.0.0,>=0.7.0
12
+ Requires-Dist: pydantic<3.0.0,>=2.0.0
13
+ Requires-Dist: filetype<2.0.0,>=1.2.0
12
14
  Description-Content-Type: text/markdown
13
15
 
14
16
  # langchain-google-genai
@@ -54,6 +56,9 @@ This package provides LangChain support for Google Gemini models (via the offici
54
56
 
55
57
  ```bash
56
58
  pip install -U langchain-google-genai
59
+
60
+ # or, with uv:
61
+ uv add langchain-google-genai
57
62
  ````
58
63
 
59
64
  ---
@@ -71,7 +76,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
71
76
  ```python
72
77
  from langchain_google_genai import ChatGoogleGenerativeAI
73
78
 
74
- llm = ChatGoogleGenerativeAI(model="gemini-pro")
79
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
75
80
  response = llm.invoke("Sing a ballad of LangChain.")
76
81
  print(response.content)
77
82
  ```
@@ -80,22 +85,30 @@ print(response.content)
80
85
 
81
86
  ## Chat Models
82
87
 
83
- The main interface for Gemini chat models is `ChatGoogleGenerativeAI`.
88
+ See the LangChain documentation for general information about [Chat Models](https://docs.langchain.com/oss/python/langchain/models).
89
+
90
+ The main interface for the Gemini chat models is `ChatGoogleGenerativeAI`.
84
91
 
85
92
  ### Multimodal Inputs
86
93
 
87
- Gemini vision models support image inputs in single messages.
94
+ Most Gemini models support image inputs.
88
95
 
89
96
  ```python
90
97
  from langchain_core.messages import HumanMessage
91
98
  from langchain_google_genai import ChatGoogleGenerativeAI
92
99
 
93
- llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
100
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
94
101
 
95
102
  message = HumanMessage(
96
103
  content=[
97
- {"type": "text", "text": "What's in this image?"},
98
- {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/200/300"},
104
+ {
105
+ "type": "text",
106
+ "text": "What's in this image?"
107
+ },
108
+ {
109
+ "type": "image_url",
110
+ "image_url": "https://picsum.photos/seed/picsum/200/300"
111
+ },
99
112
  ]
100
113
  )
101
114
 
@@ -103,7 +116,7 @@ response = llm.invoke([message])
103
116
  print(response.content)
104
117
  ```
105
118
 
106
- `image_url` can be:
119
+ `image_url` can be:
107
120
 
108
121
  - A public image URL
109
122
  - A Google Cloud Storage path (`gcs://...`)
@@ -113,38 +126,44 @@ print(response.content)
113
126
 
114
127
  ### Multimodal Outputs
115
128
 
116
- The Gemini 2.0 Flash Experimental model supports both text and inline image outputs.
129
+ Some Gemini models support both text and inline image outputs.
117
130
 
118
131
  ```python
119
132
  from langchain_google_genai import ChatGoogleGenerativeAI
120
133
 
121
- llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp-image-generation")
134
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image-preview")
122
135
 
123
136
  response = llm.invoke(
124
137
  "Generate an image of a cat and say meow",
125
138
  generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
126
139
  )
127
140
 
128
- image_base64 = response.content[0].get("image_url").get("url").split(",")[-1]
129
- meow_text = response.content[1]
141
+ image_base64 = response.content[1].get("image_url").get("url").split(",")[-1]
142
+ meow_text = response.content[0]
130
143
  print(meow_text)
144
+ # In Jupyter, display the image:
145
+ from base64 import b64decode
146
+ from IPython.display import Image, display
147
+
148
+ img_bytes = b64decode(image_base64)
149
+ display(Image(data=img_bytes))
131
150
  ```
132
151
 
133
152
  ---
134
153
 
135
154
  ### Audio Output
136
155
 
137
- ```
156
+ ```python
138
157
  from langchain_google_genai import ChatGoogleGenerativeAI
139
158
 
140
- llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-tts")
141
- # example
159
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-tts")
160
+
142
161
  response = llm.invoke(
143
162
  "Please say The quick brown fox jumps over the lazy dog",
144
163
  generation_config=dict(response_modalities=["AUDIO"]),
145
164
  )
146
165
 
147
- # Base64 encoded binary data of the image
166
+ # Base64 encoded binary data of the audio
148
167
  wav_data = response.additional_kwargs.get("audio")
149
168
  with open("output.wav", "wb") as f:
150
169
  f.write(wav_data)
@@ -154,15 +173,13 @@ with open("output.wav", "wb") as f:
154
173
 
155
174
  ### Multimodal Outputs in Chains
156
175
 
157
- You can use Gemini models in a LangChain chain:
158
-
159
176
  ```python
160
177
  from langchain_core.runnables import RunnablePassthrough
161
178
  from langchain_core.prompts import ChatPromptTemplate
162
179
  from langchain_google_genai import ChatGoogleGenerativeAI, Modality
163
180
 
164
181
  llm = ChatGoogleGenerativeAI(
165
- model="models/gemini-2.0-flash-exp-image-generation",
182
+ model="gemini-2.5-flash-image-preview",
166
183
  response_modalities=[Modality.TEXT, Modality.IMAGE],
167
184
  )
168
185
 
@@ -178,13 +195,11 @@ response = chain.invoke("cat")
178
195
 
179
196
  ### Thinking Support
180
197
 
181
- Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
182
-
183
198
  ```python
184
199
  from langchain_google_genai import ChatGoogleGenerativeAI
185
200
 
186
201
  llm = ChatGoogleGenerativeAI(
187
- model="models/gemini-2.5-flash-preview-04-17",
202
+ model="models/gemini-2.5-flash",
188
203
  thinking_budget=1024
189
204
  )
190
205
 
@@ -199,8 +214,6 @@ print("Reasoning tokens used:", reasoning_score)
199
214
 
200
215
  ## Embeddings
201
216
 
202
- You can use Gemini embeddings in LangChain:
203
-
204
217
  ```python
205
218
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
206
219
 
@@ -249,4 +262,4 @@ print("Answerable probability:", response.answerable_probability)
249
262
 
250
263
  - [LangChain Documentation](https://docs.langchain.com/)
251
264
  - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
252
- - [Gemini Model Documentation](https://ai.google.dev/)
265
+ - [Gemini Model Documentation](https://ai.google.dev/gemini-api/docs)
@@ -41,6 +41,9 @@ This package provides LangChain support for Google Gemini models (via the offici
41
41
 
42
42
  ```bash
43
43
  pip install -U langchain-google-genai
44
+
45
+ # or, with uv:
46
+ uv add langchain-google-genai
44
47
  ````
45
48
 
46
49
  ---
@@ -58,7 +61,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
58
61
  ```python
59
62
  from langchain_google_genai import ChatGoogleGenerativeAI
60
63
 
61
- llm = ChatGoogleGenerativeAI(model="gemini-pro")
64
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
62
65
  response = llm.invoke("Sing a ballad of LangChain.")
63
66
  print(response.content)
64
67
  ```
@@ -67,22 +70,30 @@ print(response.content)
67
70
 
68
71
  ## Chat Models
69
72
 
70
- The main interface for Gemini chat models is `ChatGoogleGenerativeAI`.
73
+ See the LangChain documentation for general information about [Chat Models](https://docs.langchain.com/oss/python/langchain/models).
74
+
75
+ The main interface for the Gemini chat models is `ChatGoogleGenerativeAI`.
71
76
 
72
77
  ### Multimodal Inputs
73
78
 
74
- Gemini vision models support image inputs in single messages.
79
+ Most Gemini models support image inputs.
75
80
 
76
81
  ```python
77
82
  from langchain_core.messages import HumanMessage
78
83
  from langchain_google_genai import ChatGoogleGenerativeAI
79
84
 
80
- llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
85
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
81
86
 
82
87
  message = HumanMessage(
83
88
  content=[
84
- {"type": "text", "text": "What's in this image?"},
85
- {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/200/300"},
89
+ {
90
+ "type": "text",
91
+ "text": "What's in this image?"
92
+ },
93
+ {
94
+ "type": "image_url",
95
+ "image_url": "https://picsum.photos/seed/picsum/200/300"
96
+ },
86
97
  ]
87
98
  )
88
99
 
@@ -90,7 +101,7 @@ response = llm.invoke([message])
90
101
  print(response.content)
91
102
  ```
92
103
 
93
- `image_url` can be:
104
+ `image_url` can be:
94
105
 
95
106
  - A public image URL
96
107
  - A Google Cloud Storage path (`gcs://...`)
@@ -100,38 +111,44 @@ print(response.content)
100
111
 
101
112
  ### Multimodal Outputs
102
113
 
103
- The Gemini 2.0 Flash Experimental model supports both text and inline image outputs.
114
+ Some Gemini models support both text and inline image outputs.
104
115
 
105
116
  ```python
106
117
  from langchain_google_genai import ChatGoogleGenerativeAI
107
118
 
108
- llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp-image-generation")
119
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image-preview")
109
120
 
110
121
  response = llm.invoke(
111
122
  "Generate an image of a cat and say meow",
112
123
  generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
113
124
  )
114
125
 
115
- image_base64 = response.content[0].get("image_url").get("url").split(",")[-1]
116
- meow_text = response.content[1]
126
+ image_base64 = response.content[1].get("image_url").get("url").split(",")[-1]
127
+ meow_text = response.content[0]
117
128
  print(meow_text)
129
+ # In Jupyter, display the image:
130
+ from base64 import b64decode
131
+ from IPython.display import Image, display
132
+
133
+ img_bytes = b64decode(image_base64)
134
+ display(Image(data=img_bytes))
118
135
  ```
119
136
 
120
137
  ---
121
138
 
122
139
  ### Audio Output
123
140
 
124
- ```
141
+ ```python
125
142
  from langchain_google_genai import ChatGoogleGenerativeAI
126
143
 
127
- llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-tts")
128
- # example
144
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-tts")
145
+
129
146
  response = llm.invoke(
130
147
  "Please say The quick brown fox jumps over the lazy dog",
131
148
  generation_config=dict(response_modalities=["AUDIO"]),
132
149
  )
133
150
 
134
- # Base64 encoded binary data of the image
151
+ # Base64 encoded binary data of the audio
135
152
  wav_data = response.additional_kwargs.get("audio")
136
153
  with open("output.wav", "wb") as f:
137
154
  f.write(wav_data)
@@ -141,15 +158,13 @@ with open("output.wav", "wb") as f:
141
158
 
142
159
  ### Multimodal Outputs in Chains
143
160
 
144
- You can use Gemini models in a LangChain chain:
145
-
146
161
  ```python
147
162
  from langchain_core.runnables import RunnablePassthrough
148
163
  from langchain_core.prompts import ChatPromptTemplate
149
164
  from langchain_google_genai import ChatGoogleGenerativeAI, Modality
150
165
 
151
166
  llm = ChatGoogleGenerativeAI(
152
- model="models/gemini-2.0-flash-exp-image-generation",
167
+ model="gemini-2.5-flash-image-preview",
153
168
  response_modalities=[Modality.TEXT, Modality.IMAGE],
154
169
  )
155
170
 
@@ -165,13 +180,11 @@ response = chain.invoke("cat")
165
180
 
166
181
  ### Thinking Support
167
182
 
168
- Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
169
-
170
183
  ```python
171
184
  from langchain_google_genai import ChatGoogleGenerativeAI
172
185
 
173
186
  llm = ChatGoogleGenerativeAI(
174
- model="models/gemini-2.5-flash-preview-04-17",
187
+ model="models/gemini-2.5-flash",
175
188
  thinking_budget=1024
176
189
  )
177
190
 
@@ -186,8 +199,6 @@ print("Reasoning tokens used:", reasoning_score)
186
199
 
187
200
  ## Embeddings
188
201
 
189
- You can use Gemini embeddings in LangChain:
190
-
191
202
  ```python
192
203
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
193
204
 
@@ -236,4 +247,4 @@ print("Answerable probability:", response.answerable_probability)
236
247
 
237
248
  - [LangChain Documentation](https://docs.langchain.com/)
238
249
  - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
239
- - [Gemini Model Documentation](https://ai.google.dev/)
250
+ - [Gemini Model Documentation](https://ai.google.dev/gemini-api/docs)
@@ -1,4 +1,4 @@
1
- """**LangChain Google Generative AI Integration**
1
+ """**LangChain Google Generative AI Integration**.
2
2
 
3
3
  This module integrates Google's Generative AI models, specifically the Gemini series, with the LangChain framework. It provides classes for interacting with chat models and generating embeddings, leveraging Google's advanced AI capabilities.
4
4
 
@@ -76,12 +76,12 @@ __all__ = [
76
76
  "AqaOutput",
77
77
  "ChatGoogleGenerativeAI",
78
78
  "DoesNotExistsException",
79
+ "DoesNotExistsException",
79
80
  "GenAIAqa",
80
- "GoogleGenerativeAIEmbeddings",
81
81
  "GoogleGenerativeAI",
82
+ "GoogleGenerativeAIEmbeddings",
82
83
  "GoogleVectorStore",
83
84
  "HarmBlockThreshold",
84
85
  "HarmCategory",
85
86
  "Modality",
86
- "DoesNotExistsException",
87
87
  ]
@@ -13,19 +13,17 @@ _TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
13
13
 
14
14
 
15
15
  class GoogleGenerativeAIError(Exception):
16
- """
17
- Custom exception class for errors associated with the `Google GenAI` API.
18
- """
16
+ """Custom exception class for errors associated with the `Google GenAI` API."""
19
17
 
20
18
 
21
19
  class _BaseGoogleGenerativeAI(BaseModel):
22
- """Base class for Google Generative AI LLMs"""
20
+ """Base class for Google Generative AI LLMs."""
23
21
 
24
22
  model: str = Field(
25
23
  ...,
26
24
  description="""The name of the model to use.
27
25
  Examples:
28
- - gemini-2.5-pro
26
+ - gemini-2.5-flash
29
27
  - models/text-bison-001""",
30
28
  )
31
29
  """Model name to use."""
@@ -34,28 +32,38 @@ Examples:
34
32
  )
35
33
  """Google AI API key.
36
34
  If not specified will be read from env var ``GOOGLE_API_KEY``."""
35
+
37
36
  credentials: Any = None
38
37
  "The default custom credentials (google.auth.credentials.Credentials) to use "
39
38
  "when making API calls. If not provided, credentials will be ascertained from "
40
39
  "the GOOGLE_API_KEY envvar"
40
+
41
41
  temperature: float = 0.7
42
- """Run inference with this temperature. Must be within ``[0.0, 2.0]``."""
42
+ """Run inference with this temperature. Must be within ``[0.0, 2.0]``. If unset,
43
+ will default to ``0.7``."""
44
+
43
45
  top_p: Optional[float] = None
44
46
  """Decode using nucleus sampling: consider the smallest set of tokens whose
45
- probability sum is at least ``top_p``. Must be within ``[0.0, 1.0]``."""
47
+ probability sum is at least ``top_p``. Must be within ``[0.0, 1.0]``."""
48
+
46
49
  top_k: Optional[int] = None
47
50
  """Decode using top-k sampling: consider the set of ``top_k`` most probable tokens.
48
- Must be positive."""
51
+ Must be positive."""
52
+
49
53
  max_output_tokens: Optional[int] = Field(default=None, alias="max_tokens")
50
54
  """Maximum number of tokens to include in a candidate. Must be greater than zero.
51
- If unset, will default to ``64``."""
55
+ If unset, will use the model's default value, which varies by model.
56
+ See https://ai.google.dev/gemini-api/docs/models for model-specific limits."""
57
+
52
58
  n: int = 1
53
59
  """Number of chat completions to generate for each prompt. Note that the API may
54
- not return the full ``n`` completions if duplicates are generated."""
55
- max_retries: int = 6
56
- """The maximum number of retries to make when generating."""
60
+ not return the full ``n`` completions if duplicates are generated."""
61
+
62
+ max_retries: int = Field(default=6, alias="retries")
63
+ """The maximum number of retries to make when generating. If unset, will default
64
+ to ``6``."""
57
65
 
58
- timeout: Optional[float] = None
66
+ timeout: Optional[float] = Field(default=None, alias="request_timeout")
59
67
  """The maximum number of seconds to wait for a response."""
60
68
 
61
69
  client_options: Optional[Dict] = Field(
@@ -68,6 +76,7 @@ Examples:
68
76
  transport: Optional[str] = Field(
69
77
  default=None,
70
78
  description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
79
+ alias="api_transport",
71
80
  )
72
81
  additional_headers: Optional[Dict[str, str]] = Field(
73
82
  default=None,
@@ -89,9 +98,9 @@ Examples:
89
98
  )
90
99
 
91
100
  safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
92
- """The default safety settings to use for all generations.
93
-
94
- For example:
101
+ """The default safety settings to use for all generations.
102
+
103
+ For example:
95
104
 
96
105
  .. code-block:: python
97
106
  from google.generativeai.types.safety_types import HarmBlockThreshold, HarmCategory
@@ -127,6 +136,7 @@ def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]:
127
136
  Args:
128
137
  module (Optional[str]):
129
138
  Optional. The module for a custom user agent header.
139
+
130
140
  Returns:
131
141
  Tuple[str, str]
132
142
  """
@@ -148,11 +158,13 @@ def get_client_info(module: Optional[str] = None) -> "ClientInfo":
148
158
  Args:
149
159
  module (Optional[str]):
150
160
  Optional. The module for a custom user agent header.
161
+
151
162
  Returns:
152
163
  ``google.api_core.gapic_v1.client_info.ClientInfo``
153
164
  """
154
165
  client_library_version, user_agent = get_user_agent(module)
155
- return ClientInfo(
166
+ # TODO: remove ignore once google-auth has types.
167
+ return ClientInfo( # type: ignore[no-untyped-call]
156
168
  client_library_version=client_library_version,
157
169
  user_agent=user_agent,
158
170
  )