langchain-google-genai 2.1.10__py3-none-any.whl → 2.1.12__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release.
This version of langchain-google-genai might be problematic.
- langchain_google_genai/__init__.py +3 -3
- langchain_google_genai/_common.py +28 -17
- langchain_google_genai/_function_utils.py +59 -59
- langchain_google_genai/_genai_extension.py +35 -21
- langchain_google_genai/_image_utils.py +10 -9
- langchain_google_genai/chat_models.py +459 -254
- langchain_google_genai/embeddings.py +62 -15
- langchain_google_genai/genai_aqa.py +15 -15
- langchain_google_genai/google_vector_store.py +26 -16
- langchain_google_genai/llms.py +9 -8
- {langchain_google_genai-2.1.10.dist-info → langchain_google_genai-2.1.12.dist-info}/METADATA +43 -39
- langchain_google_genai-2.1.12.dist-info/RECORD +17 -0
- {langchain_google_genai-2.1.10.dist-info → langchain_google_genai-2.1.12.dist-info}/WHEEL +1 -1
- langchain_google_genai-2.1.12.dist-info/entry_points.txt +4 -0
- langchain_google_genai-2.1.10.dist-info/RECORD +0 -16
- {langchain_google_genai-2.1.10.dist-info → langchain_google_genai-2.1.12.dist-info/licenses}/LICENSE +0 -0
langchain_google_genai/embeddings.py
CHANGED

@@ -1,3 +1,4 @@
+import asyncio
 import re
 import string
 from typing import Any, Dict, List, Optional
@@ -26,6 +27,14 @@ _MAX_TOKENS_PER_BATCH = 20000
 _DEFAULT_BATCH_SIZE = 100
 
 
+def _is_event_loop_running() -> bool:
+    try:
+        asyncio.get_running_loop()
+        return True
+    except RuntimeError:
+        return False
+
+
 class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
     """`Google Generative AI Embeddings`.
 
@@ -107,15 +116,48 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             client_options=self.client_options,
             transport=self.transport,
         )
-        self.async_client = build_generative_async_service(
-            credentials=self.credentials,
-            api_key=google_api_key,
-            client_info=client_info,
-            client_options=self.client_options,
-            transport=self.transport,
-        )
+        # Only initialize async client if there's an event loop running
+        # to avoid RuntimeError during synchronous initialization
+        if _is_event_loop_running():
+            # async clients don't support "rest" transport
+            transport = self.transport
+            if transport == "rest":
+                transport = "grpc_asyncio"
+            self.async_client = build_generative_async_service(
+                credentials=self.credentials,
+                api_key=google_api_key,
+                client_info=client_info,
+                client_options=self.client_options,
+                transport=transport,
+            )
+        else:
+            self.async_client = None
         return self
 
+    @property
+    def _async_client(self) -> Any:
+        """Get or create the async client when needed."""
+        if self.async_client is None:
+            if isinstance(self.google_api_key, SecretStr):
+                google_api_key: Optional[str] = self.google_api_key.get_secret_value()
+            else:
+                google_api_key = self.google_api_key
+
+            client_info = get_client_info("GoogleGenerativeAIEmbeddings")
+            # async clients don't support "rest" transport
+            transport = self.transport
+            if transport == "rest":
+                transport = "grpc_asyncio"
+
+            self.async_client = build_generative_async_service(
+                credentials=self.credentials,
+                api_key=google_api_key,
+                client_info=client_info,
+                client_options=self.client_options,
+                transport=transport,
+            )
+        return self.async_client
+
     @staticmethod
     def _split_by_punctuation(text: str) -> List[str]:
         """Splits a string by punctuation and whitespace characters."""
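The guard above, together with the new `_is_event_loop_running` helper, works because `asyncio.get_running_loop()` raises `RuntimeError` when no loop is executing, which is exactly the state during synchronous validator execution. A standalone sketch of that behavior (illustration only, not part of the package):

```python
import asyncio


def is_event_loop_running() -> bool:
    # Same shape as the package's new _is_event_loop_running() helper:
    # get_running_loop() only succeeds while a loop is actually executing.
    try:
        asyncio.get_running_loop()
        return True
    except RuntimeError:
        return False


print(is_event_loop_running())  # False: plain synchronous code, no loop


async def main() -> None:
    print(is_event_loop_running())  # True: called from inside a running loop


asyncio.run(main())
```

The fallback `_async_client` property below means callers of the async embedding methods still get a client lazily, by which point a loop is guaranteed to be running.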
@@ -186,14 +228,13 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         output_dimensionality: Optional[int] = None,
     ) -> EmbedContentRequest:
         task_type = self.task_type or task_type or "RETRIEVAL_DOCUMENT"
-        request = EmbedContentRequest(
+        return EmbedContentRequest(
             content={"parts": [{"text": text}]},
             model=self.model,
             task_type=task_type.upper(),
             title=title,
             output_dimensionality=output_dimensionality,
         )
-        return request
 
     def embed_documents(
         self,
@@ -215,6 +256,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             titles: An optional list of titles for texts provided.
                 Only applicable when TaskType is ``'RETRIEVAL_DOCUMENT'``.
             output_dimensionality: Optional `reduced dimension for the output embedding <https://ai.google.dev/api/embeddings#EmbedContentRequest>`__.
+
         Returns:
             List of embeddings, one for each text.
         """

@@ -244,7 +286,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
                     BatchEmbedContentsRequest(requests=requests, model=self.model)
                 )
             except Exception as e:
-                raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+                msg = f"Error embedding content: {e}"
+                raise GoogleGenerativeAIError(msg) from e
             embeddings.extend([list(e.values) for e in result.embeddings])
         return embeddings
 

@@ -280,7 +323,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             )
             result: EmbedContentResponse = self.client.embed_content(request)
         except Exception as e:
-            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+            msg = f"Error embedding content: {e}"
+            raise GoogleGenerativeAIError(msg) from e
         return list(result.embedding.values)
 
     async def aembed_documents(

@@ -303,6 +347,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             titles: An optional list of titles for texts provided.
                 Only applicable when TaskType is ``'RETRIEVAL_DOCUMENT'``.
             output_dimensionality: Optional `reduced dimension for the output embedding <https://ai.google.dev/api/embeddings#EmbedContentRequest>`__.
+
         Returns:
             List of embeddings, one for each text.
         """

@@ -328,11 +373,12 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         ]
 
         try:
-            result = await self.async_client.batch_embed_contents(
+            result = await self._async_client.batch_embed_contents(
                 BatchEmbedContentsRequest(requests=requests, model=self.model)
             )
         except Exception as e:
-            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+            msg = f"Error embedding content: {e}"
+            raise GoogleGenerativeAIError(msg) from e
         embeddings.extend([list(e.values) for e in result.embeddings])
         return embeddings
 

@@ -366,9 +412,10 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
                 title=title,
                 output_dimensionality=output_dimensionality,
             )
-            result: EmbedContentResponse = await self.async_client.embed_content(
+            result: EmbedContentResponse = await self._async_client.embed_content(
                 request
             )
         except Exception as e:
-            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+            msg = f"Error embedding content: {e}"
+            raise GoogleGenerativeAIError(msg) from e
         return list(result.embedding.values)
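A pattern repeated throughout this release is hoisting the exception text into a `msg` variable before raising. This matches the style enforced by Ruff's flake8-errmsg (EM101/EM102) and TRY003 rules, so a stricter lint configuration is the likely driver, though the changelog is not part of this diff. The refactor's shape as a self-contained sketch:

```python
class GoogleGenerativeAIError(Exception):
    """Stand-in for the package's error type."""


def old_style(e: Exception) -> None:
    # f-string built inline in the raise (flagged by EM102 / TRY003)
    raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e


def new_style(e: Exception) -> None:
    # Message assigned first; the raise statement stays minimal.
    msg = f"Error embedding content: {e}"
    raise GoogleGenerativeAIError(msg) from e
```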
langchain_google_genai/genai_aqa.py
CHANGED

@@ -1,9 +1,8 @@
 """Google GenerativeAI Attributed Question and Answering (AQA) service.
 
-The GenAI Semantic AQA API is a managed end to end service that allows
-developers to create responses grounded on specified passages based on a
-user query. For more information visit:
-https://developers.generativeai.google/guide
+The GenAI Semantic AQA API is a managed end to end service that allows developers to
+create responses grounded on specified passages based on a user query. For more
+information visit: https://developers.generativeai.google/guide
 """
 
 from typing import Any, List, Optional

@@ -21,8 +20,8 @@ class AqaInput(BaseModel):
 
     Attributes:
         prompt: The user's inquiry.
-        source_passages: A list of passage that the LLM should use only to
-            answer the user's inquiry.
+        source_passages: A list of passage that the LLM should use only to answer the
+            user's inquiry.
     """
 
     prompt: str

@@ -34,10 +33,10 @@ class AqaOutput(BaseModel):
 
     Attributes:
         answer: The answer to the user's inquiry.
-        attributed_passages: A list of passages that the LLM used to construct
-            the answer.
-        answerable_probability: The probability of the question being answered
-            from the provided passages.
+        attributed_passages: A list of passages that the LLM used to construct the
+            answer.
+        answerable_probability: The probability of the question being answered from the
+            provided passages.
     """
 
     answer: str
@@ -56,10 +55,12 @@ class _AqaModel(BaseModel):
     def __init__(
         self,
         answer_style: int = genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
-        safety_settings: List[genai.SafetySetting] = [],
+        safety_settings: Optional[List[genai.SafetySetting]] = None,
         temperature: Optional[float] = None,
         **kwargs: Any,
     ) -> None:
+        if safety_settings is None:
+            safety_settings = []
        super().__init__(**kwargs)
        self._client = genaix.build_generative_service()
        self._answer_style = answer_style
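This hunk fixes a classic Python pitfall: a mutable default (`= []`) is created once, when the function is defined, and shared across every call. The `None`-sentinel idiom used here restores a fresh list per call. A minimal illustration (hypothetical names):

```python
from typing import List, Optional


def buggy(settings: List[str] = []) -> List[str]:
    # All calls append to the one list created at definition time.
    settings.append("BLOCK_NONE")
    return settings


def fixed(settings: Optional[List[str]] = None) -> List[str]:
    if settings is None:
        settings = []  # fresh list per call, as in the patched __init__
    settings.append("BLOCK_NONE")
    return settings


print(buggy())  # ['BLOCK_NONE']
print(buggy())  # ['BLOCK_NONE', 'BLOCK_NONE'], state leaked between calls
print(fixed())  # ['BLOCK_NONE']
print(fixed())  # ['BLOCK_NONE'], independent calls
```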
@@ -84,9 +85,9 @@ class _AqaModel(BaseModel):
 class GenAIAqa(RunnableSerializable[AqaInput, AqaOutput]):
     """Google's Attributed Question and Answering service.
 
-    Given a user's query and a list of passages, Google's server will return
-    a response that is grounded to the provided list of passages. It will
-    not base the response on parametric memory.
+    Given a user's query and a list of passages, Google's server will return a response
+    that is grounded to the provided list of passages. It will not base the response on
+    parametric memory.
 
     Attributes:
         answer_style: keyword-only argument. See

@@ -120,7 +121,6 @@ class GenAIAqa(RunnableSerializable[AqaInput, AqaOutput]):
         self, input: AqaInput, config: Optional[RunnableConfig] = None, **kwargs: Any
     ) -> AqaOutput:
         """Generates a grounded response using the provided passages."""
-
         response = self._client.generate_answer(
             prompt=input.prompt, passages=input.source_passages
         )
langchain_google_genai/google_vector_store.py
CHANGED

@@ -1,18 +1,17 @@
 """Google Generative AI Vector Store.
 
-The GenAI Semantic Retriever API is a managed end-to-end service that allows
-developers to create a corpus of documents to perform semantic search on
-related passages given a user query. For more information visit:
-https://developers.generativeai.google/guide
+The GenAI Semantic Retriever API is a managed end-to-end service that allows developers
+to create a corpus of documents to perform semantic search on related passages given a
+user query. For more information visit: https://developers.generativeai.google/guide
 """
 
 import asyncio
+from collections.abc import Iterable
 from functools import partial
 from typing import (
     Any,
     Callable,
     Dict,
-    Iterable,
     List,
     Optional,
     Tuple,
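The `Iterable` move tracks PEP 585: since Python 3.9 the `collections.abc` types and builtin containers are subscriptable directly, and the `typing` aliases (`typing.Iterable`, `typing.List`, ...) are deprecated. The `List[str]` to `list[str]` changes in `llms.py` below are the same cleanup. Equivalent annotations, sketched:

```python
from collections.abc import Iterable, Iterator  # preferred since Python 3.9


def chunk_ids(ids: Iterable[str]) -> list[str]:
    # list[str] is the PEP 585 builtin generic; typing.List is not needed.
    return [i.strip() for i in ids]


def count_up(limit: int) -> Iterator[int]:
    yield from range(limit)


print(chunk_ids([" a ", "b "]))  # ['a', 'b']
print(list(count_up(3)))         # [0, 1, 2]
```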
@@ -94,20 +93,22 @@ class _SemanticRetriever(BaseModel):
         document_id: Optional[str] = None,
     ) -> List[str]:
         if self.name.document_id is None and document_id is None:
-            raise NotImplementedError(
+            msg = (
                 "Adding texts to a corpus directly is not supported. "
                 "Please provide a document ID under the corpus first. "
                 "Then add the texts to the document."
             )
+            raise NotImplementedError(msg)
         if (
             self.name.document_id is not None
             and document_id is not None
             and self.name.document_id != document_id
         ):
-            raise NotImplementedError(
+            msg = (
                 f"Parameter `document_id` {document_id} does not match the "
                 f"vector store's `document_id` {self.name.document_id}"
             )
+            raise NotImplementedError(msg)
         assert self.name.document_id or document_id is not None
         new_document_id = self.name.document_id or document_id or ""
 

@@ -115,10 +116,11 @@ class _SemanticRetriever(BaseModel):
         if metadatas is None:
             metadatas = [{} for _ in texts]
         if len(texts) != len(metadatas):
-            raise ValueError(
+            msg = (
                 f"metadatas's length {len(metadatas)} and "
                 f"texts's length {len(texts)} are mismatched"
             )
+            raise ValueError(msg)
 
         chunks = genaix.batch_create_chunk(
             corpus_id=self.name.corpus_id,
@@ -182,7 +184,8 @@ def _delete_chunk(
 ) -> None:
     if chunk_id is not None:
         if document_id is None:
-            raise ValueError(f"Chunk {chunk_id} requires a document ID")
+            msg = f"Chunk {chunk_id} requires a document ID"
+            raise ValueError(msg)
         genaix.delete_chunk(
             corpus_id=corpus_id,
             document_id=document_id,

@@ -249,7 +252,7 @@ class GoogleVectorStore(VectorStore):
 
     def __init__(
         self, *, corpus_id: str, document_id: Optional[str] = None, **kwargs: Any
-    ):
+    ) -> None:
         """Returns an existing Google Semantic Retriever corpus or document.
 
         If just the corpus ID is provided, the vector store operates over all

@@ -353,9 +356,8 @@ class GoogleVectorStore(VectorStore):
             Google server.
         """
         if corpus_id is None or document_id is None:
-            raise NotImplementedError(
-                "Must provide an existing corpus ID and document ID"
-            )
+            msg = "Must provide an existing corpus ID and document ID"
+            raise NotImplementedError(msg)
 
         doc_store = cls(corpus_id=corpus_id, document_id=document_id, **kwargs)
         doc_store.add_texts(texts, metadatas)

@@ -428,7 +430,7 @@ class GoogleVectorStore(VectorStore):
     ]
 
     def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
-        """Delete chunnks.
+        """Delete chunks.
 
         Note that the "ids" are not corpus ID or document ID. Rather, these
         are the entity names returned by `add_texts`.

@@ -441,13 +443,21 @@ class GoogleVectorStore(VectorStore):
     async def adelete(
         self, ids: Optional[List[str]] = None, **kwargs: Any
     ) -> Optional[bool]:
+        """Delete chunks asynchronously.
+
+        Note that the "ids" are not corpus ID or document ID. Rather, these
+        are the entity names returned by `add_texts`.
+
+        Returns:
+            True if successful. Otherwise, you should get an exception anyway.
+        """
         return await asyncio.get_running_loop().run_in_executor(
             None, partial(self.delete, **kwargs), ids
         )
 
     def _select_relevance_score_fn(self) -> Callable[[float], float]:
-        """
-        TODO: Check with the team about this!
+        """TODO: Check with the team about this!
+
         The underlying vector store already returns a "score proper",
         i.e. one in [0, 1] where higher means more *similar*.
         """
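`adelete` above remains a thin async wrapper that offloads the blocking `delete` to the default thread-pool executor; only the docstring is new in this release. The delegation pattern in isolation, with a stand-in `delete` (hypothetical, not the package's API):

```python
import asyncio
from functools import partial
from typing import Any, List, Optional


def delete(ids: Optional[List[str]] = None, *, force: bool = False) -> bool:
    # Stand-in for a blocking call such as VectorStore.delete.
    print(f"deleting {ids} (force={force})")
    return True


async def adelete(ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
    # partial() binds the keyword arguments; `ids` is passed positionally
    # through run_in_executor, mirroring the wrapper in the diff above.
    return await asyncio.get_running_loop().run_in_executor(
        None, partial(delete, **kwargs), ids
    )


print(asyncio.run(adelete(["chunk-1"], force=True)))  # True
```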
langchain_google_genai/llms.py
CHANGED

@@ -1,8 +1,9 @@
 from __future__ import annotations
 
 import logging
+from collections.abc import Iterator
 from difflib import get_close_matches
-from typing import Any, Iterator, List, Optional
+from typing import Any, Optional
 
 from langchain_core.callbacks import (
     CallbackManagerForLLMRun,

@@ -29,6 +30,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     .. code-block:: python
 
         from langchain_google_genai import GoogleGenerativeAI
+
         llm = GoogleGenerativeAI(model="gemini-2.5-pro")
     """
 

@@ -41,7 +43,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         """Needed for arg validation."""
         # Get all valid field names, including aliases
         valid_fields = set()
-        for field_name, field_info in self.model_fields.items():
+        for field_name, field_info in self.__class__.model_fields.items():
             valid_fields.add(field_name)
             if hasattr(field_info, "alias") and field_info.alias is not None:
                 valid_fields.add(field_info.alias)
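Reading `model_fields` from `self.__class__` rather than the instance follows Pydantic's deprecation of instance-level `model_fields` access (warned since v2.11; the attribute is class-level metadata). A minimal sketch, assuming Pydantic v2 and a hypothetical model:

```python
from pydantic import BaseModel, Field


class FakeLLMConfig(BaseModel):
    model: str = Field(alias="model_name")
    temperature: float = 0.7


valid_fields = set()
# Access model_fields on the class, not an instance, to avoid the
# Pydantic 2.11+ deprecation warning.
for field_name, field_info in FakeLLMConfig.model_fields.items():
    valid_fields.add(field_name)
    if field_info.alias is not None:
        valid_fields.add(field_info.alias)

print(sorted(valid_fields))  # ['model', 'model_name', 'temperature']
```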
@@ -62,7 +64,6 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validates params and passes them to google-generativeai package."""
-
         if not any(self.model.startswith(prefix) for prefix in ("models/",)):
             self.model = f"models/{self.model}"
 

@@ -84,7 +85,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         return self
 
     def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         ls_params = super()._get_ls_params(stop=stop, **kwargs)

@@ -104,8 +105,8 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
 
     def _generate(
         self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:

@@ -123,7 +124,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
                     text=g.message.content,
                     generation_info={
                         **g.generation_info,
-
+                        "usage_metadata": g.message.usage_metadata,
                     },
                 )
                 for g in chat_result.generations

@@ -134,7 +135,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
{langchain_google_genai-2.1.10.dist-info → langchain_google_genai-2.1.12.dist-info}/METADATA
RENAMED

@@ -1,22 +1,16 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.10
+Version: 2.1.12
 Summary: An integration package connecting Google's genai package and LangChain
-Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
-Requires-Python: >=3.9,<4.0
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: filetype (>=1.2.0,<2.0.0)
-Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
-Requires-Dist: langchain-core (>=0.3.75,<0.4.0)
-Requires-Dist: pydantic (>=2,<3)
-Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
+Project-URL: repository, https://github.com/langchain-ai/langchain-google
+Requires-Python: >=3.9
+Requires-Dist: langchain-core>=0.3.75
+Requires-Dist: google-ai-generativelanguage<1,>=0.7
+Requires-Dist: pydantic<3,>=2
+Requires-Dist: filetype<2,>=1.2
 Description-Content-Type: text/markdown
 
 # langchain-google-genai
@@ -62,6 +56,9 @@ This package provides LangChain support for Google Gemini models
 
 ```bash
 pip install -U langchain-google-genai
+
+# or, with uv:
+uv add langchain-google-genai
 ````
 
 ---
@@ -79,7 +76,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 response = llm.invoke("Sing a ballad of LangChain.")
 print(response.content)
 ```

@@ -88,22 +85,30 @@ print(response.content)
 
 ## Chat Models
 
-
+See the LangChain documentation for general information about [Chat Models](https://docs.langchain.com/oss/python/langchain/models).
+
+The main interface for the Gemini chat models is `ChatGoogleGenerativeAI`.
 
 ### Multimodal Inputs
 
-Gemini
+Most Gemini models support image inputs.
 
 ```python
 from langchain_core.messages import HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="gemini-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 
 message = HumanMessage(
     content=[
-        {
-
+        {
+            "type": "text",
+            "text": "What's in this image?"
+        },
+        {
+            "type": "image_url",
+            "image_url": "https://picsum.photos/seed/picsum/200/300"
+        },
     ]
 )
 
@@ -111,7 +116,7 @@ response = llm.invoke([message])
 print(response.content)
 ```
 
-
+`image_url` can be:
 
 - A public image URL
 - A Google Cloud Storage path (`gcs://...`)

@@ -121,38 +126,44 @@ print(response.content)
 
 ### Multimodal Outputs
 
-
+Some Gemini models supports both text and inline image outputs.
 
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image-preview")
 
 response = llm.invoke(
     "Generate an image of a cat and say meow",
     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
 )
 
-image_base64 = response.content[
-meow_text = response.content[
+image_base64 = response.content[1].get("image_url").get("url").split(",")[-1]
+meow_text = response.content[0]
 print(meow_text)
+# In Jupyter, display the image:
+from base64 import b64decode
+from IPython.display import Image, display
+
+img_bytes = b64decode(image_base64)
+display(Image(data=img_bytes))
 ```
 
 ---
 
 ### Audio Output
 
-```
+```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-llm = ChatGoogleGenerativeAI(model="
-
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-tts")
+
 response = llm.invoke(
     "Please say The quick brown fox jumps over the lazy dog",
     generation_config=dict(response_modalities=["AUDIO"]),
 )
 
-# Base64 encoded binary data of the
+# Base64 encoded binary data of the audio
 wav_data = response.additional_kwargs.get("audio")
 with open("output.wav", "wb") as f:
     f.write(wav_data)
@@ -162,15 +173,13 @@ with open("output.wav", "wb") as f:
 
 ### Multimodal Outputs in Chains
 
-You can use Gemini models in a LangChain chain:
-
 ```python
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_google_genai import ChatGoogleGenerativeAI, Modality
 
 llm = ChatGoogleGenerativeAI(
-    model="
+    model="gemini-2.5-flash-image-preview",
     response_modalities=[Modality.TEXT, Modality.IMAGE],
 )
 

@@ -186,13 +195,11 @@ response = chain.invoke("cat")
 
 ### Thinking Support
 
-Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
-
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 
 llm = ChatGoogleGenerativeAI(
-    model="models/gemini-2.5-flash
+    model="models/gemini-2.5-flash",
     thinking_budget=1024
 )
 

@@ -207,8 +214,6 @@ print("Reasoning tokens used:", reasoning_score)
 
 ## Embeddings
 
-You can use Gemini embeddings in LangChain:
-
 ```python
 from langchain_google_genai import GoogleGenerativeAIEmbeddings
 

@@ -257,5 +262,4 @@ print("Answerable probability:", response.answerable_probability)
 
 - [LangChain Documentation](https://docs.langchain.com/)
 - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
-- [Gemini Model Documentation](https://ai.google.dev/)
-
+- [Gemini Model Documentation](https://ai.google.dev/gemini-api/docs)
langchain_google_genai-2.1.12.dist-info/RECORD
ADDED

@@ -0,0 +1,17 @@
+langchain_google_genai-2.1.12.dist-info/METADATA,sha256=cIpzlJRZXFdHG-aZmSE6MJUmSr-JAZzH7pDvU30nzOs,7051
+langchain_google_genai-2.1.12.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+langchain_google_genai-2.1.12.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_google_genai-2.1.12.dist-info/licenses/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
+langchain_google_genai/__init__.py,sha256=yVc4wCHzajs8mvDVQLGmkxxivzApvdu7jTytrLr_i7g,2891
+langchain_google_genai/_common.py,sha256=dzzcJeDVnUUI_8Y20F40SZ5NjW6RHD4xJu74naYK58k,6192
+langchain_google_genai/_enums.py,sha256=Zj3BXXLlkm_UybegCi6fLsfFhriJCt_LAJvgatgPWQ0,252
+langchain_google_genai/_function_utils.py,sha256=Pur365t4M8O5dkrnFkDc_jHiPfuI_C7ucSxvxA3tioM,22254
+langchain_google_genai/_genai_extension.py,sha256=LiNa7b6BZp41meJMtgaSEbaGJa59BY3UMCRMHO_CCtQ,21467
+langchain_google_genai/_image_utils.py,sha256=iJ3KrFWQhA0UwsT8ryAFAmNM7-_2ahHAppT8t9WFZGQ,5304
+langchain_google_genai/chat_models.py,sha256=G619buVU9Sef33Ubk9JL8gYLd5y_jsGMgFXeuPq7wJ0,85067
+langchain_google_genai/embeddings.py,sha256=Kar6nHpAJ0rWQZ0nc7_anWETlwCIovd4DhieIviGNQ8,16687
+langchain_google_genai/genai_aqa.py,sha256=NVW8wOWxU7T6VVshFrFlFHa5HzEPAedrgh1fOwFH7Og,4380
+langchain_google_genai/google_vector_store.py,sha256=x4OcXkcYWTubu-AESNzNDJG_dbge16GeNCAOCsBoc4g,16537
+langchain_google_genai/llms.py,sha256=P_e_ImBWexhKqc7LZPo6tQbwLH2-ljr6oqpE5M27TRc,5755
+langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_google_genai-2.1.12.dist-info/RECORD,,
@@ -1,16 +0,0 @@
|
|
|
1
|
-
langchain_google_genai/__init__.py,sha256=UR61QZZ_8TkdR7yuaKLzfB8c8ZBJdPYqJ8f7Cc5TbFA,2890
|
|
2
|
-
langchain_google_genai/_common.py,sha256=KyCvLpY7iiMAXdtwZDtxEU2C1iTtTGoXsEJ7-7a98cM,5942
|
|
3
|
-
langchain_google_genai/_enums.py,sha256=Zj3BXXLlkm_UybegCi6fLsfFhriJCt_LAJvgatgPWQ0,252
|
|
4
|
-
langchain_google_genai/_function_utils.py,sha256=VX1hj_hN3o8HGmLIiucGL7ggqrqA0Usgu4p_yvb240Q,21994
|
|
5
|
-
langchain_google_genai/_genai_extension.py,sha256=cK1J96xxy_NGFYGYJQVC8q8omURz3OF3KAZJnMhIF18,20795
|
|
6
|
-
langchain_google_genai/_image_utils.py,sha256=wRd-pf190F-GelSl_qPIRy8XifrBNvqTdGaHr63azlQ,5216
|
|
7
|
-
langchain_google_genai/chat_models.py,sha256=CyJfChuJOVq7nrjnwEE4WtUy9hPd40bRxhw4QlpTkds,79311
|
|
8
|
-
langchain_google_genai/embeddings.py,sha256=rLBb2V0j_NkflUdRE01e9Lxw-3dl94OywbyRCXJXRvI,15078
|
|
9
|
-
langchain_google_genai/genai_aqa.py,sha256=qB6h3-BSXqe0YLR3eeVllYzmNKK6ofI6xJLdBahUVZo,4300
|
|
10
|
-
langchain_google_genai/google_vector_store.py,sha256=4wvhIiOmc3Fo046FyafPmT9NBCLek-9bgluvuTfrbpQ,16148
|
|
11
|
-
langchain_google_genai/llms.py,sha256=heRbdOM9Qav1htRromaMyub-KjPYJl7gvLLFzbZWsmg,5728
|
|
12
|
-
langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
13
|
-
langchain_google_genai-2.1.10.dist-info/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
|
|
14
|
-
langchain_google_genai-2.1.10.dist-info/METADATA,sha256=OJSsO9We9B62916uCt9AEfmaiypNgIt8KcCU-N7N08A,7180
|
|
15
|
-
langchain_google_genai-2.1.10.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
|
|
16
|
-
langchain_google_genai-2.1.10.dist-info/RECORD,,
|
{langchain_google_genai-2.1.10.dist-info → langchain_google_genai-2.1.12.dist-info/licenses}/LICENSE
RENAMED

File without changes