langchain-google-genai 2.1.11-py3-none-any.whl → 2.1.12-py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.

--- a/langchain_google_genai/embeddings.py
+++ b/langchain_google_genai/embeddings.py
@@ -228,14 +228,13 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         output_dimensionality: Optional[int] = None,
     ) -> EmbedContentRequest:
         task_type = self.task_type or task_type or "RETRIEVAL_DOCUMENT"
-        request = EmbedContentRequest(
+        return EmbedContentRequest(
             content={"parts": [{"text": text}]},
             model=self.model,
             task_type=task_type.upper(),
             title=title,
             output_dimensionality=output_dimensionality,
         )
-        return request

     def embed_documents(
         self,
@@ -257,6 +256,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             titles: An optional list of titles for texts provided.
                 Only applicable when TaskType is ``'RETRIEVAL_DOCUMENT'``.
             output_dimensionality: Optional `reduced dimension for the output embedding <https://ai.google.dev/api/embeddings#EmbedContentRequest>`__.
+
         Returns:
             List of embeddings, one for each text.
         """
@@ -286,7 +286,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
                     BatchEmbedContentsRequest(requests=requests, model=self.model)
                 )
             except Exception as e:
-                raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+                msg = f"Error embedding content: {e}"
+                raise GoogleGenerativeAIError(msg) from e
             embeddings.extend([list(e.values) for e in result.embeddings])
         return embeddings

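The error-handling rewrites throughout this file all follow the same pattern, the one enforced by linters such as Ruff's EM rules (EM101/EM102): assign the message to a variable first, then raise. The f-string no longer appears inside the `raise` statement itself, so a traceback shows `raise GoogleGenerativeAIError(msg)` instead of repeating the whole interpolated expression. A minimal self-contained sketch of the before/after; `fetch_embedding` is a hypothetical stand-in for the real client call:

```python
def fetch_embedding(text: str) -> list:
    """Hypothetical stand-in for the real embedding client call."""
    raise ConnectionError("backend unavailable")


def embed_before(text: str) -> list:
    try:
        return fetch_embedding(text)
    except Exception as e:
        # Flagged by Ruff EM102: the f-string is embedded in the raise.
        raise RuntimeError(f"Error embedding content: {e}") from e


def embed_after(text: str) -> list:
    try:
        return fetch_embedding(text)
    except Exception as e:
        # Message built first; `from e` keeps the original exception
        # chained as the cause, exactly as in the package's code.
        msg = f"Error embedding content: {e}"
        raise RuntimeError(msg) from e


try:
    embed_after("hello")
except RuntimeError as err:
    print(err)  # Error embedding content: backend unavailable
```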
@@ -322,7 +323,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             )
             result: EmbedContentResponse = self.client.embed_content(request)
         except Exception as e:
-            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+            msg = f"Error embedding content: {e}"
+            raise GoogleGenerativeAIError(msg) from e
         return list(result.embedding.values)

     async def aembed_documents(
@@ -345,6 +347,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
             titles: An optional list of titles for texts provided.
                 Only applicable when TaskType is ``'RETRIEVAL_DOCUMENT'``.
             output_dimensionality: Optional `reduced dimension for the output embedding <https://ai.google.dev/api/embeddings#EmbedContentRequest>`__.
+
         Returns:
             List of embeddings, one for each text.
         """
@@ -374,7 +377,8 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
                     BatchEmbedContentsRequest(requests=requests, model=self.model)
                 )
             except Exception as e:
-                raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+                msg = f"Error embedding content: {e}"
+                raise GoogleGenerativeAIError(msg) from e
             embeddings.extend([list(e.values) for e in result.embeddings])
         return embeddings

@@ -412,5 +416,6 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
                 request
             )
         except Exception as e:
-            raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
+            msg = f"Error embedding content: {e}"
+            raise GoogleGenerativeAIError(msg) from e
         return list(result.embedding.values)
--- a/langchain_google_genai/genai_aqa.py
+++ b/langchain_google_genai/genai_aqa.py
@@ -1,9 +1,8 @@
 """Google GenerativeAI Attributed Question and Answering (AQA) service.

-The GenAI Semantic AQA API is a managed end to end service that allows
-developers to create responses grounded on specified passages based on
-a user query. For more information visit:
-https://developers.generativeai.google/guide
+The GenAI Semantic AQA API is a managed end to end service that allows developers to
+create responses grounded on specified passages based on a user query. For more
+information visit: https://developers.generativeai.google/guide
 """

 from typing import Any, List, Optional
@@ -21,8 +20,8 @@ class AqaInput(BaseModel):

     Attributes:
         prompt: The user's inquiry.
-        source_passages: A list of passage that the LLM should use only to
-            answer the user's inquiry.
+        source_passages: A list of passage that the LLM should use only to answer the
+            user's inquiry.
     """

     prompt: str
@@ -34,10 +33,10 @@ class AqaOutput(BaseModel):

     Attributes:
         answer: The answer to the user's inquiry.
-        attributed_passages: A list of passages that the LLM used to construct
-            the answer.
-        answerable_probability: The probability of the question being answered
-            from the provided passages.
+        attributed_passages: A list of passages that the LLM used to construct the
+            answer.
+        answerable_probability: The probability of the question being answered from the
+            provided passages.
     """

     answer: str
@@ -56,10 +55,12 @@ class _AqaModel(BaseModel):
     def __init__(
         self,
         answer_style: int = genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
-        safety_settings: List[genai.SafetySetting] = [],
+        safety_settings: Optional[List[genai.SafetySetting]] = None,
        temperature: Optional[float] = None,
         **kwargs: Any,
     ) -> None:
+        if safety_settings is None:
+            safety_settings = []
         super().__init__(**kwargs)
         self._client = genaix.build_generative_service()
         self._answer_style = answer_style
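The `safety_settings` change above removes a mutable default argument: `List[...] = []` creates one list when the function is defined, and that single object is shared by every call that omits the parameter. The `None` sentinel plus the in-body check gives each call its own fresh list. A standalone demonstration of the pitfall, with purely illustrative names:

```python
from typing import Optional


def record_bad(item: int, log: list = []) -> list:
    # The [] default is evaluated once at definition time, so every
    # call that omits `log` mutates the same shared list object.
    log.append(item)
    return log


def record_good(item: int, log: Optional[list] = None) -> list:
    # A fresh list is created on each call that omits `log`.
    if log is None:
        log = []
    log.append(item)
    return log


print(record_bad(1), record_bad(2))    # [1, 2] [1, 2] -- state leaked
print(record_good(1), record_good(2))  # [1] [2]
```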
@@ -84,9 +85,9 @@ class _AqaModel(BaseModel):
 class GenAIAqa(RunnableSerializable[AqaInput, AqaOutput]):
     """Google's Attributed Question and Answering service.

-    Given a user's query and a list of passages, Google's server will return
-    a response that is grounded to the provided list of passages. It will not
-    base the response on parametric memory.
+    Given a user's query and a list of passages, Google's server will return a response
+    that is grounded to the provided list of passages. It will not base the response on
+    parametric memory.

     Attributes:
         answer_style: keyword-only argument. See
@@ -120,7 +121,6 @@ class GenAIAqa(RunnableSerializable[AqaInput, AqaOutput]):
         self, input: AqaInput, config: Optional[RunnableConfig] = None, **kwargs: Any
     ) -> AqaOutput:
         """Generates a grounded response using the provided passages."""
-
         response = self._client.generate_answer(
             prompt=input.prompt, passages=input.source_passages
         )
--- a/langchain_google_genai/google_vector_store.py
+++ b/langchain_google_genai/google_vector_store.py
@@ -1,18 +1,17 @@
 """Google Generative AI Vector Store.

-The GenAI Semantic Retriever API is a managed end-to-end service that allows
-developers to create a corpus of documents to perform semantic search on
-related passages given a user query. For more information visit:
-https://developers.generativeai.google/guide
+The GenAI Semantic Retriever API is a managed end-to-end service that allows developers
+to create a corpus of documents to perform semantic search on related passages given a
+user query. For more information visit: https://developers.generativeai.google/guide
 """

 import asyncio
+from collections.abc import Iterable
 from functools import partial
 from typing import (
     Any,
     Callable,
     Dict,
-    Iterable,
     List,
     Optional,
     Tuple,
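Importing `Iterable` from `collections.abc` rather than `typing` follows PEP 585: on Python 3.9+ the `collections.abc` classes are subscriptable directly, and the `typing` aliases (`typing.Iterable`, `typing.Iterator`, and friends) are deprecated. The same swap appears in `llms.py` further down. A quick sketch of the equivalent annotations:

```python
from collections.abc import Iterable, Iterator


def longest(items: Iterable[str]) -> str:
    # collections.abc.Iterable is subscriptable on Python 3.9+,
    # so no typing.Iterable import is needed.
    return max(items, key=len)


def chunks(text: str, size: int) -> Iterator[str]:
    for i in range(0, len(text), size):
        yield text[i : i + size]


print(longest(["a", "abc", "ab"]))  # abc
print(list(chunks("abcdef", 2)))    # ['ab', 'cd', 'ef']
```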
@@ -94,20 +93,22 @@ class _SemanticRetriever(BaseModel):
         document_id: Optional[str] = None,
     ) -> List[str]:
         if self.name.document_id is None and document_id is None:
-            raise NotImplementedError(
+            msg = (
                 "Adding texts to a corpus directly is not supported. "
                 "Please provide a document ID under the corpus first. "
                 "Then add the texts to the document."
             )
+            raise NotImplementedError(msg)
         if (
             self.name.document_id is not None
             and document_id is not None
             and self.name.document_id != document_id
         ):
-            raise NotImplementedError(
+            msg = (
                 f"Parameter `document_id` {document_id} does not match the "
                 f"vector store's `document_id` {self.name.document_id}"
             )
+            raise NotImplementedError(msg)
         assert self.name.document_id or document_id is not None
         new_document_id = self.name.document_id or document_id or ""

@@ -115,10 +116,11 @@ class _SemanticRetriever(BaseModel):
         if metadatas is None:
             metadatas = [{} for _ in texts]
         if len(texts) != len(metadatas):
-            raise ValueError(
+            msg = (
                 f"metadatas's length {len(metadatas)} and "
                 f"texts's length {len(texts)} are mismatched"
             )
+            raise ValueError(msg)

         chunks = genaix.batch_create_chunk(
             corpus_id=self.name.corpus_id,
@@ -182,7 +184,8 @@ def _delete_chunk(
 ) -> None:
     if chunk_id is not None:
         if document_id is None:
-            raise ValueError(f"Chunk {chunk_id} requires a document ID")
+            msg = f"Chunk {chunk_id} requires a document ID"
+            raise ValueError(msg)
         genaix.delete_chunk(
             corpus_id=corpus_id,
             document_id=document_id,
@@ -249,7 +252,7 @@ class GoogleVectorStore(VectorStore):

     def __init__(
         self, *, corpus_id: str, document_id: Optional[str] = None, **kwargs: Any
-    ):
+    ) -> None:
         """Returns an existing Google Semantic Retriever corpus or document.

         If just the corpus ID is provided, the vector store operates over all
@@ -353,9 +356,8 @@ class GoogleVectorStore(VectorStore):
             Google server.
         """
         if corpus_id is None or document_id is None:
-            raise NotImplementedError(
-                "Must provide an existing corpus ID and document ID"
-            )
+            msg = "Must provide an existing corpus ID and document ID"
+            raise NotImplementedError(msg)

         doc_store = cls(corpus_id=corpus_id, document_id=document_id, **kwargs)
         doc_store.add_texts(texts, metadatas)
@@ -428,7 +430,7 @@ class GoogleVectorStore(VectorStore):
         ]

     def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
-        """Delete chunnks.
+        """Delete chunks.

         Note that the "ids" are not corpus ID or document ID. Rather, these
         are the entity names returned by `add_texts`.
@@ -441,13 +443,21 @@ class GoogleVectorStore(VectorStore):
     async def adelete(
         self, ids: Optional[List[str]] = None, **kwargs: Any
     ) -> Optional[bool]:
+        """Delete chunks asynchronously.
+
+        Note that the "ids" are not corpus ID or document ID. Rather, these
+        are the entity names returned by `add_texts`.
+
+        Returns:
+            True if successful. Otherwise, you should get an exception anyway.
+        """
         return await asyncio.get_running_loop().run_in_executor(
             None, partial(self.delete, **kwargs), ids
         )

     def _select_relevance_score_fn(self) -> Callable[[float], float]:
-        """
-        TODO: Check with the team about this!
+        """TODO: Check with the team about this!
+
         The underlying vector store already returns a "score proper",
         i.e. one in [0, 1] where higher means more *similar*.
         """
--- a/langchain_google_genai/llms.py
+++ b/langchain_google_genai/llms.py
@@ -1,8 +1,9 @@
 from __future__ import annotations

 import logging
+from collections.abc import Iterator
 from difflib import get_close_matches
-from typing import Any, Iterator, List, Optional
+from typing import Any, Optional

 from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
@@ -29,6 +30,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     .. code-block:: python

         from langchain_google_genai import GoogleGenerativeAI
+
         llm = GoogleGenerativeAI(model="gemini-2.5-pro")
     """

@@ -62,7 +64,6 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validates params and passes them to google-generativeai package."""
-
         if not any(self.model.startswith(prefix) for prefix in ("models/",)):
             self.model = f"models/{self.model}"

@@ -84,7 +85,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         return self

     def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         ls_params = super()._get_ls_params(stop=stop, **kwargs)
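The `List[str]` → `list[str]` signature changes in this hunk and the ones that follow are the other half of PEP 585: built-in containers work as generics in annotations, so `typing.List` is unnecessary, and this file's `from __future__ import annotations` (visible in its first hunk) makes the annotations lazy strings in any case. For example:

```python
from __future__ import annotations

from typing import Optional


def apply_stop(prompts: list[str], stop: Optional[list[str]] = None) -> list[str]:
    # Built-in `list` works as a generic annotation; no typing.List needed.
    if not stop:
        return prompts
    token = stop[0]
    return [p.split(token)[0] for p in prompts]


print(apply_stop(["hello world", "goodbye world"], stop=[" "]))
# ['hello', 'goodbye']
```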
@@ -104,8 +105,8 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):

     def _generate(
         self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -123,7 +124,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
                     text=g.message.content,
                     generation_info={
                         **g.generation_info,
-                        **{"usage_metadata": g.message.usage_metadata},
+                        "usage_metadata": g.message.usage_metadata,
                     },
                 )
                 for g in chat_result.generations
@@ -134,7 +135,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
--- a/langchain_google_genai-2.1.11.dist-info/METADATA
+++ b/langchain_google_genai-2.1.12.dist-info/METADATA
@@ -1,9 +1,11 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.1.11
+Version: 2.1.12
 Summary: An integration package connecting Google's genai package and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain-google/releases
+Project-URL: repository, https://github.com/langchain-ai/langchain-google
 Requires-Python: >=3.9
 Requires-Dist: langchain-core>=0.3.75
 Requires-Dist: google-ai-generativelanguage<1,>=0.7
@@ -54,6 +56,9 @@ This package provides LangChain support for Google Gemini models (via the offici

 ```bash
 pip install -U langchain-google-genai
+
+# or, with uv:
+uv add langchain-google-genai
 ````

 ---
@@ -71,7 +76,7 @@ Then use the `ChatGoogleGenerativeAI` interface:
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI

-llm = ChatGoogleGenerativeAI(model="gemini-pro")
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 response = llm.invoke("Sing a ballad of LangChain.")
 print(response.content)
 ```
@@ -80,22 +85,30 @@ print(response.content)

 ## Chat Models

-The main interface for Gemini chat models is `ChatGoogleGenerativeAI`.
+See the LangChain documentation for general information about [Chat Models](https://docs.langchain.com/oss/python/langchain/models).
+
+The main interface for the Gemini chat models is `ChatGoogleGenerativeAI`.

 ### Multimodal Inputs

-Gemini vision models support image inputs in single messages.
+Most Gemini models support image inputs.

 ```python
 from langchain_core.messages import HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI

-llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

 message = HumanMessage(
     content=[
-        {"type": "text", "text": "What's in this image?"},
-        {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/200/300"},
+        {
+            "type": "text",
+            "text": "What's in this image?"
+        },
+        {
+            "type": "image_url",
+            "image_url": "https://picsum.photos/seed/picsum/200/300"
+        },
     ]
 )

@@ -103,7 +116,7 @@ response = llm.invoke([message])
 print(response.content)
 ```

-`image_url` can be:
+`image_url` can be:

 - A public image URL
 - A Google Cloud Storage path (`gcs://...`)
@@ -113,38 +126,44 @@ print(response.content)

 ### Multimodal Outputs

-The Gemini 2.0 Flash Experimental model supports both text and inline image outputs.
+Some Gemini models supports both text and inline image outputs.

 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI

-llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp-image-generation")
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-image-preview")

 response = llm.invoke(
     "Generate an image of a cat and say meow",
     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
 )

-image_base64 = response.content[0].get("image_url").get("url").split(",")[-1]
-meow_text = response.content[1]
+image_base64 = response.content[1].get("image_url").get("url").split(",")[-1]
+meow_text = response.content[0]
 print(meow_text)
+# In Jupyter, display the image:
+from base64 import b64decode
+from IPython.display import Image, display
+
+img_bytes = b64decode(image_base64)
+display(Image(data=img_bytes))
 ```

 ---

 ### Audio Output

-```
+```python
 from langchain_google_genai import ChatGoogleGenerativeAI

-llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-tts")
-# example
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-tts")
+
 response = llm.invoke(
     "Please say The quick brown fox jumps over the lazy dog",
     generation_config=dict(response_modalities=["AUDIO"]),
 )

-# Base64 encoded binary data of the image
+# Base64 encoded binary data of the audio
 wav_data = response.additional_kwargs.get("audio")
 with open("output.wav", "wb") as f:
     f.write(wav_data)
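The index swap in the README snippet above (`content[1]` for the image, `content[0]` for the text) reflects the order in which the updated example model returns its parts: text first, inline image second. Rather than hard-coding positions, a defensive variant could select parts by type; a sketch under the assumption that `response.content` is a list mixing strings and `image_url`-style dicts, as in the README example:

```python
def split_parts(content: list) -> tuple:
    """Return (text, base64_image) from a mixed content list."""
    text, image_b64 = "", ""
    for part in content:
        if isinstance(part, str):
            text += part
        elif isinstance(part, dict) and part.get("type") == "image_url":
            # Data-URL form: "data:image/png;base64,<payload>"
            image_b64 = part["image_url"]["url"].split(",")[-1]
    return text, image_b64


# Hypothetical content list shaped like the README example:
content = [
    "Meow!",
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAAA"}},
]
print(split_parts(content))  # ('Meow!', 'AAAA')
```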
@@ -154,15 +173,13 @@ with open("output.wav", "wb") as f:
 ```

 ### Multimodal Outputs in Chains
-You can use Gemini models in a LangChain chain:
-
 ```python
 from langchain_core.runnables import RunnablePassthrough
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_google_genai import ChatGoogleGenerativeAI, Modality
 llm = ChatGoogleGenerativeAI(
-    model="models/gemini-2.0-flash-exp-image-generation",
+    model="gemini-2.5-flash-image-preview",
     response_modalities=[Modality.TEXT, Modality.IMAGE],
 )

@@ -178,13 +195,11 @@ response = chain.invoke("cat")

 ### Thinking Support

-Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
-
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI

 llm = ChatGoogleGenerativeAI(
-    model="models/gemini-2.5-flash-preview-04-17",
+    model="models/gemini-2.5-flash",
     thinking_budget=1024
 )

@@ -199,8 +214,6 @@ print("Reasoning tokens used:", reasoning_score)

 ## Embeddings

-You can use Gemini embeddings in LangChain:
-
 ```python
 from langchain_google_genai import GoogleGenerativeAIEmbeddings

@@ -249,4 +262,4 @@

 - [LangChain Documentation](https://docs.langchain.com/)
 - [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
-- [Gemini Model Documentation](https://ai.google.dev/)
+- [Gemini Model Documentation](https://ai.google.dev/gemini-api/docs)
--- /dev/null
+++ b/langchain_google_genai-2.1.12.dist-info/RECORD
@@ -0,0 +1,17 @@
+langchain_google_genai-2.1.12.dist-info/METADATA,sha256=cIpzlJRZXFdHG-aZmSE6MJUmSr-JAZzH7pDvU30nzOs,7051
+langchain_google_genai-2.1.12.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+langchain_google_genai-2.1.12.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_google_genai-2.1.12.dist-info/licenses/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
+langchain_google_genai/__init__.py,sha256=yVc4wCHzajs8mvDVQLGmkxxivzApvdu7jTytrLr_i7g,2891
+langchain_google_genai/_common.py,sha256=dzzcJeDVnUUI_8Y20F40SZ5NjW6RHD4xJu74naYK58k,6192
+langchain_google_genai/_enums.py,sha256=Zj3BXXLlkm_UybegCi6fLsfFhriJCt_LAJvgatgPWQ0,252
+langchain_google_genai/_function_utils.py,sha256=Pur365t4M8O5dkrnFkDc_jHiPfuI_C7ucSxvxA3tioM,22254
+langchain_google_genai/_genai_extension.py,sha256=LiNa7b6BZp41meJMtgaSEbaGJa59BY3UMCRMHO_CCtQ,21467
+langchain_google_genai/_image_utils.py,sha256=iJ3KrFWQhA0UwsT8ryAFAmNM7-_2ahHAppT8t9WFZGQ,5304
+langchain_google_genai/chat_models.py,sha256=G619buVU9Sef33Ubk9JL8gYLd5y_jsGMgFXeuPq7wJ0,85067
+langchain_google_genai/embeddings.py,sha256=Kar6nHpAJ0rWQZ0nc7_anWETlwCIovd4DhieIviGNQ8,16687
+langchain_google_genai/genai_aqa.py,sha256=NVW8wOWxU7T6VVshFrFlFHa5HzEPAedrgh1fOwFH7Og,4380
+langchain_google_genai/google_vector_store.py,sha256=x4OcXkcYWTubu-AESNzNDJG_dbge16GeNCAOCsBoc4g,16537
+langchain_google_genai/llms.py,sha256=P_e_ImBWexhKqc7LZPo6tQbwLH2-ljr6oqpE5M27TRc,5755
+langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_google_genai-2.1.12.dist-info/RECORD,,
--- a/langchain_google_genai-2.1.11.dist-info/RECORD
+++ /dev/null
@@ -1,17 +0,0 @@
-langchain_google_genai-2.1.11.dist-info/METADATA,sha256=8GByGj2KQZZXlvSU76OORlltVz90pJxUPYwA0oXK8r4,6706
-langchain_google_genai-2.1.11.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
-langchain_google_genai-2.1.11.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
-langchain_google_genai-2.1.11.dist-info/licenses/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
-langchain_google_genai/__init__.py,sha256=UR61QZZ_8TkdR7yuaKLzfB8c8ZBJdPYqJ8f7Cc5TbFA,2890
-langchain_google_genai/_common.py,sha256=KyCvLpY7iiMAXdtwZDtxEU2C1iTtTGoXsEJ7-7a98cM,5942
-langchain_google_genai/_enums.py,sha256=Zj3BXXLlkm_UybegCi6fLsfFhriJCt_LAJvgatgPWQ0,252
-langchain_google_genai/_function_utils.py,sha256=cn__CyiCgd3Cb7FqSwo3uJCQLhcwtU7lrf99nmvHLeg,22394
-langchain_google_genai/_genai_extension.py,sha256=cK1J96xxy_NGFYGYJQVC8q8omURz3OF3KAZJnMhIF18,20795
-langchain_google_genai/_image_utils.py,sha256=wRd-pf190F-GelSl_qPIRy8XifrBNvqTdGaHr63azlQ,5216
-langchain_google_genai/chat_models.py,sha256=bmFrvT8qGOjgRN-0v4qYGiIr44FD-syR81iqyC0tyY0,80420
-langchain_google_genai/embeddings.py,sha256=EjpWll1JXNxgXJUNpm3U2LQUHOZHMG6amv8en61GHo0,16615
-langchain_google_genai/genai_aqa.py,sha256=qB6h3-BSXqe0YLR3eeVllYzmNKK6ofI6xJLdBahUVZo,4300
-langchain_google_genai/google_vector_store.py,sha256=4wvhIiOmc3Fo046FyafPmT9NBCLek-9bgluvuTfrbpQ,16148
-langchain_google_genai/llms.py,sha256=BCjIXQ9kyKjwAlwhwFmB30pKl89XqTmIodtcoflyxmA,5738
-langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_google_genai-2.1.11.dist-info/RECORD,,