langchain-b12 0.1.6 → 0.1.8 (py3-none-any.whl)

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
langchain_b12/genai/embeddings.py
@@ -1,6 +1,7 @@
  import os

  from google.genai import Client
+ from google.genai.types import EmbedContentConfigOrDict
  from google.oauth2 import service_account
  from langchain_core.embeddings import Embeddings
  from pydantic import BaseModel, ConfigDict, Field
@@ -19,6 +20,7 @@ class GenAIEmbeddings(Embeddings, BaseModel):
  ),
  exclude=True,
  )
+ embed_content_config: EmbedContentConfigOrDict | None = Field(default=None)
  model_config = ConfigDict(
  arbitrary_types_allowed=True,
  )
@@ -33,19 +35,17 @@ class GenAIEmbeddings(Embeddings, BaseModel):
  list[list[float]]: The embedding vectors.
  """
  embeddings = []
- for text in texts:
- response = self.client.models.embed_content(
- model=self.model_name,
- contents=[text],
- )
+ response = self.client.models.embed_content(
+ model=self.model_name,
+ contents=texts,
+ config=self.embed_content_config,
+ )
+ assert response.embeddings is not None, "No embeddings found in the response."
+ for embedding in response.embeddings:
  assert (
- response.embeddings is not None
- ), "No embeddings found in the response."
- for embedding in response.embeddings:
- assert (
- embedding.values is not None
- ), "No embedding values found in the response."
- embeddings.append(embedding.values)
+ embedding.values is not None
+ ), "No embedding values found in the response."
+ embeddings.append(embedding.values)
  assert len(embeddings) == len(
  texts
  ), "The number of embeddings does not match the number of texts."
@@ -75,6 +75,7 @@ class GenAIEmbeddings(Embeddings, BaseModel):
  response = await self.client.aio.models.embed_content(
  model=self.model_name,
  contents=texts,
+ config=self.embed_content_config,
  )
  assert response.embeddings is not None, "No embeddings found in the response."
  for embedding in response.embeddings:
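The async path gets the same config pass-through. Assuming the method wrapping this aio.models.embed_content call is the standard aembed_documents, and reusing the embedder from the sketch above:

```python
# Sketch only: aembed_documents is assumed to be the async counterpart
# that wraps the aio.models.embed_content call shown in this hunk.
import asyncio

async def main() -> None:
    vectors = await embedder.aembed_documents(["first document", "second document"])
    print(len(vectors))

asyncio.run(main())
```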
langchain_b12/genai/genai.py
@@ -75,6 +75,8 @@ class ChatGenAI(BaseChatModel):
  """How many completions to generate for each prompt."""
  seed: int | None = None
  """Random seed for the generation."""
+ max_retries: int | None = Field(default=3)
+ """Maximum number of retries when generation fails. None disables retries."""
  safety_settings: list[types.SafetySetting] | None = None
  """The default safety settings to use for all generations.

@@ -173,10 +175,24 @@ class ChatGenAI(BaseChatModel):
  run_manager: CallbackManagerForLLMRun | None = None,
  **kwargs: Any,
  ) -> ChatResult:
- stream_iter = self._stream(
- messages, stop=stop, run_manager=run_manager, **kwargs
- )
- return generate_from_stream(stream_iter)
+ attempts = 0
+ while True:
+ try:
+ stream_iter = self._stream(
+ messages, stop=stop, run_manager=run_manager, **kwargs
+ )
+ return generate_from_stream(stream_iter)
+ except Exception as e: # noqa: BLE001
+ if self.max_retries is None or attempts >= self.max_retries:
+ raise
+ attempts += 1
+ logger.warning(
+ "ChatGenAI._generate failed (attempt %d/%d). "
+ "Retrying... Error: %s",
+ attempts,
+ self.max_retries,
+ e,
+ )

  async def _agenerate(
  self,
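The new max_retries field (default 3) is read by both _generate and _agenerate. A hedged sketch of how a caller might tune it; the model name and constructor keyword are illustrative assumptions:

```python
# Sketch: max_retries counts retries after the first failed attempt,
# so max_retries=3 allows up to 4 attempts in total; None turns retrying off.
from langchain_b12.genai.genai import ChatGenAI  # assumed import path (see RECORD)

llm = ChatGenAI(model_name="gemini-2.0-flash", max_retries=5)               # retry up to 5 times
no_retry_llm = ChatGenAI(model_name="gemini-2.0-flash", max_retries=None)   # fail fast
```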
@@ -185,10 +201,24 @@ class ChatGenAI(BaseChatModel):
  run_manager: AsyncCallbackManagerForLLMRun | None = None,
  **kwargs: Any,
  ) -> ChatResult:
- stream_iter = self._astream(
- messages, stop=stop, run_manager=run_manager, **kwargs
- )
- return await agenerate_from_stream(stream_iter)
+ attempts = 0
+ while True:
+ try:
+ stream_iter = self._astream(
+ messages, stop=stop, run_manager=run_manager, **kwargs
+ )
+ return await agenerate_from_stream(stream_iter)
+ except Exception as e: # noqa: BLE001
+ if self.max_retries is None or attempts >= self.max_retries:
+ raise
+ attempts += 1
+ logger.warning(
+ "ChatGenAI._agenerate failed (attempt %d/%d). "
+ "Retrying... Error: %s",
+ attempts,
+ self.max_retries,
+ e,
+ )

  def _stream(
  self,
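Both overrides follow the same retry loop: each failure below the limit is logged and retried, and the exception is re-raised once attempts reaches max_retries. A standalone sketch of those semantics (not the library code itself):

```python
# Standalone illustration of the retry semantics added in 0.1.8.
import logging
from typing import Callable, TypeVar

logger = logging.getLogger(__name__)
T = TypeVar("T")

def call_with_retries(fn: Callable[[], T], max_retries: int | None = 3) -> T:
    attempts = 0
    while True:
        try:
            return fn()
        except Exception as e:  # noqa: BLE001
            # None disables retrying; otherwise allow max_retries retries
            # after the first failure (max_retries + 1 attempts in total).
            if max_retries is None or attempts >= max_retries:
                raise
            attempts += 1
            logger.warning("Attempt %d/%d failed, retrying: %s", attempts, max_retries, e)
```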
@@ -419,6 +449,7 @@ class ChatGenAI(BaseChatModel):
  # add model name if final chunk
  if top_candidate.finish_reason is not None:
  message.response_metadata["model_name"] = self.model_name
+ message.response_metadata["tags"] = self.tags or []

  return (
  ChatGenerationChunk(
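Since the tags are attached to the final streamed chunk, they should surface on the resulting message. A sketch of reading them back; the model name and tag values are illustrative assumptions:

```python
# Sketch: tags set on the model are expected to be echoed into
# response_metadata of the final chunk as of 0.1.8.
from langchain_b12.genai.genai import ChatGenAI  # assumed import path (see RECORD)

llm = ChatGenAI(model_name="gemini-2.0-flash", tags=["team-a"])
message = llm.invoke("Hello")
print(message.response_metadata.get("model_name"))
print(message.response_metadata.get("tags", []))  # expected: ["team-a"]
```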
langchain_b12-0.1.6.dist-info/METADATA → langchain_b12-0.1.8.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-b12
- Version: 0.1.6
+ Version: 0.1.8
  Summary: A reusable collection of tools and implementations for Langchain
  Author-email: Vincent Min <vincent.min@b12-consulting.com>
  Requires-Python: >=3.11
langchain_b12-0.1.8.dist-info/RECORD (added)
@@ -0,0 +1,9 @@
+ langchain_b12/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langchain_b12/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langchain_b12/citations/citations.py,sha256=ZQvYayjQXIUaRosJ0qwL3Nc7kC8sBzmaIkE-BOslaVI,12261
+ langchain_b12/genai/embeddings.py,sha256=h0Z-5PltDW9q79AjSrLemsz-_QKMB-043XXDvYSRQds,3483
+ langchain_b12/genai/genai.py,sha256=4Q0j2YsAPLrHhIy_pYXoncb4hqJJkEUpxB3oD3qaECI,18120
+ langchain_b12/genai/genai_utils.py,sha256=tA6UiJURK25-11vtaX4768UV47jDCYwVKIIWydD4Egw,10736
+ langchain_b12-0.1.8.dist-info/METADATA,sha256=0-KZr-PXjE16ar4LpQbdWHX8CrViLBxlfV9uGwE0Qw0,1204
+ langchain_b12-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langchain_b12-0.1.8.dist-info/RECORD,,
langchain_b12-0.1.6.dist-info/RECORD (removed)
@@ -1,9 +0,0 @@
- langchain_b12/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain_b12/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain_b12/citations/citations.py,sha256=ZQvYayjQXIUaRosJ0qwL3Nc7kC8sBzmaIkE-BOslaVI,12261
- langchain_b12/genai/embeddings.py,sha256=od2bVIgt7v9aNAHG0PVypVF1H_XgHto2nTd8vwfvyN8,3355
- langchain_b12/genai/genai.py,sha256=r7v_Z97N_Vd0zIR5mcQrlY3eWCPWhThvOvnXg59Ls8c,16868
- langchain_b12/genai/genai_utils.py,sha256=tA6UiJURK25-11vtaX4768UV47jDCYwVKIIWydD4Egw,10736
- langchain_b12-0.1.6.dist-info/METADATA,sha256=c2_hq-9spfRCb1nNRT6ztrCf1s1KNl8lkg-tLiB0mho,1204
- langchain_b12-0.1.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- langchain_b12-0.1.6.dist-info/RECORD,,