langchain-b12 0.1.10__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain_b12/genai/genai.py
CHANGED
@@ -35,14 +35,7 @@ from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import (
     convert_to_openai_tool,
 )
-from pydantic import BaseModel, ConfigDict, Field
-from tenacity import (
-    retry,
-    retry_if_exception_type,
-    stop_after_attempt,
-    stop_never,
-    wait_exponential_jitter,
-)
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
 
 from langchain_b12.genai.genai_utils import (
     convert_messages_to_contents,
@@ -84,7 +77,9 @@ class ChatGenAI(BaseChatModel):
     seed: int | None = None
     """Random seed for the generation."""
     max_retries: int | None = Field(default=3)
-    """Maximum number of retries."""
+    """Maximum number of retries. Prefer `http_retry_options`, but this is kept for compatibility."""
+    http_retry_options: types.HttpRetryOptions | None = Field(default=None)
+    """HTTP retry options for API requests. If not set, max_retries will be used to create default options."""
     safety_settings: list[types.SafetySetting] | None = None
     """The default safety settings to use for all generations.
 
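The new `http_retry_options` field and the legacy `max_retries` knob both feed the SDK-native retry type. A minimal usage sketch (the model name and attempt count are illustrative, and only the `attempts` field of `HttpRetryOptions` is confirmed by this diff):

from google.genai import types

from langchain_b12.genai.genai import ChatGenAI

# Legacy style: the integer is converted to HttpRetryOptions by a validator (next hunk).
llm = ChatGenAI(model_name="gemini-2.0-flash", max_retries=5)

# Preferred style: pass SDK-native retry options directly; this takes precedence.
llm = ChatGenAI(
    model_name="gemini-2.0-flash",
    http_retry_options=types.HttpRetryOptions(attempts=5),
)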
@@ -107,6 +102,13 @@ class ChatGenAI(BaseChatModel):
         arbitrary_types_allowed=True,
     )
 
+    @model_validator(mode="after")
+    def _setup_retry_options(self) -> "ChatGenAI":
+        """Convert max_retries to http_retry_options if not explicitly set."""
+        if self.http_retry_options is None and self.max_retries is not None:
+            self.http_retry_options = types.HttpRetryOptions(attempts=self.max_retries)
+        return self
+
     @property
     def _llm_type(self) -> str:
         return "vertexai"
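The conversion relies on pydantic v2 after-mode model validators, which run on the fully constructed instance so every field is already parsed. A self-contained sketch of the same derive-one-field-from-another pattern (class and field names here are hypothetical):

from pydantic import BaseModel, model_validator


class RetrySettings(BaseModel):
    max_retries: int | None = 3
    attempts: int | None = None

    @model_validator(mode="after")
    def _derive_attempts(self) -> "RetrySettings":
        # Runs after parsing, so both fields are available for cross-checking.
        if self.attempts is None and self.max_retries is not None:
            self.attempts = self.max_retries
        return self


assert RetrySettings().attempts == 3            # derived from the max_retries default
assert RetrySettings(attempts=7).attempts == 7  # an explicit value is left untouched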
@@ -208,64 +210,32 @@ class ChatGenAI(BaseChatModel):
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         system_message, contents = self._prepare_request(messages=messages)
-
-
-
-
-            if self.max_retries is not None
-            else stop_never,
-            wait=wait_exponential_jitter(initial=1, max=60),
-            retry=retry_if_exception_type(Exception),
-            before_sleep=lambda retry_state: logger.warning(
-                "ChatGenAI._stream failed to start (attempt %d/%s). "
-                "Retrying in %.2fs... Error: %s",
-                retry_state.attempt_number,
-                self.max_retries + 1 if self.max_retries is not None else "∞",
-                retry_state.next_action.sleep,
-                retry_state.outcome.exception(),
-            ),
+        http_options = (
+            types.HttpOptions(retry_options=self.http_retry_options)
+            if self.http_retry_options
+            else None
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                stop_sequences=stop or self.stop,
-                safety_settings=self.safety_settings,
-                thinking_config=self.thinking_config,
-                automatic_function_calling=types.AutomaticFunctionCallingConfig(
-                    disable=True,
-                ),
-                **kwargs,
+        response_iter = self.client.models.generate_content_stream(
+            model=self.model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                http_options=http_options,
+                system_instruction=system_message,
+                temperature=self.temperature,
+                top_k=self.top_k,
+                top_p=self.top_p,
+                max_output_tokens=self.max_output_tokens,
+                candidate_count=self.n,
+                stop_sequences=stop or self.stop,
+                safety_settings=self.safety_settings,
+                thinking_config=self.thinking_config,
+                automatic_function_calling=types.AutomaticFunctionCallingConfig(
+                    disable=True,
                 ),
-
-
-
-
-                first_response, prev_total_usage=None
-            )
-            return first_chunk, response_iter, total_usage
-
-        # Retry only covers stream initialization and first chunk
-        first_chunk, response_iter, total_lc_usage = _initiate_stream()
-
-        # Yield first chunk
-        if run_manager and isinstance(first_chunk.message.content, str):
-            run_manager.on_llm_new_token(first_chunk.message.content)
-        yield first_chunk
-
-        # Continue streaming without retry (retries during streaming are not well defined)
+                **kwargs,
+            ),
+        )
+        total_lc_usage = None
         for response_chunk in response_iter:
             chunk, total_lc_usage = self._gemini_chunk_to_generation_chunk(
                 response_chunk, prev_total_usage=total_lc_usage
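The net effect of this hunk: the tenacity wrapper around stream initialization is gone, and retries are delegated to the google-genai SDK's HTTP layer via `HttpOptions.retry_options`. A hypothetical standalone sketch of the same pattern against the SDK directly (model name and prompt are illustrative; assumes credentials are configured in the environment):

from google import genai
from google.genai import types

client = genai.Client()

# Retries now happen inside the SDK's HTTP layer rather than around the stream.
for chunk in client.models.generate_content_stream(
    model="gemini-2.0-flash",
    contents="Write a short haiku about retries.",
    config=types.GenerateContentConfig(
        http_options=types.HttpOptions(
            retry_options=types.HttpRetryOptions(attempts=3),
        ),
    ),
):
    print(chunk.text, end="", flush=True)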
@@ -282,65 +252,33 @@ class ChatGenAI(BaseChatModel):
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         system_message, contents = self._prepare_request(messages=messages)
-
-
-
-
-            if self.max_retries is not None
-            else stop_never,
-            wait=wait_exponential_jitter(initial=1, max=60),
-            retry=retry_if_exception_type(Exception),
-            before_sleep=lambda retry_state: logger.warning(
-                "ChatGenAI._astream failed to start (attempt %d/%s). "
-                "Retrying in %.2fs... Error: %s",
-                retry_state.attempt_number,
-                self.max_retries + 1 if self.max_retries is not None else "∞",
-                retry_state.next_action.sleep,
-                retry_state.outcome.exception(),
-            ),
+        http_options = (
+            types.HttpOptions(retry_options=self.http_retry_options)
+            if self.http_retry_options
+            else None
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                stop_sequences=stop or self.stop,
-                safety_settings=self.safety_settings,
-                thinking_config=self.thinking_config,
-                automatic_function_calling=types.AutomaticFunctionCallingConfig(
-                    disable=True,
-                ),
-                **kwargs,
+        response_iter = self.client.aio.models.generate_content_stream(
+            model=self.model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                http_options=http_options,
+                system_instruction=system_message,
+                temperature=self.temperature,
+                top_k=self.top_k,
+                top_p=self.top_p,
+                max_output_tokens=self.max_output_tokens,
+                candidate_count=self.n,
+                stop_sequences=stop or self.stop,
+                safety_settings=self.safety_settings,
+                thinking_config=self.thinking_config,
+                automatic_function_calling=types.AutomaticFunctionCallingConfig(
+                    disable=True,
                 ),
-
-
-
-
-
-            )
-            return first_chunk, response_iter, total_usage
-
-        # Retry only covers stream initialization and first chunk
-        first_chunk, response_iter, total_lc_usage = await _initiate_stream()
-
-        # Yield first chunk
-        if run_manager and isinstance(first_chunk.message.content, str):
-            await run_manager.on_llm_new_token(first_chunk.message.content)
-        yield first_chunk
-
-        # Continue streaming without retry (retries during streaming are not well defined)
-        async for response_chunk in response_iter:
+                **kwargs,
+            ),
+        )
+        total_lc_usage = None
+        async for response_chunk in await response_iter:
             chunk, total_lc_usage = self._gemini_chunk_to_generation_chunk(
                 response_chunk, prev_total_usage=total_lc_usage
             )
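The async hunk mirrors the sync one, with one subtlety the new code makes explicit: `client.aio.models.generate_content_stream(...)` returns an awaitable that resolves to the async iterator, hence `async for ... in await response_iter`. A hypothetical sketch under the same assumptions as the sync example:

import asyncio

from google import genai
from google.genai import types


async def main() -> None:
    client = genai.Client()
    stream = client.aio.models.generate_content_stream(
        model="gemini-2.0-flash",
        contents="Write a short haiku about retries.",
        config=types.GenerateContentConfig(
            http_options=types.HttpOptions(
                retry_options=types.HttpRetryOptions(attempts=3),
            ),
        ),
    )
    # The coroutine must be awaited before iteration can begin.
    async for chunk in await stream:
        print(chunk.text, end="", flush=True)


asyncio.run(main())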
langchain_b12-0.1.10.dist-info/METADATA → langchain_b12-0.1.11.dist-info/METADATA
CHANGED
@@ -1,12 +1,10 @@
 Metadata-Version: 2.4
 Name: langchain-b12
-Version: 0.1.10
+Version: 0.1.11
 Summary: A reusable collection of tools and implementations for Langchain
 Author-email: Vincent Min <vincent.min@b12-consulting.com>
 Requires-Python: >=3.11
 Requires-Dist: langchain-core>=0.3.60
-Requires-Dist: pytest-anyio>=0.0.0
-Requires-Dist: tenacity>=9.1.2
 Description-Content-Type: text/markdown
 
 # Langchain B12
langchain_b12-0.1.10.dist-info/RECORD → langchain_b12-0.1.11.dist-info/RECORD
CHANGED
@@ -2,8 +2,8 @@ langchain_b12/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_b12/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_b12/citations/citations.py,sha256=ZQvYayjQXIUaRosJ0qwL3Nc7kC8sBzmaIkE-BOslaVI,12261
 langchain_b12/genai/embeddings.py,sha256=h0Z-5PltDW9q79AjSrLemsz-_QKMB-043XXDvYSRQds,3483
-langchain_b12/genai/genai.py,sha256=
+langchain_b12/genai/genai.py,sha256=u-QAH_4VauBj99dWuYBaxAMT3bNbqMdxM-rYgRKctLw,18074
 langchain_b12/genai/genai_utils.py,sha256=tA6UiJURK25-11vtaX4768UV47jDCYwVKIIWydD4Egw,10736
-langchain_b12-0.1.10.dist-info/METADATA,sha256=
-langchain_b12-0.1.10.dist-info/WHEEL,sha256=
-langchain_b12-0.1.10.dist-info/RECORD,,
+langchain_b12-0.1.11.dist-info/METADATA,sha256=w4uaxeVl7hg1h2Zj3J9ZOukgUAXHeJcQA0rrtBnmdyg,1205
+langchain_b12-0.1.11.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langchain_b12-0.1.11.dist-info/RECORD,,