langchain-b12 0.1.10__py3-none-any.whl → 0.1.12__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
langchain_b12/genai/genai.py  CHANGED

@@ -35,14 +35,7 @@ from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import (
     convert_to_openai_tool,
 )
-from pydantic import BaseModel, ConfigDict, Field
-from tenacity import (
-    retry,
-    retry_if_exception_type,
-    stop_after_attempt,
-    stop_never,
-    wait_exponential_jitter,
-)
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
 
 from langchain_b12.genai.genai_utils import (
     convert_messages_to_contents,
@@ -84,7 +77,9 @@ class ChatGenAI(BaseChatModel):
     seed: int | None = None
     """Random seed for the generation."""
     max_retries: int | None = Field(default=3)
-    """Maximum number of retries
+    """Maximum number of retries. Prefer `http_retry_options`, but this is kept for compatibility."""
+    http_retry_options: types.HttpRetryOptions | None = Field(default=None)
+    """HTTP retry options for API requests. If not set, max_retries will be used to create default options."""
     safety_settings: list[types.SafetySetting] | None = None
     """The default safety settings to use for all generations.
 
@@ -107,6 +102,19 @@ class ChatGenAI(BaseChatModel):
         arbitrary_types_allowed=True,
     )
 
+    @model_validator(mode="after")
+    def _setup_retry_options(self) -> "ChatGenAI":
+        """Convert max_retries to http_retry_options if not explicitly set."""
+        if self.http_retry_options is None and self.max_retries is not None:
+            self.http_retry_options = types.HttpRetryOptions(
+                attempts=self.max_retries,
+                initial_delay=1.0,
+                max_delay=60.0,
+                exp_base=2.0,
+                jitter=0.1,
+            )
+        return self
+
     @property
     def _llm_type(self) -> str:
         return "vertexai"
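Together with the new `http_retry_options` field, this validator replaces the removed tenacity machinery: a plain `max_retries` is converted once, at construction time, into equivalent SDK-level retry options, and an explicitly supplied `http_retry_options` takes precedence. A minimal sketch of the resulting behavior (assuming `ChatGenAI` can be constructed with just a model name; the model id is a placeholder, and any client or credential setup the class may require is omitted):

    from google.genai import types
    from langchain_b12.genai.genai import ChatGenAI

    # Default: max_retries=3 is translated into HttpRetryOptions(attempts=3, ...).
    model = ChatGenAI(model_name="gemini-2.0-flash")
    assert model.http_retry_options is not None
    assert model.http_retry_options.attempts == 3

    # Explicit options win; max_retries is then ignored by the validator.
    model = ChatGenAI(
        model_name="gemini-2.0-flash",
        http_retry_options=types.HttpRetryOptions(attempts=5, initial_delay=0.5),
    )
    assert model.http_retry_options.attempts == 5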
@@ -208,64 +216,32 @@ class ChatGenAI(BaseChatModel):
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         system_message, contents = self._prepare_request(messages=messages)
-
-
-
-
-            if self.max_retries is not None
-            else stop_never,
-            wait=wait_exponential_jitter(initial=1, max=60),
-            retry=retry_if_exception_type(Exception),
-            before_sleep=lambda retry_state: logger.warning(
-                "ChatGenAI._stream failed to start (attempt %d/%s). "
-                "Retrying in %.2fs... Error: %s",
-                retry_state.attempt_number,
-                self.max_retries + 1 if self.max_retries is not None else "∞",
-                retry_state.next_action.sleep,
-                retry_state.outcome.exception(),
-            ),
+        http_options = (
+            types.HttpOptions(retry_options=self.http_retry_options)
+            if self.http_retry_options
+            else None
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            stop_sequences=stop or self.stop,
-            safety_settings=self.safety_settings,
-            thinking_config=self.thinking_config,
-            automatic_function_calling=types.AutomaticFunctionCallingConfig(
-                disable=True,
-            ),
-            **kwargs,
+        response_iter = self.client.models.generate_content_stream(
+            model=self.model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                http_options=http_options,
+                system_instruction=system_message,
+                temperature=self.temperature,
+                top_k=self.top_k,
+                top_p=self.top_p,
+                max_output_tokens=self.max_output_tokens,
+                candidate_count=self.n,
+                stop_sequences=stop or self.stop,
+                safety_settings=self.safety_settings,
+                thinking_config=self.thinking_config,
+                automatic_function_calling=types.AutomaticFunctionCallingConfig(
+                    disable=True,
                 ),
-
-
-
-
-                first_response, prev_total_usage=None
-            )
-            return first_chunk, response_iter, total_usage
-
-        # Retry only covers stream initialization and first chunk
-        first_chunk, response_iter, total_lc_usage = _initiate_stream()
-
-        # Yield first chunk
-        if run_manager and isinstance(first_chunk.message.content, str):
-            run_manager.on_llm_new_token(first_chunk.message.content)
-        yield first_chunk
-
-        # Continue streaming without retry (retries during streaming are not well defined)
+                **kwargs,
+            ),
+        )
+        total_lc_usage = None
         for response_chunk in response_iter:
             chunk, total_lc_usage = self._gemini_chunk_to_generation_chunk(
                 response_chunk, prev_total_usage=total_lc_usage
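Where the old tenacity wrapper retried only stream initialization and the first chunk, the new code delegates retries to the google-genai HTTP layer via `HttpOptions.retry_options`. For reference, roughly the same wiring in a bare SDK call outside LangChain (a sketch, not code from this package; client construction, model id, and prompt are placeholders):

    from google import genai
    from google.genai import types

    client = genai.Client()  # credentials/project resolved from the environment

    # Mirrors the defaults the validator derives from max_retries=3.
    retry_options = types.HttpRetryOptions(
        attempts=3,
        initial_delay=1.0,
        max_delay=60.0,
        exp_base=2.0,
        jitter=0.1,
    )

    for chunk in client.models.generate_content_stream(
        model="gemini-2.0-flash",
        contents="Hello",
        config=types.GenerateContentConfig(
            http_options=types.HttpOptions(retry_options=retry_options),
        ),
    ):
        print(chunk.text, end="")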
@@ -282,65 +258,33 @@ class ChatGenAI(BaseChatModel):
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         system_message, contents = self._prepare_request(messages=messages)
-
-
-
-
-            if self.max_retries is not None
-            else stop_never,
-            wait=wait_exponential_jitter(initial=1, max=60),
-            retry=retry_if_exception_type(Exception),
-            before_sleep=lambda retry_state: logger.warning(
-                "ChatGenAI._astream failed to start (attempt %d/%s). "
-                "Retrying in %.2fs... Error: %s",
-                retry_state.attempt_number,
-                self.max_retries + 1 if self.max_retries is not None else "∞",
-                retry_state.next_action.sleep,
-                retry_state.outcome.exception(),
-            ),
+        http_options = (
+            types.HttpOptions(retry_options=self.http_retry_options)
+            if self.http_retry_options
+            else None
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            stop_sequences=stop or self.stop,
-            safety_settings=self.safety_settings,
-            thinking_config=self.thinking_config,
-            automatic_function_calling=types.AutomaticFunctionCallingConfig(
-                disable=True,
-            ),
-            **kwargs,
+        response_iter = self.client.aio.models.generate_content_stream(
+            model=self.model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                http_options=http_options,
+                system_instruction=system_message,
+                temperature=self.temperature,
+                top_k=self.top_k,
+                top_p=self.top_p,
+                max_output_tokens=self.max_output_tokens,
+                candidate_count=self.n,
+                stop_sequences=stop or self.stop,
+                safety_settings=self.safety_settings,
+                thinking_config=self.thinking_config,
+                automatic_function_calling=types.AutomaticFunctionCallingConfig(
+                    disable=True,
                 ),
-
-
-
-
-
-            )
-            return first_chunk, response_iter, total_usage
-
-        # Retry only covers stream initialization and first chunk
-        first_chunk, response_iter, total_lc_usage = await _initiate_stream()
-
-        # Yield first chunk
-        if run_manager and isinstance(first_chunk.message.content, str):
-            await run_manager.on_llm_new_token(first_chunk.message.content)
-        yield first_chunk
-
-        # Continue streaming without retry (retries during streaming are not well defined)
-        async for response_chunk in response_iter:
+                **kwargs,
+            ),
+        )
+        total_lc_usage = None
+        async for response_chunk in await response_iter:
             chunk, total_lc_usage = self._gemini_chunk_to_generation_chunk(
                 response_chunk, prev_total_usage=total_lc_usage
             )
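Note the `await` in the rewritten loop: the async client's `generate_content_stream` returns an awaitable that resolves to an async iterator, so it must be awaited before iteration. A self-contained sketch of the same pattern, under the same placeholder assumptions as above:

    import asyncio

    from google import genai
    from google.genai import types

    async def main() -> None:
        client = genai.Client()
        # The aio call must be awaited first; the result is an async iterator.
        stream = await client.aio.models.generate_content_stream(
            model="gemini-2.0-flash",
            contents="Hello",
            config=types.GenerateContentConfig(
                http_options=types.HttpOptions(
                    retry_options=types.HttpRetryOptions(attempts=3),
                ),
            ),
        )
        async for chunk in stream:
            print(chunk.text, end="")

    asyncio.run(main())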
langchain_b12-0.1.10.dist-info/METADATA → langchain_b12-0.1.12.dist-info/METADATA  CHANGED

@@ -1,12 +1,10 @@
 Metadata-Version: 2.4
 Name: langchain-b12
-Version: 0.1.10
+Version: 0.1.12
 Summary: A reusable collection of tools and implementations for Langchain
 Author-email: Vincent Min <vincent.min@b12-consulting.com>
 Requires-Python: >=3.11
 Requires-Dist: langchain-core>=0.3.60
-Requires-Dist: pytest-anyio>=0.0.0
-Requires-Dist: tenacity>=9.1.2
 Description-Content-Type: text/markdown
 
 # Langchain B12
langchain_b12-0.1.10.dist-info/RECORD → langchain_b12-0.1.12.dist-info/RECORD  CHANGED

@@ -2,8 +2,8 @@ langchain_b12/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_b12/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_b12/citations/citations.py,sha256=ZQvYayjQXIUaRosJ0qwL3Nc7kC8sBzmaIkE-BOslaVI,12261
 langchain_b12/genai/embeddings.py,sha256=h0Z-5PltDW9q79AjSrLemsz-_QKMB-043XXDvYSRQds,3483
-langchain_b12/genai/genai.py,sha256=
+langchain_b12/genai/genai.py,sha256=niECJjU4lGcA0YYU7pGfL4oDZoiHYSdKWXyvB3gEBe4,18230
 langchain_b12/genai/genai_utils.py,sha256=tA6UiJURK25-11vtaX4768UV47jDCYwVKIIWydD4Egw,10736
-langchain_b12-0.1.
-langchain_b12-0.1.
-langchain_b12-0.1.
+langchain_b12-0.1.12.dist-info/METADATA,sha256=c5A1cSulh5NeVxvBn8lkfU4peFVU7ei5eNScNDTkYds,1205
+langchain_b12-0.1.12.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langchain_b12-0.1.12.dist-info/RECORD,,