langchain-b12 0.1.10__tar.gz → 0.1.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/PKG-INFO +1 -3
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/pyproject.toml +1 -3
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/src/langchain_b12/genai/genai.py +60 -122
- langchain_b12-0.1.11/tests/test_genai.py +210 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/uv.lock +2 -21
- langchain_b12-0.1.10/tests/test_genai.py +0 -279
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/.gitignore +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/.python-version +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/.vscode/extensions.json +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/Makefile +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/README.md +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/src/langchain_b12/__init__.py +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/src/langchain_b12/citations/citations.py +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/src/langchain_b12/genai/embeddings.py +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/src/langchain_b12/genai/genai_utils.py +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/src/langchain_b12/py.typed +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/tests/test_citation_mixin.py +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/tests/test_citations.py +0 -0
- {langchain_b12-0.1.10 → langchain_b12-0.1.11}/tests/test_genai_utils.py +0 -0
```diff
--- langchain_b12-0.1.10/PKG-INFO
+++ langchain_b12-0.1.11/PKG-INFO
@@ -1,12 +1,10 @@
 Metadata-Version: 2.4
 Name: langchain-b12
-Version: 0.1.10
+Version: 0.1.11
 Summary: A reusable collection of tools and implementations for Langchain
 Author-email: Vincent Min <vincent.min@b12-consulting.com>
 Requires-Python: >=3.11
 Requires-Dist: langchain-core>=0.3.60
-Requires-Dist: pytest-anyio>=0.0.0
-Requires-Dist: tenacity>=9.1.2
 Description-Content-Type: text/markdown
 
 # Langchain B12
```
```diff
--- langchain_b12-0.1.10/pyproject.toml
+++ langchain_b12-0.1.11/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "langchain-b12"
-version = "0.1.10"
+version = "0.1.11"
 description = "A reusable collection of tools and implementations for Langchain"
 readme = "README.md"
 authors = [
@@ -9,8 +9,6 @@ authors = [
 requires-python = ">=3.11"
 dependencies = [
     "langchain-core>=0.3.60",
-    "pytest-anyio>=0.0.0",
-    "tenacity>=9.1.2",
 ]
 
 [dependency-groups]
```
```diff
--- langchain_b12-0.1.10/src/langchain_b12/genai/genai.py
+++ langchain_b12-0.1.11/src/langchain_b12/genai/genai.py
@@ -35,14 +35,7 @@ from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import (
     convert_to_openai_tool,
 )
-from pydantic import BaseModel, ConfigDict, Field
-from tenacity import (
-    retry,
-    retry_if_exception_type,
-    stop_after_attempt,
-    stop_never,
-    wait_exponential_jitter,
-)
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
 
 from langchain_b12.genai.genai_utils import (
     convert_messages_to_contents,
```
```diff
@@ -84,7 +77,9 @@ class ChatGenAI(BaseChatModel):
     seed: int | None = None
     """Random seed for the generation."""
     max_retries: int | None = Field(default=3)
-    """Maximum number of retries."""
+    """Maximum number of retries. Prefer `http_retry_options`, but this is kept for compatibility."""
+    http_retry_options: types.HttpRetryOptions | None = Field(default=None)
+    """HTTP retry options for API requests. If not set, max_retries will be used to create default options."""
     safety_settings: list[types.SafetySetting] | None = None
     """The default safety settings to use for all generations.
 
```
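Retry policy now rides on google-genai's own types instead of a separate tenacity dependency. A minimal construction sketch, assuming a configured `google.genai.Client` and using the `HttpRetryOptions` fields (`attempts`, `initial_delay`, `max_delay`) that the new tests exercise; the model name is illustrative only:

```python
from google.genai import Client, types

from langchain_b12.genai.genai import ChatGenAI

client = Client(vertexai=True)  # assumes project/credentials come from the environment

model = ChatGenAI(
    client=client,
    model="gemini-2.0-flash",  # hypothetical model name, for illustration
    http_retry_options=types.HttpRetryOptions(
        attempts=5,          # retry budget handed to the SDK's HTTP layer
        initial_delay=2.0,   # seconds; same fields the new tests assert on
        max_delay=30.0,
    ),
)
```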
```diff
@@ -107,6 +102,13 @@ class ChatGenAI(BaseChatModel):
         arbitrary_types_allowed=True,
     )
 
+    @model_validator(mode="after")
+    def _setup_retry_options(self) -> "ChatGenAI":
+        """Convert max_retries to http_retry_options if not explicitly set."""
+        if self.http_retry_options is None and self.max_retries is not None:
+            self.http_retry_options = types.HttpRetryOptions(attempts=self.max_retries)
+        return self
+
     @property
     def _llm_type(self) -> str:
         return "vertexai"
```
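The validator keeps `max_retries` working as a convenience alias. Its observable behavior, restated from the new tests with a mocked client:

```python
from unittest.mock import MagicMock

from google.genai import Client, types

from langchain_b12.genai.genai import ChatGenAI

client = MagicMock(spec=Client)  # stand-in client, as in the package's tests

# max_retries alone is promoted to HttpRetryOptions(attempts=5).
assert ChatGenAI(client=client, max_retries=5).http_retry_options.attempts == 5

# An explicit http_retry_options takes precedence over max_retries.
model = ChatGenAI(
    client=client,
    max_retries=3,
    http_retry_options=types.HttpRetryOptions(attempts=7),
)
assert model.http_retry_options.attempts == 7

# max_retries=None disables the fallback entirely.
assert ChatGenAI(client=client, max_retries=None).http_retry_options is None
```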
```diff
@@ -208,64 +210,32 @@ class ChatGenAI(BaseChatModel):
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         system_message, contents = self._prepare_request(messages=messages)
-
-
-
-
-            if self.max_retries is not None
-            else stop_never,
-            wait=wait_exponential_jitter(initial=1, max=60),
-            retry=retry_if_exception_type(Exception),
-            before_sleep=lambda retry_state: logger.warning(
-                "ChatGenAI._stream failed to start (attempt %d/%s). "
-                "Retrying in %.2fs... Error: %s",
-                retry_state.attempt_number,
-                self.max_retries + 1 if self.max_retries is not None else "∞",
-                retry_state.next_action.sleep,
-                retry_state.outcome.exception(),
-            ),
+        http_options = (
+            types.HttpOptions(retry_options=self.http_retry_options)
+            if self.http_retry_options
+            else None
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                stop_sequences=stop or self.stop,
-                safety_settings=self.safety_settings,
-                thinking_config=self.thinking_config,
-                automatic_function_calling=types.AutomaticFunctionCallingConfig(
-                    disable=True,
-                ),
-                **kwargs,
+        response_iter = self.client.models.generate_content_stream(
+            model=self.model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                http_options=http_options,
+                system_instruction=system_message,
+                temperature=self.temperature,
+                top_k=self.top_k,
+                top_p=self.top_p,
+                max_output_tokens=self.max_output_tokens,
+                candidate_count=self.n,
+                stop_sequences=stop or self.stop,
+                safety_settings=self.safety_settings,
+                thinking_config=self.thinking_config,
+                automatic_function_calling=types.AutomaticFunctionCallingConfig(
+                    disable=True,
                 ),
-
-
-
-
-                first_response, prev_total_usage=None
-            )
-            return first_chunk, response_iter, total_usage
-
-        # Retry only covers stream initialization and first chunk
-        first_chunk, response_iter, total_lc_usage = _initiate_stream()
-
-        # Yield first chunk
-        if run_manager and isinstance(first_chunk.message.content, str):
-            run_manager.on_llm_new_token(first_chunk.message.content)
-        yield first_chunk
-
-        # Continue streaming without retry (retries during streaming are not well defined)
+                **kwargs,
+            ),
+        )
+        total_lc_usage = None
         for response_chunk in response_iter:
             chunk, total_lc_usage = self._gemini_chunk_to_generation_chunk(
                 response_chunk, prev_total_usage=total_lc_usage
```
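(Several removed lines above lost their content in extraction and are shown as bare `-` markers; they belonged to the old tenacity `@retry(...)` decorator and the nested `_initiate_stream` helper.) The decorator that previously retried stream initialization and the first chunk is gone; retrying is delegated to the SDK via the per-request `http_options`, and mid-stream errors simply propagate, as the new `test_stream_error_propagates` asserts. The caller-side API is unchanged; a minimal sketch, assuming `model` was constructed as above:

```python
from langchain_core.messages import HumanMessage

# Chunks are yielded as they arrive; a failure mid-stream raises out of the loop.
for chunk in model.stream([HumanMessage(content="Hello!")]):
    print(chunk.content, end="", flush=True)
```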
```diff
@@ -282,65 +252,33 @@ class ChatGenAI(BaseChatModel):
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         system_message, contents = self._prepare_request(messages=messages)
-
-
-
-
-            if self.max_retries is not None
-            else stop_never,
-            wait=wait_exponential_jitter(initial=1, max=60),
-            retry=retry_if_exception_type(Exception),
-            before_sleep=lambda retry_state: logger.warning(
-                "ChatGenAI._astream failed to start (attempt %d/%s). "
-                "Retrying in %.2fs... Error: %s",
-                retry_state.attempt_number,
-                self.max_retries + 1 if self.max_retries is not None else "∞",
-                retry_state.next_action.sleep,
-                retry_state.outcome.exception(),
-            ),
+        http_options = (
+            types.HttpOptions(retry_options=self.http_retry_options)
+            if self.http_retry_options
+            else None
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                stop_sequences=stop or self.stop,
-                safety_settings=self.safety_settings,
-                thinking_config=self.thinking_config,
-                automatic_function_calling=types.AutomaticFunctionCallingConfig(
-                    disable=True,
-                ),
-                **kwargs,
+        response_iter = self.client.aio.models.generate_content_stream(
+            model=self.model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                http_options=http_options,
+                system_instruction=system_message,
+                temperature=self.temperature,
+                top_k=self.top_k,
+                top_p=self.top_p,
+                max_output_tokens=self.max_output_tokens,
+                candidate_count=self.n,
+                stop_sequences=stop or self.stop,
+                safety_settings=self.safety_settings,
+                thinking_config=self.thinking_config,
+                automatic_function_calling=types.AutomaticFunctionCallingConfig(
+                    disable=True,
                 ),
-
-
-
-
-
-            )
-            return first_chunk, response_iter, total_usage
-
-        # Retry only covers stream initialization and first chunk
-        first_chunk, response_iter, total_lc_usage = await _initiate_stream()
-
-        # Yield first chunk
-        if run_manager and isinstance(first_chunk.message.content, str):
-            await run_manager.on_llm_new_token(first_chunk.message.content)
-        yield first_chunk
-
-        # Continue streaming without retry (retries during streaming are not well defined)
-        async for response_chunk in response_iter:
+                **kwargs,
+            ),
+        )
+        total_lc_usage = None
+        async for response_chunk in await response_iter:
             chunk, total_lc_usage = self._gemini_chunk_to_generation_chunk(
                 response_chunk, prev_total_usage=total_lc_usage
             )
```
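One subtlety in the async path: `client.aio.models.generate_content_stream(...)` returns an awaitable that resolves to an async iterator, hence the new `async for ... in await response_iter` form. From the caller's side, a sketch assuming the same `model` as above:

```python
import asyncio

from langchain_core.messages import HumanMessage


async def main() -> None:
    # astream drives the aio client internally; chunks arrive incrementally.
    async for chunk in model.astream([HumanMessage(content="Hello!")]):
        print(chunk.content, end="", flush=True)


asyncio.run(main())
```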
```diff
--- /dev/null
+++ langchain_b12-0.1.11/tests/test_genai.py
@@ -0,0 +1,210 @@
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+from google.genai import Client, types
+from langchain_b12.genai.genai import ChatGenAI
+from langchain_core.messages import HumanMessage
+
+
+def _make_response_chunk(text: str) -> types.GenerateContentResponse:
+    """Helper to create a response chunk."""
+    return types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(content=types.Content(parts=[types.Part(text=text)]))
+        ]
+    )
+
+
+def test_chatgenai():
+    client = MagicMock(spec=Client)
+    model = ChatGenAI(client=client, model="foo", temperature=1)
+    assert model.model_name == "foo"
+    assert model.temperature == 1
+    assert model.client == client
+
+
+def test_chatgenai_invocation():
+    client: Client = MagicMock(spec=Client)
+    client.models.generate_content_stream.return_value = iter(
+        (
+            _make_response_chunk("bar"),
+            _make_response_chunk("baz"),
+        )
+    )
+    model = ChatGenAI(client=client)
+    messages = [HumanMessage(content="foo")]
+    response = model.invoke(messages)
+    method: MagicMock = client.models.generate_content_stream
+    method.assert_called_once()
+    assert response.content == "barbaz"
+
+
+def test_max_retries_converts_to_http_retry_options():
+    """Test that max_retries is properly converted to HttpRetryOptions."""
+    client = MagicMock(spec=Client)
+    model = ChatGenAI(client=client, max_retries=5)
+
+    assert model.http_retry_options is not None
+    assert model.http_retry_options.attempts == 5
+
+
+def test_http_retry_options_passed_directly():
+    """Test that http_retry_options can be passed directly."""
+    client = MagicMock(spec=Client)
+    retry_options = types.HttpRetryOptions(
+        attempts=10,
+        initial_delay=2.0,
+        max_delay=30.0,
+    )
+    model = ChatGenAI(client=client, http_retry_options=retry_options)
+
+    assert model.http_retry_options == retry_options
+    assert model.http_retry_options.attempts == 10
+    assert model.http_retry_options.initial_delay == 2.0
+
+
+def test_http_retry_options_overrides_max_retries():
+    """Test that explicit http_retry_options overrides max_retries."""
+    client = MagicMock(spec=Client)
+    retry_options = types.HttpRetryOptions(attempts=7)
+    model = ChatGenAI(client=client, max_retries=3, http_retry_options=retry_options)
+
+    # http_retry_options should take precedence
+    assert model.http_retry_options == retry_options
+    assert model.http_retry_options.attempts == 7
+
+
+def test_retry_options_passed_in_stream_config():
+    """Test that retry options are passed to GenerateContentConfig."""
+    client: Client = MagicMock(spec=Client)
+    client.models.generate_content_stream.return_value = iter(
+        [_make_response_chunk("success")]
+    )
+
+    model = ChatGenAI(client=client, max_retries=5)
+    messages = [HumanMessage(content="foo")]
+    response = model.invoke(messages)
+
+    # Verify the config was called with http_options containing retry_options
+    call_args = client.models.generate_content_stream.call_args
+    config = call_args.kwargs["config"]
+    assert config.http_options is not None
+    assert config.http_options.retry_options is not None
+    assert config.http_options.retry_options.attempts == 5
+
+
+def test_no_retry_options_when_max_retries_none():
+    """Test that no http_retry_options are set when max_retries is None."""
+    client = MagicMock(spec=Client)
+    model = ChatGenAI(client=client, max_retries=None)
+
+    assert model.http_retry_options is None
+
+
+# --- Streaming behavior tests ---
+
+
+def test_stream_yields_chunks_immediately():
+    """Test that stream yields chunks as they arrive, not buffered."""
+    client: Client = MagicMock(spec=Client)
+    chunks_yielded: list[str] = []
+
+    def mock_stream():
+        for text in ["chunk1", "chunk2", "chunk3"]:
+            # Track when chunks are yielded from the source
+            chunks_yielded.append(f"source:{text}")
+            yield _make_response_chunk(text)
+
+    client.models.generate_content_stream.return_value = mock_stream()
+
+    model = ChatGenAI(client=client, max_retries=3)
+    messages = [HumanMessage(content="foo")]
+
+    received: list[str] = []
+    for chunk in model.stream(messages):
+        received.append(chunk.content)
+        # After receiving each chunk, check that source yielded it
+        assert len(received) == len([c for c in chunks_yielded if c.startswith("source:")])
+
+    assert received == ["chunk1", "chunk2", "chunk3"]
+
+
+def test_stream_error_propagates():
+    """Test that errors during streaming are propagated."""
+    client: Client = MagicMock(spec=Client)
+
+    def failing_stream():
+        yield _make_response_chunk("first")
+        raise Exception("Mid-stream error")
+
+    client.models.generate_content_stream.return_value = failing_stream()
+
+    model = ChatGenAI(client=client, max_retries=3)
+    messages = [HumanMessage(content="foo")]
+
+    chunks = []
+    with pytest.raises(Exception, match="Mid-stream error"):
+        for chunk in model.stream(messages):
+            chunks.append(chunk.content)
+
+    # First chunk was received before error
+    assert chunks == ["first"]
+
+
+# --- Async streaming tests ---
+
+
+async def _async_iter(items):
+    """Helper to create an async iterator from items."""
+    for item in items:
+        yield item
+
+
+@pytest.mark.asyncio
+async def test_astream_yields_chunks_immediately():
+    """Test that async stream yields chunks as they arrive."""
+    client: Client = MagicMock(spec=Client)
+
+    chunks = [
+        _make_response_chunk("async1"),
+        _make_response_chunk("async2"),
+        _make_response_chunk("async3"),
+    ]
+
+    # generate_content_stream returns a coroutine that resolves to async iterator
+    client.aio.models.generate_content_stream = AsyncMock(
+        return_value=_async_iter(chunks)
+    )
+
+    model = ChatGenAI(client=client, max_retries=3)
+    messages = [HumanMessage(content="foo")]
+
+    received: list[str] = []
+    async for chunk in model.astream(messages):
+        received.append(chunk.content)
+
+    assert received == ["async1", "async2", "async3"]
+
+
+@pytest.mark.asyncio
+async def test_astream_error_propagates():
+    """Test that errors during async streaming are propagated."""
+    client: Client = MagicMock(spec=Client)
+
+    async def failing_after_first():
+        yield _make_response_chunk("first")
+        raise Exception("Async mid-stream error")
+
+    client.aio.models.generate_content_stream = AsyncMock(
+        return_value=failing_after_first()
+    )
+
+    model = ChatGenAI(client=client, max_retries=3)
+    messages = [HumanMessage(content="foo")]
+
+    chunks = []
+    with pytest.raises(Exception, match="Async mid-stream error"):
+        async for chunk in model.astream(messages):
+            chunks.append(chunk.content)
+
+    assert chunks == ["first"]
```
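The rewritten suite drops the tenacity-era retry tests and instead asserts that the retry policy lands in the request config; the async tests also switch from `pytest.mark.anyio` to `pytest.mark.asyncio`, matching the removal of the `pytest-anyio` dependency. The mocking pattern for the awaitable stream is worth restating on its own, with placeholder items:

```python
from unittest.mock import AsyncMock, MagicMock

from google.genai import Client


async def _async_iter(items):
    # Async generator: what the awaited stream call ultimately yields from.
    for item in items:
        yield item


client = MagicMock(spec=Client)
# The aio call is awaited before iteration, so the mock must be an AsyncMock
# whose return value is an async iterator.
client.aio.models.generate_content_stream = AsyncMock(
    return_value=_async_iter(["chunk-a", "chunk-b"])  # placeholder items
)
```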
```diff
--- langchain_b12-0.1.10/uv.lock
+++ langchain_b12-0.1.11/uv.lock
@@ -252,12 +252,10 @@ wheels = [
 
 [[package]]
 name = "langchain-b12"
-version = "0.1.
+version = "0.1.10"
 source = { editable = "." }
 dependencies = [
     { name = "langchain-core" },
-    { name = "pytest-anyio" },
-    { name = "tenacity" },
 ]
 
 [package.dev-dependencies]
@@ -274,11 +272,7 @@ google = [
 ]
 
 [package.metadata]
-requires-dist = [
-    { name = "langchain-core", specifier = ">=0.3.60" },
-    { name = "pytest-anyio", specifier = ">=0.0.0" },
-    { name = "tenacity", specifier = ">=9.1.2" },
-]
+requires-dist = [{ name = "langchain-core", specifier = ">=0.3.60" }]
 
 [package.metadata.requires-dev]
 citations = [
@@ -623,19 +617,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474 },
 ]
 
-[[package]]
-name = "pytest-anyio"
-version = "0.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "anyio" },
-    { name = "pytest" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/00/44/a02e5877a671b0940f21a7a0d9704c22097b123ed5cdbcca9cab39f17acc/pytest-anyio-0.0.0.tar.gz", hash = "sha256:b41234e9e9ad7ea1dbfefcc1d6891b23d5ef7c9f07ccf804c13a9cc338571fd3", size = 1560 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/c6/25/bd6493ae85d0a281b6a0f248d0fdb1d9aa2b31f18bcd4a8800cf397d8209/pytest_anyio-0.0.0-py2.py3-none-any.whl", hash = "sha256:dc8b5c4741cb16ff90be37fddd585ca943ed12bbeb563de7ace6cd94441d8746", size = 1999 },
-]
-
 [[package]]
 name = "pytest-asyncio"
 version = "1.1.0"
```
```diff
--- langchain_b12-0.1.10/tests/test_genai.py
+++ /dev/null
@@ -1,279 +0,0 @@
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-from google.genai import Client, types
-from langchain_b12.genai.genai import ChatGenAI
-from langchain_core.messages import HumanMessage
-
-
-def _make_response_chunk(text: str) -> types.GenerateContentResponse:
-    """Helper to create a response chunk."""
-    return types.GenerateContentResponse(
-        candidates=[
-            types.Candidate(content=types.Content(parts=[types.Part(text=text)]))
-        ]
-    )
-
-
-def test_chatgenai():
-    client = MagicMock(spec=Client)
-    model = ChatGenAI(client=client, model="foo", temperature=1)
-    assert model.model_name == "foo"
-    assert model.temperature == 1
-    assert model.client == client
-
-
-def test_chatgenai_invocation():
-    client: Client = MagicMock(spec=Client)
-    client.models.generate_content_stream.return_value = iter(
-        (
-            _make_response_chunk("bar"),
-            _make_response_chunk("baz"),
-        )
-    )
-    model = ChatGenAI(client=client)
-    messages = [HumanMessage(content="foo")]
-    response = model.invoke(messages)
-    method: MagicMock = client.models.generate_content_stream
-    method.assert_called_once()
-    assert response.content == "barbaz"
-
-
-def _make_success_iter():
-    """Helper to create a successful streaming iterator."""
-    return iter([_make_response_chunk("success")])
-
-
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-def test_chatgenai_retry_succeeds_after_failure(mock_wait):
-    """Test that retry logic succeeds after transient failures."""
-    client: Client = MagicMock(spec=Client)
-
-    # First two calls fail, third succeeds
-    client.models.generate_content_stream.side_effect = [
-        Exception("Transient error 1"),
-        Exception("Transient error 2"),
-        _make_success_iter(),
-    ]
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-    response = model.invoke(messages)
-
-    assert response.content == "success"
-    assert client.models.generate_content_stream.call_count == 3
-
-
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-def test_chatgenai_retry_exhausted_raises(mock_wait):
-    """Test that exception is raised after all retries are exhausted."""
-    client: Client = MagicMock(spec=Client)
-
-    # All calls fail
-    client.models.generate_content_stream.side_effect = Exception("Persistent error")
-
-    model = ChatGenAI(client=client, max_retries=2)
-    messages = [HumanMessage(content="foo")]
-
-    with pytest.raises(Exception, match="Persistent error"):
-        model.invoke(messages)
-
-    # Initial attempt + 2 retries = 3 total calls
-    assert client.models.generate_content_stream.call_count == 3
-
-
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-def test_chatgenai_no_retry_when_max_retries_zero(mock_wait):
-    """Test that no retries occur when max_retries=0."""
-    client: Client = MagicMock(spec=Client)
-    client.models.generate_content_stream.side_effect = Exception("Error")
-
-    model = ChatGenAI(client=client, max_retries=0)
-    messages = [HumanMessage(content="foo")]
-
-    with pytest.raises(Exception, match="Error"):
-        model.invoke(messages)
-
-    # Only 1 attempt, no retries
-    assert client.models.generate_content_stream.call_count == 1
-
-
-def test_chatgenai_no_retry_on_success():
-    """Test that no retries occur when first attempt succeeds."""
-    client: Client = MagicMock(spec=Client)
-    client.models.generate_content_stream.return_value = _make_success_iter()
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-    response = model.invoke(messages)
-
-    assert response.content == "success"
-    assert client.models.generate_content_stream.call_count == 1
-
-
-# --- Streaming behavior tests ---
-
-
-def test_stream_yields_chunks_immediately():
-    """Test that stream yields chunks as they arrive, not buffered."""
-    client: Client = MagicMock(spec=Client)
-    chunks_yielded: list[str] = []
-
-    def mock_stream():
-        for text in ["chunk1", "chunk2", "chunk3"]:
-            # Track when chunks are yielded from the source
-            chunks_yielded.append(f"source:{text}")
-            yield _make_response_chunk(text)
-
-    client.models.generate_content_stream.return_value = mock_stream()
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-
-    received: list[str] = []
-    for chunk in model.stream(messages):
-        received.append(chunk.content)
-        # After receiving each chunk, check that source yielded it
-        assert len(received) == len([c for c in chunks_yielded if c.startswith("source:")])
-
-    assert received == ["chunk1", "chunk2", "chunk3"]
-
-
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-def test_stream_no_retry_after_first_chunk(mock_wait):
-    """Test that errors after first chunk are NOT retried."""
-    client: Client = MagicMock(spec=Client)
-
-    def failing_after_first():
-        yield _make_response_chunk("first")
-        raise Exception("Mid-stream error")
-
-    client.models.generate_content_stream.return_value = failing_after_first()
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-
-    chunks = []
-    with pytest.raises(Exception, match="Mid-stream error"):
-        for chunk in model.stream(messages):
-            chunks.append(chunk.content)
-
-    # First chunk was received
-    assert chunks == ["first"]
-    # Only one call - no retry after first chunk
-    assert client.models.generate_content_stream.call_count == 1
-
-
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-def test_stream_retry_on_first_chunk_failure(mock_wait):
-    """Test that failure on first chunk triggers retry."""
-    client: Client = MagicMock(spec=Client)
-
-    def fail_on_first_next():
-        raise Exception("First chunk error")
-        yield  # Make it a generator
-
-    def success_stream():
-        yield _make_response_chunk("success1")
-        yield _make_response_chunk("success2")
-
-    client.models.generate_content_stream.side_effect = [
-        fail_on_first_next(),
-        success_stream(),
-    ]
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-
-    chunks = [chunk.content for chunk in model.stream(messages)]
-    assert chunks == ["success1", "success2"]
-    assert client.models.generate_content_stream.call_count == 2
-
-
-# --- Async streaming tests ---
-
-
-async def _async_iter(items):
-    """Helper to create an async iterator from items."""
-    for item in items:
-        yield item
-
-
-@pytest.mark.anyio
-async def test_astream_yields_chunks_immediately():
-    """Test that async stream yields chunks as they arrive."""
-    client: Client = MagicMock(spec=Client)
-
-    chunks = [
-        _make_response_chunk("async1"),
-        _make_response_chunk("async2"),
-        _make_response_chunk("async3"),
-    ]
-
-    # generate_content_stream returns a coroutine that resolves to async iterator
-    client.aio.models.generate_content_stream = AsyncMock(
-        return_value=_async_iter(chunks)
-    )
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-
-    received: list[str] = []
-    async for chunk in model.astream(messages):
-        received.append(chunk.content)
-
-    assert received == ["async1", "async2", "async3"]
-
-
-@pytest.mark.anyio
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-async def test_astream_no_retry_after_first_chunk(mock_wait):
-    """Test that errors after first chunk are NOT retried in async."""
-    client: Client = MagicMock(spec=Client)
-
-    async def failing_after_first():
-        yield _make_response_chunk("first")
-        raise Exception("Async mid-stream error")
-
-    client.aio.models.generate_content_stream = AsyncMock(
-        return_value=failing_after_first()
-    )
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-
-    chunks = []
-    with pytest.raises(Exception, match="Async mid-stream error"):
-        async for chunk in model.astream(messages):
-            chunks.append(chunk.content)
-
-    assert chunks == ["first"]
-    assert client.aio.models.generate_content_stream.call_count == 1
-
-
-@pytest.mark.anyio
-@patch("langchain_b12.genai.genai.wait_exponential_jitter", return_value=lambda _: 0)
-async def test_astream_retry_succeeds_after_failure(mock_wait):
-    """Test that async retry logic works for initial failures."""
-    client: Client = MagicMock(spec=Client)
-
-    call_count = 0
-
-    async def side_effect_fn(*args, **kwargs):
-        nonlocal call_count
-        call_count += 1
-        if call_count == 1:
-            raise Exception("Async transient error")
-        return _async_iter([_make_response_chunk("async_success")])
-
-    client.aio.models.generate_content_stream = AsyncMock(side_effect=side_effect_fn)
-
-    model = ChatGenAI(client=client, max_retries=3)
-    messages = [HumanMessage(content="foo")]
-
-    chunks = []
-    async for chunk in model.astream(messages):
-        chunks.append(chunk.content)
-
-    assert chunks == ["async_success"]
-    assert client.aio.models.generate_content_stream.call_count == 2
```