langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. The information is provided for informational purposes only.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +45 -70
- langchain_core/_api/deprecation.py +80 -80
- langchain_core/_api/path.py +22 -8
- langchain_core/_import_utils.py +10 -4
- langchain_core/agents.py +25 -21
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +341 -348
- langchain_core/callbacks/file.py +55 -44
- langchain_core/callbacks/manager.py +546 -683
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +35 -36
- langchain_core/callbacks/usage.py +65 -70
- langchain_core/chat_history.py +48 -55
- langchain_core/document_loaders/base.py +46 -21
- langchain_core/document_loaders/langsmith.py +39 -36
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +96 -74
- langchain_core/documents/compressor.py +12 -9
- langchain_core/documents/transformers.py +29 -28
- langchain_core/embeddings/fake.py +56 -57
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +15 -9
- langchain_core/globals.py +4 -163
- langchain_core/indexing/api.py +132 -125
- langchain_core/indexing/base.py +64 -67
- langchain_core/indexing/in_memory.py +26 -6
- langchain_core/language_models/__init__.py +15 -27
- langchain_core/language_models/_utils.py +267 -117
- langchain_core/language_models/base.py +92 -177
- langchain_core/language_models/chat_models.py +547 -407
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +72 -118
- langchain_core/language_models/llms.py +168 -242
- langchain_core/load/dump.py +8 -11
- langchain_core/load/load.py +32 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +50 -56
- langchain_core/messages/__init__.py +36 -51
- langchain_core/messages/ai.py +377 -150
- langchain_core/messages/base.py +239 -47
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -3
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +7 -7
- langchain_core/messages/human.py +44 -38
- langchain_core/messages/modifier.py +3 -2
- langchain_core/messages/system.py +40 -27
- langchain_core/messages/tool.py +160 -58
- langchain_core/messages/utils.py +527 -638
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +68 -104
- langchain_core/output_parsers/json.py +13 -17
- langchain_core/output_parsers/list.py +11 -33
- langchain_core/output_parsers/openai_functions.py +56 -74
- langchain_core/output_parsers/openai_tools.py +68 -109
- langchain_core/output_parsers/pydantic.py +15 -13
- langchain_core/output_parsers/string.py +6 -2
- langchain_core/output_parsers/transform.py +17 -60
- langchain_core/output_parsers/xml.py +34 -44
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +26 -11
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +17 -6
- langchain_core/outputs/llm_result.py +15 -8
- langchain_core/prompt_values.py +29 -123
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -63
- langchain_core/prompts/chat.py +259 -288
- langchain_core/prompts/dict.py +19 -11
- langchain_core/prompts/few_shot.py +84 -90
- langchain_core/prompts/few_shot_with_templates.py +14 -12
- langchain_core/prompts/image.py +19 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +7 -8
- langchain_core/prompts/prompt.py +42 -43
- langchain_core/prompts/string.py +37 -16
- langchain_core/prompts/structured.py +43 -46
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +52 -192
- langchain_core/runnables/base.py +1727 -1683
- langchain_core/runnables/branch.py +52 -73
- langchain_core/runnables/config.py +89 -103
- langchain_core/runnables/configurable.py +128 -130
- langchain_core/runnables/fallbacks.py +93 -82
- langchain_core/runnables/graph.py +127 -127
- langchain_core/runnables/graph_ascii.py +63 -41
- langchain_core/runnables/graph_mermaid.py +87 -70
- langchain_core/runnables/graph_png.py +31 -36
- langchain_core/runnables/history.py +145 -161
- langchain_core/runnables/passthrough.py +141 -144
- langchain_core/runnables/retry.py +84 -68
- langchain_core/runnables/router.py +33 -37
- langchain_core/runnables/schema.py +79 -72
- langchain_core/runnables/utils.py +95 -139
- langchain_core/stores.py +85 -131
- langchain_core/structured_query.py +11 -15
- langchain_core/sys_info.py +31 -32
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +221 -247
- langchain_core/tools/convert.py +144 -161
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -19
- langchain_core/tools/simple.py +52 -29
- langchain_core/tools/structured.py +56 -60
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +103 -112
- langchain_core/tracers/context.py +29 -48
- langchain_core/tracers/core.py +142 -105
- langchain_core/tracers/evaluation.py +30 -34
- langchain_core/tracers/event_stream.py +162 -117
- langchain_core/tracers/langchain.py +34 -36
- langchain_core/tracers/log_stream.py +87 -49
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +18 -34
- langchain_core/tracers/run_collector.py +8 -20
- langchain_core/tracers/schemas.py +0 -125
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +47 -9
- langchain_core/utils/aiter.py +70 -66
- langchain_core/utils/env.py +12 -9
- langchain_core/utils/function_calling.py +139 -206
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +48 -45
- langchain_core/utils/json.py +14 -4
- langchain_core/utils/json_schema.py +159 -43
- langchain_core/utils/mustache.py +32 -25
- langchain_core/utils/pydantic.py +67 -40
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +104 -62
- langchain_core/vectorstores/base.py +131 -179
- langchain_core/vectorstores/in_memory.py +113 -182
- langchain_core/vectorstores/utils.py +23 -17
- langchain_core/version.py +1 -1
- langchain_core-1.0.0.dist-info/METADATA +68 -0
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -448
- langchain_core/memory.py +0 -116
- langchain_core/messages/content_blocks.py +0 -1435
- langchain_core/prompts/pipeline.py +0 -133
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -23
- langchain_core/utils/loading.py +0 -31
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
- langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
- langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
Detailed hunks follow for `langchain_core/language_models/fake.py` and `langchain_core/language_models/fake_chat_models.py`.

```diff
--- langchain_core/language_models/fake.py (0.4.0.dev0)
+++ langchain_core/language_models/fake.py (1.0.0)
@@ -3,7 +3,7 @@
 import asyncio
 import time
 from collections.abc import AsyncIterator, Iterator, Mapping
-from typing import Any, Optional
+from typing import Any
 
 from typing_extensions import override
 
@@ -23,7 +23,7 @@ class FakeListLLM(LLM):
     """List of responses to return in order."""
     # This parameter should be removed from FakeListLLM since
     # it's only used by sub-classes.
-    sleep: Optional[float] = None
+    sleep: float | None = None
     """Sleep time in seconds between responses.
 
     Ignored by FakeListLLM, but used by sub-classes.
@@ -44,8 +44,8 @@ class FakeListLLM(LLM):
     def _call(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Return next response."""
@@ -60,8 +60,8 @@ class FakeListLLM(LLM):
     async def _acall(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Return next response."""
@@ -91,16 +91,16 @@ class FakeStreamingListLLM(FakeListLLM):
     chunks in a streaming implementation.
     """
 
-    error_on_chunk_number: Optional[int] = None
+    error_on_chunk_number: int | None = None
     """If set, will raise an exception on the specified chunk number."""
 
     @override
     def stream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> Iterator[str]:
         result = self.invoke(input, config)
@@ -119,9 +119,9 @@ class FakeStreamingListLLM(FakeListLLM):
     async def astream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[str]:
         result = await self.ainvoke(input, config)
```
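The `fake.py` changes are purely mechanical: every `Optional[X]` annotation becomes a PEP 604 `X | None` union, with behavior unchanged. A minimal sketch of exercising the updated signatures (the response strings are illustrative, not from the package):

```python
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM

# FakeListLLM returns its configured responses in order; `stop` and
# `run_manager` now default to None via the `X | None` annotations.
llm = FakeListLLM(responses=["alpha", "beta"])
assert llm.invoke("anything") == "alpha"

# FakeStreamingListLLM streams the invoked result piece by piece;
# error_on_chunk_number (now `int | None`) can force a mid-stream
# exception for error-path tests.
streaming = FakeStreamingListLLM(responses=["hello"])
assert "".join(streaming.stream("anything")) == "hello"
```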
```diff
--- langchain_core/language_models/fake_chat_models.py (0.4.0.dev0)
+++ langchain_core/language_models/fake_chat_models.py (1.0.0)
@@ -1,10 +1,10 @@
-"""Fake ChatModel for testing purposes."""
+"""Fake chat model for testing purposes."""
 
 import asyncio
 import re
 import time
-from collections.abc import AsyncIterator, Iterable, Iterator
-from typing import Any, Optional, Union, cast
+from collections.abc import AsyncIterator, Iterator
+from typing import Any, Literal, cast
 
 from typing_extensions import override
 
@@ -16,18 +16,14 @@ from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
 from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import RunnableConfig
-from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1
-from langchain_core.v1.messages import AIMessage as AIMessageV1
-from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
-from langchain_core.v1.messages import MessageV1
 
 
 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake chat model for testing purposes."""
 
     responses: list[BaseMessage]
     """List of responses to **cycle** through in order."""
-    sleep: Optional[float] = None
+    sleep: float | None = None
     """Sleep time in seconds between responses."""
     i: int = 0
     """Internally incremented after every model invocation."""
@@ -36,8 +32,8 @@ class FakeMessagesListChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         if self.sleep is not None:
@@ -61,14 +57,14 @@ class FakeListChatModelError(Exception):
 
 
 class FakeListChatModel(SimpleChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake chat model for testing purposes."""
 
     responses: list[str]
     """List of responses to **cycle** through in order."""
-    sleep: Optional[float] = None
+    sleep: float | None = None
     i: int = 0
     """Internally incremented after every model invocation."""
-    error_on_chunk_number: Optional[int] = None
+    error_on_chunk_number: int | None = None
     """If set, raise an error on the specified chunk number during streaming."""
 
     @property
@@ -79,12 +75,13 @@ class FakeListChatModel(SimpleChatModel):
     @override
     def _call(
         self,
-        messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        *args: Any,
         **kwargs: Any,
     ) -> str:
-        """First try to lookup in queries, else return 'foo' or 'bar'."""
+        """Return the next response in the list.
+
+        Cycle back to the start if at the end.
+        """
         if self.sleep is not None:
             time.sleep(self.sleep)
         response = self.responses[self.i]
@@ -98,8 +95,8 @@ class FakeListChatModel(SimpleChatModel):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         response = self.responses[self.i]
```
```diff
@@ -116,14 +113,19 @@ class FakeListChatModel(SimpleChatModel):
             ):
                 raise FakeListChatModelError
 
-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Literal["last"] | None = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )
 
     @override
     async def _astream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         response = self.responses[self.i]
@@ -139,7 +141,12 @@ class FakeListChatModel(SimpleChatModel):
                 and i_c == self.error_on_chunk_number
             ):
                 raise FakeListChatModelError
-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Literal["last"] | None = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )
 
     @property
     @override
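Both `_stream` and `_astream` now tag the final chunk of a response with `chunk_position="last"`. A sketch of what this looks like from the public `stream` API, assuming `AIMessageChunk` exposes the new `chunk_position` field introduced in this release:

```python
from langchain_core.language_models.fake_chat_models import FakeListChatModel

model = FakeListChatModel(responses=["abc"])
chunks = list(model.stream("ignored"))

# Each character of the response arrives as its own AIMessageChunk;
# only the final one should carry chunk_position="last".
assert [c.content for c in chunks] == ["a", "b", "c"]
assert chunks[-1].chunk_position == "last"
```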
```diff
@@ -151,27 +158,33 @@ class FakeListChatModel(SimpleChatModel):
     def batch(
         self,
         inputs: list[Any],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
-            return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+            return [
+                self.invoke(m, c, **kwargs)
+                for m, c in zip(inputs, config, strict=False)
+            ]
         return [self.invoke(m, config, **kwargs) for m in inputs]
 
     @override
     async def abatch(
         self,
         inputs: list[Any],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
             # do Not use an async iterator here because need explicit ordering
-            return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+            return [
+                await self.ainvoke(m, c, **kwargs)
+                for m, c in zip(inputs, config, strict=False)
+            ]
         # do Not use an async iterator here because need explicit ordering
         return [await self.ainvoke(m, config, **kwargs) for m in inputs]
 
```
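`batch` and `abatch` now advertise `list[AIMessage]` as their return type and pair inputs with per-call configs via `zip(..., strict=False)`. A hedged usage sketch (the inputs and empty configs are illustrative):

```python
from langchain_core.language_models.fake_chat_models import FakeListChatModel

model = FakeListChatModel(responses=["one", "two"])

# With a list of configs, each input is zipped with its own config
# (strict=False, so surplus entries on either side are ignored).
results = model.batch(["q1", "q2"], config=[{}, {}])
assert [r.content for r in results] == ["one", "two"]
```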
```diff
@@ -183,8 +196,8 @@ class FakeChatModel(SimpleChatModel):
     def _call(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         return "fake response"
@@ -193,8 +206,8 @@ class FakeChatModel(SimpleChatModel):
     async def _agenerate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         output_str = "fake response"
@@ -215,34 +228,36 @@ class GenericFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.
 
     * Chat model should be usable in both sync and async tests
-    * Invokes on_llm_new_token to allow for testing of callback related code for new
-      tokens.
+    * Invokes `on_llm_new_token` to allow for testing of callback related code for new
+      tokens.
     * Includes logic to break messages into message chunk to facilitate testing of
-      streaming.
+      streaming.
+
     """
 
-    messages: Iterator[Union[AIMessage, str]]
+    messages: Iterator[AIMessage | str]
     """Get an iterator over messages.
 
     This can be expanded to accept other types like Callables / dicts / strings
     to make the interface more generic if needed.
 
-    Note: if you want to pass a list, you can use `iter` to convert it to an iterator.
+    !!! note
+        if you want to pass a list, you can use `iter` to convert it to an iterator.
+
+    !!! warning
+        Streaming is not implemented yet. We should try to implement it in the future by
+        delegating to invoke and then breaking the resulting output into message chunks.
 
-    Please note that streaming is not implemented yet. We should try to implement it
-    in the future by delegating to invoke and then breaking the resulting output
-    into message chunks.
     """
 
     @override
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
         message = next(self.messages)
         message_ = AIMessage(content=message) if isinstance(message, str) else message
         generation = ChatGeneration(message=message_)
@@ -251,11 +266,10 @@ class GenericFakeChatModel(BaseChatModel):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        """Stream the output of the model."""
         chat_result = self._generate(
             messages, stop=stop, run_manager=run_manager, **kwargs
         )
```
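The reworked docstring spells out the intended usage: `messages` must be an iterator, so plain lists are wrapped with `iter`. A short sketch under that assumption (the message contents are illustrative):

```python
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

# messages is typed Iterator[AIMessage | str]; per the docstring note,
# wrap a list with iter() to satisfy the annotation.
model = GenericFakeChatModel(messages=iter(["hello", AIMessage(content="world")]))
assert model.invoke("ignored").content == "hello"
assert model.invoke("ignored").content == "world"
```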
```diff
@@ -286,10 +300,16 @@ class GenericFakeChatModel(BaseChatModel):
 
         content_chunks = cast("list[str]", re.split(r"(\s)", content))
 
-        for token in content_chunks:
+        for idx, token in enumerate(content_chunks):
             chunk = ChatGenerationChunk(
                 message=AIMessageChunk(content=token, id=message.id)
             )
+            if (
+                idx == len(content_chunks) - 1
+                and isinstance(chunk.message, AIMessageChunk)
+                and not message.additional_kwargs
+            ):
+                chunk.message.chunk_position = "last"
             if run_manager:
                 run_manager.on_llm_new_token(token, chunk=chunk)
             yield chunk
```
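`_stream` still splits content with `re.split(r"(\s)", ...)`, which keeps each whitespace separator as its own token, and now marks the last token. A sketch, assuming the same `chunk_position` field as above:

```python
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel

model = GenericFakeChatModel(messages=iter(["hello world"]))

# re.split(r"(\s)", ...) preserves separators, so the stream is
# ["hello", " ", "world"]; the final chunk is tagged as the last one.
tokens = [chunk.content for chunk in model.stream("ignored")]
assert tokens == ["hello", " ", "world"]
```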
```diff
@@ -355,85 +375,19 @@ class ParrotFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.
 
     * Chat model should be usable in both sync and async tests
+
     """
 
     @override
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
         return ChatResult(generations=[ChatGeneration(message=messages[-1])])
 
     @property
     def _llm_type(self) -> str:
         return "parrot-fake-chat-model"
-
-
-class GenericFakeChatModelV1(BaseChatModelV1):
-    """Generic fake chat model that can be used to test the chat model interface."""
-
-    messages: Optional[Iterator[Union[AIMessageV1, str]]] = None
-    message_chunks: Optional[Iterable[Union[AIMessageChunkV1, str]]] = None
-
-    @override
-    def _invoke(
-        self,
-        messages: list[MessageV1],
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> AIMessageV1:
-        """Top Level call."""
-        if self.messages is None:
-            error_msg = "Messages iterator is not set."
-            raise ValueError(error_msg)
-        message = next(self.messages)
-        return AIMessageV1(content=message) if isinstance(message, str) else message
-
-    @override
-    def _stream(
-        self,
-        messages: list[MessageV1],
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> Iterator[AIMessageChunkV1]:
-        """Top Level call."""
-        if self.message_chunks is None:
-            error_msg = "Message chunks iterator is not set."
-            raise ValueError(error_msg)
-        for chunk in self.message_chunks:
-            if isinstance(chunk, str):
-                yield AIMessageChunkV1(chunk)
-            else:
-                yield chunk
-
-    @property
-    def _llm_type(self) -> str:
-        return "generic-fake-chat-model"
-
-
-class ParrotFakeChatModelV1(BaseChatModelV1):
-    """Generic fake chat model that can be used to test the chat model interface.
-
-    * Chat model should be usable in both sync and async tests
-    """
-
-    @override
-    def _invoke(
-        self,
-        messages: list[MessageV1],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> AIMessageV1:
-        """Top Level call."""
-        if isinstance(messages[-1], AIMessageV1):
-            return messages[-1]
-        return AIMessageV1(content=messages[-1].content)
-
-    @property
-    def _llm_type(self) -> str:
-        return "parrot-fake-chat-model"
```
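With `GenericFakeChatModelV1` and `ParrotFakeChatModelV1` deleted here (and the whole `langchain_core.v1` package removed, per the file list above), test suites that relied on the V1 fakes can usually switch to the surviving classes. A hypothetical before/after sketch:

```python
# Before (0.4.0.dev0), gone in 1.0.0:
#   from langchain_core.language_models.fake_chat_models import GenericFakeChatModelV1
#   model = GenericFakeChatModelV1(messages=iter(["hi"]))

# After: the remaining fake covers the same testing use case.
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel

model = GenericFakeChatModel(messages=iter(["hi"]))
assert model.invoke("ignored").content == "hi"
```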