langchain-core 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-core has been flagged as possibly problematic.
- langchain_core/_api/beta_decorator.py +22 -44
- langchain_core/_api/deprecation.py +30 -17
- langchain_core/_api/path.py +19 -2
- langchain_core/_import_utils.py +7 -0
- langchain_core/agents.py +10 -6
- langchain_core/beta/runnables/context.py +1 -2
- langchain_core/callbacks/base.py +28 -15
- langchain_core/callbacks/manager.py +83 -71
- langchain_core/callbacks/usage.py +6 -4
- langchain_core/chat_history.py +29 -21
- langchain_core/document_loaders/base.py +34 -9
- langchain_core/document_loaders/langsmith.py +4 -1
- langchain_core/documents/base.py +35 -10
- langchain_core/documents/transformers.py +4 -2
- langchain_core/embeddings/fake.py +8 -5
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/exceptions.py +7 -0
- langchain_core/globals.py +17 -28
- langchain_core/indexing/api.py +88 -76
- langchain_core/indexing/base.py +5 -8
- langchain_core/indexing/in_memory.py +23 -3
- langchain_core/language_models/__init__.py +3 -2
- langchain_core/language_models/base.py +31 -20
- langchain_core/language_models/chat_models.py +98 -27
- langchain_core/language_models/fake_chat_models.py +10 -9
- langchain_core/language_models/llms.py +52 -18
- langchain_core/load/dump.py +2 -3
- langchain_core/load/load.py +15 -1
- langchain_core/load/serializable.py +39 -44
- langchain_core/memory.py +7 -3
- langchain_core/messages/ai.py +53 -24
- langchain_core/messages/base.py +43 -22
- langchain_core/messages/chat.py +4 -1
- langchain_core/messages/content_blocks.py +23 -2
- langchain_core/messages/function.py +9 -5
- langchain_core/messages/human.py +13 -10
- langchain_core/messages/modifier.py +1 -0
- langchain_core/messages/system.py +11 -8
- langchain_core/messages/tool.py +60 -29
- langchain_core/messages/utils.py +250 -131
- langchain_core/output_parsers/base.py +5 -2
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +7 -22
- langchain_core/output_parsers/openai_functions.py +3 -0
- langchain_core/output_parsers/openai_tools.py +6 -1
- langchain_core/output_parsers/pydantic.py +4 -0
- langchain_core/output_parsers/string.py +5 -1
- langchain_core/output_parsers/xml.py +19 -19
- langchain_core/outputs/chat_generation.py +25 -10
- langchain_core/outputs/generation.py +14 -3
- langchain_core/outputs/llm_result.py +8 -1
- langchain_core/prompt_values.py +16 -6
- langchain_core/prompts/base.py +4 -9
- langchain_core/prompts/chat.py +89 -57
- langchain_core/prompts/dict.py +16 -8
- langchain_core/prompts/few_shot.py +12 -11
- langchain_core/prompts/few_shot_with_templates.py +5 -1
- langchain_core/prompts/image.py +12 -5
- langchain_core/prompts/message.py +5 -6
- langchain_core/prompts/pipeline.py +13 -8
- langchain_core/prompts/prompt.py +22 -8
- langchain_core/prompts/string.py +18 -10
- langchain_core/prompts/structured.py +7 -2
- langchain_core/rate_limiters.py +2 -2
- langchain_core/retrievers.py +7 -6
- langchain_core/runnables/base.py +406 -186
- langchain_core/runnables/branch.py +14 -19
- langchain_core/runnables/config.py +9 -15
- langchain_core/runnables/configurable.py +34 -19
- langchain_core/runnables/fallbacks.py +20 -13
- langchain_core/runnables/graph.py +48 -38
- langchain_core/runnables/graph_ascii.py +41 -18
- langchain_core/runnables/graph_mermaid.py +54 -25
- langchain_core/runnables/graph_png.py +27 -31
- langchain_core/runnables/history.py +55 -58
- langchain_core/runnables/passthrough.py +44 -21
- langchain_core/runnables/retry.py +44 -23
- langchain_core/runnables/router.py +9 -8
- langchain_core/runnables/schema.py +2 -0
- langchain_core/runnables/utils.py +51 -89
- langchain_core/stores.py +19 -31
- langchain_core/sys_info.py +9 -8
- langchain_core/tools/base.py +37 -28
- langchain_core/tools/convert.py +26 -15
- langchain_core/tools/simple.py +36 -8
- langchain_core/tools/structured.py +25 -12
- langchain_core/tracers/base.py +2 -2
- langchain_core/tracers/context.py +5 -1
- langchain_core/tracers/core.py +109 -39
- langchain_core/tracers/evaluation.py +22 -26
- langchain_core/tracers/event_stream.py +45 -34
- langchain_core/tracers/langchain.py +12 -3
- langchain_core/tracers/langchain_v1.py +10 -2
- langchain_core/tracers/log_stream.py +56 -17
- langchain_core/tracers/root_listeners.py +4 -20
- langchain_core/tracers/run_collector.py +6 -16
- langchain_core/tracers/schemas.py +5 -1
- langchain_core/utils/aiter.py +15 -7
- langchain_core/utils/env.py +3 -0
- langchain_core/utils/function_calling.py +50 -28
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +12 -4
- langchain_core/utils/json.py +12 -3
- langchain_core/utils/json_schema.py +156 -40
- langchain_core/utils/loading.py +5 -1
- langchain_core/utils/mustache.py +24 -15
- langchain_core/utils/pydantic.py +38 -9
- langchain_core/utils/utils.py +25 -9
- langchain_core/vectorstores/base.py +7 -20
- langchain_core/vectorstores/in_memory.py +23 -17
- langchain_core/vectorstores/utils.py +18 -12
- langchain_core/version.py +1 -1
- langchain_core-0.3.77.dist-info/METADATA +67 -0
- langchain_core-0.3.77.dist-info/RECORD +174 -0
- langchain_core-0.3.75.dist-info/METADATA +0 -106
- langchain_core-0.3.75.dist-info/RECORD +0 -174
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/entry_points.txt +0 -0
langchain_core/indexing/base.py
CHANGED
@@ -7,6 +7,8 @@ import time
 from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any, Optional, TypedDict
 
+from typing_extensions import override
+
 from langchain_core._api import beta
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.runnables import run_in_executor
@@ -254,14 +256,14 @@ class InMemoryRecordManager(RecordManager):
         """In-memory schema creation is simply ensuring the structure is initialized."""
 
     async def acreate_schema(self) -> None:
-        """
+        """In-memory schema creation is simply ensuring the structure is initialized."""
 
+    @override
     def get_time(self) -> float:
-        """Get the current server time as a high resolution timestamp!"""
         return time.time()
 
+    @override
     async def aget_time(self) -> float:
-        """Async get the current server time as a high resolution timestamp!"""
         return self.get_time()
 
     def update(

@@ -322,11 +324,6 @@ class InMemoryRecordManager(RecordManager):
         raise an error.
 
         This is meant to help prevent time-drift issues since
         time may not be monotonically increasing!
-
-        Raises:
-            ValueError: If the length of keys doesn't match the length of group
-                ids.
-            ValueError: If time_at_least is in the future.
         """
         self.update(keys, group_ids=group_ids, time_at_least=time_at_least)
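The `@override` markers above decorate the async mirrors of the sync API. For orientation, a minimal usage sketch of `InMemoryRecordManager` (the namespace string is illustrative; `create_schema`, `update`, and `exists` are the public `RecordManager` API):

    from langchain_core.indexing import InMemoryRecordManager

    manager = InMemoryRecordManager(namespace="example/ns")  # illustrative namespace
    manager.create_schema()              # for the in-memory variant this is a no-op
    manager.update(["doc-1", "doc-2"])   # stamp keys with the current time
    print(manager.exists(["doc-1", "nope"]))  # [True, False]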
langchain_core/indexing/in_memory.py CHANGED

@@ -32,7 +32,17 @@ class InMemoryDocumentIndex(DocumentIndex):
 
     @override
     def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
-        """Upsert
+        """Upsert documents into the index.
+
+        Args:
+            items: Sequence of documents to add to the index.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            A response object that contains the list of IDs that were
+            successfully added or updated in the index and the list of IDs that
+            failed to be added or updated.
+        """
         ok_ids = []
 
         for item in items:

@@ -51,7 +61,18 @@ class InMemoryDocumentIndex(DocumentIndex):
 
     @override
     def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
-        """Delete by
+        """Delete by IDs.
+
+        Args:
+            ids: List of ids to delete.
+
+        Raises:
+            ValueError: If ids is None.
+
+        Returns:
+            A response object that contains the list of IDs that were successfully
+            deleted and the list of IDs that failed to be deleted.
+        """
         if ids is None:
             msg = "IDs must be provided for deletion"
             raise ValueError(msg)

@@ -69,7 +90,6 @@ class InMemoryDocumentIndex(DocumentIndex):
 
     @override
     def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
-        """Get by ids."""
         return [self.store[id_] for id_ in ids if id_ in self.store]
 
     @override
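A hedged usage sketch matching the new `upsert`/`get`/`delete` docstrings (field access assumes the `UpsertResponse`/`DeleteResponse` TypedDicts with `succeeded`/`failed` keys):

    from langchain_core.documents import Document
    from langchain_core.indexing.in_memory import InMemoryDocumentIndex

    index = InMemoryDocumentIndex()
    up = index.upsert([Document(id="1", page_content="hello")])
    print(up["succeeded"])                   # ["1"]
    print(index.get(["1"])[0].page_content)  # "hello"
    print(index.delete(["1"])["succeeded"])  # ["1"]
    # index.delete(None) raises ValueError, as documented above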
langchain_core/language_models/__init__.py CHANGED

@@ -26,7 +26,8 @@ https://python.langchain.com/docs/how_to/custom_chat_model/
 **LLMs**
 
 Language models that takes a string as input and returns a string.
-These are traditionally older models (newer models generally are Chat Models, see below).
+These are traditionally older models (newer models generally are Chat Models,
+see below).
 
 Although the underlying models are string in, string out, the LangChain wrappers
 also allow these models to take messages as input. This gives them the same interface

@@ -39,7 +40,7 @@ Please see the following guide for more information on how to implement a custom
 https://python.langchain.com/docs/how_to/custom_llm/
 
 
-"""
+"""
 
 from typing import TYPE_CHECKING
 
langchain_core/language_models/base.py CHANGED

@@ -22,19 +22,31 @@ from typing_extensions import TypeAlias, TypedDict, override
 from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import Callbacks
+from langchain_core.globals import get_verbose
 from langchain_core.messages import (
     AnyMessage,
     BaseMessage,
     MessageLikeRepresentation,
     get_buffer_string,
 )
-from langchain_core.prompt_values import PromptValue
+from langchain_core.prompt_values import (
+    ChatPromptValueConcrete,
+    PromptValue,
+    StringPromptValue,
+)
 from langchain_core.runnables import Runnable, RunnableSerializable
 from langchain_core.utils import get_pydantic_field_names
 
 if TYPE_CHECKING:
     from langchain_core.outputs import LLMResult
 
+try:
+    from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
+
+    _HAS_TRANSFORMERS = True
+except ImportError:
+    _HAS_TRANSFORMERS = False
+
 
 class LangSmithParams(TypedDict, total=False):
     """LangSmith parameters for tracing."""
@@ -59,16 +71,20 @@ def get_tokenizer() -> Any:
 
     This function is cached to avoid re-loading the tokenizer every time it is called.
 
+    Raises:
+        ImportError: If the transformers package is not installed.
+
+    Returns:
+        The GPT-2 tokenizer instance.
+
     """
-    try:
-        from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
-    except ImportError as e:
+    if not _HAS_TRANSFORMERS:
         msg = (
             "Could not import transformers python package. "
             "This is needed in order to calculate get_token_ids. "
             "Please install it with `pip install transformers`."
         )
-        raise ImportError(msg) from e
+        raise ImportError(msg)
     # create a GPT-2 tokenizer instance
     return GPT2TokenizerFast.from_pretrained("gpt2")
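The refactor above moves the optional-dependency probe to import time while keeping the helpful error at call time. A standalone sketch of the same pattern (names mirror the diff; `require_transformers` is a hypothetical helper, not part of the package):

    try:
        import transformers  # optional heavy dependency

        _HAS_TRANSFORMERS = True
    except ImportError:
        _HAS_TRANSFORMERS = False


    def require_transformers() -> None:
        # Hypothetical helper: fail lazily, only when the feature is used.
        if not _HAS_TRANSFORMERS:
            msg = "Please install it with `pip install transformers`."
            raise ImportError(msg)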
@@ -89,8 +105,6 @@ LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", BaseMessage, str)
 
 
 def _get_verbosity() -> bool:
-    from langchain_core.globals import get_verbose
-
     return get_verbose()
 
 

@@ -152,11 +166,6 @@ class BaseLanguageModel(
     @override
     def InputType(self) -> TypeAlias:
         """Get the input type for this runnable."""
-        from langchain_core.prompt_values import (
-            ChatPromptValueConcrete,
-            StringPromptValue,
-        )
-
         # This is a version of LanguageModelInput which replaces the abstract
         # base class BaseMessage with a union of its subclasses, which makes
         # for a much better schema.
@@ -180,10 +189,11 @@ class BaseLanguageModel(
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of PromptValues. A PromptValue is an object that can be

@@ -216,10 +226,11 @@ class BaseLanguageModel(
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of PromptValues. A PromptValue is an object that can be
langchain_core/language_models/chat_models.py CHANGED

@@ -46,6 +46,10 @@ from langchain_core.messages import (
     message_chunk_to_message,
 )
 from langchain_core.messages.ai import _LC_ID_PREFIX
+from langchain_core.output_parsers.openai_tools import (
+    JsonOutputKeyToolsParser,
+    PydanticToolsParser,
+)
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
@@ -148,8 +152,6 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
                         "type": key,
                         key: block[key],
                     }
-                else:
-                    pass
             messages_to_trace.append(message_to_trace)
 
     return messages_to_trace
@@ -161,6 +163,9 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     Args:
         stream: Iterator of ``ChatGenerationChunk``.
 
+    Raises:
+        ValueError: If no generations are found in the stream.
+
     Returns:
         ChatResult: Chat result.
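A small usage sketch consistent with the new ``Raises`` note (an empty iterator is what triggers the `ValueError`):

    from langchain_core.language_models.chat_models import generate_from_stream
    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    chunks = iter(
        [
            ChatGenerationChunk(message=AIMessageChunk(content="Hello, ")),
            ChatGenerationChunk(message=AIMessageChunk(content="world!")),
        ]
    )
    result = generate_from_stream(chunks)  # merges the chunks into one ChatResult
    print(result.generations[0].message.content)  # "Hello, world!"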
@@ -328,7 +333,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
-        """
+        """Emit deprecation warning if ``callback_manager`` is used.
 
         Args:
             values (Dict): Values to validate.

@@ -336,9 +341,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         Returns:
             Dict: Validated values.
 
-        Raises:
-            DeprecationWarning: If ``callback_manager`` is used.
-
         """
         if values.get("callback_manager") is not None:
             warnings.warn(
@@ -469,7 +471,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         **kwargs: Any,
     ) -> Iterator[BaseMessageChunk]:
         if not self._should_stream(async_api=False, **{**kwargs, "stream": True}):
-            #
+            # Model doesn't implement streaming, so use default implementation
             yield cast(
                 "BaseMessageChunk",
                 self.invoke(input, config=config, stop=stop, **kwargs),
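The effect of the fallback: a chat model that implements only `_generate` still supports `.stream()`, yielding its whole answer as one chunk. A sketch using this package's own fake model (assuming it defines no `_stream` override):

    from langchain_core.language_models.fake_chat_models import FakeMessagesListChatModel
    from langchain_core.messages import AIMessage

    model = FakeMessagesListChatModel(responses=[AIMessage(content="no streaming here")])
    chunks = list(model.stream("hi"))
    print(len(chunks))  # 1 - the full invoke() result arrives as a single chunk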
@@ -718,7 +720,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             ls_params["ls_stop"] = stop
 
         # model
-        if hasattr(self, "model") and isinstance(self.model, str):
+        if "model" in kwargs and isinstance(kwargs["model"], str):
+            ls_params["ls_model_name"] = kwargs["model"]
+        elif hasattr(self, "model") and isinstance(self.model, str):
             ls_params["ls_model_name"] = self.model
         elif hasattr(self, "model_name") and isinstance(self.model_name, str):
             ls_params["ls_model_name"] = self.model_name
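The new branch gives a per-call `model` kwarg precedence over the instance's `model` and `model_name` attributes when populating the traced LangSmith params. A hypothetical illustration (the provider class name is made up):

    model = SomeProviderChatModel(model="base-model")  # hypothetical class
    model.invoke("hi", model="override-model")
    # The run is traced with ls_model_name="override-model", not "base-model".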
@@ -769,10 +773,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             messages: List of list of messages.

@@ -884,10 +889,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             messages: List of list of messages.
@@ -1185,7 +1191,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """
+        """Generate the result.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Returns:
+            The chat result.
+        """
 
     async def _agenerate(
         self,

@@ -1194,7 +1210,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """
+        """Generate the result.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Returns:
+            The chat result.
+        """
         return await run_in_executor(
             None,
             self._generate,

@@ -1211,6 +1237,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
+        """Stream the output of the model.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Yields:
+            The chat generation chunks.
+        """
         raise NotImplementedError
 
     async def _astream(

@@ -1220,6 +1257,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
+        """Stream the output of the model.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Yields:
+            The chat generation chunks.
+        """
         iterator = await run_in_executor(
             None,
             self._stream,
@@ -1259,6 +1307,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If the generation is not a chat generation.
+
         Returns:
             The model output message.
 

@@ -1320,6 +1371,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If the output is not a string.
+
         Returns:
             The predicted output string.
 
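As a concrete illustration of the `invoke` contract documented above, the package's `ParrotFakeChatModel` simply echoes the last input message back:

    from langchain_core.language_models.fake_chat_models import ParrotFakeChatModel

    model = ParrotFakeChatModel()
    result = model.invoke("hello")  # string input is coerced to a message
    print(result.content)           # "hello" - the parrot returns it unchanged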
@@ -1434,6 +1488,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             will be caught and returned as well. The final output is always a dict
             with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
 
+        Raises:
+            ValueError: If there are any unsupported ``kwargs``.
+            NotImplementedError: If the model does not implement
+                ``with_structured_output()``.
+
         Returns:
             A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
 
@@ -1453,15 +1512,20 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         from pydantic import BaseModel
 
+
         class AnswerWithJustification(BaseModel):
             '''An answer to the user question along with justification for the answer.'''
+
             answer: str
             justification: str
 
+
         llm = ChatModel(model="model-name", temperature=0)
         structured_llm = llm.with_structured_output(AnswerWithJustification)
 
-        structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+        structured_llm.invoke(
+            "What weighs more a pound of bricks or a pound of feathers"
+        )
 
         # -> AnswerWithJustification(
         #     answer='They weigh the same',

@@ -1473,15 +1537,22 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         from pydantic import BaseModel
 
+
         class AnswerWithJustification(BaseModel):
             '''An answer to the user question along with justification for the answer.'''
+
             answer: str
             justification: str
 
+
         llm = ChatModel(model="model-name", temperature=0)
-        structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
+        structured_llm = llm.with_structured_output(
+            AnswerWithJustification, include_raw=True
+        )
 
-        structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+        structured_llm.invoke(
+            "What weighs more a pound of bricks or a pound of feathers"
+        )
         # -> {
         #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
         #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),

@@ -1494,16 +1565,21 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         from pydantic import BaseModel
         from langchain_core.utils.function_calling import convert_to_openai_tool
 
+
         class AnswerWithJustification(BaseModel):
             '''An answer to the user question along with justification for the answer.'''
+
             answer: str
             justification: str
 
+
         dict_schema = convert_to_openai_tool(AnswerWithJustification)
         llm = ChatModel(model="model-name", temperature=0)
         structured_llm = llm.with_structured_output(dict_schema)
 
-        structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+        structured_llm.invoke(
+            "What weighs more a pound of bricks or a pound of feathers"
+        )
         # -> {
         #     'answer': 'They weigh the same',
         #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
@@ -1520,11 +1596,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             msg = f"Received unsupported arguments {kwargs}"
             raise ValueError(msg)
 
-        from langchain_core.output_parsers.openai_tools import (
-            JsonOutputKeyToolsParser,
-            PydanticToolsParser,
-        )
-
         if type(self).bind_tools is BaseChatModel.bind_tools:
             msg = "with_structured_output is not implemented for this model."
             raise NotImplementedError(msg)
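For orientation, the two parsers now imported at module level implement the default structured-output wiring: bind the schema as a tool, then parse the tool call back out. A rough sketch, assuming a Pydantic `schema` class and a tool-calling `llm` (the real method also handles dict schemas and `include_raw`):

    from langchain_core.output_parsers.openai_tools import PydanticToolsParser

    llm_with_tool = llm.bind_tools([schema])
    parser = PydanticToolsParser(tools=[schema], first_tool_only=True)
    structured_llm = llm_with_tool | parser  # prompt in, schema instance out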
langchain_core/language_models/fake_chat_models.py CHANGED

@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig
 
 
 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake ``ChatModel`` for testing purposes."""
 
     responses: list[BaseMessage]
     """List of responses to **cycle** through in order."""
@@ -75,12 +75,13 @@ class FakeListChatModel(SimpleChatModel):
     @override
     def _call(
         self,
-        messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        *args: Any,
         **kwargs: Any,
     ) -> str:
-        """
+        """Return the next response in the list.
+
+        Cycle back to the start if at the end.
+        """
         if self.sleep is not None:
             time.sleep(self.sleep)
         response = self.responses[self.i]
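A usage sketch of the cycling behavior the new docstring describes:

    from langchain_core.language_models.fake_chat_models import FakeListChatModel

    model = FakeListChatModel(responses=["first", "second"])
    print(model.invoke("q1").content)  # "first"
    print(model.invoke("q2").content)  # "second"
    print(model.invoke("q3").content)  # "first" again - the index wraps around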
@@ -211,10 +212,11 @@ class GenericFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.
 
     * Chat model should be usable in both sync and async tests
-    * Invokes on_llm_new_token to allow for testing of callback related code for new
+    * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
       tokens.
     * Includes logic to break messages into message chunk to facilitate testing of
       streaming.
+
     """
 
     messages: Iterator[Union[AIMessage, str]]

@@ -229,6 +231,7 @@ class GenericFakeChatModel(BaseChatModel):
     .. warning::
         Streaming is not implemented yet. We should try to implement it in the future by
         delegating to invoke and then breaking the resulting output into message chunks.
+
     """
 
     @override

@@ -239,7 +242,6 @@ class GenericFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
         message = next(self.messages)
         message_ = AIMessage(content=message) if isinstance(message, str) else message
         generation = ChatGeneration(message=message_)
@@ -252,7 +254,6 @@ class GenericFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        """Stream the output of the model."""
         chat_result = self._generate(
             messages, stop=stop, run_manager=run_manager, **kwargs
         )
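A hedged usage sketch of `GenericFakeChatModel`: each `invoke` consumes the next message from the iterator, and streaming splits the content into whitespace-delimited chunks (chunk boundaries below are approximate):

    from langchain_core.language_models.fake_chat_models import GenericFakeChatModel

    model = GenericFakeChatModel(messages=iter(["hello world", "second reply"]))
    print(model.invoke("hi").content)      # "hello world"
    for chunk in model.stream("hi again"):
        print(repr(chunk.content))         # "second", " ", "reply" (roughly)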
@@ -352,6 +353,7 @@ class ParrotFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.
 
     * Chat model should be usable in both sync and async tests
+
     """
 
     @override

@@ -362,7 +364,6 @@ class ParrotFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
         return ChatResult(generations=[ChatGeneration(message=messages[-1])])
 
     @property