langchain-core 0.3.74__py3-none-any.whl → 0.3.76__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +18 -41
- langchain_core/_api/deprecation.py +20 -7
- langchain_core/_api/path.py +19 -2
- langchain_core/_import_utils.py +7 -0
- langchain_core/agents.py +10 -6
- langchain_core/beta/runnables/context.py +2 -3
- langchain_core/callbacks/base.py +11 -4
- langchain_core/callbacks/file.py +13 -2
- langchain_core/callbacks/manager.py +129 -78
- langchain_core/callbacks/usage.py +4 -2
- langchain_core/chat_history.py +10 -12
- langchain_core/document_loaders/base.py +34 -9
- langchain_core/document_loaders/langsmith.py +3 -0
- langchain_core/documents/base.py +36 -11
- langchain_core/documents/compressor.py +9 -6
- langchain_core/documents/transformers.py +4 -2
- langchain_core/embeddings/fake.py +8 -5
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/exceptions.py +7 -0
- langchain_core/globals.py +17 -28
- langchain_core/indexing/api.py +56 -44
- langchain_core/indexing/base.py +7 -10
- langchain_core/indexing/in_memory.py +23 -3
- langchain_core/language_models/__init__.py +3 -2
- langchain_core/language_models/base.py +64 -39
- langchain_core/language_models/chat_models.py +130 -42
- langchain_core/language_models/fake_chat_models.py +10 -11
- langchain_core/language_models/llms.py +49 -17
- langchain_core/load/dump.py +5 -7
- langchain_core/load/load.py +15 -1
- langchain_core/load/serializable.py +38 -43
- langchain_core/memory.py +7 -3
- langchain_core/messages/ai.py +36 -16
- langchain_core/messages/base.py +13 -6
- langchain_core/messages/content_blocks.py +23 -2
- langchain_core/messages/human.py +2 -6
- langchain_core/messages/modifier.py +1 -1
- langchain_core/messages/system.py +2 -6
- langchain_core/messages/tool.py +36 -16
- langchain_core/messages/utils.py +198 -87
- langchain_core/output_parsers/base.py +5 -2
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +7 -22
- langchain_core/output_parsers/openai_functions.py +3 -0
- langchain_core/output_parsers/openai_tools.py +8 -1
- langchain_core/output_parsers/pydantic.py +4 -0
- langchain_core/output_parsers/string.py +5 -1
- langchain_core/output_parsers/transform.py +2 -2
- langchain_core/output_parsers/xml.py +23 -22
- langchain_core/outputs/chat_generation.py +18 -7
- langchain_core/outputs/generation.py +14 -3
- langchain_core/outputs/llm_result.py +8 -1
- langchain_core/prompt_values.py +10 -4
- langchain_core/prompts/base.py +4 -9
- langchain_core/prompts/chat.py +88 -61
- langchain_core/prompts/dict.py +16 -8
- langchain_core/prompts/few_shot.py +9 -11
- langchain_core/prompts/few_shot_with_templates.py +5 -1
- langchain_core/prompts/image.py +12 -5
- langchain_core/prompts/message.py +5 -6
- langchain_core/prompts/pipeline.py +13 -8
- langchain_core/prompts/prompt.py +22 -8
- langchain_core/prompts/string.py +18 -10
- langchain_core/prompts/structured.py +7 -2
- langchain_core/rate_limiters.py +2 -2
- langchain_core/retrievers.py +7 -6
- langchain_core/runnables/base.py +842 -567
- langchain_core/runnables/branch.py +15 -20
- langchain_core/runnables/config.py +11 -17
- langchain_core/runnables/configurable.py +34 -19
- langchain_core/runnables/fallbacks.py +24 -17
- langchain_core/runnables/graph.py +47 -40
- langchain_core/runnables/graph_ascii.py +40 -17
- langchain_core/runnables/graph_mermaid.py +27 -15
- langchain_core/runnables/graph_png.py +27 -31
- langchain_core/runnables/history.py +56 -59
- langchain_core/runnables/passthrough.py +47 -24
- langchain_core/runnables/retry.py +10 -6
- langchain_core/runnables/router.py +10 -9
- langchain_core/runnables/schema.py +2 -0
- langchain_core/runnables/utils.py +51 -89
- langchain_core/stores.py +13 -25
- langchain_core/structured_query.py +3 -7
- langchain_core/sys_info.py +9 -8
- langchain_core/tools/base.py +30 -23
- langchain_core/tools/convert.py +24 -13
- langchain_core/tools/simple.py +35 -3
- langchain_core/tools/structured.py +26 -3
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +2 -2
- langchain_core/tracers/context.py +5 -1
- langchain_core/tracers/core.py +109 -39
- langchain_core/tracers/evaluation.py +22 -26
- langchain_core/tracers/event_stream.py +41 -28
- langchain_core/tracers/langchain.py +12 -3
- langchain_core/tracers/langchain_v1.py +10 -2
- langchain_core/tracers/log_stream.py +57 -18
- langchain_core/tracers/root_listeners.py +4 -20
- langchain_core/tracers/run_collector.py +6 -16
- langchain_core/tracers/schemas.py +5 -1
- langchain_core/utils/aiter.py +14 -6
- langchain_core/utils/env.py +3 -0
- langchain_core/utils/function_calling.py +49 -30
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +11 -3
- langchain_core/utils/json.py +5 -2
- langchain_core/utils/json_schema.py +15 -5
- langchain_core/utils/loading.py +5 -1
- langchain_core/utils/mustache.py +24 -15
- langchain_core/utils/pydantic.py +32 -4
- langchain_core/utils/utils.py +24 -8
- langchain_core/vectorstores/base.py +7 -20
- langchain_core/vectorstores/in_memory.py +18 -12
- langchain_core/vectorstores/utils.py +18 -12
- langchain_core/version.py +1 -1
- langchain_core-0.3.76.dist-info/METADATA +77 -0
- langchain_core-0.3.76.dist-info/RECORD +174 -0
- langchain_core-0.3.74.dist-info/METADATA +0 -108
- langchain_core-0.3.74.dist-info/RECORD +0 -174
- {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
langchain_core/output_parsers/openai_tools.py
CHANGED

@@ -231,6 +231,9 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
                 If False, the output will be the full JSON object.
                 Default is False.
 
+        Raises:
+            OutputParserException: If the generation is not a chat generation.
+
         Returns:
             The parsed tool calls.
         """
@@ -246,6 +249,8 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
                     _ = tool_call.pop("id")
         else:
             try:
+                # This exists purely for backward compatibility / cached messages
+                # All new messages should use `message.tool_calls`
                 raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
             except KeyError:
                 if self.first_tool_only:
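The two comments added here point at the preferred accessor: `message.tool_calls` is the typed field on `AIMessage`, while `additional_kwargs["tool_calls"]` survives only for old cached messages. For context, a minimal sketch of the preferred path (message contents are illustrative, not from the package):

    from langchain_core.messages import AIMessage

    # Modern messages carry parsed tool calls directly as a typed field.
    msg = AIMessage(
        content="",
        tool_calls=[{"name": "add", "args": {"a": 5, "b": 2}, "id": "call_1"}],
    )
    for call in msg.tool_calls:
        print(call["name"], call["args"])  # add {'a': 5, 'b': 2}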
@@ -314,7 +319,9 @@ class PydanticToolsParser(JsonOutputToolsParser):
             The parsed Pydantic objects.
 
         Raises:
-
+            ValueError: If the tool call arguments are not a dict.
+            ValidationError: If the tool call arguments do not conform
+                to the Pydantic model.
         """
         json_results = super().parse_result(result, partial=partial)
         if not json_results:
langchain_core/output_parsers/pydantic.py
CHANGED

@@ -54,6 +54,10 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
                 all the keys that have been returned so far.
                 Defaults to False.
 
+        Raises:
+            OutputParserException: If the result is not valid JSON
+                or does not conform to the pydantic model.
+
         Returns:
             The parsed pydantic object.
         """
langchain_core/output_parsers/string.py
CHANGED

@@ -1,5 +1,7 @@
 """String output parser."""
 
+from typing_extensions import override
+
 from langchain_core.output_parsers.transform import BaseTransformOutputParser
 
 
@@ -19,7 +21,8 @@ class StrOutputParser(BaseTransformOutputParser[str]):
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.
 
-
+        Returns:
+            ``["langchain", "schema", "output_parser"]``
         """
         return ["langchain", "schema", "output_parser"]
 
@@ -28,6 +31,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):
         """Return the output parser type for serialization."""
         return "default"
 
+    @override
     def parse(self, text: str) -> str:
         """Returns the input text with no changes."""
         return text
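The `@override` marker imported in the earlier hunk has no runtime effect; it asks a static type checker to verify that the decorated method really overrides a base-class method. A self-contained sketch of the idea (toy classes, not langchain code):

    from typing_extensions import override


    class Base:
        def parse(self, text: str) -> str:
            return text


    class Child(Base):
        @override
        def parse(self, text: str) -> str:  # OK: Base.parse exists
            return text.strip()

        # Decorating a method that overrides nothing (say, a typo like
        # `parze`) would be flagged by a checker such as mypy or pyright.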
langchain_core/output_parsers/transform.py
CHANGED

@@ -32,7 +32,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
 
     def _transform(
         self,
-        input: Iterator[Union[str, BaseMessage]],
+        input: Iterator[Union[str, BaseMessage]],
     ) -> Iterator[T]:
         for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -42,7 +42,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
 
     async def _atransform(
         self,
-        input: AsyncIterator[Union[str, BaseMessage]],
+        input: AsyncIterator[Union[str, BaseMessage]],
     ) -> AsyncIterator[T]:
         async for chunk in input:
             if isinstance(chunk, BaseMessage):
langchain_core/output_parsers/xml.py
CHANGED

@@ -15,6 +15,14 @@ from langchain_core.messages import BaseMessage
 from langchain_core.output_parsers.transform import BaseTransformOutputParser
 from langchain_core.runnables.utils import AddableDict
 
+try:
+    from defusedxml import ElementTree  # type: ignore[import-untyped]
+    from defusedxml.ElementTree import XMLParser  # type: ignore[import-untyped]
+
+    _HAS_DEFUSEDXML = True
+except ImportError:
+    _HAS_DEFUSEDXML = False
+
 XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file.
 1. Output should conform to the tags below.
 2. If tags are not given, make them on your own.
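This hunk probes for `defusedxml` once at import time and records the result in a module-level flag, instead of re-attempting the import at each use site. The general optional-dependency pattern, as a standalone sketch (the `make_parser` factory is hypothetical):

    # Probe the optional dependency once, when the module is imported.
    try:
        import defusedxml  # noqa: F401

        _HAS_DEFUSEDXML = True
    except ImportError:
        _HAS_DEFUSEDXML = False


    def make_parser(kind: str) -> object:
        # Hypothetical factory guarding the optional backend.
        if kind == "defusedxml" and not _HAS_DEFUSEDXML:
            msg = "defusedxml is not installed. Install it with `pip install defusedxml`."
            raise ImportError(msg)
        return object()  # stand-in for the real parser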
@@ -50,17 +58,13 @@ class _StreamingParser:
             parser is requested.
         """
         if parser == "defusedxml":
-            try:
-                from defusedxml.ElementTree import (  # type: ignore[import-untyped]
-                    XMLParser,
-                )
-            except ImportError as e:
+            if not _HAS_DEFUSEDXML:
                 msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml` "
                 )
-                raise ImportError(msg) from e
+                raise ImportError(msg)
             parser_ = XMLParser(target=TreeBuilder())
         else:
             parser_ = None
@@ -105,10 +109,11 @@ class _StreamingParser:
             self.buffer = ""
         # yield all events
         try:
-            for event, elem in self.pull_parser.read_events():
+            events = self.pull_parser.read_events()
+            for event, elem in events:  # type: ignore[misc]
                 if event == "start":
                     # update current path
-                    self.current_path.append(elem.tag)
+                    self.current_path.append(elem.tag)  # type: ignore[union-attr]
                     self.current_path_has_children = False
                 elif event == "end":
                     # remove last element from current path
@@ -116,7 +121,7 @@ class _StreamingParser:
                     self.current_path.pop()
                     # yield element
                     if not self.current_path_has_children:
-                        yield nested_element(self.current_path, elem)
+                        yield nested_element(self.current_path, elem)  # type: ignore[arg-type]
                     # prevent yielding of parent element
                     if self.current_path:
                         self.current_path_has_children = True
@@ -135,9 +140,6 @@ class _StreamingParser:
         """Close the parser.
 
         This should be called after all chunks have been parsed.
-
-        Raises:
-            xml.etree.ElementTree.ParseError: If the XML is not well-formed.
         """
         # Ignore ParseError. This will ignore any incomplete XML at the end of the input
         with contextlib.suppress(xml.etree.ElementTree.ParseError):
@@ -153,14 +155,15 @@ class XMLOutputParser(BaseTransformOutputParser):
     Note this may not be perfect depending on the LLM implementation.
 
     For example, with tags=["foo", "bar", "baz"]:
-    1. A well-formatted XML instance:
-    "<foo>\n   <bar>\n      <baz></baz>\n   </bar>\n</foo>"
 
-
-
+    1. A well-formatted XML instance:
+    "<foo>\n   <bar>\n      <baz></baz>\n   </bar>\n</foo>"
+
+    2. A badly-formatted XML instance (missing closing tag for 'bar'):
+    "<foo>\n   <bar>\n   </foo>"
 
-
-
+    3. A badly-formatted XML instance (unexpected 'tag' element):
+    "<foo>\n   <tag>\n   </tag>\n</foo>"
     """
     encoding_matcher: re.Pattern = re.compile(
         r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL
@@ -208,16 +211,14 @@ class XMLOutputParser(BaseTransformOutputParser):
         # Imports are temporarily placed here to avoid issue with caching on CI
         # likely if you're reading this you can move them to the top of the file
         if self.parser == "defusedxml":
-            try:
-                from defusedxml import ElementTree  # type: ignore[import-untyped]
-            except ImportError as e:
+            if not _HAS_DEFUSEDXML:
                 msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml`"
                     "See https://github.com/tiran/defusedxml for more details"
                 )
-                raise ImportError(msg) from e
+                raise ImportError(msg)
             et = ElementTree  # Use the defusedxml parser
         else:
             et = ET  # Use the standard library parser
langchain_core/outputs/chat_generation.py
CHANGED

@@ -47,9 +47,6 @@ class ChatGeneration(Generation):
 
         Returns:
             The values of the object with the text attribute set.
-
-        Raises:
-            ValueError: If the message is not a string or a list.
         """
         text = ""
         if isinstance(self.message.content, str):
@@ -83,11 +80,18 @@ class ChatGenerationChunk(ChatGeneration):
     def __add__(
         self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
     ) -> ChatGenerationChunk:
-        """Concatenate two ChatGenerationChunks.
+        """Concatenate two ``ChatGenerationChunk``s.
 
         Args:
-            other: The other ChatGenerationChunk or list of ChatGenerationChunks to
-                concatenate.
+            other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``
+                to concatenate.
+
+        Raises:
+            TypeError: If other is not a ``ChatGenerationChunk`` or list of
+                ``ChatGenerationChunk``.
+
+        Returns:
+            A new ``ChatGenerationChunk`` concatenated from self and other.
         """
         if isinstance(other, ChatGenerationChunk):
             generation_info = merge_dicts(
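The expanded docstring documents existing behavior; a quick sketch of the concatenation it describes (chunk contents are illustrative):

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    left = ChatGenerationChunk(message=AIMessageChunk(content="Hello, "))
    right = ChatGenerationChunk(message=AIMessageChunk(content="world"))

    combined = left + right  # a new ChatGenerationChunk
    print(combined.text)     # Hello, world
    # left + 42 would raise TypeError, per the new Raises: section.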
@@ -116,7 +120,14 @@
 def merge_chat_generation_chunks(
     chunks: list[ChatGenerationChunk],
 ) -> Union[ChatGenerationChunk, None]:
-    """Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
+    """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.
+
+    Args:
+        chunks: A list of ``ChatGenerationChunk`` to merge.
+
+    Returns:
+        A merged ``ChatGenerationChunk``, or None if the input list is empty.
+    """
     if not chunks:
         return None
 
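And the helper's contract, exercised end to end (a sketch; the streamed fragments are made up):

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs.chat_generation import (
        ChatGenerationChunk,
        merge_chat_generation_chunks,
    )

    chunks = [
        ChatGenerationChunk(message=AIMessageChunk(content=part))
        for part in ("Str", "eam", "ed")
    ]
    print(merge_chat_generation_chunks(chunks).text)  # Streamed
    print(merge_chat_generation_chunks([]))           # None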
langchain_core/outputs/generation.py
CHANGED

@@ -39,14 +39,15 @@ class Generation(Serializable):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable."""
+        """Return True as this class is serializable."""
        return True
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.
 
-
+        Returns:
+            ``["langchain", "schema", "output"]``
         """
         return ["langchain", "schema", "output"]
 
@@ -55,7 +56,17 @@ class GenerationChunk(Generation):
     """Generation chunk, which can be concatenated with other Generation chunks."""
 
     def __add__(self, other: GenerationChunk) -> GenerationChunk:
-        """Concatenate two GenerationChunks."""
+        """Concatenate two ``GenerationChunk``s.
+
+        Args:
+            other: Another ``GenerationChunk`` to concatenate with.
+
+        Raises:
+            TypeError: If other is not a ``GenerationChunk``.
+
+        Returns:
+            A new ``GenerationChunk`` concatenated from self and other.
+        """
         if isinstance(other, GenerationChunk):
             generation_info = merge_dicts(
                 self.generation_info or {},
langchain_core/outputs/llm_result.py
CHANGED

@@ -91,7 +91,14 @@ class LLMResult(BaseModel):
         return llm_results
 
     def __eq__(self, other: object) -> bool:
-        """Check for LLMResult equality by ignoring any metadata related to runs."""
+        """Check for ``LLMResult`` equality by ignoring any metadata related to runs.
+
+        Args:
+            other: Another ``LLMResult`` object to compare against.
+
+        Returns:
+            True if the generations and ``llm_output`` are equal, False otherwise.
+        """
         if not isinstance(other, LLMResult):
             return NotImplemented
         return (
langchain_core/prompt_values.py
CHANGED
@@ -30,7 +30,7 @@ class PromptValue(Serializable, ABC):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable."""
+        """Return True as this class is serializable."""
         return True
 
     @classmethod
@@ -38,7 +38,9 @@ class PromptValue(Serializable, ABC):
         """Get the namespace of the langchain object.
 
         This is used to determine the namespace of the object when serializing.
-
+
+        Returns:
+            ``["langchain", "schema", "prompt"]``
         """
         return ["langchain", "schema", "prompt"]
 
@@ -63,7 +65,9 @@ class StringPromptValue(PromptValue):
         """Get the namespace of the langchain object.
 
         This is used to determine the namespace of the object when serializing.
-
+
+        Returns:
+            ``["langchain", "prompts", "base"]``
         """
         return ["langchain", "prompts", "base"]
 
@@ -98,7 +102,9 @@ class ChatPromptValue(PromptValue):
         """Get the namespace of the langchain object.
 
         This is used to determine the namespace of the object when serializing.
-
+
+        Returns:
+            ``["langchain", "prompts", "chat"]``
         """
         return ["langchain", "prompts", "chat"]
 
langchain_core/prompts/base.py
CHANGED
@@ -101,16 +101,14 @@ class BasePromptTemplate(
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.
 
-        Returns ["langchain", "schema", "prompt_template"].
+        Returns:
+            ``["langchain", "schema", "prompt_template"]``
         """
         return ["langchain", "schema", "prompt_template"]
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable.
-
-        Returns True.
-        """
+        """Return True as this class is serializable."""
         return True
 
     model_config = ConfigDict(
@@ -212,7 +210,7 @@ class BasePromptTemplate(
         if self.metadata:
             config["metadata"] = {**config["metadata"], **self.metadata}
         if self.tags:
-            config["tags"] = config["tags"] + self.tags
+            config["tags"] += self.tags
         return self._call_with_config(
             self._format_prompt_with_error_handling,
             input,
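Note that on a list, `+=` extends the existing object in place, while the spelled-out form builds a new list and rebinds the key. The end state of `config["tags"]` is the same either way; the difference is only observable through other references to the original list, as this sketch shows:

    tags = ["a"]
    config = {"tags": tags}
    config["tags"] += ["b"]      # in-place list.extend
    print(tags)                  # ['a', 'b']  (the alias sees the change)

    tags = ["a"]
    config = {"tags": tags}
    config["tags"] = config["tags"] + ["b"]  # new list, key rebound
    print(tags)                  # ['a']      (the alias is untouched)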
@@ -341,9 +339,6 @@ class BasePromptTemplate(
 
         Returns:
             Dict: Dictionary representation of the prompt.
-
-        Raises:
-            NotImplementedError: If the prompt type is not implemented.
         """
         prompt_dict = super().model_dump(**kwargs)
         with contextlib.suppress(NotImplementedError):
langchain_core/prompts/chat.py
CHANGED
@@ -67,10 +67,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
        from langchain_core.prompts import MessagesPlaceholder
 
        prompt = MessagesPlaceholder("history")
-       prompt.format_messages()
+       prompt.format_messages()  # raises KeyError
 
        prompt = MessagesPlaceholder("history", optional=True)
-       prompt.format_messages()
+       prompt.format_messages()  # returns empty list []
 
        prompt.format_messages(
            history=[
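The two annotated calls behave exactly as the new comments say; a runnable check:

    from langchain_core.prompts import MessagesPlaceholder

    optional = MessagesPlaceholder("history", optional=True)
    print(optional.format_messages())  # []

    required = MessagesPlaceholder("history")
    try:
        required.format_messages()
    except KeyError as err:
        print("missing prompt variable:", err)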
@@ -93,14 +93,14 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
            [
                ("system", "You are a helpful assistant."),
                MessagesPlaceholder("history"),
-               ("human", "{question}")
+               ("human", "{question}"),
            ]
        )
        prompt.invoke(
-
-
-
-
+           {
+               "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
+               "question": "now multiply that by 4",
+           }
        )
        # -> ChatPromptValue(messages=[
        #     SystemMessage(content="You are a helpful assistant."),
@@ -155,9 +155,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
         """
         # mypy can't detect the init which is defined in the parent class
         # b/c these are BaseModel classes.
-        super().__init__(
-            variable_name=variable_name, optional=optional, **kwargs
-        )
+        super().__init__(variable_name=variable_name, optional=optional, **kwargs)
 
     def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
         """Format messages from kwargs.
@@ -742,10 +740,18 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
 
     @abstractmethod
     def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
-        """Format kwargs into a list of messages."""
+        """Format kwargs into a list of messages.
+
+        Returns:
+            List of messages.
+        """
 
     async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
-        """Async format kwargs into a list of messages."""
+        """Async format kwargs into a list of messages.
+
+        Returns:
+            List of messages.
+        """
         return self.format_messages(**kwargs)
 
     def pretty_repr(
@@ -797,18 +803,17 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
        from langchain_core.prompts import ChatPromptTemplate
 
-       template = ChatPromptTemplate(
-
-
-
-
-
+       template = ChatPromptTemplate(
+           [
+               ("system", "You are a helpful AI bot. Your name is {name}."),
+               ("human", "Hello, how are you doing?"),
+               ("ai", "I'm doing well, thanks!"),
+               ("human", "{user_input}"),
+           ]
+       )
 
        prompt_value = template.invoke(
-           {
-               "name": "Bob",
-               "user_input": "What is your name?"
-           }
+           {"name": "Bob", "user_input": "What is your name?"}
        )
        # Output:
        # ChatPromptValue(
@@ -818,7 +823,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
        #     AIMessage(content="I'm doing well, thanks!"),
        #     HumanMessage(content='What is your name?')
        # ]
-       #)
+       # )
 
    Messages Placeholder:
 
@@ -828,14 +833,16 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
        # you can initialize the template with a MessagesPlaceholder
        # either using the class directly or with the shorthand tuple syntax:
 
-       template = ChatPromptTemplate(
-
-
-
-
-
-
+       template = ChatPromptTemplate(
+           [
+               ("system", "You are a helpful AI bot."),
+               # Means the template will receive an optional list of messages under
+               # the "conversation" key
+               ("placeholder", "{conversation}"),
+               # Equivalently:
+               # MessagesPlaceholder(variable_name="conversation", optional=True)
+           ]
+       )
 
        prompt_value = template.invoke(
            {
@@ -843,7 +850,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                ("human", "Hi!"),
                ("ai", "How can I assist you today?"),
                ("human", "Can you make me an ice cream sundae?"),
-               ("ai", "No.")
+               ("ai", "No."),
            ]
        }
    )
@@ -857,7 +864,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
        #     HumanMessage(content='Can you make me an ice cream sundae?'),
        #     AIMessage(content='No.'),
        # ]
-       #)
+       # )
 
    Single-variable template:
 
@@ -870,10 +877,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
        from langchain_core.prompts import ChatPromptTemplate
 
-       template = ChatPromptTemplate(
-
-
-
+       template = ChatPromptTemplate(
+           [
+               ("system", "You are a helpful AI bot. Your name is Carl."),
+               ("human", "{user_input}"),
+           ]
+       )
 
        prompt_value = template.invoke("Hello, there!")
        # Equivalent to
@@ -924,28 +933,29 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
            input_types: A dictionary of the types of the variables the prompt template
                expects. If not provided, all variables are assumed to be strings.
 
-        Returns:
-            A chat prompt template.
-
        Examples:
            Instantiation from a list of message templates:
 
            .. code-block:: python
 
-               template = ChatPromptTemplate(
-
-
-
+               template = ChatPromptTemplate(
+                   [
+                       ("human", "Hello, how are you?"),
+                       ("ai", "I'm doing well, thanks!"),
+                       ("human", "That's good to hear."),
+                   ]
+               )
 
            Instantiation from mixed message formats:
 
            .. code-block:: python
 
-               template = ChatPromptTemplate(
-
-
-
+               template = ChatPromptTemplate(
+                   [
+                       SystemMessage(content="hello"),
+                       ("human", "Hello, how are you?"),
+                   ]
+               )
 
        """
        messages_ = [
@@ -976,7 +986,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object."""
+        """Get the namespace of the langchain object.
+
+        Returns:
+            ``["langchain", "prompts", "chat"]``
+        """
         return ["langchain", "prompts", "chat"]
 
     def __add__(self, other: Any) -> ChatPromptTemplate:
@@ -1139,20 +1153,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
        .. code-block:: python
 
-           template = ChatPromptTemplate.from_messages(
-
-
-
+           template = ChatPromptTemplate.from_messages(
+               [
+                   ("human", "Hello, how are you?"),
+                   ("ai", "I'm doing well, thanks!"),
+                   ("human", "That's good to hear."),
+               ]
+           )
 
        Instantiation from mixed message formats:
 
        .. code-block:: python
 
-           template = ChatPromptTemplate.from_messages(
-
-
-
+           template = ChatPromptTemplate.from_messages(
+               [
+                   SystemMessage(content="hello"),
+                   ("human", "Hello, how are you?"),
+               ]
+           )
 
        Args:
            messages: sequence of message representations.
@@ -1176,6 +1194,9 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
            **kwargs: keyword arguments to use for filling in template variables
                in all the template messages in this chat template.
 
+        Raises:
+            ValueError: if messages are of unexpected types.
+
        Returns:
            list of formatted messages.
        """
@@ -1286,7 +1307,13 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
    def __getitem__(
        self, index: Union[int, slice]
    ) -> Union[MessageLike, ChatPromptTemplate]:
-        """Use to index into the chat template."""
+        """Use to index into the chat template.
+
+        Returns:
+            If index is an int, returns the message at that index.
+            If index is a slice, returns a new ``ChatPromptTemplate``
+                containing the messages in that slice.
+        """
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self.messages))
            messages = self.messages[start:stop:step]
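The indexing semantics spelled out above, exercised briefly (sketch):

    from langchain_core.prompts import ChatPromptTemplate

    template = ChatPromptTemplate(
        [
            ("system", "You are a helpful AI bot."),
            ("human", "Hi!"),
            ("ai", "Hello! How can I help?"),
        ]
    )

    first = template[0]  # a single message template
    tail = template[1:]  # a new ChatPromptTemplate with the last two messages
    print(len(tail))     # 2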
@@ -1294,7 +1321,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
        return self.messages[index]
 
    def __len__(self) -> int:
-        """
+        """Return the length of the chat template."""
        return len(self.messages)
 
    @property