langchain-core 1.0.0a2__py3-none-any.whl → 1.0.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- langchain_core/_api/beta_decorator.py +17 -40
- langchain_core/_api/deprecation.py +20 -7
- langchain_core/_api/path.py +19 -2
- langchain_core/_import_utils.py +7 -0
- langchain_core/agents.py +10 -6
- langchain_core/callbacks/base.py +28 -15
- langchain_core/callbacks/manager.py +81 -69
- langchain_core/callbacks/usage.py +4 -2
- langchain_core/chat_history.py +29 -21
- langchain_core/document_loaders/base.py +34 -9
- langchain_core/document_loaders/langsmith.py +3 -0
- langchain_core/documents/base.py +35 -10
- langchain_core/documents/transformers.py +4 -2
- langchain_core/embeddings/fake.py +8 -5
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/exceptions.py +7 -0
- langchain_core/globals.py +17 -28
- langchain_core/indexing/api.py +57 -45
- langchain_core/indexing/base.py +5 -8
- langchain_core/indexing/in_memory.py +23 -3
- langchain_core/language_models/__init__.py +6 -2
- langchain_core/language_models/_utils.py +27 -5
- langchain_core/language_models/base.py +33 -21
- langchain_core/language_models/chat_models.py +104 -31
- langchain_core/language_models/fake_chat_models.py +5 -7
- langchain_core/language_models/llms.py +54 -20
- langchain_core/load/dump.py +2 -3
- langchain_core/load/load.py +15 -1
- langchain_core/load/serializable.py +38 -43
- langchain_core/memory.py +7 -3
- langchain_core/messages/__init__.py +1 -1
- langchain_core/messages/ai.py +41 -34
- langchain_core/messages/base.py +20 -7
- langchain_core/messages/block_translators/__init__.py +10 -8
- langchain_core/messages/block_translators/anthropic.py +11 -7
- langchain_core/messages/block_translators/bedrock.py +76 -27
- langchain_core/messages/block_translators/bedrock_converse.py +259 -23
- langchain_core/messages/block_translators/google_genai.py +3 -1
- langchain_core/messages/block_translators/google_vertexai.py +3 -1
- langchain_core/messages/block_translators/groq.py +3 -1
- langchain_core/messages/block_translators/ollama.py +3 -1
- langchain_core/messages/block_translators/openai.py +50 -20
- langchain_core/messages/content.py +23 -13
- langchain_core/messages/human.py +2 -13
- langchain_core/messages/system.py +2 -6
- langchain_core/messages/tool.py +34 -14
- langchain_core/messages/utils.py +186 -73
- langchain_core/output_parsers/base.py +5 -2
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +7 -22
- langchain_core/output_parsers/openai_functions.py +3 -0
- langchain_core/output_parsers/openai_tools.py +6 -1
- langchain_core/output_parsers/pydantic.py +4 -0
- langchain_core/output_parsers/string.py +5 -1
- langchain_core/output_parsers/xml.py +19 -19
- langchain_core/outputs/chat_generation.py +18 -7
- langchain_core/outputs/generation.py +14 -3
- langchain_core/outputs/llm_result.py +8 -1
- langchain_core/prompt_values.py +10 -4
- langchain_core/prompts/base.py +6 -11
- langchain_core/prompts/chat.py +88 -60
- langchain_core/prompts/dict.py +16 -8
- langchain_core/prompts/few_shot.py +9 -11
- langchain_core/prompts/few_shot_with_templates.py +5 -1
- langchain_core/prompts/image.py +12 -5
- langchain_core/prompts/loading.py +2 -2
- langchain_core/prompts/message.py +5 -6
- langchain_core/prompts/pipeline.py +13 -8
- langchain_core/prompts/prompt.py +22 -8
- langchain_core/prompts/string.py +18 -10
- langchain_core/prompts/structured.py +7 -2
- langchain_core/rate_limiters.py +2 -2
- langchain_core/retrievers.py +7 -6
- langchain_core/runnables/base.py +387 -246
- langchain_core/runnables/branch.py +11 -28
- langchain_core/runnables/config.py +20 -17
- langchain_core/runnables/configurable.py +34 -19
- langchain_core/runnables/fallbacks.py +20 -13
- langchain_core/runnables/graph.py +48 -38
- langchain_core/runnables/graph_ascii.py +40 -17
- langchain_core/runnables/graph_mermaid.py +54 -25
- langchain_core/runnables/graph_png.py +27 -31
- langchain_core/runnables/history.py +55 -58
- langchain_core/runnables/passthrough.py +44 -21
- langchain_core/runnables/retry.py +44 -23
- langchain_core/runnables/router.py +9 -8
- langchain_core/runnables/schema.py +9 -0
- langchain_core/runnables/utils.py +53 -90
- langchain_core/stores.py +19 -31
- langchain_core/sys_info.py +9 -8
- langchain_core/tools/base.py +36 -27
- langchain_core/tools/convert.py +25 -14
- langchain_core/tools/simple.py +36 -8
- langchain_core/tools/structured.py +25 -12
- langchain_core/tracers/base.py +2 -2
- langchain_core/tracers/context.py +5 -1
- langchain_core/tracers/core.py +110 -46
- langchain_core/tracers/evaluation.py +22 -26
- langchain_core/tracers/event_stream.py +97 -42
- langchain_core/tracers/langchain.py +12 -3
- langchain_core/tracers/langchain_v1.py +10 -2
- langchain_core/tracers/log_stream.py +56 -17
- langchain_core/tracers/root_listeners.py +4 -20
- langchain_core/tracers/run_collector.py +6 -16
- langchain_core/tracers/schemas.py +5 -1
- langchain_core/utils/aiter.py +14 -6
- langchain_core/utils/env.py +3 -0
- langchain_core/utils/function_calling.py +46 -20
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +12 -5
- langchain_core/utils/json.py +12 -3
- langchain_core/utils/json_schema.py +156 -40
- langchain_core/utils/loading.py +5 -1
- langchain_core/utils/mustache.py +25 -16
- langchain_core/utils/pydantic.py +38 -9
- langchain_core/utils/utils.py +25 -9
- langchain_core/vectorstores/base.py +7 -20
- langchain_core/vectorstores/in_memory.py +20 -14
- langchain_core/vectorstores/utils.py +18 -12
- langchain_core/version.py +1 -1
- langchain_core-1.0.0a4.dist-info/METADATA +77 -0
- langchain_core-1.0.0a4.dist-info/RECORD +181 -0
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -448
- langchain_core-1.0.0a2.dist-info/METADATA +0 -106
- langchain_core-1.0.0a2.dist-info/RECORD +0 -184
- {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a4.dist-info}/WHEEL +0 -0
- {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a4.dist-info}/entry_points.txt +0 -0
langchain_core/output_parsers/xml.py
CHANGED
@@ -15,6 +15,14 @@ from langchain_core.messages import BaseMessage
 from langchain_core.output_parsers.transform import BaseTransformOutputParser
 from langchain_core.runnables.utils import AddableDict
 
+try:
+    from defusedxml import ElementTree  # type: ignore[import-untyped]
+    from defusedxml.ElementTree import XMLParser  # type: ignore[import-untyped]
+
+    _HAS_DEFUSEDXML = True
+except ImportError:
+    _HAS_DEFUSEDXML = False
+
 XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file.
 1. Output should conform to the tags below.
 2. If tags are not given, make them on your own.
@@ -50,17 +58,13 @@ class _StreamingParser:
             parser is requested.
         """
         if parser == "defusedxml":
-            try:
-                from defusedxml.ElementTree import (  # type: ignore[import-untyped]
-                    XMLParser,
-                )
-            except ImportError as e:
+            if not _HAS_DEFUSEDXML:
                 msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml` "
                 )
-                raise ImportError(msg) from e
+                raise ImportError(msg)
             parser_ = XMLParser(target=TreeBuilder())
         else:
             parser_ = None
@@ -136,9 +140,6 @@ class _StreamingParser:
         """Close the parser.
 
         This should be called after all chunks have been parsed.
-
-        Raises:
-            xml.etree.ElementTree.ParseError: If the XML is not well-formed.
         """
         # Ignore ParseError. This will ignore any incomplete XML at the end of the input
         with contextlib.suppress(xml.etree.ElementTree.ParseError):
@@ -154,14 +155,15 @@ class XMLOutputParser(BaseTransformOutputParser):
     Note this may not be perfect depending on the LLM implementation.
 
     For example, with tags=["foo", "bar", "baz"]:
-    1. A well-formatted XML instance:
-    "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"
 
-
-
+    1. A well-formatted XML instance:
+       "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"
+
+    2. A badly-formatted XML instance (missing closing tag for 'bar'):
+       "<foo>\n <bar>\n </foo>"
 
-
-
+    3. A badly-formatted XML instance (unexpected 'tag' element):
+       "<foo>\n <tag>\n </tag>\n</foo>"
     """
     encoding_matcher: re.Pattern = re.compile(
         r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL
@@ -209,16 +211,14 @@
         # Imports are temporarily placed here to avoid issue with caching on CI
        # likely if you're reading this you can move them to the top of the file
         if self.parser == "defusedxml":
-            try:
-                from defusedxml import ElementTree  # type: ignore[import-untyped]
-            except ImportError as e:
+            if not _HAS_DEFUSEDXML:
                 msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml`"
                     "See https://github.com/tiran/defusedxml for more details"
                 )
-                raise ImportError(msg) from e
+                raise ImportError(msg)
             et = ElementTree  # Use the defusedxml parser
         else:
             et = ET  # Use the standard library parser
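Taken together, the xml.py hunks move the optional defusedxml import from each call site to a single module-level probe, with call sites branching on a flag. A minimal standalone sketch of that guard pattern (an illustration under the same assumptions, not the langchain_core module itself):

# Probe for the optional dependency once at import time.
try:
    from defusedxml.ElementTree import fromstring  # type: ignore[import-untyped]

    _HAS_DEFUSEDXML = True
except ImportError:
    _HAS_DEFUSEDXML = False


def parse_untrusted(xml_text: str):
    """Parse XML from an untrusted source, failing loudly if the safe parser is absent."""
    if not _HAS_DEFUSEDXML:
        msg = (
            "defusedxml is not installed. "
            "You can install it with `pip install defusedxml`."
        )
        raise ImportError(msg)
    return fromstring(xml_text)

This keeps the import cost and the failure mode in one place, and it avoids re-raising from inside an `except ImportError` block at every call site.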
langchain_core/outputs/chat_generation.py
CHANGED
@@ -47,9 +47,6 @@ class ChatGeneration(Generation):
 
         Returns:
             The values of the object with the text attribute set.
-
-        Raises:
-            ValueError: If the message is not a string or a list.
         """
         text = ""
         if isinstance(self.message.content, str):
@@ -83,11 +80,18 @@ class ChatGenerationChunk(ChatGeneration):
     def __add__(
         self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
     ) -> ChatGenerationChunk:
-        """Concatenate two ChatGenerationChunks.
+        """Concatenate two ``ChatGenerationChunk``s.
 
         Args:
-            other: The other ChatGenerationChunk or list of
-                ChatGenerationChunks to concatenate.
+            other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``
+                to concatenate.
+
+        Raises:
+            TypeError: If other is not a ``ChatGenerationChunk`` or list of
+                ``ChatGenerationChunk``.
+
+        Returns:
+            A new ``ChatGenerationChunk`` concatenated from self and other.
         """
         if isinstance(other, ChatGenerationChunk):
             generation_info = merge_dicts(
@@ -116,7 +120,14 @@ class ChatGenerationChunk(ChatGeneration):
 def merge_chat_generation_chunks(
     chunks: list[ChatGenerationChunk],
 ) -> Union[ChatGenerationChunk, None]:
-    """Merge a list of ChatGenerationChunks into a single ChatGenerationChunk.
+    """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.
+
+    Args:
+        chunks: A list of ``ChatGenerationChunk`` to merge.
+
+    Returns:
+        A merged ``ChatGenerationChunk``, or None if the input list is empty.
+    """
     if not chunks:
         return None
 
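A short usage sketch of the concatenation semantics these docstrings now spell out (assumes langchain-core is installed; `merge_chat_generation_chunks` lives in `langchain_core.outputs.chat_generation` per this diff):

from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk
from langchain_core.outputs.chat_generation import merge_chat_generation_chunks

chunks = [
    ChatGenerationChunk(message=AIMessageChunk(content="Hello, ")),
    ChatGenerationChunk(message=AIMessageChunk(content="world!")),
]

# __add__ concatenates two chunks (a list of chunks also works).
merged = chunks[0] + chunks[1]
assert merged.text == "Hello, world!"

# merge_chat_generation_chunks returns None for an empty list.
assert merge_chat_generation_chunks([]) is None
assert merge_chat_generation_chunks(chunks).text == "Hello, world!"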
langchain_core/outputs/generation.py
CHANGED
@@ -39,14 +39,15 @@ class Generation(Serializable):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable."""
+        """Return True as this class is serializable."""
         return True
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.
 
-
+        Returns:
+            ``["langchain", "schema", "output"]``
         """
         return ["langchain", "schema", "output"]
 
@@ -55,7 +56,17 @@ class GenerationChunk(Generation):
     """Generation chunk, which can be concatenated with other Generation chunks."""
 
     def __add__(self, other: GenerationChunk) -> GenerationChunk:
-        """Concatenate two GenerationChunks."""
+        """Concatenate two ``GenerationChunk``s.
+
+        Args:
+            other: Another ``GenerationChunk`` to concatenate with.
+
+        Raises:
+            TypeError: If other is not a ``GenerationChunk``.
+
+        Returns:
+            A new ``GenerationChunk`` concatenated from self and other.
+        """
         if isinstance(other, GenerationChunk):
             generation_info = merge_dicts(
                 self.generation_info or {},
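The same pattern for plain GenerationChunk, exercising the Raises/Returns behavior the new docstring documents (a sketch, assuming langchain-core is installed):

from langchain_core.outputs import GenerationChunk

a = GenerationChunk(text="foo", generation_info={"finish_reason": None})
b = GenerationChunk(text="bar", generation_info={"finish_reason": "stop"})

combined = a + b  # text concatenates; generation_info merges via merge_dicts
assert combined.text == "foobar"

try:
    a + "not a chunk"  # type: ignore[operator]
except TypeError:
    pass  # TypeError, as the updated docstring documents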
langchain_core/outputs/llm_result.py
CHANGED
@@ -91,7 +91,14 @@ class LLMResult(BaseModel):
         return llm_results
 
     def __eq__(self, other: object) -> bool:
-        """Check for LLMResult equality by ignoring any metadata related to runs."""
+        """Check for ``LLMResult`` equality by ignoring any metadata related to runs.
+
+        Args:
+            other: Another ``LLMResult`` object to compare against.
+
+        Returns:
+            True if the generations and ``llm_output`` are equal, False otherwise.
+        """
         if not isinstance(other, LLMResult):
             return NotImplemented
         return (
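A sketch of the `__eq__` contract described above: equality compares generations and `llm_output` while ignoring per-run metadata (assumes langchain-core is installed):

from uuid import uuid4

from langchain_core.outputs import Generation, LLMResult, RunInfo

left = LLMResult(
    generations=[[Generation(text="hi")]],
    run=[RunInfo(run_id=uuid4())],
)
right = LLMResult(
    generations=[[Generation(text="hi")]],
    run=[RunInfo(run_id=uuid4())],  # different run metadata
)
assert left == right  # run info is ignored by __eq__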
langchain_core/prompt_values.py
CHANGED
@@ -30,7 +30,7 @@ class PromptValue(Serializable, ABC):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable."""
+        """Return True as this class is serializable."""
         return True
 
     @classmethod
@@ -38,7 +38,9 @@ class PromptValue(Serializable, ABC):
         """Get the namespace of the langchain object.
 
         This is used to determine the namespace of the object when serializing.
-
+
+        Returns:
+            ``["langchain", "schema", "prompt"]``
         """
         return ["langchain", "schema", "prompt"]
 
@@ -63,7 +65,9 @@ class StringPromptValue(PromptValue):
         """Get the namespace of the langchain object.
 
         This is used to determine the namespace of the object when serializing.
-
+
+        Returns:
+            ``["langchain", "prompts", "base"]``
         """
         return ["langchain", "prompts", "base"]
 
@@ -98,7 +102,9 @@ class ChatPromptValue(PromptValue):
         """Get the namespace of the langchain object.
 
         This is used to determine the namespace of the object when serializing.
-
+
+        Returns:
+            ``["langchain", "prompts", "chat"]``
         """
         return ["langchain", "prompts", "chat"]
 
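The namespaces now spelled out in these Returns: sections are what the serializer embeds in the `id` of a dumped object. A quick check (a sketch, assuming the usual shape of `dumpd` output: namespace plus class name):

from langchain_core.load import dumpd
from langchain_core.prompt_values import StringPromptValue

value = StringPromptValue(text="hello")
serialized = dumpd(value)
# get_lc_namespace() + class name form the serialized "id" path.
assert serialized["id"] == ["langchain", "prompts", "base", "StringPromptValue"]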
langchain_core/prompts/base.py
CHANGED
@@ -101,16 +101,14 @@ class BasePromptTemplate(
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.
 
-        Returns ["langchain", "schema", "prompt_template"].
+        Returns:
+            ``["langchain", "schema", "prompt_template"]``
         """
         return ["langchain", "schema", "prompt_template"]
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable.
-
-        Returns True.
-        """
+        """Return True as this class is serializable."""
         return True
 
     model_config = ConfigDict(
@@ -212,7 +210,7 @@ class BasePromptTemplate(
         if self.metadata:
             config["metadata"] = {**config["metadata"], **self.metadata}
         if self.tags:
-            config["tags"] = config["tags"] + self.tags
+            config["tags"] += self.tags
         return self._call_with_config(
             self._format_prompt_with_error_handling,
             input,
@@ -341,9 +339,6 @@ class BasePromptTemplate(
 
         Returns:
             Dict: Dictionary representation of the prompt.
-
-        Raises:
-            NotImplementedError: If the prompt type is not implemented.
         """
         prompt_dict = super().model_dump(**kwargs)
         with contextlib.suppress(NotImplementedError):
@@ -384,10 +379,10 @@ class BasePromptTemplate(
         directory_path.mkdir(parents=True, exist_ok=True)
 
         if save_path.suffix == ".json":
-            with save_path.open("w") as f:
+            with save_path.open("w", encoding="utf-8") as f:
                 json.dump(prompt_dict, f, indent=4)
         elif save_path.suffix.endswith((".yaml", ".yml")):
-            with save_path.open("w") as f:
+            with save_path.open("w", encoding="utf-8") as f:
                 yaml.dump(prompt_dict, f, default_flow_style=False)
         else:
             msg = f"{save_path} must be json or yaml"
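The `encoding="utf-8"` change in `save` matters on platforms whose default locale encoding is not UTF-8 (notably Windows), where non-ASCII template text previously depended on the system default. A minimal round-trip sketch (assuming `load_prompt` reads the file back as UTF-8 as well):

from langchain_core.prompts import PromptTemplate, load_prompt

prompt = PromptTemplate.from_template("Résumé the topic: {topic}")
prompt.save("prompt.yaml")  # now always written as UTF-8
reloaded = load_prompt("prompt.yaml")
assert reloaded.format(topic="naïveté") == "Résumé the topic: naïveté"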
langchain_core/prompts/chat.py
CHANGED
@@ -67,10 +67,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
             from langchain_core.prompts import MessagesPlaceholder
 
             prompt = MessagesPlaceholder("history")
-            prompt.format_messages()
+            prompt.format_messages()  # raises KeyError
 
             prompt = MessagesPlaceholder("history", optional=True)
-            prompt.format_messages()
+            prompt.format_messages()  # returns empty list []
 
             prompt.format_messages(
                 history=[
@@ -93,14 +93,14 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
                 [
                     ("system", "You are a helpful assistant."),
                     MessagesPlaceholder("history"),
-                    ("human", "{question}")
+                    ("human", "{question}"),
                 ]
             )
             prompt.invoke(
-               {
-                   "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
-                   "question": "now multiply that by 4",
-               }
+                {
+                    "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
+                    "question": "now multiply that by 4",
+                }
             )
             # -> ChatPromptValue(messages=[
             #     SystemMessage(content="You are a helpful assistant."),
@@ -543,8 +543,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
         Returns:
             A new instance of this class.
         """
-        template = Path(template_file).read_text()
-        # TODO: .read_text(encoding="utf-8") for v0.4
+        template = Path(template_file).read_text(encoding="utf-8")
         return cls.from_template(template, input_variables=input_variables, **kwargs)
 
     def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
@@ -740,10 +739,18 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
 
     @abstractmethod
     def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
-        """Format kwargs into a list of messages."""
+        """Format kwargs into a list of messages.
+
+        Returns:
+            List of messages.
+        """
 
     async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
-        """Async format kwargs into a list of messages."""
+        """Async format kwargs into a list of messages.
+
+        Returns:
+            List of messages.
+        """
         return self.format_messages(**kwargs)
 
     def pretty_repr(
@@ -795,18 +802,17 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
             from langchain_core.prompts import ChatPromptTemplate
 
-            template = ChatPromptTemplate([
-                ("system", "You are a helpful AI bot. Your name is {name}."),
-                ("human", "Hello, how are you doing?"),
-                ("ai", "I'm doing well, thanks!"),
-                ("human", "{user_input}"),
-            ])
+            template = ChatPromptTemplate(
+                [
+                    ("system", "You are a helpful AI bot. Your name is {name}."),
+                    ("human", "Hello, how are you doing?"),
+                    ("ai", "I'm doing well, thanks!"),
+                    ("human", "{user_input}"),
+                ]
+            )
 
             prompt_value = template.invoke(
-                {
-                    "name": "Bob",
-                    "user_input": "What is your name?"
-                }
+                {"name": "Bob", "user_input": "What is your name?"}
             )
             # Output:
             # ChatPromptValue(
@@ -816,7 +822,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             #     AIMessage(content="I'm doing well, thanks!"),
             #     HumanMessage(content='What is your name?')
             # ]
-            #)
+            # )
 
         Messages Placeholder:
 
@@ -826,14 +832,16 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             # you can initialize the template with a MessagesPlaceholder
             # either using the class directly or with the shorthand tuple syntax:
 
-            template = ChatPromptTemplate([
-                ("system", "You are a helpful AI bot."),
-                # Means the template will receive an optional list of messages under
-                # the "conversation" key
-                ("placeholder", "{conversation}")
-                # Equivalently:
-                # MessagesPlaceholder(variable_name="conversation", optional=True)
-            ])
+            template = ChatPromptTemplate(
+                [
+                    ("system", "You are a helpful AI bot."),
+                    # Means the template will receive an optional list of messages under
+                    # the "conversation" key
+                    ("placeholder", "{conversation}"),
+                    # Equivalently:
+                    # MessagesPlaceholder(variable_name="conversation", optional=True)
+                ]
+            )
 
             prompt_value = template.invoke(
                 {
@@ -841,7 +849,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                     ("human", "Hi!"),
                     ("ai", "How can I assist you today?"),
                     ("human", "Can you make me an ice cream sundae?"),
-                    ("ai", "No.")
+                    ("ai", "No."),
                 ]
             }
         )
@@ -855,7 +863,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             #     HumanMessage(content='Can you make me an ice cream sundae?'),
             #     AIMessage(content='No.'),
             # ]
-            #)
+            # )
 
         Single-variable template:
 
@@ -868,10 +876,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
             from langchain_core.prompts import ChatPromptTemplate
 
-            template = ChatPromptTemplate([
-                ("system", "You are a helpful AI bot. Your name is Carl."),
-                ("human", "{user_input}"),
-            ])
+            template = ChatPromptTemplate(
+                [
+                    ("system", "You are a helpful AI bot. Your name is Carl."),
+                    ("human", "{user_input}"),
+                ]
+            )
 
             prompt_value = template.invoke("Hello, there!")
             # Equivalent to
@@ -922,28 +932,29 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             input_types: A dictionary of the types of the variables the prompt template
                 expects. If not provided, all variables are assumed to be strings.
 
-        Returns:
-            A chat prompt template.
-
         Examples:
             Instantiation from a list of message templates:
 
             .. code-block:: python
 
-                template = ChatPromptTemplate([
-                    ("human", "Hello, how are you?"),
-                    ("ai", "I'm doing well, thanks!"),
-                    ("human", "That's good to hear."),
-                ])
+                template = ChatPromptTemplate(
+                    [
+                        ("human", "Hello, how are you?"),
+                        ("ai", "I'm doing well, thanks!"),
+                        ("human", "That's good to hear."),
+                    ]
+                )
 
             Instantiation from mixed message formats:
 
             .. code-block:: python
 
-                template = ChatPromptTemplate([
-                    SystemMessage(content="hello"),
-                    ("human", "Hello, how are you?"),
-                ])
+                template = ChatPromptTemplate(
+                    [
+                        SystemMessage(content="hello"),
+                        ("human", "Hello, how are you?"),
+                    ]
+                )
 
         """
         messages_ = [
@@ -974,7 +985,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object."""
+        """Get the namespace of the langchain object.
+
+        Returns:
+            ``["langchain", "prompts", "chat"]``
+        """
         return ["langchain", "prompts", "chat"]
 
     def __add__(self, other: Any) -> ChatPromptTemplate:
@@ -1137,20 +1152,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
 
         .. code-block:: python
 
-            template = ChatPromptTemplate.from_messages([
-                ("human", "Hello, how are you?"),
-                ("ai", "I'm doing well, thanks!"),
-                ("human", "That's good to hear."),
-            ])
+            template = ChatPromptTemplate.from_messages(
+                [
+                    ("human", "Hello, how are you?"),
+                    ("ai", "I'm doing well, thanks!"),
+                    ("human", "That's good to hear."),
+                ]
+            )
 
         Instantiation from mixed message formats:
 
         .. code-block:: python
 
-            template = ChatPromptTemplate.from_messages([
-                SystemMessage(content="hello"),
-                ("human", "Hello, how are you?"),
-            ])
+            template = ChatPromptTemplate.from_messages(
+                [
+                    SystemMessage(content="hello"),
+                    ("human", "Hello, how are you?"),
+                ]
+            )
 
         Args:
             messages: sequence of message representations.
@@ -1174,6 +1193,9 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             **kwargs: keyword arguments to use for filling in template variables
                 in all the template messages in this chat template.
 
+        Raises:
+            ValueError: if messages are of unexpected types.
+
         Returns:
             list of formatted messages.
         """
@@ -1284,7 +1306,13 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
     def __getitem__(
         self, index: Union[int, slice]
     ) -> Union[MessageLike, ChatPromptTemplate]:
-        """Use to index into the chat template."""
+        """Use to index into the chat template.
+
+        Returns:
+            If index is an int, returns the message at that index.
+            If index is a slice, returns a new ``ChatPromptTemplate``
+            containing the messages in that slice.
+        """
         if isinstance(index, slice):
            start, stop, step = index.indices(len(self.messages))
            messages = self.messages[start:stop:step]
@@ -1292,7 +1320,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
         return self.messages[index]
 
     def __len__(self) -> int:
-        """Get the length of the chat template."""
+        """Return the length of the chat template."""
         return len(self.messages)
 
     @property
langchain_core/prompts/dict.py
CHANGED
@@ -31,18 +31,25 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
         return _get_input_variables(self.template, self.template_format)
 
     def format(self, **kwargs: Any) -> dict[str, Any]:
-        """Format the prompt with the inputs."""
+        """Format the prompt with the inputs.
+
+        Returns:
+            A formatted dict.
+        """
         return _insert_input_variables(self.template, kwargs, self.template_format)
 
     async def aformat(self, **kwargs: Any) -> dict[str, Any]:
-        """Format the prompt with the inputs."""
+        """Format the prompt with the inputs.
+
+        Returns:
+            A formatted dict.
+        """
         return self.format(**kwargs)
 
     @override
     def invoke(
         self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
     ) -> dict:
-        """Invoke the prompt."""
         return self._call_with_config(
             lambda x: self.format(**x),
             input,
@@ -62,15 +69,16 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable.
-
-        Returns: True.
-        """
+        """Return True as this class is serializable."""
         return True
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object."""
+        """Get the namespace of the langchain object.
+
+        Returns:
+            ``["langchain_core", "prompts", "dict"]``
+        """
         return ["langchain_core", "prompts", "dict"]
 
     def pretty_repr(self, *, html: bool = False) -> str:
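Finally, a small sketch of `DictPromptTemplate.format` as documented above (a sketch under assumptions: `template_format` is passed explicitly, and string leaves of the template dict are assumed to be formatted in place):

from langchain_core.prompts.dict import DictPromptTemplate

prompt = DictPromptTemplate(
    template={"type": "text", "text": "Hello, {name}!"},
    template_format="f-string",
)
assert prompt.format(name="Bob") == {"type": "text", "text": "Hello, Bob!"}
assert prompt.invoke({"name": "Bob"}) == {"type": "text", "text": "Hello, Bob!"}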