langchain-core 1.0.0a1__py3-none-any.whl → 1.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (131)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +28 -4
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +103 -29
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +7 -17
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +16 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +3 -1
  37. langchain_core/messages/block_translators/bedrock.py +3 -1
  38. langchain_core/messages/block_translators/bedrock_converse.py +3 -1
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/langchain_v0.py +3 -136
  43. langchain_core/messages/block_translators/ollama.py +3 -1
  44. langchain_core/messages/block_translators/openai.py +252 -10
  45. langchain_core/messages/content.py +26 -124
  46. langchain_core/messages/human.py +2 -13
  47. langchain_core/messages/system.py +2 -6
  48. langchain_core/messages/tool.py +34 -14
  49. langchain_core/messages/utils.py +189 -74
  50. langchain_core/output_parsers/base.py +5 -2
  51. langchain_core/output_parsers/json.py +4 -4
  52. langchain_core/output_parsers/list.py +7 -22
  53. langchain_core/output_parsers/openai_functions.py +3 -0
  54. langchain_core/output_parsers/openai_tools.py +6 -1
  55. langchain_core/output_parsers/pydantic.py +4 -0
  56. langchain_core/output_parsers/string.py +5 -1
  57. langchain_core/output_parsers/xml.py +19 -19
  58. langchain_core/outputs/chat_generation.py +18 -7
  59. langchain_core/outputs/generation.py +14 -3
  60. langchain_core/outputs/llm_result.py +8 -1
  61. langchain_core/prompt_values.py +10 -4
  62. langchain_core/prompts/base.py +6 -11
  63. langchain_core/prompts/chat.py +88 -60
  64. langchain_core/prompts/dict.py +16 -8
  65. langchain_core/prompts/few_shot.py +9 -11
  66. langchain_core/prompts/few_shot_with_templates.py +5 -1
  67. langchain_core/prompts/image.py +12 -5
  68. langchain_core/prompts/loading.py +2 -2
  69. langchain_core/prompts/message.py +5 -6
  70. langchain_core/prompts/pipeline.py +13 -8
  71. langchain_core/prompts/prompt.py +22 -8
  72. langchain_core/prompts/string.py +18 -10
  73. langchain_core/prompts/structured.py +7 -2
  74. langchain_core/rate_limiters.py +2 -2
  75. langchain_core/retrievers.py +7 -6
  76. langchain_core/runnables/base.py +387 -246
  77. langchain_core/runnables/branch.py +11 -28
  78. langchain_core/runnables/config.py +20 -17
  79. langchain_core/runnables/configurable.py +34 -19
  80. langchain_core/runnables/fallbacks.py +20 -13
  81. langchain_core/runnables/graph.py +48 -38
  82. langchain_core/runnables/graph_ascii.py +40 -17
  83. langchain_core/runnables/graph_mermaid.py +54 -25
  84. langchain_core/runnables/graph_png.py +27 -31
  85. langchain_core/runnables/history.py +55 -58
  86. langchain_core/runnables/passthrough.py +44 -21
  87. langchain_core/runnables/retry.py +44 -23
  88. langchain_core/runnables/router.py +9 -8
  89. langchain_core/runnables/schema.py +9 -0
  90. langchain_core/runnables/utils.py +53 -90
  91. langchain_core/stores.py +19 -31
  92. langchain_core/sys_info.py +9 -8
  93. langchain_core/tools/base.py +36 -27
  94. langchain_core/tools/convert.py +25 -14
  95. langchain_core/tools/simple.py +36 -8
  96. langchain_core/tools/structured.py +25 -12
  97. langchain_core/tracers/base.py +2 -2
  98. langchain_core/tracers/context.py +5 -1
  99. langchain_core/tracers/core.py +110 -46
  100. langchain_core/tracers/evaluation.py +22 -26
  101. langchain_core/tracers/event_stream.py +97 -42
  102. langchain_core/tracers/langchain.py +12 -3
  103. langchain_core/tracers/langchain_v1.py +10 -2
  104. langchain_core/tracers/log_stream.py +56 -17
  105. langchain_core/tracers/root_listeners.py +4 -20
  106. langchain_core/tracers/run_collector.py +6 -16
  107. langchain_core/tracers/schemas.py +5 -1
  108. langchain_core/utils/aiter.py +14 -6
  109. langchain_core/utils/env.py +3 -0
  110. langchain_core/utils/function_calling.py +46 -20
  111. langchain_core/utils/interactive_env.py +6 -2
  112. langchain_core/utils/iter.py +12 -5
  113. langchain_core/utils/json.py +12 -3
  114. langchain_core/utils/json_schema.py +156 -40
  115. langchain_core/utils/loading.py +5 -1
  116. langchain_core/utils/mustache.py +25 -16
  117. langchain_core/utils/pydantic.py +38 -9
  118. langchain_core/utils/utils.py +25 -9
  119. langchain_core/vectorstores/base.py +7 -20
  120. langchain_core/vectorstores/in_memory.py +20 -14
  121. langchain_core/vectorstores/utils.py +18 -12
  122. langchain_core/version.py +1 -1
  123. langchain_core-1.0.0a3.dist-info/METADATA +77 -0
  124. langchain_core-1.0.0a3.dist-info/RECORD +181 -0
  125. langchain_core/beta/__init__.py +0 -1
  126. langchain_core/beta/runnables/__init__.py +0 -1
  127. langchain_core/beta/runnables/context.py +0 -448
  128. langchain_core-1.0.0a1.dist-info/METADATA +0 -106
  129. langchain_core-1.0.0a1.dist-info/RECORD +0 -184
  130. {langchain_core-1.0.0a1.dist-info → langchain_core-1.0.0a3.dist-info}/WHEEL +0 -0
  131. {langchain_core-1.0.0a1.dist-info → langchain_core-1.0.0a3.dist-info}/entry_points.txt +0 -0
@@ -15,6 +15,14 @@ from langchain_core.messages import BaseMessage
15
15
  from langchain_core.output_parsers.transform import BaseTransformOutputParser
16
16
  from langchain_core.runnables.utils import AddableDict
17
17
 
18
+ try:
19
+ from defusedxml import ElementTree # type: ignore[import-untyped]
20
+ from defusedxml.ElementTree import XMLParser # type: ignore[import-untyped]
21
+
22
+ _HAS_DEFUSEDXML = True
23
+ except ImportError:
24
+ _HAS_DEFUSEDXML = False
25
+
18
26
  XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file.
19
27
  1. Output should conform to the tags below.
20
28
  2. If tags are not given, make them on your own.
@@ -50,17 +58,13 @@ class _StreamingParser:
50
58
  parser is requested.
51
59
  """
52
60
  if parser == "defusedxml":
53
- try:
54
- from defusedxml.ElementTree import ( # type: ignore[import-untyped]
55
- XMLParser,
56
- )
57
- except ImportError as e:
61
+ if not _HAS_DEFUSEDXML:
58
62
  msg = (
59
63
  "defusedxml is not installed. "
60
64
  "Please install it to use the defusedxml parser."
61
65
  "You can install it with `pip install defusedxml` "
62
66
  )
63
- raise ImportError(msg) from e
67
+ raise ImportError(msg)
64
68
  parser_ = XMLParser(target=TreeBuilder())
65
69
  else:
66
70
  parser_ = None
@@ -136,9 +140,6 @@ class _StreamingParser:
136
140
  """Close the parser.
137
141
 
138
142
  This should be called after all chunks have been parsed.
139
-
140
- Raises:
141
- xml.etree.ElementTree.ParseError: If the XML is not well-formed.
142
143
  """
143
144
  # Ignore ParseError. This will ignore any incomplete XML at the end of the input
144
145
  with contextlib.suppress(xml.etree.ElementTree.ParseError):
@@ -154,14 +155,15 @@ class XMLOutputParser(BaseTransformOutputParser):
154
155
  Note this may not be perfect depending on the LLM implementation.
155
156
 
156
157
  For example, with tags=["foo", "bar", "baz"]:
157
- 1. A well-formatted XML instance:
158
- "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"
159
158
 
160
- 2. A badly-formatted XML instance (missing closing tag for 'bar'):
161
- "<foo>\n <bar>\n </foo>"
159
+ 1. A well-formatted XML instance:
160
+ "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"
161
+
162
+ 2. A badly-formatted XML instance (missing closing tag for 'bar'):
163
+ "<foo>\n <bar>\n </foo>"
162
164
 
163
- 3. A badly-formatted XML instance (unexpected 'tag' element):
164
- "<foo>\n <tag>\n </tag>\n</foo>"
165
+ 3. A badly-formatted XML instance (unexpected 'tag' element):
166
+ "<foo>\n <tag>\n </tag>\n</foo>"
165
167
  """
166
168
  encoding_matcher: re.Pattern = re.compile(
167
169
  r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL
@@ -209,16 +211,14 @@ class XMLOutputParser(BaseTransformOutputParser):
209
211
  # Imports are temporarily placed here to avoid issue with caching on CI
210
212
  # likely if you're reading this you can move them to the top of the file
211
213
  if self.parser == "defusedxml":
212
- try:
213
- from defusedxml import ElementTree # type: ignore[import-untyped]
214
- except ImportError as e:
214
+ if not _HAS_DEFUSEDXML:
215
215
  msg = (
216
216
  "defusedxml is not installed. "
217
217
  "Please install it to use the defusedxml parser."
218
218
  "You can install it with `pip install defusedxml`"
219
219
  "See https://github.com/tiran/defusedxml for more details"
220
220
  )
221
- raise ImportError(msg) from e
221
+ raise ImportError(msg)
222
222
  et = ElementTree # Use the defusedxml parser
223
223
  else:
224
224
  et = ET # Use the standard library parser
@@ -47,9 +47,6 @@ class ChatGeneration(Generation):
47
47
 
48
48
  Returns:
49
49
  The values of the object with the text attribute set.
50
-
51
- Raises:
52
- ValueError: If the message is not a string or a list.
53
50
  """
54
51
  text = ""
55
52
  if isinstance(self.message.content, str):
@@ -83,11 +80,18 @@ class ChatGenerationChunk(ChatGeneration):
83
80
  def __add__(
84
81
  self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
85
82
  ) -> ChatGenerationChunk:
86
- """Concatenate two ChatGenerationChunks.
83
+ """Concatenate two ``ChatGenerationChunk``s.
87
84
 
88
85
  Args:
89
- other: The other ChatGenerationChunk or list of ChatGenerationChunks to
90
- concatenate.
86
+ other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``
87
+ to concatenate.
88
+
89
+ Raises:
90
+ TypeError: If other is not a ``ChatGenerationChunk`` or list of
91
+ ``ChatGenerationChunk``.
92
+
93
+ Returns:
94
+ A new ``ChatGenerationChunk`` concatenated from self and other.
91
95
  """
92
96
  if isinstance(other, ChatGenerationChunk):
93
97
  generation_info = merge_dicts(
@@ -116,7 +120,14 @@ class ChatGenerationChunk(ChatGeneration):
116
120
  def merge_chat_generation_chunks(
117
121
  chunks: list[ChatGenerationChunk],
118
122
  ) -> Union[ChatGenerationChunk, None]:
119
- """Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
123
+ """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.
124
+
125
+ Args:
126
+ chunks: A list of ``ChatGenerationChunk`` to merge.
127
+
128
+ Returns:
129
+ A merged ``ChatGenerationChunk``, or None if the input list is empty.
130
+ """
120
131
  if not chunks:
121
132
  return None
122
133
 
@@ -39,14 +39,15 @@ class Generation(Serializable):
39
39
 
40
40
  @classmethod
41
41
  def is_lc_serializable(cls) -> bool:
42
- """Return whether this class is serializable."""
42
+ """Return True as this class is serializable."""
43
43
  return True
44
44
 
45
45
  @classmethod
46
46
  def get_lc_namespace(cls) -> list[str]:
47
47
  """Get the namespace of the langchain object.
48
48
 
49
- Default namespace is ["langchain", "schema", "output"].
49
+ Returns:
50
+ ``["langchain", "schema", "output"]``
50
51
  """
51
52
  return ["langchain", "schema", "output"]
52
53
 
@@ -55,7 +56,17 @@ class GenerationChunk(Generation):
55
56
  """Generation chunk, which can be concatenated with other Generation chunks."""
56
57
 
57
58
  def __add__(self, other: GenerationChunk) -> GenerationChunk:
58
- """Concatenate two GenerationChunks."""
59
+ """Concatenate two ``GenerationChunk``s.
60
+
61
+ Args:
62
+ other: Another ``GenerationChunk`` to concatenate with.
63
+
64
+ Raises:
65
+ TypeError: If other is not a ``GenerationChunk``.
66
+
67
+ Returns:
68
+ A new ``GenerationChunk`` concatenated from self and other.
69
+ """
59
70
  if isinstance(other, GenerationChunk):
60
71
  generation_info = merge_dicts(
61
72
  self.generation_info or {},
@@ -91,7 +91,14 @@ class LLMResult(BaseModel):
91
91
  return llm_results
92
92
 
93
93
  def __eq__(self, other: object) -> bool:
94
- """Check for LLMResult equality by ignoring any metadata related to runs."""
94
+ """Check for ``LLMResult`` equality by ignoring any metadata related to runs.
95
+
96
+ Args:
97
+ other: Another ``LLMResult`` object to compare against.
98
+
99
+ Returns:
100
+ True if the generations and ``llm_output`` are equal, False otherwise.
101
+ """
95
102
  if not isinstance(other, LLMResult):
96
103
  return NotImplemented
97
104
  return (
@@ -30,7 +30,7 @@ class PromptValue(Serializable, ABC):
30
30
 
31
31
  @classmethod
32
32
  def is_lc_serializable(cls) -> bool:
33
- """Return whether this class is serializable. Defaults to True."""
33
+ """Return True as this class is serializable."""
34
34
  return True
35
35
 
36
36
  @classmethod
@@ -38,7 +38,9 @@ class PromptValue(Serializable, ABC):
38
38
  """Get the namespace of the langchain object.
39
39
 
40
40
  This is used to determine the namespace of the object when serializing.
41
- Defaults to ["langchain", "schema", "prompt"].
41
+
42
+ Returns:
43
+ ``["langchain", "schema", "prompt"]``
42
44
  """
43
45
  return ["langchain", "schema", "prompt"]
44
46
 
@@ -63,7 +65,9 @@ class StringPromptValue(PromptValue):
63
65
  """Get the namespace of the langchain object.
64
66
 
65
67
  This is used to determine the namespace of the object when serializing.
66
- Defaults to ["langchain", "prompts", "base"].
68
+
69
+ Returns:
70
+ ``["langchain", "prompts", "base"]``
67
71
  """
68
72
  return ["langchain", "prompts", "base"]
69
73
 
@@ -98,7 +102,9 @@ class ChatPromptValue(PromptValue):
98
102
  """Get the namespace of the langchain object.
99
103
 
100
104
  This is used to determine the namespace of the object when serializing.
101
- Defaults to ["langchain", "prompts", "chat"].
105
+
106
+ Returns:
107
+ ``["langchain", "prompts", "chat"]``
102
108
  """
103
109
  return ["langchain", "prompts", "chat"]
104
110
 
@@ -101,16 +101,14 @@ class BasePromptTemplate(
101
101
  def get_lc_namespace(cls) -> list[str]:
102
102
  """Get the namespace of the langchain object.
103
103
 
104
- Returns ["langchain", "schema", "prompt_template"].
104
+ Returns:
105
+ ``["langchain", "schema", "prompt_template"]``
105
106
  """
106
107
  return ["langchain", "schema", "prompt_template"]
107
108
 
108
109
  @classmethod
109
110
  def is_lc_serializable(cls) -> bool:
110
- """Return whether this class is serializable.
111
-
112
- Returns True.
113
- """
111
+ """Return True as this class is serializable."""
114
112
  return True
115
113
 
116
114
  model_config = ConfigDict(
@@ -212,7 +210,7 @@ class BasePromptTemplate(
212
210
  if self.metadata:
213
211
  config["metadata"] = {**config["metadata"], **self.metadata}
214
212
  if self.tags:
215
- config["tags"] = config["tags"] + self.tags
213
+ config["tags"] += self.tags
216
214
  return self._call_with_config(
217
215
  self._format_prompt_with_error_handling,
218
216
  input,
@@ -341,9 +339,6 @@ class BasePromptTemplate(
341
339
 
342
340
  Returns:
343
341
  Dict: Dictionary representation of the prompt.
344
-
345
- Raises:
346
- NotImplementedError: If the prompt type is not implemented.
347
342
  """
348
343
  prompt_dict = super().model_dump(**kwargs)
349
344
  with contextlib.suppress(NotImplementedError):
@@ -384,10 +379,10 @@ class BasePromptTemplate(
384
379
  directory_path.mkdir(parents=True, exist_ok=True)
385
380
 
386
381
  if save_path.suffix == ".json":
387
- with save_path.open("w") as f:
382
+ with save_path.open("w", encoding="utf-8") as f:
388
383
  json.dump(prompt_dict, f, indent=4)
389
384
  elif save_path.suffix.endswith((".yaml", ".yml")):
390
- with save_path.open("w") as f:
385
+ with save_path.open("w", encoding="utf-8") as f:
391
386
  yaml.dump(prompt_dict, f, default_flow_style=False)
392
387
  else:
393
388
  msg = f"{save_path} must be json or yaml"
@@ -67,10 +67,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
67
67
  from langchain_core.prompts import MessagesPlaceholder
68
68
 
69
69
  prompt = MessagesPlaceholder("history")
70
- prompt.format_messages() # raises KeyError
70
+ prompt.format_messages() # raises KeyError
71
71
 
72
72
  prompt = MessagesPlaceholder("history", optional=True)
73
- prompt.format_messages() # returns empty list []
73
+ prompt.format_messages() # returns empty list []
74
74
 
75
75
  prompt.format_messages(
76
76
  history=[
@@ -93,14 +93,14 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
93
93
  [
94
94
  ("system", "You are a helpful assistant."),
95
95
  MessagesPlaceholder("history"),
96
- ("human", "{question}")
96
+ ("human", "{question}"),
97
97
  ]
98
98
  )
99
99
  prompt.invoke(
100
- {
101
- "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
102
- "question": "now multiply that by 4"
103
- }
100
+ {
101
+ "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
102
+ "question": "now multiply that by 4",
103
+ }
104
104
  )
105
105
  # -> ChatPromptValue(messages=[
106
106
  # SystemMessage(content="You are a helpful assistant."),
@@ -543,8 +543,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
543
543
  Returns:
544
544
  A new instance of this class.
545
545
  """
546
- template = Path(template_file).read_text()
547
- # TODO: .read_text(encoding="utf-8") for v0.4
546
+ template = Path(template_file).read_text(encoding="utf-8")
548
547
  return cls.from_template(template, input_variables=input_variables, **kwargs)
549
548
 
550
549
  def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
@@ -740,10 +739,18 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
740
739
 
741
740
  @abstractmethod
742
741
  def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
743
- """Format kwargs into a list of messages."""
742
+ """Format kwargs into a list of messages.
743
+
744
+ Returns:
745
+ List of messages.
746
+ """
744
747
 
745
748
  async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
746
- """Async format kwargs into a list of messages."""
749
+ """Async format kwargs into a list of messages.
750
+
751
+ Returns:
752
+ List of messages.
753
+ """
747
754
  return self.format_messages(**kwargs)
748
755
 
749
756
  def pretty_repr(
@@ -795,18 +802,17 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
795
802
 
796
803
  from langchain_core.prompts import ChatPromptTemplate
797
804
 
798
- template = ChatPromptTemplate([
799
- ("system", "You are a helpful AI bot. Your name is {name}."),
800
- ("human", "Hello, how are you doing?"),
801
- ("ai", "I'm doing well, thanks!"),
802
- ("human", "{user_input}"),
803
- ])
805
+ template = ChatPromptTemplate(
806
+ [
807
+ ("system", "You are a helpful AI bot. Your name is {name}."),
808
+ ("human", "Hello, how are you doing?"),
809
+ ("ai", "I'm doing well, thanks!"),
810
+ ("human", "{user_input}"),
811
+ ]
812
+ )
804
813
 
805
814
  prompt_value = template.invoke(
806
- {
807
- "name": "Bob",
808
- "user_input": "What is your name?"
809
- }
815
+ {"name": "Bob", "user_input": "What is your name?"}
810
816
  )
811
817
  # Output:
812
818
  # ChatPromptValue(
@@ -816,7 +822,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
816
822
  # AIMessage(content="I'm doing well, thanks!"),
817
823
  # HumanMessage(content='What is your name?')
818
824
  # ]
819
- #)
825
+ # )
820
826
 
821
827
  Messages Placeholder:
822
828
 
@@ -826,14 +832,16 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
826
832
  # you can initialize the template with a MessagesPlaceholder
827
833
  # either using the class directly or with the shorthand tuple syntax:
828
834
 
829
- template = ChatPromptTemplate([
830
- ("system", "You are a helpful AI bot."),
831
- # Means the template will receive an optional list of messages under
832
- # the "conversation" key
833
- ("placeholder", "{conversation}")
834
- # Equivalently:
835
- # MessagesPlaceholder(variable_name="conversation", optional=True)
836
- ])
835
+ template = ChatPromptTemplate(
836
+ [
837
+ ("system", "You are a helpful AI bot."),
838
+ # Means the template will receive an optional list of messages under
839
+ # the "conversation" key
840
+ ("placeholder", "{conversation}"),
841
+ # Equivalently:
842
+ # MessagesPlaceholder(variable_name="conversation", optional=True)
843
+ ]
844
+ )
837
845
 
838
846
  prompt_value = template.invoke(
839
847
  {
@@ -841,7 +849,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
841
849
  ("human", "Hi!"),
842
850
  ("ai", "How can I assist you today?"),
843
851
  ("human", "Can you make me an ice cream sundae?"),
844
- ("ai", "No.")
852
+ ("ai", "No."),
845
853
  ]
846
854
  }
847
855
  )
@@ -855,7 +863,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
855
863
  # HumanMessage(content='Can you make me an ice cream sundae?'),
856
864
  # AIMessage(content='No.'),
857
865
  # ]
858
- #)
866
+ # )
859
867
 
860
868
  Single-variable template:
861
869
 
@@ -868,10 +876,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
868
876
 
869
877
  from langchain_core.prompts import ChatPromptTemplate
870
878
 
871
- template = ChatPromptTemplate([
872
- ("system", "You are a helpful AI bot. Your name is Carl."),
873
- ("human", "{user_input}"),
874
- ])
879
+ template = ChatPromptTemplate(
880
+ [
881
+ ("system", "You are a helpful AI bot. Your name is Carl."),
882
+ ("human", "{user_input}"),
883
+ ]
884
+ )
875
885
 
876
886
  prompt_value = template.invoke("Hello, there!")
877
887
  # Equivalent to
@@ -922,28 +932,29 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
922
932
  input_types: A dictionary of the types of the variables the prompt template
923
933
  expects. If not provided, all variables are assumed to be strings.
924
934
 
925
- Returns:
926
- A chat prompt template.
927
-
928
935
  Examples:
929
936
  Instantiation from a list of message templates:
930
937
 
931
938
  .. code-block:: python
932
939
 
933
- template = ChatPromptTemplate([
934
- ("human", "Hello, how are you?"),
935
- ("ai", "I'm doing well, thanks!"),
936
- ("human", "That's good to hear."),
937
- ])
940
+ template = ChatPromptTemplate(
941
+ [
942
+ ("human", "Hello, how are you?"),
943
+ ("ai", "I'm doing well, thanks!"),
944
+ ("human", "That's good to hear."),
945
+ ]
946
+ )
938
947
 
939
948
  Instantiation from mixed message formats:
940
949
 
941
950
  .. code-block:: python
942
951
 
943
- template = ChatPromptTemplate([
944
- SystemMessage(content="hello"),
945
- ("human", "Hello, how are you?"),
946
- ])
952
+ template = ChatPromptTemplate(
953
+ [
954
+ SystemMessage(content="hello"),
955
+ ("human", "Hello, how are you?"),
956
+ ]
957
+ )
947
958
 
948
959
  """
949
960
  messages_ = [
@@ -974,7 +985,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
974
985
 
975
986
  @classmethod
976
987
  def get_lc_namespace(cls) -> list[str]:
977
- """Get the namespace of the langchain object."""
988
+ """Get the namespace of the langchain object.
989
+
990
+ Returns:
991
+ ``["langchain", "prompts", "chat"]``
992
+ """
978
993
  return ["langchain", "prompts", "chat"]
979
994
 
980
995
  def __add__(self, other: Any) -> ChatPromptTemplate:
@@ -1137,20 +1152,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1137
1152
 
1138
1153
  .. code-block:: python
1139
1154
 
1140
- template = ChatPromptTemplate.from_messages([
1141
- ("human", "Hello, how are you?"),
1142
- ("ai", "I'm doing well, thanks!"),
1143
- ("human", "That's good to hear."),
1144
- ])
1155
+ template = ChatPromptTemplate.from_messages(
1156
+ [
1157
+ ("human", "Hello, how are you?"),
1158
+ ("ai", "I'm doing well, thanks!"),
1159
+ ("human", "That's good to hear."),
1160
+ ]
1161
+ )
1145
1162
 
1146
1163
  Instantiation from mixed message formats:
1147
1164
 
1148
1165
  .. code-block:: python
1149
1166
 
1150
- template = ChatPromptTemplate.from_messages([
1151
- SystemMessage(content="hello"),
1152
- ("human", "Hello, how are you?"),
1153
- ])
1167
+ template = ChatPromptTemplate.from_messages(
1168
+ [
1169
+ SystemMessage(content="hello"),
1170
+ ("human", "Hello, how are you?"),
1171
+ ]
1172
+ )
1154
1173
 
1155
1174
  Args:
1156
1175
  messages: sequence of message representations.
@@ -1174,6 +1193,9 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1174
1193
  **kwargs: keyword arguments to use for filling in template variables
1175
1194
  in all the template messages in this chat template.
1176
1195
 
1196
+ Raises:
1197
+ ValueError: if messages are of unexpected types.
1198
+
1177
1199
  Returns:
1178
1200
  list of formatted messages.
1179
1201
  """
@@ -1284,7 +1306,13 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1284
1306
  def __getitem__(
1285
1307
  self, index: Union[int, slice]
1286
1308
  ) -> Union[MessageLike, ChatPromptTemplate]:
1287
- """Use to index into the chat template."""
1309
+ """Use to index into the chat template.
1310
+
1311
+ Returns:
1312
+ If index is an int, returns the message at that index.
1313
+ If index is a slice, returns a new ``ChatPromptTemplate``
1314
+ containing the messages in that slice.
1315
+ """
1288
1316
  if isinstance(index, slice):
1289
1317
  start, stop, step = index.indices(len(self.messages))
1290
1318
  messages = self.messages[start:stop:step]
@@ -1292,7 +1320,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1292
1320
  return self.messages[index]
1293
1321
 
1294
1322
  def __len__(self) -> int:
1295
- """Get the length of the chat template."""
1323
+ """Return the length of the chat template."""
1296
1324
  return len(self.messages)
1297
1325
 
1298
1326
  @property
@@ -31,18 +31,25 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
31
31
  return _get_input_variables(self.template, self.template_format)
32
32
 
33
33
  def format(self, **kwargs: Any) -> dict[str, Any]:
34
- """Format the prompt with the inputs."""
34
+ """Format the prompt with the inputs.
35
+
36
+ Returns:
37
+ A formatted dict.
38
+ """
35
39
  return _insert_input_variables(self.template, kwargs, self.template_format)
36
40
 
37
41
  async def aformat(self, **kwargs: Any) -> dict[str, Any]:
38
- """Format the prompt with the inputs."""
42
+ """Format the prompt with the inputs.
43
+
44
+ Returns:
45
+ A formatted dict.
46
+ """
39
47
  return self.format(**kwargs)
40
48
 
41
49
  @override
42
50
  def invoke(
43
51
  self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
44
52
  ) -> dict:
45
- """Invoke the prompt."""
46
53
  return self._call_with_config(
47
54
  lambda x: self.format(**x),
48
55
  input,
@@ -62,15 +69,16 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
62
69
 
63
70
  @classmethod
64
71
  def is_lc_serializable(cls) -> bool:
65
- """Return whether or not the class is serializable.
66
-
67
- Returns: True.
68
- """
72
+ """Return True as this class is serializable."""
69
73
  return True
70
74
 
71
75
  @classmethod
72
76
  def get_lc_namespace(cls) -> list[str]:
73
- """Serialization namespace."""
77
+ """Get the namespace of the langchain object.
78
+
79
+ Returns:
80
+ ``["langchain_core", "prompts", "dict"]``
81
+ """
74
82
  return ["langchain_core", "prompts", "dict"]
75
83
 
76
84
  def pretty_repr(self, *, html: bool = False) -> str: